repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tdqn | tdqn-master/drrn/env.py | from os.path import basename
from jericho import *
from jericho.template_action_generator import TemplateActionGenerator
from jericho.util import *
from jericho.defines import *
import redis
def load_vocab_rev(env):
    """Build a word -> id lookup table from the game's dictionary.

    Ids 0 and 1 are reserved for the pad token (' ') and the start token
    ('<s>'); dictionary words are numbered from 2 upward.
    """
    id_to_word = {i + 2: str(w) for i, w in enumerate(env.get_dictionary())}
    id_to_word[0] = ' '
    id_to_word[1] = '<s>'
    return {word: idx for idx, word in id_to_word.items()}
class JerichoEnv:
    ''' Returns valid actions at each step of the game. '''

    def __init__(self, rom_path, seed, step_limit=None):
        self.rom_path = rom_path
        self.bindings = load_bindings(rom_path)
        self.act_gen = TemplateActionGenerator(self.bindings)
        self.seed = seed
        self.steps = 0  # actions taken in the current episode
        self.step_limit = step_limit  # if set, episode is truncated after this many steps
        self.env = None  # FrotzEnv; built lazily by create()
        self.conn = None  # Redis connection used as a valid-action cache
        self.vocab_rev = None  # word -> id map, built in create()

    def create(self):
        # Lazily build the emulator and vocabulary, and flush the Redis DB
        # so cached valid actions from an earlier run cannot leak in.
        self.env = FrotzEnv(self.rom_path, self.seed)
        self.vocab_rev = load_vocab_rev(self.env)
        self.conn = redis.Redis(host='localhost', port=6379, db=0)
        self.conn.flushdb()

    def step(self, action):
        """Take one game action; augment info with 'look', 'inv' and 'valid'."""
        ob, reward, done, info = self.env.step(action)
        # Initialize with default values
        info['look'] = 'unknown'
        info['inv'] = 'unknown'
        info['valid'] = ['wait','yes','no']
        if not done:
            try:
                # Probe 'look' and 'inventory' without disturbing the game:
                # snapshot the emulator state, issue the probe, restore.
                save = self.env.save_str()
                look, _, _, _ = self.env.step('look')
                info['look'] = look
                self.env.load_str(save)
                inv, _, _, _ = self.env.step('inventory')
                info['inv'] = inv
                self.env.load_str(save)
                # Get the valid actions for this state
                world_state_hash = self.env.get_world_state_hash()
                valid = self.conn.get(world_state_hash)
                if valid is None:
                    # Cache miss: ground template actions on the objects
                    # mentioned in the observation, then filter to the valid ones.
                    objs = [o[0] for o in self.env.identify_interactive_objects(ob)]
                    # NOTE(review): assumes every truncated object word exists in
                    # the game dictionary -- a missing word would raise KeyError.
                    obj_ids = [self.vocab_rev[o[:self.bindings['max_word_length']]] for o in objs]
                    acts = self.act_gen.generate_template_actions(objs, obj_ids)
                    valid = self.env.find_valid_actions(acts)
                    redis_valid_value = '/'.join([str(a) for a in valid])
                    self.conn.set(world_state_hash, redis_valid_value)
                    valid = [a.action for a in valid]
                else:
                    valid = valid.decode('cp1252')
                    if valid:
                        # NOTE(review): eval() reconstructs the cached action
                        # objects -- acceptable only because this process wrote
                        # the cache itself; never point this at untrusted Redis.
                        valid = [eval(a).action for a in valid.split('/')]
                    else:
                        valid = []
                if len(valid) == 0:
                    valid = ['wait','yes','no']
                info['valid'] = valid
            except RuntimeError:
                print('RuntimeError: {}, Done: {}, Info: {}'.format(clean(ob), done, info))
        self.steps += 1
        if self.step_limit and self.steps >= self.step_limit:
            done = True
        return ob, reward, done, info

    def reset(self):
        """Start a new episode; returns (initial_observation, info)."""
        initial_ob, info = self.env.reset()
        save = self.env.save_str()
        look, _, _, _ = self.env.step('look')
        info['look'] = look
        self.env.load_str(save)
        inv, _, _, _ = self.env.step('inventory')
        info['inv'] = inv
        self.env.load_str(save)
        objs = [o[0] for o in self.env.identify_interactive_objects(initial_ob)]
        # NOTE(review): uses generate_actions here but generate_template_actions
        # in step() -- confirm this asymmetry is intentional.
        acts = self.act_gen.generate_actions(objs)
        valid = self.env.find_valid_actions(acts)
        info['valid'] = valid
        self.steps = 0
        return initial_ob, info

    def get_dictionary(self):
        # Creates the environment on first use.
        if not self.env:
            self.create()
        return self.env.get_dictionary()

    def get_action_set(self):
        return None

    def close(self):
        self.env.close()
| 3,819 | 36.087379 | 98 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/filters_lowlight.py | import tensorflow as tf
import numpy as np
import tensorflow.contrib.layers as ly
from util_filters import lrelu, rgb2lum, tanh_range, lerp
import cv2
import math
class Filter:
    """Base class for differentiable image filters.

    A concrete filter declares how many parameters it consumes from the
    shared CNN feature vector (num_filter_parameters), how to squash raw
    features into valid parameter ranges (filter_param_regressor), and how
    to transform an image given those parameters (process).
    """

    def __init__(self, net, cfg):
        self.cfg = cfg
        # self.height, self.width, self.channels = list(map(int, net.get_shape()[1:]))

        # Specified in child classes
        self.num_filter_parameters = None
        self.short_name = None
        self.filter_parameters = None

    def get_short_name(self):
        assert self.short_name
        return self.short_name

    def get_num_filter_parameters(self):
        assert self.num_filter_parameters
        return self.num_filter_parameters

    def get_begin_filter_parameter(self):
        # Offset of this filter's parameter block inside the shared features.
        return self.begin_filter_parameter

    def extract_parameters(self, features):
        # Slice this filter's parameter block out of the shared feature vector.
        # NOTE(review): both elements of the returned pair are the same slice;
        # the second ("mask parameters") looks vestigial -- confirm.
        # output_dim = self.get_num_filter_parameters(
        # ) + self.get_num_mask_parameters()
        # features = ly.fully_connected(
        # features,
        # self.cfg.fc1_size,
        # scope='fc1',
        # activation_fn=lrelu,
        # weights_initializer=tf.contrib.layers.xavier_initializer())
        # features = ly.fully_connected(
        # features,
        # output_dim,
        # scope='fc2',
        # activation_fn=None,
        # weights_initializer=tf.contrib.layers.xavier_initializer())
        return features[:, self.get_begin_filter_parameter():(self.get_begin_filter_parameter() + self.get_num_filter_parameters())], \
               features[:, self.get_begin_filter_parameter():(self.get_begin_filter_parameter() + self.get_num_filter_parameters())]

    # Should be implemented in child classes
    def filter_param_regressor(self, features):
        assert False

    # Process the whole image, without masking
    # Should be implemented in child classes
    def process(self, img, param):
        assert False

    def debug_info_batched(self):
        return False

    def no_high_res(self):
        return False

    # Apply the whole filter with masking
    def apply(self,
              img,
              img_features=None,
              specified_parameter=None,
              high_res=None):
        """Apply this filter to img; parameters come either from the CNN
        features or are specified directly (exactly one of the two)."""
        assert (img_features is None) ^ (specified_parameter is None)
        if img_features is not None:
            filter_features, mask_parameters = self.extract_parameters(img_features)
            filter_parameters = self.filter_param_regressor(filter_features)
        else:
            assert not self.use_masking()
            filter_parameters = specified_parameter
            mask_parameters = tf.zeros(
                shape=(1, self.get_num_mask_parameters()), dtype=np.float32)
        if high_res is not None:
            # working on high res...
            pass
        debug_info = {}
        # We only debug the first image of this batch
        if self.debug_info_batched():
            debug_info['filter_parameters'] = filter_parameters
        else:
            debug_info['filter_parameters'] = filter_parameters[0]
        # self.mask_parameters = mask_parameters
        # self.mask = self.get_mask(img, mask_parameters)
        # debug_info['mask'] = self.mask[0]
        #low_res_output = lerp(img, self.process(img, filter_parameters), self.mask)
        low_res_output = self.process(img, filter_parameters)
        if high_res is not None:
            if self.no_high_res():
                high_res_output = high_res
            else:
                self.high_res_mask = self.get_mask(high_res, mask_parameters)
                high_res_output = lerp(high_res,
                                       self.process(high_res, filter_parameters),
                                       self.high_res_mask)
        else:
            high_res_output = None
        #return low_res_output, high_res_output, debug_info
        return low_res_output, filter_parameters

    def use_masking(self):
        return self.cfg.masking

    def get_num_mask_parameters(self):
        return 6

    # Input: no need for tanh or sigmoid
    # Closer to 1 values are applied by filter more strongly
    # no additional TF variables inside
    def get_mask(self, img, mask_parameters):
        """Build a per-pixel soft mask from six parameters: a linear ramp in
        x/y, a luminance term, a bias, a sharpness and a strength."""
        if not self.use_masking():
            print('* Masking Disabled')
            return tf.ones(shape=(1, 1, 1, 1), dtype=tf.float32)
        else:
            print('* Masking Enabled')
        with tf.name_scope(name='mask'):
            # Six parameters for one filter
            filter_input_range = 5
            assert mask_parameters.shape[1] == self.get_num_mask_parameters()
            mask_parameters = tanh_range(
                l=-filter_input_range, r=filter_input_range,
                initial=0)(mask_parameters)
            size = list(map(int, img.shape[1:3]))
            grid = np.zeros(shape=[1] + size + [2], dtype=np.float32)
            # Normalized coordinate grid centred on the image, scaled by the
            # shorter edge so the ramp is aspect-ratio independent.
            shorter_edge = min(size[0], size[1])
            for i in range(size[0]):
                for j in range(size[1]):
                    grid[0, i, j,
                         0] = (i + (shorter_edge - size[0]) / 2.0) / shorter_edge - 0.5
                    grid[0, i, j,
                         1] = (j + (shorter_edge - size[1]) / 2.0) / shorter_edge - 0.5
            grid = tf.constant(grid)
            # Ax + By + C * L + D
            inp = grid[:, :, :, 0, None] * mask_parameters[:, None, None, 0, None] + \
                  grid[:, :, :, 1, None] * mask_parameters[:, None, None, 1, None] + \
                  mask_parameters[:, None, None, 2, None] * (rgb2lum(img) - 0.5) + \
                  mask_parameters[:, None, None, 3, None] * 2
            # Sharpness and inversion
            inp *= self.cfg.maximum_sharpness * mask_parameters[:, None, None, 4,
                                                                None] / filter_input_range
            mask = tf.sigmoid(inp)
            # Strength
            mask = mask * (
                mask_parameters[:, None, None, 5, None] / filter_input_range * 0.5 +
                0.5) * (1 - self.cfg.minimum_strength) + self.cfg.minimum_strength
            print('mask', mask.shape)
            return mask

    # def visualize_filter(self, debug_info, canvas):
    #   # Visualize only the filter information
    #   assert False

    def visualize_mask(self, debug_info, res):
        # Render the mask as a greyscale image at the requested resolution.
        return cv2.resize(
            debug_info['mask'] * np.ones((1, 1, 3), dtype=np.float32),
            dsize=res,
            interpolation=cv2.cv2.INTER_NEAREST)

    def draw_high_res_text(self, text, canvas):
        cv2.putText(
            canvas,
            text, (30, 128),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.8, (0, 0, 0),
            thickness=5)
        return canvas
class ExposureFilter(Filter):
    """Exposure adjustment: scale the image by 2**param (an EV shift).

    The single parameter is squashed into [-exposure_range, exposure_range].
    """

    def __init__(self, net, cfg):
        Filter.__init__(self, net, cfg)
        self.short_name = 'E'
        self.begin_filter_parameter = cfg.exposure_begin_param
        self.num_filter_parameters = 1

    def filter_param_regressor(self, features):
        limit = self.cfg.exposure_range
        return tanh_range(-limit, limit, initial=0)(features)

    def process(self, img, param):
        # 2**param, implemented as exp(param * ln 2).
        return img * tf.exp(param[:, None, None, :] * np.log(2))
class UsmFilter(Filter):
    """Unsharp-mask sharpening: img + param * (img - gaussian_blur(img)).

    The single parameter (squashed into cfg.usm_range) controls the
    sharpening strength; 0 is the identity.
    """

    def __init__(self, net, cfg):
        Filter.__init__(self, net, cfg)
        self.short_name = 'UF'
        self.begin_filter_parameter = cfg.usm_begin_param
        self.num_filter_parameters = 1

    def filter_param_regressor(self, features):
        return tanh_range(*self.cfg.usm_range)(features)

    def process(self, img, param):
        # Fix: pad width was hard-coded as (25 - 1) // 2, silently coupled to
        # the kernel radius of 12; derive both from one constant so they
        # cannot drift apart.
        radius = 12  # blur kernel spans 2 * radius + 1 = 25 taps

        def make_gaussian_2d_kernel(sigma, dtype=tf.float32):
            # Normalised 1-D Gaussian, expanded to 2-D by outer product.
            x = tf.cast(tf.range(-radius, radius + 1), dtype=dtype)
            k = tf.exp(-0.5 * tf.square(x / sigma))
            k = k / tf.reduce_sum(k)
            return tf.expand_dims(k, 1) * k

        # Shape (h, w, in_channels=1, out_channels=1) for depth-1 conv2d.
        kernel = make_gaussian_2d_kernel(5)[:, :, tf.newaxis, tf.newaxis]
        # Reflect-pad by the radius so the VALID convolution returns the
        # input size without introducing border darkening.
        pad_w = radius
        padded = tf.pad(img, [[0, 0], [pad_w, pad_w], [pad_w, pad_w], [0, 0]],
                        mode='REFLECT')
        # Blur each RGB channel independently with the same kernel.
        blurred = tf.concat(
            [tf.nn.conv2d(padded[:, :, :, c:(c + 1)], kernel, [1, 1, 1, 1], 'VALID')
             for c in range(3)],
            axis=3)
        return (img - blurred) * param[:, None, None, :] + img
class GammaFilter(Filter):
    """Gamma correction; the learned gamma lies in [1/gamma_range, gamma_range]."""

    def __init__(self, net, cfg):
        Filter.__init__(self, net, cfg)
        self.short_name = 'G'
        self.begin_filter_parameter = cfg.gamma_begin_param
        self.num_filter_parameters = 1

    def filter_param_regressor(self, features):
        # Regress log-gamma symmetrically, then exponentiate.
        bound = np.log(self.cfg.gamma_range)
        return tf.exp(tanh_range(-bound, bound)(features))

    def process(self, img, param):
        gamma = tf.tile(param, [1, 3])
        # Floor at 0.001 so pow() stays finite and differentiable near zero.
        return tf.pow(tf.maximum(img, 0.001), gamma[:, None, None, :])
class ImprovedWhiteBalanceFilter(Filter):
    """Per-channel white balance whose scaling is normalised by luminance."""

    def __init__(self, net, cfg):
        Filter.__init__(self, net, cfg)
        self.short_name = 'W'
        self.channels = 3
        self.begin_filter_parameter = cfg.wb_begin_param
        self.num_filter_parameters = self.channels

    def filter_param_regressor(self, features):
        log_wb_range = 0.5
        # The red channel is frozen (mask entry 0); only G and B are learned.
        mask = np.array(((0, 1, 1)), dtype=np.float32).reshape(1, 3)
        # mask = np.array(((1, 0, 1)), dtype=np.float32).reshape(1, 3)
        print(mask.shape)
        assert mask.shape == (1, 3)
        scaling = tf.exp(tanh_range(-log_wb_range, log_wb_range)(features * mask))
        # Normalise so the (Rec.601-weighted) luminance is roughly preserved;
        # the epsilon only matters if the range lower bound were 0.
        luminance = (0.27 * scaling[:, 0] + 0.67 * scaling[:, 1] +
                     0.06 * scaling[:, 2])
        return scaling * (1.0 / (1e-5 + luminance))[:, None]

    def process(self, img, param):
        return img * param[:, None, None, :]
class ColorFilter(Filter):
    """Per-channel piecewise-linear colour curve with cfg.curve_steps segments."""

    def __init__(self, net, cfg):
        Filter.__init__(self, net, cfg)
        self.curve_steps = cfg.curve_steps
        self.channels = int(net.shape[3])
        self.short_name = 'C'
        self.begin_filter_parameter = cfg.color_begin_param
        self.num_filter_parameters = self.channels * cfg.curve_steps

    def filter_param_regressor(self, features):
        # (batch, 1, 1, channels, curve_steps) so the curve broadcasts over H, W.
        curve = tf.reshape(
            features, shape=(-1, self.channels,
                             self.cfg.curve_steps))[:, None, None, :]
        return tanh_range(*self.cfg.color_curve_range, initial=1)(curve)

    def process(self, img, param):
        # Sum of per-segment slopes; the epsilon only matters if the curve
        # range lower bound were 0.
        slope_sum = tf.reduce_sum(param, axis=4) + 1e-30
        out = img * 0
        for step in range(self.cfg.curve_steps):
            # Clip each pixel's contribution to its own curve segment.
            segment = tf.clip_by_value(img - 1.0 * step / self.cfg.curve_steps,
                                       0, 1.0 / self.cfg.curve_steps)
            out += segment * param[:, :, :, :, step]
        # Renormalise so the curve maps [0, 1] onto [0, 1].
        out *= self.cfg.curve_steps / slope_sum
        return out
class ToneFilter(Filter):
    """Global tone curve: one piecewise-linear curve shared by all channels."""

    def __init__(self, net, cfg):
        Filter.__init__(self, net, cfg)
        self.curve_steps = cfg.curve_steps
        self.short_name = 'T'
        self.begin_filter_parameter = cfg.tone_begin_param
        self.num_filter_parameters = cfg.curve_steps

    def filter_param_regressor(self, features):
        # (batch, 1, 1, 1, curve_steps): the single curve broadcasts over
        # height, width and channels.
        curve = tf.reshape(
            features, shape=(-1, 1, self.cfg.curve_steps))[:, None, None, :]
        return tanh_range(*self.cfg.tone_curve_range)(curve)

    def process(self, img, param):
        # img = tf.minimum(img, 1.0)
        slope_sum = tf.reduce_sum(param, axis=4) + 1e-30
        out = img * 0
        for step in range(self.cfg.curve_steps):
            out += tf.clip_by_value(img - 1.0 * step / self.cfg.curve_steps,
                                    0, 1.0 / self.cfg.curve_steps) \
                   * param[:, :, :, :, step]
        # Renormalise so the curve maps [0, 1] onto [0, 1].
        out *= self.cfg.curve_steps / slope_sum
        return out
class VignetFilter(Filter):
    """Vignette filter: process() zeroes the image and the radial mask from
    get_mask() controls where the darkening is applied."""

    def __init__(self, net, cfg):
        Filter.__init__(self, net, cfg)
        self.short_name = 'V'
        self.begin_filter_parameter = cfg.vignet_begin_param
        self.num_filter_parameters = 1

    def filter_param_regressor(self, features):
        return tf.sigmoid(features)

    def process(self, img, param):
        # The processed image is pure black; blending with the mask creates
        # the vignette.
        return img * 0  # + param[:, None, None, :]

    def get_num_mask_parameters(self):
        return 5

    # Input: no need for tanh or sigmoid
    # Closer to 1 values are applied by filter more strongly
    # no additional TF variables inside
    def get_mask(self, img, mask_parameters):
        """Elliptical soft mask: (Ax)^2 + (By)^2 + C, plus sharpness and
        strength controls (five parameters in total)."""
        with tf.name_scope(name='mask'):
            # Five parameters for one filter
            filter_input_range = 5
            assert mask_parameters.shape[1] == self.get_num_mask_parameters()
            mask_parameters = tanh_range(
                l=-filter_input_range, r=filter_input_range,
                initial=0)(mask_parameters)
            size = list(map(int, img.shape[1:3]))
            grid = np.zeros(shape=[1] + size + [2], dtype=np.float32)
            # Normalized coordinate grid centred on the image, scaled by the
            # shorter edge so the ellipse is aspect-ratio independent.
            shorter_edge = min(size[0], size[1])
            for i in range(size[0]):
                for j in range(size[1]):
                    grid[0, i, j,
                         0] = (i + (shorter_edge - size[0]) / 2.0) / shorter_edge - 0.5
                    grid[0, i, j,
                         1] = (j + (shorter_edge - size[1]) / 2.0) / shorter_edge - 0.5
            grid = tf.constant(grid)
            # (Ax)^2 + (By)^2 + C
            inp = (grid[:, :, :, 0, None] * mask_parameters[:, None, None, 0, None]) ** 2 + \
                  (grid[:, :, :, 1, None] * mask_parameters[:, None, None, 1, None]) ** 2 + \
                  mask_parameters[:, None, None, 2, None] - filter_input_range
            # Sharpness and inversion
            inp *= self.cfg.maximum_sharpness * mask_parameters[:, None, None, 3,
                                                                None] / filter_input_range
            mask = tf.sigmoid(inp)
            # Strength
            mask *= mask_parameters[:, None, None, 4,
                                    None] / filter_input_range * 0.5 + 0.5
            if not self.use_masking():
                print('* Masking Disabled')
                # Disabled masking collapses to an all-ones mask (identity blend).
                mask = mask * 0 + 1
            else:
                print('* Masking Enabled')
            print('mask', mask.shape)
            return mask

    # def visualize_filter(self, debug_info, canvas):
    #   brightness = float(debug_info['filter_parameters'][0])
    #   cv2.rectangle(canvas, (8, 40), (56, 52), (brightness, brightness,
    #                                             brightness), cv2.FILLED)
    #
class ContrastFilter(Filter):
    """Contrast adjustment: blend the image with a cosine-remapped version of
    itself; the single parameter in (-1, 1) sets the blend weight."""

    def __init__(self, net, cfg):
        Filter.__init__(self, net, cfg)
        self.short_name = 'Ct'
        self.begin_filter_parameter = cfg.contrast_begin_param
        self.num_filter_parameters = 1

    def filter_param_regressor(self, features):
        # return tf.sigmoid(features)
        # return tanh_range(*self.cfg.contrast_range)(features)
        return tf.tanh(features)

    def process(self, img, param):
        luminance = tf.minimum(tf.maximum(rgb2lum(img), 0.0), 1.0)
        # Cosine S-curve: pushes mid-tones apart, compresses extremes.
        remapped_lum = -tf.cos(math.pi * luminance) * 0.5 + 0.5
        # Rescale the colour so the image takes on the remapped luminance.
        contrast_image = img / (luminance + 1e-6) * remapped_lum
        # NOTE(review): broadcast pattern is [:, :, None, None] here while
        # sibling filters use [:, None, None, :] -- equivalent only because
        # this filter has a single parameter.
        return lerp(img, contrast_image, param[:, :, None, None])
class WNBFilter(Filter):
    """Black-and-white blend: interpolate between the colour image and its
    luminance; param in (0, 1) is the greyscale weight."""

    def __init__(self, net, cfg):
        Filter.__init__(self, net, cfg)
        self.short_name = 'BW'
        self.begin_filter_parameter = cfg.wnb_begin_param
        self.num_filter_parameters = 1

    def filter_param_regressor(self, features):
        return tf.sigmoid(features)

    def process(self, img, param):
        return lerp(img, rgb2lum(img), param[:, :, None, None])
class LevelFilter(Filter):
    """Level stretch: linearly remap [lower, upper] to [0, 1] and clip."""

    def __init__(self, net, cfg):
        Filter.__init__(self, net, cfg)
        self.short_name = 'Le'
        self.begin_filter_parameter = cfg.level_begin_param
        self.num_filter_parameters = 2

    def filter_param_regressor(self, features):
        return tf.sigmoid(features)

    def process(self, img, param):
        # lower lies in [0, 1]; upper in [1, 2], so upper >= lower always and
        # the epsilon guards the degenerate lower == upper == 1 case.
        lower = param[:, 0][:, None, None, None]
        upper = (param[:, 1] + 1)[:, None, None, None]
        return tf.clip_by_value((img - lower) / (upper - lower + 1e-6), 0.0, 1.0)
class SaturationPlusFilter(Filter):
    """Saturation boost in HSV space, strongest at mid-tone values."""

    def __init__(self, net, cfg):
        Filter.__init__(self, net, cfg)
        self.short_name = 'S+'
        self.begin_filter_parameter = cfg.saturation_begin_param
        self.num_filter_parameters = 1

    def filter_param_regressor(self, features):
        return tf.sigmoid(features)

    def process(self, img, param):
        img = tf.minimum(img, 1.0)
        hsv = tf.image.rgb_to_hsv(img)
        s = hsv[:, :, :, 1:2]
        v = hsv[:, :, :, 2:3]
        # Boost saturation most where value is near 0.5 (mid-tones);
        # highlights and shadows are barely touched.
        # enhanced_s = s + (1 - s) * 0.7 * (0.5 - tf.abs(0.5 - v)) ** 2
        boosted_s = s + (1 - s) * (0.5 - tf.abs(0.5 - v)) * 0.8
        saturated = tf.image.hsv_to_rgb(
            tf.concat([hsv[:, :, :, 0:1], boosted_s, hsv[:, :, :, 2:]], axis=3))
        # Blend the boosted image back in by the learned weight.
        weight = param[:, :, None, None]
        return img * (1.0 - weight) + saturated * weight
| 20,316 | 34.64386 | 131 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/train_lowlight.py | #! /usr/bin/env python
# coding=utf-8
import os
import time
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from tqdm import tqdm
from core.dataset_lowlight import Dataset
from core.yolov3_lowlight import YOLOV3
from core.config_lowlight import cfg
from core.config_lowlight import args
import random
# GPU selection: args.use_gpu == 0 forces CPU (-1); otherwise honour the
# comma-separated ids in args.gpu_id.
if args.use_gpu == 0:
    gpu_id = '-1'
else:
    gpu_id = args.gpu_id
gpu_list = list()
gpu_ids = gpu_id.split(',')
for i in range(len(gpu_ids)):
    gpu_list.append('/gpu:%d' % int(i))
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id

# Per-experiment output layout: exp_dir/exp_<num>/<ckpt_dir>.
exp_folder = os.path.join(args.exp_dir, 'exp_{}'.format(args.exp_num))
set_ckpt_dir = args.ckpt_dir
args.ckpt_dir = os.path.join(exp_folder, set_ckpt_dir)
if not os.path.exists(args.ckpt_dir):
    os.makedirs(args.ckpt_dir)

# Dump the full argument set next to the checkpoints for reproducibility.
config_log = os.path.join(exp_folder, 'config.txt')
arg_dict = args.__dict__
msg = ['{}: {}\n'.format(k, v) for k, v in arg_dict.items()]
utils.write_mes(msg, config_log, mode='w')
class YoloTrain(object):
    """Two-stage trainer for the low-light image-adaptive YOLOv3.

    Stage 1 trains only the three detection heads with the backbone frozen;
    stage 2 fine-tunes every trainable variable. When args.lowlight_FLAG is
    set, inputs are randomly gamma-darkened (np.power) while the clean image
    is fed separately for the recovery loss.
    """

    def __init__(self):
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150
        self.train_logdir = "./data/log/train"
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        # self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

        with tf.name_scope('define_input'):
            self.input_data = tf.placeholder(tf.float32, [None, None, None, 3], name='input_data')
            self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            # NOTE(review): this placeholder reuses the graph name
            # 'input_data'; TF uniquifies it to 'input_data_1' -- confirm
            # nothing looks it up by name.
            self.input_data_clean = tf.placeholder(tf.float32, [None, None, None, 3], name='input_data')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')

        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable, self.input_data_clean)
            t_variables = tf.trainable_variables()
            print("t_variables", t_variables)
            # self.net_var = [v for v in t_variables if not 'extract_parameters' in v.name]
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss, self.recovery_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            # self.loss only includes the detection loss.
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss

        with tf.name_scope('learn_rate'):
            # Linear warmup followed by cosine decay to learn_rate_end.
            self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
            warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
                                       dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant( (self.first_stage_epochs + self.second_stage_epochs)* self.steps_per_period,
                                       dtype=tf.float64, name='train_steps')
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                                 (1 + tf.cos(
                                     (self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
            )
            global_step_update = tf.assign_add(self.global_step, 1.0)

        with tf.name_scope("define_weight_decay"):
            # EMA of all trainable variables, refreshed after every train op.
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())

        with tf.name_scope("define_first_stage_train"):
            # Stage 1: optimise only the three detection heads.
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
                    self.first_stage_trainable_var_list.append(var)
            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                  var_list=self.first_stage_trainable_var_list)
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()

        with tf.name_scope("define_second_stage_train"):
            # Stage 2: optimise every trainable variable.
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                  var_list=second_stage_trainable_var_list)
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()

        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)

        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("recovery_loss", self.recovery_loss)
            tf.summary.scalar("total_loss", self.loss)
            # logdir = "./data/log/"
            logdir = os.path.join(exp_folder, 'log')
            # Wipe stale event files from previous runs of this experiment.
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)

    def train(self):
        """Run the full two-stage training loop and checkpoint per epoch."""
        self.sess.run(tf.global_variables_initializer())
        try:
            print('=> Restoring weights from: %s ... ' % self.initial_weight)
            self.loader.restore(self.sess, self.initial_weight)
        except:
            # NOTE(review): bare except swallows every error (including
            # programming mistakes); a narrower tf.errors.NotFoundError /
            # ValueError would be safer. On failure, skip stage 1 and train
            # everything from scratch.
            print('=> %s does not exist !!!' % self.initial_weight)
            print('=> Now it starts to train YOLOV3 from scratch ...')
            self.first_stage_epochs = 0

        for epoch in range(1, 1+self.first_stage_epochs+self.second_stage_epochs):
            if epoch <= self.first_stage_epochs:
                train_op = self.train_op_with_frozen_variables
            else:
                train_op = self.train_op_with_all_variables

            pbar = tqdm(self.trainset)
            train_epoch_loss, test_epoch_loss = [], []

            for train_data in pbar:
                if args.lowlight_FLAG:
                    # With probability 2/3, darken the input via a random
                    # gamma in [1.5, 5]; otherwise leave it unchanged (1).
                    # lowlight_param = random.uniform(-2, 0)
                    lowlight_param = 1
                    if random.randint(0, 2) > 0:
                        lowlight_param = random.uniform(1.5, 5)
                    _, summary, train_step_loss, train_step_loss_recovery, global_step_val = self.sess.run(
                        [train_op, self.write_op, self.loss, self.recovery_loss, self.global_step], feed_dict={
                            self.input_data: np.power(train_data[0], lowlight_param),# train_data[0]*np.exp(lowlight_param*np.log(2)),
                            self.label_sbbox: train_data[1],
                            self.label_mbbox: train_data[2],
                            self.label_lbbox: train_data[3],
                            self.true_sbboxes: train_data[4],
                            self.true_mbboxes: train_data[5],
                            self.true_lbboxes: train_data[6],
                            self.input_data_clean: train_data[0],
                            self.trainable: True,
                        })
                else:
                    _, summary, train_step_loss, global_step_val = self.sess.run(
                        [train_op, self.write_op, self.loss, self.global_step], feed_dict={
                            self.input_data: train_data[0],
                            self.label_sbbox: train_data[1],
                            self.label_mbbox: train_data[2],
                            self.label_lbbox: train_data[3],
                            self.true_sbboxes: train_data[4],
                            self.true_mbboxes: train_data[5],
                            self.true_lbboxes: train_data[6],
                            self.input_data_clean: train_data[0],
                            self.trainable: True,
                        })
                train_epoch_loss.append(train_step_loss)
                self.summary_writer.add_summary(summary, global_step_val)
                pbar.set_description("train loss: %.2f"%(train_step_loss))

            # Evaluation mirrors the training-time darkening so the test
            # loss is measured under the same input distribution.
            if args.lowlight_FLAG:
                for test_data in self.testset:
                    # lowlight_param = random.uniform(-2, 0)
                    lowlight_param = 1
                    if random.randint(0, 2) > 0:
                        lowlight_param = random.uniform(1.5, 5)
                    test_step_loss = self.sess.run(self.loss, feed_dict={
                        self.input_data: np.power(test_data[0], lowlight_param), #test_data[0]*np.exp(lowlight_param*np.log(2)),
                        self.label_sbbox: test_data[1],
                        self.label_mbbox: test_data[2],
                        self.label_lbbox: test_data[3],
                        self.true_sbboxes: test_data[4],
                        self.true_mbboxes: test_data[5],
                        self.true_lbboxes: test_data[6],
                        self.input_data_clean: test_data[0],
                        self.trainable: False,
                    })
                    test_epoch_loss.append(test_step_loss)
            else:
                for test_data in self.testset:
                    test_step_loss = self.sess.run(self.loss, feed_dict={
                        self.input_data: test_data[0],
                        self.label_sbbox: test_data[1],
                        self.label_mbbox: test_data[2],
                        self.label_lbbox: test_data[3],
                        self.true_sbboxes: test_data[4],
                        self.true_mbboxes: test_data[5],
                        self.true_lbboxes: test_data[6],
                        self.input_data_clean: test_data[0],
                        self.trainable: False,
                    })
                    test_epoch_loss.append(test_step_loss)

            train_epoch_loss, test_epoch_loss = np.mean(train_epoch_loss), np.mean(test_epoch_loss)
            # Checkpoint name embeds the test loss for quick model selection.
            ckpt_file = args.ckpt_dir + "/yolov3_test_loss=%.4f.ckpt" % test_epoch_loss
            log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            print("=> Epoch: %2d Time: %s Train loss: %.2f Test loss: %.2f Saving %s ..."
                  %(epoch, log_time, train_epoch_loss, test_epoch_loss, ckpt_file))
            self.saver.save(self.sess, ckpt_file, global_step=epoch)
if __name__ == '__main__': YoloTrain().train()
| 12,834 | 48.941634 | 134 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/util_filters.py | import math
import os
import sys

import cv2
import numpy as np
import tensorflow as tf
'''
output states:
0: has rewards?
1: stopped?
2: num steps
3:
'''
STATE_REWARD_DIM = 0
STATE_STOPPED_DIM = 1
STATE_STEP_DIM = 2
STATE_DROPOUT_BEGIN = 3
def get_expert_file_path(expert):
    """Return the data directory for artist *expert* (trailing slash kept)."""
    return 'data/artists/fk_%s/' % expert
# From github.com/OlavHN/fast-neural-style
def instance_norm(x):
    """Instance normalisation: zero-mean, unit-variance per image and channel
    (statistics over the spatial axes 1 and 2)."""
    eps = 1e-9
    mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
    # Epsilon keeps the division finite for constant (zero-variance) channels.
    return (x - mean) / tf.sqrt(var + eps)
def enrich_image_input(cfg, net, states):
    """Optionally append the agent state vector as extra constant channels of
    the image tensor (broadcast over height and width)."""
    if not cfg.img_include_states:
        return net
    print(("states for enriching", states.shape))
    # Broadcast (batch, k) -> (batch, H, W, k) by adding a zero image slice.
    broadcast_states = states[:, None, None, :] + (net[:, :, :, 0:1] * 0)
    return tf.concat([net, broadcast_states], axis=3)
# based on https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary
class Dict(dict):
    """A dict whose entries are also readable/writable as attributes.

    Every key mirrored into ``__dict__`` stays in sync with the mapping:
    setting, updating or deleting via either syntax affects both.

    Example:
      m = Dict({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
    """

    def __init__(self, *args, **kwargs):
        super(Dict, self).__init__(*args, **kwargs)
        # Route every initial entry through __setitem__ so the attribute
        # mirror is populated too.
        for arg in args:
            if isinstance(arg, dict):
                for key, value in arg.items():
                    self[key] = value
        for key, value in kwargs.items():
            self[key] = value

    def __getattr__(self, attr):
        return self[attr]

    def __setattr__(self, key, value):
        self.__setitem__(key, value)

    def __setitem__(self, key, value):
        super(Dict, self).__setitem__(key, value)
        self.__dict__.update({key: value})

    def __delattr__(self, item):
        self.__delitem__(item)

    def __delitem__(self, key):
        super(Dict, self).__delitem__(key)
        del self.__dict__[key]
def make_image_grid(images, per_row=8, padding=2):
  """Tile a batch of images (N, H, W, C) into a single grid image.

  Each tile gets a constant 1.0 border of ``padding`` pixels; N must be a
  multiple of ``per_row``.
  """
  pad_spec = ((0, 0), (padding, padding), (padding, padding), (0, 0))
  padded = np.pad(images, pad_width=pad_spec, mode='constant', constant_values=1.0)
  assert padded.shape[0] % per_row == 0
  rows = [
      np.hstack(padded[start:start + per_row])
      for start in range(0, padded.shape[0], per_row)
  ]
  return np.vstack(rows)
def get_image_center(image):
  """Center-crop an (H, W, ...) image to a square along its longer axis."""
  h, w = image.shape[0], image.shape[1]
  if h > w:
    top = (h - w) // 2
    image = image[top:top + w, :]
  if w > h:
    left = (w - h) // 2
    image = image[:, left:left + h]
  return image
def rotate_image(image, angle):
  """
  Rotates an OpenCV 2 / NumPy image about it's centre by the given angle
  (in degrees). The returned image will be large enough to hold the entire
  new image, with a black background
  """
  # Get the image size
  # No that's not an error - NumPy stores image matricies backwards
  image_size = (image.shape[1], image.shape[0])
  image_center = tuple(np.array(image_size) // 2)
  # Convert the OpenCV 3x2 rotation matrix to 3x3
  rot_mat = np.vstack(
      [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]])
  rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])
  # Shorthand for below calcs
  image_w2 = image_size[0] * 0.5
  image_h2 = image_size[1] * 0.5
  # Obtain the rotated coordinates of the image corners
  rotated_coords = [
      (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],
      (np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],
      (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],
      (np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0]
  ]
  # Find the size of the new image by taking the extreme rotated corners
  # on each side of the origin.
  x_coords = [pt[0] for pt in rotated_coords]
  x_pos = [x for x in x_coords if x > 0]
  x_neg = [x for x in x_coords if x < 0]
  y_coords = [pt[1] for pt in rotated_coords]
  y_pos = [y for y in y_coords if y > 0]
  y_neg = [y for y in y_coords if y < 0]
  # NOTE(review): max()/min() raise on empty lists, e.g. for degenerate
  # zero-sized inputs where no corner lands strictly on one side.
  right_bound = max(x_pos)
  left_bound = min(x_neg)
  top_bound = max(y_pos)
  bot_bound = min(y_neg)
  new_w = int(abs(right_bound - left_bound))
  new_h = int(abs(top_bound - bot_bound))
  # We require a translation matrix to keep the image centred
  trans_mat = np.matrix([[1, 0, int(new_w * 0.5 - image_w2)],
                         [0, 1, int(new_h * 0.5 - image_h2)], [0, 0, 1]])
  # Compute the tranform for the combined rotation and translation
  affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]
  # Apply the transform
  result = cv2.warpAffine(
      image, affine_mat, (new_w, new_h), flags=cv2.INTER_LINEAR)
  return result
def largest_rotated_rect(w, h, angle):
  """
  Given a rectangle of size wxh that has been rotated by 'angle' (in
  radians), computes the width and height of the largest possible
  axis-aligned rectangle within the rotated rectangle.
  Original JS code by 'Andri' and Magnus Hoff from Stack Overflow
  Converted to Python by Aaron Snoswell
  """
  # Fold the angle into [0, pi) exploiting the rectangle's symmetry.
  quadrant = int(math.floor(angle / (math.pi / 2))) & 3
  sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle
  alpha = (sign_alpha % math.pi + math.pi) % math.pi
  # Bounding box of the rotated w x h rectangle.
  bb_w = w * math.cos(alpha) + h * math.sin(alpha)
  bb_h = w * math.sin(alpha) + h * math.cos(alpha)
  # NOTE(review): both branches compute atan2(bb_w, bb_w) (always pi/4), so
  # the w < h test has no effect; the upstream snippet likely intended
  # atan2(bb_w, bb_h) / atan2(bb_h, bb_w). Kept as-is -- confirm against the
  # original Stack Overflow answer before changing.
  gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)
  delta = math.pi - alpha - gamma
  length = h if (w < h) else w
  d = length * math.cos(alpha)
  a = d * math.sin(alpha) / math.sin(delta)
  y = a * math.cos(gamma)
  x = y * math.tan(gamma)
  # Shrink the bounding box by the computed margins on both sides.
  return (bb_w - 2 * x, bb_h - 2 * y)
def crop_around_center(image, width, height):
  """Crop an (H, W[, C]) image to ``width`` x ``height`` about its centre.

  Requested sizes larger than the image are clamped to the image size.
  """
  img_w, img_h = image.shape[1], image.shape[0]
  cx = int(img_w * 0.5)
  cy = int(img_h * 0.5)
  width = min(width, img_w)
  height = min(height, img_h)
  x1 = int(cx - width * 0.5)
  x2 = int(cx + width * 0.5)
  y1 = int(cy - height * 0.5)
  y2 = int(cy + height * 0.5)
  return image[y1:y2, x1:x2]
# angle: degrees
def rotate_and_crop(image, angle):
  """Rotate ``image`` by ``angle`` degrees and crop away the black corners."""
  # NOTE(review): image.shape[:2] is (height, width) in NumPy, so these two
  # names are swapped; for non-square inputs the (w, h) arguments passed to
  # largest_rotated_rect are therefore transposed. Confirm intent before
  # changing.
  image_width, image_height = image.shape[:2]
  image_rotated = rotate_image(image, angle)
  image_rotated_cropped = crop_around_center(image_rotated,
                                             *largest_rotated_rect(
                                                 image_width, image_height,
                                                 math.radians(angle)))
  return image_rotated_cropped
def lrelu(x, leak=0.2, name="lrelu"):
  """Leaky ReLU expressed as a linear combination of x and |x|."""
  with tf.variable_scope(name):
    pos_coef = 0.5 * (1 + leak)
    neg_coef = 0.5 * (1 - leak)
    return pos_coef * x + neg_coef * abs(x)
# clamps to 0, 1 with leak
def double_lrelu(x, leak=0.1, name="double_lrelu"):
  """Piecewise-linear soft clamp of x to [0, 1] with slope ``leak`` outside."""
  with tf.variable_scope(name):
    # Below 0 the lower branch (leak*x) wins; above 1 the upper branch
    # (leak*x - (leak - 1)) wins; in between the identity x is returned.
    return tf.minimum(tf.maximum(leak * x, x), leak * x - (leak - 1))
# clamp to lower, upper; leak is RELATIVE
def leaky_clamp(x, lower, upper, leak=0.1, name="leaky_clamp"):
  """Leaky clamp of x to [lower, upper]; ``leak`` is the RELATIVE outside slope.

  Implemented by rescaling x to [0, 1], applying the double-sided leaky
  clamp (same form as double_lrelu), and mapping back to [lower, upper].
  """
  with tf.variable_scope(name):
    x = (x - lower) / (upper - lower)
    return tf.minimum(tf.maximum(leak * x, x), leak * x -
                      (leak - 1)) * (upper - lower) + lower
class Tee(object):
  """Mirror stdout/stderr into a log file.

  Instantiating a Tee replaces sys.stdout and sys.stderr with itself;
  every write() then goes to both the file and the original stdout.
  """
  def __init__(self, name):
    self.file = open(name, 'w')
    self.stdout = sys.stdout
    self.stderr = sys.stderr
    sys.stdout = self
    sys.stderr = self
  def __del__(self):
    # NOTE(review): closes the log file but does not restore the original
    # sys.stdout / sys.stderr.
    self.file.close()
  def write(self, data):
    # Write to both sinks and flush immediately so logs survive a crash.
    self.file.write(data)
    self.stdout.write(data)
    self.file.flush()
    self.stdout.flush()
  def write_to_file(self, data):
    # File-only write (bypasses the console).
    self.file.write(data)
  def flush(self):
    # NOTE(review): flushes only the file, not the wrapped stdout.
    self.file.flush()
def rgb2lum(image):
  """Luminance of an NHWC RGB batch; returns shape (N, H, W, 1)."""
  r = image[:, :, :, 0]
  g = image[:, :, :, 1]
  b = image[:, :, :, 2]
  lum = 0.27 * r + 0.67 * g + 0.06 * b
  return lum[:, :, :, None]
def tanh01(x):
  """Squash ``x`` smoothly into (0, 1) via a rescaled tanh."""
  return 0.5 * tf.tanh(x) + 0.5
def tanh_range(l, r, initial=None):
  """Build an activation mapping the reals smoothly into (l, r).

  When ``initial`` is given, a bias is added so that activation(0) equals
  ``initial``.
  """
  def activation(x):
    if initial is not None:
      bias = math.atanh(2 * (initial - l) / (r - l) - 1)
    else:
      bias = 0
    return tanh01(x + bias) * (r - l) + l
  return activation
def merge_dict(a, b):
  """Return a new dict with the entries of both ``a`` and ``b``.

  Neither input is modified. Raises AssertionError if ``b`` contains a key
  that already exists in ``a`` (the key sets must be disjoint).
  """
  ret = a.copy()
  for key, val in list(b.items()):
    # Fixed message: original read 'Item <key>already exists' (missing space)
    # and broke with TypeError for non-str keys; str(key) handles both.
    assert key not in ret, 'Item ' + str(key) + ' already exists'
    ret[key] = val
  return ret
def lerp(a, b, l):
  """Linearly interpolate from ``a`` (l == 0) to ``b`` (l == 1)."""
  weight_a = 1 - l
  return weight_a * a + l * b
def read_tiff16(fn):
  """Load a TIFF file and return float32 pixels scaled to [0, 1].

  8-bit and 16-bit unsigned integer images are handled; any other dtype
  falls back to a 16-bit assumption with a warning.
  """
  import tifffile
  import numpy as np
  img = tifffile.imread(fn)
  if img.dtype == np.uint8:
    depth = 8
  elif img.dtype == np.uint16:
    depth = 16
  else:
    # Fixed: the original passed the dtype as a second print() argument, so
    # the '{}' placeholder was never substituted.
    print("Warning: unsupported data type {}. Assuming 16-bit.".format(img.dtype))
    depth = 16
  return (img * (1.0 / (2**depth - 1))).astype(np.float32)
def load_config(config_name):
  """Import and return ``cfg`` from the module ``config_<config_name>``.

  Equivalent to ``from config_<name> import cfg`` but uses importlib instead
  of exec(), which avoids executing an arbitrary code string.
  """
  import importlib
  module = importlib.import_module('config_%s' % config_name)
  return module.cfg
# ======================================================================================================================
# added by Hao He
# ======================================================================================================================
def get_artist_batch(folder, size=128, num=64):
  """Load up to ``num`` random images from ``folder``, center-crop, resize.

  Returns a (num, size, size, 3) float array with pixel values in [0, 1]
  (cv2's default BGR channel order). If the folder holds fewer than ``num``
  images, the remaining rows stay zero.
  """
  import os
  js = os.listdir(folder)
  np.random.shuffle(js)
  imgs = np.zeros((num, size, size, 3))
  for i, jpg in enumerate(js[:num]):
    img = cv2.imread(folder + '/' + jpg)
    img = get_image_center(img) / 255.
    imgs[i] = cv2.resize(img, dsize=(size, size))
  return imgs
def show_artist_subnails(folder, size=128, num_row=8, num_column=8):
  """Sample num_row * num_column images from ``folder`` and tile them."""
  batch = get_artist_batch(folder, size, num_row * num_column)
  return make_image_grid(batch, per_row=num_row)
def np_tanh_range(l, r):
  """NumPy analogue of tanh_range: returns an activation closure.

  NOTE(review): unlike tanh_range above, tanh is NOT first rescaled to
  (0, 1), so the output range is (2*l - r, r) rather than (l, r) -- e.g.
  asymmetric for WB2's symmetric (-log 5, log 5) range. Confirm whether
  this is intentional before changing.
  """
  def get_activation(left, right):
    def activation(x):
      return np.tanh(x) * (right - left) + left
    return activation
  return get_activation(l, r)
class WB2:
  """Luminance-preserving white-balance filter (NumPy version)."""
  def filter_param_regressor(self, features):
    """Map the first three feature columns to positive RGB channel gains."""
    log_wb_range = np.log(5)
    gains = np.exp(
        np_tanh_range(-log_wb_range, log_wb_range)(features[:, :3]))
    # The exp() keeps every gain strictly positive, so the per-pixel
    # denominator in process() never hits exactly zero.
    return gains
  def process(self, img, param):
    """Scale channels by ``param`` then renormalize to keep luminance."""
    w_r, w_g, w_b = 0.27, 0.67, 0.06
    lum = (img[:, :, :, 0] * w_r + img[:, :, :, 1] * w_g +
           img[:, :, :, 2] * w_b + 1e-5)[:, :, :, None]
    scaled = img * param[:, None, None, :]
    new_lum = (scaled[:, :, :, 0] * w_r + scaled[:, :, :, 1] * w_g +
               scaled[:, :, :, 2] * w_b + 1e-5)[:, :, :, None]
    return scaled / new_lum * lum
def degrade_images_in_folder(
    folder,
    dst_folder_suffix,
    LIGHTDOWN=True,
    UNBALANCECOLOR=True,):
  """Write degraded copies of every image in ``folder``.

  Output goes to '<folder>-<dst_folder_suffix>', with per source image:
  darkened variants (files prefixed 'L#') when LIGHTDOWN, and randomly
  color-unbalanced variants (prefixed 'C#') when UNBALANCECOLOR.
  """
  import os
  js = os.listdir(folder)
  dst_folder = folder + '-' + dst_folder_suffix
  try:
    os.mkdir(dst_folder)
  except:
    # Best-effort: directory likely exists already; keep going either way.
    print('dir exist!')
  print('in ' + dst_folder)
  num = 3
  for j in js:
    img = cv2.imread(folder + '/' + j) / 255.
    if LIGHTDOWN:
      # Two random gamma+scale darkenings, plus one deterministic img**2
      # variant renormalized to full range.
      for _ in range(num - 1):
        out = pow(img, np.random.uniform(0.4, 0.6)) * np.random.uniform(
            0.25, 0.5)
        cv2.imwrite(dst_folder + '/' + ('L%d-' % _) + j, out * 255.)
      out = img * img
      out = out * (1.0 / out.max())
      cv2.imwrite(dst_folder + '/' + ('L%d-' % num) + j, out * 255.)
    if UNBALANCECOLOR:
      # Random white-balance shifts via WB2, renormalized then dimmed.
      filter = WB2()
      outs = np.array([img] * num)
      features = np.abs(np.random.rand(num, 3))
      for _, out in enumerate(
          filter.process(outs, filter.filter_param_regressor(features))):
        # print out.max()
        out /= out.max()
        out *= np.random.uniform(0.7, 1)
        cv2.imwrite(dst_folder + '/' + ('C%d-' % _) + j, out * 255.)
def vis_images_and_indexs(images, features, dir, name):
  """Save a grid of ``images`` with their ``features`` rendered beneath.

  One 64x64 text tile is drawn per feature, stacked under the image batch,
  and the combined grid is written to <dir>/<name>.png (channel order
  reversed for cv2, values scaled by 255).
  """
  # indexs = np.reshape(indexs, (len(indexs),))
  # print('visualizing images and indexs: ', images.shape, indexs.shape)
  id_imgs = []
  for feature in features:
    img = np.ones((64, 64, 3))
    cv2.putText(img,
                str(feature), (4, 33), cv2.FONT_HERSHEY_SIMPLEX, 0.25,
                (1.0, 0.0, 0.0))
    id_imgs.append(img)
  id_imgs = np.stack(id_imgs, axis=0)
  # print('id imgs: ', id_imgs.shape)
  vis_imgs = np.vstack([images, id_imgs])
  image = make_image_grid(vis_imgs, per_row=images.shape[0])
  vis_dir = dir
  try:
    os.mkdir(vis_dir)
  except:
    # Directory may already exist; ignore and reuse it.
    pass
  cv2.imwrite(os.path.join(vis_dir, name + '.png'), image[:, :, ::-1] * 255.0)
def read_set(name):
  """Return the list of FiveK image ids belonging to fold ``name``.

  Unknown names raise AssertionError. '5k' is the full 1..5000 range and
  needs no fold file.
  """
  if name == '5k':  # add by hao
    return list(range(1, 5001))
  fold_files = {
      'u_test': 'data/folds/FiveK_test.txt',
      'u_amt': 'data/folds/FiveK_test_AMT.txt',
      '2k_train': 'data/folds/FiveK_train_first2k.txt',
      '2k_target': 'data/folds/FiveK_train_second2k.txt',
  }
  assert name in fold_files, name + ' not found'
  fn = fold_files[name]
  need_reverse = False
  ids = []
  with open(fn, 'r') as f:
    for line in f:
      if line[0] != '#':  # '#' marks comment lines in the fold files
        try:
          ids.append(int(line))
        except Exception as e:
          print(e)
  if need_reverse:
    ids = list(set(range(1, 5001)) - set(ids))
  return ids
'''
util_image.py
Copyright (c) 2014 Zhicheng Yan (zhicheng.yan@live.com)
modified 2017 by Yuanming Hu (yuanmhu@gmail.com)
note that some of the color space conversions are NOT exact, like gamma 1.8 or 2.2
'''
import numpy as np
from skimage import color
import tifffile as tiff
class UtilImageError(Exception):
    """Generic error raised by the util_image color-conversion helpers."""
    pass
''' undo gamma correction '''
def linearize_ProPhotoRGB(pp_rgb, reverse=False):
    """Undo (or, with reverse=True, re-apply) the ProPhotoRGB gamma of 1.8.

    Note the file-level caveat: this gamma handling is approximate.
    """
    gamma = 1.8 if not reverse else 1.0 / 1.8
    return np.power(pp_rgb, gamma)
def XYZ_chromatic_adapt(xyz, src_white='D65', dest_white='D50'):
    """Chromatically adapt an (H, W, 3) XYZ image between D65 and D50.

    Only the D65->D50 and D50->D65 directions are supported; any other pair
    raises UtilImageError.
    """
    if src_white == 'D65' and dest_white == 'D50':
        M = [[1.0478112, 0.0228866, -0.0501270],
             [0.0295424, 0.9904844, -0.0170491],
             [-0.0092345, 0.0150436, 0.7521316]]
    elif src_white == 'D50' and dest_white == 'D65':
        M = [[0.9555766, -0.0230393, 0.0631636],
             [-0.0282895, 1.0099416, 0.0210077],
             [0.0122982, -0.0204830, 1.3299098]]
    else:
        # Fixed: the original raised an undefined name (UtilCnnImageEnhanceError)
        # and applied the '%' formatting to the raise expression rather than
        # to the message string.
        raise UtilImageError(
            'invalid pair of source and destination white reference %s,%s'
            % (src_white, dest_white))
    M = np.array(M)
    sp = xyz.shape
    assert sp[2] == 3
    # Apply M to every pixel: flatten to (H*W, 3), multiply, restore shape.
    xyz = np.transpose(np.dot(M, np.transpose(xyz.reshape((sp[0] * sp[1], 3)))))
    return xyz.reshape((sp[0], sp[1], 3))
# pp_rgb float in range [0,1], linear ProPhotoRGB
# refernce white is D50
def ProPhotoRGB2XYZ(pp_rgb, reverse=False):
    """Convert linear ProPhotoRGB (D50 white) to XYZ, or back with reverse.

    ``pp_rgb`` is an (H, W, 3) float array in [0, 1].
    """
    if reverse:
        M = [[1.34594337, -0.25560752, -0.05111183],
             [-0.54459882, 1.5081673, 0.02053511],
             [0, 0, 1.21181275]]
    else:
        M = [[0.7976749, 0.1351917, 0.0313534],
             [0.2880402, 0.7118741, 0.0000857],
             [0.0000000, 0.0000000, 0.8252100]]
    M = np.array(M)
    h, w, c = pp_rgb.shape
    flat = pp_rgb.reshape((h * w, c))
    converted = np.transpose(np.dot(M, np.transpose(flat)))
    return converted.reshape((h, w, 3))
''' normalize L channel so that minimum of L is 0 and maximum of L is 100 '''
def normalize_Lab_image(lab_image):
h, w, ch = lab_image.shape[0], lab_image.shape[1], lab_image.shape[2]
assert ch == 3
lab_image = lab_image.reshape((h * w, ch))
L_ch = lab_image[:, 0]
L_min, L_max = np.min(L_ch), np.max(L_ch)
# print 'before normalization L min %f,Lmax %f' % (L_min,L_max)
scale = 100.0 / (L_max - L_min)
lab_image[:, 0] = (lab_image[:, 0] - L_min) * scale
# print 'after normalization L min %f,Lmax %f' %\
(np.min(lab_image[:, 0]), np.max(lab_image[:, 0]))
return lab_image.reshape((h, w, ch))
''' white reference 'D65' '''
def read_tiff_16bit_img_into_XYZ(tiff_fn, exposure=0):
    """Read a 16-bit ProPhotoRGB TIFF and return an XYZ image (D65 white).

    Pixels are scaled to [0, 1], gamma-linearized, exposure-scaled by
    2**exposure, converted to XYZ (D50 native), then chromatically adapted
    to D65. Raises UtilImageError for non-3-channel images.
    """
    pp_rgb = tiff.imread(tiff_fn)
    pp_rgb = np.float64(pp_rgb) / (2**16 - 1.0)
    if not pp_rgb.shape[2] == 3:
        print('pp_rgb shape', pp_rgb.shape)
        raise UtilImageError('image channel number is not 3')
    pp_rgb = linearize_ProPhotoRGB(pp_rgb)
    pp_rgb *= np.power(2, exposure)
    xyz = ProPhotoRGB2XYZ(pp_rgb)
    xyz = XYZ_chromatic_adapt(xyz, src_white='D50', dest_white='D65')
    return xyz
def ProPhotoRGB2Lab(img):
    """Convert gamma-encoded ProPhotoRGB (H, W, 3) to CIELAB."""
    if img.shape[2] != 3:
        print('pp_rgb shape', img.shape)
        raise UtilImageError('image channel number is not 3')
    linear = linearize_ProPhotoRGB(img)
    return color.xyz2lab(ProPhotoRGB2XYZ(linear))
def linearProPhotoRGB2Lab(img):
    """Convert already-linear ProPhotoRGB (H, W, 3) to CIELAB (no gamma step)."""
    if img.shape[2] != 3:
        print('pp_rgb shape', img.shape)
        raise UtilImageError('image channel number is not 3')
    return color.xyz2lab(ProPhotoRGB2XYZ(img))
| 16,568 | 27.035533 | 120 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/freeze_graph.py | #! /usr/bin/env python
# coding=utf-8
import tensorflow as tf
from core.yolov3 import YOLOV3
# Freeze the demo checkpoint into a standalone GraphDef (.pb) whose
# variables have been folded into constants.
pb_file = "./yolov3_coco.pb"
ckpt_file = "./checkpoint/yolov3_coco_demo.ckpt"
output_node_names = ["input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2", "pred_lbbox/concat_2"]

with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32, name='input_data')

model = YOLOV3(input_data, trainable=False)
print(model.conv_sbbox, model.conv_mbbox, model.conv_lbbox)

sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver = tf.train.Saver()
saver.restore(sess, ckpt_file)

converted_graph_def = tf.graph_util.convert_variables_to_constants(
    sess,
    input_graph_def=sess.graph.as_graph_def(),
    output_node_names=output_node_names)

with tf.gfile.GFile(pb_file, "wb") as f:
    f.write(converted_graph_def.SerializeToString())
| 929 | 27.181818 | 109 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/evaluate.py | #! /usr/bin/env python
# coding=utf-8
import cv2
import os
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from core.config import cfg
from core.yolov3 import YOLOV3
from core.config import args
import random
import math
import subprocess as sub
import time
from filters import *
# Resolve the experiment output folder and which GPUs (if any) to expose.
exp_folder = os.path.join(args.exp_dir, 'exp_{}'.format(args.exp_num))

# use_gpu == 0 forces CPU-only mode via CUDA_VISIBLE_DEVICES=-1.
gpu_id = '-1' if args.use_gpu == 0 else args.gpu_id
gpu_ids = gpu_id.split(',')
# Device names use logical indices 0..n-1, not the raw ids from gpu_id.
gpu_list = ['/gpu:%d' % idx for idx in range(len(gpu_ids))]
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
class YoloTest(object):
    """Evaluate the (optionally ISP-preprocessed) YOLOv3 detector.

    Builds the inference graph once in __init__, then evaluate() walks the
    annotation file, writing per-image ground-truth and prediction text
    files (for external mAP scoring) plus optional visualization images.
    """
    def __init__(self):
        # Test-time hyperparameters and paths, all from cfg / CLI args.
        self.input_size = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.score_threshold = cfg.TEST.SCORE_THRESHOLD
        self.iou_threshold = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.annotation_path = args.test_path
        self.weight_file = cfg.TEST.WEIGHT_FILE
        self.write_image = cfg.TEST.WRITE_IMAGE
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        self.show_label = cfg.TEST.SHOW_LABEL
        self.isp_flag = cfg.YOLO.ISP_FLAG
        with tf.name_scope('input'):
            self.input_data = tf.placeholder(tf.float32, [None, None, None, 3], name='input_data')
            self.defog_A = tf.placeholder(tf.float32, [None, 3], name='defog_A')
            self.IcA = tf.placeholder(tf.float32, [None, None, None,1], name='IcA')
            self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')
            self.input_data_clean = tf.placeholder(tf.float32, [None, None, None, 3], name='input_data')
        model = YOLOV3(self.input_data, self.trainable,self.input_data_clean, self.defog_A, self.IcA)
        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox, self.image_isped, self.isp_params, self.filter_imgs_series = \
            model.pred_sbbox, model.pred_mbbox, model.pred_lbbox, model.image_isped,model.filter_params, model.filter_imgs_series
        with tf.name_scope('ema'):
            ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        # self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # Restore the EMA (shadow) weights rather than the raw variables.
        self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        self.saver.restore(self.sess, self.weight_file)
    def predict(self, image, image_name):
        """Run one image through the network.

        Returns (bboxes, processed_image, seconds). ``bboxes`` are NMS'd
        detections in original-image coordinates; ``processed_image`` is
        the ISP output (or the clipped input when ISP is off).
        """
        org_image = np.copy(image)
        org_h, org_w, _ = org_image.shape
        image_data = utils.image_preporcess(image, [self.input_size, self.input_size])
        image_data = image_data[np.newaxis, ...]
        # --- dark-channel-prior helpers used to feed the defog filter ---
        def DarkChannel(im):
            # Per-pixel minimum over the three channels.
            b, g, r = cv2.split(im)
            dc = cv2.min(cv2.min(r, g), b)
            return dc
        def AtmLight(im, dark):
            # Estimate atmospheric light A from the brightest 0.1% of the
            # dark channel.
            [h, w] = im.shape[:2]
            imsz = h * w
            numpx = int(max(math.floor(imsz / 1000), 1))
            darkvec = dark.reshape(imsz, 1)
            imvec = im.reshape(imsz, 3)
            indices = darkvec.argsort(0)
            indices = indices[(imsz - numpx):imsz]
            atmsum = np.zeros([1, 3])
            # NOTE(review): range starts at 1, so the first selected pixel
            # is skipped -- confirm this is intentional.
            for ind in range(1, numpx):
                atmsum = atmsum + imvec[indices[ind]]
            A = atmsum / numpx
            return A
        def DarkIcA(im, A):
            # Dark channel of the image normalized by atmospheric light A.
            im3 = np.empty(im.shape, im.dtype)
            for ind in range(0, 3):
                im3[:, :, ind] = im[:, :, ind] / A[0, ind]
            return DarkChannel(im3)
        if self.isp_flag:
            # Defog inputs default to zeros; only filled in when the
            # DefogFilter is part of the configured filter chain.
            dark = np.zeros((image_data.shape[0], image_data.shape[1], image_data.shape[2]))
            defog_A = np.zeros((image_data.shape[0], image_data.shape[3]))
            IcA = np.zeros((image_data.shape[0], image_data.shape[1], image_data.shape[2]))
            if DefogFilter in cfg.filters:
                for i in range(image_data.shape[0]):
                    dark_i = DarkChannel(image_data[i])
                    defog_A_i = AtmLight(image_data[i], dark_i)
                    IcA_i = DarkIcA(image_data[i], defog_A_i)
                    dark[i, ...] = dark_i
                    defog_A[i, ...] = defog_A_i
                    IcA[i, ...] = IcA_i
            IcA = np.expand_dims(IcA, axis=-1)
            start_time = time.time()
            pred_sbbox, pred_mbbox, pred_lbbox, image_isped, isp_param, filter_imgs_series = self.sess.run(
                [self.pred_sbbox, self.pred_mbbox, self.pred_lbbox, self.image_isped, self.isp_params, self.filter_imgs_series],
                feed_dict={
                    self.input_data: image_data, # image_data*np.exp(lowlight_param*np.log(2)),
                    self.defog_A: defog_A,
                    self.IcA: IcA,
                    self.trainable: False,
                    self.input_data_clean:image_data
                }
            )
            time_one_img = time.time() - start_time
            print('process one image need:', time_one_img)
        else:
            start_time = time.time()
            pred_sbbox, pred_mbbox, pred_lbbox, image_isped, isp_param = self.sess.run(
                [self.pred_sbbox, self.pred_mbbox, self.pred_lbbox, self.image_isped, self.isp_params],
                feed_dict={
                    self.input_data: image_data, # image_data*np.exp(lowlight_param*np.log(2)),
                    self.trainable: False
                }
            )
            time_one_img = time.time() - start_time
            print('process one image need:', time_one_img)
        # Merge the three scale heads into one (N, 5 + num_classes) array.
        pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_mbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)
        bboxes = utils.postprocess_boxes(pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
        bboxes = utils.nms(bboxes, self.iou_threshold)
        if self.isp_flag:
            print('ISP params : ', isp_param)
            # Map the ISP output back to the original resolution, 0..255.
            image_isped = utils.image_unpreporcess(image_isped[0, ...], [org_h, org_w])
            image_isped = np.clip(image_isped * 255, 0, 255)
            # filter_imgs_series = np.array(filter_imgs_series)
            # print('filter_imgs_series.shape:', filter_imgs_series.shape)
            # for i in range(filter_imgs_series.shape[0]):
            #     image_isped_i = utils.image_unpreporcess(filter_imgs_series[i, 0, ...], [org_h, org_w])
            #     image_isped_i = np.clip(image_isped_i * 255, 0, 255)
            #     cv2.imwrite(self.write_image_path + image_name[:-4] + 'f' + str(i) +'.png', image_isped_i)
        else:
            image_isped = np.clip(image, 0, 255)
            # image_isped = utils.image_unpreporcess(image_isped, [org_h, org_w])
            # cv2.imwrite(self.write_image_path + 'low'+ image_name, image_isped)
        return bboxes, image_isped, time_one_img
    def evaluate(self):
        """Run detection over the whole annotation file and dump results.

        Recreates <exp>/mAP/{predicted,ground-truth} and the visualization
        folder, then writes one text file per image in each.
        """
        mAP_path = exp_folder + '/mAP'
        if not os.path.exists(mAP_path):
            os.makedirs(mAP_path)
        predicted_dir_path = mAP_path + '/predicted'
        ground_truth_dir_path = mAP_path + '/ground-truth'
        # Start from clean output directories on every run.
        if os.path.exists(predicted_dir_path): shutil.rmtree(predicted_dir_path)
        if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)
        if os.path.exists(self.write_image_path): shutil.rmtree(self.write_image_path)
        os.mkdir(predicted_dir_path)
        os.mkdir(ground_truth_dir_path)
        os.mkdir(self.write_image_path)
        time_total = 0
        time_total_cnn_process_img = 0
        num_img = 0
        with open(self.annotation_path, 'r') as annotation_file:
            for num, line in enumerate(annotation_file):
                # if len(line.strip().split()[1:]) == 0:
                #     continue
                # Annotation format: <image_path> [<xmin,ymin,xmax,ymax,cls> ...]
                annotation = line.strip().split()
                image_path = annotation[0]
                image_name = image_path.split('/')[-1]
                image = cv2.imread(image_path)
                bbox_data_gt = np.array([list(map(int, box.split(','))) for box in annotation[1:]])
                if len(bbox_data_gt) == 0:
                    bboxes_gt=[]
                    classes_gt=[]
                else:
                    bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]
                ground_truth_path = os.path.join(ground_truth_dir_path, str(num) + '.txt')
                print('=> ground truth of %s:' % image_name)
                num_bbox_gt = len(bboxes_gt)
                with open(ground_truth_path, 'w') as f:
                    for i in range(num_bbox_gt):
                        class_name = self.classes[classes_gt[i]]
                        xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
                        bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\n'
                        f.write(bbox_mess)
                        print('\t' + str(bbox_mess).strip())
                print('=> predict result of %s:' % image_name)
                predict_result_path = os.path.join(predicted_dir_path, str(num) + '.txt')
                t1 = time.time()
                bboxes_pr, image_isped, time_one_img = self.predict(image, image_name)
                num_img += 1
                time_total_cnn_process_img += time_one_img
                time_total += time.time() - t1
                if self.write_image:
                    # NOTE(review): both branches are identical; the isp_flag
                    # distinction here has no effect.
                    if self.isp_flag:
                        image = utils.draw_bbox(image_isped, bboxes_pr, self.classes, show_label=self.show_label)
                    else:
                        image = utils.draw_bbox(image_isped, bboxes_pr, self.classes, show_label=self.show_label)
                    cv2.imwrite(self.write_image_path+image_name, image)
                with open(predict_result_path, 'w') as f:
                    for bbox in bboxes_pr:
                        coor = np.array(bbox[:4], dtype=np.int32)
                        score = bbox[4]
                        class_ind = int(bbox[5])
                        class_name = self.classes[class_ind]
                        score = '%.4f' % score
                        xmin, ymin, xmax, ymax = list(map(str, coor))
                        bbox_mess = ' '.join([class_name, score, xmin, ymin, xmax, ymax]) + '\n'
                        f.write(bbox_mess)
                        print('\t' + str(bbox_mess).strip())
        print('****process uses:', time_total)
        # NOTE(review): divides by num_img -- raises ZeroDivisionError when the
        # annotation file is empty.
        print('validation time:%s, total_proce_time:%s, num_img:%s, aver_time:%s'%(time_total, time_total_cnn_process_img, num_img, time_total_cnn_process_img / num_img))
if __name__ == '__main__': YoloTest().evaluate()
| 11,108 | 42.73622 | 170 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/from_darknet_weights_to_ckpt.py | import tensorflow as tf
from core.yolov3 import YOLOV3
# NOTE: 'iput_size' (sic) is referenced below when building the input
# placeholder, so the misspelled name is kept for compatibility.
iput_size = 416
# Fixed: the original literal ('<your yolov3.weights' path>') was a syntax
# error -- the quote after ".weights" terminated the string early.
darknet_weights = '<your yolov3.weights path>'
ckpt_file = './checkpoint/yolov3_coco.ckpt'
def load_weights(var_list, weights_file):
    """
    Loads and converts pre-trained darknet weights.
    :param var_list: list of network variables.
    :param weights_file: name of the binary file.
    :return: list of assign ops
    """
    # Fixed: this module never imported numpy, so 'np' was an undefined name
    # at runtime; import it locally to keep the module self-contained.
    import numpy as np
    with open(weights_file, "rb") as fp:
        _ = np.fromfile(fp, dtype=np.int32, count=5)  # skip the 5-int darknet header
        weights = np.fromfile(fp, dtype=np.float32)  # np.ndarray
    print('weights_num:', weights.shape[0])
    ptr = 0
    i = 0
    assign_ops = []
    while i < len(var_list) - 1:
        var1 = var_list[i]
        var2 = var_list[i + 1]
        # do something only if we process conv layer
        if 'conv' in var1.name.split('/')[-2]:
            # check type of next layer
            if 'batch_normalization' in var2.name.split('/')[-2]:
                # load batch norm params; darknet stores them as
                # beta, gamma, mean, variance (in that file order)
                gamma, beta, mean, var = var_list[i + 1:i + 5]
                batch_norm_vars = [beta, gamma, mean, var]
                for vari in batch_norm_vars:
                    shape = vari.shape.as_list()
                    num_params = np.prod(shape)
                    vari_weights = weights[ptr:ptr + num_params].reshape(shape)
                    ptr += num_params
                    assign_ops.append(
                        tf.assign(vari, vari_weights, validate_shape=True))
                i += 4
            elif 'conv' in var2.name.split('/')[-2]:
                # load biases
                bias = var2
                bias_shape = bias.shape.as_list()
                bias_params = np.prod(bias_shape)
                bias_weights = weights[ptr:ptr +
                                       bias_params].reshape(bias_shape)
                ptr += bias_params
                assign_ops.append(
                    tf.assign(bias, bias_weights, validate_shape=True))
                i += 1
            shape = var1.shape.as_list()
            num_params = np.prod(shape)
            # darknet stores conv kernels as (out, in, h, w)
            var_weights = weights[ptr:ptr + num_params].reshape(
                (shape[3], shape[2], shape[0], shape[1]))
            # remember to transpose to column-major (h, w, in, out)
            var_weights = np.transpose(var_weights, (2, 3, 1, 0))
            ptr += num_params
            assign_ops.append(
                tf.assign(var1, var_weights, validate_shape=True))
            i += 1
    print('ptr:', ptr)
    return assign_ops
# Build the YOLOv3 inference graph, load the darknet weights into it, and
# save the result as a TensorFlow checkpoint.
with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32,shape=(None, iput_size, iput_size, 3), name='input_data')
model = YOLOV3(input_data, trainable=False)
load_ops = load_weights(tf.global_variables(), darknet_weights)
saver = tf.train.Saver(tf.global_variables())
with tf.Session() as sess:
    # Running the assign ops copies the darknet weights into the variables.
    sess.run(load_ops)
    save_path = saver.save(sess, save_path=ckpt_file)
    print('Model saved in path: {}'.format(save_path))
| 2,972 | 38.118421 | 106 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/evaluate_lowlight.py | #! /usr/bin/env python
# coding=utf-8
import cv2
import os
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from core.config_lowlight import cfg
from core.yolov3_lowlight import YOLOV3
from core.config_lowlight import args
import random
import time
# Resolve the experiment output folder and which GPUs (if any) to expose.
exp_folder = os.path.join(args.exp_dir, 'exp_{}'.format(args.exp_num))

# use_gpu == 0 forces CPU-only mode via CUDA_VISIBLE_DEVICES=-1.
gpu_id = '-1' if args.use_gpu == 0 else args.gpu_id
gpu_ids = gpu_id.split(',')
# Device names use logical indices 0..n-1, not the raw ids from gpu_id.
gpu_list = ['/gpu:%d' % idx for idx in range(len(gpu_ids))]
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
class YoloTest(object):
    """Evaluate the low-light variant of the (optionally ISP-preprocessed)
    YOLOv3 detector.

    Builds the inference graph once in __init__, then evaluate() walks the
    annotation file, writing per-image ground-truth and prediction text
    files (for external mAP scoring) plus optional visualization images.
    """
    def __init__(self):
        # Test-time hyperparameters and paths, all from cfg.
        self.input_size = cfg.TEST.INPUT_SIZE
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.score_threshold = cfg.TEST.SCORE_THRESHOLD
        self.iou_threshold = cfg.TEST.IOU_THRESHOLD
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.annotation_path = cfg.TEST.ANNOT_PATH
        self.weight_file = cfg.TEST.WEIGHT_FILE
        self.write_image = cfg.TEST.WRITE_IMAGE
        self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
        self.show_label = cfg.TEST.SHOW_LABEL
        self.isp_flag = cfg.YOLO.ISP_FLAG
        with tf.name_scope('input'):
            self.input_data = tf.placeholder(tf.float32, [None, None, None, 3], name='input_data')
            self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')
            self.input_data_clean = tf.placeholder(tf.float32, [None, None, None, 3], name='input_data')
        model = YOLOV3(self.input_data, self.trainable, self.input_data_clean)
        self.pred_sbbox, self.pred_mbbox, self.pred_lbbox, self.image_isped, self.isp_params = \
            model.pred_sbbox, model.pred_mbbox, model.pred_lbbox, model.image_isped,model.filter_params
        with tf.name_scope('ema'):
            ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        # self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # Restore the EMA (shadow) weights rather than the raw variables.
        self.saver = tf.train.Saver(ema_obj.variables_to_restore())
        self.saver.restore(self.sess, self.weight_file)
    def predict(self, image, image_name):
        """Run one image through the network.

        Returns (bboxes, processed_image): NMS'd detections in
        original-image coordinates, and the ISP output (or clipped input
        when ISP is off).
        """
        org_image = np.copy(image)
        org_h, org_w, _ = org_image.shape
        image_data = utils.image_preporcess(image, [self.input_size, self.input_size])
        image_data = image_data[np.newaxis, ...]
        pred_sbbox, pred_mbbox, pred_lbbox, image_isped, isp_param = self.sess.run(
            [self.pred_sbbox, self.pred_mbbox, self.pred_lbbox, self.image_isped, self.isp_params],
            feed_dict={
                self.input_data: image_data, # image_data*np.exp(lowlight_param*np.log(2)),
                self.trainable: False
            }
        )
        # Merge the three scale heads into one (N, 5 + num_classes) array.
        pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_mbbox, (-1, 5 + self.num_classes)),
                                    np.reshape(pred_lbbox, (-1, 5 + self.num_classes))], axis=0)
        bboxes = utils.postprocess_boxes(pred_bbox, (org_h, org_w), self.input_size, self.score_threshold)
        bboxes = utils.nms(bboxes, self.iou_threshold)
        if self.isp_flag:
            print('ISP params : ', isp_param)
            # Scale the ISP output to 0..255 and back to the original size.
            image_isped = np.clip(image_isped[0, ...]*255, 0, 255)
            image_isped = utils.image_unpreporcess(image_isped, [org_h, org_w])
        else:
            image_isped = np.clip(image, 0, 255)
            # image_isped = utils.image_unpreporcess(image_isped, [org_h, org_w])
            # cv2.imwrite(self.write_image_path + 'low'+ image_name, image_isped)
        return bboxes, image_isped
    def evaluate(self):
        """Run detection over the whole annotation file and dump results.

        Recreates <exp>/mAP/{predicted,ground-truth} and the visualization
        folder, then writes one text file per image in each.
        """
        mAP_path = exp_folder + '/mAP'
        if not os.path.exists(mAP_path):
            os.makedirs(mAP_path)
        predicted_dir_path = mAP_path + '/predicted'
        ground_truth_dir_path = mAP_path + '/ground-truth'
        # Start from clean output directories on every run.
        if os.path.exists(predicted_dir_path): shutil.rmtree(predicted_dir_path)
        if os.path.exists(ground_truth_dir_path): shutil.rmtree(ground_truth_dir_path)
        if os.path.exists(self.write_image_path): shutil.rmtree(self.write_image_path)
        os.mkdir(predicted_dir_path)
        os.mkdir(ground_truth_dir_path)
        os.mkdir(self.write_image_path)
        time_total = 0
        with open(self.annotation_path, 'r') as annotation_file:
            for num, line in enumerate(annotation_file):
                # Annotation format: <image_path> [<xmin,ymin,xmax,ymax,cls> ...]
                annotation = line.strip().split()
                image_path = annotation[0]
                image_name = image_path.split('/')[-1]
                image = cv2.imread(image_path)
                bbox_data_gt = np.array([list(map(int, box.split(','))) for box in annotation[1:]])
                if len(bbox_data_gt) == 0:
                    bboxes_gt=[]
                    classes_gt=[]
                else:
                    bboxes_gt, classes_gt = bbox_data_gt[:, :4], bbox_data_gt[:, 4]
                ground_truth_path = os.path.join(ground_truth_dir_path, str(num) + '.txt')
                print('=> ground truth of %s:' % image_path)
                num_bbox_gt = len(bboxes_gt)
                with open(ground_truth_path, 'w') as f:
                    for i in range(num_bbox_gt):
                        class_name = self.classes[classes_gt[i]]
                        xmin, ymin, xmax, ymax = list(map(str, bboxes_gt[i]))
                        bbox_mess = ' '.join([class_name, xmin, ymin, xmax, ymax]) + '\n'
                        f.write(bbox_mess)
                        print('\t' + str(bbox_mess).strip())
                print('=> predict result of %s:' % image_path)
                predict_result_path = os.path.join(predicted_dir_path, str(num) + '.txt')
                # bboxes_pr, image_isped = self.predict(image, image_name)
                t1 = time.time()
                bboxes_pr, image_isped = self.predict(image, image_name)
                time_total += time.time() - t1
                if self.write_image:
                    # NOTE(review): both branches are identical; the isp_flag
                    # distinction here has no effect.
                    if self.isp_flag:
                        image = utils.draw_bbox(image_isped, bboxes_pr, self.classes, show_label=self.show_label)
                    else:
                        image = utils.draw_bbox(image_isped, bboxes_pr, self.classes, show_label=self.show_label)
                    cv2.imwrite(self.write_image_path+image_name, image)
                with open(predict_result_path, 'w') as f:
                    for bbox in bboxes_pr:
                        coor = np.array(bbox[:4], dtype=np.int32)
                        score = bbox[4]
                        class_ind = int(bbox[5])
                        class_name = self.classes[class_ind]
                        score = '%.4f' % score
                        xmin, ymin, xmax, ymax = list(map(str, coor))
                        bbox_mess = ' '.join([class_name, score, xmin, ymin, xmax, ymax]) + '\n'
                        f.write(bbox_mess)
                        print('\t' + str(bbox_mess).strip())
if __name__ == '__main__': YoloTest().evaluate()
| 7,446 | 42.046243 | 113 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/from_darknet_weights_to_pb.py | import tensorflow as tf
from core.yolov3 import YOLOV3
from from_darknet_weights_to_ckpt import load_weights
# Convert darknet weights straight into a frozen GraphDef (.pb).
input_size = 416
darknet_weights = '<your darknet weights file path>'
pb_file = './yolov3.pb'
output_node_names = ["input/input_data", "pred_sbbox/concat_2", "pred_mbbox/concat_2", "pred_lbbox/concat_2"]

with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32, shape=(None, input_size, input_size, 3), name='input_data')

model = YOLOV3(input_data, trainable=False)
load_ops = load_weights(tf.global_variables(), darknet_weights)

with tf.Session() as sess:
    sess.run(load_ops)
    output_graph_def = tf.graph_util.convert_variables_to_constants(
        sess,
        tf.get_default_graph().as_graph_def(),
        output_node_names=output_node_names
    )
    # Fixed: the original wrote to (and reported) 'output_graph', which is
    # never defined in this script; the intended destination is pb_file.
    with tf.gfile.GFile(pb_file, "wb") as f:
        f.write(output_graph_def.SerializeToString())
    print("{} ops written to {}.".format(len(output_graph_def.node), pb_file))
| 983 | 35.444444 | 109 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/filters.py | import tensorflow as tf
import numpy as np
import tensorflow.contrib.layers as ly
from util_filters import lrelu, rgb2lum, tanh_range, lerp
import cv2
import math
class Filter:
  """Base class for the differentiable image filters.

  Each subclass owns a contiguous slice of a shared per-image parameter
  vector (located by begin_filter_parameter / num_filter_parameters),
  squashes it into a valid range in filter_param_regressor(), and applies
  one image operation in process(). apply() drives the whole pipeline.
  """
  def __init__(self, net, cfg):
    self.cfg = cfg
    # self.height, self.width, self.channels = list(map(int, net.get_shape()[1:]))
    # Specified in child classes
    self.num_filter_parameters = None
    self.short_name = None
    self.filter_parameters = None
  def get_short_name(self):
    # Short identifier set by the subclass (used for logging/visualization).
    assert self.short_name
    return self.short_name
  def get_num_filter_parameters(self):
    assert self.num_filter_parameters
    return self.num_filter_parameters
  def get_begin_filter_parameter(self):
    # Offset of this filter's slice inside the shared parameter vector.
    return self.begin_filter_parameter
  def extract_parameters(self, features):
    # Slice this filter's parameters out of the feature vector. The same
    # slice is returned twice; the second copy stands in for the (unused)
    # mask parameters expected by apply().
    # output_dim = self.get_num_filter_parameters(
    # ) + self.get_num_mask_parameters()
    # features = ly.fully_connected(
    #     features,
    #     self.cfg.fc1_size,
    #     scope='fc1',
    #     activation_fn=lrelu,
    #     weights_initializer=tf.contrib.layers.xavier_initializer())
    # features = ly.fully_connected(
    #     features,
    #     output_dim,
    #     scope='fc2',
    #     activation_fn=None,
    #     weights_initializer=tf.contrib.layers.xavier_initializer())
    return features[:, self.get_begin_filter_parameter():(self.get_begin_filter_parameter() + self.get_num_filter_parameters())], \
           features[:, self.get_begin_filter_parameter():(self.get_begin_filter_parameter() + self.get_num_filter_parameters())]
  # Should be implemented in child classes
  def filter_param_regressor(self, features):
    # Map raw network outputs into this filter's valid parameter range.
    assert False
  # Process the whole image, without masking
  # Should be implemented in child classes
  def process(self, img, param, defog, IcA):
    assert False
  def debug_info_batched(self):
    return False
  def no_high_res(self):
    return False
  # Apply the whole filter with masking
  def apply(self,
            img,
            img_features=None,
            defog_A=None,
            IcA=None,
            specified_parameter=None,
            high_res=None):
    """Run the filter on img.

    Exactly one of img_features (parameters regressed from features) or
    specified_parameter (parameters given directly) must be provided.
    Returns (filtered_image, filter_parameters).
    """
    assert (img_features is None) ^ (specified_parameter is None)
    if img_features is not None:
      filter_features, mask_parameters = self.extract_parameters(img_features)
      filter_parameters = self.filter_param_regressor(filter_features)
    else:
      assert not self.use_masking()
      filter_parameters = specified_parameter
      mask_parameters = tf.zeros(
          shape=(1, self.get_num_mask_parameters()), dtype=np.float32)
    if high_res is not None:
      # working on high res...
      pass
    debug_info = {}
    # We only debug the first image of this batch
    if self.debug_info_batched():
      debug_info['filter_parameters'] = filter_parameters
    else:
      debug_info['filter_parameters'] = filter_parameters[0]
    # self.mask_parameters = mask_parameters
    # self.mask = self.get_mask(img, mask_parameters)
    # debug_info['mask'] = self.mask[0]
    #low_res_output = lerp(img, self.process(img, filter_parameters), self.mask)
    low_res_output = self.process(img, filter_parameters, defog_A, IcA)
    if high_res is not None:
      if self.no_high_res():
        high_res_output = high_res
      else:
        self.high_res_mask = self.get_mask(high_res, mask_parameters)
        # high_res_output = lerp(high_res,
        #                        self.process(high_res, filter_parameters, defog, IcA),
        #                        self.high_res_mask)
    else:
      high_res_output = None
    #return low_res_output, high_res_output, debug_info
    return low_res_output, filter_parameters
  def use_masking(self):
    return self.cfg.masking
  def get_num_mask_parameters(self):
    return 6
  # Input: no need for tanh or sigmoid
  # Closer to 1 values are applied by filter more strongly
  # no additional TF variables inside
  def get_mask(self, img, mask_parameters):
    """Build a soft spatial mask from six parameters (position, luminance,
    sharpness, strength); returns all-ones when masking is disabled."""
    if not self.use_masking():
      print('* Masking Disabled')
      return tf.ones(shape=(1, 1, 1, 1), dtype=tf.float32)
    else:
      print('* Masking Enabled')
    with tf.name_scope(name='mask'):
      # Six parameters for one filter
      filter_input_range = 5
      assert mask_parameters.shape[1] == self.get_num_mask_parameters()
      mask_parameters = tanh_range(
          l=-filter_input_range, r=filter_input_range,
          initial=0)(mask_parameters)
      size = list(map(int, img.shape[1:3]))
      grid = np.zeros(shape=[1] + size + [2], dtype=np.float32)
      # Normalized pixel coordinates in [-0.5, 0.5], centered on the image.
      shorter_edge = min(size[0], size[1])
      for i in range(size[0]):
        for j in range(size[1]):
          grid[0, i, j,
               0] = (i + (shorter_edge - size[0]) / 2.0) / shorter_edge - 0.5
          grid[0, i, j,
               1] = (j + (shorter_edge - size[1]) / 2.0) / shorter_edge - 0.5
      grid = tf.constant(grid)
      # Ax + By + C * L + D
      inp = grid[:, :, :, 0, None] * mask_parameters[:, None, None, 0, None] + \
            grid[:, :, :, 1, None] * mask_parameters[:, None, None, 1, None] + \
            mask_parameters[:, None, None, 2, None] * (rgb2lum(img) - 0.5) + \
            mask_parameters[:, None, None, 3, None] * 2
      # Sharpness and inversion
      inp *= self.cfg.maximum_sharpness * mask_parameters[:, None, None, 4,
                                                          None] / filter_input_range
      mask = tf.sigmoid(inp)
      # Strength
      mask = mask * (
          mask_parameters[:, None, None, 5, None] / filter_input_range * 0.5 +
          0.5) * (1 - self.cfg.minimum_strength) + self.cfg.minimum_strength
      print('mask', mask.shape)
      return mask
  # def visualize_filter(self, debug_info, canvas):
  #   # Visualize only the filter information
  #   assert False
  def visualize_mask(self, debug_info, res):
    # Render the soft mask as a grayscale image at resolution `res`.
    return cv2.resize(
        debug_info['mask'] * np.ones((1, 1, 3), dtype=np.float32),
        dsize=res,
        interpolation=cv2.cv2.INTER_NEAREST)
  def draw_high_res_text(self, text, canvas):
    # Overlay a debug caption on a high-resolution canvas.
    cv2.putText(
        canvas,
        text, (30, 128),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.8, (0, 0, 0),
        thickness=5)
    return canvas
class ExposureFilter(Filter):
  """Exposure filter: scales the image by 2**EV, with the single learned
  parameter EV constrained to [-exposure_range, exposure_range]."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.short_name = 'E'
    self.begin_filter_parameter = cfg.exposure_begin_param
    self.num_filter_parameters = 1
  def filter_param_regressor(self, features):
    # Squash the raw feature into [-bound, bound] with a scaled tanh.
    bound = self.cfg.exposure_range
    return tanh_range(-bound, bound, initial=0)(features)
  def process(self, img, param, defog, IcA):
    # img * 2**EV, expressed as exp(EV * ln 2).
    exposure = param[:, None, None, :]
    return img * tf.exp(exposure * np.log(2))
# def visualize_filter(self, debug_info, canvas):
# exposure = debug_info['filter_parameters'][0]
# if canvas.shape[0] == 64:
# cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
# cv2.putText(canvas, 'EV %+.2f' % exposure, (8, 48),
# cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0))
# else:
# self.draw_high_res_text('Exposure %+.2f' % exposure, canvas)
class UsmFilter(Filter):#Usm_param is in [Defog_range]
  """Unsharp-mask sharpening: out = img + amount * (img - gaussian_blur(img)),
  where `amount` is the single learned parameter."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.short_name = 'UF'
    self.begin_filter_parameter = cfg.usm_begin_param
    self.num_filter_parameters = 1
  def filter_param_regressor(self, features):
    # Sharpening amount constrained to cfg.usm_range.
    return tanh_range(*self.cfg.usm_range)(features)
  def process(self, img, param, defog_A, IcA):
    def make_gaussian_2d_kernel(sigma, dtype=tf.float32):
      # Separable 25x25 (radius 12) normalized Gaussian kernel.
      radius = 12
      x = tf.cast(tf.range(-radius, radius + 1), dtype=dtype)
      k = tf.exp(-0.5 * tf.square(x / sigma))
      k = k / tf.reduce_sum(k)
      return tf.expand_dims(k, 1) * k
    kernel_i = make_gaussian_2d_kernel(5)
    print('kernel_i.shape', kernel_i.shape)
    # Add the in/out channel dims for conv2d's HWIO layout
    # (the [1, 1, 1, 1] tile is a no-op kept from the original).
    kernel_i = tf.tile(kernel_i[:, :, tf.newaxis, tf.newaxis], [1, 1, 1, 1])
    # outputs = []
    # for channel_idx in range(3):
    #     data_c = img[:, :, :, channel_idx:(channel_idx + 1)]
    #     data_c = tf.nn.conv2d(data_c, kernel_i, [1, 1, 1, 1], 'SAME')
    #     outputs.append(data_c)
    # REFLECT-pad by the kernel radius (12) so the 'VALID' convolution
    # preserves spatial size without zero-border artifacts.
    pad_w = (25 - 1) // 2
    padded = tf.pad(img, [[0, 0], [pad_w, pad_w], [pad_w, pad_w], [0, 0]], mode='REFLECT')
    outputs = []
    for channel_idx in range(3):
      data_c = padded[:, :, :, channel_idx:(channel_idx + 1)]
      data_c = tf.nn.conv2d(data_c, kernel_i, [1, 1, 1, 1], 'VALID')
      outputs.append(data_c)
    output = tf.concat(outputs, axis=3)
    # Classic unsharp mask: add the high-frequency residual back, scaled.
    img_out = (img - output) * param[:, None, None, :] + img
    # img_out = (img - output) * 2.5 + img
    return img_out
class UsmFilter_sigma(Filter):#Usm_param is in [Defog_range]
  """Variant of UsmFilter where the learned parameter is reused as the
  Gaussian sigma as well as the sharpening amount."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.short_name = 'UF'
    self.begin_filter_parameter = cfg.usm_begin_param
    self.num_filter_parameters = 1
  def filter_param_regressor(self, features):
    return tanh_range(*self.cfg.usm_range)(features)
  def process(self, img, param, defog_A, IcA):
    def make_gaussian_2d_kernel(sigma, dtype=tf.float32):
      # Separable (2*radius+1)^2 normalized Gaussian kernel.
      radius = 12
      x = tf.cast(tf.range(-radius, radius + 1), dtype=dtype)
      k = tf.exp(-0.5 * tf.square(x / sigma))
      k = k / tf.reduce_sum(k)
      return tf.expand_dims(k, 1) * k
    # NOTE(review): sigma here is a batched 4-D tensor, so the resulting
    # kernel is batch-dependent and its shape is unlikely to match the
    # HWIO filter layout conv2d expects below — confirm this variant is
    # actually exercised before relying on it.
    kernel_i = make_gaussian_2d_kernel(param[:, None, None, :])
    print('kernel_i.shape', kernel_i.shape)
    kernel_i = tf.tile(kernel_i[:, :, tf.newaxis, tf.newaxis], [1, 1, 1, 1])
    # outputs = []
    # for channel_idx in range(3):
    #     data_c = img[:, :, :, channel_idx:(channel_idx + 1)]
    #     data_c = tf.nn.conv2d(data_c, kernel_i, [1, 1, 1, 1], 'SAME')
    #     outputs.append(data_c)
    # REFLECT-pad by the kernel radius so 'VALID' conv keeps spatial size.
    pad_w = (25 - 1) // 2
    padded = tf.pad(img, [[0, 0], [pad_w, pad_w], [pad_w, pad_w], [0, 0]], mode='REFLECT')
    outputs = []
    for channel_idx in range(3):
      data_c = padded[:, :, :, channel_idx:(channel_idx + 1)]
      data_c = tf.nn.conv2d(data_c, kernel_i, [1, 1, 1, 1], 'VALID')
      outputs.append(data_c)
    output = tf.concat(outputs, axis=3)
    img_out = (img - output) * param[:, None, None, :] + img
    return img_out
class DefogFilter(Filter):#Defog_param is in [Defog_range]
  """Defogging filter based on inverting the atmospheric scattering model
  I = J*t + A*(1-t): recover J = (I - A)/t + A, with the transmission t
  estimated from the dark channel IcA scaled by the learned parameter."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.short_name = 'DF'
    self.begin_filter_parameter = cfg.defog_begin_param
    self.num_filter_parameters = 1
  def filter_param_regressor(self, features):
    # Defog strength constrained to cfg.defog_range.
    return tanh_range(*self.cfg.defog_range)(features)
  def process(self, img, param, defog_A, IcA):
    print(' defog_A:', img.shape)
    print(' defog_A:', IcA.shape)
    print(' defog_A:', defog_A.shape)
    # Transmission map: t = 1 - w * dark_channel(I/A); w is the learned strength.
    tx = 1 - param[:, None, None, :]*IcA
    # tx = 1 - 0.5*IcA
    tx_1 = tf.tile(tx, [1, 1, 1, 3])
    # Clamp t at 0.01 to avoid dividing by (near-)zero transmission.
    return (img - defog_A[:, None, None, :])/tf.maximum(tx_1, 0.01) + defog_A[:, None, None, :]
class GammaFilter(Filter):
  """Gamma correction: out = max(img, eps) ** gamma, with gamma in
  [1/gamma_range, gamma_range]."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.short_name = 'G'
    self.begin_filter_parameter = cfg.gamma_begin_param
    self.num_filter_parameters = 1
  def filter_param_regressor(self, features):
    # Regress log-gamma symmetrically, then exponentiate so gamma lands
    # in [1/gamma_range, gamma_range].
    bound = np.log(self.cfg.gamma_range)
    return tf.exp(tanh_range(-bound, bound)(features))
  def process(self, img, param, defog_A, IcA):
    # Broadcast the single gamma to all three channels; floor the image
    # at a small epsilon so pow() never sees zero or negatives.
    gamma = tf.tile(param, [1, 3])
    return tf.pow(tf.maximum(img, 0.0001), gamma[:, None, None, :])
# return img
# def visualize_filter(self, debug_info, canvas):
# gamma = debug_info['filter_parameters']
# cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
# cv2.putText(canvas, 'G 1/%.2f' % (1.0 / gamma), (8, 48),
# cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0))
class ImprovedWhiteBalanceFilter(Filter):
  """White balance via per-channel gains, normalized so overall luminance
  is (approximately) preserved."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.short_name = 'W'
    self.channels = 3
    self.begin_filter_parameter = cfg.wb_begin_param
    self.num_filter_parameters = self.channels
  def filter_param_regressor(self, features):
    log_wb_range = 0.5
    # The (0, 1, 1) mask zeroes channel 0's feature, pinning its raw gain
    # at exp(0) = 1 before luminance normalization.
    mask = np.array(((0, 1, 1)), dtype=np.float32).reshape(1, 3)
    # mask = np.array(((1, 0, 1)), dtype=np.float32).reshape(1, 3)
    print(mask.shape)
    assert mask.shape == (1, 3)
    features = features * mask
    color_scaling = tf.exp(tanh_range(-log_wb_range, log_wb_range)(features))
    # There will be no division by zero here unless the WB range lower bound is 0
    # normalize by luminance
    color_scaling *= 1.0 / (
        1e-5 + 0.27 * color_scaling[:, 0] + 0.67 * color_scaling[:, 1] +
        0.06 * color_scaling[:, 2])[:, None]
    return color_scaling
  def process(self, img, param, defog, IcA):
    # Apply the per-channel gains.
    return img * param[:, None, None, :]
# return img
# def visualize_filter(self, debug_info, canvas):
# scaling = debug_info['filter_parameters']
# s = canvas.shape[0]
# cv2.rectangle(canvas, (int(s * 0.2), int(s * 0.4)), (int(s * 0.8), int(
# s * 0.6)), list(map(float, scaling)), cv2.FILLED)
class ColorFilter(Filter):
  """Per-channel piecewise-linear color curve with cfg.curve_steps learned
  segment slopes per channel."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.curve_steps = cfg.curve_steps
    self.channels = int(net.shape[3])
    self.short_name = 'C'
    self.begin_filter_parameter = cfg.color_begin_param
    self.num_filter_parameters = self.channels * cfg.curve_steps
  def filter_param_regressor(self, features):
    # Reshape the flat slice into (batch, 1, 1, channels, curve_steps) and
    # constrain each segment slope to cfg.color_curve_range (around 1).
    color_curve = tf.reshape(
        features, shape=(-1, self.channels,
                         self.cfg.curve_steps))[:, None, None, :]
    color_curve = tanh_range(
        *self.cfg.color_curve_range, initial=1)(color_curve)
    return color_curve
  def process(self, img, param, defog, IcA):
    color_curve = param
    # There will be no division by zero here unless the color filter range lower bound is 0
    color_curve_sum = tf.reduce_sum(param, axis=4) + 1e-30
    total_image = img * 0
    # Each 1/curve_steps intensity band is scaled by its segment slope,
    # then the result is renormalized so the curve maps [0,1] onto [0,1].
    for i in range(self.cfg.curve_steps):
      total_image += tf.clip_by_value(img - 1.0 * i / self.cfg.curve_steps, 0, 1.0 / self.cfg.curve_steps) * \
                     color_curve[:, :, :, :, i]
    total_image *= self.cfg.curve_steps / color_curve_sum
    return total_image
# def visualize_filter(self, debug_info, canvas):
# curve = debug_info['filter_parameters']
# height, width = canvas.shape[:2]
# for i in range(self.channels):
# values = np.array([0] + list(curve[0][0][i]))
# values /= sum(values) + 1e-30
# scale = 1
# values *= scale
# for j in range(0, self.cfg.curve_steps):
# values[j + 1] += values[j]
# for j in range(self.cfg.curve_steps):
# p1 = tuple(
# map(int, (width / self.cfg.curve_steps * j, height - 1 -
# values[j] * height)))
# p2 = tuple(
# map(int, (width / self.cfg.curve_steps * (j + 1), height - 1 -
# values[j + 1] * height)))
# color = []
# for t in range(self.channels):
# color.append(1 if t == i else 0)
# cv2.line(canvas, p1, p2, tuple(color), thickness=1)
class ToneFilter(Filter):
  """Global tone curve: a piecewise-linear intensity remapping with
  cfg.curve_steps learned segment slopes, shared across channels."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.curve_steps = cfg.curve_steps
    self.short_name = 'T'
    self.begin_filter_parameter = cfg.tone_begin_param
    self.num_filter_parameters = cfg.curve_steps
  def filter_param_regressor(self, features):
    # Reshape to (batch, 1, 1, 1, curve_steps), constrained to
    # cfg.tone_curve_range.
    tone_curve = tf.reshape(
        features, shape=(-1, 1, self.cfg.curve_steps))[:, None, None, :]
    tone_curve = tanh_range(*self.cfg.tone_curve_range)(tone_curve)
    return tone_curve
  def process(self, img, param, defog, IcA):
    # img = tf.minimum(img, 1.0)
    # param = tf.constant([[0.52, 0.53, 0.55, 1.9, 1.8, 1.7, 0.7, 0.6], [0.52, 0.53, 0.55, 1.9, 1.8, 1.7, 0.7, 0.6],
    #                      [0.52, 0.53, 0.55, 1.9, 1.8, 1.7, 0.7, 0.6], [0.52, 0.53, 0.55, 1.9, 1.8, 1.7, 0.7, 0.6],
    #                      [0.52, 0.53, 0.55, 1.9, 1.8, 1.7, 0.7, 0.6], [0.52, 0.53, 0.55, 1.9, 1.8, 1.7, 0.7, 0.6]])
    # param = tf.constant([[0.52, 0.53, 0.55, 1.9, 1.8, 1.7, 0.7, 0.6]])
    # param = tf.reshape(
    #     param, shape=(-1, 1, self.cfg.curve_steps))[:, None, None, :]
    tone_curve = param
    tone_curve_sum = tf.reduce_sum(tone_curve, axis=4) + 1e-30
    total_image = img * 0
    # Accumulate each 1/curve_steps band scaled by its slope, then
    # renormalize so the full curve maps [0,1] onto [0,1].
    for i in range(self.cfg.curve_steps):
      total_image += tf.clip_by_value(img - 1.0 * i / self.cfg.curve_steps, 0, 1.0 / self.cfg.curve_steps) \
                     * param[:, :, :, :, i]
    # p_cons = [0.52, 0.53, 0.55, 1.9, 1.8, 1.7, 0.7, 0.6]
    # for i in range(self.cfg.curve_steps):
    #     total_image += tf.clip_by_value(img - 1.0 * i / self.cfg.curve_steps, 0, 1.0 / self.cfg.curve_steps) \
    #                    * p_cons[i]
    total_image *= self.cfg.curve_steps / tone_curve_sum
    img = total_image
    return img
# def visualize_filter(self, debug_info, canvas):
# curve = debug_info['filter_parameters']
# height, width = canvas.shape[:2]
# values = np.array([0] + list(curve[0][0][0]))
# values /= sum(values) + 1e-30
# for j in range(0, self.curve_steps):
# values[j + 1] += values[j]
# for j in range(self.curve_steps):
# p1 = tuple(
# map(int, (width / self.curve_steps * j, height - 1 -
# values[j] * height)))
# p2 = tuple(
# map(int, (width / self.curve_steps * (j + 1), height - 1 -
# values[j + 1] * height)))
# cv2.line(canvas, p1, p2, (0, 0, 0), thickness=1)
class VignetFilter(Filter):
  """Vignetting filter: the image content comes entirely from the spatial
  mask (process() zeroes the image); overrides get_mask with a radial
  (Ax)^2 + (By)^2 + C formulation using five mask parameters."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.short_name = 'V'
    self.begin_filter_parameter = cfg.vignet_begin_param
    self.num_filter_parameters = 1
  def filter_param_regressor(self, features):
    # Single strength parameter in (0, 1).
    return tf.sigmoid(features)
  # BUG FIX: Filter.apply() invokes self.process(img, params, defog_A, IcA);
  # the original two-argument signature raised TypeError whenever this
  # filter was applied. Accept (and ignore) the extra arguments.
  def process(self, img, param, defog=None, IcA=None):
    return img * 0  # + param[:, None, None, :]
  def get_num_mask_parameters(self):
    return 5
  # Input: no need for tanh or sigmoid
  # Closer to 1 values are applied by filter more strongly
  # no additional TF variables inside
  def get_mask(self, img, mask_parameters):
    """Radial soft mask: (Ax)^2 + (By)^2 + C through a sigmoid, with
    sharpness and strength controls; all-ones when masking is disabled."""
    with tf.name_scope(name='mask'):
      # Five parameters for one filter
      filter_input_range = 5
      assert mask_parameters.shape[1] == self.get_num_mask_parameters()
      mask_parameters = tanh_range(
          l=-filter_input_range, r=filter_input_range,
          initial=0)(mask_parameters)
      size = list(map(int, img.shape[1:3]))
      grid = np.zeros(shape=[1] + size + [2], dtype=np.float32)
      # Normalized, centered pixel coordinates in [-0.5, 0.5].
      shorter_edge = min(size[0], size[1])
      for i in range(size[0]):
        for j in range(size[1]):
          grid[0, i, j,
               0] = (i + (shorter_edge - size[0]) / 2.0) / shorter_edge - 0.5
          grid[0, i, j,
               1] = (j + (shorter_edge - size[1]) / 2.0) / shorter_edge - 0.5
      grid = tf.constant(grid)
      # (Ax)^2 + (By)^2 + C
      inp = (grid[:, :, :, 0, None] * mask_parameters[:, None, None, 0, None]) ** 2 + \
            (grid[:, :, :, 1, None] * mask_parameters[:, None, None, 1, None]) ** 2 + \
            mask_parameters[:, None, None, 2, None] - filter_input_range
      # Sharpness and inversion
      inp *= self.cfg.maximum_sharpness * mask_parameters[:, None, None, 3,
                                                          None] / filter_input_range
      mask = tf.sigmoid(inp)
      # Strength
      mask *= mask_parameters[:, None, None, 4,
                              None] / filter_input_range * 0.5 + 0.5
      if not self.use_masking():
        print('* Masking Disabled')
        mask = mask * 0 + 1
      else:
        print('* Masking Enabled')
      print('mask', mask.shape)
      return mask
# def visualize_filter(self, debug_info, canvas):
# brightness = float(debug_info['filter_parameters'][0])
# cv2.rectangle(canvas, (8, 40), (56, 52), (brightness, brightness,
# brightness), cv2.FILLED)
#
class ContrastFilter(Filter):
  """Contrast filter: remaps luminance through a cosine S-curve and blends
  the recolored image with the input by a learned strength in [-1, 1]."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.short_name = 'Ct'
    self.begin_filter_parameter = cfg.contrast_begin_param
    self.num_filter_parameters = 1
  def filter_param_regressor(self, features):
    # tanh so the strength may be negative (contrast reduction).
    return tf.tanh(features)
  def process(self, img, param, defog, IcA):
    lum = tf.minimum(tf.maximum(rgb2lum(img), 0.0), 1.0)
    # Cosine S-curve on luminance: fixes 0, 0.5 and 1, steepens mid-tones.
    curved_lum = 0.5 - 0.5 * tf.cos(math.pi * lum)
    # Rescale colors so the image takes the curved luminance.
    recolored = img / (lum + 1e-6) * curved_lum
    return lerp(img, recolored, param[:, :, None, None])
# return lerp(img, contrast_image, 0.5)
# def visualize_filter(self, debug_info, canvas):
# exposure = debug_info['filter_parameters'][0]
# cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
# cv2.putText(canvas, 'Ct %+.2f' % exposure, (8, 48),
# cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0))
class WNBFilter(Filter):
  """Black-and-white filter: blends the image toward its luminance."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.short_name = 'BW'
    self.begin_filter_parameter = cfg.wnb_begin_param
    self.num_filter_parameters = 1
  def filter_param_regressor(self, features):
    # Blend weight in (0, 1): 0 keeps full color, 1 is fully monochrome.
    return tf.sigmoid(features)
  def process(self, img, param, defog, IcA):
    grayscale = rgb2lum(img)
    return lerp(img, grayscale, param[:, :, None, None])
# def visualize_filter(self, debug_info, canvas):
# exposure = debug_info['filter_parameters'][0]
# cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
# cv2.putText(canvas, 'B&W%+.2f' % exposure, (8, 48),
# cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0))
class LevelFilter(Filter):
  """Levels filter: linearly rescales intensities from [lower, upper] to
  [0, 1], clipping outside that range. lower is in (0, 1) and
  upper = lower_param + 1, both learned."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.short_name = 'Le'
    self.begin_filter_parameter = cfg.level_begin_param
    self.num_filter_parameters = 2
  def filter_param_regressor(self, features):
    # Both raw level parameters in (0, 1).
    return tf.sigmoid(features)
  # BUG FIX: Filter.apply() invokes self.process(img, params, defog_A, IcA);
  # the original two-argument signature raised TypeError whenever this
  # filter was applied. Accept (and ignore) the extra arguments.
  def process(self, img, param, defog=None, IcA=None):
    lower = param[:, 0]
    upper = param[:, 1] + 1
    lower = lower[:, None, None, None]
    upper = upper[:, None, None, None]
    # Stretch [lower, upper] to [0, 1]; epsilon guards the division.
    return tf.clip_by_value((img - lower) / (upper - lower + 1e-6), 0.0, 1.0)
# def visualize_filter(self, debug_info, canvas):
# level = list(map(float, debug_info['filter_parameters']))
# level[1] += 1
# cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
# cv2.putText(canvas, '%.2f %.2f' % tuple(level), (8, 48),
# cv2.FONT_HERSHEY_SIMPLEX, 0.25, (0, 0, 0))
class SaturationPlusFilter(Filter):
  """Saturation boost in HSV space, applied most strongly to mid-tone
  pixels, blended with the input by a learned weight in (0, 1)."""
  def __init__(self, net, cfg):
    Filter.__init__(self, net, cfg)
    self.short_name = 'S+'
    self.begin_filter_parameter = cfg.saturation_begin_param
    self.num_filter_parameters = 1
  def filter_param_regressor(self, features):
    # Blend weight in (0, 1).
    return tf.sigmoid(features)
  def process(self, img, param, defog, IcA):
    img = tf.minimum(img, 1.0)
    hsv = tf.image.rgb_to_hsv(img)
    s = hsv[:, :, :, 1:2]
    v = hsv[:, :, :, 2:3]
    # enhanced_s = s + (1 - s) * 0.7 * (0.5 - tf.abs(0.5 - v)) ** 2
    # Boost saturation proportionally to how close value is to 0.5
    # (mid-tones get the largest boost, shadows/highlights the least).
    enhanced_s = s + (1 - s) * (0.5 - tf.abs(0.5 - v)) * 0.8
    hsv1 = tf.concat([hsv[:, :, :, 0:1], enhanced_s, hsv[:, :, :, 2:]], axis=3)
    full_color = tf.image.hsv_to_rgb(hsv1)
    # Linear blend between the input and the fully boosted version.
    param = param[:, :, None, None]
    color_param = param
    img_param = 1.0 - param
    return img * img_param + full_color * color_param
# def visualize_filter(self, debug_info, canvas):
# exposure = debug_info['filter_parameters'][0]
# if canvas.shape[0] == 64:
# cv2.rectangle(canvas, (8, 40), (56, 52), (1, 1, 1), cv2.FILLED)
# cv2.putText(canvas, 'S %+.2f' % exposure, (8, 48),
# cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0))
# else:
# self.draw_high_res_text('Saturation %+.2f' % exposure, canvas)
| 23,809 | 35.295732 | 131 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/train.py | #! /usr/bin/env python
# coding=utf-8
import os
import time
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from tqdm import tqdm
from core.dataset import Dataset
from core.yolov3 import YOLOV3
from core.config import cfg
from core.config import args
import random
import cv2
import math
from filters import *
# --- GPU selection and experiment bookkeeping (runs at import time) ---
if args.use_gpu == 0:
    gpu_id = '-1'  # hide all GPUs: CPU-only run
else:
    gpu_id = args.gpu_id
    gpu_list = list()
    gpu_ids = gpu_id.split(',')
    # NOTE(review): '/gpu:%d' % int(i) uses the positional index, not the
    # ids listed in gpu_ids — confirm the multi-GPU mapping is intended.
    for i in range(len(gpu_ids)):
        gpu_list.append('/gpu:%d' % int(i))
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
# Per-experiment folder: checkpoints, logs and the config dump live under
# <exp_dir>/exp_<exp_num>.
exp_folder = os.path.join(args.exp_dir, 'exp_{}'.format(args.exp_num))
set_ckpt_dir = args.ckpt_dir
args.ckpt_dir = os.path.join(exp_folder, set_ckpt_dir)
if not os.path.exists(args.ckpt_dir):
    os.makedirs(args.ckpt_dir)
# Dump every CLI argument to config.txt for reproducibility.
config_log = os.path.join(exp_folder, 'config.txt')
arg_dict = args.__dict__
msg = ['{}: {}\n'.format(k, v) for k, v in arg_dict.items()]
utils.write_mes(msg, config_log, mode='w')
class YoloTrain(object):
    """Two-stage trainer for image-adaptive YOLOv3 (TF1 graph mode).

    Stage 1 trains only the three detection heads (everything else frozen);
    stage 2 fine-tunes all trainable variables. The constructor builds the
    whole graph: placeholders, model + losses, warmup/cosine learning-rate
    schedule, the two train ops (with an exponential moving average of the
    weights), checkpoint loader/saver, and TensorBoard summaries. train()
    runs the epoch loop and checkpoints after every epoch.
    """
    def __init__(self):
        # Hyper-parameters and data pipelines from the global config.
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
        self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
        self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
        self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
        self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
        self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
        self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
        self.max_bbox_per_scale = 150
        self.train_logdir = "./data_cityfog/log/train"
        self.trainset = Dataset('train')
        self.testset = Dataset('test')
        self.steps_per_period = len(self.trainset)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        # self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        with tf.name_scope('define_input'):
            # Foggy/degraded input, defog atmospheric light A, and the
            # dark channel of I/A (all computed on CPU in train()).
            self.input_data = tf.placeholder(tf.float32, [None, None, None, 3], name='input_data')
            self.defog_A = tf.placeholder(tf.float32, [None, 3], name='defog_A')
            self.IcA = tf.placeholder(tf.float32, [None, None, None,1], name='IcA')
            self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
            self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
            self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
            self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
            self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
            self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
            # NOTE(review): this placeholder reuses name='input_data'; TF
            # will silently rename it to input_data_1 — confirm no exported
            # graph relies on that name.
            self.input_data_clean = tf.placeholder(tf.float32, [None, None, None, 3], name='input_data')
            self.trainable = tf.placeholder(dtype=tf.bool, name='training')
        with tf.name_scope("define_loss"):
            self.model = YOLOV3(self.input_data, self.trainable, self.input_data_clean, self.defog_A, self.IcA)
            t_variables = tf.trainable_variables()
            print("t_variables", t_variables)
            # self.net_var = [v for v in t_variables if not 'extract_parameters' in v.name]
            self.net_var = tf.global_variables()
            self.giou_loss, self.conf_loss, self.prob_loss, self.recovery_loss = self.model.compute_loss(
                self.label_sbbox, self.label_mbbox, self.label_lbbox,
                self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
            # self.loss only includes the detection loss.
            self.loss = self.giou_loss + self.conf_loss + self.prob_loss
        with tf.name_scope('learn_rate'):
            # Linear warmup for warmup_steps, then cosine decay from
            # learn_rate_init down to learn_rate_end.
            self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
            warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
                                       dtype=tf.float64, name='warmup_steps')
            train_steps = tf.constant( (self.first_stage_epochs + self.second_stage_epochs)* self.steps_per_period,
                                       dtype=tf.float64, name='train_steps')
            self.learn_rate = tf.cond(
                pred=self.global_step < warmup_steps,
                true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
                false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
                                 (1 + tf.cos(
                                     (self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
            )
            global_step_update = tf.assign_add(self.global_step, 1.0)
        with tf.name_scope("define_weight_decay"):
            # Exponential moving average of all trainable weights.
            moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())
        with tf.name_scope("define_first_stage_train"):
            # Stage 1: optimize only the three detection-head scopes.
            self.first_stage_trainable_var_list = []
            for var in tf.trainable_variables():
                var_name = var.op.name
                var_name_mess = str(var_name).split('/')
                if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
                    self.first_stage_trainable_var_list.append(var)
            first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                      var_list=self.first_stage_trainable_var_list)
            # Chain: batch-norm updates -> optimizer step + global-step
            # increment -> EMA update, all folded into one no_op train op.
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([first_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_frozen_variables = tf.no_op()
        with tf.name_scope("define_second_stage_train"):
            # Stage 2: optimize every trainable variable.
            second_stage_trainable_var_list = tf.trainable_variables()
            second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
                                                      var_list=second_stage_trainable_var_list)
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                with tf.control_dependencies([second_stage_optimizer, global_step_update]):
                    with tf.control_dependencies([moving_ave]):
                        self.train_op_with_all_variables = tf.no_op()
        with tf.name_scope('loader_and_saver'):
            self.loader = tf.train.Saver(self.net_var)
            self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
        with tf.name_scope('summary'):
            tf.summary.scalar("learn_rate", self.learn_rate)
            tf.summary.scalar("recovery_loss", self.recovery_loss)
            tf.summary.scalar("giou_loss", self.giou_loss)
            tf.summary.scalar("conf_loss", self.conf_loss)
            tf.summary.scalar("prob_loss", self.prob_loss)
            tf.summary.scalar("total_loss", self.loss)
            # logdir = "./data/log/"
            # A fresh log directory is created for every run.
            logdir = os.path.join(exp_folder, 'log')
            if os.path.exists(logdir): shutil.rmtree(logdir)
            os.mkdir(logdir)
            self.write_op = tf.summary.merge_all()
            self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)
    def train(self):
        """Run the two-stage training loop, evaluating on the test set and
        saving a checkpoint after every epoch."""
        self.sess.run(tf.global_variables_initializer())
        try:
            print('=> Restoring weights from: %s ... ' % self.initial_weight)
            self.loader.restore(self.sess, self.initial_weight)
        # NOTE(review): bare except hides the real restore error; any
        # failure silently restarts training from scratch.
        except:
            print('=> %s does not exist !!!' % self.initial_weight)
            print('=> Now it starts to train YOLOV3 from scratch ...')
            self.first_stage_epochs = 0
        def DarkChannel(im):
            # Dark channel: per-pixel minimum over the B, G, R channels.
            b, g, r = cv2.split(im)
            dc = cv2.min(cv2.min(r, g), b);
            return dc
        def AtmLight(im, dark):
            # Estimate atmospheric light A from the brightest ~0.1% of
            # dark-channel pixels (dark channel prior defogging).
            [h, w] = im.shape[:2]
            imsz = h * w
            numpx = int(max(math.floor(imsz / 1000), 1))
            darkvec = dark.reshape(imsz, 1)
            imvec = im.reshape(imsz, 3)
            indices = darkvec.argsort(0)
            indices = indices[(imsz - numpx):imsz]
            atmsum = np.zeros([1, 3])
            # NOTE(review): range(1, numpx) skips indices[0] and sums only
            # numpx-1 pixels before dividing by numpx — confirm this
            # off-by-one is intended before changing it.
            for ind in range(1, numpx):
                atmsum = atmsum + imvec[indices[ind]]
            A = atmsum / numpx
            return A
        def DarkIcA(im, A):
            # Dark channel of the image normalized by A (per channel).
            im3 = np.empty(im.shape, im.dtype)
            for ind in range(0, 3):
                im3[:, :, ind] = im[:, :, ind] / A[0, ind]
            return DarkChannel(im3)
        for epoch in range(1, 1+self.first_stage_epochs+self.second_stage_epochs):
            # Pick the stage-appropriate train op.
            if epoch <= self.first_stage_epochs:
                train_op = self.train_op_with_frozen_variables
            else:
                train_op = self.train_op_with_all_variables
            pbar = tqdm(self.trainset)
            train_epoch_loss, test_epoch_loss = [], []
            for train_data in pbar:
                if args.fog_FLAG:
                    # start_time = time.time()
                    # Per-image defog statistics are computed on CPU with
                    # NumPy/OpenCV and fed through placeholders.
                    dark = np.zeros((train_data[0].shape[0], train_data[0].shape[1], train_data[0].shape[2]))
                    defog_A = np.zeros((train_data[0].shape[0], train_data[0].shape[3]))
                    IcA = np.zeros((train_data[0].shape[0], train_data[0].shape[1], train_data[0].shape[2]))
                    if DefogFilter in cfg.filters:
                        # print("**************************")
                        for i in range(train_data[0].shape[0]):
                            dark_i = DarkChannel(train_data[0][i])
                            defog_A_i = AtmLight(train_data[0][i], dark_i)
                            IcA_i = DarkIcA(train_data[0][i], defog_A_i)
                            dark[i, ...] = dark_i
                            defog_A[i, ...] = defog_A_i
                            IcA[i, ...] = IcA_i
                    IcA = np.expand_dims(IcA, axis=-1)
                    _, summary, train_step_loss, train_step_loss_recovery, global_step_val = self.sess.run(
                        [train_op, self.write_op, self.loss, self.recovery_loss, self.global_step], feed_dict={
                            self.input_data: train_data[0],
                            self.defog_A: defog_A,
                            self.IcA: IcA,
                            self.label_sbbox: train_data[1],
                            self.label_mbbox: train_data[2],
                            self.label_lbbox: train_data[3],
                            self.true_sbboxes: train_data[4],
                            self.true_mbboxes: train_data[5],
                            self.true_lbboxes: train_data[6],
                            self.input_data_clean: train_data[7],
                            self.trainable: True,
                        })
                else:
                    # No fog pipeline: feed the clean image as the input.
                    _, summary, train_step_loss, global_step_val = self.sess.run(
                        [train_op, self.write_op, self.loss, self.global_step], feed_dict={
                            self.input_data: train_data[7],
                            self.label_sbbox: train_data[1],
                            self.label_mbbox: train_data[2],
                            self.label_lbbox: train_data[3],
                            self.true_sbboxes: train_data[4],
                            self.true_mbboxes: train_data[5],
                            self.true_lbboxes: train_data[6],
                            self.input_data_clean: train_data[7],
                            self.trainable: True,
                        })
                train_epoch_loss.append(train_step_loss)
                self.summary_writer.add_summary(summary, global_step_val)
                pbar.set_description("train loss: %.2f" % train_step_loss)
            if args.fog_FLAG:
                # Test pass mirrors the training feed (trainable=False).
                for test_data in self.testset:
                    dark = np.zeros((test_data[0].shape[0], test_data[0].shape[1], test_data[0].shape[2]))
                    defog_A = np.zeros((test_data[0].shape[0], test_data[0].shape[3]))
                    IcA = np.zeros((test_data[0].shape[0], test_data[0].shape[1], test_data[0].shape[2]))
                    if DefogFilter in cfg.filters:
                        for i in range(test_data[0].shape[0]):
                            dark_i = DarkChannel(test_data[0][i])
                            defog_A_i = AtmLight(test_data[0][i], dark_i)
                            IcA_i = DarkIcA(test_data[0][i], defog_A_i)
                            dark[i, ...] = dark_i
                            defog_A[i, ...] = defog_A_i
                            IcA[i, ...] = IcA_i
                    IcA = np.expand_dims(IcA, axis=-1)
                    test_step_loss = self.sess.run(self.loss, feed_dict={
                        self.input_data: test_data[0],
                        self.defog_A: defog_A,
                        self.IcA: IcA,
                        self.label_sbbox: test_data[1],
                        self.label_mbbox: test_data[2],
                        self.label_lbbox: test_data[3],
                        self.true_sbboxes: test_data[4],
                        self.true_mbboxes: test_data[5],
                        self.true_lbboxes: test_data[6],
                        self.input_data_clean: test_data[7],
                        self.trainable: False,
                    })
                    test_epoch_loss.append(test_step_loss)
            else:
                for test_data in self.testset:
                    test_step_loss = self.sess.run(self.loss, feed_dict={
                        self.input_data: test_data[7],
                        self.label_sbbox: test_data[1],
                        self.label_mbbox: test_data[2],
                        self.label_lbbox: test_data[3],
                        self.true_sbboxes: test_data[4],
                        self.true_mbboxes: test_data[5],
                        self.true_lbboxes: test_data[6],
                        self.input_data_clean: test_data[7],
                        self.trainable: False,
                    })
                    test_epoch_loss.append(test_step_loss)
            # Epoch summary + checkpoint named by the mean test loss.
            train_epoch_loss, test_epoch_loss = np.mean(train_epoch_loss), np.mean(test_epoch_loss)
            ckpt_file = args.ckpt_dir + "/yolov3_test_loss=%.4f.ckpt" % test_epoch_loss
            log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            print("=> Epoch: %2d Time: %s Train loss: %.2f Test loss: %.2f Saving %s ..."
                  %(epoch, log_time, train_epoch_loss, test_epoch_loss, ckpt_file))
            self.saver.save(self.sess, ckpt_file, global_step=epoch)
if __name__ == '__main__':
    # Script entry point: build the graph and run training.
    YoloTrain().train()
| 15,293 | 46.203704 | 115 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/convert_weight.py | #! /usr/bin/env python
# coding=utf-8
# Convert the original darknet-exported YOLOv3 checkpoint into this repo's
# variable naming scheme: variables are read from cfg.YOLO.ORIGINAL_WEIGHT,
# matched BY POSITION against a freshly built YOLOV3 graph, and saved to
# cfg.YOLO.DEMO_WEIGHT. With --train_from_coco the COCO detection heads
# (conv_[sml]bbox / Conv_6, Conv_14, Conv_22) are excluded so they can be
# retrained on a new class set.
import argparse
import tensorflow as tf
from core.yolov3 import YOLOV3
from core.config import cfg
import os
# Force CPU execution for the conversion; no GPU is required.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
parser = argparse.ArgumentParser()
# NOTE(review): argparse type=bool treats any non-empty string as True, so
# "--train_from_coco False" still parses as True; only the default is reliable.
parser.add_argument("--train_from_coco", dest='train_from_coco', type=bool, default=True)
flag = parser.parse_args()
org_weights_path = cfg.YOLO.ORIGINAL_WEIGHT
cur_weights_path = cfg.YOLO.DEMO_WEIGHT
# Detection-head scopes skipped on each side (current graph / original graph).
preserve_cur_names = ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']
preserve_org_names = ['Conv_6', 'Conv_14', 'Conv_22']
org_weights_mess = []
# NOTE(review): as_default() without a `with` block is a no-op here; the meta
# graph below is imported into the process-wide default graph regardless.
tf.Graph().as_default()
load = tf.train.import_meta_graph(org_weights_path + '.meta')
with tf.Session() as sess:
    load.restore(sess, org_weights_path)
    # Collect (name, shape) of every original variable that will be copied.
    for var in tf.global_variables():
        var_name = var.op.name
        var_name_mess = str(var_name).split('/')
        var_shape = var.shape
        if flag.train_from_coco:
            # Keep only conv / batch-norm parameters and drop the COCO heads.
            if (var_name_mess[-1] not in ['weights', 'gamma', 'beta', 'moving_mean', 'moving_variance']) or \
                    (var_name_mess[1] == 'yolo-v3' and (var_name_mess[-2] in preserve_org_names)): continue
        org_weights_mess.append([var_name, var_shape])
        print("=> " + str(var_name).ljust(50), var_shape)
print()
tf.reset_default_graph()
cur_weights_mess = []
tf.Graph().as_default()
# Build this repo's YOLOv3 graph so its variables exist for positional matching.
with tf.name_scope('input'):
    input_data = tf.placeholder(dtype=tf.float32, shape=(1, 416, 416, 3), name='input_data')
    training = tf.placeholder(dtype=tf.bool, name='trainable')
model = YOLOV3(input_data, training)
for var in tf.global_variables():
    var_name = var.op.name
    var_name_mess = str(var_name).split('/')
    var_shape = var.shape
    print(var_name_mess[0])
    if flag.train_from_coco:
        if var_name_mess[0] in preserve_cur_names: continue
    cur_weights_mess.append([var_name, var_shape])
    print("=> " + str(var_name).ljust(50), var_shape)
org_weights_num = len(org_weights_mess)
cur_weights_num = len(cur_weights_mess)
# The two lists are matched purely by position, so their lengths must agree.
if cur_weights_num != org_weights_num:
    raise RuntimeError
print('=> Number of weights that will rename:\t%d' % cur_weights_num)
cur_to_org_dict = {}
for index in range(org_weights_num):
    org_name, org_shape = org_weights_mess[index]
    cur_name, cur_shape = cur_weights_mess[index]
    # Positional matches must also agree on shape, otherwise abort loudly.
    if cur_shape != org_shape:
        print(org_weights_mess[index])
        print(cur_weights_mess[index])
        raise RuntimeError
    cur_to_org_dict[cur_name] = org_name
    print("=> " + str(cur_name).ljust(50) + ' : ' + org_name)
with tf.name_scope('load_save'):
    name_to_var_dict = {var.op.name: var for var in tf.global_variables()}
    # Saver keyed by the ORIGINAL names but pointing at the NEW variables:
    # restoring the old checkpoint writes straight into the renamed graph.
    restore_dict = {cur_to_org_dict[cur_name]: name_to_var_dict[cur_name] for cur_name in cur_to_org_dict}
    load = tf.train.Saver(restore_dict)
    save = tf.train.Saver(tf.global_variables())
    for var in tf.global_variables():
        print("=> " + var.op.name)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('=> Restoring weights from:\t %s' % org_weights_path)
    load.restore(sess, org_weights_path)
    save.save(sess, cur_weights_path)
tf.reset_default_graph()
| 3,176 | 34.3 | 109 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/core/data_make.py | import numpy as np
import os
import cv2
import math
from numba import jit
import random
# only use the image including the labeled instance objects for training
def load_annotations(annot_path):
    """Read an annotation file and keep only lines that carry boxes.

    Each line is "<image_path> <box> <box> ..."; a line whose only token is
    the image path (no labeled objects) is dropped. Returns stripped lines.
    """
    print(annot_path)
    with open(annot_path, 'r') as handle:
        raw_lines = handle.readlines()
    kept = []
    for raw in raw_lines:
        stripped = raw.strip()
        # A line qualifies only if tokens remain after the image path.
        if stripped.split()[1:]:
            kept.append(stripped)
    return kept
# print('*****************Add haze offline***************************')
@jit()
def _add_haze(img_f, center, size, beta, A):
    """Apply an atmospheric-scattering fog model to a normalised image.

    Modifies img_f in place (and also returns it): each pixel is blended
    towards atmospheric light A with transmission exp(-beta * d), where d
    grows with distance from `center`.
    """
    (row, col, chs) = img_f.shape
    for j in range(row):
        for l in range(col):
            d = -0.04 * math.sqrt((j - center[0]) ** 2 + (l - center[1]) ** 2) + size
            td = math.exp(-beta * d)
            img_f[j][l][:] = img_f[j][l][:] * td + A * (1 - td)
    return img_f


def parse_annotation(annotation):
    """Synthesize 10 foggy variants of one annotated image.

    `annotation` is "<image_path> <box> ..."; the image is read, fogged with
    10 increasing densities (beta = 0.05 .. 0.14), and each variant is written
    to the hard-coded vocfog JPEGImages directory with the beta in its name.

    Fix: the numba kernel used to be re-defined (and therefore re-jitted)
    inside the density loop, forcing 10 recompilations per image; it is now
    the module-level helper `_add_haze`, compiled once per process.
    """
    line = annotation.split()
    image_path = line[0]
    img_name = image_path.split('/')[-1]
    image_name = img_name.split('.')[0]          # basename without extension
    image_name_index = img_name.split('.')[1]    # file extension
    if not os.path.exists(image_path):
        raise KeyError("%s does not exist ... " %image_path)
    image = cv2.imread(image_path)
    for i in range(10):
        # Fog from a fresh normalised copy each time so densities don't stack.
        img_f = image / 255
        (row, col, chs) = image.shape
        A = 0.5                      # atmospheric light
        beta = 0.01 * i + 0.05       # fog density for this variant
        size = math.sqrt(max(row, col))
        center = (row // 2, col // 2)
        foggy_image = _add_haze(img_f, center, size, beta, A)
        img_f = np.clip(foggy_image * 255, 0, 255)
        img_f = img_f.astype(np.uint8)
        img_name = '/data/vdd/liuwenyu/data_vocfog/train/JPEGImages/' + image_name \
                   + '_' + ("%.2f"%beta) + '.' + image_name_index
        #img_name = '/data/vdd/liuwenyu/data_vocfog/val/JPEGImages/' + image_name \
        #           + '_' + ("%.2f"%beta) + '.' + image_name_index
        cv2.imwrite(img_name, img_f)
if __name__ == '__main__':
    # Build the synthetic-fog training set from the clean annotation list.
    annotations = load_annotations('/home/liuwenyu.lwy/code/defog_yolov3/data/dataset/voc_norm_train.txt')
    # Alternative: the test split.
    #an = load_annotations('/home/liuwenyu.lwy/code/defog_yolov3/data/dataset/voc_norm_test.txt')
    total = len(annotations)
    print(total)
    for entry in annotations:
        parse_annotation(entry)
| 2,434 | 33.295775 | 97 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/core/dataset_lowlight.py | #! /usr/bin/env python
# coding=utf-8
import os
import cv2
import random
import numpy as np
import tensorflow as tf
import core.utils as utils
from core.config_lowlight import cfg
class Dataset(object):
    """Batch generator for (low-light) YOLOv3 training and testing.

    Iterating yields numpy batches: images plus the three-scale YOLO label
    tensors and per-scale ground-truth box lists. A fresh input resolution
    is drawn from cfg.TRAIN.INPUT_SIZE for every batch.
    """
    def __init__(self, dataset_type):
        # dataset_type selects the train config when 'train', else the test config.
        self.annot_path = cfg.TRAIN.ANNOT_PATH if dataset_type == 'train' else cfg.TEST.ANNOT_PATH
        self.input_sizes = cfg.TRAIN.INPUT_SIZE if dataset_type == 'train' else cfg.TEST.INPUT_SIZE
        self.batch_size = cfg.TRAIN.BATCH_SIZE if dataset_type == 'train' else cfg.TEST.BATCH_SIZE
        self.data_aug = cfg.TRAIN.DATA_AUG if dataset_type == 'train' else cfg.TEST.DATA_AUG
        self.train_input_sizes = cfg.TRAIN.INPUT_SIZE
        self.strides = np.array(cfg.YOLO.STRIDES)
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.max_bbox_per_scale = 150
        self.annotations = self.load_annotations(dataset_type)
        self.num_samples = len(self.annotations)
        self.num_batchs = int(np.ceil(self.num_samples / self.batch_size))
        self.batch_count = 0  # index of the next batch within the epoch
    def load_annotations(self, dataset_type):
        """Read self.annot_path, keep only lines that contain boxes, shuffle."""
        with open(self.annot_path, 'r') as f:
            txt = f.readlines()
            annotations = [line.strip() for line in txt if len(line.strip().split()[1:]) != 0]
        np.random.shuffle(annotations)
        print('###################the total image:', len(annotations))
        return annotations
    def __iter__(self):
        # The dataset is its own iterator; __next__ yields one batch per call.
        return self
    def __next__(self):
        """Assemble one batch; raises StopIteration (and reshuffles) at epoch end."""
        with tf.device('/cpu:0'):
            # Multi-scale training: pick a new input size for every batch.
            self.train_input_size = random.choice(self.train_input_sizes)
            self.train_output_sizes = self.train_input_size // self.strides
            batch_image = np.zeros((self.batch_size, self.train_input_size, self.train_input_size, 3))
            batch_label_sbbox = np.zeros((self.batch_size, self.train_output_sizes[0], self.train_output_sizes[0],
                                          self.anchor_per_scale, 5 + self.num_classes))
            batch_label_mbbox = np.zeros((self.batch_size, self.train_output_sizes[1], self.train_output_sizes[1],
                                          self.anchor_per_scale, 5 + self.num_classes))
            batch_label_lbbox = np.zeros((self.batch_size, self.train_output_sizes[2], self.train_output_sizes[2],
                                          self.anchor_per_scale, 5 + self.num_classes))
            batch_sbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))
            batch_mbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))
            batch_lbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))
            num = 0
            if self.batch_count < self.num_batchs:
                while num < self.batch_size:
                    index = self.batch_count * self.batch_size + num
                    # Wrap around so the last batch of the epoch is still full.
                    if index >= self.num_samples: index -= self.num_samples
                    annotation = self.annotations[index]
                    image, bboxes = self.parse_annotation(annotation)
                    label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = self.preprocess_true_boxes(bboxes)
                    batch_image[num, :, :, :] = image
                    batch_label_sbbox[num, :, :, :, :] = label_sbbox
                    batch_label_mbbox[num, :, :, :, :] = label_mbbox
                    batch_label_lbbox[num, :, :, :, :] = label_lbbox
                    batch_sbboxes[num, :, :] = sbboxes
                    batch_mbboxes[num, :, :] = mbboxes
                    batch_lbboxes[num, :, :] = lbboxes
                    num += 1
                self.batch_count += 1
                return batch_image, batch_label_sbbox, batch_label_mbbox, batch_label_lbbox, \
                       batch_sbboxes, batch_mbboxes, batch_lbboxes
            else:
                # Epoch finished: reset and reshuffle for the next pass.
                self.batch_count = 0
                np.random.shuffle(self.annotations)
                raise StopIteration
    def random_horizontal_flip(self, image, bboxes):
        """With probability 0.5, mirror the image left-right and remap boxes in place."""
        if random.random() < 0.5:
            _, w, _ = image.shape
            image = image[:, ::-1, :]
            bboxes[:, [0,2]] = w - bboxes[:, [2,0]]
        return image, bboxes
    def random_crop(self, image, bboxes):
        """With probability 0.5, crop the image while keeping every box intact.

        The crop window is sampled outside the union of all boxes, so no
        ground-truth box is cut.
        NOTE(review): crop_xmax/crop_ymax use max(w, ...)/max(h, ...), but the
        candidate value never exceeds w/h, so the right/bottom edges are never
        actually cropped -- this looks like it was meant to be min(); confirm
        intent before changing, as it alters training augmentation.
        """
        if random.random() < 0.5:
            h, w, _ = image.shape
            # (xmin, ymin, xmax, ymax) of the union of all boxes.
            max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1)
            max_l_trans = max_bbox[0]
            max_u_trans = max_bbox[1]
            max_r_trans = w - max_bbox[2]
            max_d_trans = h - max_bbox[3]
            crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans)))
            crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans)))
            crop_xmax = max(w, int(max_bbox[2] + random.uniform(0, max_r_trans)))
            crop_ymax = max(h, int(max_bbox[3] + random.uniform(0, max_d_trans)))
            image = image[crop_ymin : crop_ymax, crop_xmin : crop_xmax]
            bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - crop_xmin
            bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - crop_ymin
        return image, bboxes
    def random_translate(self, image, bboxes):
        """With probability 0.5, shift image and boxes by a random (tx, ty)
        chosen so that every ground-truth box stays inside the frame."""
        if random.random() < 0.5:
            h, w, _ = image.shape
            max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1)
            max_l_trans = max_bbox[0]
            max_u_trans = max_bbox[1]
            max_r_trans = w - max_bbox[2]
            max_d_trans = h - max_bbox[3]
            tx = random.uniform(-(max_l_trans - 1), (max_r_trans - 1))
            ty = random.uniform(-(max_u_trans - 1), (max_d_trans - 1))
            M = np.array([[1, 0, tx], [0, 1, ty]])
            image = cv2.warpAffine(image, M, (w, h))
            bboxes[:, [0, 2]] = bboxes[:, [0, 2]] + tx
            bboxes[:, [1, 3]] = bboxes[:, [1, 3]] + ty
        return image, bboxes
    def parse_annotation(self, annotation):
        """Load one annotation line, optionally augment, and letterbox-resize.

        Returns (image, bboxes) with the image scaled to the current
        train_input_size and the boxes mapped accordingly.
        """
        line = annotation.split()
        image_path = line[0]
        if not os.path.exists(image_path):
            raise KeyError("%s does not exist ... " %image_path)
        image = np.array(cv2.imread(image_path))
        # print('*****************read image***************************')
        # Boxes are comma-separated "x1,y1,x2,y2,class" tokens after the path.
        bboxes = np.array([list(map(lambda x: int(float(x)), box.split(','))) for box in line[1:]])
        if self.data_aug:
            image, bboxes = self.random_horizontal_flip(np.copy(image), np.copy(bboxes))
            image, bboxes = self.random_crop(np.copy(image), np.copy(bboxes))
            image, bboxes = self.random_translate(np.copy(image), np.copy(bboxes))
        image, bboxes = utils.image_preporcess(np.copy(image), [self.train_input_size, self.train_input_size], np.copy(bboxes))
        return image, bboxes
    def bbox_iou(self, boxes1, boxes2):
        """Broadcasted IoU for boxes in center (x, y, w, h) format."""
        boxes1 = np.array(boxes1)
        boxes2 = np.array(boxes2)
        boxes1_area = boxes1[..., 2] * boxes1[..., 3]
        boxes2_area = boxes2[..., 2] * boxes2[..., 3]
        # Convert both to corner format before intersecting.
        boxes1 = np.concatenate([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
                                 boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
        boxes2 = np.concatenate([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
                                 boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
        left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
        right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
        inter_section = np.maximum(right_down - left_up, 0.0)
        inter_area = inter_section[..., 0] * inter_section[..., 1]
        union_area = boxes1_area + boxes2_area - inter_area
        return inter_area / union_area
    def preprocess_true_boxes(self, bboxes):
        """Assign ground-truth boxes to anchors on the three output scales.

        For each box, every anchor with IoU > 0.3 on any scale becomes a
        positive; if none qualifies, the single best anchor overall is used.
        Returns the three label tensors plus the raw xywh boxes per scale.
        """
        label = [np.zeros((self.train_output_sizes[i], self.train_output_sizes[i], self.anchor_per_scale,
                           5 + self.num_classes)) for i in range(3)]
        bboxes_xywh = [np.zeros((self.max_bbox_per_scale, 4)) for _ in range(3)]
        bbox_count = np.zeros((3,))
        for bbox in bboxes:
            bbox_coor = bbox[:4]
            bbox_class_ind = bbox[4]
            # NOTE(review): np.float is a removed alias in NumPy >= 1.24;
            # this line needs plain `float` (or np.float64) on modern NumPy.
            onehot = np.zeros(self.num_classes, dtype=np.float)
            onehot[bbox_class_ind] = 1.0
            uniform_distribution = np.full(self.num_classes, 1.0 / self.num_classes)
            deta = 0.01  # label-smoothing factor ("delta")
            smooth_onehot = onehot * (1 - deta) + deta * uniform_distribution
            # Corner -> center format, then scale to each output grid.
            bbox_xywh = np.concatenate([(bbox_coor[2:] + bbox_coor[:2]) * 0.5, bbox_coor[2:] - bbox_coor[:2]], axis=-1)
            bbox_xywh_scaled = 1.0 * bbox_xywh[np.newaxis, :] / self.strides[:, np.newaxis]
            iou = []
            exist_positive = False
            for i in range(3):
                # Anchors centered on the grid cell that contains the box center.
                anchors_xywh = np.zeros((self.anchor_per_scale, 4))
                anchors_xywh[:, 0:2] = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32) + 0.5
                anchors_xywh[:, 2:4] = self.anchors[i]
                iou_scale = self.bbox_iou(bbox_xywh_scaled[i][np.newaxis, :], anchors_xywh)
                iou.append(iou_scale)
                iou_mask = iou_scale > 0.3
                if np.any(iou_mask):
                    xind, yind = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32)
                    label[i][yind, xind, iou_mask, :] = 0
                    label[i][yind, xind, iou_mask, 0:4] = bbox_xywh
                    label[i][yind, xind, iou_mask, 4:5] = 1.0
                    label[i][yind, xind, iou_mask, 5:] = smooth_onehot
                    bbox_ind = int(bbox_count[i] % self.max_bbox_per_scale)
                    bboxes_xywh[i][bbox_ind, :4] = bbox_xywh
                    bbox_count[i] += 1
                    exist_positive = True
            if not exist_positive:
                # No anchor beat the IoU threshold: fall back to the best one.
                best_anchor_ind = np.argmax(np.array(iou).reshape(-1), axis=-1)
                best_detect = int(best_anchor_ind / self.anchor_per_scale)
                best_anchor = int(best_anchor_ind % self.anchor_per_scale)
                xind, yind = np.floor(bbox_xywh_scaled[best_detect, 0:2]).astype(np.int32)
                label[best_detect][yind, xind, best_anchor, :] = 0
                label[best_detect][yind, xind, best_anchor, 0:4] = bbox_xywh
                label[best_detect][yind, xind, best_anchor, 4:5] = 1.0
                label[best_detect][yind, xind, best_anchor, 5:] = smooth_onehot
                bbox_ind = int(bbox_count[best_detect] % self.max_bbox_per_scale)
                bboxes_xywh[best_detect][bbox_ind, :4] = bbox_xywh
                bbox_count[best_detect] += 1
        label_sbbox, label_mbbox, label_lbbox = label
        sbboxes, mbboxes, lbboxes = bboxes_xywh
        return label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes
    def __len__(self):
        # Number of batches in one epoch.
        return self.num_batchs
| 11,016 | 42.203922 | 127 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/core/config_lowlight.py | #! /usr/bin/env python
# coding=utf-8
# Central configuration for the low-light pipeline: command-line arguments,
# learnable DIP-filter hyper-parameters, parameter-predictor CNN sizes, and
# the YOLO train/test settings, all exposed through `cfg` (an EasyDict).
from easydict import EasyDict as edict
from filters_lowlight import *
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--exp_num', dest='exp_num', type=str, default='58', help='current experiment number')
parser.add_argument('--epoch_first_stage', dest='epoch_first_stage', type=int, default=0, help='# of epochs')
parser.add_argument('--epoch_second_stage', dest='epoch_second_stage', type=int, default=70, help='# of epochs')
parser.add_argument('--use_gpu', dest='use_gpu', type=int, default=1, help='gpu flag, 1 for GPU and 0 for CPU')
parser.add_argument('--checkpoint_dir', dest='ckpt_dir', default='checkpoint', help='models are saved here')
parser.add_argument('--exp_dir', dest='exp_dir', default='./experiments_lowlight', help='models are saved here')
parser.add_argument('--gpu_id', dest='gpu_id', type=str, default='5', help='if use gpu, use gpu device id')
# NOTE(review): argparse type=bool treats any non-empty string as True, so
# passing "--ISP_FLAG False" (or "--lowlight_FLAG False") still yields True;
# only the defaults behave as written here.
parser.add_argument('--ISP_FLAG', dest='ISP_FLAG', type=bool, default=True, help='whether use isp')
parser.add_argument('--lowlight_FLAG', dest='lowlight_FLAG', type=bool, default=False, help='whether use Hybrid data training')
parser.add_argument('--train_path', dest='train_path', nargs='*', default='./data/dataset_dark/voc_norm_train.txt', help='folder of the training data')
parser.add_argument('--test_path', dest='test_path', nargs='*', default='./data/dataset_dark/voc_norm_test.txt', help='folder of the training data')
parser.add_argument('--class_name', dest='class_name', nargs='*', default='./data/classes/vocdark.names', help='folder of the training data')
parser.add_argument('--WRITE_IMAGE_PATH', dest='WRITE_IMAGE_PATH', nargs='*', default='./experiments_lowlight/exp_58/detection_vocnorm_test/', help='folder of the training data')
parser.add_argument('--WEIGHT_FILE', dest='WEIGHT_FILE', nargs='*', default='./experiments_lowlight/exp_58/checkpoint/yolov3_test_loss=9.7815.ckpt-62', help='folder of the training data')
parser.add_argument('--pre_train', dest='pre_train', default='NULL', help='the path of pretrained models if is not null. not used for now')
# we trained our model from scratch.
args = parser.parse_args()
__C = edict()
# Consumers can get config by: from config import cfg
cfg = __C
###########################################################################
# Filter Parameters
###########################################################################
# Differentiable image-processing filters applied in this order.
cfg.filters = [ImprovedWhiteBalanceFilter, GammaFilter,
               ToneFilter, ContrastFilter, UsmFilter
]
# Length of the predicted parameter vector; the *_begin_param offsets below
# index into it (wb: 0-2, gamma: 3, tone: 4-11, contrast: 12, usm: 13).
cfg.num_filter_parameters = 14
cfg.wb_begin_param = 0
cfg.gamma_begin_param = 3
cfg.tone_begin_param = 4
cfg.contrast_begin_param = 12
cfg.usm_begin_param = 13
cfg.curve_steps = 4
# Value ranges used to squash the raw CNN outputs into valid filter params.
cfg.gamma_range = 2.5
cfg.exposure_range = 3.5
cfg.wb_range = 1.1
cfg.color_curve_range = (0.90, 1.10)
cfg.lab_curve_range = (0.90, 1.10)
cfg.tone_curve_range = (0.5, 2)
cfg.defog_range = (0.5, 1.0)
cfg.usm_range = (0.0, 2.5)
cfg.contrast_range = (0.0, 1.0)
# Masking is DISABLED
cfg.masking = False
cfg.minimum_strength = 0.3
cfg.maximum_sharpness = 1
cfg.clamp = False
###########################################################################
# CNN Parameters
###########################################################################
# Sizes for the small CNN that predicts the filter parameters.
cfg.source_img_size = 64
cfg.base_channels = 32
cfg.dropout_keep_prob = 0.5
# G and C use the same feed dict?
cfg.share_feed_dict = True
cfg.shared_feature_extractor = True
cfg.fc1_size = 128
cfg.bnw = False
# number of filters for the first convolutional layers for all networks
# (stochastic/deterministic policy, critic, value)
cfg.feature_extractor_dims = 4096
###########################################################################
# YOLO options
__C.YOLO = edict()
# Set the class name
__C.YOLO.CLASSES = args.class_name
__C.YOLO.ANCHORS = "./data/anchors/baseline_anchors.txt"
__C.YOLO.MOVING_AVE_DECAY = 0.9995
__C.YOLO.STRIDES = [8, 16, 32]
__C.YOLO.ANCHOR_PER_SCALE = 3
__C.YOLO.IOU_LOSS_THRESH = 0.5
__C.YOLO.UPSAMPLE_METHOD = "resize"
__C.YOLO.ISP_FLAG = args.ISP_FLAG
# Train options
__C.TRAIN = edict()
__C.TRAIN.ANNOT_PATH = args.train_path
__C.TRAIN.BATCH_SIZE = 6
# Multi-scale training: one of these sizes is drawn per batch.
__C.TRAIN.INPUT_SIZE = [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]
__C.TRAIN.DATA_AUG = True
__C.TRAIN.LEARN_RATE_INIT = 1e-4
__C.TRAIN.LEARN_RATE_END = 1e-6
__C.TRAIN.WARMUP_EPOCHS = 2
__C.TRAIN.FISRT_STAGE_EPOCHS = args.epoch_first_stage
__C.TRAIN.SECOND_STAGE_EPOCHS = args.epoch_second_stage
__C.TRAIN.INITIAL_WEIGHT = args.pre_train
# TEST options
__C.TEST = edict()
__C.TEST.ANNOT_PATH = args.test_path
__C.TEST.BATCH_SIZE = 6
__C.TEST.INPUT_SIZE = 544
__C.TEST.DATA_AUG = False
__C.TEST.WRITE_IMAGE = True
__C.TEST.WRITE_IMAGE_PATH = args.WRITE_IMAGE_PATH
__C.TEST.WRITE_IMAGE_SHOW_LABEL = True
__C.TEST.WEIGHT_FILE = args.WEIGHT_FILE
__C.TEST.SHOW_LABEL = True
__C.TEST.SCORE_THRESHOLD = 0.3
__C.TEST.IOU_THRESHOLD = 0.45
| 5,327 | 37.057143 | 187 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/core/utils.py | #! /usr/bin/env python
# coding=utf-8
import cv2
import random
import colorsys
import numpy as np
import tensorflow as tf
def read_class_names(class_file_name):
    """Load class names from a file into an {index: name} dict.

    One class per line; only the trailing newline is stripped, so other
    surrounding whitespace in a name is preserved.
    """
    with open(class_file_name, 'r') as handle:
        return {idx: line.strip('\n') for idx, line in enumerate(handle)}
def get_anchors(anchors_path):
    """Parse the first line of the anchor file into a (3, 3, 2) float32 array.

    The line holds 18 comma-separated numbers: 3 scales x 3 anchors x (w, h).
    """
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = np.asarray(first_line.split(','), dtype=np.float32)
    return values.reshape(3, 3, 2)
def image_preporcess(image, target_size, gt_boxes=None):
    """Letterbox-resize a BGR image to target_size and scale it to [0, 1].

    The image is converted BGR->RGB, resized with its aspect ratio preserved,
    and centred on a black (ih, iw, 3) canvas. When gt_boxes is given, the
    box coordinates are mapped into the padded image (in place) and returned.
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
    ih, iw = target_size
    h, w, _ = rgb.shape
    scale = min(iw / w, ih / h)
    nw, nh = int(scale * w), int(scale * h)
    resized = cv2.resize(rgb, (nw, nh))
    canvas = np.full(shape=[ih, iw, 3], fill_value=0.0)
    dw, dh = (iw - nw) // 2, (ih - nh) // 2
    canvas[dh:nh + dh, dw:nw + dw, :] = resized
    canvas = canvas / 255.
    if gt_boxes is None:
        return canvas
    gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
    gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
    return canvas, gt_boxes
def image_unpreporcess(image, target_size, gt_boxes=None):
    """Undo the letterbox step: resize back and crop the padding away.

    NOTE(review): the gt_boxes transform below is identical to the forward
    one in image_preporcess (`* scale + dw`) rather than its inverse, and
    nw/nh are computed with 1/scale -- this looks copy-pasted; verify the
    box mapping against an actual round-trip before relying on it.
    """
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR).astype(np.float32)
    ih, iw = target_size
    h, w, _ = image.shape
    scale = min(w/iw, h/ih)
    # Enlarge by 1/scale, then crop the centre (ih, iw) window.
    nw, nh = int(1/scale * w), int(1/scale * h)
    image_resized = cv2.resize(image, (nw, nh))
    dw, dh = (nw - iw) // 2, (nh-ih) // 2
    dw = max(0, dw)
    dh = max(0, dh)
    image_paded = image_resized[dh:ih+dh, dw:iw+dw, :]
    # image_paded = image_paded / 255.
    if gt_boxes is None:
        return image_paded
    else:
        gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
        gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
        return image_paded, gt_boxes
def draw_bbox(image, bboxes, classes, show_label=True):
    """Draw detection boxes (and optional "<class>: <score>" labels) on image.

    bboxes: [x_min, y_min, x_max, y_max, probability, cls_id] format coordinates.
    Colors are spread evenly around the HSV hue wheel and shuffled with a
    fixed seed so each class keeps a stable color across calls.
    """
    num_classes = len(classes)
    image_h, image_w, _ = image.shape
    hues = [(1.0 * c / num_classes, 1., 1.) for c in range(num_classes)]
    palette = [colorsys.hsv_to_rgb(*hsv) for hsv in hues]
    palette = [(int(r * 255), int(g * 255), int(b * 255)) for r, g, b in palette]
    random.seed(0)
    random.shuffle(palette)
    random.seed(None)  # restore nondeterministic seeding for other callers
    bbox_thick = int(0.6 * (image_h + image_w) / 600)
    for box in bboxes:
        coor = np.array(box[:4], dtype=np.int32)
        score = box[4]
        class_ind = int(box[5])
        bbox_color = palette[class_ind]
        c1, c2 = (coor[0], coor[1]), (coor[2], coor[3])
        cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)
        if show_label:
            bbox_mess = '%s: %.2f' % (classes[class_ind], score)
            t_size = cv2.getTextSize(bbox_mess, 0, 0.5, thickness=bbox_thick//2)[0]
            # Filled background rectangle behind the label text.
            cv2.rectangle(image, c1, (c1[0] + t_size[0], c1[1] - t_size[1] - 3), bbox_color, -1)
            cv2.putText(image, bbox_mess, (c1[0], c1[1]-2), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 0, 0), bbox_thick//2, lineType=cv2.LINE_AA)
    return image
def bboxes_iou(boxes1, boxes2):
    """Broadcasted IoU for corner-format boxes (xmin, ymin, xmax, ymax).

    The result is clamped below by float32 epsilon so callers may divide by it.
    """
    a = np.array(boxes1)
    b = np.array(boxes2)
    area_a = (a[..., 2] - a[..., 0]) * (a[..., 3] - a[..., 1])
    area_b = (b[..., 2] - b[..., 0]) * (b[..., 3] - b[..., 1])
    overlap_tl = np.maximum(a[..., :2], b[..., :2])
    overlap_br = np.minimum(a[..., 2:], b[..., 2:])
    overlap_wh = np.maximum(overlap_br - overlap_tl, 0.0)
    overlap = overlap_wh[..., 0] * overlap_wh[..., 1]
    union = area_a + area_b - overlap
    return np.maximum(1.0 * overlap / union, np.finfo(np.float32).eps)
def read_pb_return_tensors(graph, pb_file, return_elements):
    """Load a frozen GraphDef from pb_file into `graph` and return the
    tensors/ops named in `return_elements`."""
    with tf.gfile.FastGFile(pb_file, 'rb') as f:
        frozen_graph_def = tf.GraphDef()
        frozen_graph_def.ParseFromString(f.read())
    with graph.as_default():
        return_elements = tf.import_graph_def(frozen_graph_def,
                                              return_elements=return_elements)
    return return_elements
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
    """Greedy per-class non-maximum suppression.

    :param bboxes: (xmin, ymin, xmax, ymax, score, class)
    Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
          https://github.com/bharatsingh430/soft-nms
    """
    kept = []
    for cls in list(set(bboxes[:, 5])):
        remaining = bboxes[bboxes[:, 5] == cls]
        while len(remaining) > 0:
            # Take the highest-scoring box of this class as the next winner.
            top = np.argmax(remaining[:, 4])
            winner = remaining[top]
            kept.append(winner)
            remaining = np.concatenate([remaining[:top], remaining[top + 1:]])
            overlaps = bboxes_iou(winner[np.newaxis, :4], remaining[:, :4])
            weight = np.ones((len(overlaps),), dtype=np.float32)
            assert method in ['nms', 'soft-nms']
            if method == 'nms':
                # Hard suppression: zero out boxes overlapping the winner.
                weight[overlaps > iou_threshold] = 0.0
            if method == 'soft-nms':
                # Soft suppression: decay scores with a Gaussian of the IoU.
                weight = np.exp(-(1.0 * overlaps ** 2 / sigma))
            remaining[:, 4] = remaining[:, 4] * weight
            remaining = remaining[remaining[:, 4] > 0.]
    return kept
def postprocess_boxes(pred_bbox, org_img_shape, input_size, score_threshold):
    """Map raw network predictions back to the original image and filter them.

    Steps: (1) xywh -> corner coords, (2) undo the letterbox resize, (3) clip
    to the image, (4) drop degenerate/out-of-scale boxes, (5) drop low scores.
    Returns rows of (xmin, ymin, xmax, ymax, score, class).
    """
    valid_scale = [0, np.inf]
    preds = np.array(pred_bbox)
    xywh = preds[:, 0:4]
    conf = preds[:, 4]
    prob = preds[:, 5:]
    # (1) (x, y, w, h) -> (xmin, ymin, xmax, ymax)
    corners = np.concatenate([xywh[:, :2] - xywh[:, 2:] * 0.5,
                              xywh[:, :2] + xywh[:, 2:] * 0.5], axis=-1)
    # (2) network coords -> original-image coords (invert the letterbox)
    org_h, org_w = org_img_shape
    resize_ratio = min(input_size / org_w, input_size / org_h)
    dw = (input_size - resize_ratio * org_w) / 2
    dh = (input_size - resize_ratio * org_h) / 2
    corners[:, 0::2] = 1.0 * (corners[:, 0::2] - dw) / resize_ratio
    corners[:, 1::2] = 1.0 * (corners[:, 1::2] - dh) / resize_ratio
    # (3) clip to the image; fully inverted boxes are zeroed out
    corners = np.concatenate([np.maximum(corners[:, :2], [0, 0]),
                              np.minimum(corners[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
    invalid_mask = np.logical_or((corners[:, 0] > corners[:, 2]), (corners[:, 1] > corners[:, 3]))
    corners[invalid_mask] = 0
    # (4) keep only boxes whose geometric-mean side length is in valid_scale
    bboxes_scale = np.sqrt(np.multiply.reduce(corners[:, 2:4] - corners[:, 0:2], axis=-1))
    scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
    # (5) keep only confident detections
    classes = np.argmax(prob, axis=-1)
    scores = conf * prob[np.arange(len(corners)), classes]
    mask = np.logical_and(scale_mask, scores > score_threshold)
    return np.concatenate([corners[mask], scores[mask][:, np.newaxis], classes[mask][:, np.newaxis]], axis=-1)
def write_mes(msg, log_name=None, show=True, mode='a'):
    """Echo a message (str, or list/tuple of lines) to stdout and/or a file.

    When printing, a newline is appended only if the line does not already
    end with one; the file write stores msg verbatim (no newline handling).
    """
    def _terminator(line):
        return '' if line.endswith('\n') else '\n'

    if show:
        if isinstance(msg, str):
            print(msg, end=_terminator(msg))
        elif isinstance(msg, (list, tuple)):
            for line in msg:
                print(line, end=_terminator(line))  # might be a different thing
        else:
            print(msg)
    if log_name is not None:
        with open(log_name, mode) as f:
            f.writelines(msg)
| 8,188 | 33.263598 | 106 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/core/dataset.py | #! /usr/bin/env python
# coding=utf-8
import os
import cv2
import random
import numpy as np
import tensorflow as tf
import core.utils as utils
from core.config import cfg
from core.config import args
import time
import math
from numba import jit
class Dataset(object):
"""implement Dataset here"""
    def __init__(self, dataset_type):
        """Configure the batch generator from `cfg`.

        `dataset_type` == 'train' selects the TRAIN settings (with foggy-data
        flag enabled); any other value selects the TEST settings.
        """
        self.annot_path = cfg.TRAIN.ANNOT_PATH if dataset_type == 'train' else cfg.TEST.ANNOT_PATH
        self.input_sizes = cfg.TRAIN.INPUT_SIZE if dataset_type == 'train' else cfg.TEST.INPUT_SIZE
        self.batch_size = cfg.TRAIN.BATCH_SIZE if dataset_type == 'train' else cfg.TEST.BATCH_SIZE
        self.data_aug = cfg.TRAIN.DATA_AUG if dataset_type == 'train' else cfg.TEST.DATA_AUG
        # Selects the foggy-image directory (train vs val) in parse_annotation.
        self.data_train_flag = True if dataset_type == 'train' else False
        self.train_input_sizes = cfg.TRAIN.INPUT_SIZE
        self.strides = np.array(cfg.YOLO.STRIDES)
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_classes = len(self.classes)
        self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.max_bbox_per_scale = 150
        self.annotations = self.load_annotations(dataset_type)
        self.num_samples = len(self.annotations)
        self.num_batchs = int(np.ceil(self.num_samples / self.batch_size))
        self.batch_count = 0  # index of the next batch within the epoch
# only use the image including the labeled instance objects for training
def load_annotations(self, dataset_type):
with open(self.annot_path, 'r') as f:
txt = f.readlines()
annotations = [line.strip() for line in txt if len(line.strip().split()[1:]) != 0]
np.random.shuffle(annotations)
print('###################the total image:', len(annotations))
return annotations
    def __iter__(self):
        # The dataset is its own iterator; __next__ yields one batch per call.
        return self
    def __next__(self):
        """Assemble one batch of (possibly foggy) images plus clean originals.

        Returns (image, label_s, label_m, label_l, sbboxes, mbboxes, lbboxes,
        clean_image) as numpy arrays; a fresh random input size is drawn per
        batch, and StopIteration resets/reshuffles at the end of the epoch.
        """
        with tf.device('/cpu:0'):
            # Multi-scale training: pick a new input size for every batch.
            self.train_input_size = random.choice(self.train_input_sizes)
            self.train_output_sizes = self.train_input_size // self.strides
            batch_image = np.zeros((self.batch_size, self.train_input_size, self.train_input_size, 3))
            batch_clean_image = np.zeros((self.batch_size, self.train_input_size, self.train_input_size, 3))
            batch_label_sbbox = np.zeros((self.batch_size, self.train_output_sizes[0], self.train_output_sizes[0],
                                          self.anchor_per_scale, 5 + self.num_classes))
            batch_label_mbbox = np.zeros((self.batch_size, self.train_output_sizes[1], self.train_output_sizes[1],
                                          self.anchor_per_scale, 5 + self.num_classes))
            batch_label_lbbox = np.zeros((self.batch_size, self.train_output_sizes[2], self.train_output_sizes[2],
                                          self.anchor_per_scale, 5 + self.num_classes))
            batch_sbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))
            batch_mbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))
            batch_lbboxes = np.zeros((self.batch_size, self.max_bbox_per_scale, 4))
            num = 0
            if self.batch_count < self.num_batchs:
                # start_time = time.time()
                while num < self.batch_size:
                    index = self.batch_count * self.batch_size + num
                    # Wrap around so the last batch of the epoch is still full.
                    if index >= self.num_samples: index -= self.num_samples
                    annotation = self.annotations[index]
                    image, bboxes, clean_image = self.parse_annotation(annotation)
                    label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = self.preprocess_true_boxes(bboxes)
                    batch_image[num, :, :, :] = image
                    batch_clean_image[num, :, :, :] = clean_image
                    batch_label_sbbox[num, :, :, :, :] = label_sbbox
                    batch_label_mbbox[num, :, :, :, :] = label_mbbox
                    batch_label_lbbox[num, :, :, :, :] = label_lbbox
                    batch_sbboxes[num, :, :] = sbboxes
                    batch_mbboxes[num, :, :] = mbboxes
                    batch_lbboxes[num, :, :] = lbboxes
                    num += 1
                self.batch_count += 1
                # end_time = time.time()
                # print('time used by method 1:', end_time - start_time)
                return batch_image, batch_label_sbbox, batch_label_mbbox, batch_label_lbbox, \
                       batch_sbboxes, batch_mbboxes, batch_lbboxes, batch_clean_image
            else:
                # Epoch finished: reset and reshuffle for the next pass.
                self.batch_count = 0
                np.random.shuffle(self.annotations)
                raise StopIteration
def random_horizontal_flip(self, image, bboxes):
if random.random() < 0.5:
_, w, _ = image.shape
image = image[:, ::-1, :]
bboxes[:, [0,2]] = w - bboxes[:, [2,0]]
return image, bboxes
    def random_crop(self, image, bboxes):
        """With probability 0.5, crop the image while keeping every box intact.

        The crop window is sampled outside the union of all boxes, so no
        ground-truth box is cut.
        NOTE(review): crop_xmax/crop_ymax use max(w, ...)/max(h, ...), but the
        candidate value never exceeds w/h, so the right/bottom edges are never
        actually cropped -- this looks like it was meant to be min(); confirm
        intent before changing, as it alters training augmentation.
        """
        if random.random() < 0.5:
            h, w, _ = image.shape
            # (xmin, ymin, xmax, ymax) of the union of all boxes.
            max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1)
            max_l_trans = max_bbox[0]
            max_u_trans = max_bbox[1]
            max_r_trans = w - max_bbox[2]
            max_d_trans = h - max_bbox[3]
            crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans)))
            crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans)))
            crop_xmax = max(w, int(max_bbox[2] + random.uniform(0, max_r_trans)))
            crop_ymax = max(h, int(max_bbox[3] + random.uniform(0, max_d_trans)))
            image = image[crop_ymin : crop_ymax, crop_xmin : crop_xmax]
            bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - crop_xmin
            bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - crop_ymin
        return image, bboxes
    def random_translate(self, image, bboxes):
        """With probability 0.5, shift image and boxes by a random (tx, ty)
        chosen so that every ground-truth box stays inside the frame."""
        if random.random() < 0.5:
            h, w, _ = image.shape
            # (xmin, ymin, xmax, ymax) of the union of all boxes.
            max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)], axis=-1)
            max_l_trans = max_bbox[0]
            max_u_trans = max_bbox[1]
            max_r_trans = w - max_bbox[2]
            max_d_trans = h - max_bbox[3]
            tx = random.uniform(-(max_l_trans - 1), (max_r_trans - 1))
            ty = random.uniform(-(max_u_trans - 1), (max_d_trans - 1))
            # 2x3 affine matrix for a pure translation.
            M = np.array([[1, 0, tx], [0, 1, ty]])
            image = cv2.warpAffine(image, M, (w, h))
            bboxes[:, [0, 2]] = bboxes[:, [0, 2]] + tx
            bboxes[:, [1, 3]] = bboxes[:, [1, 3]] + ty
        return image, bboxes
def parse_annotation(self, annotation):
    """Parse one annotation line into (detector input, boxes, clean target).

    Line format: "<image_path> x1,y1,x2,y2,cls x1,y1,x2,y2,cls ...".
    With probability 2/3 the detector input is a pre-synthesized foggy copy of
    the image (one of ten fog densities, generated offline by
    ./core/data_make.py) while the original image is returned as the clean
    restoration target; otherwise both outputs are the clean image.
    """
    line = annotation.split()
    image_path = line[0]
    if not os.path.exists(image_path):
        raise KeyError("%s does not exist ... " % image_path)
    image = cv2.imread(image_path)
    # Split "<name>.<ext>" so the matching foggy file name can be rebuilt below.
    img_name = image_path.split('/')[-1]
    # print(img_name)
    image_name = img_name.split('.')[0]
    # print(image_name)
    image_name_index = img_name.split('.')[1]
    # image = np.array(cv2.imread(image_path))
    # print('*****************read image***************************')
    bboxes = np.array([list(map(lambda x: int(float(x)), box.split(','))) for box in line[1:]])
    # Each image has a probability of 2/3 to be randomly added with some kind of fog
    if random.randint(0, 2) > 0:
        # Fog density beta is one of {0.05, 0.06, ..., 0.14}.
        beta = random.randint(0, 9)
        beta = 0.01 * beta + 0.05
        # load voc_foggy_synthetic image offline (The synthesized code is ./core/data_make.py)
        if self.data_train_flag:
            img_name = args.vocfog_traindata_dir + image_name \
                       + '_' + ("%.2f" % beta) + '.' + image_name_index
        else:
            img_name = args.vocfog_valdata_dir + image_name \
                       + '_' + ("%.2f" % beta) + '.' + image_name_index
        foggy_image = cv2.imread(img_name)
        if self.data_aug:
            # The clean/foggy pair must receive identical augmentations to stay
            # pixel-aligned, so the single-image helper methods cannot be reused.
            if random.random() < 0.5:
                # Horizontal flip of both images plus the box x-coordinates.
                _, w, _ = image.shape
                image = image[:, ::-1, :]
                foggy_image = foggy_image[:, ::-1, :]
                bboxes[:, [0, 2]] = w - bboxes[:, [2, 0]]
            if random.random() < 0.5:
                # Random crop that keeps every ground-truth box inside.
                h, w, _ = image.shape
                max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)],
                                          axis=-1)
                max_l_trans = max_bbox[0]
                max_u_trans = max_bbox[1]
                max_r_trans = w - max_bbox[2]
                max_d_trans = h - max_bbox[3]
                crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans)))
                crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans)))
                # NOTE(review): max(w, ...) / max(h, ...) always evaluate to w / h
                # here, so the right and bottom edges are never cropped; `min`
                # was probably intended -- confirm before changing behavior.
                crop_xmax = max(w, int(max_bbox[2] + random.uniform(0, max_r_trans)))
                crop_ymax = max(h, int(max_bbox[3] + random.uniform(0, max_d_trans)))
                image = image[crop_ymin: crop_ymax, crop_xmin: crop_xmax]
                foggy_image = foggy_image[crop_ymin: crop_ymax, crop_xmin: crop_xmax]
                bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - crop_xmin
                bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - crop_ymin
            if random.random() < 0.5:
                # Random translation; the shift is bounded so all boxes stay inside.
                h, w, _ = image.shape
                max_bbox = np.concatenate([np.min(bboxes[:, 0:2], axis=0), np.max(bboxes[:, 2:4], axis=0)],
                                          axis=-1)
                max_l_trans = max_bbox[0]
                max_u_trans = max_bbox[1]
                max_r_trans = w - max_bbox[2]
                max_d_trans = h - max_bbox[3]
                tx = random.uniform(-(max_l_trans - 1), (max_r_trans - 1))
                ty = random.uniform(-(max_u_trans - 1), (max_d_trans - 1))
                M = np.array([[1, 0, tx], [0, 1, ty]])
                image = cv2.warpAffine(image, M, (w, h))
                foggy_image = cv2.warpAffine(foggy_image, M, (w, h))
                bboxes[:, [0, 2]] = bboxes[:, [0, 2]] + tx
                bboxes[:, [1, 3]] = bboxes[:, [1, 3]] + ty
        # Letterbox both images to the square training size; the boxes are
        # remapped once (the foggy call discards its box output).
        foggy_image, _ = utils.image_preporcess(np.copy(foggy_image),
                                                [self.train_input_size, self.train_input_size],
                                                np.copy(bboxes))
        clean_image, bboxes = utils.image_preporcess(np.copy(image),
                                                     [self.train_input_size, self.train_input_size],
                                                     np.copy(bboxes))
    else:
        # Clean branch: standard augmentations, detector input == clean target.
        if self.data_aug:
            image, bboxes = self.random_horizontal_flip(np.copy(image), np.copy(bboxes))
            image, bboxes = self.random_crop(np.copy(image), np.copy(bboxes))
            image, bboxes = self.random_translate(np.copy(image), np.copy(bboxes))
        clean_image, bboxes = utils.image_preporcess(np.copy(image),
                                                     [self.train_input_size, self.train_input_size],
                                                     np.copy(bboxes))
        foggy_image = clean_image
    return foggy_image, bboxes, clean_image
def bbox_iou(self, boxes1, boxes2):
    """IoU between boxes given as (cx, cy, w, h); broadcasts over leading dims."""
    b1 = np.array(boxes1)
    b2 = np.array(boxes2)

    area1 = b1[..., 2] * b1[..., 3]
    area2 = b2[..., 2] * b2[..., 3]

    # Convert center/size form to corner form: (x1, y1, x2, y2).
    corners1 = np.concatenate([b1[..., :2] - b1[..., 2:] * 0.5,
                               b1[..., :2] + b1[..., 2:] * 0.5], axis=-1)
    corners2 = np.concatenate([b2[..., :2] - b2[..., 2:] * 0.5,
                               b2[..., :2] + b2[..., 2:] * 0.5], axis=-1)

    top_left = np.maximum(corners1[..., :2], corners2[..., :2])
    bottom_right = np.minimum(corners1[..., 2:], corners2[..., 2:])

    overlap = np.maximum(bottom_right - top_left, 0.0)
    intersection = overlap[..., 0] * overlap[..., 1]
    union = area1 + area2 - intersection

    return intersection / union
def preprocess_true_boxes(self, bboxes):
    """Convert ground-truth boxes into YOLO training targets for 3 scales.

    Args:
        bboxes: (N, 5) array of x1, y1, x2, y2, class_index in pixel coords.

    Returns:
        (label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes):
        per-scale target tensors of shape (grid, grid, anchor_per_scale,
        5 + num_classes) and per-scale (max_bbox_per_scale, 4) arrays holding
        the assigned boxes in (cx, cy, w, h) form.
    """
    label = [np.zeros((self.train_output_sizes[i], self.train_output_sizes[i], self.anchor_per_scale,
                       5 + self.num_classes)) for i in range(3)]
    bboxes_xywh = [np.zeros((self.max_bbox_per_scale, 4)) for _ in range(3)]
    bbox_count = np.zeros((3,))

    for bbox in bboxes:
        bbox_coor = bbox[:4]
        bbox_class_ind = bbox[4]

        # Label smoothing: blend the one-hot target with a uniform distribution.
        # (dtype fix: np.float was removed in NumPy 1.24; builtin float is the
        # same float64 dtype.)
        onehot = np.zeros(self.num_classes, dtype=float)
        onehot[bbox_class_ind] = 1.0
        uniform_distribution = np.full(self.num_classes, 1.0 / self.num_classes)
        delta = 0.01
        smooth_onehot = onehot * (1 - delta) + delta * uniform_distribution

        # (x1,y1,x2,y2) -> (cx,cy,w,h), then rescale onto each output grid.
        bbox_xywh = np.concatenate([(bbox_coor[2:] + bbox_coor[:2]) * 0.5, bbox_coor[2:] - bbox_coor[:2]], axis=-1)
        bbox_xywh_scaled = 1.0 * bbox_xywh[np.newaxis, :] / self.strides[:, np.newaxis]

        iou = []
        exist_positive = False
        for i in range(3):
            # Anchor boxes centered on the grid cell containing the box center.
            anchors_xywh = np.zeros((self.anchor_per_scale, 4))
            anchors_xywh[:, 0:2] = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32) + 0.5
            anchors_xywh[:, 2:4] = self.anchors[i]

            iou_scale = self.bbox_iou(bbox_xywh_scaled[i][np.newaxis, :], anchors_xywh)
            iou.append(iou_scale)
            iou_mask = iou_scale > 0.3

            if np.any(iou_mask):
                xind, yind = np.floor(bbox_xywh_scaled[i, 0:2]).astype(np.int32)

                label[i][yind, xind, iou_mask, :] = 0
                label[i][yind, xind, iou_mask, 0:4] = bbox_xywh
                label[i][yind, xind, iou_mask, 4:5] = 1.0
                label[i][yind, xind, iou_mask, 5:] = smooth_onehot

                bbox_ind = int(bbox_count[i] % self.max_bbox_per_scale)
                bboxes_xywh[i][bbox_ind, :4] = bbox_xywh
                bbox_count[i] += 1
                exist_positive = True

        if not exist_positive:
            # No anchor cleared the IoU threshold: fall back to the single
            # best-matching anchor across all scales.
            best_anchor_ind = np.argmax(np.array(iou).reshape(-1), axis=-1)
            best_detect = int(best_anchor_ind // self.anchor_per_scale)  # scale index
            best_anchor = int(best_anchor_ind % self.anchor_per_scale)   # anchor within scale
            xind, yind = np.floor(bbox_xywh_scaled[best_detect, 0:2]).astype(np.int32)

            label[best_detect][yind, xind, best_anchor, :] = 0
            label[best_detect][yind, xind, best_anchor, 0:4] = bbox_xywh
            label[best_detect][yind, xind, best_anchor, 4:5] = 1.0
            label[best_detect][yind, xind, best_anchor, 5:] = smooth_onehot

            bbox_ind = int(bbox_count[best_detect] % self.max_bbox_per_scale)
            bboxes_xywh[best_detect][bbox_ind, :4] = bbox_xywh
            bbox_count[best_detect] += 1

    label_sbbox, label_mbbox, label_lbbox = label
    sbboxes, mbboxes, lbboxes = bboxes_xywh
    return label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes
def __len__(self):
    """Number of batches produced per epoch."""
    return self.num_batchs
| 15,547 | 43.806916 | 121 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/core/backbone.py | #! /usr/bin/env python
# coding=utf-8
import core.common as common
import tensorflow as tf
def darknet53(input_data, trainable):
    """Darknet-53 feature extractor.

    Returns (route_1, route_2, final): the 256- and 512-channel skip
    connections plus the 1024-channel output, consumed by the YOLOv3 neck.
    Variable-scope and layer names match the original layout so existing
    checkpoints keep loading.
    """
    # (downsample conv name, in_ch, out_ch, residual repeats, residual name offset)
    stages = [
        ('conv1', 32, 64, 1, 0),
        ('conv4', 64, 128, 2, 1),
        ('conv9', 128, 256, 8, 3),
        ('conv26', 256, 512, 8, 11),
        ('conv43', 512, 1024, 4, 19),
    ]
    routes = []
    with tf.variable_scope('darknet'):
        x = common.convolutional(input_data, filters_shape=(3, 3, 3, 32),
                                 trainable=trainable, name='conv0')
        for conv_name, in_ch, out_ch, repeats, offset in stages:
            x = common.convolutional(x, filters_shape=(3, 3, in_ch, out_ch),
                                     trainable=trainable, name=conv_name, downsample=True)
            for i in range(repeats):
                # Each residual unit bottlenecks to out_ch // 2 then expands back.
                x = common.residual_block(x, out_ch, out_ch // 2, out_ch,
                                          trainable=trainable, name='residual%d' % (i + offset))
            if conv_name in ('conv9', 'conv26'):
                # Skip connections tapped after the 256- and 512-channel stages.
                routes.append(x)
    route_1, route_2 = routes
    return route_1, route_2, x
| 2,051 | 42.659574 | 123 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/core/config.py | #! /usr/bin/env python
# coding=utf-8
from easydict import EasyDict as edict
from filters import *
import argparse
# Command-line interface: experiment bookkeeping, device selection, and the
# locations of the clean / synthetic-fog datasets.
parser = argparse.ArgumentParser(description='')
parser.add_argument('--exp_num', dest='exp_num', type=str, default='101', help='current experiment number')
parser.add_argument('--epoch_first_stage', dest='epoch_first_stage', type=int, default=0, help='# of epochs')
parser.add_argument('--epoch_second_stage', dest='epoch_second_stage', type=int, default=80, help='# of epochs')
parser.add_argument('--use_gpu', dest='use_gpu', type=int, default=1, help='gpu flag, 1 for GPU and 0 for CPU')
parser.add_argument('--checkpoint_dir', dest='ckpt_dir', default='checkpoint', help='models are saved here')
parser.add_argument('--exp_dir', dest='exp_dir', default='./experiments', help='models are saved here')
parser.add_argument('--gpu_id', dest='gpu_id', type=str, default='7', help='if use gpu, use gpu device id')
# NOTE(review): type=bool on argparse flags is misleading -- any non-empty
# string (including "False") parses as True; confirm callers rely only on the
# defaults or omit these flags entirely.
parser.add_argument('--ISP_FLAG', dest='ISP_FLAG', type=bool, default=True, help='whether use DIP Module')
parser.add_argument('--fog_FLAG', dest='fog_FLAG', type=bool, default=True, help='whether use Hybrid data training')
parser.add_argument('--vocfog_traindata_dir', dest='vocfog_traindata_dir', default='/data/vdd/liuwenyu/data_vocfog/train/JPEGImages/',
                    help='the dir contains ten levels synthetic foggy images')
parser.add_argument('--vocfog_valdata_dir', dest='vocfog_valdata_dir', default='/data/vdd/liuwenyu/data_vocfog/val/JPEGImages/',
                    help='the dir contains ten levels synthetic foggy images')
parser.add_argument('--train_path', dest='train_path', nargs='*', default='./data/dataset_fog/voc_norm_train.txt', help='folder of the training data')
parser.add_argument('--val_path', dest='val_path', nargs='*', default='./data/dataset_fog/voc_norm_test.txt', help='folder of the training data')
parser.add_argument('--test_path', dest='test_path', nargs='*', default='./data/dataset_fog/quick_test.txt', help='folder of the training data')
parser.add_argument('--class_name', dest='class_name', nargs='*', default='./data/classes/vocfog.names', help='folder of the training data')
parser.add_argument('--WRITE_IMAGE_PATH', dest='WRITE_IMAGE_PATH', nargs='*', default='./experiments/exp_101/detection_results/', help='folder of the training data')
parser.add_argument('--WEIGHT_FILE', dest='WEIGHT_FILE', nargs='*', default='./experiments/exp_101/checkpoint/yolov3_test_loss=5.8980.ckpt-75', help='folder of the training data')
parser.add_argument('--pre_train', dest='pre_train', default='NULL', help='the path of pretrained models if is not null. not used for now')
# we trained our model from scratch.
args = parser.parse_args()

__C = edict()
# Consumers can get config by: from config import cfg
cfg = __C

###########################################################################
# Filter Parameters: the DIP module's filter pipeline and the layout of the
# predicted 15-dim hyper-parameter vector (per-filter begin offsets below).
###########################################################################
cfg.filters = [
    DefogFilter, ImprovedWhiteBalanceFilter, GammaFilter,
    ToneFilter, ContrastFilter, UsmFilter
]
cfg.num_filter_parameters = 15

# Offsets of each filter's parameters inside the 15-dim prediction vector:
# defog(1), white-balance(3), gamma(1), tone curve(8), contrast(1), USM(1).
cfg.defog_begin_param = 0

cfg.wb_begin_param = 1
cfg.gamma_begin_param = 4
cfg.tone_begin_param = 5
cfg.contrast_begin_param = 13
cfg.usm_begin_param = 14

cfg.curve_steps = 8
# Valid ranges the raw network outputs are squashed into per filter.
cfg.gamma_range = 3
cfg.exposure_range = 3.5
cfg.wb_range = 1.1
cfg.color_curve_range = (0.90, 1.10)
cfg.lab_curve_range = (0.90, 1.10)
cfg.tone_curve_range = (0.5, 2)
cfg.defog_range = (0.1, 1.0)
cfg.usm_range = (0.0, 5)


# Masking is DISABLED
cfg.masking = False
cfg.minimum_strength = 0.3
cfg.maximum_sharpness = 1
cfg.clamp = False

###########################################################################
# CNN Parameters: the small parameter-predictor network (see core/common.py).
###########################################################################
cfg.source_img_size = 64
cfg.base_channels = 32
cfg.dropout_keep_prob = 0.5
# G and C use the same feed dict?
cfg.share_feed_dict = True
cfg.shared_feature_extractor = True
cfg.fc1_size = 128
cfg.bnw = False
# number of filters for the first convolutional layers for all networks
# (stochastic/deterministic policy, critic, value)
cfg.feature_extractor_dims = 4096

###########################################################################


# YOLO options
__C.YOLO = edict()

# Set the class name
__C.YOLO.CLASSES = args.class_name
__C.YOLO.ANCHORS = "./data/anchors/coco_anchors.txt"
__C.YOLO.MOVING_AVE_DECAY = 0.9995
__C.YOLO.STRIDES = [8, 16, 32]
__C.YOLO.ANCHOR_PER_SCALE = 3
__C.YOLO.IOU_LOSS_THRESH = 0.5
__C.YOLO.UPSAMPLE_METHOD = "resize"
__C.YOLO.ISP_FLAG = args.ISP_FLAG


# Train options
__C.TRAIN = edict()

__C.TRAIN.ANNOT_PATH = args.train_path
__C.TRAIN.BATCH_SIZE = 6
# Multi-scale training: one input size is sampled per batch.
# __C.TRAIN.INPUT_SIZE = [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]
__C.TRAIN.INPUT_SIZE = [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]
# __C.TRAIN.INPUT_SIZE = [512, 544, 576, 608, 640, 672, 704, 736, 768, 800, 832, 864, 896]
__C.TRAIN.DATA_AUG = True
__C.TRAIN.LEARN_RATE_INIT = 1e-4
__C.TRAIN.LEARN_RATE_END = 1e-6
__C.TRAIN.WARMUP_EPOCHS = 2
__C.TRAIN.FISRT_STAGE_EPOCHS = args.epoch_first_stage
__C.TRAIN.SECOND_STAGE_EPOCHS = args.epoch_second_stage
__C.TRAIN.INITIAL_WEIGHT = args.pre_train


# TEST options
__C.TEST = edict()

__C.TEST.ANNOT_PATH = args.val_path
__C.TEST.BATCH_SIZE = 6
__C.TEST.INPUT_SIZE = 544
__C.TEST.DATA_AUG = False
__C.TEST.WRITE_IMAGE = True
__C.TEST.WRITE_IMAGE_PATH = args.WRITE_IMAGE_PATH
__C.TEST.WRITE_IMAGE_SHOW_LABEL = True
__C.TEST.WEIGHT_FILE = args.WEIGHT_FILE
__C.TEST.SHOW_LABEL = True
__C.TEST.SCORE_THRESHOLD = 0.3
__C.TEST.IOU_THRESHOLD = 0.45
| 6,043 | 38.763158 | 179 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/core/common.py | #! /usr/bin/env python
# coding=utf-8
import tensorflow as tf
import tensorflow.contrib.layers as ly
from util_filters import *
def extract_parameters(net, cfg, trainable):
    """CNN that regresses the DIP filter hyper-parameters from the input image.

    Five stride-2 convolutions (no batch norm) followed by two fully-connected
    layers producing ``cfg.num_filter_parameters`` raw values.  The flattened
    conv output must be 4096 wide, i.e. with ``base_channels == 32`` the input
    is assumed to be 256x256 -- the reshape fails otherwise (TODO confirm at
    call sites).
    """
    output_dim = cfg.num_filter_parameters
    # net = net - 0.5
    min_feature_map_size = 4  # retained from the original; unused below
    print('extract_parameters CNN:')
    channels = cfg.base_channels
    print(' ', str(net.get_shape()))
    x = net
    # (scope name, input channels, output channels) for each stride-2 conv.
    conv_specs = [
        ('ex_conv0', 3, channels),
        ('ex_conv1', channels, 2 * channels),
        ('ex_conv2', 2 * channels, 2 * channels),
        ('ex_conv3', 2 * channels, 2 * channels),
        ('ex_conv4', 2 * channels, 2 * channels),
    ]
    for scope, in_ch, out_ch in conv_specs:
        x = convolutional(x, filters_shape=(3, 3, in_ch, out_ch), trainable=trainable,
                          name=scope, downsample=True, activate=True, bn=False)
    x = tf.reshape(x, [-1, 4096])
    hidden = ly.fully_connected(
        x,
        cfg.fc1_size,
        scope='fc1',
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer())
    filter_features = ly.fully_connected(
        hidden,
        output_dim,
        scope='fc2',
        activation_fn=None,
        weights_initializer=tf.contrib.layers.xavier_initializer())
    return filter_features
def extract_parameters_2(net, cfg, trainable):
    """Lightweight variant of ``extract_parameters`` (16 base channels, 64-wide FC).

    Same five stride-2 convolutions + two FC layers, but with a fixed channel
    budget so the flattened conv output is 2048 wide -- i.e. a 256x256 input
    (the detector resizes to 256x256 before calling this; reshape fails for
    other sizes).
    """
    output_dim = cfg.num_filter_parameters
    # net = net - 0.5
    min_feature_map_size = 4  # retained from the original; unused below
    print('extract_parameters_2 CNN:')
    channels = 16
    print(' ', str(net.get_shape()))
    x = net
    # (scope name, input channels, output channels) for each stride-2 conv.
    conv_specs = [
        ('ex_conv0', 3, channels),
        ('ex_conv1', channels, 2 * channels),
        ('ex_conv2', 2 * channels, 2 * channels),
        ('ex_conv3', 2 * channels, 2 * channels),
        ('ex_conv4', 2 * channels, 2 * channels),
    ]
    for scope, in_ch, out_ch in conv_specs:
        x = convolutional(x, filters_shape=(3, 3, in_ch, out_ch), trainable=trainable,
                          name=scope, downsample=True, activate=True, bn=False)
    x = tf.reshape(x, [-1, 2048])
    hidden = ly.fully_connected(
        x,
        64,
        scope='fc1',
        activation_fn=lrelu,
        weights_initializer=tf.contrib.layers.xavier_initializer())
    filter_features = ly.fully_connected(
        hidden,
        output_dim,
        scope='fc2',
        activation_fn=None,
        weights_initializer=tf.contrib.layers.xavier_initializer())
    return filter_features
def convolutional(input_data, filters_shape, trainable, name, downsample=False, activate=True, bn=True):
    """Conv block: conv2d, then batch-norm or bias, then optional leaky ReLU.

    Args:
        input_data: NHWC input tensor.
        filters_shape: (k_h, k_w, in_channels, out_channels).
        trainable: fed to batch-norm's ``training`` flag.
        name: variable scope holding the layer's weights.
        downsample: when True, pad explicitly and convolve with stride 2 /
            'VALID' so H and W are halved.
        activate: apply leaky ReLU (alpha=0.1) when truthy.
        bn: use batch normalization; otherwise add a learned bias.

    Returns:
        The output feature-map tensor.
    """
    with tf.variable_scope(name):
        if downsample:
            # Explicit padding so the stride-2 'VALID' conv exactly halves H/W.
            pad_h, pad_w = (filters_shape[0] - 2) // 2 + 1, (filters_shape[1] - 2) // 2 + 1
            paddings = tf.constant([[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]])
            input_data = tf.pad(input_data, paddings, 'CONSTANT')
            strides = (1, 2, 2, 1)
            padding = 'VALID'
        else:
            strides = (1, 1, 1, 1)
            padding = "SAME"

        weight = tf.get_variable(name='weight', dtype=tf.float32, trainable=True,
                                 shape=filters_shape, initializer=tf.random_normal_initializer(stddev=0.01))
        conv = tf.nn.conv2d(input=input_data, filter=weight, strides=strides, padding=padding)

        if bn:
            conv = tf.layers.batch_normalization(conv, beta_initializer=tf.zeros_initializer(),
                                                 gamma_initializer=tf.ones_initializer(),
                                                 moving_mean_initializer=tf.zeros_initializer(),
                                                 moving_variance_initializer=tf.ones_initializer(),
                                                 training=trainable)
        else:
            bias = tf.get_variable(name='bias', shape=filters_shape[-1], trainable=True,
                                   dtype=tf.float32, initializer=tf.constant_initializer(0.0))
            conv = tf.nn.bias_add(conv, bias)

        # Idiom fix: truthiness test instead of `activate == True` comparison
        # (same behavior for the boolean flag).
        if activate:
            conv = tf.nn.leaky_relu(conv, alpha=0.1)

    return conv
def residual_block(input_data, input_channel, filter_num1, filter_num2, trainable, name):
    """Darknet residual unit: 1x1 bottleneck then 3x3 conv, plus identity shortcut."""
    shortcut = input_data
    with tf.variable_scope(name):
        out = convolutional(input_data, filters_shape=(1, 1, input_channel, filter_num1),
                            trainable=trainable, name='conv1')
        out = convolutional(out, filters_shape=(3, 3, filter_num1, filter_num2),
                            trainable=trainable, name='conv2')
        residual_output = out + shortcut
    return residual_output
def route(name, previous_output, current_output):
    """Channel-wise concatenation of the current and skip feature maps."""
    with tf.variable_scope(name):
        return tf.concat([current_output, previous_output], axis=-1)
def upsample(input_data, name, method="deconv"):
    """Double H and W via nearest-neighbor resize or a transposed convolution."""
    assert method in ["resize", "deconv"]

    if method == "resize":
        with tf.variable_scope(name):
            shape = tf.shape(input_data)
            return tf.image.resize_nearest_neighbor(input_data, (shape[1] * 2, shape[2] * 2))

    # 'deconv': conv2d_transpose instead of a resize op, to support TensorRT
    # optimization (resize_nearest_neighbor is not TensorRT-friendly).
    num_channels = input_data.shape.as_list()[-1]
    return tf.layers.conv2d_transpose(input_data, num_channels, kernel_size=2, padding='same',
                                      strides=(2, 2), kernel_initializer=tf.random_normal_initializer())
| 6,331 | 41.783784 | 119 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/core/yolov3_lowlight.py | #! /usr/bin/env python
# coding=utf-8
import numpy as np
import tensorflow as tf
import core.utils as utils
import core.common as common
import core.backbone as backbone
from core.config_lowlight import cfg
class YOLOV3(object):
    """YOLOv3 detector with an optional differentiable image-processing (DIP)
    front end for low-light input.

    When ``cfg.YOLO.ISP_FLAG`` is set, a small CNN predicts per-image filter
    hyper-parameters, the configured filter pipeline is applied to the input
    batch, and an L2 recovery loss against the clean reference images is
    produced alongside the detection heads.
    """

    def __init__(self, input_data, trainable, input_data_clean):
        """Build the full graph and the decoded per-scale prediction tensors.

        Args:
            input_data: batched input images (possibly degraded / low-light).
            trainable: flag/tensor controlling batch-norm training mode.
            input_data_clean: clean reference images for the recovery loss.
        """
        self.trainable = trainable
        self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
        self.num_class = len(self.classes)
        self.strides = np.array(cfg.YOLO.STRIDES)
        self.anchors = utils.get_anchors(cfg.YOLO.ANCHORS)
        self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
        self.iou_loss_thresh = cfg.YOLO.IOU_LOSS_THRESH
        self.upsample_method = cfg.YOLO.UPSAMPLE_METHOD
        self.isp_flag = cfg.YOLO.ISP_FLAG

        try:
            self.conv_lbbox, self.conv_mbbox, self.conv_sbbox, self.recovery_loss = \
                self.__build_nework(input_data, self.isp_flag, input_data_clean)
        except Exception as err:
            # Was a bare `except:`; narrowing to Exception and chaining keeps
            # the real build failure visible instead of silently replacing it
            # (and no longer swallows KeyboardInterrupt/SystemExit).
            raise NotImplementedError("Can not build up yolov3 network!") from err

        with tf.variable_scope('pred_sbbox'):
            self.pred_sbbox = self.decode(self.conv_sbbox, self.anchors[0], self.strides[0])

        with tf.variable_scope('pred_mbbox'):
            self.pred_mbbox = self.decode(self.conv_mbbox, self.anchors[1], self.strides[1])

        with tf.variable_scope('pred_lbbox'):
            self.pred_lbbox = self.decode(self.conv_lbbox, self.anchors[2], self.strides[2])

    def __build_nework(self, input_data, isp_flag, input_data_clean):
        """Assemble the optional DIP module, Darknet-53 backbone and YOLO heads.

        Returns:
            (conv_lbbox, conv_mbbox, conv_sbbox, recovery_loss): raw head
            outputs for the three scales plus the L2 image-recovery loss.
        """
        filtered_image_batch = input_data
        self.filter_params = input_data
        filter_imgs_series = []
        if isp_flag:
            with tf.variable_scope('extract_parameters_2'):
                input_data = tf.image.resize_images(input_data, [256, 256], method=tf.image.ResizeMethod.BILINEAR)
                filter_features = common.extract_parameters_2(input_data, cfg, self.trainable)

            # filter_features = tf.random_normal([1, 10], 0.5, 0.1)
            # Instantiate the configured filters and apply them sequentially;
            # each filter consumes its own slice of the predicted parameters.
            filters = cfg.filters
            filters = [x(input_data, cfg) for x in filters]
            filter_parameters = []
            for j, img_filter in enumerate(filters):  # renamed: `filter` shadowed the builtin
                with tf.variable_scope('filter_%d' % j):
                    print(' creating filter:', j, 'name:', str(img_filter.__class__), 'abbr.',
                          img_filter.get_short_name())
                    print(' filter_features:', filter_features.shape)

                    filtered_image_batch, filter_parameter = img_filter.apply(
                        filtered_image_batch, filter_features)
                    filter_parameters.append(filter_parameter)
                    filter_imgs_series.append(filtered_image_batch)

                    print(' output:', filtered_image_batch.shape)
            self.filter_params = filter_parameters
        self.image_isped = filtered_image_batch
        self.filter_imgs_series = filter_imgs_series

        # L2 distance between the enhanced frame and the clean reference.
        recovery_loss = tf.reduce_sum(tf.pow(filtered_image_batch - input_data_clean, 2.0))  # /(2.0 * batch_size)

        input_data = filtered_image_batch
        route_1, route_2, input_data = backbone.darknet53(input_data, self.trainable)

        # Large-object head (stride 32).
        input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv52')
        input_data = common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, 'conv53')
        input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv54')
        input_data = common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, 'conv55')
        input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv56')

        conv_lobj_branch = common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, name='conv_lobj_branch')
        conv_lbbox = common.convolutional(conv_lobj_branch, (1, 1, 1024, 3 * (self.num_class + 5)),
                                          trainable=self.trainable, name='conv_lbbox', activate=False, bn=False)

        # Medium-object head (stride 16), fed by upsample + skip connection.
        input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv57')
        input_data = common.upsample(input_data, name='upsample0', method=self.upsample_method)

        with tf.variable_scope('route_1'):
            input_data = tf.concat([input_data, route_2], axis=-1)

        input_data = common.convolutional(input_data, (1, 1, 768, 256), self.trainable, 'conv58')
        input_data = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, 'conv59')
        input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv60')
        input_data = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, 'conv61')
        input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv62')

        conv_mobj_branch = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, name='conv_mobj_branch')
        conv_mbbox = common.convolutional(conv_mobj_branch, (1, 1, 512, 3 * (self.num_class + 5)),
                                          trainable=self.trainable, name='conv_mbbox', activate=False, bn=False)

        # Small-object head (stride 8).
        input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv63')
        input_data = common.upsample(input_data, name='upsample1', method=self.upsample_method)

        with tf.variable_scope('route_2'):
            input_data = tf.concat([input_data, route_1], axis=-1)

        input_data = common.convolutional(input_data, (1, 1, 384, 128), self.trainable, 'conv64')
        input_data = common.convolutional(input_data, (3, 3, 128, 256), self.trainable, 'conv65')
        input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv66')
        input_data = common.convolutional(input_data, (3, 3, 128, 256), self.trainable, 'conv67')
        input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv68')

        conv_sobj_branch = common.convolutional(input_data, (3, 3, 128, 256), self.trainable, name='conv_sobj_branch')
        conv_sbbox = common.convolutional(conv_sobj_branch, (1, 1, 256, 3 * (self.num_class + 5)),
                                          trainable=self.trainable, name='conv_sbbox', activate=False, bn=False)

        return conv_lbbox, conv_mbbox, conv_sbbox, recovery_loss

    def decode(self, conv_output, anchors, stride):
        """Decode one raw head output into absolute-coordinate predictions.

        Returns a tensor of shape
        [batch_size, output_size, output_size, anchor_per_scale, 5 + num_classes]
        containing (x, y, w, h, score, class probabilities).
        """
        conv_shape = tf.shape(conv_output)
        batch_size = conv_shape[0]
        output_size = conv_shape[1]
        anchor_per_scale = len(anchors)

        conv_output = tf.reshape(conv_output, (batch_size, output_size, output_size, anchor_per_scale, 5 + self.num_class))

        conv_raw_dxdy = conv_output[:, :, :, :, 0:2]
        conv_raw_dwdh = conv_output[:, :, :, :, 2:4]
        conv_raw_conf = conv_output[:, :, :, :, 4:5]
        conv_raw_prob = conv_output[:, :, :, :, 5:]

        # Grid of cell indices, tiled across batch and anchors.
        y = tf.tile(tf.range(output_size, dtype=tf.int32)[:, tf.newaxis], [1, output_size])
        x = tf.tile(tf.range(output_size, dtype=tf.int32)[tf.newaxis, :], [output_size, 1])

        xy_grid = tf.concat([x[:, :, tf.newaxis], y[:, :, tf.newaxis]], axis=-1)
        xy_grid = tf.tile(xy_grid[tf.newaxis, :, :, tf.newaxis, :], [batch_size, 1, 1, anchor_per_scale, 1])
        xy_grid = tf.cast(xy_grid, tf.float32)

        # Sigmoid offsets within the cell; exponential scaling of anchor priors.
        pred_xy = (tf.sigmoid(conv_raw_dxdy) + xy_grid) * stride
        pred_wh = (tf.exp(conv_raw_dwdh) * anchors) * stride
        pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)

        pred_conf = tf.sigmoid(conv_raw_conf)
        pred_prob = tf.sigmoid(conv_raw_prob)

        return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)

    def focal(self, target, actual, alpha=1, gamma=2):
        """Focal-loss modulation factor: alpha * |target - actual| ** gamma."""
        focal_loss = alpha * tf.pow(tf.abs(target - actual), gamma)
        return focal_loss

    def bbox_giou(self, boxes1, boxes2):
        """Generalized IoU between boxes given as (cx, cy, w, h)."""
        boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
                            boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
        boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
                            boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)

        # Guard against degenerate boxes where "min" corner exceeds "max".
        boxes1 = tf.concat([tf.minimum(boxes1[..., :2], boxes1[..., 2:]),
                            tf.maximum(boxes1[..., :2], boxes1[..., 2:])], axis=-1)
        boxes2 = tf.concat([tf.minimum(boxes2[..., :2], boxes2[..., 2:]),
                            tf.maximum(boxes2[..., :2], boxes2[..., 2:])], axis=-1)

        boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
        boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])

        left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
        right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])

        inter_section = tf.maximum(right_down - left_up, 0.0)
        inter_area = inter_section[..., 0] * inter_section[..., 1]
        union_area = boxes1_area + boxes2_area - inter_area
        iou = inter_area / union_area

        # GIoU penalty: shrinkage by the empty part of the smallest enclosing box.
        enclose_left_up = tf.minimum(boxes1[..., :2], boxes2[..., :2])
        enclose_right_down = tf.maximum(boxes1[..., 2:], boxes2[..., 2:])
        enclose = tf.maximum(enclose_right_down - enclose_left_up, 0.0)
        enclose_area = enclose[..., 0] * enclose[..., 1]
        giou = iou - 1.0 * (enclose_area - union_area) / enclose_area

        return giou

    def bbox_iou(self, boxes1, boxes2):
        """Plain IoU between boxes given as (cx, cy, w, h)."""
        boxes1_area = boxes1[..., 2] * boxes1[..., 3]
        boxes2_area = boxes2[..., 2] * boxes2[..., 3]

        boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
                            boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
        boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
                            boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)

        left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
        right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])

        inter_section = tf.maximum(right_down - left_up, 0.0)
        inter_area = inter_section[..., 0] * inter_section[..., 1]
        union_area = boxes1_area + boxes2_area - inter_area
        iou = 1.0 * inter_area / union_area

        return iou

    def loss_layer(self, conv, pred, label, bboxes, anchors, stride):
        """Per-scale loss: GIoU box loss, focal confidence loss, class loss.

        Note: `anchors` is unused here (decoding already happened) but kept
        for signature compatibility with existing callers.
        """
        conv_shape = tf.shape(conv)
        batch_size = conv_shape[0]
        output_size = conv_shape[1]
        input_size = stride * output_size
        conv = tf.reshape(conv, (batch_size, output_size, output_size,
                                 self.anchor_per_scale, 5 + self.num_class))
        conv_raw_conf = conv[:, :, :, :, 4:5]
        conv_raw_prob = conv[:, :, :, :, 5:]

        pred_xywh = pred[:, :, :, :, 0:4]
        pred_conf = pred[:, :, :, :, 4:5]

        label_xywh = label[:, :, :, :, 0:4]
        respond_bbox = label[:, :, :, :, 4:5]
        label_prob = label[:, :, :, :, 5:]

        giou = tf.expand_dims(self.bbox_giou(pred_xywh, label_xywh), axis=-1)
        input_size = tf.cast(input_size, tf.float32)

        # Weight small boxes more heavily than large ones.
        bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)
        giou_loss = respond_bbox * bbox_loss_scale * (1 - giou)

        # A cell is background only if its best IoU with any GT box is low.
        iou = self.bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])
        max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)

        respond_bgd = (1.0 - respond_bbox) * tf.cast(max_iou < self.iou_loss_thresh, tf.float32)

        conf_focal = self.focal(respond_bbox, pred_conf)

        conf_loss = conf_focal * (
            respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
            +
            respond_bgd * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
        )

        prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=conv_raw_prob)

        giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis=[1, 2, 3, 4]))
        conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1, 2, 3, 4]))
        prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1, 2, 3, 4]))

        return giou_loss, conf_loss, prob_loss

    def compute_loss(self, label_sbbox, label_mbbox, label_lbbox, true_sbbox, true_mbbox, true_lbbox):
        """Sum the three per-scale losses and expose the recovery loss.

        Returns:
            (giou_loss, conf_loss, prob_loss, recovery_loss) scalars.
        """
        with tf.name_scope('smaller_box_loss'):
            loss_sbbox = self.loss_layer(self.conv_sbbox, self.pred_sbbox, label_sbbox, true_sbbox,
                                         anchors=self.anchors[0], stride=self.strides[0])

        with tf.name_scope('medium_box_loss'):
            loss_mbbox = self.loss_layer(self.conv_mbbox, self.pred_mbbox, label_mbbox, true_mbbox,
                                         anchors=self.anchors[1], stride=self.strides[1])

        with tf.name_scope('bigger_box_loss'):
            loss_lbbox = self.loss_layer(self.conv_lbbox, self.pred_lbbox, label_lbbox, true_lbbox,
                                         anchors=self.anchors[2], stride=self.strides[2])

        with tf.name_scope('giou_loss'):
            giou_loss = loss_sbbox[0] + loss_mbbox[0] + loss_lbbox[0]

        with tf.name_scope('conf_loss'):
            conf_loss = loss_sbbox[1] + loss_mbbox[1] + loss_lbbox[1]

        with tf.name_scope('prob_loss'):
            prob_loss = loss_sbbox[2] + loss_mbbox[2] + loss_lbbox[2]

        with tf.name_scope('recovery_loss'):
            recovery_loss = self.recovery_loss

        return giou_loss, conf_loss, prob_loss, recovery_loss
| 13,856 | 47.114583 | 147 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/core/__init__.py | 0 | 0 | 0 | py | |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/core/yolov3.py | #! /usr/bin/env python
# coding=utf-8
import numpy as np
import tensorflow as tf
import core.utils as utils
import core.common as common
import core.backbone as backbone
from core.config import cfg
import time
class YOLOV3(object):
"""Implement tensoflow yolov3 here"""
def __init__(self, input_data, trainable, input_data_clean, defog_A=None, IcA=None):
    """Build the defog-variant YOLOv3 graph and decoded prediction tensors.

    Args:
        input_data: batched (possibly foggy) input images.
        trainable: flag/tensor controlling batch-norm training mode.
        input_data_clean: clean reference images for the recovery loss.
        defog_A: per-image atmospheric light estimate for the defog filter.
        IcA: per-image dark-channel estimate for the defog filter.
    """
    self.trainable = trainable
    self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
    self.num_class = len(self.classes)
    self.strides = np.array(cfg.YOLO.STRIDES)
    self.anchors = utils.get_anchors(cfg.YOLO.ANCHORS)
    self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
    self.iou_loss_thresh = cfg.YOLO.IOU_LOSS_THRESH
    self.upsample_method = cfg.YOLO.UPSAMPLE_METHOD
    self.isp_flag = cfg.YOLO.ISP_FLAG

    try:
        self.conv_lbbox, self.conv_mbbox, self.conv_sbbox, self.recovery_loss = \
            self.__build_nework(input_data, self.isp_flag, input_data_clean, defog_A, IcA)
    except Exception as err:
        # Was a bare `except:`; narrowing to Exception and chaining keeps the
        # real build failure visible instead of silently replacing it.
        raise NotImplementedError("Can not build up yolov3 network!") from err

    with tf.variable_scope('pred_sbbox'):
        self.pred_sbbox = self.decode(self.conv_sbbox, self.anchors[0], self.strides[0])

    with tf.variable_scope('pred_mbbox'):
        self.pred_mbbox = self.decode(self.conv_mbbox, self.anchors[1], self.strides[1])

    with tf.variable_scope('pred_lbbox'):
        self.pred_lbbox = self.decode(self.conv_lbbox, self.anchors[2], self.strides[2])
def __build_nework(self, input_data, isp_flag, input_data_clean, defog_A, IcA):
filtered_image_batch = input_data
self.filter_params = input_data
filter_imgs_series = []
if isp_flag:
# start_time = time.time()
with tf.variable_scope('extract_parameters_2'):
input_data = tf.image.resize_images(input_data, [256, 256], method=tf.image.ResizeMethod.BILINEAR)
filter_features = common.extract_parameters_2(input_data, cfg, self.trainable)
# filter_features = tf.random_normal([1, 15], 0.5, 0.1)
filters = cfg.filters
filters = [x(filtered_image_batch, cfg) for x in filters]
filter_parameters = []
for j, filter in enumerate(filters):
with tf.variable_scope('filter_%d' % j):
print(' creating filter:', j, 'name:', str(filter.__class__), 'abbr.',
filter.get_short_name())
print(' filter_features:', filter_features.shape)
filtered_image_batch, filter_parameter = filter.apply(
filtered_image_batch, filter_features, defog_A, IcA)
filter_parameters.append(filter_parameter)
filter_imgs_series.append(filtered_image_batch)
print(' output:', filtered_image_batch.shape)
self.filter_params = filter_parameters
# end_time = time.time()
# print('filters所用时间:', end_time - start_time)
# input_data_shape = tf.shape(input_data)
# batch_size = input_data_shape[0]
recovery_loss = tf.reduce_sum(tf.pow(filtered_image_batch - input_data_clean, 2.0))#/(2.0 * batch_size)
self.image_isped = filtered_image_batch
self.filter_imgs_series = filter_imgs_series
input_data = filtered_image_batch
route_1, route_2, input_data = backbone.darknet53(input_data, self.trainable)
input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv52')
input_data = common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, 'conv53')
input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv54')
input_data = common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, 'conv55')
input_data = common.convolutional(input_data, (1, 1, 1024, 512), self.trainable, 'conv56')
conv_lobj_branch = common.convolutional(input_data, (3, 3, 512, 1024), self.trainable, name='conv_lobj_branch')
conv_lbbox = common.convolutional(conv_lobj_branch, (1, 1, 1024, 3*(self.num_class + 5)),
trainable=self.trainable, name='conv_lbbox', activate=False, bn=False)
input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv57')
input_data = common.upsample(input_data, name='upsample0', method=self.upsample_method)
with tf.variable_scope('route_1'):
input_data = tf.concat([input_data, route_2], axis=-1)
input_data = common.convolutional(input_data, (1, 1, 768, 256), self.trainable, 'conv58')
input_data = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, 'conv59')
input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv60')
input_data = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, 'conv61')
input_data = common.convolutional(input_data, (1, 1, 512, 256), self.trainable, 'conv62')
conv_mobj_branch = common.convolutional(input_data, (3, 3, 256, 512), self.trainable, name='conv_mobj_branch' )
conv_mbbox = common.convolutional(conv_mobj_branch, (1, 1, 512, 3*(self.num_class + 5)),
trainable=self.trainable, name='conv_mbbox', activate=False, bn=False)
input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv63')
input_data = common.upsample(input_data, name='upsample1', method=self.upsample_method)
with tf.variable_scope('route_2'):
input_data = tf.concat([input_data, route_1], axis=-1)
input_data = common.convolutional(input_data, (1, 1, 384, 128), self.trainable, 'conv64')
input_data = common.convolutional(input_data, (3, 3, 128, 256), self.trainable, 'conv65')
input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv66')
input_data = common.convolutional(input_data, (3, 3, 128, 256), self.trainable, 'conv67')
input_data = common.convolutional(input_data, (1, 1, 256, 128), self.trainable, 'conv68')
conv_sobj_branch = common.convolutional(input_data, (3, 3, 128, 256), self.trainable, name='conv_sobj_branch')
conv_sbbox = common.convolutional(conv_sobj_branch, (1, 1, 256, 3*(self.num_class + 5)),
trainable=self.trainable, name='conv_sbbox', activate=False, bn=False)
return conv_lbbox, conv_mbbox, conv_sbbox, recovery_loss
def decode(self, conv_output, anchors, stride):
"""
return tensor of shape [batch_size, output_size, output_size, anchor_per_scale, 5 + num_classes]
contains (x, y, w, h, score, probability)
"""
conv_shape = tf.shape(conv_output)
batch_size = conv_shape[0]
output_size = conv_shape[1]
anchor_per_scale = len(anchors)
conv_output = tf.reshape(conv_output, (batch_size, output_size, output_size, anchor_per_scale, 5 + self.num_class))
conv_raw_dxdy = conv_output[:, :, :, :, 0:2]
conv_raw_dwdh = conv_output[:, :, :, :, 2:4]
conv_raw_conf = conv_output[:, :, :, :, 4:5]
conv_raw_prob = conv_output[:, :, :, :, 5: ]
y = tf.tile(tf.range(output_size, dtype=tf.int32)[:, tf.newaxis], [1, output_size])
x = tf.tile(tf.range(output_size, dtype=tf.int32)[tf.newaxis, :], [output_size, 1])
xy_grid = tf.concat([x[:, :, tf.newaxis], y[:, :, tf.newaxis]], axis=-1)
xy_grid = tf.tile(xy_grid[tf.newaxis, :, :, tf.newaxis, :], [batch_size, 1, 1, anchor_per_scale, 1])
xy_grid = tf.cast(xy_grid, tf.float32)
pred_xy = (tf.sigmoid(conv_raw_dxdy) + xy_grid) * stride
pred_wh = (tf.exp(conv_raw_dwdh) * anchors) * stride
pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)
pred_conf = tf.sigmoid(conv_raw_conf)
pred_prob = tf.sigmoid(conv_raw_prob)
return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)
def focal(self, target, actual, alpha=1, gamma=2):
focal_loss = alpha * tf.pow(tf.abs(target - actual), gamma)
return focal_loss
def bbox_giou(self, boxes1, boxes2):
boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
boxes1 = tf.concat([tf.minimum(boxes1[..., :2], boxes1[..., 2:]),
tf.maximum(boxes1[..., :2], boxes1[..., 2:])], axis=-1)
boxes2 = tf.concat([tf.minimum(boxes2[..., :2], boxes2[..., 2:]),
tf.maximum(boxes2[..., :2], boxes2[..., 2:])], axis=-1)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
iou = inter_area / union_area
enclose_left_up = tf.minimum(boxes1[..., :2], boxes2[..., :2])
enclose_right_down = tf.maximum(boxes1[..., 2:], boxes2[..., 2:])
enclose = tf.maximum(enclose_right_down - enclose_left_up, 0.0)
enclose_area = enclose[..., 0] * enclose[..., 1]
giou = iou - 1.0 * (enclose_area - union_area) / enclose_area
return giou
def bbox_iou(self, boxes1, boxes2):
boxes1_area = boxes1[..., 2] * boxes1[..., 3]
boxes2_area = boxes2[..., 2] * boxes2[..., 3]
boxes1 = tf.concat([boxes1[..., :2] - boxes1[..., 2:] * 0.5,
boxes1[..., :2] + boxes1[..., 2:] * 0.5], axis=-1)
boxes2 = tf.concat([boxes2[..., :2] - boxes2[..., 2:] * 0.5,
boxes2[..., :2] + boxes2[..., 2:] * 0.5], axis=-1)
left_up = tf.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = tf.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
iou = 1.0 * inter_area / union_area
return iou
def loss_layer(self, conv, pred, label, bboxes, anchors, stride):
conv_shape = tf.shape(conv)
batch_size = conv_shape[0]
output_size = conv_shape[1]
input_size = stride * output_size
conv = tf.reshape(conv, (batch_size, output_size, output_size,
self.anchor_per_scale, 5 + self.num_class))
conv_raw_conf = conv[:, :, :, :, 4:5]
conv_raw_prob = conv[:, :, :, :, 5:]
pred_xywh = pred[:, :, :, :, 0:4]
pred_conf = pred[:, :, :, :, 4:5]
label_xywh = label[:, :, :, :, 0:4]
respond_bbox = label[:, :, :, :, 4:5]
label_prob = label[:, :, :, :, 5:]
giou = tf.expand_dims(self.bbox_giou(pred_xywh, label_xywh), axis=-1)
input_size = tf.cast(input_size, tf.float32)
bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)
giou_loss = respond_bbox * bbox_loss_scale * (1- giou)
iou = self.bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])
max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)
respond_bgd = (1.0 - respond_bbox) * tf.cast( max_iou < self.iou_loss_thresh, tf.float32 )
conf_focal = self.focal(respond_bbox, pred_conf)
conf_loss = conf_focal * (
respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
+
respond_bgd * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
)
prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=conv_raw_prob)
giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis=[1,2,3,4]))
conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1,2,3,4]))
prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1,2,3,4]))
return giou_loss, conf_loss, prob_loss
def compute_loss(self, label_sbbox, label_mbbox, label_lbbox, true_sbbox, true_mbbox, true_lbbox):
with tf.name_scope('smaller_box_loss'):
loss_sbbox = self.loss_layer(self.conv_sbbox, self.pred_sbbox, label_sbbox, true_sbbox,
anchors = self.anchors[0], stride = self.strides[0])
with tf.name_scope('medium_box_loss'):
loss_mbbox = self.loss_layer(self.conv_mbbox, self.pred_mbbox, label_mbbox, true_mbbox,
anchors = self.anchors[1], stride = self.strides[1])
with tf.name_scope('bigger_box_loss'):
loss_lbbox = self.loss_layer(self.conv_lbbox, self.pred_lbbox, label_lbbox, true_lbbox,
anchors = self.anchors[2], stride = self.strides[2])
with tf.name_scope('giou_loss'):
giou_loss = loss_sbbox[0] + loss_mbbox[0] + loss_lbbox[0]
with tf.name_scope('conf_loss'):
conf_loss = loss_sbbox[1] + loss_mbbox[1] + loss_lbbox[1]
with tf.name_scope('prob_loss'):
prob_loss = loss_sbbox[2] + loss_mbbox[2] + loss_lbbox[2]
with tf.name_scope('recovery_loss'):
recovery_loss = self.recovery_loss
return giou_loss, conf_loss, prob_loss, recovery_loss
| 14,179 | 47.561644 | 123 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/scripts/voc_annotation.py | import os
import argparse
import xml.etree.ElementTree as ET
def convert_voc_annotation(data_path, data_type, anno_path, use_difficult_bbox=False):
    """Convert PASCAL-VOC XML annotations of one split into a flat txt file.

    For every image in the split, one line is appended to *anno_path*:
        <image_path> <xmin,ymin,xmax,ymax,class_id> <xmin,...> ...
    Only objects belonging to the five traffic-related classes are kept.

    Args:
        data_path: VOC root containing ImageSets/, JPEGImages/, Annotations/.
        data_type: split name, e.g. 'trainval' or 'test'.
        anno_path: output annotation file (opened in append mode).
        use_difficult_bbox: keep boxes flagged <difficult>1</difficult> if True.

    Returns:
        The number of images in the split.
    """
    classes = ['person', 'car', 'bus', 'bicycle', 'motorbike']
    img_inds_file = os.path.join(data_path, 'ImageSets', 'Main', data_type + '.txt')
    with open(img_inds_file, 'r') as f:
        image_inds = [line.strip() for line in f]
    with open(anno_path, 'a') as f:
        for image_ind in image_inds:
            image_path = os.path.join(data_path, 'JPEGImages', image_ind + '.jpg')
            annotation = image_path
            label_path = os.path.join(data_path, 'Annotations', image_ind + '.xml')
            root = ET.parse(label_path).getroot()
            for obj in root.findall('object'):
                difficult = obj.find('difficult').text.strip()
                if (not use_difficult_bbox) and int(difficult) == 1:
                    continue
                name = obj.find('name').text.lower().strip()
                # Fix: test membership against the single `classes` list instead
                # of a second hard-coded copy of the same literal (maintenance
                # hazard — the two copies could drift apart).
                if name not in classes:
                    continue
                bbox = obj.find('bndbox')
                class_ind = classes.index(name)
                xmin = bbox.find('xmin').text.strip()
                xmax = bbox.find('xmax').text.strip()
                ymin = bbox.find('ymin').text.strip()
                ymax = bbox.find('ymax').text.strip()
                annotation += ' ' + ','.join([xmin, ymin, xmax, ymax, str(class_ind)])
            print(annotation)
            f.write(annotation + "\n")
    return len(image_inds)
if __name__ == '__main__':
    # for foggy conditions
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_path", default="/home/lwy/work/code/tensorflow-yolov3/data/VOC/")
    parser.add_argument("--train_annotation", default="../data/dataset_fog/voc_norm_train.txt")
    parser.add_argument("--test_annotation", default="../data/dataset_fog/voc_norm_test.txt")
    flags = parser.parse_args()
    # Fix: the previous code referenced the undefined `flags.val_annotation`
    # (AttributeError, since no --val_annotation argument is declared) and,
    # when the test annotation existed, removed the wrong file. Remove each
    # declared annotation file if it already exists so the append mode of
    # convert_voc_annotation starts from a clean slate.
    if os.path.exists(flags.train_annotation): os.remove(flags.train_annotation)
    if os.path.exists(flags.test_annotation): os.remove(flags.test_annotation)
    num1 = convert_voc_annotation(os.path.join(flags.data_path, 'train/VOCdevkit/VOC2007'), 'trainval', flags.train_annotation, False)
    num2 = convert_voc_annotation(os.path.join(flags.data_path, 'train/VOCdevkit/VOC2012'), 'trainval', flags.train_annotation, False)
    num3 = convert_voc_annotation(os.path.join(flags.data_path, 'test/VOCdevkit/VOC2007'), 'test', flags.test_annotation, False)
    print('=> The number of image for train is: %d\tThe number of image for val is:%d\tThe number of image for test is:%d' %(num1, num2, num3))
| 3,104 | 49.901639 | 143 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/scripts/show_bboxes.py | #! /usr/bin/env python
# coding=utf-8
import cv2
import numpy as np
from PIL import Image
import math
# Quick visual check: draw the ground-truth boxes of one annotation line.
ID = 60  # index of the annotation line to visualise
label_txt = ""  # NOTE(review): must be set to an annotation txt path; open('') below fails otherwise
image_info = open(label_txt).readlines()[ID].split()
image_path = image_info[0]  # first token is the image path
image = cv2.imread(image_path)
for bbox in image_info[1:]:  # remaining tokens look like "xmin,ymin,xmax,ymax,class"
    bbox = bbox.split(",")
    image = cv2.rectangle(image, (int(float(bbox[0])),
                                  int(float(bbox[1]))),
                          (int(float(bbox[2])),
                           int(float(bbox[3]))), (255,0,0), 2)
image = Image.fromarray(np.uint8(image))
image.show()
import time
from numba import jit
@jit()
def AddHaze1(img):
    """Add synthetic, centre-weighted haze to a uint8 image (numba-jitted).

    Each pixel is blended with a constant atmospheric light A using a
    transmission that decays with distance from the image centre.
    Returns a new uint8 image; the input is not modified.
    """
    img_f = img / 255.0
    (row, col, chs) = img.shape
    A = 0.5  # atmospheric light (haze brightness)
    beta = 0.01 * 8  # haze density
    size = math.sqrt(max(row, col))  # haze spread radius
    center = (row // 2, col // 2)  # haze centre
    t1 = time.time()
    for j in range(row):
        for l in range(col):
            # Distance-dependent depth proxy -> exponential transmission.
            d = -0.04 * math.sqrt((j - center[0]) ** 2 + (l - center[1]) ** 2) + size
            td = math.exp(-beta * d)
            img_f[j][l][:] = img_f[j][l][:] * td + A * (1 - td)
    t2 = time.time()
    print('time:',t2-t1)
    img_f = img_f * 255
    img_f = np.clip(img_f, 0, 255)
    img_f = img_f.astype(np.uint8)
    return img_f
def AddHaze2(img):
    """Add spatially uniform synthetic haze to a uint8 image.

    Draws a random atmospheric light A in [0.8, 0.95) and a random
    transmission t in [0.3, 0.6), then blends: out = img*t + A*(1-t).
    Returns a new uint8 image.
    """
    normalized = img / 255.0
    atmosphere = np.random.uniform(0.8, 0.95)
    transmission = np.random.uniform(0.3, 0.6)
    hazed = normalized * transmission + atmosphere * (1 - transmission)
    hazed = hazed * 255
    hazed = np.clip(hazed, 0, 255)
    return hazed.astype(np.uint8)
def Gammafilter(img, gamma = 0.5):
    """Pointwise gamma correction: out = img ** gamma.

    Expects *img* already scaled to [0, 1]; the default gamma of 0.5
    brightens dark regions.
    """
    return np.power(img, gamma)
# def DarkChannel(im,sz):
# b,g,r = cv2.split(im)
# dc = cv2.min(cv2.min(r,g),b);
# kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(sz,sz))
# dark = cv2.erode(dc, kernel)
# # dark = dc
# return dark
#
# def AtmLight(im,dark):
# [h,w] = im.shape[:2]
# imsz = h*w
# numpx = int(max(math.floor(imsz/1000),1))
# darkvec = dark.reshape(imsz,1)
# imvec = im.reshape(imsz,3)
#
# indices = darkvec.argsort()
# indices = indices[imsz-numpx::]
#
# atmsum = np.zeros([1,3])
# for ind in range(1,numpx):
# atmsum = atmsum + imvec[indices[ind]]
#
# A = atmsum / numpx
# return A
#
# def TransmissionEstimate(im,A,sz):
# omega = 0.95;
# im3 = np.empty(im.shape,im.dtype);
#
# for ind in range(0,3):
# im3[:,:,ind] = im[:,:,ind]/A[0,ind]
#
# transmission = 1 - omega*DarkChannel(im3,sz);
# return transmission
#
# def Guidedfilter(im,p,r,eps):
# mean_I = cv2.boxFilter(im,cv2.CV_64F,(r,r));
# mean_p = cv2.boxFilter(p, cv2.CV_64F,(r,r));
# mean_Ip = cv2.boxFilter(im*p,cv2.CV_64F,(r,r));
# cov_Ip = mean_Ip - mean_I*mean_p;
#
# mean_II = cv2.boxFilter(im*im,cv2.CV_64F,(r,r));
# var_I = mean_II - mean_I*mean_I;
#
# a = cov_Ip/(var_I + eps);
# b = mean_p - a*mean_I;
#
# mean_a = cv2.boxFilter(a,cv2.CV_64F,(r,r));
# mean_b = cv2.boxFilter(b,cv2.CV_64F,(r,r));
#
# q = mean_a*im + mean_b;
# return q;
#
# def TransmissionRefine(im,et):
# gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY);
# gray = np.float64(gray)/255;
# r = 60;
# eps = 0.0001;
# t = Guidedfilter(gray,et,r,eps);
#
# return t;
#
# def Recover(im,t,A,tx = 0.1):
# res = np.empty(im.shape,im.dtype);
# t = cv2.max(t,tx);
#
# for ind in range(0,3):
# res[:,:,ind] = (im[:,:,ind]-A[0,ind])/t + A[0,ind]
#
# return res
def DarkChannel(im):
    """Dark channel of *im*: per-pixel minimum over the B, G and R channels."""
    blue, green, red = cv2.split(im)
    return cv2.min(blue, cv2.min(green, red))
def AtmLight(im, dark):
    """Estimate the atmospheric light A from the dark-channel prior.

    Selects the top ~0.1% of pixels with the highest dark-channel value
    (at least one pixel) and averages their colours.

    Args:
        im: HxWx3 float image.
        dark: HxW dark channel of *im*.

    Returns:
        A 1x3 array with the estimated atmospheric light.
    """
    [h, w] = im.shape[:2]
    imsz = h * w
    numpx = int(max(math.floor(imsz / 1000), 1))
    darkvec = dark.reshape(imsz, 1)
    imvec = im.reshape(imsz, 3)
    indices = darkvec.argsort(0)
    indices = indices[(imsz - numpx):imsz]
    atmsum = np.zeros([1, 3])
    # Fix: the loop previously started at 1, skipping one of the selected
    # pixels while still dividing by numpx — and returning an all-zero A
    # whenever numpx == 1 (small images). Average over all numpx pixels.
    for ind in range(numpx):
        atmsum = atmsum + imvec[indices[ind]]
    A = atmsum / numpx
    return A
def DarkIcA(im, A):
    """Dark channel of *im* after normalising each colour channel by the
    atmospheric light *A* (shape 1x3)."""
    normalized = np.empty(im.shape, im.dtype)
    for channel in range(0, 3):
        normalized[:, :, channel] = im[:, :, channel] / A[0, channel]
    return DarkChannel(normalized)
'''if __name__ == '__main__':
img = cv2.imread('/home/lwy/work/code/defog_yolov3/scripts/AM_Bing_274.png')
cv2.imwrite('org.png', img)
I = img.astype('float64') / 255
I = Gammafilter(I, 0.5)
dark_i = DarkChannel(I)
defog_A_i = AtmLight(I, dark_i)
IcA_i = DarkIcA(I, defog_A_i)
tx = 1 - 0.59*IcA_i
tx[tx < 0.01] = 0.01
res = np.empty(I.shape,I.dtype);
for ind in range(0,3):
res[:,:,ind] = (I[:,:,ind]-defog_A_i[:,ind])/tx[:,:] + defog_A_i[:,ind]
# J = (I - defog_A_i) / tf_1 + defog_A_i
# tx_1 = tf.tile(tx, [1, 1, 1, 3])
# return (img - defog_A[:, None, None, :])/tf.maximum(tx_1, 0.01) + defog_A[:, None, None, :]
# dark = DarkChannel(I, 15)
# A = AtmLight(I, dark)
# t = TransmissionEstimate(I, A, 15)
# # t = TransmissionRefine(img, t)
# J = Recover(I, t, A, 0.1)
img_f = res * 255
# img_f = Gammafilter(img)
img_name = 'lwyGamma' + '.png'
cv2.imwrite(img_name, img_f)
'''
#
# cv2.imshow("src", img)
# cv2.imshow("dst", img_f)
# cv2.waitKey()
| 5,333 | 23.925234 | 97 | py |
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/scripts/voc_RTTS.py | import os
import argparse
import xml.etree.ElementTree as ET
def convert_voc_annotation(data_path, data_type, anno_path, use_difficult_bbox=True):
    """Dump RTTS VOC-style annotations of one split into a flat txt file.

    For every image in the split, one line is appended to *anno_path*:
        <image_path> <xmin,ymin,xmax,ymax,class_id> ...
    Every object name is assumed to be one of the five classes below
    (unknown names raise ValueError via list.index).
    Returns the number of images in the split.
    """
    classes = ['person', 'car', 'bus', 'bicycle', 'motorbike']
    split_file = os.path.join(data_path, 'ImageSets', 'Main', data_type + '.txt')
    with open(split_file, 'r') as f:
        image_inds = [line.strip() for line in f.readlines()]
    with open(anno_path, 'a') as out:
        for image_ind in image_inds:
            # RTTS images are stored as .png (unlike the .jpg VOC originals).
            annotation = os.path.join(data_path, 'JPEGImages', image_ind + '.png')
            xml_path = os.path.join(data_path, 'Annotations', image_ind + '.xml')
            root = ET.parse(xml_path).getroot()
            for obj in root.findall('object'):
                difficult = obj.find('difficult').text.strip()
                if int(difficult) == 1 and not use_difficult_bbox:
                    continue
                bbox = obj.find('bndbox')
                class_ind = classes.index(obj.find('name').text.lower().strip())
                coords = [bbox.find(tag).text.strip() for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
                annotation += ' ' + ','.join(coords + [str(class_ind)])
            print(annotation)
            out.write(annotation + "\n")
    return len(image_inds)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_path", default="/media/lwy/BackupPlus/data_RTTS/")
    # parser.add_argument("--train_annotation", default="/media/lwy/BackupPlus/data_RTTS/dataset/voc_train.txt")
    parser.add_argument("--test_annotation", default="/media/lwy/BackupPlus/data_RTTS/dataset/RTTS_test.txt")
    flags = parser.parse_args()
    # if os.path.exists(flags.train_annotation):os.remove(flags.train_annotation)
    # Remove an existing output file so the append mode of
    # convert_voc_annotation starts from a clean slate.
    if os.path.exists(flags.test_annotation): os.remove(flags.test_annotation)
    num3 = convert_voc_annotation(flags.data_path, 'test', flags.test_annotation, False)
    # Fix: the final print line was corrupted by trailing non-Python residue
    # ("| 2,759 | ..."), which is a syntax error; restore the valid statement.
    print('=> The number of image for test is:%d' % num3)
Image-Adaptive-YOLO | Image-Adaptive-YOLO-main/experiments/exp_101/mAP/main.py | import glob
import json
import os
import shutil
import operator
import sys
import argparse
# ---- CLI configuration and capability detection (runs at import time) ----
MINOVERLAP = 0.5 # default value (defined in the PASCAL VOC2012 challenge)
parser = argparse.ArgumentParser()
parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
# argparse receiving list of classes to be ignored
parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
# argparse receiving list of classes with specific IoU
parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
args = parser.parse_args()
# if there are no classes to ignore then replace None by empty list
if args.ignore is None:
    args.ignore = []
specific_iou_flagged = False
if args.set_class_iou is not None:
    specific_iou_flagged = True
# if there are no images then no animation can be shown
img_path = 'images'
if os.path.exists(img_path):
    for dirpath, dirnames, files in os.walk(img_path):
        if not files:
            # no image files found
            args.no_animation = True
else:
    args.no_animation = True
# try to import OpenCV if the user didn't choose the option --no-animation
show_animation = False
if not args.no_animation:
    try:
        import cv2
        show_animation = True
    except ImportError:
        print("\"opencv-python\" not found, please install to visualize the results.")
        args.no_animation = True
# try to import Matplotlib if the user didn't choose the option --no-plot
draw_plot = False
if not args.no_plot:
    try:
        import matplotlib.pyplot as plt
        draw_plot = True
    except ImportError:
        print("\"matplotlib\" not found, please install it to get the resulting plots.")
        args.no_plot = True
"""
throw error and exit
"""
def error(msg):
    """Print an error message and abort the program with a non-zero status.

    Args:
        msg: the error text to show the user.
    """
    print(msg)
    # Fix: the exit status was 0, which made every failure look like success
    # to shells and CI pipelines; errors must exit non-zero.
    sys.exit(1)
"""
check if the number is a float between 0.0 and 1.0
"""
def is_float_between_0_and_1(value):
    """Return True iff *value* parses as a float strictly between 0 and 1."""
    try:
        return 0.0 < float(value) < 1.0
    except ValueError:
        return False
"""
Calculate the AP given the recall and precision array
1st) We compute a version of the measured precision/recall curve with
precision monotonically decreasing
2nd) We compute the AP as the area under this curve by numerical integration.
"""
def voc_ap(rec, prec):
    """Compute the Average Precision (VOC2012 style) from recall/precision lists.

    Mirrors the official MATLAB code:
        mrec=[0 ; rec ; 1];  mpre=[0 ; prec ; 0];
        make mpre monotonically decreasing from right to left;
        ap = sum over recall change points of (mrec[i]-mrec[i-1])*mpre[i].

    Note: *rec* and *prec* are extended in place with the sentinel values,
    matching the original implementation's side effect.

    Returns:
        (ap, mrec, mpre) — the AP plus the padded recall/precision curves.
    """
    # Pad with the sentinel endpoints (in place, as the original does).
    rec.insert(0, 0.0)
    rec.append(1.0)
    mrec = rec[:]
    prec.insert(0, 0.0)
    prec.append(0.0)
    mpre = prec[:]
    # Make the precision envelope monotonically decreasing, sweeping from
    # the end of the curve back to the beginning.
    for idx in range(len(mpre) - 2, -1, -1):
        if mpre[idx] < mpre[idx + 1]:
            mpre[idx] = mpre[idx + 1]
    # Indices where the recall value changes: only these contribute area.
    change_points = [idx for idx in range(1, len(mrec)) if mrec[idx] != mrec[idx - 1]]
    # Numerical integration of the area under the precision envelope.
    ap = 0.0
    for idx in change_points:
        ap += (mrec[idx] - mrec[idx - 1]) * mpre[idx]
    return ap, mrec, mpre
"""
Convert the lines of a file to a list
"""
def file_lines_to_list(path):
    """Read the text file at *path* and return its lines with surrounding
    whitespace (including the trailing newline) stripped."""
    with open(path) as handle:
        return [line.strip() for line in handle]
"""
Draws text in image
"""
def draw_text_in_image(img, text, pos, color, line_width):
    """Render *text* onto *img* at *pos* with the given *color*.

    Returns (img, line_width + rendered text width) so the caller can keep
    appending text on the same line.
    """
    font = cv2.FONT_HERSHEY_PLAIN
    scale = 1
    thickness = 1
    cv2.putText(img, text,
                pos,
                font,
                scale,
                color,
                thickness)
    (text_width, _), _ = cv2.getTextSize(text, font, scale, thickness)
    return img, (line_width + text_width)
"""
Plot - adjust axes
"""
def adjust_axes(r, t, fig, axes):
    """Widen the x-axis of *axes* so the text artist *t* fits inside *fig*.

    Args:
        r: the figure canvas renderer.
        t: the text artist whose width must fit.
        fig: the matplotlib figure.
        axes: the axes whose x-limit is rescaled.
    """
    # Measure how wide the text is, in inches.
    bbox = t.get_window_extent(renderer=r)
    text_width_inches = bbox.width / fig.dpi
    # Scale factor: figure width grown by the text width.
    current_width = fig.get_figwidth()
    proportion = (current_width + text_width_inches) / current_width
    # Stretch only the upper x-limit by that factor.
    lo, hi = axes.get_xlim()
    axes.set_xlim([lo, hi * proportion])
"""
Draw plot using Matplotlib
"""
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
    """Draw a horizontal bar chart of per-class values and save it to disk.

    Args:
        dictionary: class_name -> numeric value to plot.
        n_classes: number of classes (bar count).
        window_title / plot_title / x_label: chart labels.
        output_path: file the figure is saved to.
        to_show: display the figure interactively when True.
        plot_color: bar color for the single-color mode.
        true_p_bar: "" for single-color bars; otherwise a class_name -> true
            positive count dict, which switches to stacked green/red bars
            (true vs false predictions).
    """
    # sort the dictionary by decreasing value, into a list of tuples
    sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
    # unpacking the list of tuples into two lists
    sorted_keys, sorted_values = zip(*sorted_dic_by_value)
    #
    if true_p_bar != "":
        """
         Special case to draw in (green=true predictions) & (red=false predictions)
        """
        fp_sorted = []
        tp_sorted = []
        for key in sorted_keys:
            fp_sorted.append(dictionary[key] - true_p_bar[key])
            tp_sorted.append(true_p_bar[key])
        plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Predictions')
        plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Predictions', left=fp_sorted)
        # add legend
        plt.legend(loc='lower right')
        """
         Write number on side of bar
        """
        fig = plt.gcf() # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            fp_val = fp_sorted[i]
            tp_val = tp_sorted[i]
            fp_str_val = " " + str(fp_val)
            tp_str_val = fp_str_val + " " + str(tp_val)
            # trick to paint multicolor with offset:
            #   first paint everything and then repaint the first number
            t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
            plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
            if i == (len(sorted_values)-1): # largest bar
                adjust_axes(r, t, fig, axes)
    else:
        plt.barh(range(n_classes), sorted_values, color=plot_color)
        """
         Write number on side of bar
        """
        fig = plt.gcf() # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            str_val = " " + str(val) # add a space before
            if val < 1.0:
                str_val = " {0:.2f}".format(val)
            t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
            # re-set axes to show number inside the figure
            if i == (len(sorted_values)-1): # largest bar
                adjust_axes(r, t, fig, axes)
    # set window title
    # NOTE(review): fig.canvas.set_window_title is deprecated in newer
    # matplotlib (use fig.canvas.manager.set_window_title) — verify against
    # the pinned matplotlib version.
    fig.canvas.set_window_title(window_title)
    # write classes in y axis
    tick_font_size = 12
    plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
    """
     Re-scale height accordingly
    """
    init_height = fig.get_figheight()
    # comput the matrix height in points and inches
    dpi = fig.dpi
    height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)
    height_in = height_pt / dpi
    # compute the required figure height
    top_margin = 0.15    # in percentage of the figure height
    bottom_margin = 0.05 # in percentage of the figure height
    figure_height = height_in / (1 - top_margin - bottom_margin)
    # set new height
    if figure_height > init_height:
        fig.set_figheight(figure_height)
    # set plot title
    plt.title(plot_title, fontsize=14)
    # set axis titles
    # plt.xlabel('classes')
    plt.xlabel(x_label, fontsize='large')
    # adjust size of window
    fig.tight_layout()
    # save the plot
    fig.savefig(output_path)
    # show image
    if to_show:
        plt.show()
    # close the plot
    plt.close()
"""
Create a "tmp_files/" and "results/" directory
"""
tmp_files_path = "tmp_files"
if not os.path.exists(tmp_files_path): # if it doesn't exist already
os.makedirs(tmp_files_path)
results_files_path = "results"
if os.path.exists(results_files_path): # if it exist already
# reset the results directory
shutil.rmtree(results_files_path)
os.makedirs(results_files_path)
if draw_plot:
os.makedirs(results_files_path + "/classes")
if show_animation:
os.makedirs(results_files_path + "/images")
os.makedirs(results_files_path + "/images/single_predictions")
"""
Ground-Truth
Load each of the ground-truth files into a temporary ".json" file.
Create a list of all the class names present in the ground-truth (gt_classes).
"""
# get a list with the ground-truth files
ground_truth_files_list = glob.glob('ground-truth/*.txt')
if len(ground_truth_files_list) == 0:
error("Error: No ground-truth files found!")
ground_truth_files_list.sort()
# dictionary with counter per class
gt_counter_per_class = {}
for txt_file in ground_truth_files_list:
#print(txt_file)
file_id = txt_file.split(".txt",1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
# check if there is a correspondent predicted objects file
if not os.path.exists('predicted/' + file_id + ".txt"):
error_msg = "Error. File not found: predicted/" + file_id + ".txt\n"
error_msg += "(You can avoid this error message by running extra/intersect-gt-and-pred.py)"
error(error_msg)
lines_list = file_lines_to_list(txt_file)
# create ground-truth dictionary
bounding_boxes = []
is_difficult = False
for line in lines_list:
try:
if "difficult" in line:
class_name, left, top, right, bottom, _difficult = line.split()
is_difficult = True
else:
class_name, left, top, right, bottom = line.split()
except ValueError:
error_msg = "Error: File " + txt_file + " in the wrong format.\n"
error_msg += " Expected: <class_name> <left> <top> <right> <bottom> ['difficult']\n"
error_msg += " Received: " + line
error_msg += "\n\nIf you have a <class_name> with spaces between words you should remove them\n"
error_msg += "by running the script \"remove_space.py\" or \"rename_class.py\" in the \"extra/\" folder."
error(error_msg)
# check if class is in the ignore list, if yes skip
if class_name in args.ignore:
continue
bbox = left + " " + top + " " + right + " " +bottom
if is_difficult:
bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False, "difficult":True})
is_difficult = False
else:
bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False})
# count that object
if class_name in gt_counter_per_class:
gt_counter_per_class[class_name] += 1
else:
# if class didn't exist yet
gt_counter_per_class[class_name] = 1
# dump bounding_boxes into a ".json" file
with open(tmp_files_path + "/" + file_id + "_ground_truth.json", 'w') as outfile:
json.dump(bounding_boxes, outfile)
gt_classes = list(gt_counter_per_class.keys())
# let's sort the classes alphabetically
gt_classes = sorted(gt_classes)
n_classes = len(gt_classes)
#print(gt_classes)
#print(gt_counter_per_class)
"""
Check format of the flag --set-class-iou (if used)
e.g. check if class exists
"""
if specific_iou_flagged:
n_args = len(args.set_class_iou)
error_msg = \
'\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]'
if n_args % 2 != 0:
error('Error, missing arguments. Flag usage:' + error_msg)
# [class_1] [IoU_1] [class_2] [IoU_2]
# specific_iou_classes = ['class_1', 'class_2']
specific_iou_classes = args.set_class_iou[::2] # even
# iou_list = ['IoU_1', 'IoU_2']
iou_list = args.set_class_iou[1::2] # odd
if len(specific_iou_classes) != len(iou_list):
error('Error, missing arguments. Flag usage:' + error_msg)
for tmp_class in specific_iou_classes:
if tmp_class not in gt_classes:
error('Error, unknown class \"' + tmp_class + '\". Flag usage:' + error_msg)
for num in iou_list:
if not is_float_between_0_and_1(num):
error('Error, IoU must be between 0.0 and 1.0. Flag usage:' + error_msg)
"""
Predicted
Load each of the predicted files into a temporary ".json" file.
"""
# get a list with the predicted files
predicted_files_list = glob.glob('predicted/*.txt')
predicted_files_list.sort()
for class_index, class_name in enumerate(gt_classes):
bounding_boxes = []
for txt_file in predicted_files_list:
#print(txt_file)
# the first time it checks if all the corresponding ground-truth files exist
file_id = txt_file.split(".txt",1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
if class_index == 0:
if not os.path.exists('ground-truth/' + file_id + ".txt"):
error_msg = "Error. File not found: ground-truth/" + file_id + ".txt\n"
error_msg += "(You can avoid this error message by running extra/intersect-gt-and-pred.py)"
error(error_msg)
lines = file_lines_to_list(txt_file)
for line in lines:
try:
tmp_class_name, confidence, left, top, right, bottom = line.split()
except ValueError:
error_msg = "Error: File " + txt_file + " in the wrong format.\n"
error_msg += " Expected: <class_name> <confidence> <left> <top> <right> <bottom>\n"
error_msg += " Received: " + line
error(error_msg)
if tmp_class_name[1:] == class_name[1:]:
#print("match")
bbox = left + " " + top + " " + right + " " +bottom
bounding_boxes.append({"confidence":confidence, "file_id":file_id, "bbox":bbox})
#print(bounding_boxes)
# sort predictions by decreasing confidence
bounding_boxes.sort(key=lambda x:float(x['confidence']), reverse=True)
with open(tmp_files_path + "/" + class_name + "_predictions.json", 'w') as outfile:
json.dump(bounding_boxes, outfile)
"""
Calculate the AP for each class
"""
sum_AP = 0.0
ap_dictionary = {}
# open file to store the results
with open(results_files_path + "/results.txt", 'w') as results_file:
results_file.write("# AP and precision/recall per class\n")
count_true_positives = {}
for class_index, class_name in enumerate(gt_classes):
count_true_positives[class_name] = 0
"""
Load predictions of that class
"""
predictions_file = tmp_files_path + "/" + class_name + "_predictions.json"
predictions_data = json.load(open(predictions_file))
"""
Assign predictions to ground truth objects
"""
nd = len(predictions_data)
tp = [0] * nd # creates an array of zeros of size nd
fp = [0] * nd
for idx, prediction in enumerate(predictions_data):
file_id = prediction["file_id"]
if show_animation:
# find ground truth image
ground_truth_img = glob.glob1(img_path, file_id + ".*")
#tifCounter = len(glob.glob1(myPath,"*.tif"))
if len(ground_truth_img) == 0:
error("Error. Image not found with id: " + file_id)
elif len(ground_truth_img) > 1:
error("Error. Multiple image with id: " + file_id)
else: # found image
#print(img_path + "/" + ground_truth_img[0])
# Load image
img = cv2.imread(img_path + "/" + ground_truth_img[0])
# load image with draws of multiple detections
img_cumulative_path = results_files_path + "/images/" + ground_truth_img[0]
if os.path.isfile(img_cumulative_path):
img_cumulative = cv2.imread(img_cumulative_path)
else:
img_cumulative = img.copy()
# Add bottom border to image
bottom_border = 60
BLACK = [0, 0, 0]
img = cv2.copyMakeBorder(img, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=BLACK)
# assign prediction to ground truth object if any
# open ground-truth with that file_id
gt_file = tmp_files_path + "/" + file_id + "_ground_truth.json"
ground_truth_data = json.load(open(gt_file))
ovmax = -1
gt_match = -1
# load prediction bounding-box
bb = [ float(x) for x in prediction["bbox"].split() ]
for obj in ground_truth_data:
# look for a class_name match
if obj["class_name"] == class_name:
bbgt = [ float(x) for x in obj["bbox"].split() ]
bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
+ 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
ov = iw * ih / ua
if ov > ovmax:
ovmax = ov
gt_match = obj
# assign prediction as true positive/don't care/false positive
if show_animation:
status = "NO MATCH FOUND!" # status is only used in the animation
# set minimum overlap
min_overlap = MINOVERLAP
if specific_iou_flagged:
if class_name in specific_iou_classes:
index = specific_iou_classes.index(class_name)
min_overlap = float(iou_list[index])
if ovmax >= min_overlap:
if "difficult" not in gt_match:
if not bool(gt_match["used"]):
# true positive
tp[idx] = 1
gt_match["used"] = True
count_true_positives[class_name] += 1
# update the ".json" file
with open(gt_file, 'w') as f:
f.write(json.dumps(ground_truth_data))
if show_animation:
status = "MATCH!"
else:
# false positive (multiple detection)
fp[idx] = 1
if show_animation:
status = "REPEATED MATCH!"
else:
# false positive
fp[idx] = 1
if ovmax > 0:
status = "INSUFFICIENT OVERLAP"
"""
Draw image to show animation
"""
if show_animation:
height, widht = img.shape[:2]
# colors (OpenCV works with BGR)
white = (255,255,255)
light_blue = (255,200,100)
green = (0,255,0)
light_red = (30,30,255)
# 1st line
margin = 10
v_pos = int(height - margin - (bottom_border / 2))
text = "Image: " + ground_truth_img[0] + " "
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
text = "Class [" + str(class_index) + "/" + str(n_classes) + "]: " + class_name + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), light_blue, line_width)
if ovmax != -1:
color = light_red
if status == "INSUFFICIENT OVERLAP":
text = "IoU: {0:.2f}% ".format(ovmax*100) + "< {0:.2f}% ".format(min_overlap*100)
else:
text = "IoU: {0:.2f}% ".format(ovmax*100) + ">= {0:.2f}% ".format(min_overlap*100)
color = green
img, _ = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
# 2nd line
v_pos += int(bottom_border / 2)
rank_pos = str(idx+1) # rank position (idx starts at 0)
text = "Prediction #rank: " + rank_pos + " confidence: {0:.2f}% ".format(float(prediction["confidence"])*100)
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
color = light_red
if status == "MATCH!":
color = green
text = "Result: " + status + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
font = cv2.FONT_HERSHEY_SIMPLEX
if ovmax > 0: # if there is intersections between the bounding-boxes
bbgt = [ int(x) for x in gt_match["bbox"].split() ]
cv2.rectangle(img,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
cv2.rectangle(img_cumulative,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
cv2.putText(img_cumulative, class_name, (bbgt[0],bbgt[1] - 5), font, 0.6, light_blue, 1, cv2.LINE_AA)
bb = [int(i) for i in bb]
cv2.rectangle(img,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
cv2.rectangle(img_cumulative,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
cv2.putText(img_cumulative, class_name, (bb[0],bb[1] - 5), font, 0.6, color, 1, cv2.LINE_AA)
# show image
cv2.imshow("Animation", img)
cv2.waitKey(20) # show for 20 ms
# save image to results
output_img_path = results_files_path + "/images/single_predictions/" + class_name + "_prediction" + str(idx) + ".jpg"
cv2.imwrite(output_img_path, img)
# save the image with all the objects drawn to it
cv2.imwrite(img_cumulative_path, img_cumulative)
#print(tp)
# compute precision/recall
cumsum = 0
for idx, val in enumerate(fp):
fp[idx] += cumsum
cumsum += val
cumsum = 0
for idx, val in enumerate(tp):
tp[idx] += cumsum
cumsum += val
#print(tp)
rec = tp[:]
for idx, val in enumerate(tp):
rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
#print(rec)
prec = tp[:]
for idx, val in enumerate(tp):
prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
#print(prec)
ap, mrec, mprec = voc_ap(rec, prec)
if ap >0.10:
sum_AP += ap
# sum_AP += ap
text = "{0:.2f}%".format(ap*100) + " = " + class_name + " AP " #class_name + " AP = {0:.2f}%".format(ap*100)
"""
Write to results.txt
"""
rounded_prec = [ '%.2f' % elem for elem in prec ]
rounded_rec = [ '%.2f' % elem for elem in rec ]
results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")
if not args.quiet:
print(text)
ap_dictionary[class_name] = ap
"""
Draw plot
"""
if draw_plot:
plt.plot(rec, prec, '-o')
# add a new penultimate point to the list (mrec[-2], 0.0)
# since the last line segment (and respective area) do not affect the AP value
area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
# set window title
fig = plt.gcf() # gcf - get current figure
fig.canvas.set_window_title('AP ' + class_name)
# set plot title
plt.title('class: ' + text)
#plt.suptitle('This is a somewhat long figure title', fontsize=16)
# set axis titles
plt.xlabel('Recall')
plt.ylabel('Precision')
# optional - set axes
axes = plt.gca() # gca - get current axes
axes.set_xlim([0.0,1.0])
axes.set_ylim([0.0,1.05]) # .05 to give some extra space
# Alternative option -> wait for button to be pressed
#while not plt.waitforbuttonpress(): pass # wait for key display
# Alternative option -> normal display
#plt.show()
# save the plot
fig.savefig(results_files_path + "/classes/" + class_name + ".png")
plt.cla() # clear axes for next plot
if show_animation:
cv2.destroyAllWindows()
results_file.write("\n# mAP of all classes\n")
mAP = sum_AP / n_classes
text = "mAP = {0:.2f}%".format(mAP*100)
results_file.write(text + "\n")
print(text)
# remove the tmp_files directory
shutil.rmtree(tmp_files_path)
"""
Count total of Predictions
"""
# iterate through all the files
pred_counter_per_class = {}
#all_classes_predicted_files = set([])
for txt_file in predicted_files_list:
# get lines to list
lines_list = file_lines_to_list(txt_file)
for line in lines_list:
class_name = line.split()[0]
# check if class is in the ignore list, if yes skip
if class_name in args.ignore:
continue
# count that object
if class_name in pred_counter_per_class:
pred_counter_per_class[class_name] += 1
else:
# if class didn't exist yet
pred_counter_per_class[class_name] = 1
#print(pred_counter_per_class)
pred_classes = list(pred_counter_per_class.keys())
"""
Plot the total number of occurences of each class in the ground-truth
"""
if draw_plot:
window_title = "Ground-Truth Info"
plot_title = "Ground-Truth\n"
plot_title += "(" + str(len(ground_truth_files_list)) + " files and " + str(n_classes) + " classes)"
x_label = "Number of objects per class"
output_path = results_files_path + "/Ground-Truth Info.png"
to_show = False
plot_color = 'forestgreen'
draw_plot_func(
gt_counter_per_class,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
'',
)
"""
Write number of ground-truth objects per class to results.txt
"""
with open(results_files_path + "/results.txt", 'a') as results_file:
results_file.write("\n# Number of ground-truth objects per class\n")
for class_name in sorted(gt_counter_per_class):
results_file.write(class_name + ": " + str(gt_counter_per_class[class_name]) + "\n")
"""
Finish counting true positives
"""
for class_name in pred_classes:
# if class exists in predictions but not in ground-truth then there are no true positives in that class
if class_name not in gt_classes:
count_true_positives[class_name] = 0
#print(count_true_positives)
"""
Plot the total number of occurences of each class in the "predicted" folder
"""
if draw_plot:
window_title = "Predicted Objects Info"
# Plot title
plot_title = "Predicted Objects\n"
plot_title += "(" + str(len(predicted_files_list)) + " files and "
count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(pred_counter_per_class.values()))
plot_title += str(count_non_zero_values_in_dictionary) + " detected classes)"
# end Plot title
x_label = "Number of objects per class"
output_path = results_files_path + "/Predicted Objects Info.png"
to_show = False
plot_color = 'forestgreen'
true_p_bar = count_true_positives
draw_plot_func(
pred_counter_per_class,
len(pred_counter_per_class),
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
true_p_bar
)
"""
Write number of predicted objects per class to results.txt
"""
with open(results_files_path + "/results.txt", 'a') as results_file:
results_file.write("\n# Number of predicted objects per class\n")
for class_name in sorted(pred_classes):
n_pred = pred_counter_per_class[class_name]
text = class_name + ": " + str(n_pred)
text += " (tp:" + str(count_true_positives[class_name]) + ""
text += ", fp:" + str(n_pred - count_true_positives[class_name]) + ")\n"
results_file.write(text)
"""
Draw mAP plot (Show AP's of all classes in decreasing order)
"""
if draw_plot:
window_title = "mAP"
plot_title = "mAP = {0:.2f}%".format(mAP*100)
x_label = "Average Precision"
output_path = results_files_path + "/mAP.png"
to_show = True
plot_color = 'royalblue'
draw_plot_func(
ap_dictionary,
n_classes,
window_title,
plot_title,
x_label,
output_path,
to_show,
plot_color,
""
)
import glob
import json
import os
import shutil
import operator
import sys
import argparse
MINOVERLAP = 0.5 # default value (defined in the PASCAL VOC2012 challenge)
parser = argparse.ArgumentParser()
parser.add_argument('-na', '--no-animation', help="no animation is shown.", action="store_true")
parser.add_argument('-np', '--no-plot', help="no plot is shown.", action="store_true")
parser.add_argument('-q', '--quiet', help="minimalistic console output.", action="store_true")
# argparse receiving list of classes to be ignored
parser.add_argument('-i', '--ignore', nargs='+', type=str, help="ignore a list of classes.")
# argparse receiving list of classes with specific IoU
parser.add_argument('--set-class-iou', nargs='+', type=str, help="set IoU for a specific class.")
args = parser.parse_args()
# if there are no classes to ignore then replace None by empty list
if args.ignore is None:
args.ignore = []
specific_iou_flagged = False
if args.set_class_iou is not None:
specific_iou_flagged = True
# if there are no images then no animation can be shown
img_path = 'images'
if os.path.exists(img_path):
for dirpath, dirnames, files in os.walk(img_path):
if not files:
# no image files found
args.no_animation = True
else:
args.no_animation = True
# try to import OpenCV if the user didn't choose the option --no-animation
show_animation = False
if not args.no_animation:
try:
import cv2
show_animation = True
except ImportError:
print("\"opencv-python\" not found, please install to visualize the results.")
args.no_animation = True
# try to import Matplotlib if the user didn't choose the option --no-plot
draw_plot = False
if not args.no_plot:
try:
import matplotlib.pyplot as plt
draw_plot = True
except ImportError:
print("\"matplotlib\" not found, please install it to get the resulting plots.")
args.no_plot = True
"""
throw error and exit
"""
def error(msg):
    """Print *msg* and abort the script with a non-zero exit status.

    Fix: the original called sys.exit(0), which reports success to the
    operating system even though a fatal error occurred; shell scripts and
    CI pipelines would not notice the failure. Exit with status 1 instead.
    """
    print(msg)
    sys.exit(1)
"""
check if the number is a float between 0.0 and 1.0
"""
def is_float_between_0_and_1(value):
    """Return True when *value* parses as a float strictly inside (0.0, 1.0).

    The endpoints 0.0 and 1.0 themselves are rejected, as is anything that
    does not parse as a float.
    """
    try:
        parsed = float(value)
    except ValueError:
        return False
    return 0.0 < parsed < 1.0
"""
Calculate the AP given the recall and precision array
1st) We compute a version of the measured precision/recall curve with
precision monotonically decreasing
2nd) We compute the AP as the area under this curve by numerical integration.
"""
def voc_ap(rec, prec):
    """Compute the PASCAL VOC Average Precision from recall/precision arrays.

    Following the official VOC2012 MATLAB code: first make the precision
    curve monotonically decreasing (scanning from the end), then integrate
    the area under the curve at the recall change points.

    NOTE: *rec* and *prec* are mutated in place — a leading 0.0 is inserted
    and a sentinel value appended to each — and callers rely on seeing the
    mutated lists afterwards.

    Returns (ap, mrec, mpre): the AP plus copies of the padded recall and
    monotonic precision arrays.
    """
    # Pad the curves in place: recall spans [0, 1], precision ends at 0.
    rec.insert(0, 0.0)
    rec.append(1.0)
    prec.insert(0, 0.0)
    prec.append(0.0)
    mrec = list(rec)
    mpre = list(prec)
    # Make precision monotonically decreasing, sweeping right to left
    # (MATLAB: for i=numel(mpre)-1:-1:1, mpre(i)=max(mpre(i),mpre(i+1))).
    for pos in reversed(range(len(mpre) - 1)):
        if mpre[pos] < mpre[pos + 1]:
            mpre[pos] = mpre[pos + 1]
    # Indices where recall changes (MATLAB: find(mrec(2:end)~=mrec(1:end-1))+1).
    change_points = [pos for pos in range(1, len(mrec)) if mrec[pos] != mrec[pos - 1]]
    # AP = area under the rectified curve, by numerical integration.
    ap = sum((mrec[pos] - mrec[pos - 1]) * mpre[pos] for pos in change_points)
    return ap, mrec, mpre
"""
Convert the lines of a file to a list
"""
def file_lines_to_list(path):
    """Read the text file at *path* and return its lines as a list.

    Each line is stripped of surrounding whitespace (including the
    trailing newline).
    """
    with open(path) as handle:
        return [raw_line.strip() for raw_line in handle]
"""
Draws text in image
"""
def draw_text_in_image(img, text, pos, color, line_width):
    """Render *text* onto *img* at *pos* and return (img, updated width).

    The returned width is *line_width* plus the pixel width of the rendered
    text, so successive calls can lay text segments out left to right on
    the same line.
    """
    font = cv2.FONT_HERSHEY_PLAIN
    font_scale = 1
    thickness = 1
    cv2.putText(img, text, pos, font, font_scale, color, thickness)
    text_width, _ = cv2.getTextSize(text, font, font_scale, thickness)[0]
    return img, (line_width + text_width)
"""
Plot - adjust axes
"""
def adjust_axes(r, t, fig, axes):
    """Widen the x-limits of *axes* so the text artist *t* fits in *fig*.

    *r* is the figure's renderer, used to measure the rendered text.
    """
    # Width of the rendered text, converted from pixels to inches.
    extent = t.get_window_extent(renderer=r)
    text_width_inches = extent.width / fig.dpi
    # Proportion by which the figure would have to grow to fit the text;
    # stretch the x-axis upper limit by that same factor instead.
    current_fig_width = fig.get_figwidth()
    proportion = (current_fig_width + text_width_inches) / current_fig_width
    x_min, x_max = axes.get_xlim()
    axes.set_xlim([x_min, x_max * proportion])
"""
Draw plot using Matplotlib
"""
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
    """Draw a horizontal bar chart of *dictionary* (class -> value) and save it.

    When *true_p_bar* is a dict (not ""), each bar is split into green
    (true predictions, from true_p_bar) and red (false predictions,
    dictionary[key] - true_p_bar[key]) segments. The figure is written to
    *output_path* and optionally shown on screen when *to_show* is True.
    """
    # sort the dictionary by decreasing value, into a list of tuples
    sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
    # unpacking the list of tuples into two lists
    sorted_keys, sorted_values = zip(*sorted_dic_by_value)
    #
    if true_p_bar != "":
        """
        Special case to draw in (green=true predictions) & (red=false predictions)
        """
        fp_sorted = []
        tp_sorted = []
        for key in sorted_keys:
            fp_sorted.append(dictionary[key] - true_p_bar[key])
            tp_sorted.append(true_p_bar[key])
        # Draw false predictions first; stack true predictions to their right.
        plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Predictions')
        plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Predictions', left=fp_sorted)
        # add legend
        plt.legend(loc='lower right')
        """
        Write number on side of bar
        """
        fig = plt.gcf() # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            fp_val = fp_sorted[i]
            tp_val = tp_sorted[i]
            fp_str_val = " " + str(fp_val)
            tp_str_val = fp_str_val + " " + str(tp_val)
            # trick to paint multicolor with offset:
            # first paint everything and then repaint the first number
            t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
            plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
            if i == (len(sorted_values)-1): # largest bar
                adjust_axes(r, t, fig, axes)
    else:
        plt.barh(range(n_classes), sorted_values, color=plot_color)
        """
        Write number on side of bar
        """
        fig = plt.gcf() # gcf - get current figure
        axes = plt.gca()
        r = fig.canvas.get_renderer()
        for i, val in enumerate(sorted_values):
            str_val = " " + str(val) # add a space before
            if val < 1.0:
                str_val = " {0:.2f}".format(val)
            t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
            # re-set axes to show number inside the figure
            if i == (len(sorted_values)-1): # largest bar
                adjust_axes(r, t, fig, axes)
    # set window title
    # NOTE(review): fig.canvas.set_window_title was removed in newer
    # Matplotlib (3.6+); verify against the pinned Matplotlib version.
    fig.canvas.set_window_title(window_title)
    # write classes in y axis
    tick_font_size = 12
    plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
    """
    Re-scale height accordingly
    """
    init_height = fig.get_figheight()
    # comput the matrix height in points and inches
    dpi = fig.dpi
    height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)
    height_in = height_pt / dpi
    # compute the required figure height
    top_margin = 0.15 # in percentage of the figure height
    bottom_margin = 0.05 # in percentage of the figure height
    figure_height = height_in / (1 - top_margin - bottom_margin)
    # set new height
    if figure_height > init_height:
        fig.set_figheight(figure_height)
    # set plot title
    plt.title(plot_title, fontsize=14)
    # set axis titles
    # plt.xlabel('classes')
    plt.xlabel(x_label, fontsize='large')
    # adjust size of window
    fig.tight_layout()
    # save the plot
    fig.savefig(output_path)
    # show image
    if to_show:
        plt.show()
    # close the plot
    plt.close()
"""
Create a "tmp_files/" and "results/" directory
"""
# Scratch directory for the per-file/per-class intermediate ".json" files;
# it is deleted again at the end of the run.
tmp_files_path = "tmp_files"
if not os.path.exists(tmp_files_path): # if it doesn't exist already
    os.makedirs(tmp_files_path)
# Results directory is recreated from scratch on every run.
results_files_path = "results"
if os.path.exists(results_files_path): # if it exist already
    # reset the results directory
    shutil.rmtree(results_files_path)
os.makedirs(results_files_path)
if draw_plot:
    os.makedirs(results_files_path + "/classes")
if show_animation:
    os.makedirs(results_files_path + "/images")
    os.makedirs(results_files_path + "/images/single_predictions")
"""
Ground-Truth
Load each of the ground-truth files into a temporary ".json" file.
Create a list of all the class names present in the ground-truth (gt_classes).
"""
# get a list with the ground-truth files
ground_truth_files_list = glob.glob('ground-truth/*.txt')
if len(ground_truth_files_list) == 0:
    error("Error: No ground-truth files found!")
ground_truth_files_list.sort()
# dictionary with counter per class
gt_counter_per_class = {}
# Parse each ground-truth .txt file into a list of box dicts and dump it as
# <tmp_files_path>/<file_id>_ground_truth.json for the matching stage below.
for txt_file in ground_truth_files_list:
    #print(txt_file)
    file_id = txt_file.split(".txt",1)[0]
    file_id = os.path.basename(os.path.normpath(file_id))
    # check if there is a correspondent predicted objects file
    if not os.path.exists('predicted/' + file_id + ".txt"):
        error_msg = "Error. File not found: predicted/" + file_id + ".txt\n"
        error_msg += "(You can avoid this error message by running extra/intersect-gt-and-pred.py)"
        error(error_msg)
    lines_list = file_lines_to_list(txt_file)
    # create ground-truth dictionary
    bounding_boxes = []
    is_difficult = False
    for line in lines_list:
        try:
            if "difficult" in line:
                class_name, left, top, right, bottom, _difficult = line.split()
                is_difficult = True
            else:
                class_name, left, top, right, bottom = line.split()
        except ValueError:
            error_msg = "Error: File " + txt_file + " in the wrong format.\n"
            error_msg += "                Expected: <class_name> <left> <top> <right> <bottom> ['difficult']\n"
            error_msg += "                Received: " + line
            error_msg += "\n\nIf you have a <class_name> with spaces between words you should remove them\n"
            error_msg += "by running the script \"remove_space.py\" or \"rename_class.py\" in the \"extra/\" folder."
            error(error_msg)
        # check if class is in the ignore list, if yes skip
        if class_name in args.ignore:
            continue
        bbox = left + " " + top + " " + right + " " +bottom
        if is_difficult:
            # "difficult" boxes are kept but excluded from the TP/FP counts
            # during matching.
            bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False, "difficult":True})
            is_difficult = False
        else:
            bounding_boxes.append({"class_name":class_name, "bbox":bbox, "used":False})
        # count that object
        if class_name in gt_counter_per_class:
            gt_counter_per_class[class_name] += 1
        else:
            # if class didn't exist yet
            gt_counter_per_class[class_name] = 1
    # dump bounding_boxes into a ".json" file
    with open(tmp_files_path + "/" + file_id + "_ground_truth.json", 'w') as outfile:
        json.dump(bounding_boxes, outfile)
gt_classes = list(gt_counter_per_class.keys())
# let's sort the classes alphabetically
gt_classes = sorted(gt_classes)
n_classes = len(gt_classes)
#print(gt_classes)
#print(gt_counter_per_class)
"""
Check format of the flag --set-class-iou (if used)
e.g. check if class exists
"""
# Validate --set-class-iou: must be alternating [class] [IoU] pairs, every
# class must exist in the ground truth, and every IoU must lie in (0, 1).
if specific_iou_flagged:
    n_args = len(args.set_class_iou)
    error_msg = \
        '\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]'
    if n_args % 2 != 0:
        error('Error, missing arguments. Flag usage:' + error_msg)
    # [class_1] [IoU_1] [class_2] [IoU_2]
    # specific_iou_classes = ['class_1', 'class_2']
    specific_iou_classes = args.set_class_iou[::2] # even
    # iou_list = ['IoU_1', 'IoU_2']
    iou_list = args.set_class_iou[1::2] # odd
    if len(specific_iou_classes) != len(iou_list):
        error('Error, missing arguments. Flag usage:' + error_msg)
    for tmp_class in specific_iou_classes:
        if tmp_class not in gt_classes:
            error('Error, unknown class \"' + tmp_class + '\". Flag usage:' + error_msg)
    for num in iou_list:
        if not is_float_between_0_and_1(num):
            error('Error, IoU must be between 0.0 and 1.0. Flag usage:' + error_msg)
"""
Predicted
Load each of the predicted files into a temporary ".json" file.
"""
# get a list with the predicted files
predicted_files_list = glob.glob('predicted/*.txt')
predicted_files_list.sort()
# For every ground-truth class, collect all matching detections across all
# prediction files and write them (sorted by confidence) to one JSON file.
for class_index, class_name in enumerate(gt_classes):
    bounding_boxes = []
    for txt_file in predicted_files_list:
        #print(txt_file)
        # the first time it checks if all the corresponding ground-truth files exist
        file_id = txt_file.split(".txt",1)[0]
        file_id = os.path.basename(os.path.normpath(file_id))
        if class_index == 0:
            if not os.path.exists('ground-truth/' + file_id + ".txt"):
                error_msg = "Error. File not found: ground-truth/" + file_id + ".txt\n"
                error_msg += "(You can avoid this error message by running extra/intersect-gt-and-pred.py)"
                error(error_msg)
        lines = file_lines_to_list(txt_file)
        for line in lines:
            try:
                tmp_class_name, confidence, left, top, right, bottom = line.split()
            except ValueError:
                error_msg = "Error: File " + txt_file + " in the wrong format.\n"
                error_msg += "                Expected: <class_name> <confidence> <left> <top> <right> <bottom>\n"
                error_msg += "                Received: " + line
                error(error_msg)
            # NOTE(review): this compares both names with their FIRST character
            # dropped (e.g. "car" would match "bar"). Upstream mAP code compares
            # the full names; confirm this slicing is intentional for this
            # experiment's class naming.
            if tmp_class_name[1:] == class_name[1:]:
                #print("match")
                bbox = left + " " + top + " " + right + " " +bottom
                bounding_boxes.append({"confidence":confidence, "file_id":file_id, "bbox":bbox})
    #print(bounding_boxes)
    # sort predictions by decreasing confidence
    bounding_boxes.sort(key=lambda x:float(x['confidence']), reverse=True)
    with open(tmp_files_path + "/" + class_name + "_predictions.json", 'w') as outfile:
        json.dump(bounding_boxes, outfile)
"""
Calculate the AP for each class
"""
# Main evaluation stage: match each detection against the ground truth,
# accumulate TP/FP, and compute per-class AP plus the overall mAP.
sum_AP = 0.0
ap_dictionary = {}
# open file to store the results
with open(results_files_path + "/results.txt", 'w') as results_file:
    results_file.write("# AP and precision/recall per class\n")
    count_true_positives = {}
    for class_index, class_name in enumerate(gt_classes):
        count_true_positives[class_name] = 0
        """
        Load predictions of that class
        """
        predictions_file = tmp_files_path + "/" + class_name + "_predictions.json"
        # NOTE(review): the open() handle is never closed (same below for the
        # ground-truth file); harmless on CPython but a leak nonetheless.
        predictions_data = json.load(open(predictions_file))
        """
        Assign predictions to ground truth objects
        """
        nd = len(predictions_data)
        tp = [0] * nd # creates an array of zeros of size nd
        fp = [0] * nd
        for idx, prediction in enumerate(predictions_data):
            file_id = prediction["file_id"]
            if show_animation:
                # find ground truth image
                ground_truth_img = glob.glob1(img_path, file_id + ".*")
                #tifCounter = len(glob.glob1(myPath,"*.tif"))
                if len(ground_truth_img) == 0:
                    error("Error. Image not found with id: " + file_id)
                elif len(ground_truth_img) > 1:
                    error("Error. Multiple image with id: " + file_id)
                else: # found image
                    #print(img_path + "/" + ground_truth_img[0])
                    # Load image
                    img = cv2.imread(img_path + "/" + ground_truth_img[0])
                    # load image with draws of multiple detections
                    img_cumulative_path = results_files_path + "/images/" + ground_truth_img[0]
                    if os.path.isfile(img_cumulative_path):
                        img_cumulative = cv2.imread(img_cumulative_path)
                    else:
                        img_cumulative = img.copy()
                    # Add bottom border to image
                    bottom_border = 60
                    BLACK = [0, 0, 0]
                    img = cv2.copyMakeBorder(img, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=BLACK)
            # assign prediction to ground truth object if any
            # open ground-truth with that file_id
            gt_file = tmp_files_path + "/" + file_id + "_ground_truth.json"
            ground_truth_data = json.load(open(gt_file))
            ovmax = -1
            gt_match = -1
            # load prediction bounding-box
            bb = [ float(x) for x in prediction["bbox"].split() ]
            # Find the same-class ground-truth box with the highest IoU.
            for obj in ground_truth_data:
                # look for a class_name match
                if obj["class_name"] == class_name:
                    bbgt = [ float(x) for x in obj["bbox"].split() ]
                    bi = [max(bb[0],bbgt[0]), max(bb[1],bbgt[1]), min(bb[2],bbgt[2]), min(bb[3],bbgt[3])]
                    iw = bi[2] - bi[0] + 1
                    ih = bi[3] - bi[1] + 1
                    if iw > 0 and ih > 0:
                        # compute overlap (IoU) = area of intersection / area of union
                        ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + (bbgt[2] - bbgt[0]
                                + 1) * (bbgt[3] - bbgt[1] + 1) - iw * ih
                        ov = iw * ih / ua
                        if ov > ovmax:
                            ovmax = ov
                            gt_match = obj
            # assign prediction as true positive/don't care/false positive
            if show_animation:
                status = "NO MATCH FOUND!" # status is only used in the animation
            # set minimum overlap
            min_overlap = MINOVERLAP
            if specific_iou_flagged:
                if class_name in specific_iou_classes:
                    index = specific_iou_classes.index(class_name)
                    min_overlap = float(iou_list[index])
            if ovmax >= min_overlap:
                # "difficult" ground-truth boxes count as neither TP nor FP.
                if "difficult" not in gt_match:
                    if not bool(gt_match["used"]):
                        # true positive
                        tp[idx] = 1
                        gt_match["used"] = True
                        count_true_positives[class_name] += 1
                        # update the ".json" file
                        with open(gt_file, 'w') as f:
                            f.write(json.dumps(ground_truth_data))
                        if show_animation:
                            status = "MATCH!"
                    else:
                        # false positive (multiple detection)
                        fp[idx] = 1
                        if show_animation:
                            status = "REPEATED MATCH!"
            else:
                # false positive
                fp[idx] = 1
                if ovmax > 0:
                    status = "INSUFFICIENT OVERLAP"
            """
            Draw image to show animation
            """
            if show_animation:
                height, widht = img.shape[:2]
                # colors (OpenCV works with BGR)
                white = (255,255,255)
                light_blue = (255,200,100)
                green = (0,255,0)
                light_red = (30,30,255)
                # 1st line
                margin = 10
                v_pos = int(height - margin - (bottom_border / 2))
                text = "Image: " + ground_truth_img[0] + " "
                img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
                text = "Class [" + str(class_index) + "/" + str(n_classes) + "]: " + class_name + " "
                img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), light_blue, line_width)
                if ovmax != -1:
                    color = light_red
                    if status == "INSUFFICIENT OVERLAP":
                        text = "IoU: {0:.2f}% ".format(ovmax*100) + "< {0:.2f}% ".format(min_overlap*100)
                    else:
                        text = "IoU: {0:.2f}% ".format(ovmax*100) + ">= {0:.2f}% ".format(min_overlap*100)
                        color = green
                    img, _ = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
                # 2nd line
                v_pos += int(bottom_border / 2)
                rank_pos = str(idx+1) # rank position (idx starts at 0)
                text = "Prediction #rank: " + rank_pos + " confidence: {0:.2f}% ".format(float(prediction["confidence"])*100)
                img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
                color = light_red
                if status == "MATCH!":
                    color = green
                text = "Result: " + status + " "
                img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
                font = cv2.FONT_HERSHEY_SIMPLEX
                if ovmax > 0: # if there is intersections between the bounding-boxes
                    bbgt = [ int(x) for x in gt_match["bbox"].split() ]
                    cv2.rectangle(img,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
                    cv2.rectangle(img_cumulative,(bbgt[0],bbgt[1]),(bbgt[2],bbgt[3]),light_blue,2)
                    cv2.putText(img_cumulative, class_name, (bbgt[0],bbgt[1] - 5), font, 0.6, light_blue, 1, cv2.LINE_AA)
                bb = [int(i) for i in bb]
                cv2.rectangle(img,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
                cv2.rectangle(img_cumulative,(bb[0],bb[1]),(bb[2],bb[3]),color,2)
                cv2.putText(img_cumulative, class_name, (bb[0],bb[1] - 5), font, 0.6, color, 1, cv2.LINE_AA)
                # show image
                cv2.imshow("Animation", img)
                cv2.waitKey(20) # show for 20 ms
                # save image to results
                output_img_path = results_files_path + "/images/single_predictions/" + class_name + "_prediction" + str(idx) + ".jpg"
                cv2.imwrite(output_img_path, img)
                # save the image with all the objects drawn to it
                cv2.imwrite(img_cumulative_path, img_cumulative)
        #print(tp)
        # compute precision/recall
        # Turn the per-detection 0/1 arrays into cumulative sums in place.
        cumsum = 0
        for idx, val in enumerate(fp):
            fp[idx] += cumsum
            cumsum += val
        cumsum = 0
        for idx, val in enumerate(tp):
            tp[idx] += cumsum
            cumsum += val
        #print(tp)
        rec = tp[:]
        for idx, val in enumerate(tp):
            rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]
        #print(rec)
        prec = tp[:]
        for idx, val in enumerate(tp):
            prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
        #print(prec)
        # voc_ap mutates rec/prec in place (pads both ends); the plot below
        # therefore shows the padded curves.
        ap, mrec, mprec = voc_ap(rec, prec)
        # NOTE(review): this deviates from standard mAP — classes with
        # AP <= 0.10 are excluded from the sum, yet mAP below still divides by
        # n_classes (all classes). Confirm this filter is intentional for this
        # experiment; the commented-out line is the standard behavior.
        if ap >0.10:
            sum_AP += ap
        # sum_AP += ap
        text = "{0:.2f}%".format(ap*100) + " = " + class_name + " AP " #class_name + " AP = {0:.2f}%".format(ap*100)
        """
        Write to results.txt
        """
        rounded_prec = [ '%.2f' % elem for elem in prec ]
        rounded_rec = [ '%.2f' % elem for elem in rec ]
        results_file.write(text + "\n Precision: " + str(rounded_prec) + "\n Recall :" + str(rounded_rec) + "\n\n")
        if not args.quiet:
            print(text)
        ap_dictionary[class_name] = ap
        """
        Draw plot
        """
        if draw_plot:
            plt.plot(rec, prec, '-o')
            # add a new penultimate point to the list (mrec[-2], 0.0)
            # since the last line segment (and respective area) do not affect the AP value
            area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
            area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
            plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
            # set window title
            fig = plt.gcf() # gcf - get current figure
            fig.canvas.set_window_title('AP ' + class_name)
            # set plot title
            plt.title('class: ' + text)
            #plt.suptitle('This is a somewhat long figure title', fontsize=16)
            # set axis titles
            plt.xlabel('Recall')
            plt.ylabel('Precision')
            # optional - set axes
            axes = plt.gca() # gca - get current axes
            axes.set_xlim([0.0,1.0])
            axes.set_ylim([0.0,1.05]) # .05 to give some extra space
            # Alternative option -> wait for button to be pressed
            #while not plt.waitforbuttonpress(): pass # wait for key display
            # Alternative option -> normal display
            #plt.show()
            # save the plot
            fig.savefig(results_files_path + "/classes/" + class_name + ".png")
            plt.cla() # clear axes for next plot
    if show_animation:
        cv2.destroyAllWindows()
    results_file.write("\n# mAP of all classes\n")
    mAP = sum_AP / n_classes
    text = "mAP = {0:.2f}%".format(mAP*100)
    results_file.write(text + "\n")
    print(text)
# remove the tmp_files directory
shutil.rmtree(tmp_files_path)
"""
 Count total of Predictions
"""
# iterate through all the files
# Counts every prediction line whose class is not ignored, regardless of
# confidence or whether it matched a ground-truth box.
pred_counter_per_class = {}
#all_classes_predicted_files = set([])
for txt_file in predicted_files_list:
    # get lines to list
    lines_list = file_lines_to_list(txt_file)
    for line in lines_list:
        class_name = line.split()[0]
        # check if class is in the ignore list, if yes skip
        if class_name in args.ignore:
            continue
        # count that object
        if class_name in pred_counter_per_class:
            pred_counter_per_class[class_name] += 1
        else:
            # if class didn't exist yet
            pred_counter_per_class[class_name] = 1
#print(pred_counter_per_class)
# classes seen in the predictions, in order of first appearance
pred_classes = list(pred_counter_per_class.keys())
"""
 Plot the total number of occurences of each class in the ground-truth
"""
if draw_plot:
    window_title = "Ground-Truth Info"
    plot_title = "Ground-Truth\n"
    plot_title += "(" + str(len(ground_truth_files_list)) + " files and " + str(n_classes) + " classes)"
    x_label = "Number of objects per class"
    output_path = results_files_path + "/Ground-Truth Info.png"
    to_show = False
    plot_color = 'forestgreen'
    draw_plot_func(
        gt_counter_per_class,
        n_classes,
        window_title,
        plot_title,
        x_label,
        output_path,
        to_show,
        plot_color,
        '',
        )
"""
 Write number of ground-truth objects per class to results.txt
"""
with open(results_files_path + "/results.txt", 'a') as results_file:
    results_file.write("\n# Number of ground-truth objects per class\n")
    for class_name in sorted(gt_counter_per_class):
        results_file.write(class_name + ": " + str(gt_counter_per_class[class_name]) + "\n")
"""
 Finish counting true positives
"""
for class_name in pred_classes:
    # if class exists in predictions but not in ground-truth then there are no true positives in that class
    if class_name not in gt_classes:
        count_true_positives[class_name] = 0
#print(count_true_positives)
"""
 Plot the total number of occurences of each class in the "predicted" folder
"""
if draw_plot:
    window_title = "Predicted Objects Info"
    # Plot title
    plot_title = "Predicted Objects\n"
    plot_title += "(" + str(len(predicted_files_list)) + " files and "
    count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(pred_counter_per_class.values()))
    plot_title += str(count_non_zero_values_in_dictionary) + " detected classes)"
    # end Plot title
    x_label = "Number of objects per class"
    output_path = results_files_path + "/Predicted Objects Info.png"
    to_show = False
    plot_color = 'forestgreen'
    # NOTE(review): count_true_positives is presumably filled during the
    # per-class matching phase earlier in this script — confirm upstream.
    true_p_bar = count_true_positives
    draw_plot_func(
        pred_counter_per_class,
        len(pred_counter_per_class),
        window_title,
        plot_title,
        x_label,
        output_path,
        to_show,
        plot_color,
        true_p_bar
        )
"""
 Write number of predicted objects per class to results.txt
"""
with open(results_files_path + "/results.txt", 'a') as results_file:
    results_file.write("\n# Number of predicted objects per class\n")
    for class_name in sorted(pred_classes):
        n_pred = pred_counter_per_class[class_name]
        text = class_name + ": " + str(n_pred)
        text += " (tp:" + str(count_true_positives[class_name]) + ""
        text += ", fp:" + str(n_pred - count_true_positives[class_name]) + ")\n"
        results_file.write(text)
"""
 Draw mAP plot (Show AP's of all classes in decreasing order)
"""
# 'mAP' was computed earlier as sum_AP / n_classes in the per-class AP loop
if draw_plot:
    window_title = "mAP"
    plot_title = "mAP = {0:.2f}%".format(mAP*100)
    x_label = "Average Precision"
    output_path = results_files_path + "/mAP.png"
    to_show = True
    plot_color = 'royalblue'
    draw_plot_func(
        ap_dictionary,
        n_classes,
        window_title,
        plot_title,
        x_label,
        output_path,
        to_show,
        plot_color,
        ""
        )
| 27,755 | 34.768041 | 125 | py |
RioGNN | RioGNN-main/train.py | import os
import argparse
from time import localtime, strftime, time
from sklearn.model_selection import train_test_split
from utils.utils import *
from model.model import *
from model.layers import *
from model.graphsage import *
from RL.rl_model import *
"""
Training and testing RIO-GNN
Paper: Reinforced Neighborhood Selection Guided Multi-Relational Graph Neural Networks
Source: https://github.com/safe-graph/RioGNN
"""
# Command-line interface for train.py.
parser = argparse.ArgumentParser()
# dataset and model dependent args
parser.add_argument('--data', type=str, default='amazon', help='The dataset name. [yelp, amazon, mimic]')
parser.add_argument('--log_path', default='log/', type=str, help="Path of results")
parser.add_argument('--model', type=str, default='RIO', help='The model name. [RIO, SAGE]')
parser.add_argument('--inter', type=str, default='GNN',
                    help='The inter-relation aggregator type. [Att, Weight, Mean, GNN]')
parser.add_argument('--batch_size', type=int, default=1024,
                    help='Batch size 1024 for yelp, 256 for amazon, X for mimic.')
# hyper-parameters
parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')
parser.add_argument('--lambda_1', type=float, default=2, help='Simi loss weight.')
parser.add_argument('--lambda_2', type=float, default=1e-3, help='Weight decay (L2 loss weight).')
parser.add_argument('--emb_size', type=int, default=64, help='Node embedding size at the last layer.')
parser.add_argument('--num_epochs', type=int, default=500, help='Number of epochs.')
parser.add_argument('--test_epochs', type=int, default=3, help='Epoch interval to run test set.')
parser.add_argument('--test_ratio', type=float, default=0.60, help='Test set size.')
parser.add_argument('--under_sample', type=int, default=1, help='Under-sampling scale.')
# other args
parser.add_argument('--use_cuda', default=False, action='store_true', help='Training with CUDA.')
parser.add_argument('--seed', type=int, default=72, help='Random seed.')
# RL args
# NOTE: '--use_cuda' controls the GNN tensors, while '--device' is passed
# separately to the RL actors/critics (see InterAgg construction in __main__).
parser.add_argument('--device', type=str, default="cpu", help='"cuda" if torch.cuda.is_available() else "cpu".')
parser.add_argument('--GAMMA', type=float, default=0.95, help='Actor discount factor.')
parser.add_argument('--LR', type=float, default=0.01, help='Actor learning rate.')
parser.add_argument('--stop_num', type=int, default=3, help='Deep switching or termination conditions.')
parser.add_argument('--ALPHA', type=int, default=10, help='Adjustment parameters for depth and width.')
if __name__ == '__main__':
    print('\n+------------------------------------------------------------------------------------------+\n'
          '* Training and testing RIO-GNN *\n'
          '* Paper: Reinforced Neighborhood Selection Guided Multi-Relational Graph Neural Networks *\n'
          '* Source: https://github.com/safe-graph/RioGNN *\n'
          '\n+------------------------------------------------------------------------------------------+\n', flush=True
          )
    # load hyper-parameters
    args = parser.parse_args()
    # generate log folder (timestamped so repeated runs do not collide)
    log_save_path = args.log_path + 'log_' + strftime("%m%d%H%M%S", localtime())
    os.mkdir(log_save_path)
    print("Log save path: ", log_save_path, flush=True)
    # device
    args.cuda = args.use_cuda and torch.cuda.is_available()
    print("CUDA: " + str(args.cuda), flush=True)
    # load graph, feature, and label
    homo, relations, feat_data, labels, index = load_data(args.data)
    print("Running on: " + str(args.data), flush=True)
    print("The number of relations: " + str(len(relations)), flush=True)
    # train_test split
    np.random.seed(args.seed)
    random.seed(args.seed)
    idx_train, idx_test, y_train, y_test = train_test_split(index, labels, stratify=labels,
                                                            test_size=args.test_ratio, random_state=2, shuffle=True)
    # split pos neg sets for under-sampling
    train_pos, train_neg = pos_neg_split(idx_train, y_train)
    # initialize model input
    features = nn.Embedding(feat_data.shape[0], feat_data.shape[1])
    feat_data = normalize(feat_data)
    features.weight = nn.Parameter(torch.FloatTensor(feat_data), requires_grad=False)
    if args.cuda:
        features.cuda()
    # initialize RL action space: width is fixed by --ALPHA, and height is the
    # smallest depth whose width^height covers the largest neighborhood size
    # of each relation
    width_rl = [args.ALPHA for r in range(len(relations))]
    height_rl = [math.ceil(pow(len(max(relations[r].values(), key=len)), 1 / width_rl[r]))
                 for r in range(len(relations))]
    print('Width of each relation tree: ' + str(width_rl), flush=True)
    print('Height of each relation tree: ' + str(height_rl), flush=True)
    # build one-layer models
    print('Model: {0}, Inter-AGG: {1}, emb_size: {2}.'.format(args.model, args.inter, args.emb_size))
    if args.model == 'RIO':
        adj_lists = relations
        intra_aggs = [IntraAgg(features, feat_data.shape[1], cuda=args.cuda) for r in range(len(relations))]
        inter1 = InterAgg(width_rl, height_rl, args.device, args.LR, args.GAMMA, args.stop_num,
                          features, feat_data.shape[1],
                          args.emb_size, adj_lists,
                          intra_aggs, inter=args.inter,
                          cuda=args.cuda)
        gnn_model = OneLayerRio(2, inter1, args.lambda_1)
    elif args.model == 'SAGE':
        adj_lists = homo
        agg1 = MeanAggregator(features, cuda=args.cuda)
        enc1 = Encoder(features, feat_data.shape[1], args.emb_size, adj_lists, agg1, gcn=True, cuda=args.cuda)
        # the vanilla GraphSAGE model as baseline
        enc1.num_samples = 5
        gnn_model = GraphSage(2, enc1)
    if args.cuda:
        gnn_model.cuda()
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, gnn_model.parameters()), lr=args.lr,
                                 weight_decay=args.lambda_2)
    gnn_auc_train = 0
    start_all_time = time()
    # train the model
    for epoch in range(args.num_epochs):
        print('\n+------------------------------------------------------------------------------------------+\n'
              ' Epoch {0} '
              '\n+------------------------------------------------------------------------------------------+\n'.
              format(epoch), flush=True
              )
        # randomly under-sampling negative nodes for each epoch
        sampled_idx_train = undersample(train_pos, train_neg, scale=args.under_sample)
        rd.shuffle(sampled_idx_train)
        # send number of batches to model to let the RLModule know the training progress
        num_batches = int(len(sampled_idx_train) / args.batch_size) + 1
        if args.model == 'RIO':
            inter1.batch_num = num_batches
            inter1.auc = gnn_auc_train
        # BUG FIX: the original reused the name `loss` both for the per-batch
        # loss tensor and the running total (`loss += loss.item()`), so the
        # printed "epoch loss" was merely twice the last batch's loss.
        # Accumulate into a separate scalar instead.
        total_loss = 0.0
        epoch_time = 0
        # mini-batch training
        for batch in range(num_batches):
            start_time = time()
            i_start = batch * args.batch_size
            i_end = min((batch + 1) * args.batch_size, len(sampled_idx_train))
            batch_nodes = sampled_idx_train[i_start:i_end]
            batch_label = labels[np.array(batch_nodes)]
            optimizer.zero_grad()
            if args.cuda:
                loss = gnn_model.loss(batch_nodes, Variable(torch.cuda.LongTensor(batch_label)))
            else:
                loss = gnn_model.loss(batch_nodes, Variable(torch.LongTensor(batch_label)))
            loss.backward()
            optimizer.step()
            end_time = time()
            epoch_time += end_time - start_time
            total_loss += loss.item()
        print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~', flush=True)
        print('Loss: {0}, time: {1}s'.format(total_loss / num_batches, epoch_time), flush=True)
        # testing the model for every $test_epoch$ epoch
        if epoch % args.test_epochs == 0:
            if args.model == 'SAGE':
                test_sage(idx_test, y_test, gnn_model, args.batch_size)
            else:
                gnn_auc, label_auc, gnn_recall, label_recall = test_rio(idx_test, y_test, gnn_model, args.batch_size)
                gnn_auc_train = test_rio_train(idx_train, y_train, gnn_model, args.batch_size)
        # BUG FIX: termination and RL-log writing only apply to the RL-guided
        # model; `inter1` is undefined for the SAGE baseline, so the original
        # raised NameError on the first epoch with --model SAGE.
        if args.model == 'RIO':
            # termination
            if not inter1.RL:
                break
            # log
            with open(log_save_path + '/thresholds_log.txt', 'w') as file:
                for l in inter1.rl_tree.thresholds_log:
                    file.writelines(str(l) + '\n')
            with open(log_save_path + '/states_log.txt', 'w') as file:
                for l in inter1.rl_tree.states_log:
                    file.writelines(str(l) + '\n')
    # end (note: `epoch` holds the last loop value here)
    print('\n+------------------------------------------------------------------------------------------+\n')
    end_all_time = time()
    total_epoch_time = end_all_time - start_all_time
    print('Total time spent: ' + str(total_epoch_time), flush=True)
    print('Total epoch: ' + str(epoch), flush=True)
| 8,973 | 45.497409 | 120 | py |
RioGNN | RioGNN-main/RL/rl_model.py | from operator import itemgetter
from RL.actor_critic import *
"""
RL Forest.
Paper: Reinforced Neighborhood Selection Guided Multi-Relational Graph Neural Networks
Source: https://github.com/safe-graph/RioGNN
"""
class RLForest:
    """A forest of recursive RL trees, one tree per relation.

    Each relation owns one Actor/Critic pair per tree depth. At a given depth
    the action space discretizes a sub-interval of [0, 1] into width_rl[r]
    bins; once the chosen action has stabilized for `stop_num` consecutive
    epochs, the search descends one depth and refines the interval around the
    best thresholds seen so far (tracked via `max_auc`/`max_thresholds`).
    """
    def __init__(self, width_rl, height_rl, device, LR, GAMMA, stop_num, r_num):
        """
        Initialize the RL Forest.
        :param width_rl: width of each relation tree
        :param height_rl: height of each relation tree
        :param device: "cuda" / "cpu"
        :param LR: Actor learning rate (hyper-parameters of AC)
        :param GAMMA: Actor discount factor (hyper-parameters of AC)
        :param stop_num: deep switching or termination conditions
        :param r_num: the number of relations
        """
        # one Actor/Critic pair per depth level, per relation
        self.actors = [[Actor(1, width_rl[r], device, LR) for j in range(height_rl[r])]
                       for r in range(r_num)]
        self.critics = [[Critic(1, width_rl[r], device, LR, GAMMA) for j in range(height_rl[r])]
                        for r in range(r_num)]
        self.r_num = r_num
        # current RLT depth for each relation
        self.init_rl = [0 for r in range(r_num)]
        # number of epochs performed at the current depth for each relation
        self.init_termination = [0 for r in range(r_num)]
        # action interval of current depth for each relation
        self.init_action = [0 for r in range(r_num)]
        # backtracking
        self.max_auc = 0
        self.max_thresholds = [0 for r in range(r_num)]
        # termination and boundary conditions
        self.width = list(width_rl)
        self.stop_num = stop_num
        # log
        self.thresholds_log = []
        self.actions_log = []
        self.states_log = []
        self.scores_log = []
        self.rewards_log = []
    def get_threshold(self, scores, labels, previous_thresholds, batch_num, auc):
        """
        The reinforcement learning module.
        It updates the neighbor filtering threshold for each relation based
        on the average neighbor distances between two consecutive epochs.
        :param scores: the neighbor nodes label-aware scores for each relation
        :param labels: the batch node labels used to select positive nodes
        :param previous_thresholds: the current neighbor filtering thresholds for each relation
        :param batch_num: numbers batches in an epoch
        :param auc: the auc of the previous filter thresholds for each relation
        :returns: (new per-relation thresholds, flag that becomes False once
            every relation tree has converged)
        """
        new_scores = get_scores(scores, labels)
        # counts relations whose tree has fully converged on this call
        rl_flag0 = 0
        # during the epoch
        if len(self.scores_log) % batch_num != 0 or len(self.scores_log) < batch_num:
            # do not call RL module within the epoch or within the first two epochs
            new_thresholds = list(previous_thresholds)
        # after completing each epoch
        else:
            # STATE
            # get current states according to average scores
            # Eq.(8) in the paper
            current_epoch_states = [sum(s) / batch_num for s in zip(*self.scores_log[-batch_num:])]
            new_states = [np.array([s], float) for i, s in enumerate(current_epoch_states)]
            # backtracking
            if auc >= self.max_auc:
                self.max_auc = auc
                self.max_thresholds = list(previous_thresholds)
            new_actions = [0 for r in range(self.r_num)]
            new_thresholds = [0 for r in range(self.r_num)]
            # the first epoch
            if len(self.states_log) == 0:
                # update the record of the number of epochs in the current depth
                self.init_termination = [i + 1 for i in self.init_termination]
                # ACTION
                # get current actions for current states
                # Eq.(11) in the paper
                for r_num in range(self.r_num):
                    new_actions[r_num], new_thresholds[r_num] = self.get_action(new_states, r_num)
            # after the first epoch
            else:
                # STATE
                # get previous states
                previous_states = self.states_log[-1]
                # ACTION
                # get previous actions
                previous_actions = self.actions_log[-1]
                # REWARD
                # compute reward for each relation
                # Eq. (9) in the paper
                # NOTE(review): the reward is the raw average-distance state,
                # with -100 penalizing thresholds outside (0, 1] — confirm
                # this matches the paper's Eq. (9).
                new_rewards = [s if 0 < previous_thresholds[i] and previous_thresholds[i] <= 1 else -100 for i, s in
                               enumerate(current_epoch_states)]
                # determine whether to enter the next depth
                r_flag = self.adjust_depth()
                # after the smallest continuous epoch
                for r_num in range(self.r_num):
                    # go to the next depth
                    if r_flag[r_num] == 1:
                        if len(self.actors[r_num]) == self.init_rl[r_num] + 1:
                            # relation tree remains unchanged after converging
                            self.init_termination[r_num] = self.init_termination[r_num]
                            # ACTION
                            new_actions[r_num] = previous_actions[r_num]
                            new_thresholds[r_num] = self.max_thresholds[r_num]
                            rl_flag0 += 1
                            print("Relation {0} is complete !!!!!".format(str(r_num + 1)), flush=True)
                        else:
                            # update the parameter space when entering the next depth
                            # Eq. (7) in the paper
                            self.init_termination[r_num] = 0
                            self.init_rl[r_num] = self.init_rl[r_num] + 1
                            self.init_action[r_num] = self.max_thresholds[r_num] - (self.width[r_num] / 2) * \
                                                      pow(1 / self.width[r_num], self.init_rl[r_num] + 1)
                            # ACTION
                            # Eq. (11) in the paper
                            new_actions[r_num], new_thresholds[r_num] = self.get_action(new_states, r_num)
                    # keep current depth
                    else:
                        self.init_termination[r_num] = self.init_termination[r_num] + 1
                        # POLICY
                        # Eq. (10) in the paper
                        self.learn(previous_states, previous_actions, new_states, new_rewards, r_num)
                        # ACTION
                        # Eq. (11) in the paper
                        new_actions[r_num], new_thresholds[r_num] = self.get_action(new_states, r_num)
                self.rewards_log.append(new_rewards)
                print('Rewards: ' + str(new_rewards), flush=True)
            self.states_log.append(new_states)
            print('States: ' + str(new_states), flush=True)
            self.thresholds_log.append(new_thresholds)
            print('Thresholds: ' + str(new_thresholds), flush=True)
            self.actions_log.append(new_actions)
        self.scores_log.append(new_scores)
        print("Historical maximum AUC: " + str(self.max_auc), flush=True)
        print("Thresholds to obtain the historical maximum AUC: " + str(self.max_thresholds), flush=True)
        print('Current depth of each RL Tree: ' + str(self.init_rl), flush=True)
        # RLF termination
        rl_flag = False if rl_flag0 == self.r_num else True
        print('Completion flag of the entire RL Forest: ' + str(rl_flag), flush=True)
        return new_thresholds, rl_flag
    def learn(self, previous_states, previous_actions, new_states, new_rewards, r_num):
        """
        One Actor-Critic update at the current depth of relation ``r_num``.
        :param previous_states: the previous states
        :param previous_actions: the previous actions
        :param new_states: the current states
        :param new_rewards: the current rewards
        :param r_num: the index of relation
        """
        # critic first: its TD-error is the advantage signal fed to the actor
        td_error = self.critics[r_num][self.init_rl[r_num]].train_Q_network(previous_states[r_num],
                                                                            new_rewards[r_num],
                                                                            new_states[r_num])
        self.actors[r_num][self.init_rl[r_num]].learn(previous_states[r_num],
                                                      previous_actions[r_num],
                                                      td_error)
        return
    def get_action(self, new_states, r_num):
        """
        :param new_states: the current states
        :param r_num: the index of relation
        :returns: new actions and thresholds for new_states under relation r_num
        """
        new_actions = self.actors[r_num][self.init_rl[r_num]].choose_action(new_states[r_num])
        # map the discrete action index onto the current depth's sub-interval,
        # whose bin size shrinks by 1/width per depth level
        new_thresholds = self.init_action[r_num] + (new_actions + 1) * \
                         pow(1 / self.width[r_num], self.init_rl[r_num] + 1)
        # thresholds are clipped at 1
        new_thresholds = 1 if new_thresholds >= 1 else new_thresholds
        return new_actions, new_thresholds
    def adjust_depth(self):
        """
        :returns: the depth flag of each relation (1 = descend to next depth)
        """
        # a relation may descend only after spending more than stop_num epochs
        # at this depth AND repeating the same action stop_num-1 times in a row
        r_flag = [1 for r in range(self.r_num)]
        for r_num in range(self.r_num):
            if self.init_termination[r_num] > self.stop_num:
                for s in range(self.stop_num - 1):
                    r_flag[r_num] = r_flag[r_num] * (
                        1 if self.actions_log[-1 * (s + 1)][r_num] == self.actions_log[-1 * (s + 2)][r_num] else 0
                    )
            else:
                r_flag[r_num] = 0
        return r_flag
def get_scores(scores, labels):
    """
    Get the scores of current batch.
    :param scores: the neighbor nodes label-aware scores for each relation;
        each relation entry is indexable per batch node and each node's
        entry is either a single float or a list of per-neighbor scores
    :param labels: the batch node labels used to select positive nodes
    :returns: per-relation average neighbor score over the positive nodes
    """
    relation_scores = []
    # only compute the average neighbor distances for positive nodes
    pos_index = [i[0] for i in (labels == 1).nonzero().tolist()]
    # compute average neighbor distances for each relation
    for score in scores:
        # BUG FIX: the original used itemgetter(*pos_index)(score), which
        # returns the bare element (not a tuple) when there is exactly one
        # positive node; iterating that element then crashed for scalar
        # scores or mis-counted list scores. Plain indexing is always safe.
        pos_scores = [score[i] for i in pos_index]
        neigh_count = sum(1 if isinstance(s, float) else len(s) for s in pos_scores)
        pos_sum = [s if isinstance(s, float) else sum(s) for s in pos_scores]
        relation_scores.append(sum(pos_sum) / neigh_count)
    return relation_scores
| 10,498 | 41.506073 | 116 | py |
RioGNN | RioGNN-main/RL/actor_critic.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
"""
Actor-Critic implementations
Paper: Actor-Critic Algorithms
Source: https://github.com/llSourcell/actor_critic
"""
# torch.backends.cudnn.enabled = False # Non-deterministic algorithm
class PGNetwork(nn.Module):
    """Policy network: a small MLP mapping a state vector to action logits."""

    def __init__(self, state_dim, action_dim):
        """
        Initialize PGNetwork.
        :param state_dim: dimension of the state
        :param action_dim: dimension of the action
        """
        super(PGNetwork, self).__init__()
        self.fc1 = nn.Linear(state_dim, 20)
        self.fc2 = nn.Linear(20, action_dim)

    def forward(self, x):
        """Return unnormalized action logits for state ``x``."""
        out = F.relu(self.fc1(x))
        out = self.fc2(out)
        return out

    def initialize_weights(self):
        """Re-initialize all Linear layers (N(0, 0.1) weights, 0.01 biases).

        BUG FIX: ``self.modules()`` yields the container module itself first,
        which has no ``.weight``/``.bias``, so the original raised
        AttributeError; only Linear layers are initialized now.
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                nn.init.constant_(m.bias.data, 0.01)
class Actor(object):
    """Policy-gradient actor that samples discrete actions from a softmax
    policy and updates the policy network using the critic's TD-error."""

    def __init__(self, state_dim, action_dim, device, LR):
        # Dimensions of state space and action space
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.device = device
        self.LR = LR
        # init network parameters
        self.network = PGNetwork(state_dim=self.state_dim, action_dim=self.action_dim).to(self.device)
        self.optimizer = torch.optim.Adam(self.network.parameters(), lr=self.LR)
        # init some parameters
        self.time_step = 0

    def choose_action(self, observation):
        """Sample an action index from the softmax policy at ``observation``."""
        obs_tensor = torch.FloatTensor(observation).to(self.device)
        logits = self.network.forward(obs_tensor)
        with torch.no_grad():
            # probabilities over the discrete action set
            action_probs = F.softmax(logits, dim=0).data.cpu().numpy()
        # select action w.r.t the actions prob
        return np.random.choice(range(action_probs.shape[0]), p=action_probs)

    def learn(self, state, action, td_error):
        """One policy-gradient step for (state, action) weighted by td_error."""
        self.time_step += 1
        # Step 1: Forward propagation
        logits = self.network.forward(torch.FloatTensor(state).to(self.device)).unsqueeze(0)
        target = torch.LongTensor([action]).to(self.device)
        # cross_entropy yields -log pi(action | state)
        neg_log_prob = F.cross_entropy(input=logits, target=target, reduction='none')
        # Step 2: Backpropagation
        # Here you need to maximize the value of the current strategy,
        # so you need to maximize "neg_log_prob * tf_error", that is, minimize "-neg_log_prob * td_error"
        actor_loss = -neg_log_prob * td_error
        self.optimizer.zero_grad()
        actor_loss.backward()
        self.optimizer.step()
class QNetwork(nn.Module):
    """Value network: a small MLP mapping a state vector to a scalar V(s)."""

    def __init__(self, state_dim, action_dim):
        # action_dim is kept for interface symmetry with PGNetwork; the
        # value head always outputs a single scalar.
        super(QNetwork, self).__init__()
        self.fc1 = nn.Linear(state_dim, 20)
        self.fc2 = nn.Linear(20, 1)

    def forward(self, x):
        """Return the scalar state-value estimate for state ``x``."""
        out = F.relu(self.fc1(x))
        out = self.fc2(out)
        return out

    def initialize_weights(self):
        """Re-initialize all Linear layers (N(0, 0.1) weights, 0.01 biases).

        BUG FIX: ``self.modules()`` yields the container module itself first,
        which has no ``.weight``/``.bias``, so the original raised
        AttributeError; only Linear layers are initialized now.
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                nn.init.constant_(m.bias.data, 0.01)
class Critic(object):
    """TD(0) critic: learns a state-value estimate V(s) and returns the
    TD-error used by the Actor as its advantage signal."""
    def __init__(self, state_dim, action_dim, device, LR, GAMMA):
        # Dimensions of state space and action space
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.device = device
        self.LR = LR
        self.GAMMA = GAMMA
        # init network parameters
        self.network = QNetwork(state_dim=self.state_dim, action_dim=self.action_dim).to(self.device)
        self.optimizer = torch.optim.Adam(self.network.parameters(), lr=self.LR)
        self.loss_func = nn.MSELoss()
    def train_Q_network(self, state, reward, next_state):
        """One TD(0) update toward the bootstrap target r + GAMMA * V(s');
        returns the (no-grad) TD-error r + GAMMA * V(s') - V(s)."""
        s, s_ = torch.FloatTensor(state).to(self.device), torch.FloatTensor(next_state).to(self.device)
        # Forward propagation
        v = self.network.forward(s) # v(s)
        v_ = self.network.forward(s_) # v(s')
        # Backpropagation
        # NOTE(review): the target `reward + GAMMA * v_` is not detached, so
        # gradients also flow through v(s'); a detached target is the standard
        # TD(0) formulation — confirm this is intended.
        loss_q = self.loss_func(reward + self.GAMMA * v_, v)
        self.optimizer.zero_grad()
        loss_q.backward()
        self.optimizer.step()
        # TD-error is computed after the update, without tracking gradients
        with torch.no_grad():
            td_error = reward + self.GAMMA * v_ - v
        return td_error
| 4,388 | 32.761538 | 105 | py |
RioGNN | RioGNN-main/utils/data_process.py | from utils.utils import sparse_to_adjlist
from scipy.io import loadmat
"""
Read data and save the adjacency matrices to adjacency lists
Paper: Reinforced Neighborhood Selection Guided Multi-Relational Graph Neural Networks
Source: https://github.com/safe-graph/RioGNN
"""
if __name__ == "__main__":
    prefix = './data/'
    # (path to the .mat file, [(key inside the .mat, output pickle name), ...]);
    # list order preserves the original save order for each dataset.
    datasets = [
        ('data/YelpChi.mat', [('net_rur', 'yelp_rur_adjlists.pickle'),
                              ('net_rtr', 'yelp_rtr_adjlists.pickle'),
                              ('net_rsr', 'yelp_rsr_adjlists.pickle'),
                              ('homo', 'yelp_homo_adjlists.pickle')]),
        ('data/Amazon.mat', [('net_upu', 'amz_upu_adjlists.pickle'),
                             ('net_usu', 'amz_usu_adjlists.pickle'),
                             ('net_uvu', 'amz_uvu_adjlists.pickle'),
                             ('homo', 'amz_homo_adjlists.pickle')]),
        ('data/Mimic.mat', [('rel_vav', 'mic_vav_adjlists.pickle'),
                            ('rel_vdv', 'mic_vdv_adjlists.pickle'),
                            ('rel_vmv', 'mic_vmv_adjlists.pickle'),
                            ('rel_vpv', 'mic_vpv_adjlists.pickle'),
                            ('homo', 'mic_homo_adjlists.pickle')]),
    ]
    # Convert every relation's sparse adjacency matrix to a pickled adjlist.
    for mat_path, entries in datasets:
        mat = loadmat(mat_path)
        for key, pickle_name in entries:
            sparse_to_adjlist(mat[key], prefix + pickle_name)
| 1,642 | 30.596154 | 87 | py |
RioGNN | RioGNN-main/utils/utils.py | import pickle
import random as rd
import numpy as np
import scipy.sparse as sp
from scipy.io import loadmat
import copy as cp
from sklearn.metrics import f1_score, accuracy_score, recall_score, roc_auc_score, average_precision_score
from collections import defaultdict
"""
Utility functions to handle data and evaluate model.
Paper: Reinforced Neighborhood Selection Guided Multi-Relational Graph Neural Networks
Source: https://github.com/safe-graph/RioGNN
"""
def load_data(data):
    """
    Load graph, feature, and label
    :param data: the dataset name.
    :returns: home and single-relation graphs, feature, label, index
    """
    prefix = 'data/'
    # Per-dataset spec: (.mat file, densify-features flag, homo-graph pickle,
    # relation pickles in model order). Note: for amazon, nodes 0-3304 are
    # unlabeled but are still kept in the index (matching the original).
    specs = {
        'yelp': ('YelpChi.mat', True, 'yelp_homo_adjlists.pickle',
                 ['yelp_rur_adjlists.pickle', 'yelp_rtr_adjlists.pickle',
                  'yelp_rsr_adjlists.pickle']),
        'amazon': ('Amazon.mat', True, 'amz_homo_adjlists.pickle',
                   ['amz_upu_adjlists.pickle', 'amz_usu_adjlists.pickle',
                    'amz_uvu_adjlists.pickle']),
        'mimic': ('Mimic.mat', False, 'mic_homo_adjlists.pickle',
                  ['mic_vmv_adjlists.pickle', 'mic_vav_adjlists.pickle',
                   'mic_vpv_adjlists.pickle', 'mic_vdv_adjlists.pickle']),
    }
    mat_name, densify, homo_name, relation_names = specs[data]
    data_file = loadmat(prefix + mat_name)
    labels = data_file['label'].flatten()
    # yelp/amazon store features sparsely; mimic already holds a dense array
    feat_data = data_file['features'].todense().A if densify else data_file['features']
    # load the preprocessed adj_lists
    with open(prefix + homo_name, 'rb') as file:
        homo = pickle.load(file)
    relations = []
    for rel_name in relation_names:
        with open(prefix + rel_name, 'rb') as file:
            relations.append(pickle.load(file))
    index = list(range(len(labels)))
    return homo, relations, feat_data, labels, index
def pos_neg_split(nodes, labels):
    """
    Find positive and negative nodes given a list of nodes and their labels.

    :param nodes: a list of nodes, aligned element-wise with ``labels``
    :param labels: a list of node labels (1 marks a positive node)
    :returns: (pos_nodes, neg_nodes) as new lists preserving input order
    """
    # Single pass instead of the original deepcopy + list.remove scheme,
    # which was O(n^2); equivalent for the unique node ids used here.
    pos_nodes = []
    neg_nodes = []
    for node, label in zip(nodes, labels):
        if label == 1:
            pos_nodes.append(node)
        else:
            neg_nodes.append(node)
    return pos_nodes, neg_nodes
def normalize(mx):
    """
    Row-normalize sparse matrix
    Code from https://github.com/williamleif/graphsage-simple/
    :param mx: original Matrix
    :returns: the normalized matrix
    """
    # smoothed row sums; the +0.01 keeps empty rows from dividing by zero
    row_totals = np.array(mx.sum(1)) + 0.01
    inv_totals = np.power(row_totals, -1).flatten()
    # any remaining infinities (overflowed reciprocals) are zeroed out
    inv_totals[np.isinf(inv_totals)] = 0.
    # scale each row by its reciprocal total via a diagonal matrix product
    return sp.diags(inv_totals).dot(mx)
def sparse_to_adjlist(sp_matrix, filename):
    """
    Transfer sparse matrix to adjacency list and pickle it to ``filename``.

    :param sp_matrix: the sparse matrix
    :param filename: the filename of adjlist
    """
    # add self loop
    homo_adj = sp_matrix + sp.eye(sp_matrix.shape[0])
    # create a symmetric adj_list from the nonzero entries
    adj_lists = defaultdict(set)
    rows, cols = homo_adj.nonzero()
    for u, v in zip(rows, cols):
        adj_lists[u].add(v)
        adj_lists[v].add(u)
    # the 'with' block closes the file; the original's explicit close()
    # inside it was redundant
    with open(filename, 'wb') as file:
        pickle.dump(adj_lists, file)
def undersample(pos_nodes, neg_nodes, scale=1):
    """
    Under-sample the negative nodes
    :param pos_nodes: a list of positive nodes
    :param neg_nodes: a list negative nodes
    :param scale: the under-sampling scale
    :return: a list of under-sampled batch nodes
    """
    # random.sample never mutates its population, so the deepcopy the
    # original made before sampling was pure overhead; with the same RNG
    # state the drawn sample is identical.
    sampled_neg = rd.sample(neg_nodes, k=int(len(pos_nodes) * scale))
    return pos_nodes + sampled_neg
def test_sage(test_cases, labels, model, batch_size):
    """
    Test the performance of GraphSAGE
    :param test_cases: a list of testing node
    :param labels: a list of testing node labels
    :param model: the GNN model
    :param batch_size: number nodes in a batch
    """
    num_batches = int(len(test_cases) / batch_size) + 1
    f1_total, acc_total, recall_total = 0.0, 0.0, 0.0
    prob_list = []
    for batch_idx in range(num_batches):
        start = batch_idx * batch_size
        end = min((batch_idx + 1) * batch_size, len(test_cases))
        batch_nodes = test_cases[start:end]
        batch_label = labels[start:end]
        gnn_prob = model.to_prob(batch_nodes)
        # hard predictions via argmax over the two class probabilities
        preds = gnn_prob.data.cpu().numpy().argmax(axis=1)
        f1_total += f1_score(batch_label, preds, average="macro")
        acc_total += accuracy_score(batch_label, preds)
        recall_total += recall_score(batch_label, preds, average="macro")
        # keep the positive-class probabilities for the ranking metrics
        prob_list.extend(gnn_prob.data.cpu().numpy()[:, 1].tolist())
    auc_gnn = roc_auc_score(labels, np.array(prob_list))
    ap_gnn = average_precision_score(labels, np.array(prob_list))
    print(f"GNN F1: {f1_total / num_batches:.4f}")
    print(f"GNN Accuracy: {acc_total / num_batches:.4f}")
    print(f"GNN Recall: {recall_total / num_batches:.4f}")
    print(f"GNN auc: {auc_gnn:.4f}")
    print(f"GNN ap: {ap_gnn:.4f}")
def test_rio(test_cases, labels, model, batch_size):
    """
    Test the performance of Rio-GNN and its variants.
    :param test_cases: a list of testing nodes
    :param labels: a list of testing node labels
    :param model: the GNN model (must expose ``to_prob(nodes, labels, train_flag)``)
    :param batch_size: number of nodes in a batch
    :returns: the AUC and Recall of GNN and Simi modules
    """
    # BUGFIX: the old ``int(len(...) / batch_size) + 1`` produced an extra,
    # empty trailing batch whenever len(test_cases) is an exact multiple of
    # batch_size, which breaks the metric calls on empty inputs.
    test_batch_num = (len(test_cases) + batch_size - 1) // batch_size
    f1_gnn = 0.0
    acc_gnn = 0.0
    recall_gnn = 0.0
    f1_label1 = 0.0
    acc_label1 = 0.0
    recall_label1 = 0.0
    gnn_list = []
    label_list1 = []
    for iteration in range(test_batch_num):
        i_start = iteration * batch_size
        i_end = min((iteration + 1) * batch_size, len(test_cases))
        batch_nodes = test_cases[i_start:i_end]
        batch_label = labels[i_start:i_end]
        gnn_prob, label_prob1 = model.to_prob(batch_nodes, batch_label, train_flag=False)
        # convert each tensor once per batch instead of once per metric
        gnn_np = gnn_prob.data.cpu().numpy()
        label_np = label_prob1.data.cpu().numpy()
        gnn_pred = gnn_np.argmax(axis=1)
        label_pred = label_np.argmax(axis=1)
        f1_gnn += f1_score(batch_label, gnn_pred, average="macro")
        acc_gnn += accuracy_score(batch_label, gnn_pred)
        recall_gnn += recall_score(batch_label, gnn_pred, average="macro")
        f1_label1 += f1_score(batch_label, label_pred, average="macro")
        acc_label1 += accuracy_score(batch_label, label_pred)
        recall_label1 += recall_score(batch_label, label_pred, average="macro")
        # keep the positive-class probabilities for the ranking metrics
        gnn_list.extend(gnn_np[:, 1].tolist())
        label_list1.extend(label_np[:, 1].tolist())
    auc_gnn = roc_auc_score(labels, np.array(gnn_list))
    ap_gnn = average_precision_score(labels, np.array(gnn_list))
    auc_label1 = roc_auc_score(labels, np.array(label_list1))
    ap_label1 = average_precision_score(labels, np.array(label_list1))
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~', flush=True)
    print(f"TEST GNN F1: {f1_gnn / test_batch_num:.4f}")
    print(f"TEST GNN Accuracy: {acc_gnn / test_batch_num:.4f}")
    print(f"TEST GNN Recall: {recall_gnn / test_batch_num:.4f}")
    print(f"TEST GNN auc: {auc_gnn:.4f}")
    print(f"TEST GNN ap: {ap_gnn:.4f}")
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~', flush=True)
    print(f"TEST Label1 F1: {f1_label1 / test_batch_num:.4f}")
    print(f"TEST Label1 Accuracy: {acc_label1 / test_batch_num:.4f}")
    print(f"TEST Label1 Recall: {recall_label1 / test_batch_num:.4f}")
    print(f"TEST Label1 auc: {auc_label1:.4f}")
    print(f"TEST Label1 ap: {ap_label1:.4f}")
    return auc_gnn, auc_label1, recall_gnn, recall_label1
def test_rio_train(train_cases, labels, model, batch_size):
    """
    Evaluate performance on the training set.
    :param train_cases: a list of training nodes
    :param labels: a list of training node labels
    :param model: the GNN model (must expose ``to_prob(nodes, labels, train_flag)``)
    :param batch_size: number of nodes in a batch
    :returns: the AUC of the GNN module
    """
    # BUGFIX: the old ``int(len(...) / batch_size) + 1`` produced an extra,
    # empty trailing batch whenever len(train_cases) is an exact multiple of
    # batch_size, which breaks the metric calls on empty inputs.
    # Also removed: F1/accuracy and all Simi-module accumulators that were
    # computed every batch but never printed or returned.
    test_batch_num = (len(train_cases) + batch_size - 1) // batch_size
    recall_gnn = 0.0
    gnn_list = []
    for iteration in range(test_batch_num):
        i_start = iteration * batch_size
        i_end = min((iteration + 1) * batch_size, len(train_cases))
        batch_nodes = train_cases[i_start:i_end]
        batch_label = labels[i_start:i_end]
        # the Simi-module scores are not needed for the training-set report
        gnn_prob, _ = model.to_prob(batch_nodes, batch_label, train_flag=False)
        gnn_np = gnn_prob.data.cpu().numpy()
        recall_gnn += recall_score(batch_label, gnn_np.argmax(axis=1), average="macro")
        gnn_list.extend(gnn_np[:, 1].tolist())
    auc_gnn = roc_auc_score(labels, np.array(gnn_list))
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~', flush=True)
    print(f"TRAIN GNN Recall: {recall_gnn / test_batch_num:.4f}")
    print(f"TRAIN GNN auc: {auc_gnn:.4f}")
    return auc_gnn
| 11,237 | 38.293706 | 114 | py |
RioGNN | RioGNN-main/model/graphsage.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
from torch.autograd import Variable
import random
"""
GraphSAGE implementations
Paper: Inductive Representation Learning on Large Graphs
Source: https://github.com/williamleif/graphsage-simple/
"""
class GraphSage(nn.Module):
    """
    Vanilla GraphSAGE model: a linear classification head on top of a
    GraphSAGE encoder.
    Code partially from https://github.com/williamleif/graphsage-simple/
    """

    def __init__(self, num_classes, enc):
        super(GraphSage, self).__init__()
        self.enc = enc
        self.xent = nn.CrossEntropyLoss()
        # projects encoder embeddings onto per-class scores
        self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))
        init.xavier_uniform_(self.weight)

    def forward(self, nodes):
        # encode the batch, then map embeddings to one score per class
        node_embeddings = self.enc(nodes)
        class_scores = torch.mm(self.weight, node_embeddings)
        return class_scores.t()

    def to_prob(self, nodes):
        # squash raw class scores into (0, 1)
        return torch.sigmoid(self.forward(nodes))

    def loss(self, nodes, labels):
        return self.xent(self.forward(nodes), labels.squeeze())
class MeanAggregator(nn.Module):
    """
    Aggregates a node's embeddings using mean of neighbors' embeddings
    """

    def __init__(self, features, cuda=False, gcn=False):
        """
        Initializes the aggregator for a specific graph.
        features -- function mapping LongTensor of node ids to FloatTensor of feature values.
        cuda -- whether to use GPU
        gcn --- whether to perform concatenation GraphSAGE-style, or add self-loops GCN-style
        """
        super(MeanAggregator, self).__init__()
        self.features = features
        self.cuda = cuda
        self.gcn = gcn

    def forward(self, nodes, to_neighs, num_sample=10):
        """
        nodes --- list of nodes in a batch
        to_neighs --- list of sets, each set is the set of neighbors for node in batch
        num_sample --- number of neighbors to sample. No sampling if None.
        """
        # Local pointers to functions (speed hack)
        _set = set
        if num_sample is not None:  # idiom fix: was ``not num_sample is None``
            _sample = random.sample
            # BUGFIX: random.sample() rejects sets since Python 3.11, so the
            # neighbor set must be materialized as a list before sampling.
            samp_neighs = [_set(_sample(list(to_neigh), num_sample))
                           if len(to_neigh) >= num_sample else to_neigh
                           for to_neigh in to_neighs]
        else:
            samp_neighs = to_neighs

        if self.gcn:
            # GCN-style: add a self-loop for each batch node
            samp_neighs = [samp_neigh.union(set([int(nodes[i])])) for i, samp_neigh in enumerate(samp_neighs)]
        unique_nodes_list = list(set.union(*samp_neighs))
        unique_nodes = {n: i for i, n in enumerate(unique_nodes_list)}
        # build a (batch x unique-neighbor) membership mask; row i will be
        # normalized so that mask.mm(...) averages node i's sampled neighbors
        mask = Variable(torch.zeros(len(samp_neighs), len(unique_nodes)))
        column_indices = [unique_nodes[n] for samp_neigh in samp_neighs for n in samp_neigh]
        row_indices = [i for i in range(len(samp_neighs)) for _ in range(len(samp_neighs[i]))]
        mask[row_indices, column_indices] = 1
        if self.cuda:
            mask = mask.cuda()
        num_neigh = mask.sum(1, keepdim=True)
        mask = mask.div(num_neigh)
        if self.cuda:
            embed_matrix = self.features(torch.LongTensor(unique_nodes_list).cuda())
        else:
            embed_matrix = self.features(torch.LongTensor(unique_nodes_list))
        to_feats = mask.mm(embed_matrix)
        return to_feats
class Encoder(nn.Module):
    """
    Vanilla GraphSAGE Encoder Module
    Encodes a node's using 'convolutional' GraphSage approach
    """

    def __init__(self, features, feature_dim,
                 embed_dim, adj_lists, aggregator,
                 num_sample=10,
                 base_model=None, gcn=False, cuda=False,
                 feature_transform=False):
        super(Encoder, self).__init__()

        self.features = features
        self.feat_dim = feature_dim
        self.adj_lists = adj_lists
        self.aggregator = aggregator
        self.num_sample = num_sample
        if base_model is not None:  # idiom fix: was ``!= None``
            self.base_model = base_model
        self.gcn = gcn
        self.embed_dim = embed_dim
        self.cuda = cuda
        self.aggregator.cuda = cuda
        # GraphSAGE-style concatenates self- and neighbor-features
        # (2 * feat_dim); GCN-style folds the self-loop into the
        # neighborhood, so only feat_dim columns are needed
        self.weight = nn.Parameter(
            torch.FloatTensor(embed_dim, self.feat_dim if self.gcn else 2 * self.feat_dim))
        init.xavier_uniform_(self.weight)

    def forward(self, nodes):
        """
        Generates embeddings for a batch of nodes.
        nodes -- list of nodes
        """
        neigh_feats = self.aggregator.forward(nodes, [self.adj_lists[int(node)] for node in nodes],
                                              self.num_sample)
        if isinstance(nodes, list):
            index = torch.LongTensor(nodes)
            # BUGFIX: .cuda() used to be called unconditionally here, which
            # crashed on CPU-only machines; only move when self.cuda is set.
            if self.cuda:
                index = index.cuda()
        else:
            index = nodes
        if not self.gcn:
            # the original had identical cuda/non-cuda branches here;
            # collapsed into a single lookup
            self_feats = self.features(index)
            combined = torch.cat((self_feats, neigh_feats), dim=1)
        else:
            combined = neigh_feats
        combined = F.relu(self.weight.mm(combined.t()))
        return combined
RioGNN | RioGNN-main/model/model.py | import torch
import torch.nn as nn
from torch.nn import init
from torch.autograd import Variable
"""
Rio-GNN Models
Paper: Reinforced Neighborhood Selection Guided Multi-Relational Graph Neural Networks
Source: https://github.com/safe-graph/RioGNN
"""
class OneLayerRio(nn.Module):
    """
    Single-layer Rio-GNN: an inter-relation aggregator followed by a
    linear classification head.
    """

    def __init__(self, num_classes, inter1, lambda_1):
        """
        Initialize the Rio-GNN model
        :param num_classes: number of classes (2 in our paper)
        :param inter1: the inter-relation aggregator that output the final embedding
        :param lambda_1: weight of the label-aware (Simi) loss term
        """
        super(OneLayerRio, self).__init__()
        self.inter1 = inter1
        self.xent = nn.CrossEntropyLoss()
        # linear transformation applied to the final embedding
        self.weight = nn.Parameter(torch.FloatTensor(num_classes, inter1.embed_dim))
        init.xavier_uniform_(self.weight)
        self.lambda_1 = lambda_1

    def forward(self, nodes, labels, train_flag=True):
        node_embeds, label_scores = self.inter1(nodes, labels, train_flag)
        gnn_scores = torch.mm(self.weight, node_embeds).t()
        return gnn_scores, label_scores

    def to_prob(self, nodes, labels, train_flag=True):
        gnn_logits, label_logits = self.forward(nodes, labels, train_flag)
        return torch.sigmoid(gnn_logits), torch.sigmoid(label_logits)

    def loss(self, nodes, labels, train_flag=True):
        gnn_scores, label_scores = self.forward(nodes, labels, train_flag)
        targets = labels.squeeze()
        # Simi loss, Eq. (4) in the paper
        label_loss = self.xent(label_scores, targets)
        # GNN loss, Eq. (10) in the paper
        gnn_loss = self.xent(gnn_scores, targets)
        # combined objective of Rio-GNN, Eq. (11) in the paper
        return gnn_loss + self.lambda_1 * label_loss
class TwoLayerRio(nn.Module):
    """
    Two-layer Rio-GNN: the second-layer inter-relation aggregator feeds a
    linear classification head, while the first layer's label scores are
    kept around for the Simi loss term.
    """

    def __init__(self, num_classes, inter1, inter2, lambda_1, last_label_scores):
        """
        Initialize the Rio-GNN model
        :param num_classes: number of classes (2 in our paper)
        :param inter1: first-layer inter-relation aggregator
        :param inter2: second-layer inter-relation aggregator that outputs the final embedding
        :param lambda_1: weight of the label-aware (Simi) loss term
        :param last_label_scores: label scores produced by the first layer
        """
        super(TwoLayerRio, self).__init__()
        self.inter1 = inter1
        self.inter2 = inter2
        self.xent = nn.CrossEntropyLoss()
        # linear transformation applied to the final embedding
        self.weight = nn.Parameter(torch.FloatTensor(num_classes, inter2.embed_dim))
        init.xavier_uniform_(self.weight)
        self.lambda_1 = lambda_1
        self.last_label_scores = last_label_scores

    def forward(self, nodes, labels, train_flag=True):
        # layer-one label scores are reused from the stored snapshot
        scores_layer_one = self.last_label_scores
        embeds, scores_layer_two = self.inter2(nodes, labels, train_flag)
        gnn_scores = torch.mm(self.weight, embeds).t()
        return gnn_scores, scores_layer_one, scores_layer_two

    def to_prob(self, nodes, labels, train_flag=True):
        gnn_logits, logits_one, logits_two = self.forward(nodes, labels, train_flag)
        return (torch.sigmoid(gnn_logits),
                torch.sigmoid(logits_one),
                torch.sigmoid(logits_two))

    def loss(self, nodes, labels, train_flag=True):
        gnn_scores, scores_one, scores_two = self.forward(nodes, labels, train_flag)
        targets = labels.squeeze()
        # Simi loss, Eq. (4) in the paper; the layer-two term is computed
        # but deliberately left out of the objective, mirroring the
        # original implementation (which kept the sum commented out)
        label_loss_one = self.xent(scores_one, targets)
        label_loss_two = self.xent(scores_two, targets)
        # GNN loss, Eq. (10) in the paper
        gnn_loss = self.xent(gnn_scores, targets)
        # the loss function of Rio-GNN, Eq. (11) in the paper
        return gnn_loss + self.lambda_1 * label_loss_one
RioGNN | RioGNN-main/model/layers.py | import sys
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
from torch.autograd import Variable
from operator import itemgetter
import math
from RL.rl_model import *
"""
Rio-GNN Layers
Paper: Reinforced Neighborhood Selection Guided Multi-Relational Graph Neural Networks
Source: https://github.com/safe-graph/RioGNN
"""
class InterAgg(nn.Module):
    """
    Inter-relation aggregator: combines the per-relation (intra-relation)
    neighbor embeddings of a batch of nodes into a single embedding per
    node, and drives the RL module that adapts the per-relation
    neighbor-filtering thresholds during training.
    """

    def __init__(self, width_rl, height_rl, device, LR, GAMMA, stop_num,
                 features, feature_dim,
                 embed_dim, adj_lists, intra_aggs,
                 inter, cuda=True):
        """
        Initialize the inter-relation aggregator
        :param width_rl: width of each relation tree
        :param height_rl: height of each relation tree
        :param device: "cuda" / "cpu"
        :param LR: Actor learning rate (hyper-parameters of AC)
        :param GAMMA: Actor discount factor (hyper-parameters of AC)
        :param stop_num: deep switching or termination conditions
        :param features: the input node features or embeddings for all nodes
        :param feature_dim: the input dimension
        :param embed_dim: the output dimension
        :param adj_lists: a list of adjacency lists for each single-relation graph
        :param intra_aggs: the intra-relation aggregators used by each single-relation graph
        :param inter: the aggregator type: 'Att', 'Weight', 'Mean', 'GNN'
        :param cuda: whether to use GPU
        """
        super(InterAgg, self).__init__()
        self.features = features
        self.dropout = 0.6  # dropout rate; only consumed by the 'Att' aggregator below
        self.adj_lists = adj_lists
        self.intra_aggs = intra_aggs
        self.embed_dim = embed_dim
        self.feat_dim = feature_dim
        self.inter = inter
        self.cuda = cuda
        # initial filtering thresholds (one per relation, start at 0.5)
        self.thresholds = [0.5 for r in range(len(intra_aggs))]
        # RL condition flag: set False by the RL forest once it terminates
        self.RL = True
        self.rl_tree = RLForest(width_rl, height_rl, device, LR, GAMMA, stop_num, len(intra_aggs))
        # number of batches for current epoch, assigned during training
        self.batch_num = 0
        self.auc = 0  # current AUC, fed to the RL module as part of its reward signal
        # the activation function used by attention mechanism
        self.leakyrelu = nn.LeakyReLU(0.2)
        # parameter used to transform node embeddings before inter-relation aggregation
        self.weight = nn.Parameter(torch.FloatTensor(self.embed_dim, self.feat_dim))
        init.xavier_uniform_(self.weight)
        # weight parameter for each relation used by Rio-Weight
        self.alpha = nn.Parameter(torch.FloatTensor(self.embed_dim, len(intra_aggs)))
        init.xavier_uniform_(self.alpha)
        # parameters used by attention layer
        self.a = nn.Parameter(torch.FloatTensor(2 * self.embed_dim, 1))
        init.xavier_uniform_(self.a)
        # label predictor for similarity measure
        self.label_clf = nn.Linear(self.feat_dim, 2)
        # initialize the parameter logs
        self.weights_log = []

    def forward(self, nodes, labels, train_flag=True):
        """
        :param nodes: a list of batch node ids
        :param labels: a list of batch node labels, only used by the RLModule
        :param train_flag: indicates whether in training or testing mode
        :return combined: the embeddings of a batch of input node features
        :return center_scores: the label-aware scores of batch nodes
        """
        # extract 1-hop neighbor ids from adj lists of each single-relation graph
        to_neighs = []
        for adj_list in self.adj_lists:
            to_neighs.append([set(adj_list[int(node)]) for node in nodes])
        # find unique nodes and their neighbors used in current batch
        unique_nodes = set.union(*(set.union(*to_neighs[r]) for r in range(len(self.intra_aggs))), set(nodes))
        # calculate label-aware scores
        if self.cuda:
            batch_features = self.features(torch.cuda.LongTensor(list(unique_nodes)))
        else:
            batch_features = self.features(torch.LongTensor(list(unique_nodes)))
        batch_scores = self.label_clf(batch_features)
        # NOTE: the mapping relies on both list(unique_nodes) calls above
        # iterating the set in the same order (true within one CPython run)
        id_mapping = {node_id: index for node_id, index in zip(unique_nodes, range(len(unique_nodes)))}
        # the label-aware scores for current batch of nodes
        center_scores = batch_scores[itemgetter(*nodes)(id_mapping), :]
        # get neighbor node id list for each batch node and relation
        r_list = [[list(to_neigh) for to_neigh in to_neighs[r]] for r in range(len(self.intra_aggs))]
        # assign label-aware scores to neighbor nodes for each batch node and relation
        r_scores = [[batch_scores[itemgetter(*to_neigh)(id_mapping), :].view(-1, 2) for to_neigh in r_list[r]]
                    for r in range(len(self.intra_aggs))]
        # count the number of neighbors kept for aggregation for each batch node and relation
        r_sample_num_list = [[math.ceil(len(neighs) * self.thresholds[r]) for neighs in r_list[r]]
                             for r in range(len(self.intra_aggs))]
        # intra-aggregation steps for each relation
        # Eq. (8) in the paper
        # NOTE: r_scores is rebound here — after this call it holds the
        # per-relation neighbor-distance lists returned by the
        # intra-aggregators, not the raw label-aware scores built above.
        r_feats, r_scores = tuple(
            zip(*list(self.intra_aggs[r].forward(nodes, r_list[r], center_scores, r_scores[r], r_sample_num_list[r])
                      for r in range(len(self.intra_aggs)))))
        # concat the intra-aggregated embeddings from each relation
        neigh_feats = torch.cat(r_feats, dim=0)
        # get features or embeddings for batch nodes
        if self.cuda and isinstance(nodes, list):
            index = torch.LongTensor(nodes).cuda()
        else:
            index = torch.LongTensor(nodes)
        self_feats = self.features(index)
        # number of nodes in a batch
        n = len(nodes)
        # inter-relation aggregation steps
        # Eq. (9) in the paper
        if self.inter == 'Att':
            # 1) Rio-Att Inter-relation Aggregator
            combined, attention = att_inter_agg(len(self.adj_lists), self.leakyrelu, self_feats, neigh_feats,
                                                self.embed_dim,
                                                self.weight, self.a, n, self.dropout, self.training, self.cuda)
        elif self.inter == 'Weight':
            # 2) Rio-Weight Inter-relation Aggregator
            combined = weight_inter_agg(len(self.adj_lists), self_feats, neigh_feats, self.embed_dim, self.weight,
                                        self.alpha, n, self.cuda)
            gem_weights = F.softmax(torch.sum(self.alpha, dim=0), dim=0).tolist()
            if train_flag:
                print(f'Weights: {gem_weights}')
        elif self.inter == 'Mean':
            # 3) Rio-Mean Inter-relation Aggregator
            combined = mean_inter_agg(len(self.adj_lists), self_feats, neigh_feats, self.embed_dim, self.weight, n,
                                      self.cuda)
        elif self.inter == 'GNN':
            # 4) Rio-GNN Inter-relation Aggregator
            combined = threshold_inter_agg(len(self.adj_lists), self_feats, neigh_feats, self.embed_dim, self.weight,
                                           self.thresholds, n, self.cuda)
        # the reinforcement learning module: updates the per-relation
        # thresholds from the neighbor distances; stop_flag switches the
        # RL process off once it has converged/terminated
        if self.RL and train_flag:
            thresholds, stop_flag = self.rl_tree.get_threshold(list(r_scores), labels, self.thresholds, self.batch_num,
                                                               self.auc)
            self.thresholds = thresholds
            self.RL = stop_flag
        return combined, center_scores
class IntraAgg(nn.Module):
    """
    Intra-relation aggregator: averages the features of the neighbors that
    survive adaptive-threshold filtering within a single relation.
    """

    def __init__(self, features, feat_dim, cuda=False):
        """
        Initialize the intra-relation aggregator
        :param features: the input node features or embeddings for all nodes
        :param feat_dim: the input dimension
        :param cuda: whether to use GPU
        """
        super(IntraAgg, self).__init__()
        self.features = features
        self.cuda = cuda
        self.feat_dim = feat_dim

    def forward(self, nodes, to_neighs_list, batch_scores, neigh_scores, sample_list):
        """
        Code partially from https://github.com/williamleif/graphsage-simple/
        :param nodes: list of nodes in a batch
        :param to_neighs_list: neighbor node id list for each batch node in one relation
        :param batch_scores: the label-aware scores of batch nodes
        :param neigh_scores: the label-aware scores of 1-hop neighbors of each batch node in one relation
        :param sample_list: the number of neighbors kept for each batch node in one relation
        :return to_feats: the aggregated embeddings of batch nodes neighbors in one relation
        :return samp_scores: the average neighbor distances for each relation after filtering
        """
        # keep only the neighbors that pass the adaptive similarity threshold
        samp_neighs, samp_scores = filter_neighs_ada_threshold(batch_scores, neigh_scores, to_neighs_list, sample_list)

        # index the union of all surviving neighbors once
        all_neighbors = list(set.union(*samp_neighs))
        position_of = {node: pos for pos, node in enumerate(all_neighbors)}

        # row-normalized membership mask: row i averages node i's kept neighbors
        mask = Variable(torch.zeros(len(samp_neighs), len(all_neighbors)))
        cols = [position_of[neigh] for chosen in samp_neighs for neigh in chosen]
        rows = [i for i, chosen in enumerate(samp_neighs) for _ in chosen]
        mask[rows, cols] = 1
        if self.cuda:
            mask = mask.cuda()
        mask = mask.div(mask.sum(1, keepdim=True))

        node_index = torch.LongTensor(all_neighbors)
        if self.cuda:
            node_index = node_index.cuda()
        embed_matrix = self.features(node_index)

        to_feats = F.relu(mask.mm(embed_matrix))
        return to_feats, samp_scores
def filter_neighs_ada_threshold(center_scores, neigh_scores, neighs_list, sample_list):
    """
    Filter neighbors according label predictor result with adaptive thresholds
    :param center_scores: the label-aware scores of batch nodes
    :param neigh_scores: the label-aware scores of 1-hop neighbors of each batch node in one relation
    :param neighs_list: neighbor node id list for each batch node in one relation
    :param sample_list: the number of neighbors kept for each batch node in one relation
    :return samp_neighs: the kept neighbor id sets, one per batch node
    :return samp_scores: the neighbor distances kept after filtering
    """
    samp_neighs = []
    samp_scores = []
    # IDIOM FIX: the original iterated ``enumerate(center_scores)`` and then
    # immediately discarded the loop variable by re-fetching it via the
    # index; iterate over the index directly instead.
    for idx in range(len(center_scores)):
        # first-class (column 0) score of the batch node, broadcast over its neighbors
        neigh_score = neigh_scores[idx][:, 0].view(-1, 1)
        center_score = center_scores[idx][0].repeat(neigh_score.size()[0], 1)
        neighs_indices = neighs_list[idx]
        num_sample = sample_list[idx]
        # compute the L1-distance of batch nodes and their neighbors
        # Eq. (2) in paper
        score_diff = torch.abs(center_score - neigh_score).squeeze()
        sorted_scores, sorted_indices = torch.sort(score_diff, dim=0, descending=False)
        selected_indices = sorted_indices.tolist()
        # top-p sampling according to distance ranking and thresholds
        # Section 3.3.1 in paper
        if len(neigh_scores[idx]) > num_sample + 1:
            selected_neighs = [neighs_indices[n] for n in selected_indices[:num_sample]]
            selected_scores = sorted_scores.tolist()[:num_sample]
        else:
            selected_neighs = neighs_indices
            selected_scores = score_diff.tolist()
            # a single neighbor leaves score_diff 0-dimensional, so tolist()
            # returns a bare float; normalize back to a list
            if isinstance(selected_scores, float):
                selected_scores = [selected_scores]
        samp_neighs.append(set(selected_neighs))
        samp_scores.append(selected_scores)
    return samp_neighs, samp_scores
def mean_inter_agg(num_relations, self_feats, neigh_feats, embed_dim, weight, n, cuda):
    """
    Mean inter-relation aggregator: sums the per-relation neighbor
    embeddings, adds the batch-node embedding, averages and applies ReLU.
    :param num_relations: number of relations in the graph
    :param self_feats: batch nodes features or embeddings
    :param neigh_feats: intra-relation aggregated neighbor embeddings for each relation
    :param embed_dim: the dimension of output embedding
    :param weight: parameter used to transform node embeddings before inter-relation aggregation
    :param n: number of nodes in a batch
    :param cuda: whether use GPU
    :return: inter-relation aggregated node embeddings
    """
    # project self- and neighbor-embeddings with the shared weight matrix
    center_h = weight.mm(self_feats.t())
    neigh_h = weight.mm(neigh_feats.t())

    # accumulate the neighbor embedding slice of every relation
    aggregated = torch.zeros(size=(embed_dim, n))
    if cuda:
        aggregated = aggregated.cuda()
    for rel in range(num_relations):
        aggregated = aggregated + neigh_h[:, rel * n:(rel + 1) * n]

    # average self- and neighbor-embeddings (the constant 4.0 matches the
    # original implementation) and feed through the activation
    return F.relu((center_h + aggregated) / 4.0)
def weight_inter_agg(num_relations, self_feats, neigh_feats, embed_dim, weight, alpha, n, cuda):
    """
    Weight inter-relation aggregator: mixes the per-relation neighbor
    embeddings with learnable softmax-normalized relation weights.
    Reference: https://arxiv.org/abs/2002.12307
    :param num_relations: number of relations in the graph
    :param self_feats: batch nodes features or embeddings
    :param neigh_feats: intra-relation aggregated neighbor embeddings for each relation
    :param embed_dim: the dimension of output embedding
    :param weight: parameter used to transform node embeddings before inter-relation aggregation
    :param alpha: weight parameter for each relation used by Rio-Weight
    :param n: number of nodes in a batch
    :param cuda: whether use GPU
    :return: inter-relation aggregated node embeddings
    """
    # project self- and neighbor-embeddings with the shared weight matrix
    center_h = weight.mm(self_feats.t())
    neigh_h = weight.mm(neigh_feats.t())

    # normalize the relation weights so each embedding row mixes to 1
    relation_weights = F.softmax(alpha, dim=1)

    aggregated = torch.zeros(size=(embed_dim, n))
    if cuda:
        aggregated = aggregated.cuda()
    # weighted sum of the per-relation neighbor embedding slices
    for rel in range(num_relations):
        scale = relation_weights[:, rel].unsqueeze(1).repeat(1, n)
        aggregated = aggregated + scale * neigh_h[:, rel * n:(rel + 1) * n]

    return F.relu(center_h + aggregated)
def att_inter_agg(num_relations, att_layer, self_feats, neigh_feats, embed_dim, weight, a, n, dropout, training, cuda):
    """
    Attention-based inter-relation aggregator
    Reference: https://github.com/Diego999/pyGAT
    :param num_relations: number of relations in the graph
    :param att_layer: the activation function used by the attention layer
    :param self_feats: batch nodes features or embeddings
    :param neigh_feats: intra-relation aggregated neighbor embeddings for each relation
    :param embed_dim: the dimension of output embedding
    :param weight: parameter used to transform node embeddings before inter-relation aggregation
    :param a: parameters used by attention layer
    :param n: number of nodes in a batch
    :param dropout: dropout for attention layer
    :param training: a flag indicating whether in the training or testing mode
    :param cuda: whether use GPU
    :return combined: inter-relation aggregated node embeddings
    :return att: the attention weights for each relation
    """
    # transform batch node embedding and neighbor embedding in each relation with weight parameter
    center_h = self_feats.mm(weight.t())
    neigh_h = neigh_feats.mm(weight.t())
    # per-(node, relation) unnormalized attention logits
    combined = torch.cat((center_h.repeat(num_relations, 1), neigh_h), dim=1)
    e = att_layer(combined.mm(a))
    # GENERALIZATION/BUGFIX: the slices were hardcoded for exactly 3
    # relations (e[0:n], e[n:2n], e[2n:num_relations*n]); build one column
    # per relation so any num_relations works. The result is identical for
    # the original 3-relation case.
    attention = torch.cat([e[r * n:(r + 1) * n, :] for r in range(num_relations)], dim=1)
    ori_attention = F.softmax(attention, dim=1)
    attention = F.dropout(ori_attention, dropout, training=training)
    # initialize the final neighbor embedding
    if cuda:
        aggregated = torch.zeros(size=(n, embed_dim)).cuda()
    else:
        aggregated = torch.zeros(size=(n, embed_dim))
    # add neighbor embeddings in each relation together with attention weights
    for r in range(num_relations):
        aggregated += torch.mul(attention[:, r].unsqueeze(1).repeat(1, embed_dim), neigh_h[r * n:(r + 1) * n, :])
    # sum aggregated neighbor embedding and batch node embedding
    # feed them to activation function
    combined = F.relu((center_h + aggregated).t())
    # extract the attention weights
    att = F.softmax(torch.sum(ori_attention, dim=0), dim=0)
    return combined, att
def threshold_inter_agg(num_relations, self_feats, neigh_feats, embed_dim, weight, threshold, n, cuda):
    """
    Rio-GNN inter-relation aggregator: uses the per-relation neighbor
    filtering thresholds as mixing weights. Eq. (9) in the paper.
    :param num_relations: number of relations in the graph
    :param self_feats: batch nodes features or embeddings
    :param neigh_feats: intra-relation aggregated neighbor embeddings for each relation
    :param embed_dim: the dimension of output embedding
    :param weight: parameter used to transform node embeddings before inter-relation aggregation
    :param threshold: the neighbor filtering thresholds used as aggregating weights
    :param n: number of nodes in a batch
    :param cuda: whether use GPU
    :return: inter-relation aggregated node embeddings
    """
    # project self- and neighbor-embeddings with the shared weight matrix
    center_h = weight.mm(self_feats.t())
    neigh_h = weight.mm(neigh_feats.t())

    # broadcast the thresholds over the embedding rows
    mix = torch.FloatTensor(threshold).repeat(weight.size(0), 1)
    aggregated = torch.zeros(size=(embed_dim, n))
    if cuda:
        mix = mix.cuda()
        aggregated = aggregated.cuda()

    # threshold-weighted sum of the per-relation neighbor embedding slices
    for rel in range(num_relations):
        scale = mix[:, rel].unsqueeze(1).repeat(1, n)
        aggregated = aggregated + scale * neigh_h[:, rel * n:(rel + 1) * n]

    return F.relu(center_h + aggregated)
| 18,857 | 42.855814 | 119 | py |
EuclidEmulator | EuclidEmulator-master/wrapper2/e2py/ee_observables.py | """
ee_observables.py
EuclidEmulator submodule for actual emulation of cosmological observables.
"""
# This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys as _sys
import numpy as _np
import warnings as _warnings
import EuclidEmulator_BackEnd as _eeb
from e2py._internal import _ee_aux as _aux
from e2py._internal import _ee_background as _bg
from e2py._internal import _ee_cosmoconv as _cc
import e2py.ee_input as _inp
import e2py._ee_lens as _lens
from scipy.integrate import romb as _romb
from scipy.interpolate import CubicSpline as _CubicSpline
try:
from classy import Class as _Class
except ImportError:
print "\nClassy could not be found in your system."
print "Here are some suggestions:\n"
print "\t -Download the Class from class-code.net and install it"
print "\t together with its wrapper classy (type 'make' instead of"
print "\t 'make class'"
print "\t -If you know that Class is installed on your system"
print "\t and yet classy could not be installed, try re-compiling"
print "\t Class with just ''make'' instead of ''make class''"
print "NOTICE: Even without classy you can still use EuclidEmulator"
print " to emulate boost factors. You won't be able to compute"
print " full power spectra, though."
def get_boost(emu_pars_dict, redshifts, custom_kvec=None, verbose=True):
    """
    Signature:   get_boost(emu_pars_dict, redshifts [, custom_kvec=None, verbose=True])

    Description: Computes the non-linear boost factor for a cosmology
                 defined in emu_pars_dict (a python dictionary containing
                 the values for the 6 LCDM parameters) at specified
                 redshift stored in a list or numpy.ndarray.
                 Optionally, a list or numpy.ndarray of k modes can be
                 passed to the function via the keyword argument "kvec".
                 Then, by setting verbose=False, it is possible to fully
                 suppress any verbose information about how the code
                 progresses.

    Input types: python dictionary (with the six cosmological parameters)
                 list or numpy.ndarray (with redshift values)

                 :OPTIONAL:
                 list or numpy.ndarray (with k mode values)
                 boolean (verbosity)

    Output type: python dictionary

    Related: get_plin, get_pnonlin
    """
    # NOTE(review): this module is written for Python 2 (print statements
    # throughout); porting to Python 3 requires converting to print().
    # Check cosmological parameter ranges
    _inp.check_param_range(emu_pars_dict)

    # accept a scalar redshift as well as a list/array of redshifts
    if isinstance(redshifts, (int, float)):
        redshifts = _np.asarray([redshifts])
    else:
        redshifts = _np.asarray(redshifts)

    # the emulator is only trained for redshifts up to 5
    for z in redshifts:
        assert z <= 5.0, "EuclidEmulator allows only redshifts z <= 5.0.\n"

    if not isinstance(emu_pars_dict, (dict,)):
        print "The cosmological parameters must be passed as a python \
dictionary.\n"
        _sys.exit()

    # call the C back end: returns the emulated boost on its native k grid
    boost_data = _eeb.emu_boost(_np.array([emu_pars_dict['om_b'],
                                           emu_pars_dict['om_m'],
                                           emu_pars_dict['n_s'],
                                           emu_pars_dict['h'],
                                           emu_pars_dict['w_0'],
                                           emu_pars_dict['sigma_8']]),
                                redshifts, verbose)

    kvals = boost_data.k
    k_shape = kvals.shape

    # flags: whether constant extrapolation beyond the emulator's native
    # k range is needed for the user-supplied k modes
    do_extrapolate_above = False
    do_extrapolate_below = False
    if not(custom_kvec is None):
        # split the requested k modes into below-range, in-range and
        # above-range parts (in-range ones are interpolated, the rest
        # constantly extrapolated)
        upper_mask = custom_kvec < max(kvals)
        lower_mask = custom_kvec > min(kvals)
        mask = [u and l for (u,l) in zip(lower_mask, upper_mask)]
        custom_k_within_range = custom_kvec[mask]
        custom_k_below = custom_kvec[[not(l) for l in lower_mask]]
        custom_k_above = custom_kvec[[not(u) for u in upper_mask]]

        if any(custom_kvec > max(kvals)):
            wrn_message = ("EuclidEmulator emulates the non-linear correction in \n"
                           "the interval [6.87215e-3 h/Mpc, 5.52669h/Mpc]. You are \n"
                           "requesting k modes beyond k_max = 5.52669h/Mpc. \n"
                           "Higher k modes constantly extrapolated.")
            if verbose:
                _warnings.warn(wrn_message)
            do_extrapolate_above = True

        if any(custom_kvec < min(kvals)):
            wrn_message = ("EuclidEmulator emulates the non-linear correction in \n"
                           "the interval [6.87215e-3 h/Mpc, 5.52669h/Mpc]. You are \n"
                           "requesting k modes below k_min = 6.87215h/Mpc. \n"
                           "Lower k modes constantly extrapolated.")
            if verbose:
                _warnings.warn(wrn_message)
            do_extrapolate_below = True

    len_kvec = len(kvals)
    len_redshifts = len(redshifts)

    # the back end returns all redshifts concatenated into one flat array;
    # slice out one k-length segment per redshift
    if len_redshifts > 1:
        bvals = {}
        for i in range(len_redshifts):
            tmp = boost_data.boost[i*len_kvec:(i+1)*len_kvec]
            if not(custom_kvec is None):
                # interpolate in log-log space onto the requested k modes
                bvals['z'+str(i)] = 10.0**_CubicSpline(_np.log10(kvals),
                                                       _np.log10(tmp.reshape(k_shape))
                                                       )(_np.log10(custom_k_within_range))
                #Extrapolate if necessary
                if do_extrapolate_below:
                    # below the k_min of EuclidEmulator, we are in the linear regime where
                    # the boost factor is unity by construction
                    b_extrap = _np.ones_like(custom_k_below)
                    bvals['z'+str(i)]= _np.concatenate((b_extrap, bvals['z'+str(i)]))

                if do_extrapolate_above:
                    # We extrapolate by setting all b(k > k_max) to b(k_max)
                    b_extrap = bvals['z'+str(i)][-1] * _np.ones_like(custom_k_above)
                    bvals['z'+str(i)] = _np.concatenate((bvals['z'+str(i)], b_extrap))
            else:
                bvals['z'+str(i)] = tmp.reshape(k_shape)
    else:
        # single redshift: return a bare array instead of a dictionary
        tmp = boost_data.boost
        if not(custom_kvec is None):
            # interpolate in log-log space onto the requested k modes
            bvals = 10.0**_CubicSpline(_np.log10(kvals),
                                       _np.log10(tmp.reshape(k_shape))
                                       )(_np.log10(custom_k_within_range))
            #Extrapolate if necessary
            if do_extrapolate_below:
                # below the k_min of EuclidEmulator, we are in the linear regime where
                # the boost factor is unity by construction
                b_extrap = _np.ones_like(custom_k_below)
                bvals = _np.concatenate((b_extrap,bvals))

            if do_extrapolate_above:
                # We extrapolate by setting all b(k > k_max) to b(k_max)
                b_extrap = bvals[-1] * _np.ones_like(custom_k_above)
                bvals = _np.concatenate((bvals, b_extrap))
        else:
            bvals = tmp.reshape(k_shape)

    if not(custom_kvec is None): # This could probably be done cleaner!
        kvals = custom_kvec

    return {'k': kvals, 'B': bvals}
def get_pnonlin(emu_pars_dict, redshifts, custom_kvec=None, verbose=True):
    """
    Signature:   get_pnonlin(emu_pars_dict, redshifts [, custom_kvec=None, verbose=True])

    Description: Computes the linear power spectrum and the non-linear boost
                 separately for a given redshift z (or for a list or numpy.ndarray
                 of redshifts), a given cosmology defined in emu_pars_dict (a python
                 dictionary containing the values for the 6 LCDM parameters) and
                 optionally a list or numpy.ndarray of k modes. Then it returns the
                 product of these two which is the non-linear DM-only power spectrum.

    Input types: python dictionary (with the six cosmological parameters)
                 float or iterable (list, numpy.ndarray) (with redshifts)
                 :OPTIONAL:
                 iterable (list, numpy.ndarray) (with k modes)
                 boolean (verbose)

    Output type: python dictionary

    Related:     get_plin, get_boost
    """
    # classy (or its fork "classee") is required to compute the linear
    # spectrum; bail out early if neither has been imported.
    if _Class.__module__ not in _sys.modules:
        # BUG FIX: parenthesized print works identically in Python 2 and 3;
        # the original bare print statement is a SyntaxError under Python 3.
        print("You have not imported neither classee nor classy.\n \
           Emulating full power spectrum is hence not possible.")
        return None
    # Check cosmological parameter ranges
    _inp.check_param_range(emu_pars_dict)
    # Accept a single redshift as well as an iterable of redshifts.
    if isinstance(redshifts, (int, float)):
        redshifts = _np.asarray([redshifts])
    else:
        redshifts = _np.asarray(redshifts)
    for z in redshifts:
        assert z <= 5.0, "EuclidEmulator allows only redshifts z <= 5.0.\n"
    # Non-linear correction B(k,z) from the emulator.
    boost_dict = get_boost(emu_pars_dict, redshifts, custom_kvec, verbose)
    kvec = boost_dict['k']
    Bk = boost_dict['B']
    # Linear spectrum from classy, evaluated at exactly the boost's k modes.
    plin = get_plin(emu_pars_dict, kvec, redshifts)
    plin = plin['P_lin']
    # P_nonlin = P_lin * B; for several redshifts both factors are dicts
    # keyed 'z0', 'z1', ... in the order of the input redshifts.
    if len(redshifts) == 1:
        pnonlin = plin*Bk
    else:
        pnonlin = {}
        for i, z in enumerate(redshifts):
            pnonlin['z'+str(i)] = plin['z'+str(i)]*Bk['z'+str(i)]
    return {'k': kvec, 'P_nonlin': pnonlin, 'P_lin': plin, 'B': Bk}
def get_plin(emu_pars_dict, custom_kvec, redshifts):
    """
    Signature:   get_plin(emu_pars_dict, custom_kvec, redshifts)

    Description: Computes the linear power spectrum at redshift z for a
                 cosmology defined in emu_pars_dict (a python dictionary
                 containing the values for the 6 LCDM parameters), at the
                 requested k modes (uses classy).

    Input types: python dictionary (with the six cosmological parameters)
                 numpy.ndarray (containing the k modes)
                 numpy.ndarray (containing the redshift values)

    Output type: if len(redshifts)==1, then numpy.ndarray (containing the
                 linear power spectrum values)
                 if len(redshifts)>1, then dict with indices 'z0', 'z1',
                 'z2' etc.

    Related:     get_pnonlin, get_boost
    """
    if _Class.__module__ not in _sys.modules:
        # BUG FIX: parenthesized print works identically in Python 2 and 3;
        # the original bare print statement is a SyntaxError under Python 3.
        print("You have not imported neither classee nor classy.\n \
           Computing linear power spectrum is hence not possible.")
        return None
    # Convert single redshift input argument to array
    if isinstance(redshifts, (int, float)):
        redshifts = _np.asarray([redshifts])
    else:
        redshifts = _np.asarray(redshifts)
    for z in redshifts:
        assert z <= 5.0, "EuclidEmulator allows only redshifts z <= 5.0.\n"
    # Convert single k mode input argument to array
    # (the original comment here wrongly said "redshift")
    if isinstance(custom_kvec, (int, float)):
        custom_kvec = _np.asarray([custom_kvec])
    else:
        custom_kvec = _np.asarray(custom_kvec)
    # "Stringify" the input arrays to be understandable for classy.
    z_arr, z_str = _aux.stringify_arr(redshifts)
    # Convert the input dictionary into a Class-compatible dictionary
    class_pars_dict = _inp.emu_to_class(emu_pars_dict)
    # Extend the input Class-compatible dictionary by the additional
    # information requested by classy.
    classy_pars = class_pars_dict
    classy_pars['Omega_Lambda'] = 0.0
    classy_pars['output'] = 'mPk'
    classy_pars['P_k_max_1/Mpc'] = 10.0
    classy_pars['z_pk'] = z_str
    # Create a "Class" instance called "cosmo" and run classy to compute
    # the cosmological quantities.
    cosmo = _Class()
    cosmo.set(classy_pars)
    cosmo.compute()
    # Convert k units: h/Mpc -> 1/Mpc (classy expects 1/Mpc)
    h = classy_pars['h']
    k_classy_arr = h*custom_kvec
    # Get shape of k vector
    k_shape = k_classy_arr.shape
    # Get power spectrum at tabulated z and k in units of Mpc^3; the h^3
    # factor converts classy's Mpc^3 into (Mpc/h)^3.
    if len(z_arr) == 1:
        # NOTE(review): z is a length-1 array here, not a scalar; classy's
        # pk appears to accept it -- confirm against the classy bindings.
        z = z_arr
        linpower = _np.array([cosmo.pk(k, z)*h*h*h
                              for k in k_classy_arr]).reshape(k_shape)
    else:
        linpower = {'z'+str(i):
                    _np.array([cosmo.pk(k, z)*h*h*h
                               for k in k_classy_arr]).reshape(k_shape)
                    for i, z in enumerate(z_arr)}
    return {'k': custom_kvec, 'P_lin': linpower}
| 12,678 | 38.746082 | 90 | py |
EuclidEmulator | EuclidEmulator-master/wrapper2/e2py/ee_input.py | """
ee_input.py
EuclidEmulator submodule containing functions related to argument parsing.
"""
# This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys as _sys
import pandas as _pd
from e2py._internal import _ee_cosmoconv as _cc
######################################################
#################### Check input #####################
######################################################
def check_param_range(par_dict, csm_index=0):
    """
    Checks that every parameter in the cosmology dictionary 'par_dict'
    (cosmology number 'csm_index', used only for error reporting) obeys
    the limits set by the emulator.

    Raises a ValueError naming the first offending parameter (checked in
    the fixed order om_b, om_m, n_s, h, w_0, sigma_8); returns None when
    all parameters are admissible.
    """
    # Supported intervals, one entry per LCDM parameter.
    # (Refactored from six copy-pasted if-blocks into a single table.)
    ranges = {
        'om_b':    (0.0217, 0.0233),
        'om_m':    (0.1326, 0.1526),
        'n_s':     (0.9345, 0.9965),
        'h':       (0.6251, 0.7211),
        'w_0':     (-1.250, -0.750),
        'sigma_8': (0.7684, 0.8614),
    }
    # Iterate in a fixed order so the first violation reported is
    # deterministic and matches the original implementation.
    for name in ('om_b', 'om_m', 'n_s', 'h', 'w_0', 'sigma_8'):
        low, high = ranges[name]
        value = par_dict[name]
        if value < low or value > high:
            raise ValueError("Parameter range violation in cosmology %d: "
                             "%s is set to %f, but should be in the "
                             "interval [%g, %g]."
                             % (csm_index, name, value, low, high))
######################################################
#################### Argument parsing ################
######################################################
def read_parfile(filename, sep=","):
    """
    Signature:   read_parfile(filename, sep=",")

    Description: Parses a parameter file and returns one python dictionary
                 per cosmology specified in it, validating each cosmology
                 against the emulator's allowed parameter ranges.

    Input type:  string (path to or name of parameter file)
    Output type: list of python dictionaries
    """
    # The separator may be padded with arbitrary whitespace in the file.
    table = _pd.read_csv(filename,
                         delimiter='\s*'+sep+'\s*',
                         engine='python')
    cosmologies = []
    for row_index, row in table.iterrows():
        # Convert pandas.Series to dictionary (= required output type)
        cosmology = row.to_dict()
        # Reject any cosmology outside the emulator's supported ranges.
        check_param_range(cosmology, row_index)
        cosmologies.append(cosmology)
    return cosmologies
########################################################
################### Format conversion ##################
########################################################
def emu_to_class(emu_pars_dict):
    """
    Signature:   emu_to_class(emu_pars_dict)

    Description: Converts the set of parameters accepted by
                 EuclidEmulator into a set of parameters accepted
                 by CLASS and CAMB.

    Input type:  python dictionary
    Output type: python dictionary

    Remark:      The expected keys are:
                    'om_b' (lowercase baryon density parameter)
                    'om_m' (lowercase total matter density parameter)
                    'n_s' (spectral index)
                    'h' (Hubble parameter)
                    'w_0' (dark energy equation of state parameter)
                    'sigma_8' (overdensity fluctuation variance).

    Related:     class_to_emu
    """
    if not isinstance(emu_pars_dict, (dict,)):
        # BUG FIX: parenthesized print is valid and identical in Python 2
        # and 3; the original bare print statement breaks under Python 3.
        print("The cosmological parameters must be passed as a \
           python dictionary.\n")
        _sys.exit()
    om_b = emu_pars_dict['om_b']
    # CLASS expects the cold dark matter density, not the total matter
    # density, hence the subtraction.
    om_cdm = emu_pars_dict['om_m'] - om_b
    # NOTE: the original also computed a_s = _cc.sigma8_to_as(sigma_8) here
    # and discarded the result -- dead code, removed.
    class_pars_dict = {'omega_b': om_b,
                       'omega_cdm': om_cdm,
                       'n_s': emu_pars_dict['n_s'],
                       'h': emu_pars_dict['h'],
                       'w0_fld': emu_pars_dict['w_0'],
                       'sigma8': emu_pars_dict['sigma_8']
                      }
    return class_pars_dict
def class_to_emu(class_pars_dict):
    """
    Signature:   class_to_emu(class_pars_dict)

    Description: Converts the set of parameters accepted by CLASS
                 and CAMB into a set of parameters accepted by
                 EuclidEmulator.

    Input type:  python dictionary
    Output type: python dictionary

    Remark:      The expected keys are:
                    'omega_b' (lowercase baryon density parameter)
                    'omega_cdm' (lowercase cold dark matter density parameter)
                    'n_s' (spectral index)
                    'h' (Hubble parameter)
                    'w0_fld' (dark energy equation of state parameter)
                    'sigma8' (power spectrum amplitude/variance of over-density field).

    Related:     emu_to_class
    """
    # (Docstring fix: "Related" previously pointed to this function itself.)
    if not isinstance(class_pars_dict, (dict,)):
        # BUG FIX: parenthesized print is valid and identical in Python 2
        # and 3; the original bare print statement breaks under Python 3.
        print("The cosmological parameters must be passed as a \
           python dictionary.\n")
        _sys.exit()
    om_b = class_pars_dict['omega_b']
    om_cdm = class_pars_dict['omega_cdm']
    # The emulator works with the total matter density om_m = om_b + om_cdm.
    emu_pars_dict = {'om_b': om_b,
                     'om_m': om_b + om_cdm,
                     'n_s': class_pars_dict['n_s'],
                     'h': class_pars_dict['h'],
                     'w_0': class_pars_dict['w0_fld'],
                     'sigma_8': class_pars_dict['sigma8']}
    return emu_pars_dict
| 7,734 | 34.645161 | 88 | py |
EuclidEmulator | EuclidEmulator-master/wrapper2/e2py/__init__.py | # This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
print('EuclidEmulator Copyright (C) 2018-2020 Mischa Knabenhans & Joachim Stadel')
print('This program comes with ABSOLUTELY NO WARRANTY.')
print('This is free software, and you are welcome to redistribute it')
print('under certain conditions. See http://www.gnu.org/licenses/ for further')
print('information.')
from ee_input import *
from ee_observables import *
| 1,090 | 40.961538 | 82 | py |
EuclidEmulator | EuclidEmulator-master/wrapper2/e2py/_ee_lens.py | """
ee_lens.py
EuclidEmulator submodule for computation of cosmological lensing quantities.
REMARK: The geometry of the Universe is fixed to be flat (i.e.
Omega_curvature = 1) and the radiation energy density
is set to Om_rad = 4.183709411969527e-5/(h*h). These
values were assumed in the construction process of
EuclidEmulator and hence must be used whenever one is
working with it.
"""
# This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as _np
from scipy.integrate import romb as _romb
from scipy.interpolate import CubicSpline as _CubicSpline
from e2py._internal import _ee_aux as _aux
def lens_efficiency(sourcedist, dcomov, dcomov_lim, prec=12):
    """
    Signature:    lens_efficiency(sourcedist, dcomov, dcomov_lim, prec=12)

    Description: Computes the lens efficiency function q (see e.g. equation
                 (24) of the review article by Martin Kilbinger "Cosmology with
                 cosmic shear observations: a review", July 21, 2015,
                 arXiv:1411.0115v2), given a source distribution function n and
                 two comoving distances. The integral over chi is evaluated
                 with Romberg integration on 2**prec + 1 samples.

    Input types: sourcedist - dict with keys 'chi' and 'n' (both strictly
                              positive arrays; interpolated in log-log space)
                 dcomov     - float (or int) or np.ndarray
                 dcomov_lim - float
                 prec       - int (Romberg sample count exponent)

    Output types: float (or np.ndarray if type(dcomov)=np.ndarray)

    Raises:      TypeError if dcomov is neither a real scalar nor an ndarray.
    """
    # interpolate the source distribution function in log-log space
    nfunc = _CubicSpline(_np.log10(sourcedist['chi']),
                         _np.log10(sourcedist['n']))
    if isinstance(dcomov, _np.ndarray):
        result = []
        for distance in dcomov:
            chi = _np.linspace(distance, dcomov_lim, 2**prec+1)
            sourcedistribution = 10**nfunc(_np.log10(chi))
            integrand = sourcedistribution * (1-distance/chi)
            result.append(_romb(integrand, chi[1]-chi[0]))
        return _np.asarray(result)
    if isinstance(dcomov, (float, int)):
        chi = _np.linspace(dcomov, dcomov_lim, 2**prec+1)
        sourcedistribution = 10**nfunc(_np.log10(chi))
        integrand = sourcedistribution * (1-dcomov/chi)
        return _romb(integrand, chi[1]-chi[0])
    # BUG FIX: the original used "raise(TypeError, msg)", which raises a
    # TUPLE (a Python-2 idiom); under Python 3 that fails with an unrelated
    # "exceptions must derive from BaseException" error.
    raise TypeError("The second argument 'dcomov' must be either a float, "
                    "an integer or a np.ndarray.\n")
class GalaxyRedshiftDist(object):
    # Author: Rongchuan Zhao
    # Galaxy redshift probability distribution of the form
    #   n(z) ~ (z/z_0)**alpha * exp(-(z/z_0)**beta),
    # with the pivot z_0 tied to the mean redshift via z_0 = z_mean/1.412.
    def __init__(self, alpha=2.0, beta=1.5, z_mean=0.9):
        # Shape parameters (alpha, beta) and mean redshift of the
        # distribution; default values are the ones hard-coded here
        # (presumably Euclid-like -- TODO confirm their source).
        self._alpha = alpha
        self._beta = beta
        self._z_mean = z_mean
    def __call__(self, z_mean):
        # Calling an instance builds a NEW distribution with the same shape
        # parameters but the given mean redshift, and returns its normalized
        # pdf wrapped as a composable _aux.Function object.
        return GalaxyRedshiftDist(z_mean=z_mean,
                                  alpha=self._alpha,
                                  beta=self._beta).gala_probdist_func
    def _z_0(self):
        # Pivot redshift; the constant 1.412 converts the mean redshift of
        # this family into z_0 (i.e. z_mean = 1.412 * z_0 -- appears to
        # assume the default alpha=2, beta=1.5; verify for other shapes).
        return self._z_mean/1.412
    @_aux.normalize()
    def _gala_probdist(self, z):
        # Unnormalized pdf; the _aux.normalize decorator divides the return
        # value by its integral over [0, inf) so the result integrates to 1.
        z_0 = self._z_0()
        return (z / z_0)**self._alpha*_np.exp(-(z/z_0)**self._beta)
    @property
    def gala_probdist_func(self):
        # Normalized pdf wrapped in _aux.Function (supports +, -, *, /, **).
        return _aux.Function(self._gala_probdist)
| 3,699 | 35.27451 | 80 | py |
EuclidEmulator | EuclidEmulator-master/wrapper2/e2py/_internal/_ee_aux.py | """
_ee_aux.py
EuclidEmulator submodule for auxiliary functions.
"""
# This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys as _sys
import contextlib as _ctlib
import numpy as _np
from functools import wraps as _wraps
from functools import partial as _partial
from scipy.integrate import quad as _quad
# Auxiliary functions for formatting input such that CLASS understands
@_ctlib.contextmanager
def disable_numpy_summarization():
    # Author: Jeppe Mosgaard Dakin
    """
    Signature:   disable_numpy_summarization()

    Description: Context manager that temporarily disables numpy's print
                 summarization ("...") so arrays of any length are printed
                 in full; the previous threshold is restored on exit, even
                 if the body raises.

    Input type:  None
    Output type: None
    """
    previous = _np.get_printoptions()
    _np.set_printoptions(threshold=_np.inf)
    try:
        yield
    finally:
        _np.set_printoptions(threshold=previous['threshold'])
def stringify_arr(arr):
    # Author: Jeppe Mosgaard Dakin
    """
    Signature:   stringify_arr(arr)

    Description: Serializes a numeric array into a single comma-separated
                 string (with summarization disabled so no entry is lost)
                 and parses that string back into an array.

    Input type:  array
    Output type: tuple (array, string)
    """
    with disable_numpy_summarization():
        formatted = _np.array2string(
            arr,
            max_line_width=_np.inf,
            formatter={'float': lambda f: '{:.8e}'.format(f)},
            separator=',',
        )
    # Drop the surrounding brackets; keep only the number list.
    arr_str = formatted.strip('[]')
    return _np.fromstring(arr_str, sep=','), arr_str
def print_cosmology(emu_pars_dict):
    # Author: Mischa Knabenhans
    # Prints the derived density parameters (Omega_m, Omega_rad, Omega_DE)
    # of the cosmology in 'emu_pars_dict' as '#'-prefixed lines.
    # NOTE(review): uses Python 2 print statements -- this module is the
    # Python-2 wrapper ("wrapper2") and does not parse under Python 3.
    h = emu_pars_dict['h']
    omega_m = emu_pars_dict['om_m']
    omega_rad = 4.183709411969527e-5; # corresponds to 2.755 K Tcmb
    # "Big Omega" density parameters: Omega = omega / h^2.
    Om_m = omega_m/(h*h)
    Om_rad = omega_rad/(h*h)
    # Dark energy closes the budget -- flat geometry (no curvature term).
    Om_DE = 1.0-Om_m-Om_rad
    print "#"
    print "# Cosmology:"
    print "# dOmega0: ", Om_m
    print "# dOmegaRad: ", Om_rad
    print "# dOmegaDE: ", Om_DE
def progress(count, total, status=''):
    # Source: online
    """Render an in-place console progress bar for step `count` of `total`,
    with an optional trailing status message."""
    width = 60
    done = int(round(width * count / float(total)))
    percents = round(100.0 * count / float(total), 1)
    # '=' for the completed fraction, '-' for the remainder.
    bar = '=' * done + '-' * (width - done)
    # '\r' rewinds to the line start so the next call overwrites the bar.
    _sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', status))
    _sys.stdout.flush()
def normalize(ave_range=(0, _np.inf)):
    # Author: Rongchuan Zhao
    """
    Decorator factory: area-normalizes a method over `ave_range`.

    The decorated method f(self, x, *extra) is replaced by
    f(self, x, *extra) / integral_{ave_range} f(self, t, *extra) dt,
    so the returned function integrates to 1 over `ave_range`.

    BUG FIX: the default argument used to be the mutable list
    [0, _np.inf]; a tuple avoids the shared-mutable-default pitfall
    while remaining index-compatible.

    Note: the normalization integral is recomputed on every call.
    """
    def decorate(func):
        @_wraps(func)
        def norm_func(self, *args):
            # Bind `self` so scipy.integrate.quad sees a function of the
            # first positional argument only; remaining args are passed on.
            inst_func = _partial(func, self)
            total = _quad(inst_func, ave_range[0], ave_range[1],
                          args=args[1:])[0]
            return func(self, *args)/total
        return norm_func
    return decorate
class Function(object):
    # Author: Rongchuan Zhao
    """Lightweight wrapper turning a plain callable into a composable
    function object: f + g, f - g, f * g, f / g and f ** p all return new
    Function instances evaluated pointwise; calling the instance forwards
    to the wrapped callable."""
    def __init__(self, func):
        # The wrapped callable; evaluated lazily on each operation.
        self.func = func
    def __add__(self, other):
        return Function(lambda x: self.func(x) + other.func(x))
    def __sub__(self, other):
        return Function(lambda x: self.func(x) - other.func(x))
    def __mul__(self, other):
        return Function(lambda x: self.func(x) * other.func(x))
    def __div__(self, other):
        # Python 2 division hook.
        return Function(lambda x: self.func(x) / other.func(x))
    # BUG FIX: Python 3 dispatches "/" to __truediv__, which was missing;
    # alias it so division works under both interpreters.
    __truediv__ = __div__
    def __pow__(self, index):
        return Function(lambda x: self.func(x)**index)
    def __call__(self, *arg, **kwarg):
        return self.func(*arg, **kwarg)
| 4,029 | 29.074627 | 81 | py |
EuclidEmulator | EuclidEmulator-master/wrapper2/e2py/_internal/_ee_background.py | """
ee_background.py
EuclidEmulator submodule for computation of cosmological background quantities.
REMARK: The geometry of the Universe is fixed to be flat (i.e.
Omega_curvature = 1) and the radiation energy density
is set to Om_rad = 4.183709411969527e-5/(h*h). These
values were assumed in the construction process of
EuclidEmulator and hence must be used whenever one is
working with it.
"""
# This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as _np
from scipy.integrate import romb as _romb
# UNIVERSAL CONSTANTS (Source: PDG booklet 2018)
SPEED_OF_LIGHT_IN_KILOMETERS_PER_SECOND = 299792.458
NEWTONS_CONSTANT = 6.6740908 * 1e-11 # in m^3kg^(-1)s^(-2)
MEGAPARSEC = 3.08567758149 * 1e22 # meters
M_SOLAR_IN_KG = 1.98848 *1e30
def dist_comov(emu_pars_dict, z1, z2, prec=12):
    """
    Signature:   dist_comov(emu_pars_dict, z1, z2, prec=12)

    Description: Computes the comoving distance (in units of Mpc/h) between
                 objects at redshifts z1 and z2 for the cosmology given in
                 emu_pars_dict (6 LCDM parameters). The integral over the
                 scale factor is evaluated by Romberg integration on
                 2**prec + 1 samples.

    Input type:  python dictionary (containing the 6 LCDM parameters)
                 floats or np.ndarrays
    Output type: np.ndarray

    REMARK 1:    If z1 and z2 are vectors of length n, the distances are
                 computed pairwise, (z1_i, z2_i), yielding n results -- NOT
                 for all n(n-1) redshift combinations.
    REMARK 2:    The geometry is fixed to be flat and the radiation density
                 to Om_rad = 4.183709411969527e-5/(h*h), as assumed in the
                 construction of EuclidEmulator.
    """
    # Accept scalar redshifts by promoting them to length-1 arrays.
    if isinstance(z1, (float, int)) and isinstance(z2, (float, int)):
        z1 = _np.array([z1])
        # BUG FIX: this line previously read "np.array" -- an undefined
        # name, since numpy is imported as "_np" in this module.
        z2 = _np.array([z2])
    # NOTE(review): "_cc" is not imported in this file's visible header
    # (only numpy and scipy.integrate.romb are) -- confirm the package
    # wiring injects it before relying on this function.
    a1_vec = _cc.z_to_a(z1)
    a2_vec = _cc.z_to_a(z2)
    d_comov = []
    for a1, a2 in zip(a1_vec, a2_vec):
        if a1 > a2:
            a1, a2 = a2, a1  # integrate from the smaller scale factor
        avec = _np.linspace(a1, a2, 2**prec+1)
        delta_a = avec[1]-avec[0]
        H = _cc.a_to_hubble(emu_pars_dict, avec)
        # chi = c * \int da / (a^2 H(a)); romb needs 2**prec + 1 samples.
        d_comov.append(_romb(1./(avec*avec*H), dx=delta_a))
    chi = _np.array(d_comov)  # units so far: Mpc/(km/s)
    # Multiply by c [km/s] and h to express the result in Mpc/h.
    return SPEED_OF_LIGHT_IN_KILOMETERS_PER_SECOND * chi * emu_pars_dict['h']
| 3,814 | 39.585106 | 83 | py |
EuclidEmulator | EuclidEmulator-master/wrapper2/e2py/_internal/_ee_cosmoconv.py | """
ee_cosmoconv.py
EuclidEmulator submodule for converting cosmological quantities.
REMARK: The geometry of the Universe is fixed to be flat (i.e.
Omega_curvature = 1) and the radiation energy density
is set to Om_rad = 4.183709411969527e-5/(h*h). These
values were assumed in the construction process of
EuclidEmulator and hence must be used whenever one is
working with it.
"""
import numpy as _np
def z_to_a(z):
    """
    Signature:   z_to_a(z)

    Description: Maps redshift(s) z to the corresponding scale
                 factor(s) a = 1/(1+z).

    Input type:  float or numpy.ndarray
    Output type: float or numpy.ndarray
    """
    return 1.0/(1.0+z)
def a_to_z(a):
    """
    Signature:   a_to_z(a)

    Description: Maps scale factor(s) a to the corresponding
                 redshift(s) z = 1/a - 1.

    Input type:  float or numpy.ndarray
    Output type: float or numpy.ndarray
    """
    return (1.0/a)-1.0
def sigma8_to_as(sigma8):
    """
    Signature:   sigma8_to_as(sigma8)

    Description: Converts sigma_8 into the primordial amplitude A_s via the
                 renormalization A_s = 2.215e-9 * (sigma8/0.8496)^2.

    Input type:  float
    Output type: float

    Raises:      ValueError if sigma8 is None; TypeError if sigma8 is
                 complex (or an array with complex entries).

    Remarks:     This conversion is not generic: it is the one used in the
                 construction of EuclidEmulator and must be used whenever a
                 sigma8 -> A_s conversion is performed in that context.

    Related:     as_to_sigma8
    """
    if sigma8 is None:
        raise ValueError("Value of sigma8 is 'None', i.e. sigma8 has \
                          no assigned value.")
    # Reject complex scalars as well as arrays containing complex entries.
    is_complex_scalar = isinstance(sigma8, complex)
    has_complex_entry = (isinstance(sigma8, _np.ndarray) and
                         any(isinstance(val, _np.complex128) for val in sigma8))
    if is_complex_scalar or has_complex_entry:
        raise TypeError("sigma8 must be a real number.")
    return 2.215e-9*(sigma8*sigma8)/(0.8496*0.8496)
def as_to_sigma8(a_s):
    """
    Signature:   as_to_sigma8(a_s)

    Description: Converts the primordial amplitude A_s into sigma8 via the
                 renormalization sigma8 = sqrt(0.8496^2 * A_s / 2.215e-9),
                 the exact inverse of sigma8_to_as.

    Input type:  float
    Output type: float

    Remarks:     This conversion is not generic: it is the one used in the
                 construction of EuclidEmulator and must be used whenever an
                 A_s -> sigma8 conversion is performed in that context.

    Related:     sigma8_to_as
    """
    return _np.sqrt(0.8496*0.8496*a_s/2.215e-9)
def a_to_hubble(emu_pars_dict, a):
    """
    Signature:   a_to_hubble(emu_pars_dict, a)

    Description: Computes the Hubble parameter H(a), in (km/s)/Mpc, at the
                 given scale factor(s) for the cosmology in emu_pars_dict
                 (keys used: 'h', 'om_m', 'w_0').
                 (Docstring fix: the previous signature line listed a stale
                 parameter list.)

    Input type:  type(a) = float or numpy.ndarray
                 emu_pars_dict = python dictionary

    Output type: float or numpy.ndarray

    REMARK:      The geometry is fixed to be flat (Omega_curvature = 0) and
                 the radiation energy density is hardcoded to
                 Om_rad = 4.183709411969527e-5/(h*h), as assumed in the
                 construction of EuclidEmulator.
    """
    a_inv = 1.0/a
    h = emu_pars_dict['h']
    Om_m = emu_pars_dict['om_m']/(h*h)
    H0 = 100.0 * h  # * (km/s)/Mpc
    w_0 = emu_pars_dict['w_0']
    # Radiation density is hardcoded (same formula as in CLASS, input.c,
    # lines 643 & 714) so users cannot pick values inconsistent with the
    # emulator's construction.
    Om_rad = 4.183709411969527e-5/(h*h)
    # Flat geometry is likewise hardcoded; Om_DE closes the budget.
    Om_curve = 0.0
    Om_DE = 1.0-Om_curve-Om_m-Om_rad
    # BUG FIX: the original asserted EXACT float equality of the density
    # budget, which can fail spuriously by one ulp; use a tolerance.
    assert Om_curve == 0.0 and abs(Om_m + Om_rad + Om_DE + Om_curve - 1.0) < 1e-12
    curvature = Om_curve * a_inv * a_inv
    matter = Om_m * a_inv * a_inv * a_inv
    radiation = Om_rad * a_inv * a_inv * a_inv * a_inv
    # Constant-w dark energy: rho_DE ~ a^-(3+3w).
    darkenergy = Om_DE * a_inv**(3.0+3.0*w_0)
    H = H0 * _np.sqrt(curvature + matter + radiation + darkenergy)
    return H  # in the standard units of (km/s)/Mpc
def k_to_l(emu_pars_dict, k, z, prec=12):
    """
    Signature:   k_to_l(emu_pars_dict, k, z, prec=12)

    Description: Converts a numpy.array k into a numpy.array l for a given
                 redshift z. Here, k denotes the wave numbers and l the
                 multipole order. The keyword argument "prec" is a precision
                 parameter for the romberg integration that has to be computed
                 in the course of the comoving distance calculation.

    Input types: type(emu_pars_dict) = dict
                 type(k) = numpy.ndarray
                 type(z) = float
                 type(prec) = int

    Output type: numpy.ndarray

    REMARK:      The geometry of the Universe is fixed to be flat (i.e.
                 Omega_curvature = 0, so the comoving angular diameter
                 distance equals the comoving distance) and the radiation
                 energy density is set to
                     Om_rad = 4.183709411969527e-5/(h*h).
                 These values were assumed in the construction process
                 of EuclidEmulator and hence must be used whenever one
                 is working with it.
    """
    # l = k * chi(z); the lower redshift bound 1e-13 stands in for z = 0
    # (the observer). NOTE(review): "_bg" is not imported in this module's
    # visible header (only numpy is) -- confirm the package's internal
    # __init__ provides it before relying on this function.
    return k * _bg.dist_comov(emu_pars_dict, 1e-13, z, prec)
def l_to_k(emu_pars_dict, l, z, prec=12):
    """
    Signature:   l_to_k(emu_pars_dict, l, z, prec=12)

    Description: Converts a numpy.array l into a numpy.array k for a given
                 redshift z. Here, k denotes the wave numbers and l the
                 multipole order. The keyword argument "prec" is a precision
                 parameter for the romberg integration that has to be computed
                 in the course of the comoving distance calculation.

    Input types: type(emu_pars_dict) = dict
                 type(l) = numpy.ndarray
                 type(z) = float
                 type(prec) = int

    Output type: numpy.ndarray

    REMARK:      The geometry of the Universe is fixed to be flat (i.e.
                 Omega_curvature = 0, so the comoving angular diameter
                 distance equals the comoving distance) and the radiation
                 energy density is set to
                     Om_rad = 4.183709411969527e-5/(h*h).
                 These values were assumed in the construction process
                 of EuclidEmulator and hence must be used whenever one
                 is working with it.
    """
    # k = l / chi(z), the inverse of k_to_l; 1e-13 stands in for z = 0.
    # NOTE(review): "_bg" is not imported in this module's visible header
    # (only numpy is) -- confirm the package's internal __init__ provides it.
    return l/_bg.dist_comov(emu_pars_dict, 1e-13, z, prec)
| 7,394 | 33.078341 | 83 | py |
EuclidEmulator | EuclidEmulator-master/wrapper2/e2py/_internal/__init__.py | import _ee_cosmoconv as _cc
import _ee_background as _bg
| 57 | 18.333333 | 28 | py |
EuclidEmulator | EuclidEmulator-master/examples/test.py | import e2py
import matplotlib.pyplot as plt
import numpy as np
import pylab as plb
from scipy.interpolate import CubicSpline
import os
# Specify cosmology and redshifts at which the non-linear
# power spectrum shall be emulated
csm = {'om_b': 0.0219961,
       'om_m': 0.1431991,
       'n_s': 0.96,
       'h': 0.67,
       'w_0': -1.0,
       'sigma_8': 0.83}
z=np.array([0.0,1.0,5.0])
# Emulation step: the one next line does all the magic
pnl_dict = e2py.get_pnonlin(csm,z)
# We rename some variables to avoid long variables names
# later on (this step is clearly optional)
kvec = pnl_dict['k']
P_nonlin = pnl_dict['P_nonlin']
P_lin = pnl_dict['P_lin']
Boost = pnl_dict['B']
# Save the data
# (one file per requested redshift; columns: k, P_nonlin, P_lin, Boost)
if not(os.path.isdir("DataOutput")):
    os.makedirs("DataOutput")
for idx in P_nonlin.keys():
    np.savetxt("./DataOutput/EmuData."+idx+".dat", np.c_[kvec, P_nonlin[idx], P_lin[idx], Boost[idx]])
# Simulation data
# (reference N-body power spectrum; the file must be present in the working
# directory -- it is interpolated onto the emulator's k grid in log-log space)
ksim, Psim, __ = np.loadtxt("AVG_EuclidReference.wRad.00100.pk", unpack=True)
fsim = CubicSpline(np.log10(ksim), np.log10(Psim))
Psim_intp = 10.0**fsim(np.log10(kvec))
# Plot the data
# Four stacked panels: linear P(k), boost B(k), non-linear P(k) against the
# reference simulation, and the relative error of the z=0 result in percent.
Fig, axs = plt.subplots(4,1,sharex=True)
ax = axs[0]
ax.loglog(kvec,P_lin['z0'],c="b",label='z='+str(z[0]))
ax.loglog(kvec,P_lin['z1'],c="r",label='z='+str(z[1]))
ax.loglog(kvec,P_lin['z2'],c="g",label='z='+str(z[2]))
ax.legend(loc='lower left')
ax.set_ylabel(r"$P_{\rm lin}(k)\enspace[({\rm Mpc}/h)^3]$")
ax = axs[1]
# Horizontal guide at B = 1 (no non-linear correction).
ax.axhline(y=1.0,ls=":",c="k")
ax.loglog(kvec,Boost['z0'],c="b",label='z='+str(z[0]))
ax.loglog(kvec,Boost['z1'],c="r",label='z='+str(z[1]))
ax.loglog(kvec,Boost['z2'],c="g",label='z='+str(z[2]))
ax.legend(loc='upper left')
ax.set_ylabel(r"$B(k)\enspace[1]$")
ax = axs[2]
ax.loglog(kvec,P_nonlin['z0'],c="b",label='z='+str(z[0]))
ax.loglog(kvec,P_nonlin['z1'],c="r",label='z='+str(z[1]))
ax.loglog(kvec,P_nonlin['z2'],c="g",label='z='+str(z[2]))
ax.loglog(kvec,Psim_intp,c='k', ls="--", label="z=0.0 (sim/ref)")
ax.legend(loc='lower left')
ax.set_ylabel(r"$P_{\rm nl}(k)\enspace[({\rm Mpc}/h)^3]$")
#ax.set_xlabel(r"$k\enspace[h/{\rm Mpc}]$")
ax = axs[3]
# Grey band marks the +-1% agreement region.
ax.axhspan(-1, 1, alpha=0.5, color='grey')
ax.axhline(y=0.0, ls=":", c='k')
ax.semilogx(kvec, 100*(P_nonlin['z0']/Psim_intp-1), c='b', ls="-")
ax.set_ylabel(r"$\varepsilon_{rel}\;[\%]$")
ax.set_xlabel(r"$k\enspace[h/{\rm Mpc}]$")
ax.set_xlim([0.005,5.35])
plt.show()
| 2,350 | 28.759494 | 102 | py |
EuclidEmulator | EuclidEmulator-master/examples/ProducePublicationPlot.py | import numpy as np
import matplotlib.pyplot as plt
import e2py
from classy import Class
csm = {'om_b': 0.0219961,
       'om_m': 0.1431991,
       'n_s': 0.96,
       'h': 0.67,
       'w_0': -1.0,
       'sigma_8': 0.83}
h = csm['h']
#zvec = np.array([0.0,0.5,1.0,2.0])
# Emulate the non-linear power spectrum at z = 0.5.
Pnl = e2py.get_pnonlin(csm,0.5)
kvec = Pnl['k']
kshape = kvec.shape
# Configure classy with the same cosmology, using Halofit as the
# theoretical comparison model.
ClassyPars = e2py.emu_to_class(csm)
ClassyPars['Omega_Lambda']=0.0
ClassyPars['output']='mPk'
ClassyPars['non linear']='Halofit'
ClassyPars['format']='camb'
ClassyPars['P_k_max_h/Mpc']=10.
ClassyPars['k_per_decade_for_pk']=300.
ClassyPars['z_pk']=0.5#'0.0','0.5','1.0','2.0'
cosmo=Class()
cosmo.set(ClassyPars)
cosmo.compute()
# Halofit spectrum on the emulator's k grid; k*h converts h/Mpc -> 1/Mpc for
# classy, and the h^3 factor converts the result from Mpc^3 to (Mpc/h)^3.
pHF = np.array([cosmo.pk(k*h,0.5)*h*h*h for k in kvec]).reshape(kshape)
# Upper panel: linear and emulated non-linear spectra; lower panel: relative
# deviation of the emulator from Halofit in percent (grey band: +-1%).
Fig, axs = plt.subplots(2,1, sharex=True)
ax = axs[0]
ax.loglog(kvec,Pnl['P_lin'], c='gray', label = r"$P_\rm{lin}^\rm{CLASS}$")
ax.loglog(kvec,Pnl['P_nonlin'], c='blue', label = r"$P_\rm{nl}^\rm{EE}=P_\rm{lin}^\rm{CLASS} * B$")
ax.grid(True)
ax.set_ylabel(r"P(k,z=0.5) [$(\rm{Mpc}/h)^3$]")
ax.set_xlim([0.01,5])
ax = axs[1]
ax.axhline(y=0, c="black", ls=":")
ax.axhspan(-1,1,color="gray", alpha=0.5)
ax.semilogx(kvec, 100*(Pnl['P_nonlin']/pHF-1), c="black")
ax.grid(True)
ax.set_xlabel(r"$k [h/{\rm Mpc}]$")
ax.set_ylabel(r"$\frac{P_{nl}^{EE}(k,z=0.5)-P^{THM}_{nl}(k,z=0.5)}{P^{THM}_{nl}(k,z=0.5)} [(\rm{Mpc}/h)^3]$")
ax.set_xlim([0.01,5])
plt.show()
# NOTE(review): duplicate call -- this second plt.show() is redundant (the
# first blocks until the figure window is closed).
plt.show()
| 1,415 | 24.285714 | 109 | py |
EuclidEmulator | EuclidEmulator-master/wrapper3/e2py/ee_observables.py | """
ee_observables.py
EuclidEmulator submodule for actual emulation of cosmological observables.
"""
# This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys as _sys
import numpy as _np
import warnings as _warnings
import EuclidEmulator_BackEnd as _eeb
from e2py._internal import _ee_aux as _aux
from e2py._internal import _ee_background as _bg
from e2py._internal import _ee_cosmoconv as _cc
import e2py.ee_input as _inp
import e2py._ee_lens as _lens
from scipy.integrate import romb as _romb
from scipy.interpolate import CubicSpline as _CubicSpline
try:
from classy import Class as _Class
except ImportError:
print("\nClassy could not be found in your system.")
print("Here are some suggestions:\n")
print("\t -Download the Class from class-code.net and install it")
print("\t together with its wrapper classy (type 'make' instead of")
print("\t 'make class'")
print("\t -If you know that Class is installed on your system")
print("\t and yet classy could not be installed, try re-compiling")
print("\t Class with just ''make'' instead of ''make class''")
print("NOTICE: Even without classy you can still use EuclidEmulator")
print(" to emulate boost factors. You won't be able to compute")
print(" full power spectra, though.")
def get_boost(emu_pars_dict, redshifts, custom_kvec=None, verbose=True):
    """
    Signature:   get_boost(emu_pars_dict, redshifts [, custom_kvec=None, verbose=True])
    Description: Computes the non-linear boost factor for a cosmology
                 defined in emu_pars_dict (a python dictionary containing
                 the values for the 6 LCDM parameters) at specified
                 redshift stored in a list or numpy.ndarray. Optionally,
                 a list or numpy.ndarray of k modes can be passed to the
                 function via the keyword argument "custom_kvec"; the
                 emulated boost is then spline-interpolated onto those
                 modes (in log-log space) and constantly extrapolated
                 outside the emulation range.
    Input types: python dictionary (with the six cosmological parameters)
                 list or numpy.ndarray (with redshift values)
                 :OPTIONAL:
                 list or numpy.ndarray (with k mode values)
                 boolean (verbosity)
    Output type: python dictionary with keys 'k' (the k modes) and 'B'
                 (the boost; a dict keyed 'z0', 'z1', ... for several
                 redshifts, a plain array for a single redshift)
    Related:     get_plin, get_pnonlin
    """
    # Check cosmological parameter ranges
    _inp.check_param_range(emu_pars_dict)
    # Accept a single scalar redshift as well as an iterable of redshifts.
    if isinstance(redshifts, (int, float)):
        redshifts = _np.asarray([redshifts])
    else:
        redshifts = _np.asarray(redshifts)
    # The emulator was trained only up to z = 5.
    for z in redshifts:
        assert z <= 5.0, "EuclidEmulator allows only redshifts z <= 5.0.\n"
    if not isinstance(emu_pars_dict, dict):
        print("The cosmological parameters must be passed as a python \
dictionary.\n")
        _sys.exit()
    # Call into the compiled EuclidEmulator back end; the six parameters
    # must be passed in this fixed order.
    boost_data = _eeb.emu_boost(_np.array([emu_pars_dict['om_b'],
                                           emu_pars_dict['om_m'],
                                           emu_pars_dict['n_s'],
                                           emu_pars_dict['h'],
                                           emu_pars_dict['w_0'],
                                           emu_pars_dict['sigma_8']]),
                                redshifts, verbose)
    kvals = boost_data.k
    k_shape = kvals.shape
    do_extrapolate_above = False
    do_extrapolate_below = False
    if not(custom_kvec is None):
        # Split the requested k modes into those inside the emulation range
        # (interpolated) and those below/above it (constantly extrapolated).
        upper_mask = custom_kvec < max(kvals)
        lower_mask = custom_kvec > min(kvals)
        mask = [u and l for (u,l) in zip(lower_mask, upper_mask)]
        custom_k_within_range = custom_kvec[mask]
        custom_k_below = custom_kvec[[not(l) for l in lower_mask]]
        custom_k_above = custom_kvec[[not(u) for u in upper_mask]]
        if any(custom_kvec > max(kvals)):
            wrn_message = ("EuclidEmulator emulates the non-linear correction in \n"
                           "the interval [6.87215e-3 h/Mpc, 5.52669h/Mpc]. You are \n"
                           "requesting k modes beyond k_max = 5.52669h/Mpc. \n"
                           "Higher k modes constantly extrapolated.")
            if verbose:
                _warnings.warn(wrn_message)
            do_extrapolate_above = True
        if any(custom_kvec < min(kvals)):
            wrn_message = ("EuclidEmulator emulates the non-linear correction in \n"
                           "the interval [6.87215e-3 h/Mpc, 5.52669h/Mpc]. You are \n"
                           "requesting k modes below k_min = 6.87215h/Mpc. \n"
                           "Lower k modes constantly extrapolated.")
            if verbose:
                _warnings.warn(wrn_message)
            do_extrapolate_below = True
    len_kvals = len(kvals)
    len_redshifts = len(redshifts)
    if len_redshifts > 1:
        # The back end returns the boosts for all redshifts concatenated
        # into one flat array; slice out one k-block per redshift.
        bvals = {}
        for i in range(len_redshifts):
            tmp = boost_data.boost[i*len_kvals:(i+1)*len_kvals]
            if not(custom_kvec is None):
                # Interpolate in log10-log10 space onto the requested modes.
                bvals['z'+str(i)] = 10.0**_CubicSpline(_np.log10(kvals),
                                                       _np.log10(tmp.reshape(k_shape))
                                                       )(_np.log10(custom_k_within_range))
                #Extrapolate if necessary
                if do_extrapolate_below:
                    # below the k_min of EuclidEmulator, we are in the linear regime where
                    # the boost factor is unity by construction
                    b_extrap = _np.ones_like(custom_k_below)
                    bvals['z'+str(i)]= _np.concatenate((b_extrap, bvals['z'+str(i)]))
                if do_extrapolate_above:
                    # We extrapolate by setting all b(k > k_max) to b(k_max)
                    b_extrap = bvals['z'+str(i)][-1] * _np.ones_like(custom_k_above)
                    bvals['z'+str(i)] = _np.concatenate((bvals['z'+str(i)], b_extrap))
            else:
                bvals['z'+str(i)] = tmp.reshape(k_shape)
    else:
        # Single redshift: return a plain array instead of a dict.
        tmp = boost_data.boost
        if not(custom_kvec is None):
            bvals = 10.0**_CubicSpline(_np.log10(kvals),
                                       _np.log10(tmp.reshape(k_shape))
                                       )(_np.log10(custom_k_within_range))
            #Extrapolate if necessary
            if do_extrapolate_below:
                # below the k_min of EuclidEmulator, we are in the linear regime where
                # the boost factor is unity by construction
                b_extrap = _np.ones_like(custom_k_below)
                bvals = _np.concatenate((b_extrap,bvals))
            if do_extrapolate_above:
                # We extrapolate by setting all b(k > k_max) to b(k_max)
                b_extrap = bvals[-1] * _np.ones_like(custom_k_above)
                bvals = _np.concatenate((bvals, b_extrap))
        else:
            bvals = tmp.reshape(k_shape)
    if not(custom_kvec is None): # This could probably be done cleaner!
        kvals = custom_kvec
    return {'k': kvals, 'B': bvals}
def get_pnonlin(emu_pars_dict, redshifts, custom_kvec=None, verbose=True):
    """
    Signature:   get_pnonlin(emu_pars_dict, redshifts [, custom_kvec=None, verbose=True])
    Description: Computes the linear power spectrum and the non-linear boost
                 separately for a given redshift z (or for a list or
                 numpy.ndarray of redshifts), a given cosmology defined in
                 emu_pars_dict (a python dictionary containing the values
                 for the 6 LCDM parameters) and optionally a list or
                 numpy.ndarray of k modes. Then it returns the product of
                 these two which is the non-linear DM-only power spectrum.
    Input types: python dictionary (with the six cosmological parameters)
                 float or iterable (list, numpy.ndarray) (with redshifts)
                 :OPTIONAL:
                 list or numpy.ndarray (with k mode values)
                 boolean (verbosity)
    Output type: python dictionary with keys 'k', 'P_nonlin', 'P_lin', 'B'
    Related:     get_plin, get_boost
    """
    # BUGFIX: the original guard referenced _Class.__module__, which raises
    # a NameError precisely when the classy import failed (then _Class is
    # undefined). Checking the module name in sys.modules is safe in both
    # cases and preserves the intended behavior.
    if 'classy' not in _sys.modules:
        print("You have not imported neither classee nor classy.\n \
              Emulating full power spectrum is hence not possible.")
        return None
    # Check cosmological parameter ranges
    _inp.check_param_range(emu_pars_dict)
    # Accept a single scalar redshift as well as an iterable of redshifts.
    if isinstance(redshifts, (int, float)):
        redshifts = _np.asarray([redshifts])
    else:
        redshifts = _np.asarray(redshifts)
    for z in redshifts:
        assert z <= 5.0, "EuclidEmulator allows only redshifts z <= 5.0.\n"
    # Non-linear boost from the emulator (possibly on user-supplied k modes).
    boost_dict = get_boost(emu_pars_dict, redshifts, custom_kvec, verbose)
    kvec = boost_dict['k']
    Bk = boost_dict['B']
    # Linear power spectrum from CLASS on exactly the same k modes.
    plin = get_plin(emu_pars_dict, kvec, redshifts)
    plin = plin['P_lin']
    if len(redshifts) == 1:
        pnonlin = plin*Bk
    else:
        # Multiple redshifts: both plin and Bk are dicts keyed 'z0', 'z1', ...
        pnonlin = {}
        for i, z in enumerate(redshifts):
            pnonlin['z'+str(i)] = plin['z'+str(i)]*Bk['z'+str(i)]
    return {'k': kvec, 'P_nonlin': pnonlin, 'P_lin': plin, 'B': Bk}
def get_plin(emu_pars_dict, custom_kvec, redshifts):
    """
    Signature:   get_plin(emu_pars_dict, custom_kvec, redshifts)
    Description: Computes the linear power spectrum at redshift z for a
                 cosmology defined in emu_pars_dict (a python dictionary
                 containing the values for the 6 LCDM parameters)
                 (uses classy).
    Input types: python dictionary (with the six cosmological parameters)
                 numpy.ndarray (containing the k modes, in h/Mpc)
                 numpy.ndarray (containing the redshift values)
    Output type: if len(redshifts)==1, then numpy.ndarray (containing the
                 linear power spectrum values, in (Mpc/h)^3)
                 if len(redshifts)>1, then dict with indices 'z0', 'z1',
                 'z2' etc.
    Related:     get_pnonlin, get_boost
    """
    # BUGFIX: the original guard referenced _Class.__module__, which raises
    # a NameError precisely when the classy import failed (then _Class is
    # undefined). Checking the module name in sys.modules is safe in both
    # cases and preserves the intended behavior.
    if 'classy' not in _sys.modules:
        print("You have not imported neither classee nor classy.\n \
              Computing linear power spectrum is hence not possible.")
        return None
    # Convert single redshift input argument to array
    if isinstance(redshifts, (int, float)):
        redshifts = _np.asarray([redshifts])
    else:
        redshifts = _np.asarray(redshifts)
    for z in redshifts:
        assert z <= 5.0, "EuclidEmulator allows only redshifts z <= 5.0.\n"
    # Convert single k mode input argument to array
    if isinstance(custom_kvec, (int, float)):
        custom_kvec = _np.asarray([custom_kvec])
    else:
        custom_kvec = _np.asarray(custom_kvec)
    # "Stringify" the input arrays to be understandable for classy.
    z_arr, z_str = _aux.stringify_arr(redshifts)
    # Convert the input dictionary into a Class-compatible dictionary
    class_pars_dict = _inp.emu_to_class(emu_pars_dict)
    # Extend the input Class-compatible dictionary by the additional
    # information requested by classy.
    classy_pars = class_pars_dict
    classy_pars['Omega_Lambda'] = 0.0
    classy_pars['output'] = 'mPk'
    classy_pars['P_k_max_1/Mpc'] = 10.0
    classy_pars['z_pk'] = z_str
    # Create a "Class" instance called "cosmo" and run classy to compute
    # the cosmological quantities.
    cosmo = _Class()
    cosmo.set(classy_pars)
    cosmo.compute()
    # Convert k units: CLASS expects 1/Mpc, the input is in h/Mpc.
    h = classy_pars['h']
    k_classy_arr = h*custom_kvec
    # Get shape of k vector
    k_shape = k_classy_arr.shape
    # Get power spectrum at tabulated z and k; the h^3 factor converts
    # CLASS's Mpc^3 output into (Mpc/h)^3.
    if len(z_arr) == 1:
        z = z_arr
        linpower = _np.array([cosmo.pk(k, z)*h*h*h
                              for k in k_classy_arr]).reshape(k_shape)
    else:
        linpower = {'z'+str(i):
                    _np.array([cosmo.pk(k, z)*h*h*h
                               for k in k_classy_arr]).reshape(k_shape)
                    for i, z in enumerate(z_arr)}
    return {'k': custom_kvec, 'P_lin': linpower}
| 12,537 | 38.677215 | 90 | py |
EuclidEmulator | EuclidEmulator-master/wrapper3/e2py/ee_input.py | """
ee_input.py
EuclidEmulator submodule containing functions related to argument parsing.
"""
# This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys as _sys
import pandas as _pd
from e2py._internal import _ee_cosmoconv as _cc
######################################################
#################### Check input #####################
######################################################
def check_param_range(par_dict, csm_index=0):
    """
    Checks if all parameters in the cosmology dictionary 'par_dict'
    (with index 'csm_index') passed to this function obey the limits
    set by the emulator.

    Raises a ValueError naming the offending parameter, its value and
    the allowed interval; returns None when all parameters are in range.
    """
    om_b_range = [0.0217, 0.0233]
    om_m_range = [0.1326, 0.1526]
    n_s_range = [0.9345, 0.9965]
    h_range = [0.6251, 0.7211]
    w_0_range = [-1.250, -0.750]
    sigma_8_range = [0.7684, 0.8614]
    om_b_not_in_range = om_b_range[0] > par_dict['om_b'] or\
                        om_b_range[1] < par_dict['om_b']
    om_m_not_in_range = om_m_range[0] > par_dict['om_m'] or\
                        om_m_range[1] < par_dict['om_m']
    n_s_not_in_range = n_s_range[0] > par_dict['n_s'] or\
                       n_s_range[1] < par_dict['n_s']
    h_not_in_range = h_range[0] > par_dict['h'] or\
                     h_range[1] < par_dict['h']
    w_0_not_in_range = w_0_range[0] > par_dict['w_0'] or\
                       w_0_range[1] < par_dict['w_0']
    sigma_8_not_in_range = sigma_8_range[0] > par_dict['sigma_8'] or\
                           sigma_8_range[1] < par_dict['sigma_8']
    if om_b_not_in_range:
        raise ValueError("Parameter range violation in cosmology %d: \
                         om_b is set to %f, but should be in the interval \
                         [0.0217, 0.0233]."
                         %(csm_index, par_dict['om_b']))
    if om_m_not_in_range:
        raise ValueError("Parameter range violation in cosmology %d: \
                         om_m is set to %f, but should be in the interval \
                         [0.1326, 0.1526]."
                         %(csm_index, par_dict['om_m']))
    if n_s_not_in_range:
        raise ValueError("Parameter range violation in cosmology %d: \
                         n_s is set to %f, but should be in the interval \
                         [0.9345, 0.9965]."
                         %(csm_index, par_dict['n_s']))
    if h_not_in_range:
        # BUGFIX: the message previously quoted the interval [0.6251, 0.9965],
        # which does not match the enforced range [0.6251, 0.7211].
        raise ValueError("Parameter range violation in cosmology %d: \
                         h is set to %f, but should be in the interval \
                         [0.6251, 0.7211]."
                         %(csm_index, par_dict['h']))
    if w_0_not_in_range:
        raise ValueError("Parameter range violation in cosmology %d: \
                         w_0 is set to %f, but should be in the interval \
                         [-1.250, -0.750]."
                         %(csm_index, par_dict['w_0']))
    if sigma_8_not_in_range:
        raise ValueError("Parameter range violation in cosmology %d: \
                         sigma_8 is set to %f, but should be in the interval \
                         [0.7684, 0.8614]."
                         %(csm_index, par_dict['sigma_8']))
######################################################
#################### Argument parsing ################
######################################################
def read_parfile(filename, sep=","):
    """
    Signature:   read_parfile(filename, sep=",")
    Description: Reads in a parameter file and returns a dictionary or a
                 list of dictionaries (if multiple cosmologies are specified)
                 with the parameters. Each row is range-checked via
                 check_param_range before being returned.
    Input type:  string (path to or name of parameter file)
    Output type: list of python dictionaries
    Raises:      ValueError if any cosmology violates the emulator's
                 parameter ranges.
    """
    list_of_cosmologies = []
    # BUGFIX: use raw strings for the regex pieces; '\s' inside a plain
    # string literal is an invalid escape sequence (a SyntaxWarning and,
    # eventually, an error on modern Python versions).
    parameters = _pd.read_csv(filename,
                              delimiter=r'\s*' + sep + r'\s*',
                              engine='python')
    for indx, row in parameters.iterrows():
        # Convert pandas.Series to dictionary (= required output type)
        cosmo_dict = row.to_dict()
        # Check if parameter ranges are obeyed
        check_param_range(cosmo_dict, indx)
        # Append to list
        list_of_cosmologies.append(cosmo_dict)
    return list_of_cosmologies
########################################################
################### Format conversion ##################
########################################################
def emu_to_class(emu_pars_dict):
    """
    Signature:   emu_to_class(emu_pars_dict)
    Description: Translates a parameter dictionary in EuclidEmulator
                 convention into one accepted by CLASS and CAMB.
    Input type:  python dictionary
    Output type: python dictionary
    Remark:      Expected keys: 'om_b', 'om_m', 'n_s', 'h', 'w_0',
                 'sigma_8'.
    Related:     class_to_emu
    """
    if not isinstance(emu_pars_dict, dict):
        print("The cosmological parameters must be passed as a \
              python dictionary.\n")
        _sys.exit()
    # CLASS wants the cold dark matter density; EuclidEmulator provides the
    # total matter density, so subtract the baryonic contribution.
    return {'omega_b': emu_pars_dict['om_b'],
            'omega_cdm': emu_pars_dict['om_m'] - emu_pars_dict['om_b'],
            'n_s': emu_pars_dict['n_s'],
            'h': emu_pars_dict['h'],
            'w0_fld': emu_pars_dict['w_0'],
            'sigma8': emu_pars_dict['sigma_8']
           }
def class_to_emu(class_pars_dict):
    """
    Signature:   class_to_emu(class_pars_dict)
    Description: Translates a parameter dictionary in CLASS/CAMB
                 convention into one accepted by EuclidEmulator.
    Input type:  python dictionary
    Output type: python dictionary
    Remark:      Expected keys: 'omega_b', 'omega_cdm', 'n_s', 'h',
                 'w0_fld', 'sigma8'.
    Related:     emu_to_class
    """
    if not isinstance(class_pars_dict, dict):
        print("The cosmological parameters must be passed as a \
              python dictionary.\n")
        _sys.exit()
    # EuclidEmulator works with the total matter density, i.e. the sum of
    # the baryonic and the cold dark matter contributions.
    return {'om_b': class_pars_dict['omega_b'],
            'om_m': class_pars_dict['omega_b'] + class_pars_dict['omega_cdm'],
            'n_s': class_pars_dict['n_s'],
            'h': class_pars_dict['h'],
            'w_0': class_pars_dict['w0_fld'],
            'sigma_8': class_pars_dict['sigma8']
           }
| 8,016 | 35.112613 | 86 | py |
EuclidEmulator | EuclidEmulator-master/wrapper3/e2py/__init__.py | # This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# License notice emitted at import time.
print('EuclidEmulator Copyright (C) 2018-2020 Mischa Knabenhans & Joachim Stadel')
print('This program comes with ABSOLUTELY NO WARRANTY.')
print('This is free software, and you are welcome to redistribute it')
print('under certain conditions. See http://www.gnu.org/licenses/ for further')
print('information.')
# Re-export the public API of the input-handling submodule.
from .ee_input import *
from .ee_observables import *
| 1,092 | 41.038462 | 82 | py |
EuclidEmulator | EuclidEmulator-master/wrapper3/e2py/_ee_lens.py | """
ee_lens.py
EuclidEmulator submodule for computation of cosmological lensing quantities.
REMARK: The geometry of the Universe is fixed to be flat (i.e.
Omega_curvature = 1) and the radiation energy density
is set to Om_rad = 4.183709411969527e-5/(h*h). These
values were assumed in the construction process of
EuclidEmulator and hence must be used whenever one is
working with it.
"""
# This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as _np
from scipy.integrate import romb as _romb
from scipy.interpolate import CubicSpline as _CubicSpline
from e2py._internal import _ee_aux as _aux
def lens_efficiency(sourcedist, dcomov, dcomov_lim, prec=12):
    """
    Signature:    lens_efficiency(sourcedist, dcomov, dcomov_lim, prec=12)
    Description:  Computes the lens efficiency function q (see e.g. equation
                  (24) of the review article by Martin Kilbinger "Cosmology
                  with cosmic shear observations: a review", July 21, 2015,
                  arXiv:1411.0115v2), given a source distribution function n
                  (tabulated in sourcedist['chi'] / sourcedist['n']) and one
                  or several comoving distances together with the limiting
                  comoving distance dcomov_lim.
    Input types:  sourcedist - dict with keys 'chi' and 'n'
                  dcomov     - float (or int) or np.ndarray
                  dcomov_lim - float
                  prec       - int (Romberg sampling: 2**prec + 1 points)
    Output types: float (or np.ndarray if type(dcomov)=np.ndarray)
    """
    # Spline of log10(n) against log10(chi): the source distribution is
    # interpolated in log-log space.
    log_n = _CubicSpline(_np.log10(sourcedist['chi']),
                         _np.log10(sourcedist['n']))

    def efficiency_at(distance):
        # Integrate n(chi) * (1 - distance/chi) from `distance` out to the
        # limiting comoving distance on 2**prec + 1 equidistant samples.
        chi_grid = _np.linspace(distance, dcomov_lim, 2**prec+1)
        n_of_chi = 10**log_n(_np.log10(chi_grid))
        weighted = n_of_chi * (1-distance/chi_grid)
        return _romb(weighted, chi_grid[1]-chi_grid[0])

    if isinstance(dcomov, _np.ndarray):
        return _np.asarray([efficiency_at(distance) for distance in dcomov])
    if isinstance(dcomov, (float, int)):
        return efficiency_at(dcomov)
    raise TypeError
class GalaxyRedshiftDist(object):
    # Author: Rongchuan Zhao
    """
    Parametrized galaxy redshift distribution of the form
    n(z) ~ (z/z_0)**alpha * exp(-(z/z_0)**beta) with z_0 = z_mean/1.412,
    area-normalized via the _aux.normalize decorator.
    """
    def __init__(self, alpha=2.0, beta=1.5, z_mean=0.9):
        # Shape parameters of the distribution and its mean redshift.
        self._alpha = alpha
        self._beta = beta
        self._z_mean = z_mean
    def __call__(self, z_mean):
        # Calling an instance with a new mean redshift returns the
        # normalized distribution function for that z_mean, inheriting
        # alpha and beta from this instance.
        return GalaxyRedshiftDist(z_mean=z_mean,
                                  alpha=self._alpha,
                                  beta=self._beta).gala_probdist_func
    def _z_0(self):
        # Pivot redshift; the constant 1.412 relates z_mean to z_0 for
        # this family of distributions.
        return self._z_mean/1.412
    @_aux.normalize()
    def _gala_probdist(self, z):
        # Unnormalized density; the decorator divides by its integral
        # over the default range so the result integrates to one.
        z_0 = self._z_0()
        return (z / z_0)**self._alpha*_np.exp(-(z/z_0)**self._beta)
    @property
    def gala_probdist_func(self):
return _aux.Function(self._gala_probdist)
| 3,585 | 34.50495 | 80 | py |
EuclidEmulator | EuclidEmulator-master/wrapper3/e2py/_internal/_ee_aux.py | """
_ee_aux.py
EuclidEmulator submodule for auxiliary functions.
"""
# This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys as _sys
import contextlib as _ctlib
import numpy as _np
from functools import wraps as _wraps
from functools import partial as _partial
from scipy.integrate import quad as _quad
# Auxiliary functions for formatting input such that CLASS understands
@_ctlib.contextmanager
def disable_numpy_summarization():
    # Author: Jeppe Mosgaard Dakin
    """
    Signature:   disable_numpy_summarization()
    Description: Context manager that temporarily raises numpy's print
                 threshold to infinity so arrays of any length are rendered
                 in full (no '...' summarization). The previous threshold
                 is restored on exit, even when an exception occurs.
    Input type:  None
    Output type: None
    """
    saved_threshold = _np.get_printoptions()['threshold']
    _np.set_printoptions(threshold=_np.inf)
    try:
        yield
    finally:
        _np.set_printoptions(threshold=saved_threshold)
def stringify_arr(arr):
    # Author: Jeppe Mosgaard Dakin
    """
    Signature:   stringify_arr(arr)
    Description: Takes an array and returns all entries as a single,
                 comma-separated string, together with the float array
                 parsed back from that string.
    Input type:  array
    Output type: tuple (numpy.ndarray, string)
    """
    with disable_numpy_summarization():
        arr_str = _np.array2string(
            arr,
            max_line_width=_np.inf,
            formatter={'float': lambda f: '{:.8e}'.format(f)},
            separator=',',
        ).strip('[]')
    # BUGFIX: text-mode numpy.fromstring is deprecated (and slated for
    # removal); parse the comma-separated string explicitly instead.
    if arr_str.strip():
        parsed = _np.array([float(tok) for tok in arr_str.split(',')])
    else:
        # Empty input array -> empty float array (fromstring warned here).
        parsed = _np.array([])
    return parsed, arr_str
def print_cosmology(emu_pars_dict):
    # Author: Mischa Knabenhans
    """
    Print the derived density parameters (matter, radiation, dark energy)
    of the cosmology given by emu_pars_dict as a '#'-commented header.
    Flat geometry and the fixed EuclidEmulator radiation density are assumed.
    """
    hubble = emu_pars_dict['h']
    hubble_sq = hubble*hubble
    # corresponds to 2.755 K Tcmb
    omega_rad = 4.183709411969527e-5
    Om_m = emu_pars_dict['om_m']/hubble_sq
    Om_rad = omega_rad/hubble_sq
    # Dark energy closes the budget (flat geometry).
    Om_DE = 1.0-Om_m-Om_rad
    print("#")
    print("# Cosmology:")
    print("# dOmega0: ", Om_m)
    print("# dOmegaRad: ", Om_rad)
    print("# dOmegaDE: ", Om_DE)
def progress(count, total, status=''):
    # Source: online
    """
    Render a 60-character text progress bar to stdout. The trailing
    carriage return makes successive calls overwrite the same line.
    """
    width = 60
    done = int(round(width * count / float(total)))
    pct = round(100.0 * count / float(total), 1)
    meter = '=' * done + '-' * (width - done)
    _sys.stdout.write('[%s] %s%s ...%s\r' % (meter, pct, '%', status))
    _sys.stdout.flush()
def normalize(ave_range=(0, _np.inf)):
    # Author: Rongchuan Zhao
    """
    Decorator factory: wraps a method func(self, x, *extra) so that its
    return value is divided by the integral of the method over `ave_range`
    (area normalization).

    Parameters
    ----------
    ave_range : 2-sequence, optional
        Integration limits (lower, upper) for the normalization constant.
        Defaults to (0, inf). NOTE: changed from a mutable list default
        to a tuple — mutable default arguments are a Python anti-pattern;
        indexing behavior is unchanged.

    Returns
    -------
    A decorator producing the normalized method.
    """
    def decorate(func):
        @_wraps(func)
        def norm_func(self, *args):
            # Bind the instance so quad can evaluate func(self, x, *extra);
            # extra positional arguments (beyond x) are forwarded to quad.
            inst_func = _partial(func, self)
            total = _quad(inst_func, ave_range[0], ave_range[1], args=args[1:])[0]
            return func(self, *args)/total
        return norm_func
    return decorate
class Function(object):
    # Author: Rongchuan Zhao
    """
    Thin wrapper turning a plain callable into an object supporting
    pointwise arithmetic: (f + g)(x) == f(x) + g(x), and analogously
    for -, *, / and ** (with a scalar exponent).
    """
    def __init__(self, func):
        # The wrapped callable; evaluated lazily by the operators below.
        self.func = func
    def __add__(self, other):
        return Function(lambda x: self.func(x) + other.func(x))
    def __sub__(self, other):
        return Function(lambda x: self.func(x) - other.func(x))
    def __mul__(self, other):
        return Function(lambda x: self.func(x) * other.func(x))
    def __truediv__(self, other):
        # BUGFIX: Python 3 dispatches the '/' operator to __truediv__;
        # the original only defined __div__ (a Python 2 name), so f / g
        # raised TypeError under Python 3.
        return Function(lambda x: self.func(x) / other.func(x))
    # Keep the old name as an alias for any code that referenced it directly.
    __div__ = __truediv__
    def __pow__(self, index):
        return Function(lambda x: self.func(x)**index)
    def __call__(self, *arg, **kwarg):
        return self.func(*arg, **kwarg)
EuclidEmulator | EuclidEmulator-master/wrapper3/e2py/_internal/_ee_background.py | """
ee_background.py
EuclidEmulator submodule for computation of cosmological background quantities.
REMARK: The geometry of the Universe is fixed to be flat (i.e.
Omega_curvature = 1) and the radiation energy density
is set to Om_rad = 4.183709411969527e-5/(h*h). These
values were assumed in the construction process of
EuclidEmulator and hence must be used whenever one is
working with it.
"""
# This file is part of EuclidEmulator
# Copyright (c) 2018-2020 Mischa Knabenhans
#
# EuclidEmulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# EuclidEmulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as _np
from scipy.integrate import romb as _romb
from . import _ee_cosmoconv as _cc
# UNIVERSAL CONSTANTS (Source: PDG booklet 2018)
SPEED_OF_LIGHT_IN_KILOMETERS_PER_SECOND = 299792.458
NEWTONS_CONSTANT = 6.6740908 * 1e-11 # in m^3kg^(-1)s^(-2)
MEGAPARSEC = 3.08567758149 * 1e22 # meters
M_SOLAR_IN_KG = 1.98848 *1e30
def dist_comov(emu_pars_dict, z1, z2, prec=12):
    """
    Signature:   dist_comov(emu_pars_dict, z1, z2, prec=12)
    Description: Computes the comoving distance (in units of Mpc/h) between
                 objects at redshifts z1 and z2 for the cosmology given in
                 emu_pars_dict, by Romberg-integrating 1/(a^2 H(a)) over
                 2**prec + 1 equidistant samples of the scale factor.
    Input type:  python dictionary (containing the 6 LCDM parameters)
                 floats or np.ndarrays for z1 and z2
    Output type: np.ndarray
    REMARK 1:    If z1 and z2 are vectors of length n, the distances are
                 computed pairwise, (z1_i, z2_i) for each i, yielding a
                 length-n result (NOT all n(n-1) combinations).
    REMARK 2:    Flat geometry and the fixed EuclidEmulator radiation
                 density are assumed (see module docstring).
    """
    if isinstance(z1,(float,int)) and isinstance(z2,(float,int)):
        z1 = _np.array([z1])
        # BUGFIX: this line previously used the undefined name "np"
        # (the module is imported as "_np"), so passing two scalar
        # redshifts raised a NameError.
        z2 = _np.array([z2])
    a1_vec = _cc.z_to_a(z1)
    a2_vec = _cc.z_to_a(z2)
    d_comov = []
    for a1,a2 in zip(a1_vec,a2_vec):
        if a1 > a2:
            a1, a2 = a2, a1 # swap values so integration runs forwards
        avec = _np.linspace(a1, a2, 2**prec+1)
        delta_a = avec[1]-avec[0]
        H = _cc.a_to_hubble(emu_pars_dict,avec)
        # chi = integral da / (a^2 H(a)), sampled Romberg integration.
        d_comov.append(_romb(1./(avec*avec*H), dx=delta_a))
    chi = _np.array(d_comov) # here the comoving distances have units of Mpc/(km/s)
    # return result in units of Mpc/h
    return SPEED_OF_LIGHT_IN_KILOMETERS_PER_SECOND * chi * emu_pars_dict['h']
| 3,849 | 39.526316 | 83 | py |
EuclidEmulator | EuclidEmulator-master/wrapper3/e2py/_internal/_ee_cosmoconv.py | """
ee_cosmoconv.py
EuclidEmulator submodule for converting cosmological quantities.
REMARK: The geometry of the Universe is fixed to be flat (i.e.
Omega_curvature = 1) and the radiation energy density
is set to Om_rad = 4.183709411969527e-5/(h*h). These
values were assumed in the construction process of
EuclidEmulator and hence must be used whenever one is
working with it.
"""
import numpy as _np
from . import _ee_background as _bg
def z_to_a(z):
    """
    Signature:   z_to_a(z)
    Description: Converts a redshift z into the corresponding scale
                 factor a via a = 1 / (1 + z).
    Input type:  float or numpy.ndarray
    Output type: float or numpy.ndarray
    """
    return 1.0 / (z + 1.0)
def a_to_z(a):
    """
    Signature:   a_to_z(a)
    Description: Converts a scale factor a into the corresponding
                 redshift z via z = 1/a - 1 (inverse of z_to_a).
    Input type:  float or numpy.ndarray
    Output type: float or numpy.ndarray
    """
    return (1.0 / a) - 1.0
def a_to_hubble(emu_pars_dict, a):
    """
    Signature:   a_to_hubble(emu_pars_dict, a)
    Description: Computes the Hubble parameter H(a), in (km/s)/Mpc, at a
                 given scale factor a for the cosmology in emu_pars_dict
                 (keys used: 'h', 'om_m', 'w_0').
    Input type:  type(a) = float or numpy.ndarray
                 emu_pars_dict is a python dictionary
    Output type: float or numpy.ndarray (matching a)
    REMARK:      The geometry of the Universe is fixed to be flat and the
                 radiation energy density is set to
                 Om_rad = 4.183709411969527e-5/(h*h). These values were
                 assumed in the construction process of EuclidEmulator and
                 hence must be used whenever one is working with it.
    """
    a_inv = 1.0/a
    h = emu_pars_dict['h']
    Om_m = emu_pars_dict['om_m']/(h*h)
    H0 = 100.0 * h # * (km/s)/Mpc
    w_0 = emu_pars_dict['w_0']
    # We explicitly set the radiation energy density. By hardcoding this
    # we make sure that the user cannot choose Om_rad values inconsistent
    # with that used in the construction process of EuclidEmulator.
    # Same equation as in CLASS (cf. CLASS code, input.c, lines 643 & 714).
    Om_rad = 4.183709411969527e-5/(h*h)
    # We infer Om_DE assuming flat geometry (Om_curve = 0.0), again matching
    # the construction of EuclidEmulator. Because Om_DE is defined as
    # 1 - (all other densities), the budget closes by construction; the
    # previous runtime assert compared floats for EXACT equality and could
    # therefore fail spuriously due to round-off, so it was removed.
    Om_curve = 0.0
    Om_DE = 1.0-Om_curve-Om_m-Om_rad
    # Friedmann equation: each component scales with its own power of 1/a.
    curvature = Om_curve * a_inv * a_inv
    matter = Om_m * a_inv * a_inv * a_inv
    radiation = Om_rad * a_inv * a_inv * a_inv * a_inv
    darkenergy = Om_DE * a_inv**(3.0+3.0*w_0)
    H = H0 * _np.sqrt(curvature + matter + radiation + darkenergy)
    return H # in the standard units of (km/s)/Mpc
def k_to_l(emu_pars_dict, k, z, prec=12):
    """
    Signature:   k_to_l(emu_pars_dict, k, z, prec=12)
    Description: Converts a numpy.array k into a numpy.array l for a given
                 redshift z. Here, k denotes the wave numbers and l the
                 multipole order. The keyword argument "prec" is a precision
                 parameter for the romberg integration that has to be computed
                 in the course of the comoving distance calculation.
    Input types: type(emu_pars_dict) = dict
                 type(k) = numpy.ndarray
                 type(z) = float
                 type(prec) = int
    Output type: numpy.ndarray
    REMARK:      The geometry of the Universe is fixed to be flat (i.e.
                 Omega_curvature = 0,
                 and as a result the comoving angular diameter distance
                 equals the comoving distance) and the radiation energy
                 density is set to
                 Om_rad = 4.183709411969527e-5/(h*h).
                 These values were assumed in the construction process
                 of EuclidEmulator and hence must be used whenever one
                 is working with it.
    """
    # l = k * D_comov(z) for a flat universe.
    # NOTE(review): the lower integration bound 1e-13 presumably stands in
    # for z = 0 to avoid a lower-bound issue — confirm in _bg.dist_comov.
    return k * _bg.dist_comov(emu_pars_dict, 1e-13, z, prec)
def l_to_k(emu_pars_dict, l, z, prec=12):
    """
    Signature:   l_to_k(emu_pars_dict, l, z, prec=12)
    Description: Converts a numpy.array l into a numpy.array k for a given
                 redshift z. Here, k denotes the wave numbers and l the
                 multipole order. The keyword argument "prec" is a precision
                 parameter for the romberg integration that has to be computed
                 in the course of the comoving distance calculation.
    Input types: type(emu_pars_dict) = dict
                 type(l) = numpy.ndarray
                 type(z) = float
                 type(prec) = int
    Output type: numpy.ndarray
    REMARK:      The geometry of the Universe is fixed to be flat (i.e.
                 Omega_curvature = 0,
                 and as a result the comoving angular diameter distance
                 equals the comoving distance) and the radiation energy
                 density is set to
                 Om_rad = 4.183709411969527e-5/(h*h).
                 These values were assumed in the construction process
                 of EuclidEmulator and hence must be used whenever one
                 is working with it.
    """
    # k = l / D_comov(z): inverse of k_to_l above, same integration bounds.
    return l/_bg.dist_comov(emu_pars_dict, 1e-13, z, prec)
| 5,665 | 34.4125 | 83 | py |
EuclidEmulator | EuclidEmulator-master/wrapper3/e2py/_internal/__init__.py | 0 | 0 | 0 | py | |
HIPT | HIPT-master/1-Hierarchical-Pretraining/eval_copy_detection.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pickle
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import models as torchvision_models
from torchvision import transforms as pth_transforms
from PIL import Image, ImageFile
import numpy as np
import utils
import vision_transformer as vits
from eval_knn import extract_features
class CopydaysDataset():
    """Accessor for the Copydays copy-detection benchmark on disk.

    The benchmark is organised in "blocks" of images: the originals, a
    "strong" attacked set, and series of jpeg-quality and crop distortions.
    Every block is used as a source of queries; the database that is
    searched contains the originals only.
    """

    def __init__(self, basedir):
        # Root directory containing original/, strong/, jpegqual/*, crops/*.
        self.basedir = basedir
        self.block_names = (
            ['original', 'strong'] +
            ['jpegqual/%d' % i for i in
             [3, 5, 8, 10, 15, 20, 30, 50, 75]] +
            ['crops/%d' % i for i in
             [10, 15, 20, 30, 40, 50, 60, 70, 80]])
        self.nblocks = len(self.block_names)
        # every block serves as a query block
        self.query_blocks = range(self.nblocks)
        # number of queries per block: 157 everywhere except 'strong'
        # (index 1), which holds 229 images
        self.q_block_sizes = np.ones(self.nblocks, dtype=int) * 157
        self.q_block_sizes[1] = 229
        # search only among originals
        self.database_blocks = [0]

    def get_block(self, i):
        """Return the sorted full paths of the .jpg files of block i."""
        dirname = self.basedir + '/' + self.block_names[i]
        fnames = [dirname + '/' + fname
                  for fname in sorted(os.listdir(dirname))
                  if fname.endswith('.jpg')]
        return fnames

    def get_block_filenames(self, subdir_name):
        """Return the sorted bare .jpg filenames inside `subdir_name`."""
        dirname = self.basedir + '/' + subdir_name
        return [fname
                for fname in sorted(os.listdir(dirname))
                if fname.endswith('.jpg')]

    def eval_result(self, ids, distances):
        """Print the mean average precision for each block.

        ids: ranked database indices for every query, with the queries of
             all blocks concatenated in block order (sliced per block via
             q_block_sizes).
        distances: matching distances; unused here, kept for symmetry with
             the call site.
        """
        j0 = 0
        for i in range(self.nblocks):
            j1 = j0 + self.q_block_sizes[i]
            block_name = self.block_names[i]
            I = ids[j0:j1]   # block size
            sum_AP = 0
            if block_name != 'strong':
                # 1:1 mapping of files to names
                positives_per_query = [[i] for i in range(j1 - j0)]
            else:
                originals = self.get_block_filenames('original')
                strongs = self.get_block_filenames('strong')
                # check if prefixes match
                positives_per_query = [
                    [j for j, bname in enumerate(originals)
                     if bname[:4] == qname[:4]]
                    for qname in strongs]

            for qno, Iline in enumerate(I):
                positives = positives_per_query[qno]
                ranks = []
                for rank, bno in enumerate(Iline):
                    if bno in positives:
                        ranks.append(rank)
                sum_AP += score_ap_from_ranks_1(ranks, len(positives))
            print("eval on %s mAP=%.3f" % (
                block_name, sum_AP / (j1 - j0)))
            j0 = j1
# from the Holidays evaluation package
def score_ap_from_ranks_1(ranks, nres):
    """ Compute the average precision of one search.
        ranks = ordered list of ranks of true positives
        nres  = total number of positives in dataset
    """
    # The PR curve is integrated as a sum of trapezoids, one per true
    # positive; each trapezoid spans a recall interval of 1/nres.
    recall_step = 1.0 / nres
    average_precision = 0.0
    for n_recalled, rank in enumerate(ranks):
        # precision just before retrieving this true positive
        # (n_recalled true positives among `rank` retrieved items) ...
        prec_left = 1.0 if rank == 0 else n_recalled / float(rank)
        # ... and just after retrieving it
        prec_right = (n_recalled + 1) / float(rank + 1)
        average_precision += 0.5 * (prec_left + prec_right) * recall_step
    return average_precision
class ImgListDataset(torch.utils.data.Dataset):
    """Minimal dataset over a list of image file paths.

    Yields (image, index) pairs so features can later be written back to
    the position matching each input path.
    """

    def __init__(self, img_list, transform=None):
        self.samples = img_list
        self.transform = transform

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, i):
        path = self.samples[i]
        with open(path, 'rb') as stream:
            image = Image.open(stream).convert('RGB')
        if self.transform is not None:
            image = self.transform(image)
        return image, i
def is_image_file(s):
    """Return True if filename `s` ends with a recognized image extension.

    The comparison is case-insensitive (e.g. "a.JPG" now matches too);
    the previous version rejected uppercase extensions. All filenames
    accepted before are still accepted.
    """
    ext = s.split(".")[-1].lower()
    return ext in {'jpg', 'jpeg', 'png', 'ppm', 'bmp', 'pgm', 'tif', 'tiff', 'webp'}
# NOTE(review): this definition shadows the `extract_features` imported
# from eval_knn at the top of the file.
@torch.no_grad()
def extract_features(image_list, model, args):
    """Compute one feature vector per image path in `image_list`.

    Each feature is the concatenation of the [CLS] token and the
    GeM-pooled (exponent 4) patch tokens of the last transformer block.
    Results are gathered across all distributed ranks; the full feature
    matrix is returned on rank 0 and None on every other rank.
    """
    transform = pth_transforms.Compose([
        pth_transforms.Resize((args.imsize, args.imsize), interpolation=3),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    tempdataset = ImgListDataset(image_list, transform=transform)
    data_loader = torch.utils.data.DataLoader(tempdataset, batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers, drop_last=False,
        sampler=torch.utils.data.DistributedSampler(tempdataset, shuffle=False))
    features = None
    for samples, index in utils.MetricLogger(delimiter="  ").log_every(data_loader, 10):
        samples, index = samples.cuda(non_blocking=True), index.cuda(non_blocking=True)
        feats = model.get_intermediate_layers(samples, n=1)[0].clone()

        cls_output_token = feats[:, 0, :]  # [CLS] token
        # GeM with exponent 4 for output patch tokens
        b, h, w, d = len(samples), int(samples.shape[-2] / model.patch_embed.patch_size), int(samples.shape[-1] / model.patch_embed.patch_size), feats.shape[-1]
        feats = feats[:, 1:, :].reshape(b, h, w, d)
        feats = feats.clamp(min=1e-6).permute(0, 3, 1, 2)
        feats = nn.functional.avg_pool2d(feats.pow(4), (h, w)).pow(1. / 4).reshape(b, -1)
        # concatenate [CLS] token and GeM pooled patch tokens
        feats = torch.cat((cls_output_token, feats), dim=1)

        # init storage feature matrix (lazy: feature width only known here)
        if dist.get_rank() == 0 and features is None:
            features = torch.zeros(len(data_loader.dataset), feats.shape[-1])
            if args.use_cuda:
                features = features.cuda(non_blocking=True)

        # get indexes from all processes
        y_all = torch.empty(dist.get_world_size(), index.size(0), dtype=index.dtype, device=index.device)
        y_l = list(y_all.unbind(0))
        y_all_reduce = torch.distributed.all_gather(y_l, index, async_op=True)
        y_all_reduce.wait()
        index_all = torch.cat(y_l)

        # share features between processes
        feats_all = torch.empty(dist.get_world_size(), feats.size(0), feats.size(1),
            dtype=feats.dtype, device=feats.device)
        output_l = list(feats_all.unbind(0))
        output_all_reduce = torch.distributed.all_gather(output_l, feats, async_op=True)
        output_all_reduce.wait()

        # update storage feature matrix (index_copy_ places each gathered
        # row at the position of its original dataset index)
        if dist.get_rank() == 0:
            if args.use_cuda:
                features.index_copy_(0, index_all, torch.cat(output_l))
            else:
                features.index_copy_(0, index_all.cpu(), torch.cat(output_l).cpu())
    return features  # features is still None for every rank which is not 0 (main)
if __name__ == '__main__':
    # Copy-detection entry point: build a frozen backbone, extract features
    # for queries / database / distractors, optionally whiten, then rank by
    # cosine similarity and print per-block mAP.
    parser = argparse.ArgumentParser('Copy detection on Copydays')
    parser.add_argument('--data_path', default='/path/to/copydays/', type=str,
        help="See https://lear.inrialpes.fr/~jegou/data.php#copydays")
    parser.add_argument('--whitening_path', default='/path/to/whitening_data/', type=str,
        help="""Path to directory with images used for computing the whitening operator.
        In our paper, we use 20k random images from YFCC100M.""")
    parser.add_argument('--distractors_path', default='/path/to/distractors/', type=str,
        help="Path to directory with distractors images. In our paper, we use 10k random images from YFCC100M.")
    parser.add_argument('--imsize', default=320, type=int, help='Image size (square image)')
    parser.add_argument('--batch_size_per_gpu', default=16, type=int, help='Per-GPU batch-size')
    parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
    parser.add_argument('--use_cuda', default=True, type=utils.bool_flag)
    parser.add_argument('--arch', default='vit_base', type=str, help='Architecture')
    parser.add_argument('--patch_size', default=8, type=int, help='Patch resolution of the model.')
    parser.add_argument("--checkpoint_key", default="teacher", type=str,
        help='Key to use in the checkpoint (example: "teacher")')
    parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
    parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
        distributed training; see https://pytorch.org/docs/stable/distributed.html""")
    parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
    args = parser.parse_args()

    utils.init_distributed_mode(args)
    print("git:\n  {}\n".format(utils.get_sha()))
    print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
    cudnn.benchmark = True

    # ============ building network ... ============
    if "vit" in args.arch:
        model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
        print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
    else:
        print(f"Architecture {args.arch} non supported")
        sys.exit(1)
    if args.use_cuda:
        model.cuda()
    model.eval()
    utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)

    dataset = CopydaysDataset(args.data_path)

    # ============ Extract features ... ============
    # extract features for queries
    queries = []
    for q in dataset.query_blocks:
        queries.append(extract_features(dataset.get_block(q), model, args))
    if utils.get_rank() == 0:
        queries = torch.cat(queries)
        print(f"Extraction of queries features done. Shape: {queries.shape}")

    # extract features for database
    database = []
    for b in dataset.database_blocks:
        database.append(extract_features(dataset.get_block(b), model, args))

    # extract features for distractors
    if os.path.isdir(args.distractors_path):
        print("Using distractors...")
        list_distractors = [os.path.join(args.distractors_path, s) for s in os.listdir(args.distractors_path) if is_image_file(s)]
        database.append(extract_features(list_distractors, model, args))
    if utils.get_rank() == 0:
        database = torch.cat(database)
        print(f"Extraction of database and distractors features done. Shape: {database.shape}")

    # ============ Whitening ... ============
    # Optional: learn a PCA-whitening operator on an auxiliary image set
    # and apply it to both database and query features.
    if os.path.isdir(args.whitening_path):
        print(f"Extracting features on images from {args.whitening_path} for learning the whitening operator.")
        list_whit = [os.path.join(args.whitening_path, s) for s in os.listdir(args.whitening_path) if is_image_file(s)]
        features_for_whitening = extract_features(list_whit, model, args)
        if utils.get_rank() == 0:
            # center
            mean_feature = torch.mean(features_for_whitening, dim=0)
            database -= mean_feature
            queries -= mean_feature
            pca = utils.PCA(dim=database.shape[-1], whit=0.5)
            # compute covariance
            cov = torch.mm(features_for_whitening.T, features_for_whitening) / features_for_whitening.shape[0]
            pca.train_pca(cov.cpu().numpy())
            database = pca.apply(database)
            queries = pca.apply(queries)

    # ============ Copy detection ... ============
    if utils.get_rank() == 0:
        # l2 normalize the features
        database = nn.functional.normalize(database, dim=1, p=2)
        queries = nn.functional.normalize(queries, dim=1, p=2)

        # similarity
        similarity = torch.mm(queries, database.T)
        distances, indices = similarity.topk(20, largest=True, sorted=True)

        # evaluate (eval_result prints the mAPs and returns None)
        retrieved = dataset.eval_result(indices, distances)
    dist.barrier()
| 12,631 | 40.827815 | 160 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/eval_linear.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import json
from pathlib import Path
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
def eval_linear(args):
    """Train (or just evaluate) a linear classifier on frozen backbone features.

    Builds the backbone selected by ``args.arch``, freezes it, trains a
    distributed ``LinearClassifier`` head on top of its features (or, with
    ``args.evaluate``, loads reference linear weights and only validates),
    checkpoints every epoch, and prints the best top-1 accuracy.

    ``args`` is the argparse namespace built in ``__main__`` below.
    """
    # Bug fix: `sys` is used on the unknown-architecture path below but was
    # never imported at module level in this file, so that path raised a
    # NameError instead of exiting cleanly.
    import sys

    utils.init_distributed_mode(args)
    print("git:\n  {}\n".format(utils.get_sha()))
    print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
    cudnn.benchmark = True

    # ============ building network ... ============
    # if the network is a Vision Transformer (i.e. vit_tiny, vit_small, vit_base)
    if args.arch in vits.__dict__.keys():
        model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
        # feature width: one [CLS] token per used block, plus an optional
        # average-pooled patch-token vector
        embed_dim = model.embed_dim * (args.n_last_blocks + int(args.avgpool_patchtokens))
    # if the network is a XCiT
    elif "xcit" in args.arch:
        model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0)
        embed_dim = model.embed_dim
    # otherwise, we check if the architecture is in torchvision models
    elif args.arch in torchvision_models.__dict__.keys():
        model = torchvision_models.__dict__[args.arch]()
        embed_dim = model.fc.weight.shape[1]
        model.fc = nn.Identity()
    else:
        print(f"Unknown architecture: {args.arch}")
        sys.exit(1)
    model.cuda()
    model.eval()
    # load weights to evaluate
    utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
    print(f"Model {args.arch} built.")

    linear_classifier = LinearClassifier(embed_dim, num_labels=args.num_labels)
    linear_classifier = linear_classifier.cuda()
    linear_classifier = nn.parallel.DistributedDataParallel(linear_classifier, device_ids=[args.gpu])

    # ============ preparing data ... ============
    val_transform = pth_transforms.Compose([
        pth_transforms.Resize(256, interpolation=3),
        pth_transforms.CenterCrop(224),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    dataset_val = datasets.ImageFolder(os.path.join(args.data_path, "val"), transform=val_transform)
    val_loader = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
    )

    if args.evaluate:
        # Evaluation-only mode: load reference linear weights and report accuracy.
        utils.load_pretrained_linear_weights(linear_classifier, args.arch, args.patch_size)
        test_stats = validate_network(val_loader, model, linear_classifier, args.n_last_blocks, args.avgpool_patchtokens)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        return

    train_transform = pth_transforms.Compose([
        pth_transforms.RandomResizedCrop(224),
        pth_transforms.RandomHorizontalFlip(),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    dataset_train = datasets.ImageFolder(os.path.join(args.data_path, "train"), transform=train_transform)
    sampler = torch.utils.data.distributed.DistributedSampler(dataset_train)
    train_loader = torch.utils.data.DataLoader(
        dataset_train,
        sampler=sampler,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
    )
    print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")

    # set optimizer
    optimizer = torch.optim.SGD(
        linear_classifier.parameters(),
        args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256.,  # linear scaling rule
        momentum=0.9,
        weight_decay=0,  # we do not apply weight decay
    )
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=0)

    # Optionally resume from a checkpoint
    to_restore = {"epoch": 0, "best_acc": 0.}
    utils.restart_from_checkpoint(
        os.path.join(args.output_dir, "checkpoint.pth.tar"),
        run_variables=to_restore,
        state_dict=linear_classifier,
        optimizer=optimizer,
        scheduler=scheduler,
    )
    start_epoch = to_restore["epoch"]
    best_acc = to_restore["best_acc"]

    for epoch in range(start_epoch, args.epochs):
        train_loader.sampler.set_epoch(epoch)

        train_stats = train(model, linear_classifier, optimizer, train_loader, epoch, args.n_last_blocks, args.avgpool_patchtokens)
        scheduler.step()

        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     'epoch': epoch}
        if epoch % args.val_freq == 0 or epoch == args.epochs - 1:
            test_stats = validate_network(val_loader, model, linear_classifier, args.n_last_blocks, args.avgpool_patchtokens)
            print(f"Accuracy at epoch {epoch} of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
            best_acc = max(best_acc, test_stats["acc1"])
            print(f'Max accuracy so far: {best_acc:.2f}%')
            log_stats = {**{k: v for k, v in log_stats.items()},
                         **{f'test_{k}': v for k, v in test_stats.items()}}
        if utils.is_main_process():
            with (Path(args.output_dir) / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
            save_dict = {
                "epoch": epoch + 1,
                "state_dict": linear_classifier.state_dict(),
                "optimizer": optimizer.state_dict(),
                "scheduler": scheduler.state_dict(),
                "best_acc": best_acc,
            }
            torch.save(save_dict, os.path.join(args.output_dir, "checkpoint.pth.tar"))
    print("Training of the supervised linear classifier on frozen features completed.\n"
          "Top-1 test accuracy: {acc:.1f}".format(acc=best_acc))
def train(model, linear_classifier, optimizer, loader, epoch, n, avgpool):
    """Run one training epoch of the linear classifier on frozen features.

    model: frozen backbone (features are computed under torch.no_grad()).
    n: number of last blocks whose [CLS] tokens are concatenated (ViT only).
    avgpool: if truthy, also concatenate the mean of the patch tokens.
    Returns the dict of globally-averaged meters (loss, lr).

    NOTE(review): reads the module-level `args` (args.arch) defined in
    __main__ — only callable after argument parsing.
    """
    linear_classifier.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    for (inp, target) in metric_logger.log_every(loader, 20, header):
        # move to gpu
        inp = inp.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)

        # forward (backbone is frozen: no gradients through the features)
        with torch.no_grad():
            if "vit" in args.arch:
                intermediate_output = model.get_intermediate_layers(inp, n)
                output = torch.cat([x[:, 0] for x in intermediate_output], dim=-1)
                if avgpool:
                    output = torch.cat((output.unsqueeze(-1), torch.mean(intermediate_output[-1][:, 1:], dim=1).unsqueeze(-1)), dim=-1)
                    output = output.reshape(output.shape[0], -1)
            else:
                output = model(inp)
        output = linear_classifier(output)

        # compute cross entropy loss
        loss = nn.CrossEntropyLoss()(output, target)

        # compute the gradients
        optimizer.zero_grad()
        loss.backward()

        # step
        optimizer.step()

        # log
        torch.cuda.synchronize()
        metric_logger.update(loss=loss.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def validate_network(val_loader, model, linear_classifier, n, avgpool):
    """Evaluate the linear classifier on `val_loader`.

    Reports top-1 (and top-5 when the classifier has >= 5 labels) accuracy
    plus loss, and returns the dict of globally-averaged meters.

    NOTE(review): reads the module-level `args` (args.arch) defined in
    __main__ — only callable after argument parsing.
    """
    linear_classifier.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'
    for inp, target in metric_logger.log_every(val_loader, 20, header):
        # move to gpu
        inp = inp.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)

        # forward
        with torch.no_grad():
            if "vit" in args.arch:
                intermediate_output = model.get_intermediate_layers(inp, n)
                output = torch.cat([x[:, 0] for x in intermediate_output], dim=-1)
                if avgpool:
                    output = torch.cat((output.unsqueeze(-1), torch.mean(intermediate_output[-1][:, 1:], dim=1).unsqueeze(-1)), dim=-1)
                    output = output.reshape(output.shape[0], -1)
            else:
                output = model(inp)
        output = linear_classifier(output)
        loss = nn.CrossEntropyLoss()(output, target)

        # top-5 accuracy only makes sense with at least 5 classes
        if linear_classifier.module.num_labels >= 5:
            acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
        else:
            acc1, = utils.accuracy(output, target, topk=(1,))

        batch_size = inp.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        if linear_classifier.module.num_labels >= 5:
            metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    if linear_classifier.module.num_labels >= 5:
        print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    else:
        print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, losses=metric_logger.loss))
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
class LinearClassifier(nn.Module):
    """Linear layer to train on top of frozen features"""

    def __init__(self, dim, num_labels=1000):
        super(LinearClassifier, self).__init__()
        self.num_labels = num_labels
        self.linear = nn.Linear(dim, num_labels)
        # small gaussian weights, zero bias (same init as before)
        self.linear.weight.data.normal_(mean=0.0, std=0.01)
        self.linear.bias.data.zero_()

    def forward(self, x):
        """Flatten the input to (batch, -1) and apply the linear map."""
        flat = x.view(x.size(0), -1)
        return self.linear(flat)
if __name__ == '__main__':
    # Parse CLI arguments and launch the linear evaluation defined above.
    parser = argparse.ArgumentParser('Evaluation with linear classification on ImageNet')
    parser.add_argument('--n_last_blocks', default=4, type=int, help="""Concatenate [CLS] tokens
        for the `n` last blocks. We use `n=4` when evaluating ViT-Small and `n=1` with ViT-Base.""")
    parser.add_argument('--avgpool_patchtokens', default=False, type=utils.bool_flag,
        help="""Whether ot not to concatenate the global average pooled features to the [CLS] token.
        We typically set this to False for ViT-Small and to True with ViT-Base.""")
    parser.add_argument('--arch', default='vit_small', type=str, help='Architecture')
    parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
    parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
    parser.add_argument("--checkpoint_key", default="teacher", type=str, help='Key to use in the checkpoint (example: "teacher")')
    parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.')
    parser.add_argument("--lr", default=0.001, type=float, help="""Learning rate at the beginning of
        training (highest LR used during training). The learning rate is linearly scaled
        with the batch size, and specified here for a reference batch size of 256.
        We recommend tweaking the LR depending on the checkpoint evaluated.""")
    parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size')
    parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
        distributed training; see https://pytorch.org/docs/stable/distributed.html""")
    parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
    parser.add_argument('--data_path', default='/path/to/imagenet/', type=str)
    parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
    parser.add_argument('--val_freq', default=1, type=int, help="Epoch frequency for validation.")
    parser.add_argument('--output_dir', default=".", help='Path to save logs and checkpoints')
    parser.add_argument('--num_labels', default=1000, type=int, help='Number of labels for linear classifier')
    parser.add_argument('--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
    args = parser.parse_args()
    eval_linear(args)
| 13,256 | 46.010638 | 135 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/eval_image_retrieval.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import pickle
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import models as torchvision_models
from torchvision import transforms as pth_transforms
from PIL import Image, ImageFile
import numpy as np
import utils
import vision_transformer as vits
from eval_knn import extract_features
class OxfordParisDataset(torch.utils.data.Dataset):
    """Revisited Oxford5k / Paris6k retrieval dataset.

    Loads the ground-truth pickle (`gnd_<dataset>.pkl`) into `self.cfg` and
    serves either the query images (split == "query") or the database
    images (any other split) as (image, index) pairs.
    """

    def __init__(self, dir_main, dataset, split, transform=None, imsize=None):
        if dataset not in ['roxford5k', 'rparis6k']:
            raise ValueError('Unknown dataset: {}!'.format(dataset))

        # loading imlist, qimlist, and gnd, in cfg as a dict
        gnd_fname = os.path.join(dir_main, dataset, 'gnd_{}.pkl'.format(dataset))
        with open(gnd_fname, 'rb') as f:
            cfg = pickle.load(f)
        cfg['gnd_fname'] = gnd_fname
        cfg['ext'] = '.jpg'
        cfg['qext'] = '.jpg'
        cfg['dir_data'] = os.path.join(dir_main, dataset)
        cfg['dir_images'] = os.path.join(cfg['dir_data'], 'jpg')
        cfg['n'] = len(cfg['imlist'])
        cfg['nq'] = len(cfg['qimlist'])
        cfg['im_fname'] = config_imname
        cfg['qim_fname'] = config_qimname
        cfg['dataset'] = dataset
        self.cfg = cfg

        self.samples = cfg["qimlist"] if split == "query" else cfg["imlist"]
        self.transform = transform
        self.imsize = imsize

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        path = os.path.join(self.cfg["dir_images"], self.samples[index] + ".jpg")
        ImageFile.LOAD_TRUNCATED_IMAGES = True
        with open(path, 'rb') as f:
            img = Image.open(f)
            img = img.convert('RGB')
        if self.imsize is not None:
            # Bug fix: Image.ANTIALIAS was removed in Pillow 10 and raised
            # AttributeError; Image.LANCZOS is the same filter under its
            # canonical name (available since Pillow 2.7).
            img.thumbnail((self.imsize, self.imsize), Image.LANCZOS)
        if self.transform is not None:
            img = self.transform(img)
        return img, index
def config_imname(cfg, i):
    """Full path of the i-th database image described by `cfg`."""
    filename = cfg['imlist'][i] + cfg['ext']
    return os.path.join(cfg['dir_images'], filename)
def config_qimname(cfg, i):
    """Full path of the i-th query image described by `cfg`."""
    filename = cfg['qimlist'][i] + cfg['qext']
    return os.path.join(cfg['dir_images'], filename)
if __name__ == '__main__':
    # Image-retrieval entry point: extract features for database ("train")
    # and query splits, rank database images by cosine similarity, and
    # report mAP / mP@k for the Medium and Hard protocols.
    parser = argparse.ArgumentParser('Image Retrieval on revisited Paris and Oxford')
    parser.add_argument('--data_path', default='/path/to/revisited_paris_oxford/', type=str)
    parser.add_argument('--dataset', default='roxford5k', type=str, choices=['roxford5k', 'rparis6k'])
    parser.add_argument('--multiscale', default=False, type=utils.bool_flag)
    parser.add_argument('--imsize', default=224, type=int, help='Image size')
    parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
    parser.add_argument('--use_cuda', default=True, type=utils.bool_flag)
    parser.add_argument('--arch', default='vit_small', type=str, help='Architecture')
    parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
    parser.add_argument("--checkpoint_key", default="teacher", type=str,
        help='Key to use in the checkpoint (example: "teacher")')
    parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
    parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
        distributed training; see https://pytorch.org/docs/stable/distributed.html""")
    parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
    args = parser.parse_args()

    utils.init_distributed_mode(args)
    print("git:\n  {}\n".format(utils.get_sha()))
    print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
    cudnn.benchmark = True

    # ============ preparing data ... ============
    transform = pth_transforms.Compose([
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    dataset_train = OxfordParisDataset(args.data_path, args.dataset, split="train", transform=transform, imsize=args.imsize)
    dataset_query = OxfordParisDataset(args.data_path, args.dataset, split="query", transform=transform, imsize=args.imsize)
    sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train,
        sampler=sampler,
        batch_size=1,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    data_loader_query = torch.utils.data.DataLoader(
        dataset_query,
        batch_size=1,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    print(f"train: {len(dataset_train)} imgs / query: {len(dataset_query)} imgs")

    # ============ building network ... ============
    if "vit" in args.arch:
        model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
        print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
    elif "xcit" in args.arch:
        model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0)
    elif args.arch in torchvision_models.__dict__.keys():
        model = torchvision_models.__dict__[args.arch](num_classes=0)
    else:
        print(f"Architecture {args.arch} non supported")
        sys.exit(1)
    if args.use_cuda:
        model.cuda()
    model.eval()

    # load pretrained weights
    if os.path.isfile(args.pretrained_weights):
        state_dict = torch.load(args.pretrained_weights, map_location="cpu")
        if args.checkpoint_key is not None and args.checkpoint_key in state_dict:
            print(f"Take key {args.checkpoint_key} in provided checkpoint dict")
            state_dict = state_dict[args.checkpoint_key]
        # remove `module.` prefix
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        # remove `backbone.` prefix induced by multicrop wrapper
        state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
        msg = model.load_state_dict(state_dict, strict=False)
        print('Pretrained weights found at {} and loaded with msg: {}'.format(args.pretrained_weights, msg))
    elif args.arch == "vit_small" and args.patch_size == 16:
        print("Since no pretrained weights have been provided, we load pretrained DINO weights on Google Landmark v2.")
        model.load_state_dict(torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/dino_vitsmall16_googlelandmark_pretrain/dino_vitsmall16_googlelandmark_pretrain.pth"))
    else:
        print("Warning: We use random weights.")

    ############################################################################
    # Step 1: extract features
    train_features = extract_features(model, data_loader_train, args.use_cuda, multiscale=args.multiscale)
    query_features = extract_features(model, data_loader_query, args.use_cuda, multiscale=args.multiscale)

    if utils.get_rank() == 0:  # only rank 0 will work from now on
        # normalize features
        train_features = nn.functional.normalize(train_features, dim=1, p=2)
        query_features = nn.functional.normalize(query_features, dim=1, p=2)

        ############################################################################
        # Step 2: similarity
        sim = torch.mm(train_features, query_features.T)
        ranks = torch.argsort(-sim, dim=0).cpu().numpy()

        ############################################################################
        # Step 3: evaluate
        gnd = dataset_train.cfg['gnd']
        # evaluate ranks
        ks = [1, 5, 10]
        # search for easy & hard (Medium protocol: easy+hard are positives)
        gnd_t = []
        for i in range(len(gnd)):
            g = {}
            g['ok'] = np.concatenate([gnd[i]['easy'], gnd[i]['hard']])
            g['junk'] = np.concatenate([gnd[i]['junk']])
            gnd_t.append(g)
        mapM, apsM, mprM, prsM = utils.compute_map(ranks, gnd_t, ks)
        # search for hard (Hard protocol: easy images are treated as junk)
        gnd_t = []
        for i in range(len(gnd)):
            g = {}
            g['ok'] = np.concatenate([gnd[i]['hard']])
            g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['easy']])
            gnd_t.append(g)
        mapH, apsH, mprH, prsH = utils.compute_map(ranks, gnd_t, ks)
        print('>> {}: mAP M: {}, H: {}'.format(args.dataset, np.around(mapM*100, decimals=2), np.around(mapH*100, decimals=2)))
        print('>> {}: mP@k{} M: {}, H: {}'.format(args.dataset, np.array(ks), np.around(mprM*100, decimals=2), np.around(mprH*100, decimals=2)))
    dist.barrier()
| 9,288 | 44.985149 | 192 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/hubconf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torchvision.models.resnet import resnet50
import vision_transformer as vits
dependencies = ["torch", "torchvision"]
def dino_vits16(pretrained=True, **kwargs):
    """
    ViT-Small/16x16 pre-trained with DINO.
    Achieves 74.5% top-1 accuracy on ImageNet with k-NN classification.
    """
    model = vits.__dict__["vit_small"](patch_size=16, num_classes=0, **kwargs)
    if not pretrained:
        return model
    # Download and load the official DINO checkpoint.
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth",
        map_location="cpu",
    )
    model.load_state_dict(checkpoint, strict=True)
    return model
def dino_vits8(pretrained=True, **kwargs):
    """
    ViT-Small/8x8 pre-trained with DINO.
    Achieves 78.3% top-1 accuracy on ImageNet with k-NN classification.
    """
    model = vits.__dict__["vit_small"](patch_size=8, num_classes=0, **kwargs)
    if not pretrained:
        return model
    # Download and load the official DINO checkpoint.
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth",
        map_location="cpu",
    )
    model.load_state_dict(checkpoint, strict=True)
    return model
def dino_vitb16(pretrained=True, **kwargs):
    """
    ViT-Base/16x16 pre-trained with DINO.
    Achieves 76.1% top-1 accuracy on ImageNet with k-NN classification.
    """
    model = vits.__dict__["vit_base"](patch_size=16, num_classes=0, **kwargs)
    if not pretrained:
        return model
    # Download and load the official DINO checkpoint.
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth",
        map_location="cpu",
    )
    model.load_state_dict(checkpoint, strict=True)
    return model
def dino_vitb8(pretrained=True, **kwargs):
    """
    ViT-Base/8x8 pre-trained with DINO.
    Achieves 77.4% top-1 accuracy on ImageNet with k-NN classification.
    """
    model = vits.__dict__["vit_base"](patch_size=8, num_classes=0, **kwargs)
    if not pretrained:
        return model
    # Download and load the official DINO checkpoint.
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth",
        map_location="cpu",
    )
    model.load_state_dict(checkpoint, strict=True)
    return model
def dino_resnet50(pretrained=True, **kwargs):
    """
    ResNet-50 pre-trained with DINO.
    Achieves 75.3% top-1 accuracy on ImageNet linear evaluation benchmark (requires to train `fc`).
    """
    model = resnet50(pretrained=False, **kwargs)
    # Drop the classification head; DINO features come from the trunk.
    model.fc = torch.nn.Identity()
    if not pretrained:
        return model
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_resnet50_pretrain/dino_resnet50_pretrain.pth",
        map_location="cpu",
    )
    # strict=False: the checkpoint has no `fc` entry after the Identity swap.
    model.load_state_dict(checkpoint, strict=False)
    return model
def dino_xcit_small_12_p16(pretrained=True, **kwargs):
    """
    XCiT-Small-12/16 pre-trained with DINO.
    """
    # Backbone architecture comes from the upstream xcit hub repo.
    model = torch.hub.load('facebookresearch/xcit:main', "xcit_small_12_p16", num_classes=0, **kwargs)
    if not pretrained:
        return model
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_xcit_small_12_p16_pretrain/dino_xcit_small_12_p16_pretrain.pth",
        map_location="cpu",
    )
    model.load_state_dict(checkpoint, strict=True)
    return model
def dino_xcit_small_12_p8(pretrained=True, **kwargs):
    """
    XCiT-Small-12/8 pre-trained with DINO.
    """
    # Backbone architecture comes from the upstream xcit hub repo.
    model = torch.hub.load('facebookresearch/xcit:main', "xcit_small_12_p8", num_classes=0, **kwargs)
    if not pretrained:
        return model
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_xcit_small_12_p8_pretrain/dino_xcit_small_12_p8_pretrain.pth",
        map_location="cpu",
    )
    model.load_state_dict(checkpoint, strict=True)
    return model
def dino_xcit_medium_24_p16(pretrained=True, **kwargs):
    """
    XCiT-Medium-24/16 pre-trained with DINO.
    """
    # Backbone architecture comes from the upstream xcit hub repo.
    model = torch.hub.load('facebookresearch/xcit:main', "xcit_medium_24_p16", num_classes=0, **kwargs)
    if not pretrained:
        return model
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_xcit_medium_24_p16_pretrain/dino_xcit_medium_24_p16_pretrain.pth",
        map_location="cpu",
    )
    model.load_state_dict(checkpoint, strict=True)
    return model
def dino_xcit_medium_24_p8(pretrained=True, **kwargs):
    """
    XCiT-Medium-24/8 pre-trained with DINO.
    """
    # Backbone architecture comes from the upstream xcit hub repo.
    model = torch.hub.load('facebookresearch/xcit:main', "xcit_medium_24_p8", num_classes=0, **kwargs)
    if not pretrained:
        return model
    checkpoint = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/dino/dino_xcit_medium_24_p8_pretrain/dino_xcit_medium_24_p8_pretrain.pth",
        map_location="cpu",
    )
    model.load_state_dict(checkpoint, strict=True)
    return model
| 5,653 | 36.197368 | 124 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/run_with_submitit.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A script to run multinode training with submitit.
Almost copy-paste from https://github.com/facebookresearch/deit/blob/main/run_with_submitit.py
"""
import argparse
import os
import uuid
from pathlib import Path
import main_dino
import submitit
def parse_args():
    """Parse CLI arguments for the submitit launcher.

    Extends main_dino's parser with SLURM/cluster options (gpus, nodes,
    timeout, partition, constraints) and returns the parsed namespace.
    """
    parser = argparse.ArgumentParser("Submitit for DINO", parents=[main_dino.get_args_parser()])
    parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
    parser.add_argument("--nodes", default=2, type=int, help="Number of nodes to request")
    parser.add_argument("--timeout", default=2800, type=int, help="Duration of the job")
    parser.add_argument("--partition", default="learnfair", type=str, help="Partition where to submit")
    parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this")
    parser.add_argument('--comment', default="", type=str,
                        help='Comment to pass to scheduler, e.g. priority message')
    return parser.parse_args()
def get_shared_folder() -> Path:
    """Return the user's shared experiment folder, creating it if needed.

    Raises RuntimeError when the cluster checkpoint mount is absent.
    """
    if not Path("/checkpoint/").is_dir():
        raise RuntimeError("No shared folder available")
    user = os.getenv("USER")
    shared = Path(f"/checkpoint/{user}/experiments")
    shared.mkdir(exist_ok=True)
    return shared
def get_init_file():
    """Return a fresh, non-existent init-file path in the shared folder.

    The file itself must not exist yet (torch.distributed requirement),
    but its parent directory must.
    """
    shared = get_shared_folder()
    os.makedirs(str(shared), exist_ok=True)
    init_file = shared / f"{uuid.uuid4().hex}_init"
    if init_file.exists():
        os.remove(str(init_file))
    return init_file
class Trainer(object):
    """Picklable callable submitted to submitit; runs DINO training on a task.

    submitit invokes __call__ on each task and calls checkpoint() on
    preemption/timeout to requeue the job.
    """

    def __init__(self, args):
        # Parsed argparse namespace; mutated in place with rank/GPU info.
        self.args = args

    def __call__(self):
        """Entry point on each task: fill in GPU/rank args, then train."""
        import main_dino

        self._setup_gpu_args()
        main_dino.train_dino(self.args)

    def checkpoint(self):
        """Requeue the job with a fresh rendezvous file (submitit protocol)."""
        import os
        import submitit

        self.args.dist_url = get_init_file().as_uri()
        print("Requeuing ", self.args)
        empty_trainer = type(self)(self.args)
        return submitit.helpers.DelayedSubmission(empty_trainer)

    def _setup_gpu_args(self):
        """Populate args with rank/world-size/GPU from the submitit job env."""
        import submitit
        from pathlib import Path

        job_env = submitit.JobEnvironment()
        # Replace the "%j" placeholder with the actual SLURM job id.
        self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
        self.args.gpu = job_env.local_rank
        self.args.rank = job_env.global_rank
        self.args.world_size = job_env.num_tasks
        print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
    """Configure a submitit SLURM executor and submit the DINO training job."""
    args = parse_args()
    if args.output_dir == "":
        args.output_dir = get_shared_folder() / "%j"
    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    executor = submitit.AutoExecutor(folder=args.output_dir, slurm_max_num_timeout=30)

    # Optional SLURM extras that only apply when requested.
    extra = {}
    if args.use_volta32:
        extra['slurm_constraint'] = 'volta32gb'
    if args.comment:
        extra['slurm_comment'] = args.comment

    executor.update_parameters(
        mem_gb=40 * args.ngpus,
        gpus_per_node=args.ngpus,
        tasks_per_node=args.ngpus,  # one task per GPU
        cpus_per_task=10,
        nodes=args.nodes,
        timeout_min=args.timeout,  # max is 60 * 72
        # Below are cluster dependent parameters
        slurm_partition=args.partition,
        slurm_signal_delay_s=120,
        **extra
    )
    executor.update_parameters(name="dino")

    # Unique rendezvous file for torch.distributed.
    args.dist_url = get_init_file().as_uri()

    trainer = Trainer(args)
    job = executor.submit(trainer)
    print(f"Submitted job_id: {job.job_id}")
    print(f"Logs and checkpoints will be saved at: {args.output_dir}")
# Script entry point: parse CLI args and submit the training job to SLURM.
if __name__ == "__main__":
    main()
| 4,374 | 31.894737 | 103 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/visualize_attention.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import cv2
import random
import colorsys
import requests
from io import BytesIO
import skimage.io
from skimage.measure import find_contours
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms as pth_transforms
import numpy as np
from PIL import Image
import utils
import vision_transformer as vits
def apply_mask(image, mask, color, alpha=0.5):
    """Blend `color` into `image` in place where `mask` is non-zero.

    Each RGB channel c becomes image*(1 - alpha*mask) + alpha*mask*color[c]*255.
    Returns the (mutated) image.
    """
    weight = alpha * mask
    for channel in range(3):
        image[:, :, channel] = (
            image[:, :, channel] * (1 - weight) + weight * color[channel] * 255
        )
    return image
def random_colors(N, bright=True):
    """
    Generate random colors.

    Produces N RGB triples with evenly spaced hues (full saturation),
    then shuffles them so adjacent instances get distinct colors.
    """
    value = 1.0 if bright else 0.7
    colors = [colorsys.hsv_to_rgb(i / N, 1, value) for i in range(N)]
    random.shuffle(colors)
    return colors
def display_instances(image, mask, fname="test", figsize=(5, 5), blur=False, contour=True, alpha=0.5):
    """Overlay a single binary mask on `image` and save the figure to `fname`.

    Blends a random color into the masked region (optionally blurring the
    mask first) and, when `contour` is True, draws the mask outline as a
    polygon. The figure is written to disk; nothing is returned.
    NOTE(review): assumes `image` is an HxWx3 array (see apply_mask) — confirm at call sites.
    """
    fig = plt.figure(figsize=figsize, frameon=False)
    # Axes spanning the whole figure so the saved file has no borders.
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    ax = plt.gca()
    N = 1
    mask = mask[None, :, :]
    # Generate random colors
    colors = random_colors(N)
    # Show area outside image boundaries.
    height, width = image.shape[:2]
    margin = 0
    ax.set_ylim(height + margin, -margin)
    ax.set_xlim(-margin, width + margin)
    ax.axis('off')
    # Work in uint32 so the blend arithmetic does not overflow uint8.
    masked_image = image.astype(np.uint32).copy()
    for i in range(N):
        color = colors[i]
        _mask = mask[i]
        if blur:
            _mask = cv2.blur(_mask,(10,10))
        # Mask
        masked_image = apply_mask(masked_image, _mask, color, alpha)
        # Mask Polygon
        # Pad to ensure proper polygons for masks that touch image edges.
        if contour:
            padded_mask = np.zeros((_mask.shape[0] + 2, _mask.shape[1] + 2))
            padded_mask[1:-1, 1:-1] = _mask
            contours = find_contours(padded_mask, 0.5)
            for verts in contours:
                # Subtract the padding and flip (y, x) to (x, y)
                verts = np.fliplr(verts) - 1
                p = Polygon(verts, facecolor="none", edgecolor=color)
                ax.add_patch(p)
    ax.imshow(masked_image.astype(np.uint8), aspect='auto')
    fig.savefig(fname)
    print(f"{fname} saved.")
    return
# Script entry point: load a DINO ViT, run one image through it, and save
# per-head self-attention heatmaps (and optionally thresholded masks).
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Visualize Self-Attention maps')
    parser.add_argument('--arch', default='vit_small', type=str,
        choices=['vit_tiny', 'vit_small', 'vit_base'], help='Architecture (support only ViT atm).')
    parser.add_argument('--patch_size', default=8, type=int, help='Patch resolution of the model.')
    parser.add_argument('--pretrained_weights', default='', type=str,
        help="Path to pretrained weights to load.")
    parser.add_argument("--checkpoint_key", default="teacher", type=str,
        help='Key to use in the checkpoint (example: "teacher")')
    parser.add_argument("--image_path", default=None, type=str, help="Path of the image to load.")
    parser.add_argument("--image_size", default=(480, 480), type=int, nargs="+", help="Resize image.")
    parser.add_argument('--output_dir', default='.', help='Path where to save visualizations.')
    parser.add_argument("--threshold", type=float, default=None, help="""We visualize masks
        obtained by thresholding the self-attention maps to keep xx% of the mass.""")
    args = parser.parse_args()
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    # build model (inference only: gradients disabled, eval mode)
    model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
    for p in model.parameters():
        p.requires_grad = False
    model.eval()
    model.to(device)
    if os.path.isfile(args.pretrained_weights):
        state_dict = torch.load(args.pretrained_weights, map_location="cpu")
        if args.checkpoint_key is not None and args.checkpoint_key in state_dict:
            print(f"Take key {args.checkpoint_key} in provided checkpoint dict")
            state_dict = state_dict[args.checkpoint_key]
        # remove `module.` prefix
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        # remove `backbone.` prefix induced by multicrop wrapper
        state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
        msg = model.load_state_dict(state_dict, strict=False)
        print('Pretrained weights found at {} and loaded with msg: {}'.format(args.pretrained_weights, msg))
    else:
        print("Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate.")
        # Fall back to the official released checkpoint matching arch/patch size.
        url = None
        if args.arch == "vit_small" and args.patch_size == 16:
            url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
        elif args.arch == "vit_small" and args.patch_size == 8:
            url = "dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth"  # model used for visualizations in our paper
        elif args.arch == "vit_base" and args.patch_size == 16:
            url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
        elif args.arch == "vit_base" and args.patch_size == 8:
            url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
        if url is not None:
            print("Since no pretrained weights have been provided, we load the reference pretrained DINO weights.")
            state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)
            model.load_state_dict(state_dict, strict=True)
        else:
            print("There is no reference weights available for this model => We use random weights.")
    # open image
    if args.image_path is None:
        # user has not specified any image - we use our own image
        print("Please use the `--image_path` argument to indicate the path of the image you wish to visualize.")
        print("Since no image path have been provided, we take the first image in our paper.")
        response = requests.get("https://dl.fbaipublicfiles.com/dino/img.png")
        img = Image.open(BytesIO(response.content))
        img = img.convert('RGB')
    elif os.path.isfile(args.image_path):
        with open(args.image_path, 'rb') as f:
            img = Image.open(f)
            img = img.convert('RGB')
    else:
        print(f"Provided image path {args.image_path} is non valid.")
        sys.exit(1)
    # ImageNet normalization, applied after resize.
    transform = pth_transforms.Compose([
        pth_transforms.Resize(args.image_size),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    img = transform(img)
    # make the image divisible by the patch size
    w, h = img.shape[1] - img.shape[1] % args.patch_size, img.shape[2] - img.shape[2] % args.patch_size
    img = img[:, :w, :h].unsqueeze(0)
    w_featmap = img.shape[-2] // args.patch_size
    h_featmap = img.shape[-1] // args.patch_size
    attentions = model.get_last_selfattention(img.to(device))
    nh = attentions.shape[1] # number of head
    # we keep only the output patch attention
    # (row 0 = CLS token query; columns 1: = attention over image patches)
    attentions = attentions[0, :, 0, 1:].reshape(nh, -1)
    if args.threshold is not None:
        # we keep only a certain percentage of the mass
        val, idx = torch.sort(attentions)
        val /= torch.sum(val, dim=1, keepdim=True)
        cumval = torch.cumsum(val, dim=1)
        th_attn = cumval > (1 - args.threshold)
        # Undo the sort so the mask lines up with the original patch order.
        idx2 = torch.argsort(idx)
        for head in range(nh):
            th_attn[head] = th_attn[head][idx2[head]]
        th_attn = th_attn.reshape(nh, w_featmap, h_featmap).float()
        # interpolate
        th_attn = nn.functional.interpolate(th_attn.unsqueeze(0), scale_factor=args.patch_size, mode="nearest")[0].cpu().numpy()
    # Upsample patch-level attention back to pixel resolution.
    attentions = attentions.reshape(nh, w_featmap, h_featmap)
    attentions = nn.functional.interpolate(attentions.unsqueeze(0), scale_factor=args.patch_size, mode="nearest")[0].cpu().numpy()
    # save attentions heatmaps
    os.makedirs(args.output_dir, exist_ok=True)
    torchvision.utils.save_image(torchvision.utils.make_grid(img, normalize=True, scale_each=True), os.path.join(args.output_dir, "img.png"))
    for j in range(nh):
        fname = os.path.join(args.output_dir, "attn-head" + str(j) + ".png")
        plt.imsave(fname=fname, arr=attentions[j], format='png')
        print(f"{fname} saved.")
    if args.threshold is not None:
        image = skimage.io.imread(os.path.join(args.output_dir, "img.png"))
        for j in range(nh):
            display_instances(image, th_attn[j], fname=os.path.join(args.output_dir, "mask_th" + str(args.threshold) + "_head" + str(j) +".png"), blur=False)
| 9,389 | 42.878505 | 157 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Misc functions.
Mostly copy-paste from torchvision references or other public repos like DETR:
https://github.com/facebookresearch/detr/blob/master/util/misc.py
"""
import os
import sys
import time
import math
import random
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
class GaussianBlur(object):
    """Randomly apply a Gaussian blur (with a random radius) to a PIL image."""

    def __init__(self, p=0.5, radius_min=0.1, radius_max=2.):
        # Probability of applying the blur, and the radius range to sample from.
        self.prob = p
        self.radius_min = radius_min
        self.radius_max = radius_max

    def __call__(self, img):
        # Skip the transform with probability (1 - prob).
        if random.random() > self.prob:
            return img
        radius = random.uniform(self.radius_min, self.radius_max)
        return img.filter(ImageFilter.GaussianBlur(radius=radius))
class Solarization(object):
    """Randomly solarize a PIL image with probability ``p``."""

    def __init__(self, p):
        self.p = p

    def __call__(self, img):
        # Apply with probability p; otherwise pass the image through untouched.
        if random.random() < self.p:
            return ImageOps.solarize(img)
        return img
def load_pretrained_weights(model, pretrained_weights, checkpoint_key, model_name, patch_size):
    """Load DINO backbone weights into ``model``.

    Prefers the local checkpoint at ``pretrained_weights``; otherwise falls
    back to the official released checkpoint for (model_name, patch_size)
    when one exists, and leaves the model randomly initialized otherwise.
    """
    if os.path.isfile(pretrained_weights):
        state_dict = torch.load(pretrained_weights, map_location="cpu")
        if checkpoint_key is not None and checkpoint_key in state_dict:
            print(f"Take key {checkpoint_key} in provided checkpoint dict")
            state_dict = state_dict[checkpoint_key]
        # Strip prefixes added by DDP (`module.`) and MultiCropWrapper (`backbone.`).
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
        msg = model.load_state_dict(state_dict, strict=False)
        print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
        return
    print("Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate.")
    # Official checkpoints: ViTs are keyed by (arch, patch size), the rest by arch only.
    vit_urls = {
        ("vit_small", 16): "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth",
        ("vit_small", 8): "dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth",
        ("vit_base", 16): "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth",
        ("vit_base", 8): "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth",
    }
    other_urls = {
        "xcit_small_12_p16": "dino_xcit_small_12_p16_pretrain/dino_xcit_small_12_p16_pretrain.pth",
        "xcit_small_12_p8": "dino_xcit_small_12_p8_pretrain/dino_xcit_small_12_p8_pretrain.pth",
        "xcit_medium_24_p16": "dino_xcit_medium_24_p16_pretrain/dino_xcit_medium_24_p16_pretrain.pth",
        "xcit_medium_24_p8": "dino_xcit_medium_24_p8_pretrain/dino_xcit_medium_24_p8_pretrain.pth",
        "resnet50": "dino_resnet50_pretrain/dino_resnet50_pretrain.pth",
    }
    url = vit_urls.get((model_name, patch_size), other_urls.get(model_name))
    if url is not None:
        print("Since no pretrained weights have been provided, we load the reference pretrained DINO weights.")
        state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)
        model.load_state_dict(state_dict, strict=True)
    else:
        print("There is no reference weights available for this model => We use random weights.")
def load_pretrained_linear_weights(linear_classifier, model_name, patch_size):
    """Load the official DINO linear-evaluation head weights when available."""
    vit_urls = {
        ("vit_small", 16): "dino_deitsmall16_pretrain/dino_deitsmall16_linearweights.pth",
        ("vit_small", 8): "dino_deitsmall8_pretrain/dino_deitsmall8_linearweights.pth",
        ("vit_base", 16): "dino_vitbase16_pretrain/dino_vitbase16_linearweights.pth",
        ("vit_base", 8): "dino_vitbase8_pretrain/dino_vitbase8_linearweights.pth",
    }
    if model_name == "resnet50":
        # resnet50 has a single released head regardless of patch_size.
        url = "dino_resnet50_pretrain/dino_resnet50_linearweights.pth"
    else:
        url = vit_urls.get((model_name, patch_size))
    if url is None:
        print("We use random linear weights.")
        return
    print("We load the reference pretrained linear weights.")
    state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)["state_dict"]
    linear_classifier.load_state_dict(state_dict, strict=True)
def clip_gradients(model, clip):
    """Clip each parameter's gradient to L2 norm ``clip`` (per-parameter, in place).

    Returns the list of pre-clipping gradient norms, in named_parameters order.
    """
    norms = []
    for _, param in model.named_parameters():
        grad = param.grad
        if grad is None:
            continue
        grad_norm = grad.data.norm(2)
        norms.append(grad_norm.item())
        # Only scale down; gradients already within the bound are untouched.
        scale = clip / (grad_norm + 1e-6)
        if scale < 1:
            grad.data.mul_(scale)
    return norms
def cancel_gradients_last_layer(epoch, model, freeze_last_layer):
    """Drop gradients of any parameter named ``*last_layer*`` while frozen.

    No-op once ``epoch`` reaches ``freeze_last_layer``.
    """
    if epoch >= freeze_last_layer:
        return
    for name, param in model.named_parameters():
        if "last_layer" in name:
            param.grad = None
def restart_from_checkpoint(ckp_path, run_variables=None, **kwargs):
    """Resume training state from ``ckp_path`` if the file exists.

    ``kwargs`` maps checkpoint keys to objects exposing load_state_dict();
    ``run_variables`` is filled in place with matching scalar entries
    (e.g. epoch). Silently returns when the checkpoint is absent.
    """
    if not os.path.isfile(ckp_path):
        return
    print("Found checkpoint at {}".format(ckp_path))
    checkpoint = torch.load(ckp_path, map_location="cpu")
    # Restore every requested object whose key exists in the checkpoint.
    for key, value in kwargs.items():
        if key not in checkpoint or value is None:
            print("=> key '{}' not found in checkpoint: '{}'".format(key, ckp_path))
            continue
        try:
            msg = value.load_state_dict(checkpoint[key], strict=False)
            print("=> loaded '{}' from checkpoint '{}' with msg {}".format(key, ckp_path, msg))
        except TypeError:
            # Some objects (optimizers, schedulers) do not accept `strict`.
            try:
                msg = value.load_state_dict(checkpoint[key])
                print("=> loaded '{}' from checkpoint: '{}'".format(key, ckp_path))
            except ValueError:
                print("=> failed to load '{}' from checkpoint: '{}'".format(key, ckp_path))
    # re load variable important for the run
    if run_variables is not None:
        for var_name in run_variables:
            if var_name in checkpoint:
                run_variables[var_name] = checkpoint[var_name]
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0):
    """Per-iteration schedule: linear warmup, then cosine decay to ``final_value``.

    Returns a numpy array of length ``epochs * niter_per_ep``.
    """
    warmup_iters = warmup_epochs * niter_per_ep
    if warmup_epochs > 0:
        warmup = np.linspace(start_warmup_value, base_value, warmup_iters)
    else:
        warmup = np.array([])
    # Cosine anneal over the remaining iterations.
    steps = np.arange(epochs * niter_per_ep - warmup_iters)
    cosine = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * steps / len(steps)))
    schedule = np.concatenate((warmup, cosine))
    assert len(schedule) == epochs * niter_per_ep
    return schedule
def bool_flag(s):
    """
    Parse boolean arguments from the command line.

    Accepts (case-insensitively) "on"/"true"/"1" as True and
    "off"/"false"/"0" as False. Any other value raises
    argparse.ArgumentTypeError so argparse can report a clean usage error.
    """
    # Bug fix: `argparse` is not imported at module level in this file, so the
    # error branch used to raise NameError instead of ArgumentTypeError.
    import argparse

    FALSY_STRINGS = {"off", "false", "0"}
    TRUTHY_STRINGS = {"on", "true", "1"}
    if s.lower() in FALSY_STRINGS:
        return False
    elif s.lower() in TRUTHY_STRINGS:
        return True
    else:
        raise argparse.ArgumentTypeError("invalid value for a boolean flag")
def fix_random_seeds(seed=31):
    """Seed numpy and torch (CPU and all CUDA devices) for reproducibility."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        # Windowed history (median/avg/max/value) plus global running totals.
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = "{median:.6f} ({global_avg:.6f})" if fmt is None else fmt

    def update(self, value, n=1):
        """Record `value` (weighted by n for the global average)."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        stats = stats.tolist()
        self.count = int(stats[0])
        self.total = stats[1]

    @property
    def median(self):
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Single process: nothing to reduce.
        return input_dict
    with torch.no_grad():
        # Sort keys so every rank stacks tensors in the same order.
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.all_reduce(stacked)
        if average:
            stacked /= world_size
        return dict(zip(keys, stacked))
class MetricLogger(object):
    """Collect named SmoothedValue meters and periodically log them while
    iterating a data loader (via log_every)."""

    def __init__(self, delimiter="\t"):
        # Meters are created lazily on first update of each name.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Update each named meter with a scalar (tensors are .item()'d)."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Allow `logger.loss` style access to meters.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        """Sync every meter's global stats across distributed ranks."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        """Register a pre-configured meter (e.g. with a custom fmt)."""
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Generator: yield items from `iterable`, printing progress (ETA,
        meters, timing, GPU memory) every `print_freq` iterations."""
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        # Smoothed per-iteration and data-loading times.
        iter_time = SmoothedValue(fmt='{avg:.6f}')
        data_time = SmoothedValue(fmt='{avg:.6f}')
        # Pad the iteration counter to the width of len(iterable).
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # Time spent fetching the item (between iterations) vs processing it.
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.6f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
def get_sha():
    """Return a one-line description of the git state: sha, dirty flag, branch.

    Falls back to 'N/A'/'clean' defaults when git (or the repo) is unavailable.
    """
    cwd = os.path.dirname(os.path.abspath(__file__))

    def _run(command):
        return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()

    sha, diff, branch = 'N/A', "clean", 'N/A'
    try:
        sha = _run(['git', 'rev-parse', 'HEAD'])
        subprocess.check_output(['git', 'diff'], cwd=cwd)
        diff = _run(['git', 'diff-index', 'HEAD'])
        diff = "has uncommited changes" if diff else "clean"
        branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    except Exception:
        pass  # not a git checkout or git missing: keep the defaults
    return f"sha: {sha}, status: {diff}, branch: {branch}"
def is_dist_avail_and_initialized():
    """True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of distributed processes (1 when not running distributed)."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of this process (0 when not running distributed)."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """True on rank 0 — the process allowed to write checkpoints/logs."""
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    """torch.save, executed only on the main (rank-0) process."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process

    Replaces builtins.print with a wrapper that only forwards on the
    master rank, unless the caller passes force=True.
    """
    import builtins
    original_print = builtins.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            original_print(*args, **kwargs)

    builtins.print = print
def init_distributed_mode(args):
    """Detect the launch environment, fill args.rank/gpu/world_size, and
    initialize the NCCL process group. Exits when no GPU is available."""
    # launched with torch.distributed.launch
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    # launched with submitit on a slurm cluster
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    # launched naively with `python main_dino.py`
    # we manually add MASTER_ADDR and MASTER_PORT to env variables
    elif torch.cuda.is_available():
        print('Will run the code on one GPU.')
        args.rank, args.gpu, args.world_size = 0, 0, 1
        os.environ['MASTER_ADDR'] = '127.0.0.1'
        os.environ['MASTER_PORT'] = '29500'
    else:
        print('Does not support training without GPU.')
        sys.exit(1)
    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=args.world_size,
        rank=args.rank,
    )
    torch.cuda.set_device(args.gpu)
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    # Make sure every rank reached this point before silencing non-master print.
    dist.barrier()
    setup_for_distributed(args.rank == 0)
def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (in percent) of `output` logits against `target` labels.

    Returns one tensor per requested k, in the order given by `topk`.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    # (batch, maxk) indices of the highest-scoring classes, transposed to (maxk, batch).
    _, top_pred = output.topk(maxk, 1, True, True)
    top_pred = top_pred.t()
    hits = top_pred.eq(target.reshape(1, -1).expand_as(top_pred))
    return [hits[:k].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """In-place truncated-normal initialization; thin wrapper over the no-grad worker."""
    return _no_grad_trunc_normal_(tensor, mean=mean, std=std, a=a, b=b)
class LARS(torch.optim.Optimizer):
    """
    Almost copy-paste from https://github.com/facebookresearch/barlowtwins/blob/main/main.py

    SGD-with-momentum variant that rescales each multi-dimensional parameter's
    update by the LARS "trust ratio" eta * ||w|| / ||grad||; 1-D parameters
    (biases, norm weights) are updated with plain momentum SGD and no weight decay.
    """

    def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, eta=0.001,
                 weight_decay_filter=None, lars_adaptation_filter=None):
        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum,
                        eta=eta, weight_decay_filter=weight_decay_filter,
                        lars_adaptation_filter=lars_adaptation_filter)
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self):
        """Perform one optimization step over every parameter group."""
        for group in self.param_groups:
            for param in group['params']:
                grad = param.grad
                if grad is None:
                    continue
                if param.ndim != 1:
                    # Decoupled weight decay, then LARS trust-ratio scaling.
                    grad = grad.add(param, alpha=group['weight_decay'])
                    w_norm = torch.norm(param)
                    g_norm = torch.norm(grad)
                    unit = torch.ones_like(w_norm)
                    trust = torch.where(
                        w_norm > 0.,
                        torch.where(g_norm > 0,
                                    (group['eta'] * w_norm / g_norm), unit),
                        unit)
                    grad = grad.mul(trust)
                # Momentum buffer, lazily created per parameter.
                state = self.state[param]
                if 'mu' not in state:
                    state['mu'] = torch.zeros_like(param)
                buf = state['mu']
                buf.mul_(group['momentum']).add_(grad)
                param.add_(buf, alpha=-group['lr'])
class MultiCropWrapper(nn.Module):
    """
    Perform forward pass separately on each resolution input.
    The inputs corresponding to a single resolution are clubbed and single
    forward is run on the same resolution inputs. Hence we do several
    forward passes = number of different resolutions used. We then
    concatenate all the output features and run the head forward on these
    concatenated features.
    """

    def __init__(self, backbone, head):
        super().__init__()
        # Disable layers dedicated to ImageNet labels classification.
        backbone.fc = nn.Identity()
        backbone.head = nn.Identity()
        self.backbone = backbone
        self.head = head

    def forward(self, x):
        # Normalize the input to a list of crops.
        crops = x if isinstance(x, list) else [x]
        # Group consecutive crops that share the same spatial resolution.
        sizes = torch.tensor([crop.shape[-1] for crop in crops])
        counts = torch.unique_consecutive(sizes, return_counts=True)[1]
        boundaries = torch.cumsum(counts, 0).tolist()
        chunks, start = [], 0
        for end in boundaries:
            feats = self.backbone(torch.cat(crops[start:end]))
            # The output is a tuple with XCiT model. See:
            # https://github.com/facebookresearch/xcit/blob/master/xcit.py#L404-L405
            if isinstance(feats, tuple):
                feats = feats[0]
            chunks.append(feats)
            start = end
        # Run the head forward on the concatenated features.
        return self.head(torch.cat(chunks))
def get_params_groups(model):
    """Split trainable parameters into two optimizer groups.

    Biases and 1-D parameters (norm layers) go into a group with
    weight_decay forced to 0; everything else is regularized normally.
    """
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # We do not regularize biases nor Norm parameters.
        bucket = no_decay if name.endswith(".bias") or len(param.shape) == 1 else decay
        bucket.append(param)
    return [{'params': decay}, {'params': no_decay, 'weight_decay': 0.}]
def has_batchnorms(model):
    """Return True if *model* contains any batch-norm layer (incl. SyncBatchNorm)."""
    bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
    return any(isinstance(module, bn_types) for _, module in model.named_modules())
class PCA():
    """
    Class to compute and apply PCA (with optional whitening).
    """

    def __init__(self, dim=256, whit=0.5):
        self.dim = dim      # number of principal components kept
        self.whit = whit    # whitening exponent applied to eigenvalues
        self.mean = None    # optional mean to subtract; set externally if needed

    def train_pca(self, cov):
        """
        Takes a covariance matrix (np.ndarray) as input.
        """
        eigval, eigvec = np.linalg.eigh(cov)
        # Floor tiny eigenvalues for numerical stability.
        floor = eigval.max() * 1e-5
        if (eigval < floor).sum() > 0:
            eigval[eigval < floor] = floor
        # total energy
        total_energy = eigval.sum()
        # Keep the self.dim largest eigenpairs (eigh returns ascending order).
        order = np.argsort(eigval)[::-1][:self.dim]
        eigval = eigval[order]
        eigvec = eigvec[:, order]
        print("keeping %.2f %% of the energy" % (eigval.sum() / total_energy * 100.0))
        # Whitened projection matrix: rows are scaled principal directions.
        self.dvt = np.dot(np.diag(1. / eigval**self.whit), eigvec.T)

    def apply(self, x):
        """Project rows of *x* (numpy array or torch tensor) onto the PCA basis."""
        # input is from numpy
        if isinstance(x, np.ndarray):
            if self.mean is not None:
                x -= self.mean
            return np.dot(self.dvt, x.T).T
        # input is from torch and is on GPU
        if x.is_cuda:
            if self.mean is not None:
                x -= torch.cuda.FloatTensor(self.mean)
            return torch.mm(torch.cuda.FloatTensor(self.dvt), x.transpose(0, 1)).transpose(0, 1)
        # input is from torch, on CPU
        if self.mean is not None:
            x -= torch.FloatTensor(self.mean)
        return torch.mm(torch.FloatTensor(self.dvt), x.transpose(0, 1)).transpose(0, 1)
def compute_ap(ranks, nres):
    """
    Computes average precision for given ranked indexes.
    Arguments
    ---------
    ranks : zero-based ranks of positive images
    nres  : number of positive images
    Returns
    -------
    ap : average precision
    """
    # Accumulate trapezoids under the precision/recall curve; each positive
    # contributes one recall step of height 1/nres.
    recall_step = 1. / nres
    ap = 0
    for j, rank in enumerate(ranks):
        # Precision just before and just after retrieving this positive.
        precision_0 = 1. if rank == 0 else float(j) / rank
        precision_1 = float(j + 1) / (rank + 1)
        ap += (precision_0 + precision_1) * recall_step / 2.
    return ap
def compute_map(ranks, gnd, kappas=[]):
    """
    Computes the mAP for a given set of returned results.
    Usage:
      map = compute_map (ranks, gnd)
            computes mean average precision (map) only
      map, aps, pr, prs = compute_map (ranks, gnd, kappas)
            computes mean average precision (map), average precision (aps) for each query
            computes mean precision at kappas (pr), precision at kappas (prs) for each query
    Notes:
    1) ranks starts from 0, ranks.shape = db_size X #queries
    2) The junk results (e.g., the query itself) should be declared in the gnd struct array
    3) If there are no positive images for some query, that query is excluded from the evaluation
    """
    map = 0.
    nq = len(gnd) # number of queries
    aps = np.zeros(nq)
    pr = np.zeros(len(kappas))
    prs = np.zeros((nq, len(kappas)))
    nempty = 0
    for i in np.arange(nq):
        qgnd = np.array(gnd[i]['ok'])
        # no positive images, skip from the average
        if qgnd.shape[0] == 0:
            aps[i] = float('nan')
            prs[i, :] = float('nan')
            nempty += 1
            continue
        # 'junk' entries are optional in the ground-truth struct
        try:
            qgndj = np.array(gnd[i]['junk'])
        except:
            qgndj = np.empty(0)
        # sorted positions of positive and junk images (0 based)
        pos = np.arange(ranks.shape[0])[np.in1d(ranks[:,i], qgnd)]
        junk = np.arange(ranks.shape[0])[np.in1d(ranks[:,i], qgndj)]
        k = 0;
        ij = 0;
        if len(junk):
            # decrease positions of positives based on the number of
            # junk images appearing before them
            ip = 0
            while (ip < len(pos)):
                while (ij < len(junk) and pos[ip] > junk[ij]):
                    k += 1
                    ij += 1
                pos[ip] = pos[ip] - k
                ip += 1
        # compute ap for this query and fold it into the running mean
        ap = compute_ap(pos, len(qgnd))
        map = map + ap
        aps[i] = ap
        # compute precision @ k
        pos += 1 # get it to 1-based
        for j in np.arange(len(kappas)):
            # cap kappa at the last positive's position so precision is well-defined
            kq = min(max(pos), kappas[j]);
            prs[i, j] = (pos <= kq).sum() / kq
        pr = pr + prs[i, :]
    # average only over queries that had at least one positive
    map = map / (nq - nempty)
    pr = pr / (nq - nempty)
    return map, aps, pr, prs
def multi_scale(samples, model):
    """Average *model* features over 3 input scales (1, 1/sqrt(2), 1/2), then L2-normalize."""
    scales = (1, 1 / 2 ** (1 / 2), 1 / 2)
    v = None
    for s in scales:
        if s == 1:
            inp = samples.clone()
        else:
            inp = nn.functional.interpolate(samples, scale_factor=s, mode='bilinear', align_corners=False)
        feats = model(inp).clone()
        v = feats if v is None else v + feats
    v /= 3
    v /= v.norm()
    return v
| 28,039 | 32.783133 | 119 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/video_generation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import sys
import argparse
import cv2
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms as pth_transforms
import numpy as np
from PIL import Image
import utils
import vision_transformer as vits
# Map from output container name to the OpenCV fourcc codec used to encode it.
FOURCC = {
    "mp4": cv2.VideoWriter_fourcc(*"MP4V"),
    "avi": cv2.VideoWriter_fourcc(*"XVID"),
}
# Run model inference on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
class VideoGenerator:
    """Pipeline that turns a video (or a folder of frames) into a DINO
    self-attention visualization video.

    Depending on the CLI flags it: extracts frames from an input video, runs
    the ViT to produce a per-frame attention heatmap, and re-encodes the
    heatmaps into an output video.
    """

    def __init__(self, args):
        self.args = args
        # self.model = None
        # Don't need to load model if you only want a video
        if not self.args.video_only:
            self.model = self.__load_model()

    def run(self):
        """Dispatch to the appropriate processing path based on input type."""
        if self.args.input_path is None:
            print(f"Provided input path {self.args.input_path} is non valid.")
            sys.exit(1)
        else:
            if self.args.video_only:
                self._generate_video_from_images(
                    self.args.input_path, self.args.output_path
                )
            else:
                # If input path exists
                if os.path.exists(self.args.input_path):
                    # If input is a video file
                    if os.path.isfile(self.args.input_path):
                        frames_folder = os.path.join(self.args.output_path, "frames")
                        attention_folder = os.path.join(
                            self.args.output_path, "attention"
                        )
                        os.makedirs(frames_folder, exist_ok=True)
                        os.makedirs(attention_folder, exist_ok=True)
                        self._extract_frames_from_video(
                            self.args.input_path, frames_folder
                        )
                        self._inference(
                            frames_folder,
                            attention_folder,
                        )
                        self._generate_video_from_images(
                            attention_folder, self.args.output_path
                        )
                    # If input is a folder of already extracted frames
                    if os.path.isdir(self.args.input_path):
                        attention_folder = os.path.join(
                            self.args.output_path, "attention"
                        )
                        os.makedirs(attention_folder, exist_ok=True)
                        self._inference(self.args.input_path, attention_folder)
                        self._generate_video_from_images(
                            attention_folder, self.args.output_path
                        )
                # If input path doesn't exists
                else:
                    print(f"Provided input path {self.args.input_path} doesn't exists.")
                    sys.exit(1)

    def _extract_frames_from_video(self, inp: str, out: str):
        """Dump every frame of video *inp* as `frame-NNNN.jpg` files into folder *out*.

        Also records the source video's FPS into ``self.args.fps`` so the
        output video plays at the same speed.
        """
        vidcap = cv2.VideoCapture(inp)
        self.args.fps = vidcap.get(cv2.CAP_PROP_FPS)
        print(f"Video: {inp} ({self.args.fps} fps)")
        print(f"Extracting frames to {out}")
        success, image = vidcap.read()
        count = 0
        while success:
            cv2.imwrite(
                os.path.join(out, f"frame-{count:04}.jpg"),
                image,
            )
            success, image = vidcap.read()
            count += 1
        # BUGFIX: release the capture so the underlying file handle is freed
        # (it was previously leaked until interpreter exit).
        vidcap.release()

    def _generate_video_from_images(self, inp: str, out: str):
        """Assemble the `attn-*.jpg` images found in *inp* into a video under *out*."""
        img_array = []
        attention_images_list = sorted(glob.glob(os.path.join(inp, "attn-*.jpg")))
        # Get size of the first image
        with open(attention_images_list[0], "rb") as f:
            img = Image.open(f)
            img = img.convert("RGB")
            size = (img.width, img.height)
            img_array.append(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
        print(f"Generating video {size} to {out}")
        for filename in tqdm(attention_images_list[1:]):
            with open(filename, "rb") as f:
                img = Image.open(f)
                img = img.convert("RGB")
                img_array.append(cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
        out = cv2.VideoWriter(
            os.path.join(out, "video." + self.args.video_format),
            FOURCC[self.args.video_format],
            self.args.fps,
            size,
        )
        for i in range(len(img_array)):
            out.write(img_array[i])
        out.release()
        print("Done")

    def _inference(self, inp: str, out: str):
        """Run the ViT on every `*.jpg` in *inp* and save attention heatmaps to *out*."""
        print(f"Generating attention images to {out}")
        for img_path in tqdm(sorted(glob.glob(os.path.join(inp, "*.jpg")))):
            with open(img_path, "rb") as f:
                img = Image.open(f)
                img = img.convert("RGB")
            if self.args.resize is not None:
                transform = pth_transforms.Compose(
                    [
                        pth_transforms.ToTensor(),
                        pth_transforms.Resize(self.args.resize),
                        pth_transforms.Normalize(
                            (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
                        ),
                    ]
                )
            else:
                transform = pth_transforms.Compose(
                    [
                        pth_transforms.ToTensor(),
                        pth_transforms.Normalize(
                            (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
                        ),
                    ]
                )
            img = transform(img)
            # make the image divisible by the patch size
            w, h = (
                img.shape[1] - img.shape[1] % self.args.patch_size,
                img.shape[2] - img.shape[2] % self.args.patch_size,
            )
            img = img[:, :w, :h].unsqueeze(0)
            w_featmap = img.shape[-2] // self.args.patch_size
            h_featmap = img.shape[-1] // self.args.patch_size
            attentions = self.model.get_last_selfattention(img.to(DEVICE))
            nh = attentions.shape[1]  # number of head
            # we keep only the output patch attention
            attentions = attentions[0, :, 0, 1:].reshape(nh, -1)
            # we keep only a certain percentage of the mass
            val, idx = torch.sort(attentions)
            val /= torch.sum(val, dim=1, keepdim=True)
            cumval = torch.cumsum(val, dim=1)
            th_attn = cumval > (1 - self.args.threshold)
            idx2 = torch.argsort(idx)
            for head in range(nh):
                th_attn[head] = th_attn[head][idx2[head]]
            th_attn = th_attn.reshape(nh, w_featmap, h_featmap).float()
            # interpolate
            th_attn = (
                nn.functional.interpolate(
                    th_attn.unsqueeze(0),
                    scale_factor=self.args.patch_size,
                    mode="nearest",
                )[0]
                .cpu()
                .numpy()
            )
            attentions = attentions.reshape(nh, w_featmap, h_featmap)
            attentions = (
                nn.functional.interpolate(
                    attentions.unsqueeze(0),
                    scale_factor=self.args.patch_size,
                    mode="nearest",
                )[0]
                .cpu()
                .numpy()
            )
            # save attentions heatmaps (head-averaged, inferno colormap)
            fname = os.path.join(out, "attn-" + os.path.basename(img_path))
            plt.imsave(
                fname=fname,
                arr=sum(
                    attentions[i] * 1 / attentions.shape[0]
                    for i in range(attentions.shape[0])
                ),
                cmap="inferno",
                format="jpg",
            )

    def __load_model(self):
        """Build the ViT and load pretrained weights (user-provided or reference DINO)."""
        # build model
        model = vits.__dict__[self.args.arch](
            patch_size=self.args.patch_size, num_classes=0
        )
        for p in model.parameters():
            p.requires_grad = False
        model.eval()
        model.to(DEVICE)
        if os.path.isfile(self.args.pretrained_weights):
            state_dict = torch.load(self.args.pretrained_weights, map_location="cpu")
            if (
                self.args.checkpoint_key is not None
                and self.args.checkpoint_key in state_dict
            ):
                print(
                    f"Take key {self.args.checkpoint_key} in provided checkpoint dict"
                )
                state_dict = state_dict[self.args.checkpoint_key]
            state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
            # remove `backbone.` prefix induced by multicrop wrapper
            state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
            msg = model.load_state_dict(state_dict, strict=False)
            print(
                "Pretrained weights found at {} and loaded with msg: {}".format(
                    self.args.pretrained_weights, msg
                )
            )
        else:
            print(
                "Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate."
            )
            url = None
            if self.args.arch == "vit_small" and self.args.patch_size == 16:
                url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
            elif self.args.arch == "vit_small" and self.args.patch_size == 8:
                url = "dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth"  # model used for visualizations in our paper
            elif self.args.arch == "vit_base" and self.args.patch_size == 16:
                url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
            elif self.args.arch == "vit_base" and self.args.patch_size == 8:
                url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
            if url is not None:
                print(
                    "Since no pretrained weights have been provided, we load the reference pretrained DINO weights."
                )
                state_dict = torch.hub.load_state_dict_from_url(
                    url="https://dl.fbaipublicfiles.com/dino/" + url
                )
                model.load_state_dict(state_dict, strict=True)
            else:
                print(
                    "There is no reference weights available for this model => We use random weights."
                )
        return model
def parse_args():
    """Build and parse the CLI arguments for self-attention video generation."""
    parser = argparse.ArgumentParser("Generation self-attention video")
    parser.add_argument(
        "--arch",
        default="vit_small",
        type=str,
        choices=["vit_tiny", "vit_small", "vit_base"],
        help="Architecture (support only ViT atm).",
    )
    parser.add_argument(
        "--patch_size", default=8, type=int, help="Patch resolution of the self.model."
    )
    parser.add_argument(
        "--pretrained_weights",
        default="",
        type=str,
        help="Path to pretrained weights to load.",
    )
    parser.add_argument(
        "--checkpoint_key",
        default="teacher",
        type=str,
        help='Key to use in the checkpoint (example: "teacher")',
    )
    parser.add_argument(
        "--input_path",
        required=True,
        type=str,
        help="""Path to a video file if you want to extract frames
            or to a folder of images already extracted by yourself.
            or to a folder of attention images.""",
    )
    parser.add_argument(
        "--output_path",
        default="./",
        type=str,
        help="""Path to store a folder of frames and / or a folder of attention images.
            and / or a final video. Default to current directory.""",
    )
    parser.add_argument(
        "--threshold",
        type=float,
        default=0.6,
        help="""We visualize masks
        obtained by thresholding the self-attention maps to keep xx percent of the mass.""",
    )
    parser.add_argument(
        "--resize",
        default=None,
        type=int,
        nargs="+",
        help="""Apply a resize transformation to input image(s). Use if OOM error.
        Usage (single or W H): --resize 512, --resize 720 1280""",
    )
    parser.add_argument(
        "--video_only",
        action="store_true",
        help="""Use this flag if you only want to generate a video and not all attention images.
            If used, --input_path must be set to the folder of attention images. Ex: ./attention/""",
    )
    parser.add_argument(
        "--fps",
        default=30.0,
        type=float,
        help="FPS of input / output video. Automatically set if you extract frames from a video.",
    )
    parser.add_argument(
        "--video_format",
        default="mp4",
        type=str,
        choices=["mp4", "avi"],
        help="Format of generated video (mp4 or avi).",
    )
    return parser.parse_args()
if __name__ == "__main__":
    # CLI entry point: build the generator from parsed arguments and run it.
    vg = VideoGenerator(parse_args())
    vg.run()
| 13,669 | 35.068602 | 135 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/vision_transformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
from functools import partial
import torch
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """In-place truncated-normal initialization; delegates to the no-grad worker."""
    return _no_grad_trunc_normal_(tensor, mean=mean, std=std, a=a, b=b)
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Stochastic depth: randomly zero whole samples of *x*, rescaling survivors by 1/keep_prob."""
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Module wrapper around :func:`drop_path`; active only while training.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> Linear, dropout after each."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention that returns both the output tokens and the attention map."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        # Default scaling is 1/sqrt(head_dim).
        self.scale = qk_scale or (dim // num_heads) ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        head_dim = C // self.num_heads
        # (3, B, heads, N, head_dim) then split into q, k, v.
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = self.attn_drop(attn.softmax(dim=-1))
        out = (attn @ v).transpose(1, 2).reshape(B, N, C)
        out = self.proj_drop(self.proj(out))
        return out, attn
class Block(nn.Module):
    """Pre-norm transformer encoder block: attention + MLP, each with a residual
    connection and optional stochastic depth."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)

    def forward(self, x, return_attention=False):
        attn_out, attn = self.attn(self.norm1(x))
        if return_attention:
            # Expose the raw attention map (used for visualization).
            return attn
        x = x + self.drop_path(attn_out)
        return x + self.drop_path(self.mlp(self.norm2(x)))
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding

    Splits an image into non-overlapping patches via a strided convolution and
    flattens them into a (B, num_patches, embed_dim) token sequence.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = (img_size // patch_size) ** 2
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        return self.proj(x).flatten(2).transpose(1, 2)
class VisionTransformer(nn.Module):
    """ Vision Transformer

    DINO-style ViT backbone: patchify -> prepend [CLS] -> add (possibly
    interpolated) positional embeddings -> transformer blocks -> LayerNorm.
    `forward` returns the final [CLS] token embedding.
    """
    def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim
        self.patch_embed = PatchEmbed(
            img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        # Learnable [CLS] token and positional embeddings (one per patch + CLS).
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # Classifier head (Identity when num_classes == 0, i.e. feature extraction)
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        """Truncated-normal init for Linear layers; unit/zero init for LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def interpolate_pos_encoding(self, x, w, h):
        """Bicubically resize patch positional embeddings for inputs whose
        resolution (w, h, in pixels) differs from the training resolution."""
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1] - 1
        if npatch == N and w == h:
            return self.pos_embed
        class_pos_embed = self.pos_embed[:, 0]
        patch_pos_embed = self.pos_embed[:, 1:]
        dim = x.shape[-1]
        w0 = w // self.patch_embed.patch_size
        h0 = h // self.patch_embed.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + 0.1, h0 + 0.1
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
            mode='bicubic',
        )
        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
    def prepare_tokens(self, x):
        """Patchify *x*, prepend the [CLS] token, and add positional encodings."""
        B, nc, w, h = x.shape
        x = self.patch_embed(x)  # patch linear embedding
        # add the [CLS] token to the embed patch tokens
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        # add positional encoding to each token
        x = x + self.interpolate_pos_encoding(x, w, h)
        return self.pos_drop(x)
    def forward(self, x):
        """Return the normalized [CLS] token embedding for a batch of images."""
        x = self.prepare_tokens(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x[:, 0]
    def get_last_selfattention(self, x):
        """Return the attention map of the last block (used for visualization)."""
        x = self.prepare_tokens(x)
        for i, blk in enumerate(self.blocks):
            if i < len(self.blocks) - 1:
                x = blk(x)
            else:
                # return attention of the last block
                return blk(x, return_attention=True)
    def get_intermediate_layers(self, x, n=1):
        """Return normalized token outputs of the last *n* blocks."""
        x = self.prepare_tokens(x)
        # we return the output tokens from the `n` last blocks
        output = []
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if len(self.blocks) - i <= n:
                output.append(self.norm(x))
        return output
def vit_tiny(patch_size=16, **kwargs):
    """ViT-Tiny: embed_dim 192, 12 layers, 3 heads."""
    return VisionTransformer(
        patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
def vit_small(patch_size=16, **kwargs):
    """ViT-Small: embed_dim 384, 12 layers, 6 heads."""
    return VisionTransformer(
        patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
def vit_base(patch_size=16, **kwargs):
    """ViT-Base: embed_dim 768, 12 layers, 12 heads."""
    return VisionTransformer(
        patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
class DINOHead(nn.Module):
    """DINO projection head: an MLP down to a bottleneck, followed by an
    L2-normalization and a weight-normalized prototype layer whose scale
    (weight_g) is fixed to 1 and optionally frozen."""

    def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
        super().__init__()
        nlayers = max(nlayers, 1)
        if nlayers == 1:
            # Degenerate case: a single projection to the bottleneck.
            self.mlp = nn.Linear(in_dim, bottleneck_dim)
        else:
            stack = [nn.Linear(in_dim, hidden_dim)]
            if use_bn:
                stack.append(nn.BatchNorm1d(hidden_dim))
            stack.append(nn.GELU())
            for _ in range(nlayers - 2):
                stack.append(nn.Linear(hidden_dim, hidden_dim))
                if use_bn:
                    stack.append(nn.BatchNorm1d(hidden_dim))
                stack.append(nn.GELU())
            stack.append(nn.Linear(hidden_dim, bottleneck_dim))
            self.mlp = nn.Sequential(*stack)
        self.apply(self._init_weights)
        self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
        self.last_layer.weight_g.data.fill_(1)
        if norm_last_layer:
            # Freeze the magnitude so only the direction of prototypes trains.
            self.last_layer.weight_g.requires_grad = False

    def _init_weights(self, m):
        """Truncated-normal init for Linear weights, zero for biases."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        z = nn.functional.normalize(self.mlp(x), dim=-1, p=2)
        return self.last_layer(z)
| 12,706 | 37.389728 | 124 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/main_dino4k.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import datetime
import time
import math
import json
from pathlib import Path
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision import models as torchvision_models
from torch.utils.data.dataset import Dataset
import utils
import vision_transformer4k as vits
from vision_transformer4k import DINOHead
from einops import rearrange, repeat, reduce
# Names of all lowercase, callable (model-constructor) attributes exposed by
# torchvision.models; used to extend the --arch argument choices below.
torchvision_archs = sorted(name for name in torchvision_models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(torchvision_models.__dict__[name]))
def get_args_parser() -> argparse.ArgumentParser:
    """Build the CLI argument parser for DINO pretraining on 4K-region feature grids.

    Created with ``add_help=False`` so it can be used as a parent parser by
    the ``__main__`` entry point.
    """
    parser = argparse.ArgumentParser('DINO4K', add_help=False)

    # Model parameters
    # NOTE(review): the default 'vit_xs' is not in `choices`; argparse does not
    # validate defaults, so the default silently bypasses the choices check and
    # `--arch vit_xs` would be rejected — confirm intended default (likely 'vit4k_xs').
    parser.add_argument('--arch', default='vit_xs', type=str,
        choices=['vit4k_xs', 'vit_tiny', 'vit_small', 'vit_base', 'xcit', 'deit_tiny', 'deit_small'] \
                + torchvision_archs + torch.hub.list("facebookresearch/xcit:main"),
        help="""Name of architecture to train. For quick experiments with ViTs,
        we recommend using vit_tiny or vit_small.""")
    parser.add_argument('--patch_size', default=16, type=int, help="""Size in pixels
        of input square patches - default 16 (for 16x16 patches). Using smaller
        values leads to better performance but requires more memory. Applies only
        for ViTs (vit_tiny, vit_small and vit_base). If <16, we recommend disabling
        mixed precision training (--use_fp16 false) to avoid unstabilities.""")
    parser.add_argument('--out_dim', default=65536, type=int, help="""Dimensionality of
        the DINO head output. For complex and large datasets large values (like 65k) work well.""")
    parser.add_argument('--norm_last_layer', default=True, type=utils.bool_flag,
        help="""Whether or not to weight normalize the last layer of the DINO head.
        Not normalizing leads to better performance but can make the training unstable.
        In our experiments, we typically set this paramater to False with vit_small and True with vit_base.""")
    parser.add_argument('--momentum_teacher', default=0.996, type=float, help="""Base EMA
        parameter for teacher update. The value is increased to 1 during training with cosine schedule.
        We recommend setting a higher value with small batches: for example use 0.9995 with batch size of 256.""")
    parser.add_argument('--use_bn_in_head', default=False, type=utils.bool_flag,
        help="Whether to use batch normalizations in projection head (Default: False)")

    # Temperature teacher parameters
    parser.add_argument('--warmup_teacher_temp', default=0.04, type=float,
        help="""Initial value for the teacher temperature: 0.04 works well in most cases.
        Try decreasing it if the training loss does not decrease.""")
    parser.add_argument('--teacher_temp', default=0.04, type=float, help="""Final value (after linear warmup)
        of the teacher temperature. For most experiments, anything above 0.07 is unstable. We recommend
        starting with the default value of 0.04 and increase this slightly if needed.""")
    parser.add_argument('--warmup_teacher_temp_epochs', default=0, type=int,
        help='Number of warmup epochs for the teacher temperature (Default: 30).')

    # Training/Optimization parameters
    parser.add_argument('--use_fp16', type=utils.bool_flag, default=True, help="""Whether or not
        to use half precision for training. Improves training time and memory requirements,
        but can provoke instability and slight decay of performance. We recommend disabling
        mixed precision if the loss is unstable, if reducing the patch size or if training with bigger ViTs.""")
    parser.add_argument('--weight_decay', type=float, default=0.04, help="""Initial value of the
        weight decay. With ViT, a smaller value at the beginning of training works well.""")
    parser.add_argument('--weight_decay_end', type=float, default=0.4, help="""Final value of the
        weight decay. We use a cosine schedule for WD and using a larger decay by
        the end of training improves performance for ViTs.""")
    parser.add_argument('--clip_grad', type=float, default=3.0, help="""Maximal parameter
        gradient norm if using gradient clipping. Clipping with norm .3 ~ 1.0 can
        help optimization for larger ViT architectures. 0 for disabling.""")
    parser.add_argument('--batch_size_per_gpu', default=64, type=int,
        help='Per-GPU batch-size : number of distinct images loaded on one GPU.')
    parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.')
    parser.add_argument('--freeze_last_layer', default=1, type=int, help="""Number of epochs
        during which we keep the output layer fixed. Typically doing so during
        the first epoch helps training. Try increasing this value if the loss does not decrease.""")
    parser.add_argument("--lr", default=0.0005, type=float, help="""Learning rate at the end of
        linear warmup (highest LR used during training). The learning rate is linearly scaled
        with the batch size, and specified here for a reference batch size of 256.""")
    parser.add_argument("--warmup_epochs", default=10, type=int,
        help="Number of epochs for the linear learning-rate warm up.")
    parser.add_argument('--min_lr', type=float, default=1e-6, help="""Target LR at the
        end of optimization. We use a cosine LR schedule with linear warmup.""")
    parser.add_argument('--optimizer', default='adamw', type=str,
        choices=['adamw', 'sgd', 'lars'], help="""Type of optimizer. We recommend using adamw with ViTs.""")
    parser.add_argument('--drop_path_rate', type=float, default=0.1, help="stochastic depth rate")

    # Multi-crop parameters
    # NOTE(review): global/local_crops_scale are parsed but DataAugmentationDINO4K
    # in this file only takes local_crops_number — scales appear unused here.
    parser.add_argument('--global_crops_scale', type=float, nargs='+', default=(0.4, 1.),
        help="""Scale range of the cropped image before resizing, relatively to the origin image.
        Used for large global view cropping. When disabling multi-crop (--local_crops_number 0), we
        recommand using a wider range of scale ("--global_crops_scale 0.14 1." for example)""")
    parser.add_argument('--local_crops_number', type=int, default=8, help="""Number of small
        local views to generate. Set this parameter to 0 to disable multi-crop training.
        When disabling multi-crop we recommend to use "--global_crops_scale 0.14 1." """)
    parser.add_argument('--local_crops_scale', type=float, nargs='+', default=(0.05, 0.4),
        help="""Scale range of the cropped image before resizing, relatively to the origin image.
        Used for small local view cropping of multi-crop.""")

    # Misc
    parser.add_argument('--data_path', default='/path/to/imagenet/train/', type=str,
        help='Please specify path to the ImageNet training data.')
    parser.add_argument('--output_dir', default=".", type=str, help='Path to save logs and checkpoints.')
    parser.add_argument('--saveckp_freq', default=20, type=int, help='Save checkpoint every x epochs.')
    parser.add_argument('--seed', default=0, type=int, help='Random seed.')
    parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
    parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
        distributed training; see https://pytorch.org/docs/stable/distributed.html""")
    parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
    return parser
def train_dino(args):
    """End-to-end DINO pretraining over pre-extracted [256 x 384] 4K-region features.

    Initializes distributed mode, builds the data pipeline, student/teacher
    networks, loss, optimizer and schedulers, then runs the epoch loop with
    periodic checkpointing and JSON logging (rank 0 only for I/O).
    """
    utils.init_distributed_mode(args)
    utils.fix_random_seeds(args.seed)
    print("git:\n  {}\n".format(utils.get_sha()))
    print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
    cudnn.benchmark = True

    # ============ preparing data ... ============
    transform = DataAugmentationDINO4K(
        args.local_crops_number
    )
    # Using custom dataset for our [256 x 384] tensors
    dataset = SeqDataset(dataroot=args.data_path, transform=transform)
    sampler = torch.utils.data.DistributedSampler(dataset, shuffle=True)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        sampler=sampler,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True,
    )
    print(f"Data loaded: there are {len(dataset)} images.")

    # ============ building student and teacher networks ... ============
    # we changed the name DeiT-S for ViT-S to avoid confusions
    args.arch = args.arch.replace("deit", "vit")
    # if the network is a Vision Transformer (i.e. vit_tiny, vit_small, vit_base)
    if args.arch in vits.__dict__.keys():
        student = vits.__dict__[args.arch](
            patch_size=args.patch_size,
            drop_path_rate=args.drop_path_rate,  # stochastic depth
        )
        teacher = vits.__dict__[args.arch](patch_size=args.patch_size)
        embed_dim = student.embed_dim
    # if the network is a XCiT
    elif args.arch in torch.hub.list("facebookresearch/xcit:main"):
        student = torch.hub.load('facebookresearch/xcit:main', args.arch,
                                 pretrained=False, drop_path_rate=args.drop_path_rate)
        teacher = torch.hub.load('facebookresearch/xcit:main', args.arch, pretrained=False)
        embed_dim = student.embed_dim
    # otherwise, we check if the architecture is in torchvision models
    elif args.arch in torchvision_models.__dict__.keys():
        student = torchvision_models.__dict__[args.arch]()
        teacher = torchvision_models.__dict__[args.arch]()
        embed_dim = student.fc.weight.shape[1]
    else:
        # NOTE(review): execution continues with `student`/`teacher` undefined,
        # so a NameError follows a few lines below — consider exiting here.
        print(f"Unknow architecture: {args.arch}")

    # multi-crop wrapper handles forward with inputs of different resolutions
    student = utils.MultiCropWrapper(student, DINOHead(
        embed_dim,
        args.out_dim,
        use_bn=args.use_bn_in_head,
        norm_last_layer=args.norm_last_layer,
    ))
    teacher = utils.MultiCropWrapper(
        teacher,
        DINOHead(embed_dim, args.out_dim, args.use_bn_in_head),
    )
    # move networks to gpu
    student, teacher = student.cuda(), teacher.cuda()
    # synchronize batch norms (if any)
    if utils.has_batchnorms(student):
        student = nn.SyncBatchNorm.convert_sync_batchnorm(student)
        teacher = nn.SyncBatchNorm.convert_sync_batchnorm(teacher)

        # we need DDP wrapper to have synchro batch norms working...
        teacher = nn.parallel.DistributedDataParallel(teacher, device_ids=[args.gpu])
        teacher_without_ddp = teacher.module
    else:
        # teacher_without_ddp and teacher are the same thing
        teacher_without_ddp = teacher
    student = nn.parallel.DistributedDataParallel(student, device_ids=[args.gpu], find_unused_parameters=True)
    # teacher and student start with the same weights
    teacher_without_ddp.load_state_dict(student.module.state_dict())
    # there is no backpropagation through the teacher, so no need for gradients
    for p in teacher.parameters():
        p.requires_grad = False
    print(f"Student and Teacher are built: they are both {args.arch} network.")

    # ============ preparing loss ... ============
    dino_loss = DINOLoss(
        args.out_dim,
        args.local_crops_number + 2,  # total number of crops = 2 global crops + local_crops_number
        args.warmup_teacher_temp,
        args.teacher_temp,
        args.warmup_teacher_temp_epochs,
        args.epochs,
    ).cuda()

    # ============ preparing optimizer ... ============
    params_groups = utils.get_params_groups(student)
    if args.optimizer == "adamw":
        optimizer = torch.optim.AdamW(params_groups)  # to use with ViTs
    elif args.optimizer == "sgd":
        optimizer = torch.optim.SGD(params_groups, lr=0, momentum=0.9)  # lr is set by scheduler
    elif args.optimizer == "lars":
        optimizer = utils.LARS(params_groups)  # to use with convnet and large batches
    # for mixed precision training
    fp16_scaler = None
    if args.use_fp16:
        fp16_scaler = torch.cuda.amp.GradScaler()

    # ============ init schedulers ... ============
    # Per-iteration schedules (length = epochs * iters_per_epoch), indexed by
    # the global iteration counter inside train_one_epoch.
    lr_schedule = utils.cosine_scheduler(
        args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256.,  # linear scaling rule
        args.min_lr,
        args.epochs, len(data_loader),
        warmup_epochs=args.warmup_epochs,
    )
    wd_schedule = utils.cosine_scheduler(
        args.weight_decay,
        args.weight_decay_end,
        args.epochs, len(data_loader),
    )
    # momentum parameter is increased to 1. during training with a cosine schedule
    momentum_schedule = utils.cosine_scheduler(args.momentum_teacher, 1,
                                               args.epochs, len(data_loader))
    print(f"Loss, optimizer and schedulers ready.")

    # ============ optionally resume training ... ============
    to_restore = {"epoch": 0}
    utils.restart_from_checkpoint(
        os.path.join(args.output_dir, "checkpoint.pth"),
        run_variables=to_restore,
        student=student,
        teacher=teacher,
        optimizer=optimizer,
        fp16_scaler=fp16_scaler,
        dino_loss=dino_loss,
    )
    start_epoch = to_restore["epoch"]

    start_time = time.time()
    print("Starting DINO training !")
    for epoch in range(start_epoch, args.epochs):
        data_loader.sampler.set_epoch(epoch)

        # ============ training one epoch of DINO ... ============
        train_stats = train_one_epoch(student, teacher, teacher_without_ddp, dino_loss,
            data_loader, optimizer, lr_schedule, wd_schedule, momentum_schedule,
            epoch, fp16_scaler, args)

        # ============ writing logs ... ============
        save_dict = {
            'student': student.state_dict(),
            'teacher': teacher.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch + 1,
            'args': args,
            'dino_loss': dino_loss.state_dict(),
        }
        if fp16_scaler is not None:
            save_dict['fp16_scaler'] = fp16_scaler.state_dict()
        utils.save_on_master(save_dict, os.path.join(args.output_dir, 'checkpoint.pth'))
        if args.saveckp_freq and epoch % args.saveckp_freq == 0:
            utils.save_on_master(save_dict, os.path.join(args.output_dir, f'checkpoint{epoch:04}.pth'))
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     'epoch': epoch}
        if utils.is_main_process():
            with (Path(args.output_dir) / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def train_one_epoch(student, teacher, teacher_without_ddp, dino_loss, data_loader,
                    optimizer, lr_schedule, wd_schedule, momentum_schedule, epoch,
                    fp16_scaler, args):
    """Run one DINO training epoch.

    For each batch: step the LR/WD schedules, forward teacher (global views only)
    and student (all views), backprop the student (optionally with AMP), then
    EMA-update the teacher from the student weights.

    Returns:
        dict mapping metric name -> epoch-averaged value (loss, lr, wd).
    """
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Epoch: [{}/{}]'.format(epoch, args.epochs)
    for it, (images, _) in enumerate(metric_logger.log_every(data_loader, 10, header)):
        # update weight decay and learning rate according to their schedule
        it = len(data_loader) * epoch + it  # global training iteration
        for i, param_group in enumerate(optimizer.param_groups):
            param_group["lr"] = lr_schedule[it]
            if i == 0:  # only the first group is regularized
                param_group["weight_decay"] = wd_schedule[it]

        # move images to gpu
        images = [im.cuda(non_blocking=True) for im in images]
        # teacher and student forward passes + compute dino loss
        with torch.cuda.amp.autocast(fp16_scaler is not None):
            teacher_output = teacher(images[:2])  # only the 2 global views pass through the teacher
            student_output = student(images)
            loss = dino_loss(student_output, teacher_output, epoch)
        # (leftover per-iteration debug print and redundant `loss = loss`
        # removed; metric_logger below already tracks the loss)

        if not math.isfinite(loss.item()):
            print("Loss is {}, stopping training".format(loss.item()), force=True)
            sys.exit(1)

        # student update
        optimizer.zero_grad()
        param_norms = None
        if fp16_scaler is None:
            loss.backward()
            if args.clip_grad:
                param_norms = utils.clip_gradients(student, args.clip_grad)
            utils.cancel_gradients_last_layer(epoch, student,
                                              args.freeze_last_layer)
            optimizer.step()
        else:
            fp16_scaler.scale(loss).backward()
            if args.clip_grad:
                fp16_scaler.unscale_(optimizer)  # unscale the gradients of optimizer's assigned params in-place
                param_norms = utils.clip_gradients(student, args.clip_grad)
            utils.cancel_gradients_last_layer(epoch, student,
                                              args.freeze_last_layer)
            fp16_scaler.step(optimizer)
            fp16_scaler.update()

        # EMA update for the teacher
        with torch.no_grad():
            m = momentum_schedule[it]  # momentum parameter
            for param_q, param_k in zip(student.module.parameters(), teacher_without_ddp.parameters()):
                param_k.data.mul_(m).add_((1 - m) * param_q.detach().data)

        # logging
        torch.cuda.synchronize()
        metric_logger.update(loss=loss.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        metric_logger.update(wd=optimizer.param_groups[0]["weight_decay"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
class DINOLoss(nn.Module):
    """DINO self-distillation loss.

    Cross-entropy between the centered/sharpened teacher distribution and the
    student distribution, summed over every (teacher global view, student view)
    pair except same-view pairs. Maintains an EMA "center" of teacher outputs
    to prevent collapse (requires torch.distributed to be initialized, since
    the center update all-reduces across workers).
    """
    def __init__(self, out_dim, ncrops, warmup_teacher_temp, teacher_temp,
                 warmup_teacher_temp_epochs, nepochs, student_temp=0.1,
                 center_momentum=0.9):
        super().__init__()
        self.student_temp = student_temp
        self.center_momentum = center_momentum
        self.ncrops = ncrops
        # Registered as a buffer so the center is saved/restored with state_dict.
        self.register_buffer("center", torch.zeros(1, out_dim))
        # we apply a warm up for the teacher temperature because
        # a too high temperature makes the training instable at the beginning
        self.teacher_temp_schedule = np.concatenate((
            np.linspace(warmup_teacher_temp,
                        teacher_temp, warmup_teacher_temp_epochs),
            np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp
        ))

    def forward(self, student_output, teacher_output, epoch):
        """
        Cross-entropy between softmax outputs of the teacher and student networks.

        student_output is the concatenation of all `ncrops` views along the
        batch dim; teacher_output concatenates the 2 global views only.
        """
        student_out = student_output / self.student_temp
        student_out = student_out.chunk(self.ncrops)  # one chunk per crop

        # teacher centering and sharpening
        temp = self.teacher_temp_schedule[epoch]
        teacher_out = F.softmax((teacher_output - self.center) / temp, dim=-1)
        teacher_out = teacher_out.detach().chunk(2)

        total_loss = 0
        n_loss_terms = 0
        for iq, q in enumerate(teacher_out):
            for v in range(len(student_out)):
                if v == iq:
                    # we skip cases where student and teacher operate on the same view
                    continue
                loss = torch.sum(-q * F.log_softmax(student_out[v], dim=-1), dim=-1)
                total_loss += loss.mean()
                n_loss_terms += 1
        total_loss /= n_loss_terms
        self.update_center(teacher_output)
        return total_loss

    @torch.no_grad()
    def update_center(self, teacher_output):
        """
        Update center used for teacher output.

        EMA of the mean teacher logit across the global batch (all workers).
        """
        batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
        dist.all_reduce(batch_center)
        batch_center = batch_center / (len(teacher_output) * dist.get_world_size())

        # ema update
        self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)
### Custom Dataset Implemented to Load in [256-Length x 384-Dim] Tensors which correspond to extracted ViT-16 features for 4K x 4K patch
class SeqDataset(Dataset):
    """Dataset of pre-extracted feature tensors, one serialized file per 4K x 4K region.

    Each file under `dataroot` is loaded with torch.load, passed through
    `transform`, and paired with a placeholder zero label (DINO is unsupervised).
    """

    def __init__(self, dataroot, transform):
        self.seq_list = [os.path.join(dataroot, fname) for fname in os.listdir(dataroot)]
        self.transform = transform

    def __getitem__(self, index):
        features = torch.load(self.seq_list[index])
        dummy_label = torch.zeros(1, 1)
        return self.transform(features), dummy_label

    def __len__(self):
        return len(self.seq_list)
### Modified Data Augmentaton for DINO for 4K x 4K resolutions for performing local / global crops on features in image grid
class DataAugmentationDINO4K(object):
    """DINO multi-crop augmentation on a grid of patch features (not raw pixels).

    __call__ expects a [256 x 384] tensor of ViT-16 features (see SeqDataset),
    reshapes it into a 16x16 spatial grid of 384-dim tokens, and returns
    2 global crops (14x14 windows) plus `local_crops_number` local crops
    (6x6 windows), each randomly flipped.
    """
    def __init__(self, local_crops_number):
        # (removed an unused `flip` Compose that was built but never referenced)
        # first global crop: random 14x14 window out of the 16x16 token grid
        self.global_transfo1 = transforms.Compose([
            transforms.RandomCrop(14),
            transforms.RandomHorizontalFlip(p=0.5),
        ])
        # second global crop
        self.global_transfo2 = transforms.Compose([
            transforms.RandomCrop(14),
            transforms.RandomHorizontalFlip(p=0.5),
        ])
        # transformation for the local small crops: 6x6 windows
        self.local_crops_number = local_crops_number
        self.local_transfo = transforms.Compose([
            transforms.RandomCrop(6),
            transforms.RandomHorizontalFlip(p=0.5),
        ])

    def __call__(self, image):
        """Return [global1, global2, local * local_crops_number] crop tensors."""
        crops = []
        # [256, 384] -> unfold over dim 0 into 16 windows of 16 tokens ->
        # [16, 384, 16] -> transpose -> [384, 16, 16] (channels-first grid,
        # the layout torchvision's tensor transforms expect).
        image = image.unfold(0, 16, 16).transpose(0, 1)
        crops.append(self.global_transfo1(image))
        crops.append(self.global_transfo2(image))
        for _ in range(self.local_crops_number):
            crops.append(self.local_transfo(image))
        return crops
if __name__ == '__main__':
    # get_args_parser() is built with add_help=False, so this parent parser
    # supplies --help; create the output dir before training writes to it.
    parser = argparse.ArgumentParser('DINO4K', parents=[get_args_parser()])
    args = parser.parse_args()
    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    train_dino(args)
| 23,147 | 47.225 | 136 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/eval_knn.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
def extract_feature_pipeline(args):
    """Build loaders + pretrained backbone and extract train/val features.

    Features are L2-normalized on rank 0 and optionally dumped to
    ``args.dump_features``. Returns
    (train_features, test_features, train_labels, test_labels).
    """
    # ============ preparing data ... ============
    transform = pth_transforms.Compose([
        pth_transforms.Resize(256, interpolation=3),
        pth_transforms.CenterCrop(224),
        pth_transforms.ToTensor(),
        pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    dataset_train = ReturnIndexDataset(os.path.join(args.data_path, "train"), transform=transform)
    dataset_val = ReturnIndexDataset(os.path.join(args.data_path, "val"), transform=transform)
    # shuffle=False so dataset indices line up with feature rows.
    sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train,
        sampler=sampler,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")

    # ============ building network ... ============
    if "vit" in args.arch:
        model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
        print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
    elif "xcit" in args.arch:
        model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0)
    elif args.arch in torchvision_models.__dict__.keys():
        model = torchvision_models.__dict__[args.arch](num_classes=0)
        model.fc = nn.Identity()
    else:
        print(f"Architecture {args.arch} non supported")
        sys.exit(1)
    model.cuda()
    utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
    model.eval()

    # ============ extract features ... ============
    print("Extracting features for train set...")
    train_features = extract_features(model, data_loader_train, args.use_cuda)
    print("Extracting features for val set...")
    test_features = extract_features(model, data_loader_val, args.use_cuda)

    if utils.get_rank() == 0:
        # Normalize so the dot products in knn_classifier are cosine similarities.
        train_features = nn.functional.normalize(train_features, dim=1, p=2)
        test_features = nn.functional.normalize(test_features, dim=1, p=2)

    train_labels = torch.tensor([s[-1] for s in dataset_train.samples]).long()
    test_labels = torch.tensor([s[-1] for s in dataset_val.samples]).long()
    # save features and labels
    if args.dump_features and dist.get_rank() == 0:
        torch.save(train_features.cpu(), os.path.join(args.dump_features, "trainfeat.pth"))
        torch.save(test_features.cpu(), os.path.join(args.dump_features, "testfeat.pth"))
        torch.save(train_labels.cpu(), os.path.join(args.dump_features, "trainlabels.pth"))
        torch.save(test_labels.cpu(), os.path.join(args.dump_features, "testlabels.pth"))
    return train_features, test_features, train_labels, test_labels
@torch.no_grad()
def extract_features(model, data_loader, use_cuda=True, multiscale=False):
    """Run the model over `data_loader` and gather features from all ranks.

    All ranks all_gather their batch features/indices; only rank 0 writes them
    into the full [num_samples, dim] matrix. Consequently rank 0 returns the
    filled tensor while non-zero ranks return None.
    """
    metric_logger = utils.MetricLogger(delimiter="  ")
    features = None
    for samples, index in metric_logger.log_every(data_loader, 10):
        samples = samples.cuda(non_blocking=True)
        index = index.cuda(non_blocking=True)
        if multiscale:
            feats = utils.multi_scale(samples, model)
        else:
            feats = model(samples).clone()

        # init storage feature matrix
        if dist.get_rank() == 0 and features is None:
            features = torch.zeros(len(data_loader.dataset), feats.shape[-1])
            if use_cuda:
                features = features.cuda(non_blocking=True)
            print(f"Storing features into tensor of shape {features.shape}")

        # get indexes from all processes
        y_all = torch.empty(dist.get_world_size(), index.size(0), dtype=index.dtype, device=index.device)
        y_l = list(y_all.unbind(0))
        y_all_reduce = torch.distributed.all_gather(y_l, index, async_op=True)
        y_all_reduce.wait()
        index_all = torch.cat(y_l)

        # share features between processes
        feats_all = torch.empty(
            dist.get_world_size(),
            feats.size(0),
            feats.size(1),
            dtype=feats.dtype,
            device=feats.device,
        )
        output_l = list(feats_all.unbind(0))
        output_all_reduce = torch.distributed.all_gather(output_l, feats, async_op=True)
        output_all_reduce.wait()

        # update storage feature matrix
        # index_copy_ scatters each gathered row back to its dataset index,
        # so ordering is correct even with a DistributedSampler.
        if dist.get_rank() == 0:
            if use_cuda:
                features.index_copy_(0, index_all, torch.cat(output_l))
            else:
                features.index_copy_(0, index_all.cpu(), torch.cat(output_l).cpu())
    return features
@torch.no_grad()
def knn_classifier(train_features, train_labels, test_features, test_labels, k, T, num_classes=1000):
    """Weighted k-NN classification over precomputed features.

    Args:
        train_features: [num_train, dim] feature bank (L2-normalized upstream,
            so dot products are cosine similarities).
        train_labels: [num_train] long tensor of class ids.
        test_features: [num_test, dim] query features.
        test_labels: [num_test] long tensor of ground-truth class ids.
        k: number of neighbors to vote.
        T: temperature for the exp(similarity / T) vote weights.
        num_classes: total number of classes.

    Returns:
        (top1, top5) accuracy percentages over the test set.
    """
    top1, top5, total = 0.0, 0.0, 0
    train_features = train_features.t()
    num_test_images, num_chunks = test_labels.shape[0], 100
    # Chunk the test set to bound the similarity-matrix memory. Clamp to >= 1:
    # with fewer than `num_chunks` test images the floor division yields 0,
    # and range(0, n, 0) would raise ValueError.
    imgs_per_chunk = max(1, num_test_images // num_chunks)
    retrieval_one_hot = torch.zeros(k, num_classes).to(train_features.device)
    for idx in range(0, num_test_images, imgs_per_chunk):
        # get the features for test images
        features = test_features[
            idx : min((idx + imgs_per_chunk), num_test_images), :
        ]
        targets = test_labels[idx : min((idx + imgs_per_chunk), num_test_images)]
        batch_size = targets.shape[0]

        # calculate the dot product and compute top-k neighbors
        similarity = torch.mm(features, train_features)
        distances, indices = similarity.topk(k, largest=True, sorted=True)
        candidates = train_labels.view(1, -1).expand(batch_size, -1)
        retrieved_neighbors = torch.gather(candidates, 1, indices)

        # one-hot encode neighbor labels, then weight each vote by exp(sim / T)
        retrieval_one_hot.resize_(batch_size * k, num_classes).zero_()
        retrieval_one_hot.scatter_(1, retrieved_neighbors.view(-1, 1), 1)
        distances_transform = distances.clone().div_(T).exp_()
        probs = torch.sum(
            torch.mul(
                retrieval_one_hot.view(batch_size, -1, num_classes),
                distances_transform.view(batch_size, -1, 1),
            ),
            1,
        )
        _, predictions = probs.sort(1, True)

        # find the predictions that match the target
        correct = predictions.eq(targets.data.view(-1, 1))
        top1 = top1 + correct.narrow(1, 0, 1).sum().item()
        top5 = top5 + correct.narrow(1, 0, min(5, k)).sum().item()  # top5 does not make sense if k < 5
        total += targets.size(0)
    top1 = top1 * 100.0 / total
    top5 = top5 * 100.0 / total
    return top1, top5
class ReturnIndexDataset(datasets.ImageFolder):
    """ImageFolder variant that yields (image, dataset_index) instead of (image, label)."""
    def __getitem__(self, idx):
        image, _ = super().__getitem__(idx)
        return image, idx
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Evaluation with weighted k-NN on ImageNet')
    parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size')
    parser.add_argument('--nb_knn', default=[10, 20, 100, 200], nargs='+', type=int,
        help='Number of NN to use. 20 is usually working the best.')
    parser.add_argument('--temperature', default=0.07, type=float,
        help='Temperature used in the voting coefficient')
    parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
    parser.add_argument('--use_cuda', default=True, type=utils.bool_flag,
        help="Should we store the features on GPU? We recommend setting this to False if you encounter OOM")
    parser.add_argument('--arch', default='vit_small', type=str, help='Architecture')
    parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
    parser.add_argument("--checkpoint_key", default="teacher", type=str,
        help='Key to use in the checkpoint (example: "teacher")')
    parser.add_argument('--dump_features', default=None,
        help='Path where to save computed features, empty for no saving')
    parser.add_argument('--load_features', default=None, help="""If the features have
        already been computed, where to find them.""")
    parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
    parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
        distributed training; see https://pytorch.org/docs/stable/distributed.html""")
    parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
    parser.add_argument('--data_path', default='/path/to/imagenet/', type=str)
    args = parser.parse_args()

    utils.init_distributed_mode(args)
    print("git:\n  {}\n".format(utils.get_sha()))
    print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
    cudnn.benchmark = True

    # Either reload previously dumped features or run the extraction pipeline.
    if args.load_features:
        train_features = torch.load(os.path.join(args.load_features, "trainfeat.pth"))
        test_features = torch.load(os.path.join(args.load_features, "testfeat.pth"))
        train_labels = torch.load(os.path.join(args.load_features, "trainlabels.pth"))
        test_labels = torch.load(os.path.join(args.load_features, "testlabels.pth"))
    else:
        # need to extract features !
        train_features, test_features, train_labels, test_labels = extract_feature_pipeline(args)

    # k-NN evaluation runs on rank 0 only; other ranks just wait at the barrier.
    if utils.get_rank() == 0:
        if args.use_cuda:
            train_features = train_features.cuda()
            test_features = test_features.cuda()
            train_labels = train_labels.cuda()
            test_labels = test_labels.cuda()

        print("Features are ready!\nStart the k-NN classification.")
        for k in args.nb_knn:
            top1, top5 = knn_classifier(train_features, train_labels,
                test_features, test_labels, k, args.temperature)
            print(f"{k}-NN classifier result: Top1: {top1}, Top5: {top5}")
    dist.barrier()
| 11,128 | 44.798354 | 117 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/vision_transformer4k.py | import argparse
import os
import sys
import datetime
import time
import math
import json
from pathlib import Path
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
from vision_transformer import DINOHead
import math
from functools import partial
import torch
import torch.nn as nn
#from utils import trunc_normal_
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """Truncated-normal initializer; thin public wrapper over the no-grad implementation."""
    return _no_grad_trunc_normal_(tensor, mean=mean, std=std, a=a, b=b)
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Stochastic depth: zero whole samples with prob `drop_prob`, rescaling survivors.

    Identity outside training or when drop_prob is 0.
    """
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # Per-sample mask broadcast over all trailing dims (works for any rank).
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device).add_(keep_prob).floor_()
    # Divide by keep_prob so the expected value is unchanged.
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # Active only in training mode; identity during eval.
        return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout.

    Hidden and output widths default to the input width when not given.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention.

    Note: forward returns a (output, attention_weights) tuple, unlike the
    standard timm Attention, so callers can inspect the attention maps.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scale is 1/sqrt(head_dim) unless explicitly overridden.
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        batch, tokens, dim = x.shape
        # Fused projection, then split into per-head q, k, v:
        # [B, N, 3C] -> [3, B, heads, N, head_dim]
        qkv = self.qkv(x).reshape(batch, tokens, 3, self.num_heads, dim // self.num_heads)
        q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0)

        scores = (q @ k.transpose(-2, -1)) * self.scale
        attn = self.attn_drop(scores.softmax(dim=-1))

        out = (attn @ v).transpose(1, 2).reshape(batch, tokens, dim)
        out = self.proj_drop(self.proj(out))
        return out, attn
class Block(nn.Module):
    """Pre-norm transformer encoder block: attention and MLP, each with a residual."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Identity when no stochastic depth is requested for this block.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)

    def forward(self, x, return_attention=False):
        attended, attn_map = self.attn(self.norm1(x))
        if return_attention:
            # Attention-visualization path: skip the residual update entirely.
            return attn_map
        x = x + self.drop_path(attended)
        return x + self.drop_path(self.mlp(self.norm2(x)))
class VisionTransformer4K(nn.Module):
    """ Vision Transformer 4K.

    Operates on a grid of precomputed patch embeddings (B, input_embed_dim, w, h)
    rather than raw pixels: `phi` projects them to `output_embed_dim`, a [CLS]
    token and interpolated positional embeddings are added, and a stack of
    transformer `Block`s produces the final [CLS] representation.
    """
    def __init__(self, num_classes=0, img_size=[224], input_embed_dim=384, output_embed_dim = 192,
                 depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, num_prototypes=64, **kwargs):
        # NOTE(review): img_size is a mutable default list; it is only read
        # here, but a tuple default would be safer — confirm before changing.
        # num_prototypes and extra **kwargs are accepted but unused here.
        super().__init__()
        embed_dim = output_embed_dim
        self.num_features = self.embed_dim = embed_dim
        # phi: project incoming patch embeddings down to this model's width.
        self.phi = nn.Sequential(*[nn.Linear(input_embed_dim, output_embed_dim), nn.GELU(), nn.Dropout(p=drop_rate)])
        num_patches = int(img_size[0] // 16)**2
        print("# of Patches:", num_patches)
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

        # Classifier head
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear weights; zeros/ones for LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def interpolate_pos_encoding(self, x, w, h):
        """Bicubically resize the learned positional embedding to the current
        token grid (the [CLS] position is passed through unchanged)."""
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1] - 1
        if npatch == N and w == h:
            return self.pos_embed
        class_pos_embed = self.pos_embed[:, 0]
        patch_pos_embed = self.pos_embed[:, 1:]
        dim = x.shape[-1]
        # w/h arrive as token-grid sizes already; `// 1` keeps them integral.
        w0 = w // 1
        h0 = h // 1
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + 0.1, h0 + 0.1
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
            mode='bicubic',
        )
        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)

    def prepare_tokens(self, x):
        """Flatten the (B, C, w, h) embedding grid into tokens, project with
        `phi`, prepend [CLS], and add (interpolated) positional encodings."""
        #print('preparing tokens (after crop)', x.shape)
        # Kept for downstream inspection; stores the raw input grid.
        self.mpp_feature = x
        B, embed_dim, w, h = x.shape
        x = x.flatten(2, 3).transpose(1,2)
        x = self.phi(x)

        # add the [CLS] token to the embed patch tokens
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)

        # add positional encoding to each token
        x = x + self.interpolate_pos_encoding(x, w, h)

        return self.pos_drop(x)

    def forward(self, x):
        """Return the final normalized [CLS] embedding for each input."""
        x = self.prepare_tokens(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x[:, 0]

    def get_last_selfattention(self, x):
        """Return the attention maps of the last transformer block."""
        x = self.prepare_tokens(x)
        for i, blk in enumerate(self.blocks):
            if i < len(self.blocks) - 1:
                x = blk(x)
            else:
                # return attention of the last block
                return blk(x, return_attention=True)

    def get_intermediate_layers(self, x, n=1):
        """Return the (normalized) token outputs of the last `n` blocks."""
        x = self.prepare_tokens(x)
        # we return the output tokens from the `n` last blocks
        output = []
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if len(self.blocks) - i <= n:
                output.append(self.norm(x))
        return output
def vit4k_xs(patch_size=16, **kwargs):
    """Build the extra-small ViT-4K variant.

    Fixed configuration: 384-d input embeddings projected to 192-d, depth 6,
    6 heads, MLP ratio 4, qkv bias, and LayerNorm with eps=1e-6. Any extra
    keyword arguments are forwarded to `VisionTransformer4K`.
    """
    return VisionTransformer4K(
        patch_size=patch_size, input_embed_dim=384, output_embed_dim=192,
        depth=6, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
def count_parameters(model):
    """Return the number of trainable (requires_grad) scalars in `model`."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
| 10,220 | 35.503571 | 123 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/main_dino.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import datetime
import time
import math
import json
from pathlib import Path
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision import models as torchvision_models
import utils
import vision_transformer as vits
from vision_transformer import DINOHead
torchvision_archs = sorted(name for name in torchvision_models.__dict__
if name.islower() and not name.startswith("__")
and callable(torchvision_models.__dict__[name]))
def get_args_parser():
    """Build the argparse parser holding every DINO training option.

    Returned with `add_help=False` so it can be composed as a parent parser
    by the `__main__` entry point.

    NOTE: the `--arch` choices call `torch.hub.list(...)`, which performs a
    network request when this function runs.

    Fixes vs. the original: the `--warmup_teacher_temp_epochs` help no longer
    claims a stale "(Default: 30)" (the actual default is 0), and several
    user-facing typos in help strings are corrected.
    """
    parser = argparse.ArgumentParser('DINO', add_help=False)

    # Model parameters
    parser.add_argument('--arch', default='vit_small', type=str,
        choices=['vit_tiny', 'vit_small', 'vit_base', 'xcit', 'deit_tiny', 'deit_small'] \
                + torchvision_archs + torch.hub.list("facebookresearch/xcit:main"),
        help="""Name of architecture to train. For quick experiments with ViTs,
        we recommend using vit_tiny or vit_small.""")
    parser.add_argument('--patch_size', default=16, type=int, help="""Size in pixels
        of input square patches - default 16 (for 16x16 patches). Using smaller
        values leads to better performance but requires more memory. Applies only
        for ViTs (vit_tiny, vit_small and vit_base). If <16, we recommend disabling
        mixed precision training (--use_fp16 false) to avoid instabilities.""")
    parser.add_argument('--out_dim', default=65536, type=int, help="""Dimensionality of
        the DINO head output. For complex and large datasets large values (like 65k) work well.""")
    parser.add_argument('--norm_last_layer', default=True, type=utils.bool_flag,
        help="""Whether or not to weight normalize the last layer of the DINO head.
        Not normalizing leads to better performance but can make the training unstable.
        In our experiments, we typically set this parameter to False with vit_small and True with vit_base.""")
    parser.add_argument('--momentum_teacher', default=0.996, type=float, help="""Base EMA
        parameter for teacher update. The value is increased to 1 during training with cosine schedule.
        We recommend setting a higher value with small batches: for example use 0.9995 with batch size of 256.""")
    parser.add_argument('--use_bn_in_head', default=False, type=utils.bool_flag,
        help="Whether to use batch normalizations in projection head (Default: False)")

    # Temperature teacher parameters
    parser.add_argument('--warmup_teacher_temp', default=0.04, type=float,
        help="""Initial value for the teacher temperature: 0.04 works well in most cases.
        Try decreasing it if the training loss does not decrease.""")
    parser.add_argument('--teacher_temp', default=0.04, type=float, help="""Final value (after linear warmup)
        of the teacher temperature. For most experiments, anything above 0.07 is unstable. We recommend
        starting with the default value of 0.04 and increase this slightly if needed.""")
    parser.add_argument('--warmup_teacher_temp_epochs', default=0, type=int,
        help='Number of warmup epochs for the teacher temperature.')

    # Training/Optimization parameters
    parser.add_argument('--use_fp16', type=utils.bool_flag, default=True, help="""Whether or not
        to use half precision for training. Improves training time and memory requirements,
        but can provoke instability and slight decay of performance. We recommend disabling
        mixed precision if the loss is unstable, if reducing the patch size or if training with bigger ViTs.""")
    parser.add_argument('--weight_decay', type=float, default=0.04, help="""Initial value of the
        weight decay. With ViT, a smaller value at the beginning of training works well.""")
    parser.add_argument('--weight_decay_end', type=float, default=0.4, help="""Final value of the
        weight decay. We use a cosine schedule for WD and using a larger decay by
        the end of training improves performance for ViTs.""")
    parser.add_argument('--clip_grad', type=float, default=3.0, help="""Maximal parameter
        gradient norm if using gradient clipping. Clipping with norm .3 ~ 1.0 can
        help optimization for larger ViT architectures. 0 for disabling.""")
    parser.add_argument('--batch_size_per_gpu', default=64, type=int,
        help='Per-GPU batch-size : number of distinct images loaded on one GPU.')
    parser.add_argument('--epochs', default=100, type=int, help='Number of epochs of training.')
    parser.add_argument('--freeze_last_layer', default=1, type=int, help="""Number of epochs
        during which we keep the output layer fixed. Typically doing so during
        the first epoch helps training. Try increasing this value if the loss does not decrease.""")
    parser.add_argument("--lr", default=0.0005, type=float, help="""Learning rate at the end of
        linear warmup (highest LR used during training). The learning rate is linearly scaled
        with the batch size, and specified here for a reference batch size of 256.""")
    parser.add_argument("--warmup_epochs", default=10, type=int,
        help="Number of epochs for the linear learning-rate warm up.")
    parser.add_argument('--min_lr', type=float, default=1e-6, help="""Target LR at the
        end of optimization. We use a cosine LR schedule with linear warmup.""")
    parser.add_argument('--optimizer', default='adamw', type=str,
        choices=['adamw', 'sgd', 'lars'], help="""Type of optimizer. We recommend using adamw with ViTs.""")
    parser.add_argument('--drop_path_rate', type=float, default=0.1, help="stochastic depth rate")

    # Multi-crop parameters
    parser.add_argument('--global_crops_scale', type=float, nargs='+', default=(0.4, 1.),
        help="""Scale range of the cropped image before resizing, relatively to the origin image.
        Used for large global view cropping. When disabling multi-crop (--local_crops_number 0), we
        recommend using a wider range of scale ("--global_crops_scale 0.14 1." for example)""")
    parser.add_argument('--local_crops_number', type=int, default=8, help="""Number of small
        local views to generate. Set this parameter to 0 to disable multi-crop training.
        When disabling multi-crop we recommend to use "--global_crops_scale 0.14 1." """)
    parser.add_argument('--local_crops_scale', type=float, nargs='+', default=(0.05, 0.4),
        help="""Scale range of the cropped image before resizing, relatively to the origin image.
        Used for small local view cropping of multi-crop.""")

    # Misc
    parser.add_argument('--data_path', default='/path/to/imagenet/train/', type=str,
        help='Please specify path to the ImageNet training data.')
    parser.add_argument('--output_dir', default=".", type=str, help='Path to save logs and checkpoints.')
    parser.add_argument('--saveckp_freq', default=20, type=int, help='Save checkpoint every x epochs.')
    parser.add_argument('--seed', default=0, type=int, help='Random seed.')
    parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
    parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
        distributed training; see https://pytorch.org/docs/stable/distributed.html""")
    parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
    return parser
def train_dino(args):
    """Run the full DINO self-supervised training loop.

    Sets up distributed training, the multi-crop data pipeline, the
    student/teacher pair, the DINO loss, optimizer and LR/WD/momentum
    schedules; optionally resumes from `<output_dir>/checkpoint.pth`; then
    trains for `args.epochs` epochs, checkpointing and writing JSON logs.

    Raises:
        ValueError: if `args.arch` matches no known architecture family
            (previously this case only printed a message and crashed later
            with a NameError on `student`).
    """
    utils.init_distributed_mode(args)
    utils.fix_random_seeds(args.seed)
    print("git:\n {}\n".format(utils.get_sha()))
    print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
    cudnn.benchmark = True

    # ============ preparing data ... ============
    transform = DataAugmentationDINO(
        args.global_crops_scale,
        args.local_crops_scale,
        args.local_crops_number,
    )
    dataset = datasets.ImageFolder(args.data_path, transform=transform)
    sampler = torch.utils.data.DistributedSampler(dataset, shuffle=True)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        sampler=sampler,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True,
    )
    print(f"Data loaded: there are {len(dataset)} images.")

    # ============ building student and teacher networks ... ============
    # we changed the name DeiT-S for ViT-S to avoid confusions
    args.arch = args.arch.replace("deit", "vit")
    # if the network is a Vision Transformer (i.e. vit_tiny, vit_small, vit_base)
    if args.arch in vits.__dict__.keys():
        student = vits.__dict__[args.arch](
            patch_size=args.patch_size,
            drop_path_rate=args.drop_path_rate,  # stochastic depth
        )
        teacher = vits.__dict__[args.arch](patch_size=args.patch_size)
        embed_dim = student.embed_dim
    # if the network is a XCiT
    elif args.arch in torch.hub.list("facebookresearch/xcit:main"):
        student = torch.hub.load('facebookresearch/xcit:main', args.arch,
                                 pretrained=False, drop_path_rate=args.drop_path_rate)
        teacher = torch.hub.load('facebookresearch/xcit:main', args.arch, pretrained=False)
        embed_dim = student.embed_dim
    # otherwise, we check if the architecture is in torchvision models
    elif args.arch in torchvision_models.__dict__.keys():
        student = torchvision_models.__dict__[args.arch]()
        teacher = torchvision_models.__dict__[args.arch]()
        embed_dim = student.fc.weight.shape[1]
    else:
        # BUGFIX: fail fast with a clear error instead of printing
        # "Unknow architecture" and falling through to a NameError below.
        raise ValueError(f"Unknown architecture: {args.arch}")

    # multi-crop wrapper handles forward with inputs of different resolutions
    student = utils.MultiCropWrapper(student, DINOHead(
        embed_dim,
        args.out_dim,
        use_bn=args.use_bn_in_head,
        norm_last_layer=args.norm_last_layer,
    ))
    teacher = utils.MultiCropWrapper(
        teacher,
        DINOHead(embed_dim, args.out_dim, args.use_bn_in_head),
    )
    # move networks to gpu
    student, teacher = student.cuda(), teacher.cuda()
    # synchronize batch norms (if any)
    if utils.has_batchnorms(student):
        student = nn.SyncBatchNorm.convert_sync_batchnorm(student)
        teacher = nn.SyncBatchNorm.convert_sync_batchnorm(teacher)
        # we need DDP wrapper to have synchro batch norms working...
        teacher = nn.parallel.DistributedDataParallel(teacher, device_ids=[args.gpu])
        teacher_without_ddp = teacher.module
    else:
        # teacher_without_ddp and teacher are the same thing
        teacher_without_ddp = teacher
    student = nn.parallel.DistributedDataParallel(student, device_ids=[args.gpu])
    # teacher and student start with the same weights
    teacher_without_ddp.load_state_dict(student.module.state_dict())
    # there is no backpropagation through the teacher, so no need for gradients
    for p in teacher.parameters():
        p.requires_grad = False
    print(f"Student and Teacher are built: they are both {args.arch} network.")

    # ============ preparing loss ... ============
    dino_loss = DINOLoss(
        args.out_dim,
        args.local_crops_number + 2,  # total number of crops = 2 global crops + local_crops_number
        args.warmup_teacher_temp,
        args.teacher_temp,
        args.warmup_teacher_temp_epochs,
        args.epochs,
    ).cuda()

    # ============ preparing optimizer ... ============
    params_groups = utils.get_params_groups(student)
    if args.optimizer == "adamw":
        optimizer = torch.optim.AdamW(params_groups)  # to use with ViTs
    elif args.optimizer == "sgd":
        optimizer = torch.optim.SGD(params_groups, lr=0, momentum=0.9)  # lr is set by scheduler
    elif args.optimizer == "lars":
        optimizer = utils.LARS(params_groups)  # to use with convnet and large batches
    # for mixed precision training
    fp16_scaler = None
    if args.use_fp16:
        fp16_scaler = torch.cuda.amp.GradScaler()

    # ============ init schedulers ... ============
    lr_schedule = utils.cosine_scheduler(
        args.lr * (args.batch_size_per_gpu * utils.get_world_size()) / 256.,  # linear scaling rule
        args.min_lr,
        args.epochs, len(data_loader),
        warmup_epochs=args.warmup_epochs,
    )
    wd_schedule = utils.cosine_scheduler(
        args.weight_decay,
        args.weight_decay_end,
        args.epochs, len(data_loader),
    )
    # momentum parameter is increased to 1. during training with a cosine schedule
    momentum_schedule = utils.cosine_scheduler(args.momentum_teacher, 1,
                                               args.epochs, len(data_loader))
    print("Loss, optimizer and schedulers ready.")

    # ============ optionally resume training ... ============
    to_restore = {"epoch": 0}
    utils.restart_from_checkpoint(
        os.path.join(args.output_dir, "checkpoint.pth"),
        run_variables=to_restore,
        student=student,
        teacher=teacher,
        optimizer=optimizer,
        fp16_scaler=fp16_scaler,
        dino_loss=dino_loss,
    )
    start_epoch = to_restore["epoch"]

    start_time = time.time()
    print("Starting DINO training !")
    for epoch in range(start_epoch, args.epochs):
        data_loader.sampler.set_epoch(epoch)

        # ============ training one epoch of DINO ... ============
        train_stats = train_one_epoch(student, teacher, teacher_without_ddp, dino_loss,
            data_loader, optimizer, lr_schedule, wd_schedule, momentum_schedule,
            epoch, fp16_scaler, args)

        # ============ writing logs ... ============
        save_dict = {
            'student': student.state_dict(),
            'teacher': teacher.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch + 1,
            'args': args,
            'dino_loss': dino_loss.state_dict(),
        }
        if fp16_scaler is not None:
            save_dict['fp16_scaler'] = fp16_scaler.state_dict()
        utils.save_on_master(save_dict, os.path.join(args.output_dir, 'checkpoint.pth'))
        if args.saveckp_freq and epoch % args.saveckp_freq == 0:
            utils.save_on_master(save_dict, os.path.join(args.output_dir, f'checkpoint{epoch:04}.pth'))
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     'epoch': epoch}
        if utils.is_main_process():
            with (Path(args.output_dir) / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def train_one_epoch(student, teacher, teacher_without_ddp, dino_loss, data_loader,
                    optimizer, lr_schedule, wd_schedule, momentum_schedule,epoch,
                    fp16_scaler, args):
    """Train the student for one epoch and EMA-update the teacher.

    The student is updated by backpropagation on `dino_loss`; the teacher is
    then updated as an exponential moving average of the student's weights
    (no gradients flow through the teacher). Exits the process if the loss
    becomes non-finite. Returns a dict of epoch-averaged metrics.

    Args:
        student / teacher: DDP-wrapped networks; `teacher_without_ddp`
            exposes the raw teacher module for the EMA parameter copy.
        lr_schedule / wd_schedule / momentum_schedule: per-iteration value
            arrays indexed by the global step.
        fp16_scaler: GradScaler when mixed precision is on, else None.
    """
    metric_logger = utils.MetricLogger(delimiter=" ")
    header = 'Epoch: [{}/{}]'.format(epoch, args.epochs)
    for it, (images, _) in enumerate(metric_logger.log_every(data_loader, 10, header)):
        # update weight decay and learning rate according to their schedule
        it = len(data_loader) * epoch + it  # global training iteration
        for i, param_group in enumerate(optimizer.param_groups):
            param_group["lr"] = lr_schedule[it]
            if i == 0:  # only the first group is regularized
                param_group["weight_decay"] = wd_schedule[it]

        # move images to gpu
        images = [im.cuda(non_blocking=True) for im in images]
        # teacher and student forward passes + compute dino loss
        with torch.cuda.amp.autocast(fp16_scaler is not None):
            teacher_output = teacher(images[:2])  # only the 2 global views pass through the teacher
            student_output = student(images)
            loss = dino_loss(student_output, teacher_output, epoch)

        if not math.isfinite(loss.item()):
            print("Loss is {}, stopping training".format(loss.item()), force=True)
            sys.exit(1)

        # student update
        optimizer.zero_grad()
        param_norms = None
        if fp16_scaler is None:
            # full-precision path
            loss.backward()
            if args.clip_grad:
                param_norms = utils.clip_gradients(student, args.clip_grad)
            utils.cancel_gradients_last_layer(epoch, student,
                                              args.freeze_last_layer)
            optimizer.step()
        else:
            # mixed-precision path: scale, (optionally) unscale+clip, step
            fp16_scaler.scale(loss).backward()
            if args.clip_grad:
                fp16_scaler.unscale_(optimizer)  # unscale the gradients of optimizer's assigned params in-place
                param_norms = utils.clip_gradients(student, args.clip_grad)
            utils.cancel_gradients_last_layer(epoch, student,
                                              args.freeze_last_layer)
            fp16_scaler.step(optimizer)
            fp16_scaler.update()

        # EMA update for the teacher
        with torch.no_grad():
            m = momentum_schedule[it]  # momentum parameter
            for param_q, param_k in zip(student.module.parameters(), teacher_without_ddp.parameters()):
                param_k.data.mul_(m).add_((1 - m) * param_q.detach().data)

        # logging
        torch.cuda.synchronize()
        metric_logger.update(loss=loss.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
        metric_logger.update(wd=optimizer.param_groups[0]["weight_decay"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
class DINOLoss(nn.Module):
    """DINO cross-entropy between teacher and student softmax outputs.

    The teacher distribution is centered (running EMA of batch means,
    synchronized across processes) and sharpened with a temperature that
    follows a linear warmup schedule before settling at `teacher_temp`.
    """

    def __init__(self, out_dim, ncrops, warmup_teacher_temp, teacher_temp,
                 warmup_teacher_temp_epochs, nepochs, student_temp=0.1,
                 center_momentum=0.9):
        super().__init__()
        self.student_temp = student_temp
        self.center_momentum = center_momentum
        self.ncrops = ncrops
        self.register_buffer("center", torch.zeros(1, out_dim))
        # Linear warmup of the teacher temperature, then held constant.
        # A too-high temperature early on makes training unstable.
        warmup = np.linspace(warmup_teacher_temp, teacher_temp, warmup_teacher_temp_epochs)
        plateau = np.full(nepochs - warmup_teacher_temp_epochs, teacher_temp)
        self.teacher_temp_schedule = np.concatenate((warmup, plateau))

    def forward(self, student_output, teacher_output, epoch):
        """
        Cross-entropy between softmax outputs of the teacher and student networks,
        averaged over every (teacher crop, student crop) pair except matching views.
        """
        student_views = (student_output / self.student_temp).chunk(self.ncrops)

        # Teacher centering and sharpening; no gradients flow through it.
        temp = self.teacher_temp_schedule[epoch]
        teacher_views = F.softmax((teacher_output - self.center) / temp, dim=-1).detach().chunk(2)

        total_loss = 0
        n_loss_terms = 0
        for t_idx, t_probs in enumerate(teacher_views):
            for s_idx, s_logits in enumerate(student_views):
                if s_idx == t_idx:
                    # Skip pairs where student and teacher see the same view.
                    continue
                ce = torch.sum(-t_probs * F.log_softmax(s_logits, dim=-1), dim=-1)
                total_loss += ce.mean()
                n_loss_terms += 1
        total_loss /= n_loss_terms
        self.update_center(teacher_output)
        return total_loss

    @torch.no_grad()
    def update_center(self, teacher_output):
        """
        EMA update of the center used for teacher output, reduced over all workers.
        """
        batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
        dist.all_reduce(batch_center)  # sum across processes before averaging
        batch_center = batch_center / (len(teacher_output) * dist.get_world_size())

        # ema update
        self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)
class DataAugmentationDINO(object):
    """Multi-crop DINO augmentation.

    Produces two 224px global crops plus `local_crops_number` 96px local
    crops per image, each with flip/color-jitter/grayscale, view-specific
    blur/solarization, and ImageNet normalization.
    """

    def __init__(self, global_crops_scale, local_crops_scale, local_crops_number):
        color_distortion = transforms.Compose([
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomApply(
                [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)],
                p=0.8
            ),
            transforms.RandomGrayscale(p=0.2),
        ])
        to_normalized_tensor = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        # First global view: always blurred.
        self.global_transfo1 = transforms.Compose([
            transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC),
            color_distortion,
            utils.GaussianBlur(1.0),
            to_normalized_tensor,
        ])
        # Second global view: rarely blurred, sometimes solarized.
        self.global_transfo2 = transforms.Compose([
            transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC),
            color_distortion,
            utils.GaussianBlur(0.1),
            utils.Solarization(0.2),
            to_normalized_tensor,
        ])
        # Transformation for the small local crops.
        self.local_crops_number = local_crops_number
        self.local_transfo = transforms.Compose([
            transforms.RandomResizedCrop(96, scale=local_crops_scale, interpolation=Image.BICUBIC),
            color_distortion,
            utils.GaussianBlur(p=0.5),
            to_normalized_tensor,
        ])

    def __call__(self, image):
        # Global views first (the teacher only consumes the first two).
        views = [self.global_transfo1(image), self.global_transfo2(image)]
        views.extend(self.local_transfo(image) for _ in range(self.local_crops_number))
        return views
if __name__ == '__main__':
    # Parse CLI options (all definitions live in get_args_parser, composed
    # here as a parent parser), make sure the output directory exists,
    # then launch training.
    parser = argparse.ArgumentParser('DINO', parents=[get_args_parser()])
    args = parser.parse_args()
    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    train_dino(args)
| 22,945 | 47.614407 | 114 | py |
HIPT | HIPT-master/1-Hierarchical-Pretraining/eval_video_segmentation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Some parts are taken from https://github.com/Liusifei/UVC
"""
import os
import copy
import glob
import queue
from urllib.request import urlopen
import argparse
import numpy as np
from tqdm import tqdm
import cv2
import torch
import torch.nn as nn
from torch.nn import functional as F
from PIL import Image
from torchvision import transforms
import utils
import vision_transformer as vits
@torch.no_grad()
def eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette):
    """Propagate the first-frame segmentation through a whole DAVIS video.

    For each subsequent frame, labels are propagated from the first frame
    plus the `args.n_last_frames` most recent predictions, then upsampled,
    min-max normalized and argmax'd into a hard segmentation written to
    `<output_dir>/<video>/<frame>.png`.

    Args:
        model: feature extractor used via `extract_feature`.
        frame_list: ordered frame paths of the video.
        first_seg: one-hot first-frame segmentation at patch resolution.
        seg_ori: original-resolution first-frame label map (saved as frame 0).
        color_palette: palette for indexed PNG output.
    """
    video_folder = os.path.join(args.output_dir, video_dir.split('/')[-1])
    os.makedirs(video_folder, exist_ok=True)

    # The queue stores the n preceding frames (feature, soft-seg) pairs.
    que = queue.Queue(args.n_last_frames)

    # first frame
    frame1, ori_h, ori_w = read_frame(frame_list[0])
    # extract first frame feature
    frame1_feat = extract_feature(model, frame1).T  # dim x h*w

    # saving first segmentation
    out_path = os.path.join(video_folder, "00000.png")
    imwrite_indexed(out_path, seg_ori, color_palette)
    mask_neighborhood = None
    for cnt in tqdm(range(1, len(frame_list))):
        frame_tar = read_frame(frame_list[cnt])[0]

        # we use the first segmentation and the n previous ones
        used_frame_feats = [frame1_feat] + [pair[0] for pair in list(que.queue)]
        used_segs = [first_seg] + [pair[1] for pair in list(que.queue)]

        # mask_neighborhood is computed once and reused across frames.
        frame_tar_avg, feat_tar, mask_neighborhood = label_propagation(args, model, frame_tar, used_frame_feats, used_segs, mask_neighborhood)

        # pop out oldest frame if necessary
        if que.qsize() == args.n_last_frames:
            que.get()
        # push current results into queue
        seg = copy.deepcopy(frame_tar_avg)
        que.put([feat_tar, seg])

        # upsampling & argmax
        frame_tar_avg = F.interpolate(frame_tar_avg, scale_factor=args.patch_size, mode='bilinear', align_corners=False, recompute_scale_factor=False)[0]
        frame_tar_avg = norm_mask(frame_tar_avg)
        _, frame_tar_seg = torch.max(frame_tar_avg, dim=0)

        # saving to disk
        frame_tar_seg = np.array(frame_tar_seg.squeeze().cpu(), dtype=np.uint8)
        frame_tar_seg = np.array(Image.fromarray(frame_tar_seg).resize((ori_w, ori_h), 0))
        frame_nm = frame_list[cnt].split('/')[-1].replace(".jpg", ".png")
        imwrite_indexed(os.path.join(video_folder, frame_nm), frame_tar_seg, color_palette)
def restrict_neighborhood(h, w, size_mask_neighborhood=None):
    """Build a (h*w, h*w) binary mask restricting attention to a local window.

    Each query position (i, j) may attend only to source positions within a
    square neighborhood of radius `size_mask_neighborhood` around it
    ("local attention").

    Args:
        h, w: spatial grid size in patches.
        size_mask_neighborhood: neighborhood radius. Defaults to the
            module-level `args.size_mask_neighborhood`, preserving the
            original behavior for existing callers that pass only (h, w).

    Returns:
        CUDA float tensor of shape (h*w, h*w) with 1s marking allowed pairs.
    """
    if size_mask_neighborhood is None:
        # BUGFIX(robustness): previously this silently read the module
        # global `args`; keep that as the default but allow explicit values.
        size_mask_neighborhood = args.size_mask_neighborhood
    mask = torch.zeros(h, w, h, w)
    for i in range(h):
        for j in range(w):
            # Walk the (2r+1)^2 window, skipping positions off the grid.
            for p in range(2 * size_mask_neighborhood + 1):
                for q in range(2 * size_mask_neighborhood + 1):
                    if i - size_mask_neighborhood + p < 0 or i - size_mask_neighborhood + p >= h:
                        continue
                    if j - size_mask_neighborhood + q < 0 or j - size_mask_neighborhood + q >= w:
                        continue
                    mask[i, j, i - size_mask_neighborhood + p, j - size_mask_neighborhood + q] = 1
    mask = mask.reshape(h * w, h * w)
    return mask.cuda(non_blocking=True)
def norm_mask(mask):
    """Min-max normalize each channel of a (C, H, W) tensor to [0, 1], in place.

    Channels whose maximum is not positive are left untouched. Returns the
    same (mutated) tensor.
    """
    num_channels = mask.size(0)
    for idx in range(num_channels):
        channel = mask[idx, :, :]
        if channel.max() > 0:
            shifted = channel - channel.min()
            mask[idx, :, :] = shifted / shifted.max()
    return mask
def label_propagation(args, model, frame_tar, list_frame_feats, list_segs, mask_neighborhood=None):
    """Propagate the segmentations of the source frames onto `frame_tar`.

    Affinities are exp(similarity / 0.1) between L2-normalized target and
    source features, optionally restricted to a local spatial neighborhood,
    sparsified to the top-k source entries per target location, normalized,
    and used to linearly combine the source label maps.

    Returns:
        (seg_tar, return_feat_tar, mask_neighborhood): the propagated soft
        segmentation (1, C, h, w), the transposed target features (dim x h*w)
        for reuse by the caller, and the (possibly newly built) neighborhood
        mask so it can be cached across frames.
    """
    ## we only need to extract feature of the target frame
    feat_tar, h, w = extract_feature(model, frame_tar, return_h_w=True)

    return_feat_tar = feat_tar.T  # dim x h*w

    ncontext = len(list_frame_feats)
    feat_sources = torch.stack(list_frame_feats)  # nmb_context x dim x h*w

    # Cosine similarity via L2 normalization + batched matmul.
    feat_tar = F.normalize(feat_tar, dim=1, p=2)
    feat_sources = F.normalize(feat_sources, dim=1, p=2)

    feat_tar = feat_tar.unsqueeze(0).repeat(ncontext, 1, 1)
    aff = torch.exp(torch.bmm(feat_tar, feat_sources) / 0.1)  # nmb_context x h*w (tar: query) x h*w (source: keys)

    if args.size_mask_neighborhood > 0:
        if mask_neighborhood is None:
            # Built once and reused for every subsequent frame.
            mask_neighborhood = restrict_neighborhood(h, w)
            mask_neighborhood = mask_neighborhood.unsqueeze(0).repeat(ncontext, 1, 1)
        aff *= mask_neighborhood

    aff = aff.transpose(2, 1).reshape(-1, h * w)  # nmb_context*h*w (source: keys) x h*w (tar: queries)
    # Keep only the top-k affinities per target location, zeroing the rest.
    tk_val, _ = torch.topk(aff, dim=0, k=args.topk)
    tk_val_min, _ = torch.min(tk_val, dim=0)
    aff[aff < tk_val_min] = 0

    aff = aff / torch.sum(aff, keepdim=True, axis=0)

    list_segs = [s.cuda() for s in list_segs]
    segs = torch.cat(list_segs)
    nmb_context, C, h, w = segs.shape
    segs = segs.reshape(nmb_context, C, -1).transpose(2, 1).reshape(-1, C).T  # C x nmb_context*h*w
    seg_tar = torch.mm(segs, aff)
    seg_tar = seg_tar.reshape(1, C, h, w)
    return seg_tar, return_feat_tar, mask_neighborhood
def extract_feature(model, frame, return_h_w=False):
    """Run one frame through the backbone and return its patch features.

    The [CLS] token is discarded and the remaining tokens are flattened to a
    (h*w, dim) matrix. With `return_h_w=True`, the patch-grid size (h, w)
    is returned as well.
    """
    tokens = model.get_intermediate_layers(frame.unsqueeze(0).cuda(), n=1)[0]
    patch_tokens = tokens[:, 1:, :]  # we discard the [CLS] token
    patch = model.patch_embed.patch_size
    h = int(frame.shape[1] / patch)
    w = int(frame.shape[2] / patch)
    dim = patch_tokens.shape[-1]
    feats = patch_tokens[0].reshape(h, w, dim).reshape(-1, dim)
    if return_h_w:
        return feats, h, w
    return feats
def imwrite_indexed(filename, array, color_palette):
    """Save a 2D label map as an indexed-color PNG (DAVIS convention)."""
    if np.atleast_3d(array).shape[2] != 1:
        raise Exception("Saving indexed PNGs requires 2D array.")

    indexed = Image.fromarray(array)
    indexed.putpalette(color_palette.ravel())
    indexed.save(filename, format='PNG')
def to_one_hot(y_tensor, n_dims=None):
    """Convert an integer label map to a one-hot tensor.

    Args:
        y_tensor: (1, h, w) tensor of integer class labels.
        n_dims: number of classes; inferred as max label + 1 when omitted.

    Returns:
        Float tensor of shape (1, n_dims, h, w).

    Note: the original computed `n_dims` a second time after the reshape —
    that line was dead code and has been removed.
    """
    if n_dims is None:
        n_dims = int(y_tensor.max() + 1)
    _, h, w = y_tensor.size()
    # Flatten to (h*w, 1) CPU long indices for scatter_ (LongTensor also
    # forces CPU, matching the CPU zeros buffer below).
    flat = y_tensor.type(torch.LongTensor).view(-1, 1)
    y_one_hot = torch.zeros(flat.size(0), n_dims).scatter_(1, flat, 1)
    # Back to spatial layout, channels first, with a leading batch dim.
    return y_one_hot.view(h, w, n_dims).permute(2, 0, 1).unsqueeze(0)
def read_frame_list(video_dir):
    """Return the video's .jpg frame paths in sorted (temporal) order."""
    pattern = os.path.join(video_dir, "*.jpg")
    return sorted(glob.glob(pattern))
def read_frame(frame_dir, scale_size=[480]):
    """Load a frame from disk and preprocess it into a normalized CHW tensor.

    With a single-element `scale_size`, the short side is scaled to that
    value (keeping aspect ratio) and both sides are rounded down to
    multiples of 64; otherwise `scale_size` is taken as (height, width).

    Returns:
        (tensor, original_height, original_width)
    """
    img = cv2.imread(frame_dir)
    ori_h, ori_w, _ = img.shape
    if len(scale_size) == 1:
        # Scale the short side; snap both dims down to multiples of 64.
        if ori_h > ori_w:
            tw = scale_size[0]
            th = int(((tw * ori_h) / ori_w // 64) * 64)
        else:
            th = scale_size[0]
            tw = int(((th * ori_w) / ori_h // 64) * 64)
    else:
        th, tw = scale_size
    img = cv2.resize(img, (tw, th))
    img = img.astype(np.float32) / 255.0
    img = img[:, :, ::-1]  # BGR -> RGB
    # copy() makes the negatively-strided view contiguous for from_numpy.
    frame = torch.from_numpy(np.transpose(img.copy(), (2, 0, 1))).float()
    frame = color_normalize(frame)
    return frame, ori_h, ori_w
def read_seg(seg_dir, factor, scale_size=[480]):
    """Load a segmentation PNG, downscale it by ``factor``, and one-hot encode it.

    Args:
        seg_dir (str): Path to the segmentation image.
        factor (int): Downscale factor (the ViT patch size, so the mask grid
            matches the patch-token grid).
        scale_size (list): [s] to scale the short side, or an explicit size.
            NOTE(review): in the explicit-size branch this reads
            scale_size as [w, h], while read_frame unpacks it as [h, w] —
            looks inconsistent; confirm intended ordering before reuse.

    Returns:
        (torch.Tensor, np.ndarray): one-hot mask [1 x C x h x w] and the
        original-resolution segmentation array.
    """
    seg = Image.open(seg_dir)
    _w, _h = seg.size # note PIL.Image.Image's size is (w, h)
    if len(scale_size) == 1:
        # Short side -> scale_size[0]; long side rounded down to a multiple of 64.
        if(_w > _h):
            _th = scale_size[0]
            _tw = (_th * _w) / _h
            _tw = int((_tw // 64) * 64)
        else:
            _tw = scale_size[0]
            _th = (_tw * _h) / _w
            _th = int((_th // 64) * 64)
    else:
        _th = scale_size[1]
        _tw = scale_size[0]
    # Resample mode 0 == PIL NEAREST, preserving integer label values.
    small_seg = np.array(seg.resize((_tw // factor, _th // factor), 0))
    small_seg = torch.from_numpy(small_seg.copy()).contiguous().float().unsqueeze(0)
    return to_one_hot(small_seg), np.asarray(seg)
def color_normalize(x, mean=[0.485, 0.456, 0.406], std=[0.228, 0.224, 0.225]):
    """Normalize a [C x H x W] tensor in place with per-channel mean/std.

    Returns the same tensor object for convenient chaining.
    """
    for channel, mu, sigma in zip(x, mean, std):
        channel.sub_(mu).div_(sigma)
    return x
if __name__ == '__main__':
    # Command-line entry point: evaluates a DINO ViT on DAVIS 2017 video
    # object segmentation via label propagation.
    parser = argparse.ArgumentParser('Evaluation with video object segmentation on DAVIS 2017')
    parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
    parser.add_argument('--arch', default='vit_small', type=str,
        choices=['vit_tiny', 'vit_small', 'vit_base'], help='Architecture (support only ViT atm).')
    parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
    parser.add_argument("--checkpoint_key", default="teacher", type=str, help='Key to use in the checkpoint (example: "teacher")')
    parser.add_argument('--output_dir', default=".", help='Path where to save segmentations')
    parser.add_argument('--data_path', default='/path/to/davis/', type=str)
    parser.add_argument("--n_last_frames", type=int, default=7, help="number of preceeding frames")
    parser.add_argument("--size_mask_neighborhood", default=12, type=int,
        help="We restrict the set of source nodes considered to a spatial neighborhood of the query node")
    parser.add_argument("--topk", type=int, default=5, help="accumulate label from top k neighbors")
    parser.add_argument("--bs", type=int, default=6, help="Batch size, try to reduce if OOM")
    args = parser.parse_args()
    print("git:\n {}\n".format(utils.get_sha()))
    print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
    # building network
    model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
    print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
    model.cuda()
    utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
    # Freeze the backbone: evaluation only, no gradients needed.
    for param in model.parameters():
        param.requires_grad = False
    model.eval()
    # DAVIS color palette, fetched at runtime from the UVC repository and
    # reshaped to an [N x 3] uint8 RGB table.
    color_palette = []
    for line in urlopen("https://raw.githubusercontent.com/Liusifei/UVC/master/libs/data/palette.txt"):
        color_palette.append([int(i) for i in line.decode("utf-8").split('\n')[0].split(" ")])
    color_palette = np.asarray(color_palette, dtype=np.uint8).reshape(-1,3)
    video_list = open(os.path.join(args.data_path, "ImageSets/2017/val.txt")).readlines()
    for i, video_name in enumerate(video_list):
        video_name = video_name.strip()
        print(f'[{i}/{len(video_list)}] Begin to segmentate video {video_name}.')
        video_dir = os.path.join(args.data_path, "JPEGImages/480p/", video_name)
        frame_list = read_frame_list(video_dir)
        # The first annotated frame seeds the label propagation.
        seg_path = frame_list[0].replace("JPEGImages", "Annotations").replace("jpg", "png")
        first_seg, seg_ori = read_seg(seg_path, args.patch_size)
        eval_video_tracking_davis(args, model, frame_list, video_dir, first_seg, seg_ori, color_palette)
| 11,835 | 39.395904 | 153 | py |
def get_knn_classification_results(dataroot, study='tcga_lung', enc_name='vit256mean', prop=1.0):
    r"""
    Runs 10-fold CV for KNN of mean WSI embeddings.
    Args:
    - dataroot (str): Path to mean WSI embeddings for each feature type.
    - study (str): Which TCGA study (Choices: tcga_brca, tcga_lung, tcga_kidney)
    - enc_name (str): Which encoder to use (Choices: resnet50mean, vit16mean, vit256mean)
    - prop (float): Proportion of training dataset to use
    Return:
    - aucs_knn_all (pd.DataFrame): AUCs for 10-fold CV evaluation (one row per fold)
    """
    aucs_knn_all = {}
    for i in range(10):
        train_fname = os.path.join(dataroot, enc_name, f'{study}_{enc_name}_class_split_train_{i}.pkl')
        with open(train_fname, 'rb') as handle:
            asset_dict = pickle.load(handle)
        train_embeddings, train_labels = asset_dict['embeddings'], asset_dict['labels']
        if prop < 1:
            # Subsample the training split to the requested proportion.
            # Fix: previously hard-coded frac=0.1, silently ignoring ``prop``.
            sample_inds = pd.DataFrame(range(train_embeddings.shape[0])).sample(frac=prop, random_state=1).index
            train_embeddings = train_embeddings[sample_inds]
            train_labels = train_labels[sample_inds]
        val_fname = os.path.join(dataroot, enc_name, f'{study}_{enc_name}_class_split_test_{i}.pkl')
        with open(val_fname, 'rb') as handle:
            asset_dict = pickle.load(handle)
        val_embeddings, val_labels = asset_dict['embeddings'], asset_dict['labels']
        # Encode string labels consistently across train / val.
        le = LabelEncoder().fit(train_labels)
        train_labels = le.transform(train_labels)
        val_labels = le.transform(val_labels)
        ### K-NN Evaluation
        clf = KNeighborsClassifier().fit(train_embeddings, train_labels)
        y_score = clf.predict_proba(val_embeddings)
        y_pred = clf.predict(val_embeddings)
        aucs, f1s = [], []
        if len(np.unique(val_labels)) > 2:
            # NOTE(review): this appends the same macro OvR AUC once per class
            # (kept as-is to preserve the output DataFrame's shape).
            for j, label in enumerate(np.unique(val_labels)):
                label_class = np.array(val_labels == label, int)
                aucs.append(sklearn.metrics.roc_auc_score(val_labels, y_score, average='macro', multi_class='ovr'))
        else:
            aucs.append(sklearn.metrics.roc_auc_score(val_labels, y_score[:,1]))
        aucs_knn_all[i] = aucs
    aucs_knn_all = pd.DataFrame(aucs_knn_all).T
    return aucs_knn_all
return aucs_knn_all | 2,324 | 46.44898 | 115 | py |
HIPT | HIPT-master/3-Self-Supervised-Eval/patch_evaluation_utils.py | import numpy as np
import scipy
import scipy.special as special
from scipy.stats._stats import (_kendall_dis, _toint64, _weightedrankedtau,
_local_correlations)
from scipy.stats import *
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# This can happen when attempting to sum things which are not
# numbers (e.g. as in the function `mode`). Try an alternative method:
try:
contains_nan = np.nan in set(a.ravel())
except TypeError:
# Don't know what to do. Fall back to omitting nan values and
# issue a warning.
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly "
"checked for nan values. nan values "
"will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return contains_nan, nan_policy
"""
Modified from scipy.stats
"""
def kendalltau_bpq(x, y, initial_lexsort=None, nan_policy='propagate',
                   method='auto', variant='d'):
    """Calculates Kendall's tau, a correlation measure for ordinal data.

    Modified from ``scipy.stats.kendalltau``. Two differences from SciPy:
    a variant ``'d'`` is added (default), and only the tau statistic is
    returned — the p-value machinery is retained for input validation but
    its result is discarded.

    Parameters
    ----------
    x, y : array_like
        Arrays of rankings, of the same shape. If arrays are not 1-D, they
        will be flattened to 1-D.
    initial_lexsort : bool, optional
        Unused (deprecated).
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):
          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values
    method : {'auto', 'asymptotic', 'exact'}, optional
        Defines which method is used to calculate the p-value [5]_.
        The following options are available (default is 'auto'):
          * 'auto': selects the appropriate method based on a trade-off
            between speed and accuracy
          * 'asymptotic': uses a normal approximation valid for large samples
          * 'exact': computes the exact p-value, but can only be used if no ties
            are present. As the sample size increases, the 'exact' computation
            time may grow and the result may lose some precision.
    variant : {'b', 'c', 'd'}, optional
        Defines which variant of Kendall's tau is computed. Default is 'd',
        which credits ties in `y` as half-concordant:
        ``tau_d = (P + U/2) / (P + Q + U)``.

    Returns
    -------
    correlation : float
        The tau statistic. Note: unlike ``scipy.stats.kendalltau``, the
        p-value is NOT returned on the normal path (the degenerate/NaN
        paths still return a ``KendalltauResult`` pair).

    Notes
    -----
    The definitions of tau-b / tau-c that are used are [2]_::

      tau_b = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
      tau_c = 2 (P - Q) / (n**2 * (m - 1) / m)

    where P is the number of concordant pairs, Q the number of discordant
    pairs, T the number of ties only in `x`, and U the number of ties only in
    `y`. If a tie occurs for the same pair in both `x` and `y`, it is not
    added to either T or U. n is the total number of samples, and m is the
    number of unique values in either `x` or `y`, whichever is smaller.

    References
    ----------
    .. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
           Vol. 30, No. 1/2, pp. 81-93, 1938.
    .. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
           Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
    .. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
           Wiley & Sons, 1967.
    .. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
           tables", Software: Practice and Experience, Vol. 24, No. 3,
           pp. 327-336, 1994.
    .. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
           Charles Griffin & Co., 1970.
    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    if x.size != y.size:
        raise ValueError("All inputs to `kendalltau` must be of the same "
                         f"size, found x-size {x.size} and y-size {y.size}")
    elif not x.size or not y.size:
        # Return NaN if arrays are empty
        return KendalltauResult(np.nan, np.nan)
    # check both x and y
    cnx, npx = _contains_nan(x, nan_policy)
    cny, npy = _contains_nan(y, nan_policy)
    contains_nan = cnx or cny
    if npx == 'omit' or npy == 'omit':
        nan_policy = 'omit'
    if contains_nan and nan_policy == 'propagate':
        return KendalltauResult(np.nan, np.nan)
    elif contains_nan and nan_policy == 'omit':
        x = ma.masked_invalid(x)
        y = ma.masked_invalid(y)
        if variant == 'b':
            return mstats_basic.kendalltau(x, y, method=method, use_ties=True)
        else:
            raise ValueError("Only variant 'b' is supported for masked arrays")
    if initial_lexsort is not None:  # deprecate to drop!
        warnings.warn('"initial_lexsort" is gone!')

    def count_rank_tie(ranks):
        # For each tied group of size c: pair count, c(c-1)(c-2), c(c-1)(2c+5)
        cnt = np.bincount(ranks).astype('int64', copy=False)
        cnt = cnt[cnt > 1]
        return ((cnt * (cnt - 1) // 2).sum(),
                (cnt * (cnt - 1.) * (cnt - 2)).sum(),
                (cnt * (cnt - 1.) * (2*cnt + 5)).sum())

    size = x.size
    perm = np.argsort(y)  # sort on y and convert y to dense ranks
    x, y = x[perm], y[perm]
    y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
    # stable sort on x and convert x to dense ranks
    perm = np.argsort(x, kind='mergesort')
    x, y = x[perm], y[perm]
    x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
    dis = _kendall_dis(x, y)  # discordant pairs
    obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
    cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
    ntie = (cnt * (cnt - 1) // 2).sum()  # joint ties
    xtie, x0, x1 = count_rank_tie(x)     # ties in x, stats
    ytie, y0, y1 = count_rank_tie(y)     # ties in y, stats
    tot = (size * (size - 1)) // 2
    if xtie == tot or ytie == tot:
        return KendalltauResult(np.nan, np.nan)
    # Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
    #               = con + dis + xtie + ytie - ntie
    con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
    con = tot - xtie - ytie + ntie
    if variant == 'b':
        tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
    elif variant == 'c':
        minclasses = min(len(set(x)), len(set(y)))
        tau = 2*con_minus_dis / (size**2 * (minclasses-1)/minclasses)
    elif variant == 'd':
        # tau-d: ties in y are counted as half-concordant.
        tau = (con + ytie*0.5) / (con + dis + ytie)
    else:
        # Fix: the message previously omitted the supported variant 'd'.
        raise ValueError(f"Unknown variant of the method chosen: {variant}. "
                         "variant must be 'b', 'c' or 'd'.")
    # Limit range to fix computational errors
    tau = min(1., max(-1., tau))
    # The p-value calculation below is kept only for its input validation;
    # its result is intentionally discarded (only tau is returned).
    if method == 'exact' and (xtie != 0 or ytie != 0):
        raise ValueError("Ties found, exact method cannot be used.")
    if method == 'auto':
        if (xtie == 0 and ytie == 0) and (size <= 33 or
                                          min(dis, tot-dis) <= 1):
            method = 'exact'
        else:
            method = 'asymptotic'
    if xtie == 0 and ytie == 0 and method == 'exact':
        pvalue = mstats_basic._kendall_p_exact(size, min(dis, tot-dis))
    elif method == 'asymptotic':
        # con_minus_dis is approx normally distributed with this variance [3]_
        m = size * (size - 1.)
        var = ((m * (2*size + 5) - x1 - y1) / 18 +
               (2 * xtie * ytie) / m + x0 * y0 / (9 * m * (size - 2)))
        pvalue = (special.erfc(np.abs(con_minus_dis) /
                               np.sqrt(var) / np.sqrt(2)))
    else:
        raise ValueError(f"Unknown method {method} specified.  Use 'auto', "
                         "'exact' or 'asymptotic'.")
    return tau
HIPT | HIPT-master/3-Self-Supervised-Eval/patch_extraction.py | ### Dependencies
# Base Dependencies
import os
import pickle
import sys
# LinAlg / Stats / Plotting Dependencies
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
import umap
import umap.plot
from tqdm import tqdm
# Torch Dependencies
import torch
import torch.multiprocessing
import torchvision
import torch.utils.data.dataset as Dataset
from torchvision import transforms
from pl_bolts.models.self_supervised import resnets
from pl_bolts.utils.semi_supervised import Identity
device = torch.device('cuda:0')
torch.multiprocessing.set_sharing_strategy('file_system')
# Model Architectures
from nn_encoder_arch.vision_transformer import vit_small
from nn_encoder_arch.resnet_trunc import resnet50_trunc_baseline
### Extracting Patch Features
# Extract patch-level embeddings for every benchmark dataset with each encoder.
# NOTE(review): ``create_embeddings`` is not imported in this file's visible
# import block — presumably supplied elsewhere (e.g. patch_extraction_utils);
# confirm it is in scope before running.
patch_datasets = 'path/to/patch/datasets'  # root of the raw patch datasets
library_path = './embeddings_patch_library/'  # where output pickles are stored
os.makedirs(library_path, exist_ok=True)
models = ['resnet50_trunc', 'resnet50_tcga_brca_simclr', 'vits_tcga_brca_dino']
for enc_name in models:
    create_embeddings(patch_datasets=patch_datasets, embeddings_dir=library_path, enc_name=enc_name, dataset='crc100knonorm')
    create_embeddings(patch_datasets=patch_datasets, embeddings_dir=library_path, enc_name=enc_name, dataset='crc100k')
    create_embeddings(patch_datasets=patch_datasets, embeddings_dir=library_path, enc_name=enc_name, dataset='bcss')
    create_embeddings(patch_datasets=patch_datasets, embeddings_dir=library_path, enc_name=enc_name, dataset='breastpathq')
HIPT | HIPT-master/3-Self-Supervised-Eval/slide_extraction_utils.py | # Base Dependencies
import os
import pickle
import sys
j_ = os.path.join
# LinAlg / Stats / Plotting Dependencies
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
# Scikit-Learn Imports
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score, StratifiedKFold
#Torch Imports
import torch
import torch.nn as nn
from torch.utils.data.dataset import Dataset
torch.multiprocessing.set_sharing_strategy('file_system')
def series_intersection(s1, s2):
    r"""
    Return the intersection of two pandas.Series as a new pd.Series.

    Args:
    - s1 (pd.Series): pd.Series object.
    - s2 (pd.Series): pd.Series object.
    Return:
    - pd.Series: Intersection of s1 and s2 (set-based, so ordering is
      arbitrary and duplicates are dropped).
    """
    common = set(s1).intersection(set(s2))
    return pd.Series(list(common))
def save_embeddings_mean(save_pickle_fpath, dataset):
    r"""
    Pickle each WSI in ``dataset`` as the mean of its instance-level embeddings.

    Args:
    - save_pickle_fpath (str): Save filepath for the pickle object.
    - dataset (torch.utils.data.Dataset): Dataset yielding ([N x D] bag, label) pairs.
    Return:
    - None
    """
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=4)
    all_means, all_labels = [], []
    with torch.no_grad():
        for bag, label in loader:
            # [1 x N x D] -> average over the N instances -> [D]
            all_means.append(bag.squeeze(dim=0).mean(dim=0).numpy())
            all_labels.append(label.numpy())
    asset_dict = {'embeddings': np.vstack(all_means),
                  'labels': np.vstack(all_labels).squeeze()}
    with open(save_pickle_fpath, 'wb') as handle:
        pickle.dump(asset_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
class SlideEmbeddingSplitDataset(Dataset):
    r"""
    torch.utils.data.dataset object that iterates+loads each WSI from a split CSV file
    Args:
    - dataroot (str): Directory containing 'tcga_wsi_labels.csv'.
    - tcga_csv (pd.DataFrame): Clinical CSV (as a pd.DataFrame object) for a TCGA Study
    - pt_path (str): Path to folder of saved instance-level feature embeddings for each WSI.
    - splits_csv (pd.DataFrame): DataFrame which contains slide_ids for train / val / test
    - label_col (str): Which column to use as labels in tcga_csv
    - label_dict (dict): Dictionary for categorizing labels
        NOTE(review): mutable default argument — safe only as long as it is
        never mutated; confirm before reusing this class elsewhere.
    Return:
    - None
    """
    def __init__(self, dataroot, tcga_csv, pt_path, splits_csv=None,
        label_col='oncotree_code', label_dict={'LUSC':0, 'LUAD':1}):
        self.csv = pd.read_csv(os.path.join(dataroot, 'tcga_wsi_labels.csv'))
        # Full path to each slide's saved instance-embedding tensor.
        self.csv['slide_path'] = pt_path+self.csv['slide_id']
        self.csv = self.csv.set_index('slide_id', drop=True).drop(['Unnamed: 0'], axis=1)
        # Drop the trailing 3 characters of the slide id (presumably a '.pt'
        # extension — TODO confirm) so the index joins against tcga_csv.
        self.csv.index = self.csv.index.str[:-3]
        self.csv.index.name = None
        self.csv = self.csv.join(tcga_csv, how='inner')
        if splits_csv is not None:
            # Keep only slides that appear in the requested split.
            self.csv = self.csv.loc[series_intersection(splits_csv.dropna(), self.csv.index)]
        self.label_col = label_col
        self.label_dict = label_dict
        ### If using DINO Features, subset and save only the last 384-dim features.
        if 'dino_pt_patch_features' in pt_path:
            self.last_stage = True
        else:
            self.last_stage = False
    def __getitem__(self, index):
        # Returns ([N x D] instance embeddings, [1] integer class label).
        x = torch.load(self.csv['slide_path'][index])
        if self.last_stage and x.shape[1] == 1536:
            # 1536 = 4 concatenated 384-dim stages; keep only the last stage.
            x = x[:,(1536-384):1536]
        label = torch.Tensor([self.label_dict[self.csv[self.label_col][index]]]).to(torch.long)
        return x, label
    def __len__(self):
        return self.csv.shape[0]
def create_slide_embeddings(dataroot, saveroot, enc_name, study):
    r"""
    Creates + saves mean slide embeddings (train/test pickles per fold) for a
    given encoder and TCGA study.

    Args:
    - dataroot (str): Root directory holding the extracted patch features.
    - saveroot (str): Output directory (a subfolder per encoder is created).
    - enc_name (str): Encoder type ('vit256mean', 'vit16mean', 'resnet50mean').
    - study (str): TCGA study ('tcga_brca', 'tcga_kidney', 'tcga_lung').
    Return:
    - None

    Raises:
    - ValueError: For an unrecognized ``enc_name`` or ``study``.
    """
    path2csv = '../Weakly-Supervised-Subtyping/dataset_csv/'
    path2splits = '../Weakly-Supervised-Subtyping/splits/'
    splits_folder = j_(path2splits, '10foldcv_subtype', study)
    tcga_csv = pd.read_csv(j_(path2csv, f'{study}_subset.csv.zip'), index_col=2)['oncotree_code']
    # Trim the trailing 4 characters so the index matches slide ids elsewhere.
    tcga_csv.index = tcga_csv.index.str[:-4]
    tcga_csv.index.name = None
    save_embedding_dir = j_(saveroot, enc_name)
    os.makedirs(save_embedding_dir, exist_ok=True)
    if enc_name == 'vit256mean':
        pt_path = j_(dataroot, 'vit256mean_tcga_slide_embeddings')
    elif enc_name == 'vit16mean':
        extracted_dir = f'{study}/extracted_mag20x_patch256_fp/vits_tcga_pancancer_dino_pt_patch_features/'
        pt_path = j_(dataroot, extracted_dir)
    elif enc_name == 'resnet50mean':
        extracted_dir = f'{study}/extracted_mag20x_patch256_fp/resnet50_trunc_pt_patch_features/'
        pt_path = j_(dataroot, extracted_dir)
    else:
        # Fail fast: previously an unknown enc_name fell through and crashed
        # later with an UnboundLocalError on pt_path.
        raise ValueError(f"Unknown enc_name: '{enc_name}'")
    if study == 'tcga_brca':
        label_dict = {'IDC':0, 'ILC':1}
        tcga_csv = tcga_csv[tcga_csv.str.contains('IDC|ILC')]
    elif study == 'tcga_kidney':
        label_dict = {'CCRCC':0, 'PRCC':1, 'CHRCC': 2}
    elif study == 'tcga_lung':
        label_dict = {'LUSC':0, 'LUAD':1}
    else:
        # Same rationale: avoid an UnboundLocalError on label_dict below.
        raise ValueError(f"Unknown study: '{study}'")
    for i in tqdm(range(10)):
        splits_csv = pd.read_csv(os.path.join(splits_folder, f'splits_{i}.csv'), index_col=0)
        train = SlideEmbeddingSplitDataset(dataroot=dataroot, tcga_csv=tcga_csv, pt_path=pt_path,
                                           splits_csv=splits_csv['train'], label_dict=label_dict)
        test = SlideEmbeddingSplitDataset(dataroot=dataroot, tcga_csv=tcga_csv, pt_path=pt_path,
                                          splits_csv=splits_csv['test'], label_dict=label_dict)
        save_embeddings_mean(j_(save_embedding_dir, f'{study}_{enc_name}_class_split_train_{i}.pkl'), train)
        save_embeddings_mean(j_(save_embedding_dir, f'{study}_{enc_name}_class_split_test_{i}.pkl'), test)
HIPT | HIPT-master/3-Self-Supervised-Eval/patch_extraction_utils.py | ### Dependencies
# Base Dependencies
import os
import pickle
import sys
# LinAlg / Stats / Plotting Dependencies
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
import umap
import umap.plot
from tqdm import tqdm
# Torch Dependencies
import torch
import torch.multiprocessing
import torchvision
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from pl_bolts.models.self_supervised import resnets
from pl_bolts.utils.semi_supervised import Identity
device = torch.device('cuda:0')
torch.multiprocessing.set_sharing_strategy('file_system')
# Model Architectures
from nn_encoder_arch.vision_transformer import vit_small
from nn_encoder_arch.resnet_trunc import resnet50_trunc_baseline
### Helper Functions for Normalization + Loading in pytorch_lightning SSL encoder (for SimCLR)
def eval_transforms(pretrained=False):
    """Evaluation transform: ToTensor followed by per-channel normalization.

    Uses ImageNet statistics when ``pretrained`` is True, otherwise 0.5 / 0.5.
    """
    if pretrained:
        mean = (0.485, 0.456, 0.406)
        std = (0.229, 0.224, 0.225)
    else:
        mean = (0.5, 0.5, 0.5)
        std = (0.5, 0.5, 0.5)
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std),
    ])
def torchvision_ssl_encoder(name: str, pretrained: bool = False, return_all_feature_maps: bool = False):
    """Build a pl_bolts ResNet trunk with its classification head removed."""
    backbone = getattr(resnets, name)(pretrained=pretrained, return_all_feature_maps=return_all_feature_maps)
    backbone.fc = Identity()  # strip the final FC so the model emits features
    return backbone
### Wrapper Classes for loading in patch datasets for BreastPathQ + BCSS (CRC100K uses the ImageFolder Dataset Class)
class CSVDataset_BreastPathQ(Dataset):
    """Patch dataset for BreastPathQ: images located via a labels CSV."""
    def __init__(self, dataroot, csv_path, transforms_eval=eval_transforms()):
        self.csv = pd.read_csv(csv_path)
        # Image filenames follow the "<slide>_<rid>.tif" convention.
        self.csv['img_path'] = (dataroot + self.csv['slide'].astype(str)
                                + "_" + self.csv['rid'].astype(str) + '.tif')
        self.transforms = transforms_eval

    def __getitem__(self, index):
        img = Image.open(self.csv['img_path'][index])
        target = self.csv['y'][index]
        return self.transforms(img), target

    def __len__(self):
        return len(self.csv)
class CSVDataset_BCSS(Dataset):
    """Patch dataset for BCSS, filtered to the requested train/test split."""
    def __init__(self, dataset_csv, is_train=1, transforms_eval=eval_transforms()):
        # The DataFrame index holds the image path; 'train' marks the split.
        self.csv = dataset_csv[dataset_csv['train'] == is_train]
        self.transforms = transforms_eval

    def __getitem__(self, index):
        img = Image.open(self.csv.index[index])
        return self.transforms(img), self.csv.iloc[index]['label']

    def __len__(self):
        return len(self.csv)
### Functions for Loading + Saving + Visualizing Patch Embeddings
def save_embeddings(model, fname, dataloader, dataset=None, is_imagefolder=False,
                    save_patches=False, sprite_dim=128, overwrite=False):
    """Extract features for every batch in ``dataloader`` and pickle them.

    Args:
    - model (callable): Feature extractor mapping [B x C x H x W] -> [B x D].
    - fname (str): Output path without the '.pkl' extension.
    - dataloader: torch DataLoader yielding (batch, target) pairs.
    - dataset: Underlying dataset (needed for ImageFolder label decoding).
    - is_imagefolder (bool): If True, map integer labels back to class names.
    - save_patches (bool): Also store downsampled patch thumbnails.
    - sprite_dim (int): Side length of the saved patch thumbnails.
    - overwrite (bool): If False, skip when the pickle already exists.
    Return:
    - None
    """
    if os.path.isfile('%s.pkl' % fname) and (overwrite == False):
        return None
    embeddings, labels = [], []
    patches = []
    for batch, target in tqdm(dataloader):
        if save_patches:
            for img in batch:
                # NOTE(review): tensor2im is not in this file's visible import
                # block — confirm it is in scope at runtime.
                # Fix: PIL's Image.resize expects a (width, height) tuple;
                # passing the bare int raised a TypeError.
                patches.append(tensor2im(input_image=img).resize((sprite_dim, sprite_dim)))
        with torch.no_grad():
            batch = batch.to(device)
            embeddings.append(model(batch).detach().cpu().numpy())
        labels.append(target.numpy())
    embeddings = np.vstack(embeddings)
    labels = np.vstack(labels).squeeze()
    if is_imagefolder:
        # ImageFolder yields integer class ids; convert back to folder names.
        id2label = dict(map(reversed, dataset.class_to_idx.items()))
        labels = np.array(list(map(id2label.get, labels.ravel())))
    asset_dict = {'embeddings': embeddings, 'labels': labels}
    if save_patches:
        asset_dict.update({'patches': patches})
    with open('%s.pkl' % (fname), 'wb') as handle:
        pickle.dump(asset_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
def create_UMAP(library_path, save_path, dataset, enc_name, n=15, d=0.1):
    """Fit a 2D UMAP on saved patch embeddings and save a labeled scatter plot.

    Args:
    - library_path (str): Folder containing the '<dataset>_<enc_name>.pkl' file.
    - save_path (str): Folder where the JPEG figure is written.
    - dataset (str): Dataset identifier used in the pickle filename.
    - enc_name (str): Encoder identifier used in the pickle filename.
    - n (int): UMAP n_neighbors.
    - d (float): UMAP min_dist.
    """
    pkl_path = os.path.join(library_path, '%s_%s.pkl' % (dataset, enc_name))
    with open(pkl_path, 'rb') as handle:
        asset_dict = pickle.load(handle)
    embeddings, labels = asset_dict['embeddings'], asset_dict['labels']
    if 'crc100k' in dataset:
        labels[labels == 'MUS'] = 'STR'  # merge muscle into stroma for plotting
    mapper = umap.UMAP(n_neighbors=n, min_dist=d).fit(embeddings)
    fig = plt.figure(figsize=(10, 10), dpi=100)
    umap.plot.points(mapper, labels=labels, width=600, height=600)
    plt.tight_layout()
    out_name = '%s_%s_umap_n%d_d%0.2f.jpg' % (dataset, enc_name, n, d)
    plt.savefig(os.path.join(save_path, out_name))
def create_embeddings(embeddings_dir, enc_name, dataset, save_patches=False, sprite_dim=128,
                      patch_datasets='path/to/patch/datasets', assets_dir='./ckpts/',
                      disentangle=-1, stage=-1):
    """Extract + pickle patch embeddings for a benchmark dataset with a given encoder.

    Args:
    - embeddings_dir (str): Where to save the output pickles.
    - enc_name (str): Encoder: 'resnet50_trunc', or a name containing 'dino' / 'simclr'
      (a matching checkpoint '<enc_name>.pt' must exist in ``assets_dir``).
    - dataset (str): One of 'crc100k', 'crc100knonorm', 'breastpathq', 'bcss'.
    - save_patches (bool): Also store patch thumbnails in the pickles.
    - sprite_dim (int): Thumbnail side length.
    - patch_datasets (str): Root folder of the raw patch datasets.
    - assets_dir (str): Folder containing the model checkpoints.
    - disentangle (int): Unused (kept for interface compatibility).
    - stage (int): For DINO encoders, concatenate [CLS] tokens of the last
      ``stage`` blocks instead of the final output (-1 = final output only).
    Return:
    - None

    Raises:
    - ValueError: For an unrecognized ``enc_name``.
    """
    print("Extracting Features for '%s' via '%s'" % (dataset, enc_name))
    if enc_name == 'resnet50_trunc':
        model = resnet50_trunc_baseline(pretrained=True)
        eval_t = eval_transforms(pretrained=True)
    elif 'dino' in enc_name:
        ckpt_path = os.path.join(assets_dir, enc_name+'.pt')
        assert os.path.isfile(ckpt_path)
        model = vit_small(patch_size=16)
        state_dict = torch.load(ckpt_path, map_location="cpu")['teacher']
        # Strip DDP / DINO-head prefixes so the keys match the bare backbone.
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        eval_t = eval_transforms(pretrained=False)
    elif 'simclr' in enc_name:
        ckpt_path = os.path.join(assets_dir, enc_name+'.pt')
        assert os.path.isfile(ckpt_path)
        model = torchvision_ssl_encoder('resnet50', pretrained=True)
        missing_keys, unexpected_keys = model.load_state_dict(torch.load(ckpt_path), strict=False)
        eval_t = eval_transforms(pretrained=False)
    else:
        # Fix: previously this fell through with `pass`, producing an
        # UnboundLocalError on `model` a few lines below.
        raise ValueError("Unknown enc_name: '%s'" % enc_name)
    model = model.to(device)
    model.eval()
    # Wrap encoders whose forward() does not directly return a [B x D] tensor.
    if 'simclr' in enc_name or 'simsiam' in enc_name:
        _model = model
        model = lambda x: _model.forward(x)[0]
    elif 'dino' in enc_name:
        _model = model
        if stage == -1:
            model = _model
        else:
            model = lambda x: torch.cat([x[:, 0] for x in _model.get_intermediate_layers(x, stage)], dim=-1)
    if stage != -1:
        _stage = '_s%d' % stage
    else:
        _stage = ''
    # NOTE: `dataset` (the name) is shadowed below by the dataset objects.
    if dataset == 'crc100k':
        ### Train
        dataroot = os.path.join(patch_datasets, 'NCT-CRC-HE-100K/')
        dataset = torchvision.datasets.ImageFolder(dataroot, transform=eval_t)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=False, num_workers=4)
        fname = os.path.join(embeddings_dir, 'crc100k_train_%s%s' % (enc_name, _stage))
        save_embeddings(model=model, fname=fname, dataloader=dataloader, dataset=dataset,
                        save_patches=save_patches, sprite_dim=sprite_dim, is_imagefolder=True)
        ### Test
        dataroot = os.path.join(patch_datasets, 'CRC-VAL-HE-7K/')
        dataset = torchvision.datasets.ImageFolder(dataroot, transform=eval_t)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=4)
        fname = os.path.join(embeddings_dir, 'crc100k_val_%s%s' % (enc_name, _stage))
        save_embeddings(model=model, fname=fname, dataloader=dataloader, dataset=dataset,
                        save_patches=save_patches, sprite_dim=sprite_dim, is_imagefolder=True)
    elif dataset == 'crc100knonorm':
        ### Train
        dataroot = os.path.join(patch_datasets, 'NCT-CRC-HE-100K-NONORM/')
        dataset = torchvision.datasets.ImageFolder(dataroot, transform=eval_t)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=False, num_workers=4)
        fname = os.path.join(embeddings_dir, 'crc100knonorm_train_%s%s' % (enc_name, _stage))
        save_embeddings(model=model, fname=fname, dataloader=dataloader, dataset=dataset,
                        save_patches=save_patches, sprite_dim=sprite_dim, is_imagefolder=True)
        ### Test
        dataroot = os.path.join(patch_datasets, 'CRC-VAL-HE-7K/')
        dataset = torchvision.datasets.ImageFolder(dataroot, transform=eval_t)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=4)
        fname = os.path.join(embeddings_dir, 'crc100knonorm_val_%s%s' % (enc_name, _stage))
        save_embeddings(model=model, fname=fname, dataloader=dataloader, dataset=dataset,
                        save_patches=save_patches, sprite_dim=sprite_dim, is_imagefolder=True)
    elif dataset == 'breastpathq':
        train_dataroot = os.path.join(patch_datasets, 'BreastPathQ/breastpathq/datasets/train/')
        val_dataroot = os.path.join(patch_datasets, 'BreastPathQ/breastpathq/datasets/validation/')
        train_csv = os.path.join(patch_datasets, 'BreastPathQ/breastpathq/datasets/train_labels.csv')
        val_csv = os.path.join(patch_datasets, 'BreastPathQ/breastpathq/datasets/val_labels.csv')
        train_dataset = CSVDataset_BreastPathQ(dataroot=train_dataroot, csv_path=train_csv, transforms_eval=eval_t)
        train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=False, num_workers=4)
        val_dataset = CSVDataset_BreastPathQ(dataroot=val_dataroot, csv_path=val_csv, transforms_eval=eval_t)
        val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=4)
        train_fname = os.path.join(embeddings_dir, 'breastpathq_train_%s%s' % (enc_name, _stage))
        val_fname = os.path.join(embeddings_dir, 'breastpathq_val_%s%s' % (enc_name, _stage))
        save_embeddings(model=model, fname=train_fname, dataloader=train_dataloader,
                        save_patches=save_patches, sprite_dim=sprite_dim)
        save_embeddings(model=model, fname=val_fname, dataloader=val_dataloader,
                        save_patches=save_patches, sprite_dim=sprite_dim)
    elif dataset == 'bcss':
        dataroot = os.path.join(patch_datasets, 'BCSS/40x/patches/All/')
        csv_path = os.path.join(patch_datasets, 'BCSS/40x/patches/summary.csv')
        dataset_csv = pd.read_csv(csv_path, sep=' ')['filename,train'].str.split(',', expand=True).astype(int)
        dataset_csv.columns = ['label', 'train']
        dataset_csv = dataset_csv[dataset_csv['label'].isin([0,1,2,3])]
        dataset_csv.index = [os.path.join(dataroot, fname+'.png') for fname in dataset_csv.index]
        train_dataset = CSVDataset_BCSS(dataset_csv=dataset_csv, is_train=1, transforms_eval=eval_t)
        train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=False, num_workers=1)
        val_dataset = CSVDataset_BCSS(dataset_csv=dataset_csv, is_train=0, transforms_eval=eval_t)
        val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=1)
        train_fname = os.path.join(embeddings_dir, 'bcss_train_%s%s' % (enc_name, _stage))
        val_fname = os.path.join(embeddings_dir, 'bcss_val_%s%s' % (enc_name, _stage))
        save_embeddings(model=model, fname=train_fname, dataloader=train_dataloader,
                        save_patches=save_patches, sprite_dim=sprite_dim)
        save_embeddings(model=model, fname=val_fname, dataloader=val_dataloader,
                        save_patches=save_patches, sprite_dim=sprite_dim)
save_patches=save_patches, sprite_dim=sprite_dim) | 11,702 | 46.573171 | 117 | py |
HIPT | HIPT-master/HIPT_4K/hipt_4k.py | ### Dependencies
# Base Dependencies
import os
import pickle
import sys
# LinAlg / Stats / Plotting Dependencies
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
from tqdm import tqdm
# Torch Dependencies
import torch
import torch.multiprocessing
import torchvision
from torchvision import transforms
from einops import rearrange, repeat
torch.multiprocessing.set_sharing_strategy('file_system')
# Local Dependencies
import vision_transformer as vits
import vision_transformer4k as vits4k
from hipt_heatmap_utils import *
from hipt_model_utils import get_vit256, get_vit4k, tensorbatch2im, eval_transforms, roll_batch2img
class HIPT_4K(torch.nn.Module):
"""
HIPT Model (ViT-4K) for encoding non-square images (with [256 x 256] patch tokens), with
[256 x 256] patch tokens encoded via ViT-256 using [16 x 16] patch tokens.
"""
def __init__(self,
model256_path: str = '../Checkpoints/vit256_small_dino.pth',
model4k_path: str = '../Checkpoints/vit4k_xs_dino.pth',
device256=torch.device('cuda:0'),
device4k=torch.device('cuda:1')):
super().__init__()
self.model256 = get_vit256(pretrained_weights=model256_path).to(device256)
self.model4k = get_vit4k(pretrained_weights=model4k_path).to(device4k)
self.device256 = device256
self.device4k = device4k
def forward(self, x):
    """
    Forward pass of HIPT (given an image tensor x), outputting the [CLS] token from ViT-4K.
    1. x is center-cropped such that the W / H is divisible by the patch token size in ViT-4K (e.g. - 256 x 256).
    2. x then gets unfolded into a "batch" of [256 x 256] images.
    3. A pretrained ViT-256 model extracts the CLS token from each [256 x 256] image in the batch.
    4. These batch-of-features are then reshaped into a 2D feature grid (of width "w_256" and height "h_256".)
    5. This feature grid is then used as the input to ViT-4K, outputting [CLS]_4K.

    Args:
    - x (torch.Tensor): [1 x C x W' x H'] image tensor.

    Return:
    - features_cls4k (torch.Tensor): [1 x 192] cls token (d_4k = 192 by default).
    """
    batch_256, w_256, h_256 = self.prepare_img_tensor(x)                    # 1. [1 x 3 x W x H]
    batch_256 = batch_256.unfold(2, 256, 256).unfold(3, 256, 256)           # 2. [1 x 3 x w_256 x h_256 x 256 x 256]
    batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h')    # 2. [B x 3 x 256 x 256], where B = (1*w_256*h_256)

    features_cls256 = []
    for mini_bs in range(0, batch_256.shape[0], 256):                       # 3. B may be too large for ViT-256. We further take minibatches of 256.
        minibatch_256 = batch_256[mini_bs:mini_bs+256].to(self.device256, non_blocking=True)
        features_cls256.append(self.model256(minibatch_256).detach().cpu()) # 3. Extracting ViT-256 features from [256 x 3 x 256 x 256] image batches.

    features_cls256 = torch.vstack(features_cls256)                         # 3. [B x 384], where 384 == dim of ViT-256 [ClS] token.
    # 4. Re-lay the flat token sequence out as a [1 x 384 x w_256 x h_256] grid
    # (the two transposes move the channel dim to the front).
    features_cls256 = features_cls256.reshape(w_256, h_256, 384).transpose(0,1).transpose(0,2).unsqueeze(dim=0)
    features_cls256 = features_cls256.to(self.device4k, non_blocking=True)  # 4. [1 x 384 x w_256 x h_256]
    features_cls4k = self.model4k.forward(features_cls256)                  # 5. [1 x 192], where 192 == dim of ViT-4K [ClS] token.
    return features_cls4k
def forward_asset_dict(self, x: torch.Tensor):
    """
    Forward pass of HIPT (given an image tensor x), with certain intermediate representations saved in
    a dictionary (that is to be stored in a H5 file). See walkthrough of how the model works in forward().

    Args:
    - x (torch.Tensor): [1 x C x W' x H'] image tensor.

    Return:
    - asset_dict (dict): Dictionary of intermediate feature representations of HIPT and other metadata.
        - features_cls256 (np.array): [B x 384] extracted ViT-256 cls tokens
        - features_mean256 (np.array): [1 x 384] mean over all B ViT-256 cls tokens
        - features_cls4k (np.array): [1 x 192] extracted ViT-4K cls token.
        - features_mean256_cls4k (np.array): [1 x 576] feature vector (concatenating mean ViT-256 + ViT-4K cls tokens)
    """
    batch_256, w_256, h_256 = self.prepare_img_tensor(x)
    batch_256 = batch_256.unfold(2, 256, 256).unfold(3, 256, 256)
    batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h')

    # Encode all [256 x 256] patches in minibatches of 256 on the ViT-256 device.
    features_cls256 = []
    for mini_bs in range(0, batch_256.shape[0], 256):
        minibatch_256 = batch_256[mini_bs:mini_bs+256].to(self.device256, non_blocking=True)
        features_cls256.append(self.model256(minibatch_256).detach().cpu())

    features_cls256 = torch.vstack(features_cls256)
    # Mean patch token over the whole grid (no tissue filtering is applied here).
    features_mean256 = features_cls256.mean(dim=0).unsqueeze(dim=0)

    # Reshape flat tokens to a [1 x 384 x w_256 x h_256] grid for ViT-4K.
    features_grid256 = features_cls256.reshape(w_256, h_256, 384).transpose(0,1).transpose(0,2).unsqueeze(dim=0)
    features_grid256 = features_grid256.to(self.device4k, non_blocking=True)
    features_cls4k = self.model4k.forward(features_grid256).detach().cpu()
    features_mean256_cls4k = torch.cat([features_mean256, features_cls4k], dim=1)

    asset_dict = {
        'features_cls256': features_cls256.numpy(),
        'features_mean256': features_mean256.numpy(),
        'features_cls4k': features_cls4k.numpy(),
        'features_mean256_cls4k': features_mean256_cls4k.numpy()
    }
    return asset_dict
def _get_region_attention_scores(self, region, scale=1):
    r"""
    Forward pass in hierarchical model with attention scores saved.

    Args:
    - region (PIL.Image): region image whose sides are (cropped to be) multiples of 256.
    - scale (int): How much to scale the output image by (e.g. - scale=4 will resize images to be 1024 x 1024.)

    Returns:
    - np.array: [B, 256/scale, 256/scale, 3] sequence of image patches from the region (B = w_256 * h_256).
    - attention_256 (np.array): [B, nh, 256/scale, 256/scale] per-patch ViT-256 attention maps (nh = # heads).
    - attention_4k (np.array): [nh, (256*w_256)/scale, (256*h_256)/scale] ViT-4K attention maps over the patch grid.
    """
    # eval_transforms() performs the ToTensor + normalization; the previously
    # constructed (and unused) local transform was removed.
    x = eval_transforms()(region).unsqueeze(dim=0)
    batch_256, w_256, h_256 = self.prepare_img_tensor(x)
    batch_256 = batch_256.unfold(2, 256, 256).unfold(3, 256, 256)
    batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h')
    batch_256 = batch_256.to(self.device256, non_blocking=True)
    features_cls256 = self.model256(batch_256)

    attention_256 = self.model256.get_last_selfattention(batch_256)
    nh = attention_256.shape[1]  # number of heads
    # CLS-to-token attention per patch. Fix: the first dim is the number of
    # patches (w_256*h_256), not a hard-coded 256 — non-square regions would
    # otherwise fail the reshape (the line below already used w_256*h_256).
    attention_256 = attention_256[:, :, 0, 1:].reshape(w_256*h_256, nh, -1)
    attention_256 = attention_256.reshape(w_256*h_256, nh, 16, 16)
    attention_256 = nn.functional.interpolate(attention_256, scale_factor=int(16/scale), mode="nearest").cpu().numpy()

    # Lay ViT-256 tokens out as a [1 x 384 x w_256 x h_256] grid for ViT-4K.
    features_grid256 = features_cls256.reshape(w_256, h_256, 384).transpose(0,1).transpose(0,2).unsqueeze(dim=0)
    features_grid256 = features_grid256.to(self.device4k, non_blocking=True)
    features_cls4k = self.model4k.forward(features_grid256).detach().cpu()

    attention_4k = self.model4k.get_last_selfattention(features_grid256)
    nh = attention_4k.shape[1]  # number of heads
    attention_4k = attention_4k[0, :, 0, 1:].reshape(nh, -1)
    attention_4k = attention_4k.reshape(nh, w_256, h_256)
    attention_4k = nn.functional.interpolate(attention_4k.unsqueeze(0), scale_factor=int(256/scale), mode="nearest")[0].cpu().numpy()

    if scale != 1:
        batch_256 = nn.functional.interpolate(batch_256, scale_factor=(1/scale), mode="nearest")

    return tensorbatch2im(batch_256), attention_256, attention_4k
def get_region_attention_heatmaps(self, x, offset=128, scale=4, alpha=0.5, cmap = cmap_map(lambda x: x/2 + 0.5, matplotlib.cm.jet), threshold=None):
    r"""
    Creates hierarchical heatmaps (ViT-256 + ViT-4K + combined heatmaps per head).

    Four copies of the region — the original plus three copies shifted by a
    growing multiple of 128 px (top-left crop, white bottom-right padding) —
    are scored and blended so patch boundaries do not leave seams.

    Args:
    - x (torch.Tensor): [1 x C x W x H] image tensor.
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending.
    - scale (int): How much to scale the output image by.
    - alpha (float): Image blending factor for cv2.addWeighted.
    - cmap (matplotlib colormap): Colormap for creating heatmaps.
    - threshold (float or None): If set, additionally saves thresholded ViT-256 overlays.

    Returns:
    - hm4k (list of PIL.Image): per-head ViT-4K heatmaps.
    - hm256 (list of PIL.Image): per-head ViT-256 heatmaps.
    - hm4k_256 (list of PIL.Image): combined ViT-4K x ViT-256 heatmaps (one per head pair).
    """
    region = Image.fromarray(tensorbatch2im(x)[0])
    w, h = region.size
    # Shifted copies: crop k*128 px off the top-left, pad k*128 px of white
    # onto the bottom-right so every copy keeps the original size.
    region2 = add_margin(region.crop((128,128,w,h)),
                     top=0, left=0, bottom=128, right=128, color=(255,255,255))
    region3 = add_margin(region.crop((128*2,128*2,w,h)),
                     top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
    # Fix: pad by 128*3 (was 128*4) so region4 keeps the original size and
    # stays aligned after prepare_img_tensor's center crop.
    region4 = add_margin(region.crop((128*3,128*3,w,h)),
                     top=0, left=0, bottom=128*3, right=128*3, color=(255,255,255))

    # Fix: score the shifted copies. Previously `region` was scored four
    # times, so region2/3/4 were computed but never used and the blending
    # below averaged four identical maps.
    b256_1, a256_1, a4k_1 = self._get_region_attention_scores(region, scale)
    b256_2, a256_2, a4k_2 = self._get_region_attention_scores(region2, scale)
    b256_3, a256_3, a4k_3 = self._get_region_attention_scores(region3, scale)
    b256_4, a256_4, a4k_4 = self._get_region_attention_scores(region4, scale)

    offset_2 = (offset*1)//scale
    offset_3 = (offset*2)//scale
    offset_4 = (offset*3)//scale
    w_s, h_s = w//scale, h//scale
    w_256, h_256 = w//256, h//256
    save_region = np.array(region.resize((w_s, h_s)))

    if threshold != None:
        # NOTE(review): this branch references `output_dir` and `fname`,
        # which are not defined in this scope and will raise NameError at the
        # save below — confirm the intended destination before enabling it.
        # NOTE(review): slice order here is [.. :w_s, .. :h_s] while the loops
        # below use [.. :h_s, .. :w_s] — verify for non-square regions.
        for i in range(6):
            score256_1 = concat_scores256(a256_1[:,i,:,:], w_256, h_256, size=(w_s//w_256,h_s//h_256))
            score256_2 = concat_scores256(a256_2[:,i,:,:], w_256, h_256, size=(w_s//w_256,h_s//h_256))
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:w_s, offset_2:h_s] = score256_2[:(w_s-offset_2), :(h_s-offset_2)]
            overlay256 = np.ones_like(score256_2)*100
            overlay256[offset_2:w_s, offset_2:h_s] += 100
            score256 = (score256_1+new_score256_2)/overlay256
            mask256 = score256.copy()
            mask256[mask256 < threshold] = 0
            mask256[mask256 > threshold] = 0.95
            color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
            region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            region256_hm[mask256==0] = 0
            img_inverse = save_region.copy()
            img_inverse[mask256 == 0.95] = 0
            Image.fromarray(region256_hm+img_inverse).save(os.path.join(output_dir, '%s_256th[%d].png' % (fname, i)))

    hm4k, hm256, hm4k_256 = [], [], []

    # Per-head ViT-4K heatmaps: blend the four shifted score maps, weighting
    # each pixel by how many copies cover it (overlay accumulates 100/copy).
    for j in range(6):
        score4k_1 = concat_scores4k(a4k_1[j], size=(h_s,w_s))
        score4k_2 = concat_scores4k(a4k_2[j], size=(h_s,w_s))
        score4k_3 = concat_scores4k(a4k_3[j], size=(h_s,w_s))
        score4k_4 = concat_scores4k(a4k_4[j], size=(h_s,w_s))
        new_score4k_2 = np.zeros_like(score4k_2)
        new_score4k_2[offset_2:h_s, offset_2:w_s] = score4k_2[:(h_s-offset_2), :(w_s-offset_2)]
        new_score4k_3 = np.zeros_like(score4k_3)
        new_score4k_3[offset_3:h_s, offset_3:w_s] = score4k_3[:(h_s-offset_3), :(w_s-offset_3)]
        new_score4k_4 = np.zeros_like(score4k_4)
        new_score4k_4[offset_4:h_s, offset_4:w_s] = score4k_4[:(h_s-offset_4), :(w_s-offset_4)]
        overlay4k = np.ones_like(score4k_2)*100
        overlay4k[offset_2:h_s, offset_2:w_s] += 100
        overlay4k[offset_3:h_s, offset_3:w_s] += 100
        overlay4k[offset_4:h_s, offset_4:w_s] += 100
        score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
        color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
        region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        hm4k.append(Image.fromarray(region4k_hm))

    # Per-head ViT-256 heatmaps: blend the original and the 128-px-shifted copy.
    for i in range(6):
        score256_1 = concat_scores256(a256_1[:,i,:,:], h_256, w_256, size=(256, 256))
        score256_2 = concat_scores256(a256_2[:,i,:,:], h_256, w_256, size=(256, 256))
        new_score256_2 = np.zeros_like(score256_2)
        new_score256_2[offset_2:h_s, offset_2:w_s] = score256_2[:(h_s-offset_2), :(w_s-offset_2)]
        overlay256 = np.ones_like(score256_2)*100
        overlay256[offset_2:h_s, offset_2:w_s] += 100
        score256 = (score256_1+new_score256_2)/overlay256
        color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
        region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        hm256.append(Image.fromarray(region256_hm))

    # Combined heatmaps: coverage-weighted average of the ViT-4K and ViT-256
    # score maps, for every (4K head j, 256 head i) pair (6 x 6 images).
    for j in range(6):
        score4k_1 = concat_scores4k(a4k_1[j], size=(h_s,w_s))
        score4k_2 = concat_scores4k(a4k_2[j], size=(h_s,w_s))
        score4k_3 = concat_scores4k(a4k_3[j], size=(h_s,w_s))
        score4k_4 = concat_scores4k(a4k_4[j], size=(h_s,w_s))
        new_score4k_2 = np.zeros_like(score4k_2)
        new_score4k_2[offset_2:h_s, offset_2:w_s] = score4k_2[:(h_s-offset_2), :(w_s-offset_2)]
        new_score4k_3 = np.zeros_like(score4k_3)
        new_score4k_3[offset_3:h_s, offset_3:w_s] = score4k_3[:(h_s-offset_3), :(w_s-offset_3)]
        new_score4k_4 = np.zeros_like(score4k_4)
        new_score4k_4[offset_4:h_s, offset_4:w_s] = score4k_4[:(h_s-offset_4), :(w_s-offset_4)]
        overlay4k = np.ones_like(score4k_2)*100
        overlay4k[offset_2:h_s, offset_2:w_s] += 100
        overlay4k[offset_3:h_s, offset_3:w_s] += 100
        overlay4k[offset_4:h_s, offset_4:w_s] += 100
        score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
        for i in range(6):
            score256_1 = concat_scores256(a256_1[:,i,:,:], h_256, w_256, size=(256, 256))
            score256_2 = concat_scores256(a256_2[:,i,:,:], h_256, w_256, size=(256, 256))
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:h_s, offset_2:w_s] = score256_2[:(h_s-offset_2), :(w_s-offset_2)]
            overlay256 = np.ones_like(score256_2)*100
            overlay256[offset_2:h_s, offset_2:w_s] += 100
            score256 = (score256_1+new_score256_2)/overlay256
            score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256)
            color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
            region4k_256_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            hm4k_256.append(Image.fromarray(region4k_256_hm))

    return hm4k, hm256, hm4k_256
def prepare_img_tensor(self, img: torch.Tensor, patch_size=256):
    """
    Center-crop a (possibly non-square) image tensor so that both spatial
    dims are evenly divisible by patch_size.

    Args:
    - img (torch.Tensor): [1 x C x W' x H'] image tensor.
    - patch_size (int): Desired patch size to evenly subdivide the image.

    Return:
    - img_new (torch.Tensor): [1 x C x W x H] image tensor, where W and H are divisible by patch_size.
    - w_256 (int): # of [patch_size x patch_size] patches along img_new's width (e.g. - W/patch_size).
    - h_256 (int): # of [patch_size x patch_size] patches along img_new's height (e.g. - H/patch_size).
    """
    b, c, w, h = img.shape
    # Largest multiples of patch_size that fit in each spatial dim.
    new_w = w - (w % patch_size)
    new_h = h - (h % patch_size)
    w_256, h_256 = w // patch_size, h // patch_size
    # Center crop as a plain slice (avoids building a torchvision transform
    # per call); uses the same round-half placement rule as CenterCrop.
    top = int(round((w - new_w) / 2.0))
    left = int(round((h - new_h) / 2.0))
    img_new = img[..., top:top + new_w, left:left + new_h]
    return img_new, w_256, h_256
HIPT | HIPT-master/HIPT_4K/hipt_heatmap_utils.py | ### Dependencies
# Base Dependencies
import argparse
import colorsys
from io import BytesIO
import os
import random
import requests
import sys
# LinAlg / Stats / Plotting Dependencies
import cv2
import h5py
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from scipy.stats import rankdata
import skimage.io
from skimage.measure import find_contours
from tqdm import tqdm
import webdataset as wds
# Torch Dependencies
import torch
import torch.nn as nn
import torch.multiprocessing
import torchvision
from torchvision import transforms
from einops import rearrange, repeat
torch.multiprocessing.set_sharing_strategy('file_system')
from attention_visualization_utils import get_patch_attention_scores, tensorbatch2im, concat_scores256
#def concat_scores256(attns, w_256, h_256, size=(256,256)):
# r"""
#
# """
# rank = lambda v: rankdata(v)*100/len(v)
# color_block = [rank(attn.flatten()).reshape(size) for attn in attns]
# color_hm = np.concatenate([
# np.concatenate(color_block[i:(i+h_256)], axis=1)
# for i in range(0,h_256*w_256,h_256)
# ])
# return color_hm
def concat_scores4k(attn, size=(4096, 4096)):
    r"""
    Rank-normalize a single attention map into percentile scores.

    Args:
    - attn (np.array): attention map; flattened, so any shape with
      size[0]*size[1] elements is accepted.
    - size (tuple): output (height, width).

    Returns:
    - np.array of shape `size` with values in (0, 100].
    """
    flat = attn.flatten()
    percentiles = rankdata(flat) * 100 / len(flat)
    return percentiles.reshape(size)
def get_scores256(attns, size=(256,256)):
    r"""
    Rank-normalize the first attention map in `attns` into percentile scores.

    Args:
    - attns (np.array): stack of attention maps; only attns[0] is used.
    - size (tuple): output (height, width).

    Returns:
    - np.array of shape `size` with values in (0, 100].
    """
    # The original ranked every map in `attns` and then discarded all but the
    # first; rank only the one that is actually returned.
    flat = attns[0].flatten()
    return (rankdata(flat) * 100 / len(flat)).reshape(size)
def cmap_map(function, cmap):
    r"""
    Applies function (which should operate on vectors of shape 3: [r, g, b]), on colormap cmap.
    This routine will break any discontinuous points in a colormap.

    Args:
    - function (function): maps an [r, g, b] vector to a new [r, g, b] vector.
    - cmap (matplotlib.colormap): segment-data based (LinearSegmentedColormap) colormap.

    Returns:
    - matplotlib.colormap: new 1024-entry LinearSegmentedColormap.
    """
    # NOTE(review): relies on the private `_segmentdata` attribute, so this
    # only works for LinearSegmentedColormap instances (e.g. matplotlib.cm.jet).
    cdict = cmap._segmentdata
    step_dict = {}
    # First get the list of points where the segments start or end
    for key in ('red', 'green', 'blue'):
        step_dict[key] = list(map(lambda x: x[0], cdict[key]))
    step_list = sum(step_dict.values(), [])
    step_list = np.array(list(set(step_list)))
    # Then compute the LUT, and apply the function to the LUT
    reduced_cmap = lambda step : np.array(cmap(step)[0:3])
    old_LUT = np.array(list(map(reduced_cmap, step_list)))
    new_LUT = np.array(list(map(function, old_LUT)))
    # Now try to make a minimal segment definition of the new LUT:
    # keep a breakpoint if it was a segment boundary, or if the function
    # actually changed the color there.
    cdict = {}
    for i, key in enumerate(['red','green','blue']):
        this_cdict = {}
        for j, step in enumerate(step_list):
            if step in step_dict[key]:
                this_cdict[step] = new_LUT[j, i]
            elif new_LUT[j,i] != old_LUT[j, i]:
                this_cdict[step] = new_LUT[j, i]
        # Each entry becomes (x, y0, y1) with y0 == y1 (no discontinuities kept).
        colorvector = list(map(lambda x: x + (x[1], ), this_cdict.items()))
        colorvector.sort()
        cdict[key] = colorvector
    return matplotlib.colors.LinearSegmentedColormap('colormap', cdict, 1024)
def getConcatImage(imgs, how='horizontal', gap=0):
    r"""
    Function to concatenate list of images (vertical or horizontal).

    Args:
    - imgs (list of PIL.Image): List of PIL Images to concatenate.
    - how (str): How the images are concatenated (either 'horizontal' or 'vertical').
    - gap (int): Gap (in px) between images.

    Return:
    - dst (PIL.Image): Concatenated image result.

    Raises:
    - ValueError: if `how` is neither 'horizontal' nor 'vertical'.
    """
    gap_dist = (len(imgs)-1)*gap

    if how == 'vertical':
        # Canvas is as wide as the widest image, as tall as the sum of heights.
        w, h = np.max([img.width for img in imgs]), np.sum([img.height for img in imgs])
        h += gap_dist
        curr_h = 0
        dst = Image.new('RGBA', (w, h), color=(255, 255, 255, 0))
        for img in imgs:
            dst.paste(img, (0, curr_h))
            curr_h += img.height + gap
    elif how == 'horizontal':
        # Canvas height is the *minimum* height, so taller images are clipped.
        w, h = np.sum([img.width for img in imgs]), np.min([img.height for img in imgs])
        w += gap_dist
        curr_w = 0
        dst = Image.new('RGBA', (w, h), color=(255, 255, 255, 0))
        for idx, img in enumerate(imgs):
            dst.paste(img, (curr_w, 0))
            curr_w += img.width + gap
    else:
        # Previously an invalid `how` fell through to an unbound `dst`
        # (NameError); fail loudly with a clear message instead.
        raise ValueError("how must be 'horizontal' or 'vertical', got %r" % (how,))

    return dst
def add_margin(pil_img, top, right, bottom, left, color):
    r"""
    Pad `pil_img` with a solid-color border of the given per-side widths (px).

    Args:
    - pil_img (PIL.Image): image to pad.
    - top/right/bottom/left (int): padding width per side in pixels.
    - color: fill color in the image's mode.

    Returns:
    - PIL.Image: padded copy of the input.
    """
    src_w, src_h = pil_img.size
    canvas = Image.new(pil_img.mode, (src_w + left + right, src_h + top + bottom), color)
    canvas.paste(pil_img, (left, top))
    return canvas
################################################
# 256 x 256 ("Patch") Attention Heatmap Creation
################################################
def create_patch_heatmaps_indiv(patch, model256, output_dir, fname, threshold=0.5,
                            offset=16, alpha=0.5, cmap=plt.get_cmap('coolwarm'), device256=torch.device('cpu')):
    r"""
    Creates patch heatmaps (saved individually).
    To be refactored!

    Blends attention from the original patch with a copy shifted by `offset`
    px (top-left crop + white padding) so token boundaries do not show.

    Args:
    - patch (PIL.Image): 256 x 256 Image
    - model256 (torch.nn): 256-Level ViT
    - output_dir (str): Save directory / subdirectory
    - fname (str): Naming structure of files
    - threshold (float or None): if set, also save binary-thresholded overlays
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
    - alpha (float): Image blending factor for cv2.addWeighted
    - cmap (matplotlib.pyplot): Colormap for creating heatmaps

    Returns:
    - None
    """
    patch1 = patch.copy()
    # Shifted copy: crop `offset` px off the top-left, pad white bottom-right.
    patch2 = add_margin(patch.crop((16,16,256,256)), top=0, left=0, bottom=16, right=16, color=(255,255,255))
    b256_1, a256_1 = get_patch_attention_scores(patch1, model256, device256=device256)
    # NOTE(review): `b256_1` is reassigned here (the patch2 image batch is
    # discarded); only the attention `a256_2` from the shifted copy is used.
    b256_1, a256_2 = get_patch_attention_scores(patch2, model256, device256=device256)
    save_region = np.array(patch.copy())
    s = 256
    offset_2 = offset

    if threshold != None:
        # Thresholded overlays: attention above `threshold` is colored, the
        # rest shows the raw patch. Values exactly equal to threshold pass through.
        for i in range(6):
            score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
            score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            # Coverage weights: 100 where one copy covers, 200 where both do.
            overlay256 = np.ones_like(score256_2)*100
            overlay256[offset_2:s, offset_2:s] += 100
            score256 = (score256_1+new_score256_2)/overlay256
            mask256 = score256.copy()
            mask256[mask256 < threshold] = 0
            mask256[mask256 > threshold] = 0.95
            color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
            region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            region256_hm[mask256==0] = 0
            img_inverse = save_region.copy()
            img_inverse[mask256 == 0.95] = 0
            Image.fromarray(region256_hm+img_inverse).save(os.path.join(output_dir, '%s_256th[%d].png' % (fname, i)))

    # Continuous heatmaps: one blended image per attention head.
    for i in range(6):
        score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
        score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
        new_score256_2 = np.zeros_like(score256_2)
        new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
        overlay256 = np.ones_like(score256_2)*100
        overlay256[offset_2:s, offset_2:s] += 100
        score256 = (score256_1+new_score256_2)/overlay256
        color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
        region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        Image.fromarray(region256_hm).save(os.path.join(output_dir, '%s_256[%s].png' % (fname, i)))
def create_patch_heatmaps_concat(patch, model256, output_dir, fname, threshold=0.5,
                             offset=16, alpha=0.5, cmap=plt.get_cmap('coolwarm'), device256=torch.device('cpu')):
    r"""
    Creates patch heatmaps (concatenated for easy comparison).
    To be refactored!

    Same scoring/blending as create_patch_heatmaps_indiv, but the six per-head
    images are tiled into a single 2 x 3 grid per output file.

    Args:
    - patch (PIL.Image): 256 x 256 Image
    - model256 (torch.nn): 256-Level ViT
    - output_dir (str): Save directory / subdirectory
    - fname (str): Naming structure of files
    - threshold (float or None): if set, also save a thresholded-overlay grid
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
    - alpha (float): Image blending factor for cv2.addWeighted
    - cmap (matplotlib.pyplot): Colormap for creating heatmaps

    Returns:
    - None
    """
    patch1 = patch.copy()
    # Shifted copy: crop `offset` px off the top-left, pad white bottom-right.
    patch2 = add_margin(patch.crop((16,16,256,256)), top=0, left=0, bottom=16, right=16, color=(255,255,255))
    b256_1, a256_1 = get_patch_attention_scores(patch1, model256, device256=device256)
    # NOTE(review): `b256_1` is reassigned here; only `a256_2` is kept.
    b256_1, a256_2 = get_patch_attention_scores(patch2, model256, device256=device256)
    save_region = np.array(patch.copy())
    s = 256
    offset_2 = offset

    if threshold != None:
        # Build the six thresholded overlays, then tile them 2 rows x 3 cols.
        ths = []
        for i in range(6):
            score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
            score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            overlay256 = np.ones_like(score256_2)*100
            overlay256[offset_2:s, offset_2:s] += 100
            score256 = (score256_1+new_score256_2)/overlay256
            mask256 = score256.copy()
            mask256[mask256 < threshold] = 0
            mask256[mask256 > threshold] = 0.95
            color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
            region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            region256_hm[mask256==0] = 0
            img_inverse = save_region.copy()
            img_inverse[mask256 == 0.95] = 0
            ths.append(region256_hm+img_inverse)

        ths = [Image.fromarray(img) for img in ths]

        getConcatImage([getConcatImage(ths[0:3]),
                    getConcatImage(ths[3:6])], how='vertical').save(os.path.join(output_dir, '%s_256th.png' % (fname)))

    # Build the six continuous heatmaps, then tile them 2 rows x 3 cols.
    hms = []
    for i in range(6):
        score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
        score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
        new_score256_2 = np.zeros_like(score256_2)
        new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
        overlay256 = np.ones_like(score256_2)*100
        overlay256[offset_2:s, offset_2:s] += 100
        score256 = (score256_1+new_score256_2)/overlay256
        color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
        region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        hms.append(region256_hm)

    hms = [Image.fromarray(img) for img in hms]

    getConcatImage([getConcatImage(hms[0:3]),
                getConcatImage(hms[3:6])], how='vertical').save(os.path.join(output_dir, '%s_256hm.png' % (fname)))
################################################
# 4096 x 4096 ("Region") Attention Heatmap Creation
################################################
def get_region_attention_scores(region, model256, model4k, scale=1,
                            device256=torch.device('cpu'),
                            device4k=torch.device('cpu')):
    r"""
    Forward pass in hierarchical model with attention scores saved.
    To be refactored!

    Args:
    - region (PIL.Image): 4096 x 4096 Image
    - model256 (torch.nn): 256-Level ViT
    - model4k (torch.nn): 4096-Level ViT
    - scale (int): How much to scale the output image by (e.g. - scale=4 will resize images to be 1024 x 1024.)

    Returns:
    - np.array: [256, 256/scale, 256/scale, 3] np.array sequence of image patches from the 4K x 4K region.
    - attention_256 (np.array): per-patch ViT-256 attention maps for 256-sized patches.
    - attention_4096 (np.array): ViT-4K attention maps over the 16 x 16 patch grid.
    """
    t = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
        )
    ])

    with torch.no_grad():
        # Unfold the 4K region into 256 patches of [256 x 256]
        # (16 x 16 grid is hard-coded; assumes a 4096 x 4096 input).
        batch_256 = t(region).unsqueeze(0).unfold(2, 256, 256).unfold(3, 256, 256)
        batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h')
        batch_256 = batch_256.to(device256, non_blocking=True)
        features_256 = model256(batch_256)

        # CLS-to-token attention for each of the 256 patches, upsampled
        # from the 16 x 16 token grid to patch resolution (divided by scale).
        attention_256 = model256.get_last_selfattention(batch_256)
        nh = attention_256.shape[1] # number of heads
        attention_256 = attention_256[:, :, 0, 1:].reshape(256, nh, -1)
        attention_256 = attention_256.reshape(256, nh, 16, 16)
        attention_256 = nn.functional.interpolate(attention_256, scale_factor=int(16/scale), mode="nearest").cpu().numpy()

        # Lay the 256 patch tokens out as a [1 x 384 x 16 x 16] grid for ViT-4K.
        features_4096 = features_256.unfold(0, 16, 16).transpose(0,1).unsqueeze(dim=0)
        attention_4096 = model4k.get_last_selfattention(features_4096.detach().to(device4k))
        nh = attention_4096.shape[1] # number of heads
        attention_4096 = attention_4096[0, :, 0, 1:].reshape(nh, -1)
        attention_4096 = attention_4096.reshape(nh, 16, 16)
        attention_4096 = nn.functional.interpolate(attention_4096.unsqueeze(0), scale_factor=int(256/scale), mode="nearest")[0].cpu().numpy()

        if scale != 1:
            batch_256 = nn.functional.interpolate(batch_256, scale_factor=(1/scale), mode="nearest")

    return tensorbatch2im(batch_256), attention_256, attention_4096
def create_hierarchical_heatmaps_indiv(region, model256, model4k, output_dir, fname,
                                   offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm'), threshold=None,
                                   device256=torch.device('cpu'), device4k=torch.device('cpu')):
    r"""
    Creates hierarchical heatmaps (Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps saved individually).
    To be refactored!

    Args:
    - region (PIL.Image): 4096 x 4096 Image
    - model256 (torch.nn): 256-Level ViT
    - model4k (torch.nn): 4096-Level ViT
    - output_dir (str): Save directory / subdirectory
    - fname (str): Naming structure of files
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
    - scale (int): How much to scale the output image by
    - alpha (float): Image blending factor for cv2.addWeighted
    - cmap (matplotlib.pyplot): Colormap for creating heatmaps
    - threshold (float or None): if set, also save thresholded ViT-256 overlays

    Returns:
    - None
    """
    # Shifted copies: crop k*128 px off the top-left, pad white bottom-right.
    region2 = add_margin(region.crop((128,128,4096,4096)),
                     top=0, left=0, bottom=128, right=128, color=(255,255,255))
    region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
                     top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
    # NOTE(review): pads by 128*4 where the crop removes 128*3, making region4
    # 4224 px; the later unfold truncates back to 4096, so the net effect
    # matches 128*3 padding here — but confirm before reusing this pattern.
    region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
                     top=0, left=0, bottom=128*4, right=128*4, color=(255,255,255))

    b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale, device256=device256, device4k=device4k)
    b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale, device256=device256, device4k=device4k)
    b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale, device256=device256, device4k=device4k)
    b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale, device256=device256, device4k=device4k)
    # Shift of each copy in output (scaled) pixels.
    offset_2 = (offset*1)//scale
    offset_3 = (offset*2)//scale
    offset_4 = (offset*3)//scale
    s = 4096//scale
    save_region = np.array(region.resize((s, s)))

    if threshold != None:
        # NOTE(review): concat_scores256 here is called as (attns, size=...),
        # unlike the (attns, w_256, h_256, size=...) form used elsewhere —
        # verify against the imported implementation.
        for i in range(6):
            score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
            score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            overlay256 = np.ones_like(score256_2)*100
            overlay256[offset_2:s, offset_2:s] += 100
            score256 = (score256_1+new_score256_2)/overlay256
            mask256 = score256.copy()
            mask256[mask256 < threshold] = 0
            mask256[mask256 > threshold] = 0.95
            color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
            region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            region256_hm[mask256==0] = 0
            img_inverse = save_region.copy()
            img_inverse[mask256 == 0.95] = 0
            Image.fromarray(region256_hm+img_inverse).save(os.path.join(output_dir, '%s_256th[%d].png' % (fname, i)))

    # Disabled: per-head ViT-4K-only dumps from the unshifted copy.
    if False:
        for j in range(6):
            score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
            score4k = score4k_1 / 100
            color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
            region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            Image.fromarray(region4k_hm).save(os.path.join(output_dir, '%s_4k[%s].png' % (fname, j)))

    # Per-head ViT-4K heatmaps: blend all four shifted score maps, weighting
    # each pixel by how many copies cover it (overlay accumulates 100/copy).
    for j in range(6):
        score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
        score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
        score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
        score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
        new_score4k_2 = np.zeros_like(score4k_2)
        new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
        new_score4k_3 = np.zeros_like(score4k_3)
        new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
        new_score4k_4 = np.zeros_like(score4k_4)
        new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
        overlay4k = np.ones_like(score4k_2)*100
        overlay4k[offset_2:s, offset_2:s] += 100
        overlay4k[offset_3:s, offset_3:s] += 100
        overlay4k[offset_4:s, offset_4:s] += 100
        score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
        color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
        region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        Image.fromarray(region4k_hm).save(os.path.join(output_dir, '%s_1024[%s].png' % (fname, j)))

    # Per-head ViT-256 heatmaps: blend the original and 128-px-shifted copy.
    for i in range(6):
        score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
        score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
        new_score256_2 = np.zeros_like(score256_2)
        new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
        overlay256 = np.ones_like(score256_2)*100
        overlay256[offset_2:s, offset_2:s] += 100
        score256 = (score256_1+new_score256_2)/overlay256
        color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
        region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        Image.fromarray(region256_hm).save(os.path.join(output_dir, '%s_256[%s].png' % (fname, i)))

    # Combined heatmaps: coverage-weighted average of ViT-4K and ViT-256
    # scores, for every (4K head j, 256 head i) pair (6 x 6 output images).
    for j in range(6):
        score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
        score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
        score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
        score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
        new_score4k_2 = np.zeros_like(score4k_2)
        new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
        new_score4k_3 = np.zeros_like(score4k_3)
        new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
        new_score4k_4 = np.zeros_like(score4k_4)
        new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
        overlay4k = np.ones_like(score4k_2)*100
        overlay4k[offset_2:s, offset_2:s] += 100
        overlay4k[offset_3:s, offset_3:s] += 100
        overlay4k[offset_4:s, offset_4:s] += 100
        score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
        for i in range(6):
            score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
            score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            # ViT-256 weights are doubled here (unlike the standalone loop
            # above), changing its relative weight in the combined score.
            overlay256 = np.ones_like(score256_2)*100*2
            overlay256[offset_2:s, offset_2:s] += 100*2
            score256 = (score256_1+new_score256_2)*2/overlay256
            # `factorize` is defined but unused (superseded by the weighted average).
            factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
            score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
            color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
            region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            Image.fromarray(region_hm).save(os.path.join(output_dir, '%s_factorized_4k[%s]_256[%s].png' % (fname, j, i)))
    return
def create_hierarchical_heatmaps_concat(region, model256, model4k, output_dir, fname,
                                  offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm'),
                                  device256=torch.device('cpu'), device4k=torch.device('cpu')):
    r"""
    Creates hierarchical heatmaps (With Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps concatenated for easy comparison)
    To be refactored!
    Args:
    - region (PIL.Image): 4096 x 4096 Image
    - model256 (torch.nn): 256-Level ViT
    - model4k (torch.nn): 4096-Level ViT
    - output_dir (str): Save directory / subdirectory
    - fname (str): Naming structure of files
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
    - scale (int): How much to scale the output image by
    - alpha (float): Image blending factor for cv2.addWeighted
    - cmap (matplotlib.pyplot): Colormap for creating heatmaps
    Returns:
    - None
    """
    # Shifted copies of the region: crop k*128 px off the top-left, then pad the
    # bottom-right back with white, so attention maps computed at several offsets
    # can be shifted back into alignment and averaged.
    region2 = add_margin(region.crop((128,128,4096,4096)),
                     top=0, left=0, bottom=128, right=128, color=(255,255,255))
    region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
                     top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
    # NOTE(review): pads 128*4 although only 128*3 px were cropped — breaks the
    # region2/region3 pattern; looks like a copy-paste slip. Confirm before fixing.
    region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
                     top=0, left=0, bottom=128*4, right=128*4, color=(255,255,255))
    # Attention scores for the original region and each shifted copy.
    b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale, device256=device256, device4k=device4k)
    b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale, device256=device256, device4k=device4k)
    b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale, device256=device256, device4k=device4k)
    b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale, device256=device256, device4k=device4k)
    # Offsets and side length expressed in downscaled-output pixels.
    offset_2 = (offset*1)//scale
    offset_3 = (offset*2)//scale
    offset_4 = (offset*3)//scale
    s = 4096//scale
    save_region = np.array(region.resize((s, s)))
    for j in range(6):  # iterate ViT-4K attention heads
        score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
        score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
        score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
        score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
        # Shift each offset map back to the original frame (zero-padded top-left).
        new_score4k_2 = np.zeros_like(score4k_2)
        new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
        new_score4k_3 = np.zeros_like(score4k_3)
        new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
        new_score4k_4 = np.zeros_like(score4k_4)
        new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
        # Per-pixel divisor: 100 per map that actually covers the pixel
        # (scores appear to be on a 0-100 scale — confirm in concat_scores4k).
        overlay4k = np.ones_like(score4k_2)*100
        overlay4k[offset_2:s, offset_2:s] += 100
        overlay4k[offset_3:s, offset_3:s] += 100
        overlay4k[offset_4:s, offset_4:s] += 100
        score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
        # Heatmap shown for ViT-4K alone uses only the unshifted pass (score4k_1).
        color_block4k = (cmap(score4k_1/100)*255)[:,:,:3].astype(np.uint8)
        region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        for i in range(6):  # iterate ViT-256 attention heads
            score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
            score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            overlay256 = np.ones_like(score256_2)*100*2
            overlay256[offset_2:s, offset_2:s] += 100*2
            score256 = (score256_1+new_score256_2)*2/overlay256
            color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
            region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
            # Weighted blend of the two hierarchy levels (overlay-weighted mean).
            score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
            color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
            region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            # 2x2 annotated canvas: raw | ViT-4K on top, ViT-256 | blended below.
            pad = 100
            canvas = Image.new('RGB', (s*2+pad,)*2, (255,)*3)
            draw = ImageDraw.Draw(canvas)
            font = ImageFont.truetype("FreeMono.ttf", 50)
            draw.text((1024*0.5-pad*2, pad//4), "ViT-256 (Head: %d)" % i, (0, 0, 0), font=font)
            canvas = canvas.rotate(90)
            draw = ImageDraw.Draw(canvas)
            draw.text((1024*1.5-pad, pad//4), "ViT-4K (Head: %d)" % j, (0, 0, 0), font=font)
            canvas.paste(Image.fromarray(save_region), (pad,pad))
            canvas.paste(Image.fromarray(region4k_hm), (1024+pad,pad))
            canvas.paste(Image.fromarray(region256_hm), (pad,1024+pad))
            canvas.paste(Image.fromarray(region_hm), (s+pad,s+pad))
            canvas.save(os.path.join(output_dir, '%s_4k[%s]_256[%s].png' % (fname, j, i)))
    return
def create_hierarchical_heatmaps_concat_select(region, model256, model4k, output_dir, fname,
                                  offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm'),
                                  device256=torch.device('cpu'), device4k=torch.device('cpu')):
    r"""
    Creates hierarchical heatmaps (With Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps concatenated for easy comparison), with only select attention heads are used.
    To be refactored!
    Args:
    - region (PIL.Image): 4096 x 4096 Image
    - model256 (torch.nn): 256-Level ViT
    - model4k (torch.nn): 4096-Level ViT
    - output_dir (str): Save directory / subdirectory
    - fname (str): Naming structure of files
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
    - scale (int): How much to scale the output image by
    - alpha (float): Image blending factor for cv2.addWeighted
    - cmap (matplotlib.pyplot): Colormap for creating heatmaps
    Returns:
    - None
    """
    # Shifted copies of the region for multi-offset attention averaging
    # (same scheme as create_hierarchical_heatmaps_concat).
    region2 = add_margin(region.crop((128,128,4096,4096)),
                     top=0, left=0, bottom=128, right=128, color=(255,255,255))
    region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
                     top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
    # NOTE(review): pads 128*4 although only 128*3 px were cropped — likely a
    # copy-paste slip mirrored from the sibling function. Confirm.
    region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
                     top=0, left=0, bottom=128*4, right=128*4, color=(255,255,255))
    b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale, device256=device256, device4k=device4k)
    b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale, device256=device256, device4k=device4k)
    b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale, device256=device256, device4k=device4k)
    b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale, device256=device256, device4k=device4k)
    offset_2 = (offset*1)//scale
    offset_3 = (offset*2)//scale
    offset_4 = (offset*3)//scale
    s = 4096//scale
    save_region = np.array(region.resize((s, s)))
    # 2x3 image grid: [raw | 4K-head maps] on top, [256-head | blended] below.
    canvas = [[Image.fromarray(save_region), None, None], [None, None, None]]
    for idx_4k, j in enumerate([0,5]):  # only selected ViT-4K heads 0 and 5
        score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
        score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
        score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
        score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
        # Realign each shifted pass into the original frame.
        new_score4k_2 = np.zeros_like(score4k_2)
        new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
        new_score4k_3 = np.zeros_like(score4k_3)
        new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
        new_score4k_4 = np.zeros_like(score4k_4)
        new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
        overlay4k = np.ones_like(score4k_2)*100
        overlay4k[offset_2:s, offset_2:s] += 100
        overlay4k[offset_3:s, offset_3:s] += 100
        overlay4k[offset_4:s, offset_4:s] += 100
        score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
        # ViT-4K panel uses only the unshifted pass (score4k_1).
        color_block4k = (cmap(score4k_1/100)*255)[:,:,:3].astype(np.uint8)
        region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        canvas[0][idx_4k+1] = Image.fromarray(region4k_hm)
        for idx_256, i in enumerate([2]):  # only selected ViT-256 head 2
            score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
            score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            overlay256 = np.ones_like(score256_2)*100*2
            overlay256[offset_2:s, offset_2:s] += 100*2
            score256 = (score256_1+new_score256_2)*2/overlay256
            color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
            region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            canvas[idx_256+1][0] = Image.fromarray(region256_hm)
            factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
            # Overlay-weighted blend of both hierarchy levels.
            score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
            color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
            region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            canvas[idx_256+1][idx_4k+1] = Image.fromarray(region_hm)
    # Stitch the grid row-by-row, then stack the rows vertically, and save.
    canvas = getConcatImage([getConcatImage(row) for row in canvas], how='vertical')
    canvas.save(os.path.join(output_dir, '%s_heatmap.png' % (fname)))
    return
HIPT | HIPT-master/HIPT_4K/vision_transformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
from functools import partial
import torch
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    """In-place truncated-normal initialization; returns the filled tensor.

    Thin public wrapper around :func:`_no_grad_trunc_normal_`.
    """
    return _no_grad_trunc_normal_(tensor, mean=mean, std=std, a=a, b=b)
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Stochastic depth: zero whole samples with prob ``drop_prob`` during
    training, rescaling survivors by ``1/keep_prob``; identity otherwise."""
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize: 1 with prob keep_prob, else 0
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Module wrapper for per-sample stochastic depth (see :func:`drop_path`)."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        """Apply stochastic depth; active only while ``self.training`` is True."""
        return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> dropout ->
    Linear -> dropout. Hidden/output widths default to ``in_features``."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        # Attribute names kept (fc1/act/fc2/drop) for state_dict compatibility.
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Apply the two-layer MLP with dropout after each linear layer."""
        return self.drop(self.fc2(self.drop(self.act(self.fc1(x)))))
class Attention(nn.Module):
    """Multi-head self-attention that returns both the projected output and
    the attention map (used for attention visualization)."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scale is 1/sqrt(head_dim) unless explicitly overridden.
        self.scale = qk_scale or head_dim ** -0.5
        # Attribute names kept (qkv/proj/...) for state_dict compatibility.
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        """Return ``(out, attn)`` where out is (B, N, C) and attn is
        (B, num_heads, N, N) post-dropout attention weights."""
        B, N, C = x.shape
        packed = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
        q, k, v = packed.permute(2, 0, 3, 1, 4).unbind(0)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        attn = self.attn_drop(scores.softmax(dim=-1))
        ctx = (attn @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(ctx)), attn
class Block(nn.Module):
    """Pre-norm transformer encoder block: MHSA + MLP, each wrapped in a
    residual connection with optional stochastic depth."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        # Attribute names/order kept (norm1/attn/drop_path/norm2/mlp) for
        # state_dict compatibility.
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)

    def forward(self, x, return_attention=False):
        """Run the block; with ``return_attention=True`` return only the
        attention map of this block instead of the transformed tokens."""
        attn_out, attn_weights = self.attn(self.norm1(x))
        if return_attention:
            return attn_weights
        x = x + self.drop_path(attn_out)
        return x + self.drop_path(self.mlp(self.norm2(x)))
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding: split a square image into non-overlapping
    patches and linearly embed each patch via a strided convolution. """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = (img_size // patch_size) ** 2
        # Conv with kernel == stride == patch_size acts as a per-patch Linear.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        """Map (B, C, H, W) -> (B, num_patches, embed_dim)."""
        B, C, H, W = x.shape
        embedded = self.proj(x)  # (B, embed_dim, H/ps, W/ps)
        return embedded.flatten(2).transpose(1, 2)
class VisionTransformer(nn.Module):
    """ Vision Transformer.

    Patch-embeds an image, prepends a learnable [CLS] token, adds positional
    embeddings, and runs a stack of pre-norm transformer blocks. `forward`
    returns the final [CLS] embedding (the `head` classifier is Identity when
    num_classes == 0).
    """
    def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
        # NOTE: img_size is a list; only img_size[0] is used (square inputs).
        super().__init__()
        self.num_features = self.embed_dim = embed_dim
        self.patch_embed = PatchEmbed(
            img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches
        # Learnable [CLS] token plus (1 + num_patches) positional embeddings.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # Classifier head
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for Linear weights; unit/zero init for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def interpolate_pos_encoding(self, x, w, h):
        """Bicubically resize the (square) patch positional-embedding grid so a
        model trained at one resolution can run on another; the [CLS] position
        embedding is passed through unchanged."""
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1] - 1
        if npatch == N and w == h:
            return self.pos_embed
        class_pos_embed = self.pos_embed[:, 0]
        patch_pos_embed = self.pos_embed[:, 1:]
        dim = x.shape[-1]
        w0 = w // self.patch_embed.patch_size
        h0 = h // self.patch_embed.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + 0.1, h0 + 0.1
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
            mode='bicubic',
        )
        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
    def prepare_tokens(self, x):
        """Patch-embed, prepend [CLS], add positional encoding, apply dropout."""
        B, nc, w, h = x.shape
        x = self.patch_embed(x)  # patch linear embedding
        # add the [CLS] token to the embed patch tokens
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        # add positional encoding to each token
        x = x + self.interpolate_pos_encoding(x, w, h)
        return self.pos_drop(x)
    def forward(self, x):
        """Return the final normalized [CLS] embedding of shape (B, embed_dim)."""
        x = self.prepare_tokens(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x[:, 0]
    def get_last_selfattention(self, x):
        """Return the attention map of the last block (for visualization)."""
        x = self.prepare_tokens(x)
        for i, blk in enumerate(self.blocks):
            if i < len(self.blocks) - 1:
                x = blk(x)
            else:
                # return attention of the last block
                return blk(x, return_attention=True)
    def get_intermediate_layers(self, x, n=1):
        """Return the normalized token outputs of the last `n` blocks."""
        x = self.prepare_tokens(x)
        # we return the output tokens from the `n` last blocks
        output = []
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if len(self.blocks) - i <= n:
                output.append(self.norm(x))
        return output
def vit_tiny(patch_size=16, **kwargs):
    """ViT-Tiny: 192-dim, 12 blocks, 3 heads, MLP ratio 4, eps=1e-6 LayerNorm."""
    norm = partial(nn.LayerNorm, eps=1e-6)
    return VisionTransformer(
        patch_size=patch_size, embed_dim=192, depth=12, num_heads=3,
        mlp_ratio=4, qkv_bias=True, norm_layer=norm, **kwargs)
def vit_small(patch_size=16, **kwargs):
    """ViT-Small: 384-dim, 12 blocks, 6 heads, MLP ratio 4, eps=1e-6 LayerNorm."""
    norm = partial(nn.LayerNorm, eps=1e-6)
    return VisionTransformer(
        patch_size=patch_size, embed_dim=384, depth=12, num_heads=6,
        mlp_ratio=4, qkv_bias=True, norm_layer=norm, **kwargs)
def vit_base(patch_size=16, **kwargs):
    """ViT-Base: 768-dim, 12 blocks, 12 heads, MLP ratio 4, eps=1e-6 LayerNorm."""
    norm = partial(nn.LayerNorm, eps=1e-6)
    return VisionTransformer(
        patch_size=patch_size, embed_dim=768, depth=12, num_heads=12,
        mlp_ratio=4, qkv_bias=True, norm_layer=norm, **kwargs)
class DINOHead(nn.Module):
    """Projection head: an MLP bottleneck (in_dim -> hidden_dim -> ... ->
    bottleneck_dim), L2 normalization, then a weight-normalized bias-free
    linear output layer of out_dim prototypes."""
    def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
        super().__init__()
        nlayers = max(nlayers, 1)
        if nlayers == 1:
            # Degenerate case: single linear projection to the bottleneck.
            self.mlp = nn.Linear(in_dim, bottleneck_dim)
        else:
            # nlayers-deep MLP with GELU (and optional BatchNorm) between layers.
            layers = [nn.Linear(in_dim, hidden_dim)]
            if use_bn:
                layers.append(nn.BatchNorm1d(hidden_dim))
            layers.append(nn.GELU())
            for _ in range(nlayers - 2):
                layers.append(nn.Linear(hidden_dim, hidden_dim))
                if use_bn:
                    layers.append(nn.BatchNorm1d(hidden_dim))
                layers.append(nn.GELU())
            layers.append(nn.Linear(hidden_dim, bottleneck_dim))
            self.mlp = nn.Sequential(*layers)
        self.apply(self._init_weights)
        # Weight-normalized output layer: magnitude (weight_g) pinned to 1 and,
        # when norm_last_layer is True, frozen so only directions are learned.
        self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
        self.last_layer.weight_g.data.fill_(1)
        if norm_last_layer:
            self.last_layer.weight_g.requires_grad = False
    def _init_weights(self, m):
        # Truncated-normal init for Linear weights, zero-init biases.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        x = self.mlp(x)
        # L2-normalize the bottleneck embedding before the prototype projection.
        x = nn.functional.normalize(x, dim=-1, p=2)
        x = self.last_layer(x)
        return x
| 12,706 | 37.389728 | 124 | py |
HIPT | HIPT-master/HIPT_4K/vision_transformer4k.py | import argparse
import os
import sys
import datetime
import time
import math
import json
from pathlib import Path
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torchvision import models as torchvision_models
import vision_transformer as vits
from vision_transformer import DINOHead
import math
from functools import partial
import torch
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    """Truncated-normal init (in-place); delegates to _no_grad_trunc_normal_."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Per-sample stochastic depth: during training, zero each sample with
    probability ``drop_prob`` and rescale the rest by ``1/keep_prob``."""
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # Single Bernoulli mask per sample, broadcast across the remaining dims.
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    gate = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    gate.floor_()  # -> 0/1
    return x.div(keep_prob) * gate
class DropPath(nn.Module):
    """nn.Module form of stochastic depth; see the module-level drop_path()."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # Only drops while self.training is True.
        return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
    """Feed-forward block (Linear -> act -> drop -> Linear -> drop); hidden and
    output widths fall back to ``in_features`` when not given."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        # fc1/act/fc2/drop names preserved for checkpoint compatibility.
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Two-layer MLP with dropout after each linear layer."""
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class Attention(nn.Module):
    """Multi-head self-attention returning both the output tokens and the
    post-dropout attention weights."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5  # 1/sqrt(head_dim) by default
        # qkv/proj names preserved for checkpoint compatibility.
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        """Return ``(out, attn)``: out (B, N, C), attn (B, num_heads, N, N)."""
        B, N, C = x.shape
        packed = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
        q, k, v = packed.permute(2, 0, 3, 1, 4).unbind(0)
        attn = ((q @ k.transpose(-2, -1)) * self.scale).softmax(dim=-1)
        attn = self.attn_drop(attn)
        ctx = (attn @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(ctx)), attn
class Block(nn.Module):
    """Pre-norm transformer block: attention and MLP sub-layers, each with a
    residual connection and optional stochastic depth."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        # norm1/attn/drop_path/norm2/mlp order preserved for checkpoints.
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)

    def forward(self, x, return_attention=False):
        """Transform tokens; with ``return_attention=True`` return only the
        attention weights of this block."""
        attn_out, attn_weights = self.attn(self.norm1(x))
        if return_attention:
            return attn_weights
        x = x + self.drop_path(attn_out)
        return x + self.drop_path(self.mlp(self.norm2(x)))
class VisionTransformer4K(nn.Module):
    """ Vision Transformer 4K.

    Operates on a grid of precomputed patch embeddings (input_embed_dim
    features per 16x16 cell) rather than raw pixels: a small MLP (`phi`)
    projects them to output_embed_dim, then a standard ViT stack with a [CLS]
    token is applied. `forward` returns the final [CLS] embedding.
    """
    def __init__(self, num_classes=0, img_size=[224], input_embed_dim=384, output_embed_dim = 192,
                 depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, num_prototypes=64, **kwargs):
        # NOTE(review): num_prototypes is accepted but never used here.
        super().__init__()
        embed_dim = output_embed_dim
        self.num_features = self.embed_dim = embed_dim
        # phi: project incoming patch features to the working embed dim.
        self.phi = nn.Sequential(*[nn.Linear(input_embed_dim, output_embed_dim), nn.GELU(), nn.Dropout(p=drop_rate)])
        # One token per 16x16 cell of the (square) input grid.
        num_patches = int(img_size[0] // 16)**2
        print("# of Patches:", num_patches)
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # Classifier head
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for Linear weights; unit/zero init for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def interpolate_pos_encoding(self, x, w, h):
        """Bicubically resize the patch positional-embedding grid to the input
        grid size; [CLS] position embedding is passed through unchanged."""
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1] - 1
        if npatch == N and w == h:
            return self.pos_embed
        class_pos_embed = self.pos_embed[:, 0]
        patch_pos_embed = self.pos_embed[:, 1:]
        dim = x.shape[-1]
        # Inputs are already token grids, so no patch-size division (// 1).
        w0 = w // 1
        h0 = h // 1
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + 0.1, h0 + 0.1
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
            mode='bicubic',
        )
        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
    def prepare_tokens(self, x):
        """Flatten the (B, embed_dim, w, h) feature grid to tokens, project via
        phi, prepend [CLS], and add positional encoding."""
        #print('preparing tokens (after crop)', x.shape)
        self.mpp_feature = x  # stash raw input features (side effect)
        B, embed_dim, w, h = x.shape
        x = x.flatten(2, 3).transpose(1,2)
        x = self.phi(x)
        # add the [CLS] token to the embed patch tokens
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        # add positional encoding to each token
        x = x + self.interpolate_pos_encoding(x, w, h)
        return self.pos_drop(x)
    def forward(self, x):
        """Return the final normalized [CLS] embedding of shape (B, embed_dim)."""
        x = self.prepare_tokens(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x[:, 0]
    def get_last_selfattention(self, x):
        """Return the attention map of the last block (for visualization)."""
        x = self.prepare_tokens(x)
        for i, blk in enumerate(self.blocks):
            if i < len(self.blocks) - 1:
                x = blk(x)
            else:
                # return attention of the last block
                return blk(x, return_attention=True)
    def get_intermediate_layers(self, x, n=1):
        """Return the normalized token outputs of the last `n` blocks."""
        x = self.prepare_tokens(x)
        # we return the output tokens from the `n` last blocks
        output = []
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if len(self.blocks) - i <= n:
                output.append(self.norm(x))
        return output
def vit4k_xs(patch_size=16, **kwargs):
    """ViT-4K XS: 384 -> 192 dim projection, 6 blocks, 6 heads, MLP ratio 4."""
    norm = partial(nn.LayerNorm, eps=1e-6)
    return VisionTransformer4K(
        patch_size=patch_size, input_embed_dim=384, output_embed_dim=192,
        depth=6, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=norm, **kwargs)
def count_parameters(model):
    """Number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
| 10,172 | 35.858696 | 123 | py |
HIPT | HIPT-master/HIPT_4K/hipt_model_utils.py | ### Dependencies
# Base Dependencies
import argparse
import colorsys
from io import BytesIO
import os
import random
import requests
import sys
# LinAlg / Stats / Plotting Dependencies
import cv2
import h5py
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from scipy.stats import rankdata
import skimage.io
from skimage.measure import find_contours
from tqdm import tqdm
import webdataset as wds
# Torch Dependencies
import torch
import torch.multiprocessing
import torchvision
from torchvision import transforms
from einops import rearrange, repeat
torch.multiprocessing.set_sharing_strategy('file_system')
# Local Dependencies
import vision_transformer as vits
import vision_transformer4k as vits4k
def get_vit256(pretrained_weights, arch='vit_small', device=torch.device('cuda:0')):
    r"""
    Builds ViT-256 Model.
    Args:
    - pretrained_weights (str): Path to ViT-256 Model Checkpoint.
    - arch (str): Which model architecture.
    - device (torch): Torch device to save model.
    Returns:
    - model256 (torch.nn): Initialized model.
    """
    # Checkpoint stores the weights under this key (DINO-style teacher/student).
    checkpoint_key = 'teacher'
    # NOTE(review): the `device` argument is unconditionally overridden to CPU
    # here, so the cuda:0 default is never honored — confirm if intentional.
    device = torch.device("cpu")
    model256 = vits.__dict__[arch](patch_size=16, num_classes=0)
    # Freeze all weights and put the model in inference mode.
    for p in model256.parameters():
        p.requires_grad = False
    model256.eval()
    model256.to(device)
    # If no checkpoint exists at the given path, return the randomly-init model.
    if os.path.isfile(pretrained_weights):
        state_dict = torch.load(pretrained_weights, map_location="cpu")
        if checkpoint_key is not None and checkpoint_key in state_dict:
            print(f"Take key {checkpoint_key} in provided checkpoint dict")
            state_dict = state_dict[checkpoint_key]
        # remove `module.` prefix
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        # remove `backbone.` prefix induced by multicrop wrapper
        state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
        msg = model256.load_state_dict(state_dict, strict=False)
        print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
    return model256
def get_vit4k(pretrained_weights, arch='vit4k_xs', device=torch.device('cuda:1')):
    r"""
    Builds ViT-4K Model.
    Args:
    - pretrained_weights (str): Path to ViT-4K Model Checkpoint.
    - arch (str): Which model architecture.
    - device (torch): Torch device to save model.
    Returns:
    - model256 (torch.nn): Initialized model.
    """
    # Checkpoint stores the weights under this key (DINO-style teacher/student).
    checkpoint_key = 'teacher'
    # NOTE(review): the `device` argument is unconditionally overridden to CPU
    # here, so the cuda:1 default is never honored — confirm if intentional.
    device = torch.device("cpu")
    model4k = vits4k.__dict__[arch](num_classes=0)
    # Freeze all weights and put the model in inference mode.
    for p in model4k.parameters():
        p.requires_grad = False
    model4k.eval()
    model4k.to(device)
    # If no checkpoint exists at the given path, return the randomly-init model.
    if os.path.isfile(pretrained_weights):
        state_dict = torch.load(pretrained_weights, map_location="cpu")
        if checkpoint_key is not None and checkpoint_key in state_dict:
            print(f"Take key {checkpoint_key} in provided checkpoint dict")
            state_dict = state_dict[checkpoint_key]
        # remove `module.` prefix
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        # remove `backbone.` prefix induced by multicrop wrapper
        state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
        msg = model4k.load_state_dict(state_dict, strict=False)
        print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
    return model4k
def eval_transforms():
    """Evaluation preprocessing: convert a PIL image to a tensor and normalize
    every channel with mean=std=0.5 (maps pixel values to [-1, 1])."""
    mean = (0.5, 0.5, 0.5)
    std = (0.5, 0.5, 0.5)
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean = mean, std = std),
    ])
def roll_batch2img(batch: torch.Tensor, w: int, h: int, patch_size=256):
    """
    Rolls an image tensor batch (batch of [256 x 256] images) into a [W x H] Pil.Image object.
    Args:
        batch (torch.Tensor): [B x 3 x 256 x 256] image tensor batch (B == w*h).
    Return:
        Image.PIL: [W x H X 3] Image.
    """
    # View the flat batch as a (w, h) grid of patches, then stitch the grid
    # into a single image plane per channel.
    grid = batch.reshape(w, h, 3, patch_size, patch_size)
    stitched = rearrange(grid, 'p1 p2 c w h-> c (p1 w) (p2 h)').unsqueeze(dim=0)
    return Image.fromarray(tensorbatch2im(stitched)[0])
def tensorbatch2im(input_image, imtype=np.uint8):
    r""""
    Converts a Tensor array into a numpy image array.
    Args:
        - input_image (torch.Tensor): (B, C, W, H) Torch Tensor.
        - imtype (type): the desired type of the converted numpy array
    Returns:
        - image_numpy (np.array): (B, W, H, C) Numpy Array.
    """
    if isinstance(input_image, np.ndarray):
        # Already a numpy array: only cast the dtype.
        image_numpy = input_image
    else:
        # Torch tensor: move to CPU, reorder to channels-last, and map the
        # [-1, 1] normalized range back to [0, 255].
        image_numpy = input_image.cpu().float().numpy()
        image_numpy = (np.transpose(image_numpy, (0, 2, 3, 1)) + 1) / 2.0 * 255.0
    return image_numpy.astype(imtype)
| 5,125 | 32.503268 | 122 | py |
HIPT | HIPT-master/HIPT_4K/attention_visualization_utils.py | ### Dependencies
import argparse
import colorsys
from io import BytesIO
import os
import random
import requests
import sys
import cv2
import h5py
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numpy as np
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from scipy.stats import rankdata
import skimage.io
from skimage.measure import find_contours
from tqdm import tqdm
import webdataset as wds
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms as pth_transforms
import torchvision.transforms as transforms
from einops import rearrange, repeat
sys.path.append('../')
sys.path.append('../Hierarchical-Pretraining/')
import vision_transformer as vits
import vision_transformer4k as vits4k
def get_vit256(pretrained_weights, arch='vit_small', device=torch.device('cpu')):
    r"""
    Builds ViT-256 Model.
    Args:
    - pretrained_weights (str): Path to ViT-256 Model Checkpoint.
    - arch (str): Which model architecture.
    - device (torch): Torch device to save model.
    Returns:
    - model256 (torch.nn): Initialized model.
    """
    checkpoint_key = 'teacher'
    # NOTE(review): the `device` argument is immediately overridden — the model
    # is always placed on CPU here. Confirm this override is intentional.
    device = torch.device("cpu")
    model256 = vits.__dict__[arch](patch_size=16, num_classes=0)
    # Inference-only model: freeze all parameters.
    for p in model256.parameters():
        p.requires_grad = False
    model256.eval()
    model256.to(device)
    if os.path.isfile(pretrained_weights):
        state_dict = torch.load(pretrained_weights, map_location="cpu")
        if checkpoint_key is not None and checkpoint_key in state_dict:
            print(f"Take key {checkpoint_key} in provided checkpoint dict")
            state_dict = state_dict[checkpoint_key]
        # remove `module.` prefix
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        # remove `backbone.` prefix induced by multicrop wrapper
        state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
        # strict=False: tolerate checkpoint keys absent from the backbone (e.g. DINO head).
        msg = model256.load_state_dict(state_dict, strict=False)
        print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
    return model256
def get_vit4k(pretrained_weights, arch='vit4k_xs', device=torch.device('cpu')):
    r"""
    Builds ViT-4K Model.
    Args:
    - pretrained_weights (str): Path to ViT-4K Model Checkpoint.
    - arch (str): Which model architecture.
    - device (torch): Torch device to save model.
    Returns:
    - model4k (torch.nn): Initialized model.
    """
    checkpoint_key = 'teacher'
    # `device` is re-assigned to CPU regardless of the argument's value.
    device = torch.device("cpu")
    model4k = vits4k.__dict__[arch](num_classes=0)
    # Inference-only model: freeze all parameters.
    for p in model4k.parameters():
        p.requires_grad = False
    model4k.eval()
    model4k.to(device)
    if os.path.isfile(pretrained_weights):
        state_dict = torch.load(pretrained_weights, map_location="cpu")
        if checkpoint_key is not None and checkpoint_key in state_dict:
            print(f"Take key {checkpoint_key} in provided checkpoint dict")
            state_dict = state_dict[checkpoint_key]
        # remove `module.` prefix
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
        # remove `backbone.` prefix induced by multicrop wrapper
        state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
        # strict=False: tolerate checkpoint keys absent from the backbone (e.g. DINO head).
        msg = model4k.load_state_dict(state_dict, strict=False)
        print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
    return model4k
def cmap_map(function, cmap):
    r"""
    Applies function (which should operate on vectors of shape 3: [r, g, b]), on colormap cmap.
    This routine will break any discontinuous points in a colormap.
    Args:
    - function (function)
    - cmap (matplotlib.colormap)
    Returns:
    - matplotlib.colormap
    """
    # NOTE(review): relies on the private `_segmentdata` attribute, so this
    # only works with LinearSegmentedColormap instances.
    cdict = cmap._segmentdata
    step_dict = {}
    # First get the list of points where the segments start or end
    for key in ('red', 'green', 'blue'):
        step_dict[key] = list(map(lambda x: x[0], cdict[key]))
    step_list = sum(step_dict.values(), [])
    step_list = np.array(list(set(step_list)))
    # Then compute the LUT, and apply the function to the LUT
    reduced_cmap = lambda step : np.array(cmap(step)[0:3])
    old_LUT = np.array(list(map(reduced_cmap, step_list)))
    new_LUT = np.array(list(map(function, old_LUT)))
    # Now try to make a minimal segment definition of the new LUT
    cdict = {}
    for i, key in enumerate(['red','green','blue']):
        this_cdict = {}
        for j, step in enumerate(step_list):
            # Keep a control point if it was an original breakpoint for this
            # channel, or if the transform actually changed its value.
            if step in step_dict[key]:
                this_cdict[step] = new_LUT[j, i]
            elif new_LUT[j,i] != old_LUT[j, i]:
                this_cdict[step] = new_LUT[j, i]
        # Duplicate the value into (x, y0, y1) triples expected by matplotlib.
        colorvector = list(map(lambda x: x + (x[1], ), this_cdict.items()))
        colorvector.sort()
        cdict[key] = colorvector
    return matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024)
def identity(x):
    r"""
    Identity function: returns its argument unchanged.
    Useful as a no-op placeholder (e.g. for `cmap_map`).
    """
    return x
def tensorbatch2im(input_image, imtype=np.uint8):
    r""""
    Converts a Tensor array into a numpy image array.
    Args:
        - input_image (torch.Tensor): (B, C, W, H) Torch Tensor.
        - imtype (type): the desired type of the converted numpy array
    Returns:
        - image_numpy (np.array): (B, W, H, C) Numpy Array.
    """
    if not isinstance(input_image, np.ndarray):
        # Torch tensor path: detach to CPU, go channels-last, then rescale
        # the normalized [-1, 1] values into [0, 255].
        arr = input_image.cpu().float().numpy()
        arr = np.transpose(arr, (0, 2, 3, 1))
        arr = (arr + 1) / 2.0 * 255.0
        return arr.astype(imtype)
    # Numpy input: pass through with only a dtype cast.
    return input_image.astype(imtype)
def getConcatImage(imgs, how='horizontal', gap=0):
    r"""
    Function to concatenate list of images (vertical or horizontal).
    Args:
    - imgs (list of PIL.Image): List of PIL Images to concatenate.
    - how (str): How the images are concatenated (either 'horizontal' or 'vertical')
    - gap (int): Gap (in px) between images
    Return:
    - dst (PIL.Image): Concatenated image result.

    NOTE(review): if `how` is neither 'horizontal' nor 'vertical', `dst` is
    never assigned and the final return raises NameError.
    """
    # Total padding contributed by the gaps between consecutive images.
    gap_dist = (len(imgs)-1)*gap
    if how == 'vertical':
        # Canvas: as wide as the widest image, tall enough for all + gaps.
        w, h = np.max([img.width for img in imgs]), np.sum([img.height for img in imgs])
        h += gap_dist
        curr_h = 0
        dst = Image.new('RGBA', (w, h), color=(255, 255, 255, 0))
        for img in imgs:
            dst.paste(img, (0, curr_h))
            curr_h += img.height + gap
    elif how == 'horizontal':
        # Canvas: wide enough for all images + gaps; height of the SHORTEST
        # image (taller images are clipped at the bottom).
        w, h = np.sum([img.width for img in imgs]), np.min([img.height for img in imgs])
        w += gap_dist
        curr_w = 0
        dst = Image.new('RGBA', (w, h), color=(255, 255, 255, 0))
        for idx, img in enumerate(imgs):
            dst.paste(img, (curr_w, 0))
            curr_w += img.width + gap
    return dst
def add_margin(pil_img, top, right, bottom, left, color):
    r"""
    Pads a PIL.Image with a solid-color margin on each side and returns
    the padded copy (the input image is not modified).
    """
    width, height = pil_img.size
    padded_size = (width + left + right, height + top + bottom)
    padded = Image.new(pil_img.mode, padded_size, color)
    # Anchor the original image inside the new canvas past the left/top margins.
    padded.paste(pil_img, (left, top))
    return padded
def concat_scores256(attns, size=(256,256)):
    r"""
    Percentile-normalize each of 256 patch-level attention maps (rank of each
    pixel within its own map, scaled to [100/n, 100]) and stitch them into a
    single 16 x 16 grid heatmap.
    """
    def to_percentile(attn):
        flat = attn.flatten()
        return (rankdata(flat) * 100 / len(flat)).reshape(size)

    blocks = [to_percentile(attn) for attn in attns]
    # 16 row strips of 16 blocks each, stacked vertically.
    rows = [np.concatenate(blocks[start:start + 16], axis=1)
            for start in range(0, 256, 16)]
    return np.concatenate(rows)
def concat_scores4k(attn, size=(4096, 4096)):
    r"""
    Percentile-normalize a single region-level attention map: each pixel is
    replaced by its rank within the map, scaled to [100/n, 100], and the
    result is reshaped to `size`.
    """
    flat = attn.flatten()
    percentile = rankdata(flat) * 100 / len(flat)
    return percentile.reshape(size)
def get_scores256(attns, size=(256,256)):
    r"""
    Percentile-normalize the FIRST attention map in `attns`.

    Args:
    - attns: sequence of attention maps (np.array-like); only attns[0] is used.
    - size (tuple): output shape of the normalized map.
    Returns:
    - np.array of shape `size`, values in (0, 100].
    """
    rank = lambda v: rankdata(v)*100/len(v)
    # Only the first map is returned, so rank only that one (the original
    # implementation ranked every map in the list and discarded all but [0]).
    return rank(attns[0].flatten()).reshape(size)
def get_patch_attention_scores(patch, model256, scale=1, device256=torch.device('cpu')):
    r"""
    Forward pass in ViT-256 model with attention scores saved.
    Args:
    - patch (PIL.Image): 256 x 256 Image
    - model256 (torch.nn): 256-Level ViT
    - scale (int): How much to scale the output image by (e.g. - scale=4 will resize images to be 1024 x 1024.)
    Returns:
    - tensorbatch2im(batch_256) (np.array): [1, 256/scale, 256/scale, 3] array of the (possibly downscaled) patch.
    - attention_256 (np.array): per-head CLS attention maps for the patch.
    """
    # Same [-1, 1] normalization the models were trained with.
    t = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
        )
    ])
    with torch.no_grad():
        batch_256 = t(patch).unsqueeze(0)
        batch_256 = batch_256.to(device256, non_blocking=True)
        features_256 = model256(batch_256)
        attention_256 = model256.get_last_selfattention(batch_256)
        nh = attention_256.shape[1] # number of head
        # CLS-token attention over the 16 x 16 grid of 16px sub-patches
        # (drop the CLS->CLS entry with `1:`).
        attention_256 = attention_256[:, :, 0, 1:].reshape(256, nh, -1)
        attention_256 = attention_256.reshape(1, nh, 16, 16)
        # Upsample attention from 16x16 back to pixel resolution (divided by scale).
        attention_256 = nn.functional.interpolate(attention_256, scale_factor=int(16/scale), mode="nearest").cpu().numpy()
        if scale != 1:
            batch_256 = nn.functional.interpolate(batch_256, scale_factor=(1/scale), mode="nearest")
        return tensorbatch2im(batch_256), attention_256
def create_patch_heatmaps_indiv(patch, model256, output_dir, fname, threshold=0.5,
    offset=16, alpha=0.5, cmap=plt.get_cmap('coolwarm'), device256=torch.device('cpu')):
    r"""
    Creates patch heatmaps (saved individually)
    Args:
    - patch (PIL.Image): 256 x 256 Image
    - model256 (torch.nn): 256-Level ViT
    - output_dir (str): Save directory / subdirectory
    - fname (str): Naming structure of files
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
    - alpha (float): Image blending factor for cv2.addWeighted
    - cmap (matplotlib.pyplot): Colormap for creating heatmaps
    Returns:
    - None
    """
    # Two views of the same patch: the original and a 16px-shifted copy.
    # Their attention maps are averaged over the overlap to reduce artifacts.
    patch1 = patch.copy()
    patch2 = add_margin(patch.crop((16,16,256,256)), top=0, left=0, bottom=16, right=16, color=(255,255,255))
    b256_1, a256_1 = get_patch_attention_scores(patch1, model256, device256=device256)
    b256_1, a256_2 = get_patch_attention_scores(patch2, model256, device256=device256)
    save_region = np.array(patch.copy())
    s = 256
    offset_2 = offset
    if threshold != None:
        # Thresholded overlays: one per attention head.
        for i in range(6):
            score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
            score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
            # Shift the second view's scores back into alignment with the first.
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            # Per-pixel normalizer: 100 where one view contributes, 200 in the overlap.
            overlay256 = np.ones_like(score256_2)*100
            overlay256[offset_2:s, offset_2:s] += 100
            score256 = (score256_1+new_score256_2)/overlay256
            # Binarize: below threshold -> 0, above -> 0.95.
            # NOTE(review): pixels exactly equal to `threshold` keep their raw score.
            mask256 = score256.copy()
            mask256[mask256 < threshold] = 0
            mask256[mask256 > threshold] = 0.95
            color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
            region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            region256_hm[mask256==0] = 0
            img_inverse = save_region.copy()
            img_inverse[mask256 == 0.95] = 0
            # Composite: heatmap on high-attention pixels + raw image elsewhere.
            Image.fromarray(region256_hm+img_inverse).save(os.path.join(output_dir, '%s_256th[%d].png' % (fname, i)))
    # Continuous (unthresholded) heatmaps: one per attention head.
    for i in range(6):
        score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
        score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
        new_score256_2 = np.zeros_like(score256_2)
        new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
        overlay256 = np.ones_like(score256_2)*100
        overlay256[offset_2:s, offset_2:s] += 100
        score256 = (score256_1+new_score256_2)/overlay256
        color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
        region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        Image.fromarray(region256_hm).save(os.path.join(output_dir, '%s_256[%s].png' % (fname, i)))
def create_patch_heatmaps_concat(patch, model256, output_dir, fname, threshold=0.5,
    offset=16, alpha=0.5, cmap=plt.get_cmap('coolwarm')):
    r"""
    Creates patch heatmaps (concatenated for easy comparison)
    Args:
    - patch (PIL.Image): 256 x 256 Image
    - model256 (torch.nn): 256-Level ViT
    - output_dir (str): Save directory / subdirectory
    - fname (str): Naming structure of files
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
    - alpha (float): Image blending factor for cv2.addWeighted
    - cmap (matplotlib.pyplot): Colormap for creating heatmaps
    Returns:
    - None
    """
    # Two views: the original patch and a 16px-shifted copy; attention is
    # averaged over their overlap (same scheme as create_patch_heatmaps_indiv).
    patch1 = patch.copy()
    patch2 = add_margin(patch.crop((16,16,256,256)), top=0, left=0, bottom=16, right=16, color=(255,255,255))
    b256_1, a256_1 = get_patch_attention_scores(patch1, model256)
    b256_1, a256_2 = get_patch_attention_scores(patch2, model256)
    save_region = np.array(patch.copy())
    s = 256
    offset_2 = offset
    if threshold != None:
        # Thresholded overlays for all 6 heads, collected into one 2x3 montage.
        ths = []
        for i in range(6):
            score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
            score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            overlay256 = np.ones_like(score256_2)*100
            overlay256[offset_2:s, offset_2:s] += 100
            score256 = (score256_1+new_score256_2)/overlay256
            mask256 = score256.copy()
            mask256[mask256 < threshold] = 0
            mask256[mask256 > threshold] = 0.95
            color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
            region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            region256_hm[mask256==0] = 0
            img_inverse = save_region.copy()
            img_inverse[mask256 == 0.95] = 0
            ths.append(region256_hm+img_inverse)
        ths = [Image.fromarray(img) for img in ths]
        getConcatImage([getConcatImage(ths[0:3]),
                        getConcatImage(ths[3:6])], how='vertical').save(os.path.join(output_dir, '%s_256th.png' % (fname)))
    # Continuous heatmaps for all 6 heads, collected into one 2x3 montage.
    hms = []
    for i in range(6):
        score256_1 = get_scores256(a256_1[:,i,:,:], size=(s,)*2)
        score256_2 = get_scores256(a256_2[:,i,:,:], size=(s,)*2)
        new_score256_2 = np.zeros_like(score256_2)
        new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
        overlay256 = np.ones_like(score256_2)*100
        overlay256[offset_2:s, offset_2:s] += 100
        score256 = (score256_1+new_score256_2)/overlay256
        color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
        region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        hms.append(region256_hm)
    hms = [Image.fromarray(img) for img in hms]
    getConcatImage([getConcatImage(hms[0:3]),
                    getConcatImage(hms[3:6])], how='vertical').save(os.path.join(output_dir, '%s_256hm.png' % (fname)))
def hipt_forward_pass(region, model256, model4k, scale=1,
    device256=torch.device('cpu'),
    device4k=torch.device('cpu')):
    r"""
    Two-stage HIPT forward pass: embed all 256x256 patches of a 4096x4096
    region with ViT-256, then aggregate the patch embeddings with ViT-4K.
    Args:
    - region (PIL.Image): 4096 x 4096 Image
    - model256 (torch.nn): 256-Level ViT
    - model4k (torch.nn): 4096-Level ViT
    - scale (int): accepted for API symmetry but unused in this function.
    Returns:
    - features_4096 (torch.Tensor): region-level embedding from ViT-4K.
    """
    t = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
        )
    ])
    with torch.no_grad():
        # Tile the region into a batch of 256 non-overlapping 256x256 patches.
        batch_256 = t(region).unsqueeze(0).unfold(2, 256, 256).unfold(3, 256, 256)
        batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h')
        batch_256 = batch_256.to(device256, non_blocking=True)
        features_256 = model256(batch_256)
        # Fold the 256 patch embeddings into a [1 x D x 16 x 16] grid for ViT-4K.
        features_256 = features_256.unfold(0, 16, 16).transpose(0,1).unsqueeze(dim=0)
        features_4096 = model4k.forward(features_256.to(device4k))
        return features_4096
def get_region_attention_scores(region, model256, model4k, scale=1,
    device256=torch.device('cpu'),
    device4k=torch.device('cpu')):
    r"""
    Forward pass in hierarchical model with attention scores saved.
    Args:
    - region (PIL.Image): 4096 x 4096 Image
    - model256 (torch.nn): 256-Level ViT
    - model4k (torch.nn): 4096-Level ViT
    - scale (int): How much to scale the output image by (e.g. - scale=4 will resize images to be 1024 x 1024.)
    Returns:
    - np.array: [256, 256/scale, 256/scale, 3] np.array sequence of image patches from the 4K x 4K region.
    - attention_256 (np.array): [256, nh, 256/scale, 256/scale] per-head attention maps for 256-sized patches.
    - attention_4096 (np.array): [nh, 4096/scale, 4096/scale] per-head attention maps for the 4k region.
    """
    t = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
        )
    ])
    with torch.no_grad():
        # Tile the region into a batch of 256 non-overlapping 256x256 patches.
        batch_256 = t(region).unsqueeze(0).unfold(2, 256, 256).unfold(3, 256, 256)
        batch_256 = rearrange(batch_256, 'b c p1 p2 w h -> (b p1 p2) c w h')
        batch_256 = batch_256.to(device256, non_blocking=True)
        features_256 = model256(batch_256)
        # ViT-256: CLS attention over each patch's 16x16 grid of 16px tokens.
        attention_256 = model256.get_last_selfattention(batch_256)
        nh = attention_256.shape[1] # number of head
        attention_256 = attention_256[:, :, 0, 1:].reshape(256, nh, -1)
        attention_256 = attention_256.reshape(256, nh, 16, 16)
        attention_256 = nn.functional.interpolate(attention_256, scale_factor=int(16/scale), mode="nearest").cpu().numpy()
        # ViT-4K: CLS attention over the 16x16 grid of patch embeddings.
        features_4096 = features_256.unfold(0, 16, 16).transpose(0,1).unsqueeze(dim=0)
        attention_4096 = model4k.get_last_selfattention(features_4096.detach().to(device4k))
        nh = attention_4096.shape[1] # number of head
        attention_4096 = attention_4096[0, :, 0, 1:].reshape(nh, -1)
        attention_4096 = attention_4096.reshape(nh, 16, 16)
        attention_4096 = nn.functional.interpolate(attention_4096.unsqueeze(0), scale_factor=int(256/scale), mode="nearest")[0].cpu().numpy()
        if scale != 1:
            batch_256 = nn.functional.interpolate(batch_256, scale_factor=(1/scale), mode="nearest")
        return tensorbatch2im(batch_256), attention_256, attention_4096
def create_hierarchical_heatmaps_indiv(region, model256, model4k, output_dir, fname,
    offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm'), threshold=None):
    r"""
    Creates hierarchical heatmaps (Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps saved individually).
    Args:
    - region (PIL.Image): 4096 x 4096 Image
    - model256 (torch.nn): 256-Level ViT
    - model4k (torch.nn): 4096-Level ViT
    - output_dir (str): Save directory / subdirectory
    - fname (str): Naming structure of files
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
    - scale (int): How much to scale the output image by
    - alpha (float): Image blending factor for cv2.addWeighted
    - cmap (matplotlib.pyplot): Colormap for creating heatmaps
    Returns:
    - None
    """
    # Three shifted views of the region (by 128, 256, 384 px); attention maps
    # from all four views are averaged over their overlaps.
    region2 = add_margin(region.crop((128,128,4096,4096)),
        top=0, left=0, bottom=128, right=128, color=(255,255,255))
    region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
        top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
    # NOTE(review): region4 crops by 128*3 but pads by 128*4, unlike
    # region2/region3 which pad symmetrically — confirm this is intentional.
    region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
        top=0, left=0, bottom=128*4, right=128*4, color=(255,255,255))
    b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale)
    b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale)
    b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale)
    b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale)
    # Shifts expressed in output-pixel units (after downscaling by `scale`).
    offset_2 = (offset*1)//scale
    offset_3 = (offset*2)//scale
    offset_4 = (offset*3)//scale
    s = 4096//scale
    save_region = np.array(region.resize((s, s)))
    if threshold != None:
        # Thresholded ViT-256 overlays, one per head (views 1 and 2 only).
        for i in range(6):
            score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
            score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            overlay256 = np.ones_like(score256_2)*100
            overlay256[offset_2:s, offset_2:s] += 100
            score256 = (score256_1+new_score256_2)/overlay256
            mask256 = score256.copy()
            mask256[mask256 < threshold] = 0
            mask256[mask256 > threshold] = 0.95
            color_block256 = (cmap(mask256)*255)[:,:,:3].astype(np.uint8)
            region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            region256_hm[mask256==0] = 0
            img_inverse = save_region.copy()
            img_inverse[mask256 == 0.95] = 0
            Image.fromarray(region256_hm+img_inverse).save(os.path.join(output_dir, '%s_256th[%d].png' % (fname, i)))
    # Disabled: single-view ViT-4K heatmaps (kept for reference).
    if False:
        for j in range(6):
            score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
            score4k = score4k_1 / 100
            color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
            region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            Image.fromarray(region4k_hm).save(os.path.join(output_dir, '%s_4k[%s].png' % (fname, j)))
    # ViT-4K heatmaps averaged over all four shifted views, one per head.
    for j in range(6):
        score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
        score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
        score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
        score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
        new_score4k_2 = np.zeros_like(score4k_2)
        new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
        new_score4k_3 = np.zeros_like(score4k_3)
        new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
        new_score4k_4 = np.zeros_like(score4k_4)
        new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
        # Normalizer counts how many views cover each pixel (x100).
        overlay4k = np.ones_like(score4k_2)*100
        overlay4k[offset_2:s, offset_2:s] += 100
        overlay4k[offset_3:s, offset_3:s] += 100
        overlay4k[offset_4:s, offset_4:s] += 100
        score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
        color_block4k = (cmap(score4k)*255)[:,:,:3].astype(np.uint8)
        region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        Image.fromarray(region4k_hm).save(os.path.join(output_dir, '%s_1024[%s].png' % (fname, j)))
    # ViT-256 heatmaps averaged over views 1 and 2, one per head.
    for i in range(6):
        score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
        score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
        new_score256_2 = np.zeros_like(score256_2)
        new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
        overlay256 = np.ones_like(score256_2)*100
        overlay256[offset_2:s, offset_2:s] += 100
        score256 = (score256_1+new_score256_2)/overlay256
        color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
        region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        Image.fromarray(region256_hm).save(os.path.join(output_dir, '%s_256[%s].png' % (fname, i)))
    # Factorized heatmaps: every (4K head j, 256 head i) combination.
    for j in range(6):
        score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
        score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
        score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
        score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
        new_score4k_2 = np.zeros_like(score4k_2)
        new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
        new_score4k_3 = np.zeros_like(score4k_3)
        new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
        new_score4k_4 = np.zeros_like(score4k_4)
        new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
        overlay4k = np.ones_like(score4k_2)*100
        overlay4k[offset_2:s, offset_2:s] += 100
        overlay4k[offset_3:s, offset_3:s] += 100
        overlay4k[offset_4:s, offset_4:s] += 100
        score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
        for i in range(6):
            score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
            score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            overlay256 = np.ones_like(score256_2)*100*2
            overlay256[offset_2:s, offset_2:s] += 100*2
            score256 = (score256_1+new_score256_2)*2/overlay256
            factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
            # Coverage-weighted average of the two hierarchy levels.
            score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
            color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
            region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            Image.fromarray(region_hm).save(os.path.join(output_dir, '%s_factorized_4k[%s]_256[%s].png' % (fname, j, i)))
    return
def create_hierarchical_heatmaps_concat(region, model256, model4k, output_dir, fname,
    offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm')):
    r"""
    Creates hierarchical heatmaps (With Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps concatenated for easy comparison)
    Args:
    - region (PIL.Image): 4096 x 4096 Image
    - model256 (torch.nn): 256-Level ViT
    - model4k (torch.nn): 4096-Level ViT
    - output_dir (str): Save directory / subdirectory
    - fname (str): Naming structure of files
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
    - scale (int): How much to scale the output image by
    - alpha (float): Image blending factor for cv2.addWeighted
    - cmap (matplotlib.pyplot): Colormap for creating heatmaps
    Returns:
    - None
    """
    # Three shifted views of the region; attention averaged over overlaps.
    region2 = add_margin(region.crop((128,128,4096,4096)),
        top=0, left=0, bottom=128, right=128, color=(255,255,255))
    region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
        top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
    # NOTE(review): crops by 128*3 but pads by 128*4 — confirm intentional.
    region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
        top=0, left=0, bottom=128*4, right=128*4, color=(255,255,255))
    b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale)
    b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale)
    b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale)
    b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale)
    offset_2 = (offset*1)//scale
    offset_3 = (offset*2)//scale
    offset_4 = (offset*3)//scale
    s = 4096//scale
    save_region = np.array(region.resize((s, s)))
    for j in range(6):
        # Multi-view averaged ViT-4K scores for head j.
        score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
        score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
        score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
        score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
        new_score4k_2 = np.zeros_like(score4k_2)
        new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
        new_score4k_3 = np.zeros_like(score4k_3)
        new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
        new_score4k_4 = np.zeros_like(score4k_4)
        new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
        overlay4k = np.ones_like(score4k_2)*100
        overlay4k[offset_2:s, offset_2:s] += 100
        overlay4k[offset_3:s, offset_3:s] += 100
        overlay4k[offset_4:s, offset_4:s] += 100
        score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
        # NOTE(review): the displayed 4K panel uses only the un-shifted view
        # (score4k_1/100); the multi-view average feeds the blended panel below.
        color_block4k = (cmap(score4k_1/100)*255)[:,:,:3].astype(np.uint8)
        region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        for i in range(6):
            score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
            score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            overlay256 = np.ones_like(score256_2)*100*2
            overlay256[offset_2:s, offset_2:s] += 100*2
            score256 = (score256_1+new_score256_2)*2/overlay256
            color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
            region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
            # Coverage-weighted blend of the two hierarchy levels.
            score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
            color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
            region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            # 2x2 montage: raw | 4K heatmap / 256 heatmap | blended.
            # NOTE(review): the 1024-based paste/text offsets assume scale=4
            # (s == 1024); other scales will misplace the panels. The font load
            # also requires "arial.ttf" to be resolvable on this system.
            pad = 100
            canvas = Image.new('RGB', (s*2+pad,)*2, (255,)*3)
            draw = ImageDraw.Draw(canvas)
            font = ImageFont.truetype("arial.ttf", 50)
            draw.text((1024*0.5-pad*2, pad//4), "ViT-256 (Head: %d)" % i, (0, 0, 0), font=font)
            canvas = canvas.rotate(90)
            draw = ImageDraw.Draw(canvas)
            draw.text((1024*1.5-pad, pad//4), "ViT-4K (Head: %d)" % j, (0, 0, 0), font=font)
            canvas.paste(Image.fromarray(save_region), (pad,pad))
            canvas.paste(Image.fromarray(region4k_hm), (1024+pad,pad))
            canvas.paste(Image.fromarray(region256_hm), (pad,1024+pad))
            canvas.paste(Image.fromarray(region_hm), (s+pad,s+pad))
            canvas.save(os.path.join(output_dir, '%s_4k[%s]_256[%s].png' % (fname, j, i)))
    return
def create_hierarchical_heatmaps_concat_select(region, model256, model4k, output_dir, fname,
    offset=128, scale=4, alpha=0.5, cmap = plt.get_cmap('coolwarm')):
    r"""
    Creates hierarchical heatmaps (With Raw H&E + ViT-256 + ViT-4K + Blended Heatmaps concatenated for easy comparison)
    Note that only select attention heads are used (4K heads {0, 5}, 256 head {2}).
    Args:
    - region (PIL.Image): 4096 x 4096 Image
    - model256 (torch.nn): 256-Level ViT
    - model4k (torch.nn): 4096-Level ViT
    - output_dir (str): Save directory / subdirectory
    - fname (str): Naming structure of files
    - offset (int): How much to offset (from top-left corner with zero-padding) the region by for blending
    - scale (int): How much to scale the output image by
    - alpha (float): Image blending factor for cv2.addWeighted
    - cmap (matplotlib.pyplot): Colormap for creating heatmaps
    Returns:
    - None
    """
    # Three shifted views of the region; attention averaged over overlaps.
    region2 = add_margin(region.crop((128,128,4096,4096)),
        top=0, left=0, bottom=128, right=128, color=(255,255,255))
    region3 = add_margin(region.crop((128*2,128*2,4096,4096)),
        top=0, left=0, bottom=128*2, right=128*2, color=(255,255,255))
    # NOTE(review): crops by 128*3 but pads by 128*4 — confirm intentional.
    region4 = add_margin(region.crop((128*3,128*3,4096,4096)),
        top=0, left=0, bottom=128*4, right=128*4, color=(255,255,255))
    b256_1, a256_1, a4k_1 = get_region_attention_scores(region, model256, model4k, scale)
    b256_2, a256_2, a4k_2 = get_region_attention_scores(region2, model256, model4k, scale)
    b256_3, a256_3, a4k_3 = get_region_attention_scores(region3, model256, model4k, scale)
    b256_4, a256_4, a4k_4 = get_region_attention_scores(region4, model256, model4k, scale)
    offset_2 = (offset*1)//scale
    offset_3 = (offset*2)//scale
    offset_4 = (offset*3)//scale
    s = 4096//scale
    save_region = np.array(region.resize((s, s)))
    # 2x3 montage grid: row 0 = [raw, 4K head 0, 4K head 5],
    # row 1 = [256 head 2, blend(0,2), blend(5,2)].
    canvas = [[Image.fromarray(save_region), None, None], [None, None, None]]
    for idx_4k, j in enumerate([0,5]):
        score4k_1 = concat_scores4k(a4k_1[j], size=(s,)*2)
        score4k_2 = concat_scores4k(a4k_2[j], size=(s,)*2)
        score4k_3 = concat_scores4k(a4k_3[j], size=(s,)*2)
        score4k_4 = concat_scores4k(a4k_4[j], size=(s,)*2)
        new_score4k_2 = np.zeros_like(score4k_2)
        new_score4k_2[offset_2:s, offset_2:s] = score4k_2[:(s-offset_2), :(s-offset_2)]
        new_score4k_3 = np.zeros_like(score4k_3)
        new_score4k_3[offset_3:s, offset_3:s] = score4k_3[:(s-offset_3), :(s-offset_3)]
        new_score4k_4 = np.zeros_like(score4k_4)
        new_score4k_4[offset_4:s, offset_4:s] = score4k_4[:(s-offset_4), :(s-offset_4)]
        overlay4k = np.ones_like(score4k_2)*100
        overlay4k[offset_2:s, offset_2:s] += 100
        overlay4k[offset_3:s, offset_3:s] += 100
        overlay4k[offset_4:s, offset_4:s] += 100
        score4k = (score4k_1+new_score4k_2+new_score4k_3+new_score4k_4)/overlay4k
        # Displayed 4K panel uses only the un-shifted view (score4k_1/100);
        # the multi-view average feeds the blended panel below.
        color_block4k = (cmap(score4k_1/100)*255)[:,:,:3].astype(np.uint8)
        region4k_hm = cv2.addWeighted(color_block4k, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
        canvas[0][idx_4k+1] = Image.fromarray(region4k_hm)
        for idx_256, i in enumerate([2]):
            score256_1 = concat_scores256(a256_1[:,i,:,:], size=(s//16,)*2)
            score256_2 = concat_scores256(a256_2[:,i,:,:], size=(s//16,)*2)
            new_score256_2 = np.zeros_like(score256_2)
            new_score256_2[offset_2:s, offset_2:s] = score256_2[:(s-offset_2), :(s-offset_2)]
            overlay256 = np.ones_like(score256_2)*100*2
            overlay256[offset_2:s, offset_2:s] += 100*2
            score256 = (score256_1+new_score256_2)*2/overlay256
            color_block256 = (cmap(score256)*255)[:,:,:3].astype(np.uint8)
            region256_hm = cv2.addWeighted(color_block256, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            canvas[idx_256+1][0] = Image.fromarray(region256_hm)
            factorize = lambda data: (data - np.min(data)) / (np.max(data) - np.min(data))
            # Coverage-weighted blend of the two hierarchy levels.
            score = (score4k*overlay4k+score256*overlay256)/(overlay4k+overlay256) #factorize(score256*score4k)
            color_block = (cmap(score)*255)[:,:,:3].astype(np.uint8)
            region_hm = cv2.addWeighted(color_block, alpha, save_region.copy(), 1-alpha, 0, save_region.copy())
            canvas[idx_256+1][idx_4k+1] = Image.fromarray(region_hm)
    canvas = getConcatImage([getConcatImage(row) for row in canvas], how='vertical')
    canvas.save(os.path.join(output_dir, '%s_heatmap.png' % (fname)))
    return
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/main.py | ### Base Packages
from __future__ import print_function
import argparse
import pdb
import os
import math
### Numerical Packages
import numpy as np
import pandas as pd
### Internal Imports
from datasets.dataset_generic import Generic_WSI_Classification_Dataset, Generic_MIL_Dataset
from utils.file_utils import save_pkl, load_pkl
from utils.utils import *
from utils.core_utils import train
### PyTorch Imports
import torch
from torch.utils.data import DataLoader, sampler
import torch.nn as nn
import torch.nn.functional as F
##### Train-Val-Test Loop for 10-Fold CV
def main(args):
    """Run k-fold cross-validation over the configured WSI task.

    For each fold: re-seed, load the fold's train/val/test splits, train,
    persist the per-fold results pickle, and finally write a CSV summary of
    val/test AUC and accuracy across all folds.
    """
    # Make sure the results directory exists before any fold writes into it.
    if not os.path.isdir(args.results_dir):
        os.mkdir(args.results_dir)
    # Resolve the [start, end) range of folds to run (-1 means "use default").
    start = 0 if args.k_start == -1 else args.k_start
    end = args.k if args.k_end == -1 else args.k_end
    # Per-fold metrics accumulated in parallel lists.
    all_test_auc, all_val_auc = [], []
    all_test_acc, all_val_acc = [], []
    folds = np.arange(start, end)
    for i in folds:
        # Re-seed so every fold starts from the same deterministic state.
        seed_torch(args.seed)
        split_csv = '{}/splits_{}.csv'.format(args.split_dir, i)
        train_split, val_split, test_split = dataset.return_splits(from_id=False, csv_path=split_csv)
        results, test_auc, val_auc, test_acc, val_acc = train((train_split, val_split, test_split), i, args)
        all_test_auc.append(test_auc)
        all_val_auc.append(val_auc)
        all_test_acc.append(test_acc)
        all_val_acc.append(val_acc)
        # Persist the raw results dict for this split.
        fold_pkl = os.path.join(args.results_dir, 'split_{}_results.pkl'.format(i))
        save_pkl(fold_pkl, results)
    # Summarize all folds into a single CSV (partial runs get a distinct name).
    final_df = pd.DataFrame({'folds': folds, 'test_auc': all_test_auc, 'val_auc': all_val_auc, 'test_acc': all_test_acc, 'val_acc' : all_val_acc})
    if len(folds) != args.k:
        save_name = 'summary_partial_{}_{}.csv'.format(start, end)
    else:
        save_name = 'summary.csv'
    final_df.to_csv(os.path.join(args.results_dir, save_name))
##### Argparser
### (Default) Training settings
parser = argparse.ArgumentParser(description='Configurations for WSI Training')
parser.add_argument('--data_root_dir', type=str, default='/media/ssd1/pan-cancer', help='data directory')
parser.add_argument('--max_epochs', type=int, default=20, help='maximum number of epochs to train (default: 200)')
parser.add_argument('--lr', type=float, default=2e-4, help='learning rate (default: 0.0001)')
parser.add_argument('--label_frac', type=float, default=1.0, help='fraction of training labels (default: 1.0)')
parser.add_argument('--reg', type=float, default=1e-5, help='weight decay (default: 1e-5)')
parser.add_argument('--seed', type=int, default=1, help='random seed for reproducible experiment (default: 1)')
parser.add_argument('--k', type=int, default=10, help='number of folds (default: 10)')
parser.add_argument('--k_start', type=int, default=-1, help='start fold (default: -1, last fold)')
parser.add_argument('--k_end', type=int, default=-1, help='end fold (default: -1, first fold)')
parser.add_argument('--results_dir', type=str, default='./results', help='results directory (default: ./results)')
parser.add_argument('--opt', type=str, choices = ['adam', 'sgd'], default='adam')
parser.add_argument('--bag_loss', type=str, choices=['svm', 'ce'], default='ce', help='slide-level classification loss function (default: ce)')
parser.add_argument('--model_size', type=str, choices=['small', 'big'], default='small', help='size of model, does not affect mil')
parser.add_argument('--log_data', action='store_true', default=True, help='log data using tensorboard')
parser.add_argument('--testing', action='store_true', default=False, help='debugging tool')
parser.add_argument('--early_stopping', action='store_true', default=False, help='enable early stopping')
parser.add_argument('--drop_out', action='store_true', default=True, help='enabel dropout (p=0.25)')
parser.add_argument('--weighted_sample',action='store_true', default=True, help='enable weighted sampling')
### CLAM specific options
parser.add_argument('--bag_weight', type=float, default=0.7, help='clam: weight coefficient for bag-level loss (default: 0.7)')
parser.add_argument('--B', type=int, default=8, help='numbr of positive/negative patches to sample for clam')
parser.add_argument('--inst_loss', type=str, choices=['svm', 'ce', None], default='svm', help='instance-level clustering loss function (default: None)')
parser.add_argument('--no_inst_cluster',action='store_true', default=False, help='disable instance-level clustering')
parser.add_argument('--subtyping', action='store_true', default=False, help='subtyping problem')
### Options Used
parser.add_argument('--model_type', type=str, default='clam_sb', help='Type of model to use',
                    choices=['clam_sb', 'clam_mb', 'mil', 'dgcn', 'mi_fcn', 'dsmil', 'hipt_n', 'hipt_lgp'])
parser.add_argument('--features', type=str, default='vits_tcga_pancancer_dino', help='Which features to use',
                    choices=['resnet50_trunc', 'vits_tcga_pancancer_dino'])
parser.add_argument('--task', type=str, default='tcga_lung_subtype', help='Which weakly-supervised task to evaluate on.')
parser.add_argument('--path_input_dim', type=int, default=384, help='Size of patch embedding size (384 for DINO)')
parser.add_argument('--mode', type=str, default='path', help='Which features to load')
parser.add_argument('--prop', type=float, default=1.0, help='Proportion of training dataset to use')
parser.add_argument('--pretrain_4k', type=str, default='None', help='Whether to initialize the 4K Transformer in HIPT', choices=['None', 'vit4k_xs_dino'])
### BUG FIX: 'args.pretrain_WSI' is read below when --model_type hipt_lgp is
### used, but the argument was never declared, raising AttributeError.
parser.add_argument('--pretrain_WSI', type=str, default='None', help='Whether to initialize the WSI-level Transformer in HIPT')
parser.add_argument('--freeze_4k', action='store_true', default=False, help='Whether to freeze the 4K Transformer in HIPT')
parser.add_argument('--freeze_WSI', action='store_true', default=False, help='Whether to freeze the WSI Transformer in HIPT')
args = parser.parse_args()
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")

##### Creating Experiment Code
### 1. If HIPT, set the mode to be 'pyramid'
if 'hipt' in args.model_type:
    args.mode = 'pyramid'
### 2. If using 'hipt_lgp' (HIPT with local-global pretraining), modify the experiment code for any freezing + pretraining
if args.model_type == 'hipt_lgp':
    if args.freeze_4k and (not args.freeze_WSI):
        model_code = 'hipt_lgp[%s]_freeze_[%s]' % (args.pretrain_4k, args.pretrain_WSI)
    else:
        model_code = 'hipt_lgp[%s]_[%s]' % (args.pretrain_4k, args.pretrain_WSI)
else:
    model_code = args.model_type
### 3. Add embedding dimension in the experiment code.
if args.path_input_dim != 384:
    model_code += '_%d' % args.path_input_dim
### 4. Add task information in the experiment code.
if 'subtype' in args.task:
    args.exp_code = '%s_%s_%s_%0.2f' % (args.task, model_code, args.features, args.prop)
    args.splits = '10foldcv_subtype'
    args.split_dir = './splits/%s/%s' % (args.splits, '_'.join(args.task.split('_')[:2]))
print("Setting Splits Directory...", args.split_dir)
def seed_torch(seed=7):
    """Seed every RNG used by the pipeline for reproducibility.

    Seeds python's `random`, PYTHONHASHSEED, numpy, torch (CPU and all CUDA
    devices) and forces deterministic cuDNN behavior.

    Args:
        seed (int): seed value applied to every generator.
    """
    import random
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # FIX: query CUDA availability directly instead of relying on the
    # module-level `device` global — same condition, but the helper is now
    # self-contained and importable on its own.
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
    # Trade cuDNN autotuning speed for determinism.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
seed_torch(args.seed)
# NOTE(review): encoding_size appears unused in this script — confirm before removing.
encoding_size = 1024
### Experiment settings, saved alongside results for provenance.
settings = {'num_splits': args.k,
            'k_start': args.k_start,
            'k_end': args.k_end,
            'task': args.task,
            'max_epochs': args.max_epochs,
            'results_dir': args.results_dir,
            'lr': args.lr,
            'experiment': args.exp_code,
            'reg': args.reg,
            'label_frac': args.label_frac,
            'bag_loss': args.bag_loss,
            'seed': args.seed,
            'model_type': args.model_type,
            'model_size': args.model_size,
            "use_drop_out": args.drop_out,
            'weighted_sample': args.weighted_sample,
            'opt': args.opt}
### CLAM variants carry extra instance-level clustering settings.
if args.model_type in ['clam_sb', 'clam_mb']:
    settings.update({'bag_weight': args.bag_weight,
                     'inst_loss': args.inst_loss,
                     'B': args.B})
##### Loading the dataset
print('\nLoad Dataset')
print(args.task)
study = "_".join(args.task.split('_')[:2])
### Feature directory depends on region-level (pyramid) vs patch-level mode.
if args.mode == 'pyramid':
    study_dir = '{}/extracted_mag20x_patch4096_fp/{}_pt_patch_features_384'.format(study, args.features)
else:
    study_dir = '{}/extracted_mag20x_patch256_fp/{}_pt_patch_features'.format(study, args.features)
if args.task == 'tcga_lung_subtype':
    args.n_classes = 2
    dataset = Generic_MIL_Dataset(csv_path = './dataset_csv/tcga_lung_subset.csv.zip',
                                  data_dir= os.path.join(args.data_root_dir, study_dir),
                                  mode=args.mode,
                                  shuffle = False,
                                  seed = args.seed,
                                  print_info = True,
                                  label_col='oncotree_code',
                                  label_dict = {'LUAD':0, 'LUSC':1},
                                  patient_strat=False,
                                  prop=args.prop,
                                  ignore=[])
elif args.task == 'tcga_kidney_subtype':
    args.n_classes = 3
    dataset = Generic_MIL_Dataset(csv_path = './dataset_csv/tcga_kidney_subset.csv.zip',
                                  data_dir= os.path.join(args.data_root_dir, study_dir),
                                  mode=args.mode,
                                  shuffle = False,
                                  seed = args.seed,
                                  print_info = True,
                                  label_col='oncotree_code',
                                  label_dict = {'CCRCC':0, 'PRCC':1, 'CHRCC':2},
                                  patient_strat=False,
                                  prop=args.prop,
                                  ignore=[])
elif args.task == 'tcga_brca_subtype':
    args.n_classes = 2
    dataset = Generic_MIL_Dataset(csv_path = './dataset_csv/tcga_brca_subset.csv.zip',
                                  data_dir= os.path.join(args.data_root_dir, study_dir),
                                  mode=args.mode,
                                  shuffle = False,
                                  seed = args.seed,
                                  print_info = True,
                                  label_col='oncotree_code',
                                  label_dict = {'IDC':0, 'ILC':1},
                                  patient_strat=False,
                                  prop=args.prop,
                                  ignore=['MDLC', 'PD', 'ACBC', 'IMMC', 'BRCNOS', 'BRCA', 'SPC', 'MBC', 'MPT'])
else:
    raise NotImplementedError
if not os.path.isdir(args.results_dir):
    os.mkdir(args.results_dir)
if 'subtype' in args.task:
    exp_folder = args.task
args.results_dir = os.path.join(args.results_dir, exp_folder, str(args.exp_code) + '_none_s%d' % (args.seed))
if not os.path.isdir(args.results_dir):
    os.makedirs(args.results_dir, exist_ok=True)
else:
    ### Abort if this experiment already finished (summary.csv present).
    if 'summary.csv' in os.listdir(args.results_dir):
        print("Exp Code <%s> already exists! Exiting script." % args.exp_code)
        import sys
        sys.exit()
print('split_dir: ', args.split_dir)
assert os.path.isdir(args.split_dir)
settings.update({'split_dir': args.split_dir})
### Dump the settings next to the results. FIX: removed the redundant
### f.close() — the with-statement already closes the file.
with open(args.results_dir + '/experiment_{}.txt'.format(args.exp_code), 'w') as f:
    print(settings, file=f)
print("################# Settings ###################")
for key, val in settings.items():
    print("{}: {}".format(key, val))
if __name__ == "__main__":
    # Entry point: run the full cross-validation loop configured above.
    results = main(args)
    for closing_msg in ("finished!", "end script"):
        print(closing_msg)
| 12,119 | 45.259542 | 157 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/wsi_core/util_classes.py | import os
import numpy as np
from PIL import Image
import pdb
import cv2
class Mosaic_Canvas(object):
    """Fixed-grid canvas for assembling downscaled patches into a mosaic.

    Patches are pasted left-to-right, top-to-bottom; each paste advances an
    internal cursor by one (downscaled) cell.
    """
    def __init__(self, patch_size=256, n=100, downscale=4, n_per_row=10, bg_color=(0,0,0), alpha=-1):
        self.patch_size = patch_size
        self.downscaled_patch_size = int(np.ceil(patch_size / downscale))
        # Grid shape: fixed number of columns, enough rows to hold n patches.
        self.n_rows = int(np.ceil(n / n_per_row))
        self.n_cols = n_per_row
        canvas_w = self.n_cols * self.downscaled_patch_size
        canvas_h = self.n_rows * self.downscaled_patch_size
        if alpha < 0:
            # Opaque canvas.
            self.canvas = Image.new(size=(canvas_w, canvas_h), mode="RGB", color=bg_color)
        else:
            # Semi-transparent canvas: extend bg_color with an alpha channel.
            self.canvas = Image.new(size=(canvas_w, canvas_h), mode="RGBA", color=bg_color + (int(255 * alpha),))
        self.dimensions = np.array([canvas_w, canvas_h])
        self.reset_coord()

    def reset_coord(self):
        # Cursor starts at the top-left cell.
        self.coord = np.array([0, 0])

    def increment_coord(self):
        # Advance one cell to the right; wrap to the next row at the edge.
        assert np.all(self.coord <= self.dimensions)
        step = self.downscaled_patch_size
        if self.coord[0] + step <= self.dimensions[0] - step:
            self.coord[0] += step
        else:
            self.coord[0] = 0
            self.coord[1] += step

    def save(self, save_path, **kwargs):
        self.canvas.save(save_path, **kwargs)

    def paste_patch(self, patch):
        # Only full-resolution patches of the configured size are accepted.
        assert patch.size[0] == self.patch_size
        assert patch.size[1] == self.patch_size
        cell = self.downscaled_patch_size
        self.canvas.paste(patch.resize((cell, cell)), tuple(self.coord))
        self.increment_coord()

    def get_painting(self):
        return self.canvas
class Contour_Checking_fn(object):
    """Callable interface for point-in-contour tests.

    Subclasses implement __call__(pt) and return 1 (inside) or 0 (outside).
    """
    def __call__(self, pt):
        # Abstract: concrete checking strategies must override this.
        raise NotImplementedError
class isInContourV1(Contour_Checking_fn):
    """Checks the raw top-left point itself against the contour."""
    def __init__(self, contour):
        self.cont = contour

    def __call__(self, pt):
        # pointPolygonTest >= 0 means on or inside the contour boundary.
        inside = cv2.pointPolygonTest(self.cont, pt, False) >= 0
        return 1 if inside else 0
class isInContourV2(Contour_Checking_fn):
    """Checks the patch CENTER (top-left + patch_size//2) against the contour."""
    def __init__(self, contour, patch_size):
        self.cont = contour
        self.patch_size = patch_size

    def __call__(self, pt):
        center = (pt[0] + self.patch_size // 2, pt[1] + self.patch_size // 2)
        # int(bool) preserves the original 1/0 return convention.
        return int(cv2.pointPolygonTest(self.cont, center, False) >= 0)
# Easy version of 4pt contour checking function - 1 of 4 points need to be in the contour for test to pass
class isInContourV3_Easy(Contour_Checking_fn):
    """Passes if ANY of the four shifted points (or the bare center) is inside."""
    def __init__(self, contour, patch_size, center_shift=0.5):
        self.cont = contour
        self.patch_size = patch_size
        # Offset of the four probe points from the patch center, in pixels.
        self.shift = int(patch_size//2*center_shift)

    def __call__(self, pt):
        cx = pt[0] + self.patch_size // 2
        cy = pt[1] + self.patch_size // 2
        if self.shift > 0:
            # Four corners of a square of half-side `shift` around the center.
            candidates = [(cx - self.shift, cy - self.shift),
                          (cx + self.shift, cy + self.shift),
                          (cx + self.shift, cy - self.shift),
                          (cx - self.shift, cy + self.shift)]
        else:
            candidates = [(cx, cy)]
        for candidate in candidates:
            if cv2.pointPolygonTest(self.cont, candidate, False) >= 0:
                return 1
        return 0
# Hard version of 4pt contour checking function - all 4 points need to be in the contour for test to pass
class isInContourV3_Hard(Contour_Checking_fn):
    """Passes only if ALL four shifted points (or the bare center) are inside."""
    def __init__(self, contour, patch_size, center_shift=0.5):
        self.cont = contour
        self.patch_size = patch_size
        # Offset of the four probe points from the patch center, in pixels.
        self.shift = int(patch_size//2*center_shift)

    def __call__(self, pt):
        cx = pt[0] + self.patch_size // 2
        cy = pt[1] + self.patch_size // 2
        if self.shift > 0:
            # Four corners of a square of half-side `shift` around the center.
            candidates = [(cx - self.shift, cy - self.shift),
                          (cx + self.shift, cy + self.shift),
                          (cx + self.shift, cy - self.shift),
                          (cx - self.shift, cy + self.shift)]
        else:
            candidates = [(cx, cy)]
        for candidate in candidates:
            if cv2.pointPolygonTest(self.cont, candidate, False) < 0:
                return 0
        return 1
| 3,787 | 32.22807 | 121 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/wsi_core/WholeSlideImage.py | import math
import os
import time
import xml.etree.ElementTree as ET
from xml.dom import minidom
import multiprocessing as mp
import cv2
import matplotlib.pyplot as plt
import numpy as np
import openslide
from PIL import Image
import pdb
import h5py
import math
from wsi_core.wsi_utils import savePatchIter_bag_hdf5, initialize_hdf5_bag, coord_generator, save_hdf5, sample_indices, screen_coords, isBlackPatch, isWhitePatch, to_percentiles
import itertools
from wsi_core.util_classes import isInContourV1, isInContourV2, isInContourV3_Easy, isInContourV3_Hard, Contour_Checking_fn
from utils.file_utils import load_pkl, save_pkl
# Raise PIL's decompression-bomb ceiling: whole-slide images legitimately
# exceed the default pixel limit.
Image.MAX_IMAGE_PIXELS = 933120000
class WholeSlideImage(object):
def __init__(self, path):
"""
Args:
path (str): fullpath to WSI file
"""
self.name = ".".join(path.split("/")[-1].split('.')[:-1])
self.wsi = openslide.open_slide(path)
self.level_downsamples = self._assertLevelDownsamples()
self.level_dim = self.wsi.level_dimensions
self.contours_tissue = None
self.contours_tumor = None
self.hdf5_file = None
def getOpenSlide(self):
return self.wsi
def initXML(self, xml_path):
def _createContour(coord_list):
return np.array([[[int(float(coord.attributes['X'].value)),
int(float(coord.attributes['Y'].value))]] for coord in coord_list], dtype = 'int32')
xmldoc = minidom.parse(xml_path)
annotations = [anno.getElementsByTagName('Coordinate') for anno in xmldoc.getElementsByTagName('Annotation')]
self.contours_tumor = [_createContour(coord_list) for coord_list in annotations]
self.contours_tumor = sorted(self.contours_tumor, key=cv2.contourArea, reverse=True)
def initTxt(self,annot_path):
def _create_contours_from_dict(annot):
all_cnts = []
for idx, annot_group in enumerate(annot):
contour_group = annot_group['coordinates']
if annot_group['type'] == 'Polygon':
for idx, contour in enumerate(contour_group):
contour = np.array(contour).astype(np.int32).reshape(-1,1,2)
all_cnts.append(contour)
else:
for idx, sgmt_group in enumerate(contour_group):
contour = []
for sgmt in sgmt_group:
contour.extend(sgmt)
contour = np.array(contour).astype(np.int32).reshape(-1,1,2)
all_cnts.append(contour)
return all_cnts
with open(annot_path, "r") as f:
annot = f.read()
annot = eval(annot)
self.contours_tumor = _create_contours_from_dict(annot)
self.contours_tumor = sorted(self.contours_tumor, key=cv2.contourArea, reverse=True)
def initSegmentation(self, mask_file):
# load segmentation results from pickle file
import pickle
asset_dict = load_pkl(mask_file)
self.holes_tissue = asset_dict['holes']
self.contours_tissue = asset_dict['tissue']
def saveSegmentation(self, mask_file):
# save segmentation results using pickle
asset_dict = {'holes': self.holes_tissue, 'tissue': self.contours_tissue}
save_pkl(mask_file, asset_dict)
    def segmentTissue(self, seg_level=0, sthresh=20, sthresh_up = 255, mthresh=7, close = 0, use_otsu=False,
                      filter_params={'a_t':100}, ref_patch_size=512, exclude_ids=[], keep_ids=[]):
        """
        Segment the tissue via HSV -> Median thresholding -> Binary threshold

        Args:
            seg_level (int): pyramid level to run segmentation on
            sthresh (int): saturation threshold for binary thresholding
            sthresh_up (int): max value used by the threshold op
            mthresh (int): median blur kernel size
            close (int): kernel size for morphological closing (0 = skip)
            use_otsu (bool): use Otsu's method instead of the fixed threshold
            filter_params (dict): 'a_t' (min tissue area), 'a_h' (min hole
                area), 'max_n_holes'; areas are in units of ref_patch_size**2
            ref_patch_size (int): reference patch size used to scale the area
                thresholds to the chosen seg_level
            exclude_ids (list): contour indices to drop after filtering
            keep_ids (list): contour indices to keep (overrides the full set)

        Populates self.contours_tissue / self.holes_tissue in level-0 coords.

        NOTE(review): the mutable default arguments are never mutated here
        (filter_params is copied before editing), so they are safe as-is.
        """
        def _filter_contours(contours, hierarchy, filter_params):
            """
                Filter contours by: area.
            """
            filtered = []
            # find indices of foreground contours (parent == -1)
            hierarchy_1 = np.flatnonzero(hierarchy[:,1] == -1)
            all_holes = []
            # loop through foreground contour indices
            for cont_idx in hierarchy_1:
                # actual contour
                cont = contours[cont_idx]
                # indices of holes contained in this contour (children of parent contour)
                holes = np.flatnonzero(hierarchy[:, 1] == cont_idx)
                # take contour area (includes holes)
                a = cv2.contourArea(cont)
                # calculate the contour area of each hole
                hole_areas = [cv2.contourArea(contours[hole_idx]) for hole_idx in holes]
                # actual area of foreground contour region
                a = a - np.array(hole_areas).sum()
                if a == 0: continue
                # tuple comparison here is equivalent to filter_params['a_t'] < a
                if tuple((filter_params['a_t'],)) < tuple((a,)):
                    filtered.append(cont_idx)
                    all_holes.append(holes)
            foreground_contours = [contours[cont_idx] for cont_idx in filtered]
            hole_contours = []
            for hole_ids in all_holes:
                unfiltered_holes = [contours[idx] for idx in hole_ids ]
                unfilered_holes = sorted(unfiltered_holes, key=cv2.contourArea, reverse=True)
                # take max_n_holes largest holes by area
                unfilered_holes = unfilered_holes[:filter_params['max_n_holes']]
                filtered_holes = []
                # filter these holes
                for hole in unfilered_holes:
                    if cv2.contourArea(hole) > filter_params['a_h']:
                        filtered_holes.append(hole)
                hole_contours.append(filtered_holes)
            return foreground_contours, hole_contours
        img = np.array(self.wsi.read_region((0,0), seg_level, self.level_dim[seg_level]))
        img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)  # Convert to HSV space
        img_med = cv2.medianBlur(img_hsv[:,:,1], mthresh)  # Apply median blurring
        # Thresholding
        if use_otsu:
            _, img_otsu = cv2.threshold(img_med, 0, sthresh_up, cv2.THRESH_OTSU+cv2.THRESH_BINARY)
        else:
            _, img_otsu = cv2.threshold(img_med, sthresh, sthresh_up, cv2.THRESH_BINARY)
        # Morphological closing
        if close > 0:
            kernel = np.ones((close, close), np.uint8)
            img_otsu = cv2.morphologyEx(img_otsu, cv2.MORPH_CLOSE, kernel)
        # Scale area thresholds from ref_patch_size**2 units to seg_level pixels.
        scale = self.level_downsamples[seg_level]
        scaled_ref_patch_area = int(ref_patch_size**2 / (scale[0] * scale[1]))
        filter_params = filter_params.copy()
        filter_params['a_t'] = filter_params['a_t'] * scaled_ref_patch_area
        filter_params['a_h'] = filter_params['a_h'] * scaled_ref_patch_area
        # Find and filter contours
        # NOTE(review): assumes OpenCV >= 4 (findContours returns 2 values) — confirm pinned version.
        contours, hierarchy = cv2.findContours(img_otsu, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # Find contours
        hierarchy = np.squeeze(hierarchy, axis=(0,))[:, 2:]
        if filter_params: foreground_contours, hole_contours = _filter_contours(contours, hierarchy, filter_params)  # Necessary for filtering out artifacts
        # Contours/holes are stored rescaled to level-0 coordinates.
        self.contours_tissue = self.scaleContourDim(foreground_contours, scale)
        self.holes_tissue = self.scaleHolesDim(hole_contours, scale)
        #exclude_ids = [0,7,9]
        if len(keep_ids) > 0:
            contour_ids = set(keep_ids) - set(exclude_ids)
        else:
            contour_ids = set(np.arange(len(self.contours_tissue))) - set(exclude_ids)
        self.contours_tissue = [self.contours_tissue[i] for i in contour_ids]
        self.holes_tissue = [self.holes_tissue[i] for i in contour_ids]
def visWSI(self, vis_level=0, color = (0,255,0), hole_color = (0,0,255), annot_color=(255,0,0),
line_thickness=250, max_size=None, top_left=None, bot_right=None, custom_downsample=1, view_slide_only=False,
number_contours=False, seg_display=True, annot_display=True):
downsample = self.level_downsamples[vis_level]
scale = [1/downsample[0], 1/downsample[1]]
if top_left is not None and bot_right is not None:
top_left = tuple(top_left)
bot_right = tuple(bot_right)
w, h = tuple((np.array(bot_right) * scale).astype(int) - (np.array(top_left) * scale).astype(int))
region_size = (w, h)
else:
top_left = (0,0)
region_size = self.level_dim[vis_level]
img = np.array(self.wsi.read_region(top_left, vis_level, region_size).convert("RGB"))
if not view_slide_only:
offset = tuple(-(np.array(top_left) * scale).astype(int))
line_thickness = int(line_thickness * math.sqrt(scale[0] * scale[1]))
if self.contours_tissue is not None and seg_display:
if not number_contours:
cv2.drawContours(img, self.scaleContourDim(self.contours_tissue, scale),
-1, color, line_thickness, lineType=cv2.LINE_8, offset=offset)
else: # add numbering to each contour
for idx, cont in enumerate(self.contours_tissue):
contour = np.array(self.scaleContourDim(cont, scale))
M = cv2.moments(contour)
cX = int(M["m10"] / (M["m00"] + 1e-9))
cY = int(M["m01"] / (M["m00"] + 1e-9))
# draw the contour and put text next to center
cv2.drawContours(img, [contour], -1, color, line_thickness, lineType=cv2.LINE_8, offset=offset)
cv2.putText(img, "{}".format(idx), (cX, cY),
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 10)
for holes in self.holes_tissue:
cv2.drawContours(img, self.scaleContourDim(holes, scale),
-1, hole_color, line_thickness, lineType=cv2.LINE_8)
if self.contours_tumor is not None and annot_display:
cv2.drawContours(img, self.scaleContourDim(self.contours_tumor, scale),
-1, annot_color, line_thickness, lineType=cv2.LINE_8, offset=offset)
img = Image.fromarray(img)
w, h = img.size
if custom_downsample > 1:
img = img.resize((int(w/custom_downsample), int(h/custom_downsample)))
if max_size is not None and (w > max_size or h > max_size):
resizeFactor = max_size/w if w > h else max_size/h
img = img.resize((int(w*resizeFactor), int(h*resizeFactor)))
return img
    def createPatches_bag_hdf5(self, save_path, patch_level=0, patch_size=256, step_size=256, save_coord=True, **kwargs):
        """Extract patches from every tissue contour and stream them into a
        single per-slide HDF5 bag (image-storing pipeline).

        Args:
            save_path (str): directory where the bag is written
            patch_level (int): pyramid level to read patches at
            patch_size (int): patch side length at patch_level
            step_size (int): stride between adjacent patches at patch_level
            save_coord (bool): also store patch coordinates in the bag
            **kwargs: forwarded to _getPatchGenerator

        Returns:
            Path of the HDF5 file, or None if no patch was extracted.
        """
        contours = self.contours_tissue
        contour_holes = self.holes_tissue
        print("Creating patches for: ", self.name, "...",)
        elapsed = time.time()
        for idx, cont in enumerate(contours):
            patch_gen = self._getPatchGenerator(cont, idx, patch_level, save_path, patch_size, step_size, **kwargs)
            # The very first patch of the slide initializes the HDF5 file.
            if self.hdf5_file is None:
                try:
                    first_patch = next(patch_gen)
                # empty contour, continue
                except StopIteration:
                    continue
                file_path = initialize_hdf5_bag(first_patch, save_coord=save_coord)
                self.hdf5_file = file_path
            # Remaining patches are appended to the existing bag.
            for patch in patch_gen:
                savePatchIter_bag_hdf5(patch)
        return self.hdf5_file
    def _getPatchGenerator(self, cont, cont_idx, patch_level, save_path, patch_size=256, step_size=256, custom_downsample=1,
        white_black=True, white_thresh=15, black_thresh=50, contour_fn='four_pt', use_padding=True):
        """Yield patch dicts (PIL image + metadata) covering one tissue contour.

        Args:
            cont: OpenCV contour in level-0 coords; None patches the whole level
            cont_idx (int): index of `cont` in self.contours_tissue
            patch_level (int): pyramid level to read patches at
            save_path (str): destination directory (stored in each patch dict)
            patch_size, step_size (int): patch side / stride at patch_level
            custom_downsample (int): 1 or 2; if 2, reads 2x patches and resizes
            white_black (bool): drop nearly-white / nearly-black patches
            white_thresh, black_thresh (int): saturation / RGB thresholds
            contour_fn (str or Contour_Checking_fn): point-in-contour strategy
            use_padding (bool): allow patches to run past the slide border

        Yields:
            dict: patch image plus coordinates and slide metadata.
        """
        start_x, start_y, w, h = cv2.boundingRect(cont) if cont is not None else (0, 0, self.level_dim[patch_level][0], self.level_dim[patch_level][1])
        print("Bounding Box:", start_x, start_y, w, h)
        print("Contour Area:", cv2.contourArea(cont))
        if custom_downsample > 1:
            assert custom_downsample == 2
            target_patch_size = patch_size
            patch_size = target_patch_size * 2
            step_size = step_size * 2
            print("Custom Downsample: {}, Patching at {} x {}, But Final Patch Size is {} x {}".format(custom_downsample, patch_size, patch_size,
                target_patch_size, target_patch_size))
        # Patch/step sizes expressed in level-0 coordinates.
        patch_downsample = (int(self.level_downsamples[patch_level][0]), int(self.level_downsamples[patch_level][1]))
        ref_patch_size = (patch_size*patch_downsample[0], patch_size*patch_downsample[1])
        step_size_x = step_size * patch_downsample[0]
        step_size_y = step_size * patch_downsample[1]
        # Resolve the point-in-contour strategy (string name or instance).
        if isinstance(contour_fn, str):
            if contour_fn == 'four_pt':
                cont_check_fn = isInContourV3_Easy(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)
            elif contour_fn == 'four_pt_hard':
                cont_check_fn = isInContourV3_Hard(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)
            elif contour_fn == 'center':
                cont_check_fn = isInContourV2(contour=cont, patch_size=ref_patch_size[0])
            elif contour_fn == 'basic':
                cont_check_fn = isInContourV1(contour=cont)
            else:
                raise NotImplementedError
        else:
            assert isinstance(contour_fn, Contour_Checking_fn)
            cont_check_fn = contour_fn
        img_w, img_h = self.level_dim[0]
        if use_padding:
            stop_y = start_y+h
            stop_x = start_x+w
        else:
            # Clamp so the last patch still fits entirely inside the slide.
            stop_y = min(start_y+h, img_h-ref_patch_size[1])
            stop_x = min(start_x+w, img_w-ref_patch_size[0])
        count = 0
        for y in range(start_y, stop_y, step_size_y):
            for x in range(start_x, stop_x, step_size_x):
                if not self.isInContours(cont_check_fn, (x,y), self.holes_tissue[cont_idx], ref_patch_size[0]): #point not inside contour and its associated holes
                    continue
                count+=1
                patch_PIL = self.wsi.read_region((x,y), patch_level, (patch_size, patch_size)).convert('RGB')
                if custom_downsample > 1:
                    patch_PIL = patch_PIL.resize((target_patch_size, target_patch_size))
                # Skip background-like patches (nearly black or nearly white).
                if white_black:
                    if isBlackPatch(np.array(patch_PIL), rgbThresh=black_thresh) or isWhitePatch(np.array(patch_PIL), satThresh=white_thresh):
                        continue
                # Coordinates stored in the (possibly downsampled) patch-level frame.
                patch_info = {'x':x // (patch_downsample[0] * custom_downsample), 'y':y // (patch_downsample[1] * custom_downsample), 'cont_idx':cont_idx, 'patch_level':patch_level,
                'downsample': self.level_downsamples[patch_level], 'downsampled_level_dim': tuple(np.array(self.level_dim[patch_level])//custom_downsample), 'level_dim': self.level_dim[patch_level],
                'patch_PIL':patch_PIL, 'name':self.name, 'save_path':save_path}
                yield patch_info
        print("patches extracted: {}".format(count))
@staticmethod
def isInHoles(holes, pt, patch_size):
for hole in holes:
if cv2.pointPolygonTest(hole, (pt[0]+patch_size/2, pt[1]+patch_size/2), False) > 0:
return 1
return 0
@staticmethod
def isInContours(cont_check_fn, pt, holes=None, patch_size=256):
if cont_check_fn(pt):
if holes is not None:
return not WholeSlideImage.isInHoles(holes, pt, patch_size)
else:
return 1
return 0
@staticmethod
def scaleContourDim(contours, scale):
return [np.array(cont * scale, dtype='int32') for cont in contours]
@staticmethod
def scaleHolesDim(contours, scale):
return [[np.array(hole * scale, dtype = 'int32') for hole in holes] for holes in contours]
def _assertLevelDownsamples(self):
level_downsamples = []
dim_0 = self.wsi.level_dimensions[0]
for downsample, dim in zip(self.wsi.level_downsamples, self.wsi.level_dimensions):
estimated_downsample = (dim_0[0]/float(dim[0]), dim_0[1]/float(dim[1]))
level_downsamples.append(estimated_downsample) if estimated_downsample != (downsample, downsample) else level_downsamples.append((downsample, downsample))
return level_downsamples
def process_contours(self, save_path, patch_level=0, patch_size=256, step_size=256, **kwargs):
save_path_hdf5 = os.path.join(save_path, str(self.name) + '.h5')
print("Creating patches for: ", self.name, "...",)
elapsed = time.time()
n_contours = len(self.contours_tissue)
print("Total number of contours to process: ", n_contours)
fp_chunk_size = math.ceil(n_contours * 0.05)
init = True
for idx, cont in enumerate(self.contours_tissue):
if (idx + 1) % fp_chunk_size == fp_chunk_size:
print('Processing contour {}/{}'.format(idx, n_contours))
asset_dict, attr_dict = self.process_contour(cont, self.holes_tissue[idx], patch_level, save_path, patch_size, step_size, **kwargs)
if len(asset_dict) > 0:
if init:
save_hdf5(save_path_hdf5, asset_dict, attr_dict, mode='w')
init = False
else:
save_hdf5(save_path_hdf5, asset_dict, mode='a')
return self.hdf5_file
    def process_contour(self, cont, contour_holes, patch_level, save_path, patch_size = 256, step_size = 256,
        contour_fn='four_pt', use_padding=True, top_left=None, bot_right=None):
        """Compute the valid patch coordinates for a single tissue contour.

        Args:
            cont: OpenCV contour in level-0 coords; None patches the whole level
            contour_holes (list): holes belonging to this contour
            patch_level (int): pyramid level the patches refer to
            save_path (str): destination directory (stored in the attributes)
            patch_size, step_size (int): patch side / stride at patch_level
            contour_fn (str or Contour_Checking_fn): point-in-contour strategy
            use_padding (bool): allow patches past the right/bottom border
            top_left, bot_right: optional ROI (level-0 coords) to clip to

        Returns:
            (asset_dict, attr_dict): {'coords': ndarray} plus HDF5 attributes,
            or ({}, {}) if no valid coordinate was found.
        """
        start_x, start_y, w, h = cv2.boundingRect(cont) if cont is not None else (0, 0, self.level_dim[patch_level][0], self.level_dim[patch_level][1])
        # Patch size expressed in the level-0 reference frame.
        patch_downsample = (int(self.level_downsamples[patch_level][0]), int(self.level_downsamples[patch_level][1]))
        ref_patch_size = (patch_size*patch_downsample[0], patch_size*patch_downsample[1])
        img_w, img_h = self.level_dim[0]
        if use_padding:
            stop_y = start_y+h
            stop_x = start_x+w
        else:
            # Clamp so the last patch still fits entirely inside the slide.
            stop_y = min(start_y+h, img_h-ref_patch_size[1]+1)
            stop_x = min(start_x+w, img_w-ref_patch_size[0]+1)
        print("Bounding Box:", start_x, start_y, w, h)
        print("Contour Area:", cv2.contourArea(cont))
        # Clip the bounding box to the requested ROI, if any.
        if bot_right is not None:
            stop_y = min(bot_right[1], stop_y)
            stop_x = min(bot_right[0], stop_x)
        if top_left is not None:
            start_y = max(top_left[1], start_y)
            start_x = max(top_left[0], start_x)
        if bot_right is not None or top_left is not None:
            w, h = stop_x - start_x, stop_y - start_y
            if w <= 0 or h <= 0:
                print("Contour is not in specified ROI, skip")
                return {}, {}
            else:
                print("Adjusted Bounding Box:", start_x, start_y, w, h)
        # Resolve the point-in-contour strategy (string name or instance).
        if isinstance(contour_fn, str):
            if contour_fn == 'four_pt':
                cont_check_fn = isInContourV3_Easy(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)
            elif contour_fn == 'four_pt_hard':
                cont_check_fn = isInContourV3_Hard(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)
            elif contour_fn == 'center':
                cont_check_fn = isInContourV2(contour=cont, patch_size=ref_patch_size[0])
            elif contour_fn == 'basic':
                cont_check_fn = isInContourV1(contour=cont)
            else:
                raise NotImplementedError
        else:
            assert isinstance(contour_fn, Contour_Checking_fn)
            cont_check_fn = contour_fn
        step_size_x = step_size * patch_downsample[0]
        step_size_y = step_size * patch_downsample[1]
        # Candidate grid over the (clipped) bounding box, level-0 coordinates.
        x_range = np.arange(start_x, stop_x, step=step_size_x)
        y_range = np.arange(start_y, stop_y, step=step_size_y)
        x_coords, y_coords = np.meshgrid(x_range, y_range, indexing='ij')
        coord_candidates = np.array([x_coords.flatten(), y_coords.flatten()]).transpose()
        # Filter the candidates in parallel (worker count capped at 4).
        num_workers = mp.cpu_count()
        if num_workers > 4:
            num_workers = 4
        pool = mp.Pool(num_workers)
        iterable = [(coord, contour_holes, ref_patch_size[0], cont_check_fn) for coord in coord_candidates]
        results = pool.starmap(WholeSlideImage.process_coord_candidate, iterable)
        pool.close()
        results = np.array([result for result in results if result is not None])
        print('Extracted {} coordinates'.format(len(results)))
        # NOTE(review): a contour yielding exactly ONE coordinate is dropped
        # here (condition is >1, not >0) — confirm this is intentional.
        if len(results)>1:
            asset_dict = {'coords' : results}
            attr = {'patch_size' :            patch_size, # To be considered...
                    'patch_level' :           patch_level,
                    'downsample':             self.level_downsamples[patch_level],
                    'downsampled_level_dim' : tuple(np.array(self.level_dim[patch_level])),
                    'level_dim':              self.level_dim[patch_level],
                    'name':                   self.name,
                    'save_path':              save_path}
            attr_dict = { 'coords' : attr}
            return asset_dict, attr_dict
        else:
            return {}, {}
@staticmethod
def process_coord_candidate(coord, contour_holes, ref_patch_size, cont_check_fn):
if WholeSlideImage.isInContours(cont_check_fn, coord, contour_holes, ref_patch_size):
return coord
else:
return None
    def visHeatmap(self, scores, coords, vis_level=-1,
                   top_left=None, bot_right=None,
                   patch_size=(256, 256),
                   blank_canvas=False, canvas_color=(220, 20, 50), alpha=0.4,
                   blur=False, overlap=0.0,
                   segment=True, use_holes=True,
                   convert_to_percentiles=False,
                   binarize=False, thresh=0.5,
                   max_size=None,
                   custom_downsample = 1,
                   cmap='coolwarm'):
        """
        Render an attention heatmap over the WSI and return it as a PIL.Image.

        Args:
            scores (numpy array of float): Attention scores
            coords (numpy array of int, n_patches x 2): Corresponding coordinates (relative to lvl 0)
            vis_level (int): WSI pyramid level to visualize (negative: auto-pick for 32x downsample)
            patch_size (tuple of int): Patch dimensions (relative to lvl 0)
            blank_canvas (bool): Whether to use a blank canvas to draw the heatmap (vs. using the original slide)
            canvas_color (tuple of uint8): Canvas color
            alpha (float [0, 1]): blending coefficient for overlaying heatmap onto original slide
            blur (bool): apply gaussian blurring
            overlap (float [0 1]): percentage of overlap between neighboring patches (only affect radius of blurring)
            segment (bool): whether to use tissue segmentation contour (must have already called self.segmentTissue such that
                            self.contours_tissue and self.holes_tissue are not None
            use_holes (bool): whether to also clip out detected tissue cavities (only in effect when segment == True)
            convert_to_percentiles (bool): whether to convert attention scores to percentiles
            binarize (bool): only display patches > threshold
            thresh (float): binarization threshold (negative: use 1/len(scores))
            max_size (int): Maximum canvas size (clip if goes over)
            custom_downsample (int): additionally downscale the heatmap by specified factor
            cmap (str): name of matplotlib colormap to use
        """
        if vis_level < 0:
            vis_level = self.wsi.get_best_level_for_downsample(32)
        downsample = self.level_downsamples[vis_level]
        scale = [1/downsample[0], 1/downsample[1]] # Scaling from 0 to desired level
        if len(scores.shape) == 2:
            scores = scores.flatten()
        if binarize:
            if thresh < 0:
                threshold = 1.0/len(scores)
            else:
                threshold = thresh
        else:
            # threshold 0.0: every patch with a non-negative score is drawn
            threshold = 0.0
        ##### calculate size of heatmap and filter coordinates/scores outside specified bbox region #####
        if top_left is not None and bot_right is not None:
            scores, coords = screen_coords(scores, coords, top_left, bot_right)
            coords = coords - top_left
            top_left = tuple(top_left)
            bot_right = tuple(bot_right)
            w, h = tuple((np.array(bot_right) * scale).astype(int) - (np.array(top_left) * scale).astype(int))
            region_size = (w, h)
        else:
            region_size = self.level_dim[vis_level]
            top_left = (0,0)
            bot_right = self.level_dim[0]
            w, h = region_size
        # rescale level-0 patch sizes/coords to the visualization level
        patch_size = np.ceil(np.array(patch_size) * np.array(scale)).astype(int)
        coords = np.ceil(coords * np.array(scale)).astype(int)
        print('\ncreating heatmap for: ')
        print('top_left: ', top_left, 'bot_right: ', bot_right)
        print('w: {}, h: {}'.format(w, h))
        print('scaled patch size: ', patch_size)
        ###### normalize filtered scores ######
        if convert_to_percentiles:
            scores = to_percentiles(scores)
            scores /= 100
        ######## calculate the heatmap of raw attention scores (before colormap)
        # by accumulating scores over overlapped regions ######
        # heatmap overlay: tracks attention score over each pixel of heatmap
        # overlay counter: tracks how many times attention score is accumulated over each pixel of heatmap
        overlay = np.full(np.flip(region_size), 0).astype(float)
        counter = np.full(np.flip(region_size), 0).astype(np.uint16)
        count = 0
        for idx in range(len(coords)):
            score = scores[idx]
            coord = coords[idx]
            if score >= threshold:
                if binarize:
                    score=1.0
                    count+=1
            else:
                score=0.0
            # accumulate attention
            overlay[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]] += score
            # accumulate counter
            counter[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]] += 1
        if binarize:
            print('\nbinarized tiles based on cutoff of {}'.format(threshold))
            print('identified {}/{} patches as positive'.format(count, len(coords)))
        # fetch attended region and average accumulated attention
        zero_mask = counter == 0
        if binarize:
            overlay[~zero_mask] = np.around(overlay[~zero_mask] / counter[~zero_mask])
        else:
            overlay[~zero_mask] = overlay[~zero_mask] / counter[~zero_mask]
        del counter
        if blur:
            # kernel grows as overlap shrinks; size forced odd via *2 + 1
            overlay = cv2.GaussianBlur(overlay,tuple((patch_size * (1-overlap)).astype(int) * 2 +1),0)
        if segment:
            tissue_mask = self.get_seg_mask(region_size, scale, use_holes=use_holes, offset=tuple(top_left))
            # return Image.fromarray(tissue_mask) # tissue mask
        if not blank_canvas:
            # downsample original image and use as canvas
            img = np.array(self.wsi.read_region(top_left, vis_level, region_size).convert("RGB"))
        else:
            # use blank canvas
            img = np.array(Image.new(size=region_size, mode="RGB", color=(255,255,255)))
        #return Image.fromarray(img) #raw image
        print('\ncomputing heatmap image')
        print('total of {} patches'.format(len(coords)))
        twenty_percent_chunk = max(1, int(len(coords) * 0.2))
        if isinstance(cmap, str):
            cmap = plt.get_cmap(cmap)
        for idx in range(len(coords)):
            if (idx + 1) % twenty_percent_chunk == 0:
                print('progress: {}/{}'.format(idx, len(coords)))
            score = scores[idx]
            coord = coords[idx]
            if score >= threshold:
                # attention block
                raw_block = overlay[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]]
                # image block (either blank canvas or orig image)
                img_block = img[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]].copy()
                # color block (cmap applied to attention block)
                color_block = (cmap(raw_block) * 255)[:,:,:3].astype(np.uint8)
                if segment:
                    # tissue mask block
                    mask_block = tissue_mask[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]]
                    # copy over only tissue masked portion of color block
                    img_block[mask_block] = color_block[mask_block]
                else:
                    # copy over entire color block
                    img_block = color_block
                # rewrite image block
                img[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]] = img_block.copy()
        #return Image.fromarray(img) #overlay
        print('Done')
        del overlay
        if blur:
            img = cv2.GaussianBlur(img,tuple((patch_size * (1-overlap)).astype(int) * 2 +1),0)
        if alpha < 1.0:
            # blend the heatmap with the underlying slide block-by-block
            img = self.block_blending(img, vis_level, top_left, bot_right, alpha=alpha, blank_canvas=blank_canvas, block_size=1024)
        img = Image.fromarray(img)
        w, h = img.size
        if custom_downsample > 1:
            img = img.resize((int(w/custom_downsample), int(h/custom_downsample)))
        if max_size is not None and (w > max_size or h > max_size):
            resizeFactor = max_size/w if w > h else max_size/h
            img = img.resize((int(w*resizeFactor), int(h*resizeFactor)))
        return img
    def block_blending(self, img, vis_level, top_left, bot_right, alpha=0.5, blank_canvas=False, block_size=1024):
        """Alpha-blend the heatmap `img` with the underlying slide (or a blank
        white canvas), processed in blocks of at most block_size x block_size
        image pixels to bound memory. top_left/bot_right are level-0
        coordinates; `img` is modified in place and returned.
        """
        print('\ncomputing blend')
        downsample = self.level_downsamples[vis_level]
        w = img.shape[1]
        h = img.shape[0]
        block_size_x = min(block_size, w)
        block_size_y = min(block_size, h)
        print('using block size: {} x {}'.format(block_size_x, block_size_y))
        shift = top_left # amount shifted w.r.t. (0,0)
        # iterate the region in level-0 steps that map to block_size image pixels
        for x_start in range(top_left[0], bot_right[0], block_size_x * int(downsample[0])):
            for y_start in range(top_left[1], bot_right[1], block_size_y * int(downsample[1])):
                #print(x_start, y_start)
                # 1. convert wsi coordinates to image coordinates via shift and scale
                x_start_img = int((x_start - shift[0]) / int(downsample[0]))
                y_start_img = int((y_start - shift[1]) / int(downsample[1]))
                # 2. compute end points of blend tile, careful not to go over the edge of the image
                y_end_img = min(h, y_start_img+block_size_y)
                x_end_img = min(w, x_start_img+block_size_x)
                if y_end_img == y_start_img or x_end_img == x_start_img:
                    continue
                #print('start_coord: {} end_coord: {}'.format((x_start_img, y_start_img), (x_end_img, y_end_img)))
                # 3. fetch blend block and size
                blend_block = img[y_start_img:y_end_img, x_start_img:x_end_img]
                blend_block_size = (x_end_img-x_start_img, y_end_img-y_start_img)
                if not blank_canvas:
                    # 4. read actual wsi block as canvas block
                    pt = (x_start, y_start)
                    canvas = np.array(self.wsi.read_region(pt, vis_level, blend_block_size).convert("RGB"))
                else:
                    # 4. OR create blank canvas block
                    canvas = np.array(Image.new(size=blend_block_size, mode="RGB", color=(255,255,255)))
                # 5. blend color block and canvas block
                img[y_start_img:y_end_img, x_start_img:x_end_img] = cv2.addWeighted(blend_block, alpha, canvas, 1 - alpha, 0, canvas)
        return img
    def get_seg_mask(self, region_size, scale, use_holes=False, offset=(0,0)):
        """Rasterize the stored tissue contours into a boolean foreground mask
        of shape (h, w) = flip(region_size). Contours are scaled by `scale`,
        shifted by -offset, and drawn largest-area first so smaller regions
        (and their holes, when use_holes) overwrite correctly.
        """
        print('\ncomputing foreground tissue mask')
        tissue_mask = np.full(np.flip(region_size), 0).astype(np.uint8)
        contours_tissue = self.scaleContourDim(self.contours_tissue, scale)
        offset = tuple((np.array(offset) * np.array(scale) * -1).astype(np.int32))
        contours_holes = self.scaleHolesDim(self.holes_tissue, scale)
        # sort contour/hole pairs by descending tissue area
        contours_tissue, contours_holes = zip(*sorted(zip(contours_tissue, contours_holes), key=lambda x: cv2.contourArea(x[0]), reverse=True))
        for idx in range(len(contours_tissue)):
            cv2.drawContours(image=tissue_mask, contours=contours_tissue, contourIdx=idx, color=(1), offset=offset, thickness=-1)
            if use_holes:
                # carve out cavities belonging to this contour
                cv2.drawContours(image=tissue_mask, contours=contours_holes[idx], contourIdx=-1, color=(0), offset=offset, thickness=-1)
            # contours_holes = self._scaleContourDim(self.holes_tissue, scale, holes=True, area_thresh=area_thresh)
        tissue_mask = tissue_mask.astype(bool)
        print('detected {}/{} of region as tissue'.format(tissue_mask.sum(), tissue_mask.size))
        return tissue_mask
| 33,883 | 44.727395 | 198 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/wsi_core/wsi_utils.py | import h5py
import numpy as np
import os
import pdb
from wsi_core.util_classes import Mosaic_Canvas
from PIL import Image
import math
import cv2
def isWhitePatch(patch, satThresh=5):
    """Flag a patch as white/background when its mean HSV saturation is below satThresh."""
    saturation = cv2.cvtColor(patch, cv2.COLOR_RGB2HSV)[:, :, 1]
    return bool(np.mean(saturation) < satThresh)
def isBlackPatch(patch, rgbThresh=40):
    """Flag a patch as black/background when the mean of every channel is below rgbThresh."""
    channel_means = np.mean(patch, axis=(0, 1))
    return bool(np.all(channel_means < rgbThresh))
def isBlackPatch_S(patch, rgbThresh=20, percentage=0.05):
    """True when more than `percentage` of a PIL patch's pixels have every channel below rgbThresh."""
    total_pixels = patch.size[0] * patch.size[1]
    dark_pixels = np.all(np.array(patch) < rgbThresh, axis=2).sum()
    return bool(dark_pixels > total_pixels * percentage)
def isWhitePatch_S(patch, rgbThresh=220, percentage=0.2):
    """True when more than `percentage` of a PIL patch's pixels have every channel above rgbThresh."""
    total_pixels = patch.size[0] * patch.size[1]
    bright_pixels = np.all(np.array(patch) > rgbThresh, axis=2).sum()
    return bool(bright_pixels > total_pixels * percentage)
def coord_generator(x_start, x_end, x_step, y_start, y_end, y_step, args_dict=None):
    """Yield (x, y) grid coordinates (x outer loop, y inner).

    When args_dict is given, yield a shallow copy of it with the coordinate
    stored under the 'pt' key instead of the bare tuple.
    """
    for x in range(x_start, x_end, x_step):
        for y in range(y_start, y_end, y_step):
            if args_dict is None:
                yield (x, y)
            else:
                payload = args_dict.copy()
                payload['pt'] = (x, y)
                yield payload
def savePatchIter_bag_hdf5(patch):
    """Append one patch (and its coordinate, if a 'coords' dataset exists) to
    the slide's .h5 bag created by initialize_hdf5_bag.

    NOTE: `patch` is a dict unpacked positionally via tuple(patch.values()),
    so its key insertion order must match the producer's exactly.
    """
    x, y, cont_idx, patch_level, downsample, downsampled_level_dim, level_dim, img_patch, name, save_path= tuple(patch.values())
    # add a leading batch axis so the patch can be appended as one row
    img_patch = np.array(img_patch)[np.newaxis,...]
    img_shape = img_patch.shape
    file_path = os.path.join(save_path, name)+'.h5'
    file = h5py.File(file_path, "a")
    dset = file['imgs']
    dset.resize(len(dset) + img_shape[0], axis=0)
    dset[-img_shape[0]:] = img_patch
    if 'coords' in file:
        coord_dset = file['coords']
        coord_dset.resize(len(coord_dset) + img_shape[0], axis=0)
        coord_dset[-img_shape[0]:] = (x,y)
    file.close()
def save_hdf5(output_path, asset_dict, attr_dict= None, mode='a'):
    """Write or append arrays to an HDF5 file.

    For each key in asset_dict: if the dataset does not exist it is created
    resizable along axis 0 and any matching attributes from attr_dict are
    attached; otherwise the new rows are appended. Attributes are only
    written on first creation.

    Args:
        output_path (str): target .h5 path
        asset_dict (dict): key -> numpy array (axis 0 = rows)
        attr_dict (dict, optional): key -> {attr_name: attr_value}
        mode (str): h5py file mode ('a' by default)

    Returns:
        str: output_path
    """
    file = h5py.File(output_path, mode)
    for key, val in asset_dict.items():
        data_shape = val.shape
        if key not in file:
            data_type = val.dtype
            # chunked one row at a time, unlimited along axis 0
            chunk_shape = (1, ) + data_shape[1:]
            maxshape = (None, ) + data_shape[1:]
            dset = file.create_dataset(key, shape=data_shape, maxshape=maxshape, chunks=chunk_shape, dtype=data_type)
            dset[:] = val
            if attr_dict is not None:
                if key in attr_dict.keys():
                    for attr_key, attr_val in attr_dict[key].items():
                        dset.attrs[attr_key] = attr_val
        else:
            dset = file[key]
            dset.resize(len(dset) + data_shape[0], axis=0)
            dset[-data_shape[0]:] = val
    file.close()
    return output_path
def initialize_hdf5_bag(first_patch, save_coord=False):
    """Create a new .h5 patch bag seeded with the first patch.

    NOTE: `first_patch` is a dict unpacked positionally via
    tuple(first_patch.values()); its key order must match
    savePatchIter_bag_hdf5's expectations.

    Returns:
        str: path of the created .h5 file.
    """
    x, y, cont_idx, patch_level, downsample, downsampled_level_dim, level_dim, img_patch, name, save_path = tuple(first_patch.values())
    file_path = os.path.join(save_path, name)+'.h5'
    file = h5py.File(file_path, "w")
    img_patch = np.array(img_patch)[np.newaxis,...]
    dtype = img_patch.dtype
    # Initialize a resizable dataset to hold the output
    img_shape = img_patch.shape
    maxshape = (None,) + img_shape[1:] #maximum dimensions up to which dataset maybe resized (None means unlimited)
    dset = file.create_dataset('imgs',
                                shape=img_shape, maxshape=maxshape, chunks=img_shape, dtype=dtype)
    dset[:] = img_patch
    dset.attrs['patch_level'] = patch_level
    dset.attrs['wsi_name'] = name
    dset.attrs['downsample'] = downsample
    dset.attrs['level_dim'] = level_dim
    dset.attrs['downsampled_level_dim'] = downsampled_level_dim
    if save_coord:
        coord_dset = file.create_dataset('coords', shape=(1, 2), maxshape=(None, 2), chunks=(1, 2), dtype=np.int32)
        coord_dset[:] = (x,y)
    file.close()
    return file_path
def sample_indices(scores, k, start=0.48, end=0.52, convert_to_percentile=False, seed=1):
    """Sample up to k indices whose score lies in [start, end].

    When convert_to_percentile is True, start/end are interpreted as quantiles
    of `scores`. Seeds numpy's global RNG. Returns -1 if the window is empty.
    """
    np.random.seed(seed)
    if convert_to_percentile:
        start_value = np.quantile(scores, start)
        end_value = np.quantile(scores, end)
    else:
        start_value, end_value = start, end
    indices = np.where((scores >= start_value) & (scores <= end_value))[0]
    if len(indices) < 1:
        return -1
    return np.random.choice(indices, min(k, len(indices)), replace=False)
def top_k(scores, k, invert=False):
    """Return indices of the k largest scores (or k smallest when invert=True)."""
    order = scores.argsort()
    if invert:
        return order[:k]
    return order[::-1][:k]
def to_percentiles(scores):
    """Convert scores to percentile ranks in (0, 100], ties averaged."""
    from scipy.stats import rankdata
    ranks = rankdata(scores, 'average')
    return ranks / len(scores) * 100
def screen_coords(scores, coords, top_left, bot_right):
    """Keep only the (score, coord) pairs whose coordinate lies inside the
    inclusive bounding box [top_left, bot_right]."""
    top_left = np.array(top_left)
    bot_right = np.array(bot_right)
    inside = np.all(coords >= top_left, axis=1) & np.all(coords <= bot_right, axis=1)
    return scores[inside], coords[inside]
def sample_rois(scores, coords, k=5, mode='range_sample', seed=1, score_start=0.45, score_end=0.55, top_left=None, bot_right=None):
    """Select k ROIs from (scores, coords) after percentile-normalizing scores.

    Modes: 'range_sample' draws randomly from the [score_start, score_end]
    percentile window, 'topk'/'reverse_topk' take the k highest/lowest scores.
    An optional bounding box filters candidates first.

    Returns:
        dict with 'sampled_coords' and 'sampled_scores'.
    """
    if len(scores.shape) == 2:
        scores = scores.flatten()
    scores = to_percentiles(scores)

    if top_left is not None and bot_right is not None:
        scores, coords = screen_coords(scores, coords, top_left, bot_right)

    if mode == 'range_sample':
        # NOTE(review): sample_indices returns -1 on an empty window, which
        # would silently index the last element below — confirm callers
        # guarantee a non-empty score window.
        picked = sample_indices(scores, k=k, start=score_start, end=score_end,
                                convert_to_percentile=False, seed=seed)
    elif mode in ('topk', 'reverse_topk'):
        picked = top_k(scores, k, invert=(mode == 'reverse_topk'))
    else:
        raise NotImplementedError

    return {'sampled_coords': coords[picked], 'sampled_scores': scores[picked]}
def DrawGrid(img, coord, shape, thickness=2, color=(0,0,0,255)):
    """Draw a rectangular outline of size `shape` with top-left at `coord`.

    Mutates `img` in place and returns it. The rectangle is shifted by
    thickness//2 so the stroke straddles the patch border, clamped at (0, 0).

    Fix: the `color` parameter is now actually passed to cv2.rectangle (it
    was previously ignored in favor of a hard-coded opaque black); the
    default value is unchanged, so existing callers see identical output.
    """
    pt1 = tuple(np.maximum([0, 0], coord - thickness // 2))
    pt2 = tuple(coord - thickness // 2 + np.array(shape))
    cv2.rectangle(img, pt1, pt2, color, thickness=thickness)
    return img
def DrawMap(canvas, patch_dset, coords, patch_size, indices=None, verbose=1, draw_grid=True):
    """Paste image patches stored in an HDF5 dataset onto `canvas`.

    Args:
        canvas (np.ndarray): RGB(A) array drawn into in place
        patch_dset: indexable patch store (HDF5 'imgs' dataset with a
            'wsi_name' attribute)
        coords (np.ndarray): per-patch top-left pixel coordinate (canvas space)
        patch_size (tuple): (w, h) each patch is resized to before pasting
        indices: subset of patch indices to draw (default: all)
        verbose (int): >0 prints progress every ~10%
        draw_grid (bool): outline each pasted patch

    Returns:
        PIL.Image view of the canvas.
    """
    if indices is None:
        indices = np.arange(len(coords))
    total = len(indices)
    if verbose > 0:
        ten_percent_chunk = math.ceil(total * 0.1)
        print('start stitching {}'.format(patch_dset.attrs['wsi_name']))
    for idx in range(total):
        if verbose > 0:
            if idx % ten_percent_chunk == 0:
                print('progress: {}/{} stitched'.format(idx, total))
        patch_id = indices[idx]
        patch = patch_dset[patch_id]
        patch = cv2.resize(patch, patch_size)
        coord = coords[patch_id]
        # clip the paste region so patches at the border don't overrun the canvas
        canvas_crop_shape = canvas[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0], :3].shape[:2]
        canvas[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0], :3] = patch[:canvas_crop_shape[0], :canvas_crop_shape[1], :]
        if draw_grid:
            DrawGrid(canvas, coord, patch_size)
    return Image.fromarray(canvas)
def DrawMapFromCoords(canvas, wsi_object, coords, patch_size, vis_level, indices=None, verbose=1, draw_grid=True):
    """Stitch patches onto `canvas` by reading each region from the WSI on
    the fly (instead of a stored patch bag, cf. DrawMap).

    `coords` and `patch_size` are in level-0 space; both are downscaled to
    `vis_level` via the slide's level_downsamples before pasting.

    Returns:
        PIL.Image view of the canvas.
    """
    downsamples = wsi_object.wsi.level_downsamples[vis_level]
    if indices is None:
        indices = np.arange(len(coords))
    total = len(indices)
    if verbose > 0:
        ten_percent_chunk = math.ceil(total * 0.1)
    # convert the level-0 patch size to vis_level pixels
    patch_size = tuple(np.ceil((np.array(patch_size)/np.array(downsamples))).astype(np.int32))
    print('downscaled patch size: {}x{}'.format(patch_size[0], patch_size[1]))
    for idx in range(total):
        if verbose > 0:
            if idx % ten_percent_chunk == 0:
                print('progress: {}/{} stitched'.format(idx, total))
        patch_id = indices[idx]
        coord = coords[patch_id]
        # read_region takes level-0 coords; the returned patch is at vis_level
        patch = np.array(wsi_object.wsi.read_region(tuple(coord), vis_level, patch_size).convert("RGB"))
        coord = np.ceil(coord / downsamples).astype(np.int32)
        # clip the paste region so patches at the border don't overrun the canvas
        canvas_crop_shape = canvas[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0], :3].shape[:2]
        canvas[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0], :3] = patch[:canvas_crop_shape[0], :canvas_crop_shape[1], :]
        if draw_grid:
            DrawGrid(canvas, coord, patch_size)
    return Image.fromarray(canvas)
def StitchPatches(hdf5_file_path, downscale=16, draw_grid=False, bg_color=(0,0,0), alpha=-1):
    """Stitch pre-extracted patches from an .h5 bag into one downscaled image.

    Reads 'imgs' and 'coords' from the file, downscales canvas, coordinates
    and patch size by `downscale`, and pastes everything via DrawMap.
    alpha < 0 (or -1) yields an RGB canvas; otherwise RGBA with that opacity.

    Raises:
        Image.DecompressionBombError: if the downscaled canvas still exceeds
            PIL's pixel limit.

    Returns:
        PIL.Image of the stitched canvas.
    """
    file = h5py.File(hdf5_file_path, 'r')
    dset = file['imgs']
    coords = file['coords'][:]
    if 'downsampled_level_dim' in dset.attrs.keys():
        w, h = dset.attrs['downsampled_level_dim']
    else:
        w, h = dset.attrs['level_dim']
    print('original size: {} x {}'.format(w, h))
    w = w // downscale
    h = h //downscale
    coords = (coords / downscale).astype(np.int32)
    print('downscaled size for stiching: {} x {}'.format(w, h))
    print('number of patches: {}'.format(len(dset)))
    img_shape = dset[0].shape
    print('patch shape: {}'.format(img_shape))
    downscaled_shape = (img_shape[1] // downscale, img_shape[0] // downscale)
    if w*h > Image.MAX_IMAGE_PIXELS:
        raise Image.DecompressionBombError("Visualization Downscale %d is too large" % downscale)
    if alpha < 0 or alpha == -1:
        heatmap = Image.new(size=(w,h), mode="RGB", color=bg_color)
    else:
        heatmap = Image.new(size=(w,h), mode="RGBA", color=bg_color + (int(255 * alpha),))
    heatmap = np.array(heatmap)
    heatmap = DrawMap(heatmap, dset, coords, downscaled_shape, indices=None, draw_grid=draw_grid)
    file.close()
    return heatmap
def StitchCoords(hdf5_file_path, wsi_object, downscale=16, draw_grid=False, bg_color=(0,0,0), alpha=-1):
    """Stitch a thumbnail by reading each patch directly from the WSI at the
    pyramid level closest to `downscale` (coords-only .h5; no stored pixels).

    alpha < 0 (or -1) yields an RGB canvas; otherwise RGBA with that opacity.

    Raises:
        Image.DecompressionBombError: if the chosen level is still too large.

    Returns:
        PIL.Image of the stitched canvas.
    """
    wsi = wsi_object.getOpenSlide()
    vis_level = wsi.get_best_level_for_downsample(downscale)
    file = h5py.File(hdf5_file_path, 'r')
    dset = file['coords']
    coords = dset[:]
    w, h = wsi.level_dimensions[0]
    print('start stitching {}'.format(dset.attrs['name']))
    print('original size: {} x {}'.format(w, h))
    w, h = wsi.level_dimensions[vis_level]
    print('downscaled size for stiching: {} x {}'.format(w, h))
    print('number of patches: {}'.format(len(coords)))
    patch_size = dset.attrs['patch_size']
    patch_level = dset.attrs['patch_level']
    print('patch size: {}x{} patch level: {}'.format(patch_size, patch_size, patch_level))
    # convert the stored patch size to level-0 reference pixels
    patch_size = tuple((np.array((patch_size, patch_size)) * wsi.level_downsamples[patch_level]).astype(np.int32))
    print('ref patch size: {}x{}'.format(patch_size, patch_size))
    if w*h > Image.MAX_IMAGE_PIXELS:
        raise Image.DecompressionBombError("Visualization Downscale %d is too large" % downscale)
    if alpha < 0 or alpha == -1:
        heatmap = Image.new(size=(w,h), mode="RGB", color=bg_color)
    else:
        heatmap = Image.new(size=(w,h), mode="RGBA", color=bg_color + (int(255 * alpha),))
    heatmap = np.array(heatmap)
    heatmap = DrawMapFromCoords(heatmap, wsi_object, coords, patch_size, vis_level, indices=None, draw_grid=draw_grid)
    file.close()
    return heatmap
def SamplePatches(coords_file_path, save_file_path, wsi_object,
    patch_level=0, custom_downsample=1, patch_size=256, sample_num=100, seed=1, stitch=True, verbose=1, mode='w'):
    """Randomly sample up to `sample_num` patches listed in a coords .h5 file,
    save them (with coords) to `save_file_path`, and optionally paste them
    onto a Mosaic_Canvas for quick visual inspection.

    Negative patch_level / patch_size fall back to the values stored in the
    coords file. Seeds numpy's global RNG for reproducible sampling.

    Returns:
        (canvas or None, total number of coords, number of sampled patches)
    """
    file = h5py.File(coords_file_path, 'r')
    dset = file['coords']
    coords = dset[:]

    h5_patch_size = dset.attrs['patch_size']
    h5_patch_level = dset.attrs['patch_level']

    if verbose>0:
        print('in .h5 file: total number of patches: {}'.format(len(coords)))
        print('in .h5 file: patch size: {}x{} patch level: {}'.format(h5_patch_size, h5_patch_size, h5_patch_level))

    if patch_level < 0:
        patch_level = h5_patch_level

    if patch_size < 0:
        patch_size = h5_patch_size

    np.random.seed(seed)
    indices = np.random.choice(np.arange(len(coords)), min(len(coords), sample_num), replace=False)

    target_patch_size = np.array([patch_size, patch_size])

    if custom_downsample > 1:
        target_patch_size = (np.array([patch_size, patch_size]) / custom_downsample).astype(np.int32)

    if stitch:
        canvas = Mosaic_Canvas(patch_size=target_patch_size[0], n=sample_num, downscale=4, n_per_row=10, bg_color=(0,0,0), alpha=-1)
    else:
        canvas = None

    for idx in indices:
        coord = coords[idx]
        patch = wsi_object.wsi.read_region(coord, patch_level, tuple([patch_size, patch_size])).convert('RGB')
        if custom_downsample > 1:
            patch = patch.resize(tuple(target_patch_size))

        # if isBlackPatch_S(patch, rgbThresh=20, percentage=0.05) or isWhitePatch_S(patch, rgbThresh=220, percentage=0.25):
        #     continue

        if stitch:
            canvas.paste_patch(patch)

        asset_dict = {'imgs': np.array(patch)[np.newaxis,...], 'coords': coord}
        # first write truncates ('w'); subsequent writes append ('a')
        save_hdf5(save_file_path, asset_dict, mode=mode)
        mode='a'

    return canvas, len(coords), len(indices)
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/wsi_core/batch_process_utils.py | import pandas as pd
import numpy as np
import pdb
'''
initiate a pandas df describing a list of slides to process
args:
slides (df or array-like):
array-like structure containing list of slide ids, if df, these ids assumed to be
stored under the 'slide_id' column
seg_params (dict): segmentation paramters
filter_params (dict): filter parameters
vis_params (dict): visualization paramters
patch_params (dict): patching paramters
use_heatmap_args (bool): whether to include heatmap arguments such as ROI coordinates
'''
def initialize_df(slides, seg_params, filter_params, vis_params, patch_params,
                  use_heatmap_args=False, save_patches=False):
    """Build the per-slide processing DataFrame.

    Args:
        slides (pd.DataFrame or array-like): slide ids; if a DataFrame, ids
            are read from the 'slide_id' column and any parameter columns it
            already carries are preserved (only NaN entries get defaults).
        seg_params / filter_params / vis_params / patch_params (dict):
            default parameter values broadcast into the parameter columns.
        use_heatmap_args (bool): add 'label' and ROI columns (x1, x2, y1, y2)
            initialized to NaN.
        save_patches (bool): add 'white_thresh'/'black_thresh' columns.

    Returns:
        pd.DataFrame: one row per slide with all processing parameters.

    Fix: the ROI columns were built with `np.empty((total)).fill(np.NaN)`,
    which evaluates to None because ndarray.fill() returns None (and np.NaN
    was removed in NumPy 2.0); they are now true float NaN arrays.
    """
    total = len(slides)
    if isinstance(slides, pd.DataFrame):
        slide_ids = slides.slide_id.values
    else:
        slide_ids = slides
    default_df_dict = {'slide_id': slide_ids, 'process': np.full((total), 1, dtype=np.uint8)}

    # initiate empty labels in case not provided
    if use_heatmap_args:
        default_df_dict.update({'label': np.full((total), -1)})

    default_df_dict.update({
        'status': np.full((total), 'tbp'),
        # seg params
        'seg_level': np.full((total), int(seg_params['seg_level']), dtype=np.int8),
        'sthresh': np.full((total), int(seg_params['sthresh']), dtype=np.uint8),
        'mthresh': np.full((total), int(seg_params['mthresh']), dtype=np.uint8),
        'close': np.full((total), int(seg_params['close']), dtype=np.uint32),
        'use_otsu': np.full((total), bool(seg_params['use_otsu']), dtype=bool),
        'keep_ids': np.full((total), seg_params['keep_ids']),
        'exclude_ids': np.full((total), seg_params['exclude_ids']),

        # filter params
        'a_t': np.full((total), int(filter_params['a_t']), dtype=np.float32),
        'a_h': np.full((total), int(filter_params['a_h']), dtype=np.float32),
        'max_n_holes': np.full((total), int(filter_params['max_n_holes']), dtype=np.uint32),

        # vis params
        'vis_level': np.full((total), int(vis_params['vis_level']), dtype=np.int8),
        'line_thickness': np.full((total), int(vis_params['line_thickness']), dtype=np.uint32),

        # patching params
        'use_padding': np.full((total), bool(patch_params['use_padding']), dtype=bool),
        'contour_fn': np.full((total), patch_params['contour_fn'])
    })

    if save_patches:
        default_df_dict.update({
            'white_thresh': np.full((total), int(patch_params['white_thresh']), dtype=np.uint8),
            'black_thresh': np.full((total), int(patch_params['black_thresh']), dtype=np.uint8)})

    if use_heatmap_args:
        # ROI coordinates default to NaN until supplied by the user
        default_df_dict.update({'x1': np.full((total), np.nan),
                                'x2': np.full((total), np.nan),
                                'y1': np.full((total), np.nan),
                                'y2': np.full((total), np.nan)})

    if isinstance(slides, pd.DataFrame):
        temp_copy = pd.DataFrame(default_df_dict) # temporary dataframe w/ default params
        # find key in provided df
        # if exist, fill empty fields w/ default values, else, insert the default values as a new column
        for key in default_df_dict.keys():
            if key in slides.columns:
                mask = slides[key].isna()
                slides.loc[mask, key] = temp_copy.loc[mask, key]
            else:
                slides.insert(len(slides.columns), key, default_df_dict[key])
    else:
        slides = pd.DataFrame(default_df_dict)

    return slides
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_utils.py | from collections import OrderedDict
from os.path import join
import math
import pdb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Attention Network without Gating (2 fc layers)
args:
L: input feature dimension
D: hidden layer dimension
dropout: whether to use dropout (p = 0.25)
n_classes: number of classes
"""
class Attn_Net(nn.Module):
    """Two-layer attention scorer (no gating).

    Maps N x L features to N x n_classes attention logits and echoes the
    input features back unchanged.
    """

    def __init__(self, L = 1024, D = 256, dropout = False, n_classes = 1):
        super(Attn_Net, self).__init__()
        layers = [nn.Linear(L, D), nn.Tanh()]
        if dropout:
            layers.append(nn.Dropout(0.25))
        layers.append(nn.Linear(D, n_classes))
        # keep attribute name 'module' so checkpoints remain compatible
        self.module = nn.Sequential(*layers)

    def forward(self, x):
        # (N x n_classes attention logits, untouched input features)
        return self.module(x), x
"""
Attention Network with Sigmoid Gating (3 fc layers)
args:
L: input feature dimension
D: hidden layer dimension
dropout: whether to use dropout (p = 0.25)
n_classes: number of classes
"""
class Attn_Net_Gated(nn.Module):
    """Attention network with sigmoid gating (3 fc layers).

    Computes tanh(W_a x) * sigmoid(W_b x) and projects the gated product to
    n_classes attention logits; the input features are returned unchanged.
    """

    def __init__(self, L = 1024, D = 256, dropout = False, n_classes = 1):
        r"""
        args:
            L (int): input feature dimension
            D (int): hidden layer dimension
            dropout (bool): whether to apply dropout (p = 0.25)
            n_classes (int): number of classes
        """
        super(Attn_Net_Gated, self).__init__()
        a_layers = [nn.Linear(L, D), nn.Tanh()]
        b_layers = [nn.Linear(L, D), nn.Sigmoid()]
        if dropout:
            a_layers.append(nn.Dropout(0.25))
            b_layers.append(nn.Dropout(0.25))
        # keep attribute names so state_dict keys match existing checkpoints
        self.attention_a = nn.Sequential(*a_layers)
        self.attention_b = nn.Sequential(*b_layers)
        self.attention_c = nn.Linear(D, n_classes)

    def forward(self, x):
        gated = self.attention_a(x) * self.attention_b(x)
        A = self.attention_c(gated)  # N x n_classes
        return A, x
def init_max_weights(module):
    r"""
    Initialize every linear layer in `module` in place.

    Weights are drawn from N(0, 1/sqrt(fan_in)) and biases are zeroed.

    args:
        module (torch.nn.Module): model whose nn.Linear submodules are initialized

    Fixes vs. original: uses isinstance() instead of an exact type check (so
    nn.Linear subclasses are covered), drops the redundant function-local
    re-imports of math / torch.nn (both imported at module top), and skips
    the bias update for layers built with bias=False instead of crashing.
    """
    for m in module.modules():
        if isinstance(m, nn.Linear):
            stdv = 1. / math.sqrt(m.weight.size(1))
            m.weight.data.normal_(0, stdv)
            if m.bias is not None:
                m.bias.data.zero_()
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_dgcn.py | from os.path import join
from collections import OrderedDict
import pdb
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Sequential as Seq
from torch.nn import Linear, LayerNorm, ReLU
#from torch_geometric.nn import GINConv
#from torch_geometric.transforms.normalize_features import NormalizeFeatures
from models.model_utils import *
######################################
# DeepGraphConv Implementation #
######################################
class DeepGraphConv(torch.nn.Module):
    """Graph-convolutional MIL model: three GIN layers over a patch graph,
    gated attention pooling, and a linear classifier.

    NOTE(review): GINConv's import is commented out at the top of this file
    (torch_geometric); instantiating this class will raise NameError unless
    GINConv is provided by the caller's environment — confirm deployment.
    """
    def __init__(self, edge_agg='latent', resample=0, num_features=1024, hidden_dim=256,
        linear_dim=256, use_edges=False, dropout=0.25, n_classes=4):
        super(DeepGraphConv, self).__init__()
        self.use_edges = use_edges
        self.resample = resample
        # 'spatial' uses data.edge_index; 'latent' uses data.edge_latent (see forward)
        self.edge_agg = edge_agg

        if self.resample > 0:
            # feature-level dropout applied to inputs when resampling is enabled
            self.fc = nn.Sequential(*[nn.Dropout(self.resample)])

        self.conv1 = GINConv(Seq(nn.Linear(num_features, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim)))
        self.conv2 = GINConv(Seq(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim)))
        self.conv3 = GINConv(Seq(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim)))

        # gated attention pooling over node embeddings
        self.path_attention_head = Attn_Net_Gated(L=hidden_dim, D=hidden_dim, dropout=dropout, n_classes=1)
        self.path_rho = nn.Sequential(*[nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Dropout(dropout)])

        self.classifier = torch.nn.Linear(hidden_dim, n_classes)

    def relocate(self):
        # Spread the GIN layers and attention head across all visible GPUs;
        # the pooling MLP and classifier stay on a single device.
        from torch_geometric.nn import DataParallel
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if torch.cuda.device_count() >= 1:
            device_ids = list(range(torch.cuda.device_count()))
            self.conv1 = nn.DataParallel(self.conv1, device_ids=device_ids).to('cuda:0')
            self.conv2 = nn.DataParallel(self.conv2, device_ids=device_ids).to('cuda:0')
            self.conv3 = nn.DataParallel(self.conv3, device_ids=device_ids).to('cuda:0')
            self.path_attention_head = nn.DataParallel(self.path_attention_head, device_ids=device_ids).to('cuda:0')

        self.path_rho = self.path_rho.to(device)
        self.classifier = self.classifier.to(device)

    def forward(self, **kwargs):
        # kwargs['x_path']: a torch_geometric Data object with node features x
        # and edge indices (spatial graph or latent kNN graph).
        data = kwargs['x_path']

        x = data.x
        if self.edge_agg == 'spatial':
            edge_index = data.edge_index
        elif self.edge_agg == 'latent':
            edge_index = data.edge_latent
        batch = data.batch
        edge_attr = None

        if self.resample:
            x = self.fc(x)

        x1 = F.relu(self.conv1(x=x, edge_index=edge_index))
        x2 = F.relu(self.conv2(x1, edge_index, edge_attr))
        x3 = F.relu(self.conv3(x2, edge_index, edge_attr))

        h_path = x3

        # attention-weighted mean over nodes -> single bag embedding
        A_path, h_path = self.path_attention_head(h_path)
        A_path = torch.transpose(A_path, 1, 0)
        h_path = torch.mm(F.softmax(A_path, dim=1) , h_path)
        h_path = self.path_rho(h_path).squeeze()
        h = h_path # [256] vector

        logits = self.classifier(h).unsqueeze(0) # logits needs to be a [1 x 4] vector
        Y_prob = F.softmax(logits, dim = 1)
        Y_hat = torch.topk(logits, 1, dim = 1)[1]

        return logits, Y_prob, Y_hat, 0, 0 # The last two return are just dummy vars
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_mil.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import initialize_weights
import numpy as np
class MIL_fc(nn.Module):
    """Max-pooling MIL classifier for binary problems: scores every instance
    with a shared fc head and predicts from the top-k (k=1 supported here)
    instance(s) by positive-class probability.
    """
    def __init__(self, path_input_dim=384, gate = True, size_arg = "small", dropout = False, n_classes = 2, top_k=1):
        super(MIL_fc, self).__init__()
        assert n_classes == 2
        self.size_dict = {"small": [path_input_dim, path_input_dim]}
        size = self.size_dict[size_arg]
        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))
        fc.append(nn.Linear(size[1], n_classes))
        self.classifier= nn.Sequential(*fc)
        initialize_weights(self)
        self.top_k=top_k

    def relocate(self):
        device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.classifier.to(device)

    def forward(self, h, return_features=False, **kwargs):
        # h: bag of instance features (K x path_input_dim)
        if return_features:
            # NOTE(review): indexing self.classifier.module assumes the
            # classifier has been wrapped in nn.DataParallel and was built
            # with dropout (4 sub-layers) — confirm before using this path.
            h = self.classifier.module[:3](h)
            logits = self.classifier.module[3](h)
        else:
            logits = self.classifier(h) # K x 1

        y_probs = F.softmax(logits, dim = 1)
        # pick the instance with the highest positive-class (index 1) probability
        top_instance_idx = torch.topk(y_probs[:, 1], self.top_k, dim=0)[1].view(1,)
        top_instance = torch.index_select(logits, dim=0, index=top_instance_idx)
        Y_hat = torch.topk(top_instance, 1, dim = 1)[1]
        Y_prob = F.softmax(top_instance, dim = 1)
        results_dict = {}

        if return_features:
            top_features = torch.index_select(h, dim=0, index=top_instance_idx)
            results_dict.update({'features': top_features})
        return top_instance, Y_prob, Y_hat, y_probs, results_dict
class MIL_fc_mc(nn.Module):
    """Max-pooling MIL classifier for multi-class problems (n_classes > 2):
    one binary scoring head per class; prediction comes from the single
    (instance, class) cell with the globally highest probability.
    """
    def __init__(self, path_input_dim=384, gate = True, size_arg = "small", dropout = False, n_classes = 2, top_k=1):
        super(MIL_fc_mc, self).__init__()
        assert n_classes > 2
        self.size_dict = {"small": [path_input_dim, path_input_dim]}
        size = self.size_dict[size_arg]
        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))
        self.fc = nn.Sequential(*fc)

        # one independent 1-logit head per class
        self.classifiers = nn.ModuleList([nn.Linear(size[1], 1) for i in range(n_classes)])
        initialize_weights(self)
        self.top_k=top_k
        self.n_classes = n_classes
        assert self.top_k == 1

    def relocate(self):
        device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.fc = self.fc.to(device)
        self.classifiers = self.classifiers.to(device)

    def forward(self, h, return_features=False, **kwargs):
        device = h.device
        h = self.fc(h)
        logits = torch.empty(h.size(0), self.n_classes).float().to(device)

        for c in range(self.n_classes):
            if isinstance(self.classifiers, nn.DataParallel):
                logits[:, c] = self.classifiers.module[c](h).squeeze(1)
            else:
                logits[:, c] = self.classifiers[c](h).squeeze(1)

        y_probs = F.softmax(logits, dim = 1)
        # m: flat argmax over the K x C probability matrix;
        # m // n_classes recovers the instance index, m % n_classes the class
        m = y_probs.view(1, -1).argmax(1)
        top_indices = torch.cat(((m // self.n_classes).view(-1, 1), (m % self.n_classes).view(-1, 1)), dim=1).view(-1, 1)
        top_instance = logits[top_indices[0]]

        Y_hat = top_indices[1]
        Y_prob = y_probs[top_indices[0]]

        results_dict = {}

        if return_features:
            top_features = torch.index_select(h, dim=0, index=top_indices[0])
            results_dict.update({'features': top_features})
        return top_instance, Y_prob, Y_hat, y_probs, results_dict
| 3,647 | 36.22449 | 121 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_clam.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.utils import initialize_weights
import numpy as np
from models.model_utils import *
"""
args:
gate: whether to use gated attention network
size_arg: config for network size
dropout: whether to use dropout
k_sample: number of positive/neg patches to sample for instance-level training
dropout: whether to use dropout (p = 0.25)
n_classes: number of classes
instance_loss_fn: loss function to supervise instance-level training
subtyping: whether it's a subtyping problem
"""
class CLAM_SB(nn.Module):
    """Single-branch CLAM: one attention branch shared across all classes.

    See the module-level docstring above for the meaning of the constructor
    arguments (gate, size_arg, dropout, k_sample, n_classes,
    instance_loss_fn, subtyping).
    """
    def __init__(self, path_input_dim=1024, gate = True, size_arg = "small", dropout = False, k_sample=8, n_classes=2,
        instance_loss_fn=nn.CrossEntropyLoss(), subtyping=False):
        super(CLAM_SB, self).__init__()
        # Layer widths: [input dim, attention backbone dim, attention hidden dim].
        self.size_dict = {"small": [path_input_dim, 512, 256], "big": [path_input_dim, 512, 384]}
        size = self.size_dict[size_arg]
        if path_input_dim == 384:
            # 384-d (ViT-S style) inputs skip the 512-d projection.
            size = [384, 384, 256]
        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))
        if gate:
            attention_net = Attn_Net_Gated(L = size[1], D = size[2], dropout = dropout, n_classes = 1)
        else:
            attention_net = Attn_Net(L = size[1], D = size[2], dropout = dropout, n_classes = 1)
        fc.append(attention_net)
        # attention_net: N x D -> (attention scores, projected patch features).
        self.attention_net = nn.Sequential(*fc)
        self.classifiers = nn.Linear(size[1], n_classes)
        # One binary (in-class vs out-of-class) head per slide-level class,
        # used only for the instance-level clustering loss.
        instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)]
        self.instance_classifiers = nn.ModuleList(instance_classifiers)
        self.k_sample = k_sample
        self.instance_loss_fn = instance_loss_fn
        self.n_classes = n_classes
        self.subtyping = subtyping
        initialize_weights(self)
    def relocate(self):
        # Move all submodules to GPU when available.
        device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.attention_net = self.attention_net.to(device)
        self.classifiers = self.classifiers.to(device)
        self.instance_classifiers = self.instance_classifiers.to(device)
    @staticmethod
    def create_positive_targets(length, device):
        # Vector of `length` ones (positive pseudo-labels) on `device`.
        return torch.full((length, ), 1, device=device, dtype=torch.long)
    @staticmethod
    def create_negative_targets(length, device):
        # Vector of `length` zeros (negative pseudo-labels) on `device`.
        return torch.full((length, ), 0, device=device, dtype=torch.long)
    #instance-level evaluation for in-the-class attention branch
    def inst_eval(self, A, h, classifier):
        """Pseudo-label the k most attended patches as positives and the k
        least attended as negatives, then score them with `classifier`.

        Args:
            A: attention scores (1 x N or N) for one branch.
            h: N x D patch features.
            classifier: binary in/out-of-class linear head.

        Returns:
            (instance_loss, predictions, pseudo-targets)
        """
        device=h.device
        if len(A.shape) == 1:
            A = A.view(1, -1)
        # [1] selects topk indices; [-1] drops the leading branch dimension.
        top_p_ids = torch.topk(A, self.k_sample)[1][-1]
        top_p = torch.index_select(h, dim=0, index=top_p_ids)
        # Least-attended patches = topk of the negated attention scores.
        top_n_ids = torch.topk(-A, self.k_sample, dim=1)[1][-1]
        top_n = torch.index_select(h, dim=0, index=top_n_ids)
        p_targets = self.create_positive_targets(self.k_sample, device)
        n_targets = self.create_negative_targets(self.k_sample, device)
        all_targets = torch.cat([p_targets, n_targets], dim=0)
        all_instances = torch.cat([top_p, top_n], dim=0)
        logits = classifier(all_instances)
        all_preds = torch.topk(logits, 1, dim = 1)[1].squeeze(1)
        instance_loss = self.instance_loss_fn(logits, all_targets)
        return instance_loss, all_preds, all_targets
    #instance-level evaluation for out-of-the-class attention branch
    def inst_eval_out(self, A, h, classifier):
        """Score the k most attended patches as negatives (subtyping mode):
        strongly attended patches of a different class should not be positive
        for this class's head."""
        device=h.device
        if len(A.shape) == 1:
            A = A.view(1, -1)
        top_p_ids = torch.topk(A, self.k_sample)[1][-1]
        top_p = torch.index_select(h, dim=0, index=top_p_ids)
        p_targets = self.create_negative_targets(self.k_sample, device)
        logits = classifier(top_p)
        p_preds = torch.topk(logits, 1, dim = 1)[1].squeeze(1)
        instance_loss = self.instance_loss_fn(logits, p_targets)
        return instance_loss, p_preds, p_targets
    def forward(self, h, label=None, instance_eval=False, return_features=False, attention_only=False, **kwargs):
        """Slide-level forward pass over one bag of patch features.

        Args:
            h: N x D patch feature matrix.
            label: slide label tensor; required when instance_eval is True.
            instance_eval: if True, also compute the instance clustering loss.
            return_features: if True, include the bag embedding M in results.
            attention_only: if True, return raw attention scores and stop.

        Returns:
            (logits, Y_prob, Y_hat, A_raw, results_dict)
        """
        device = h.device
        A, h = self.attention_net(h)  # NxK
        A = torch.transpose(A, 1, 0)  # KxN
        if attention_only:
            return A
        A_raw = A
        A = F.softmax(A, dim=1)  # softmax over N
        if instance_eval:
            total_inst_loss = 0.0
            all_preds = []
            all_targets = []
            inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze() #binarize label
            for i in range(len(self.instance_classifiers)):
                inst_label = inst_labels[i].item()
                classifier = self.instance_classifiers[i]
                if inst_label == 1: #in-the-class:
                    instance_loss, preds, targets = self.inst_eval(A, h, classifier)
                    all_preds.extend(preds.cpu().numpy())
                    all_targets.extend(targets.cpu().numpy())
                else: #out-of-the-class
                    if self.subtyping:
                        instance_loss, preds, targets = self.inst_eval_out(A, h, classifier)
                        all_preds.extend(preds.cpu().numpy())
                        all_targets.extend(targets.cpu().numpy())
                    else:
                        continue
                total_inst_loss += instance_loss
            if self.subtyping:
                # Average over class heads when every head contributes a loss.
                total_inst_loss /= len(self.instance_classifiers)
        # Attention-weighted bag embedding (1 x D).
        M = torch.mm(A, h)
        logits = self.classifiers(M)
        Y_hat = torch.topk(logits, 1, dim = 1)[1]
        Y_prob = F.softmax(logits, dim = 1)
        if instance_eval:
            results_dict = {'instance_loss': total_inst_loss, 'inst_labels': np.array(all_targets),
            'inst_preds': np.array(all_preds)}
        else:
            results_dict = {}
        if return_features:
            results_dict.update({'features': M})
        return logits, Y_prob, Y_hat, A_raw, results_dict
class CLAM_MB(CLAM_SB):
    """Multi-branch CLAM: one attention branch and one bag classifier per class.

    Inherits the instance-evaluation helpers from CLAM_SB. The `path_input_dim`
    keyword (added last for backward compatibility) mirrors CLAM_SB instead of
    hard-coding 1024-dimensional inputs.
    """
    def __init__(self, gate = True, size_arg = "small", dropout = False, k_sample=8, n_classes=2,
        instance_loss_fn=nn.CrossEntropyLoss(), subtyping=False, path_input_dim=1024):
        """
        Args mirror CLAM_SB (see module docstring); `path_input_dim` sets the
        input patch-feature dimension (default 1024 preserves old behavior).
        """
        nn.Module.__init__(self)
        self.size_dict = {"small": [path_input_dim, 512, 256], "big": [path_input_dim, 512, 384]}
        size = self.size_dict[size_arg]
        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))
        # n_classes attention branches: one attention score column per class.
        if gate:
            attention_net = Attn_Net_Gated(L = size[1], D = size[2], dropout = dropout, n_classes = n_classes)
        else:
            attention_net = Attn_Net(L = size[1], D = size[2], dropout = dropout, n_classes = n_classes)
        fc.append(attention_net)
        self.attention_net = nn.Sequential(*fc)
        bag_classifiers = [nn.Linear(size[1], 1) for i in range(n_classes)] #use an independent linear layer to predict each class
        self.classifiers = nn.ModuleList(bag_classifiers)
        instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)]
        self.instance_classifiers = nn.ModuleList(instance_classifiers)
        self.k_sample = k_sample
        self.instance_loss_fn = instance_loss_fn
        self.n_classes = n_classes
        self.subtyping = subtyping
        initialize_weights(self)
    def forward(self, h, label=None, instance_eval=False, return_features=False, attention_only=False, **kwargs):
        """Slide-level forward pass; same contract as CLAM_SB.forward, but each
        class c uses its own attention row A[c] and its own bag classifier.

        Returns:
            (logits, Y_prob, Y_hat, A_raw, results_dict)
        """
        device = h.device
        A, h = self.attention_net(h)  # NxK
        A = torch.transpose(A, 1, 0)  # KxN
        if attention_only:
            return A
        A_raw = A
        A = F.softmax(A, dim=1)  # softmax over N
        if instance_eval:
            total_inst_loss = 0.0
            all_preds = []
            all_targets = []
            inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze() #binarize label
            for i in range(len(self.instance_classifiers)):
                inst_label = inst_labels[i].item()
                classifier = self.instance_classifiers[i]
                if inst_label == 1: #in-the-class:
                    # Per-class attention row drives the pseudo-labeling.
                    instance_loss, preds, targets = self.inst_eval(A[i], h, classifier)
                    all_preds.extend(preds.cpu().numpy())
                    all_targets.extend(targets.cpu().numpy())
                else: #out-of-the-class
                    if self.subtyping:
                        instance_loss, preds, targets = self.inst_eval_out(A[i], h, classifier)
                        all_preds.extend(preds.cpu().numpy())
                        all_targets.extend(targets.cpu().numpy())
                    else:
                        continue
                total_inst_loss += instance_loss
            if self.subtyping:
                total_inst_loss /= len(self.instance_classifiers)
        # C x D bag embeddings, one row per class branch.
        M = torch.mm(A, h)
        logits = torch.empty(1, self.n_classes).float().to(device)
        for c in range(self.n_classes):
            logits[0, c] = self.classifiers[c](M[c])
        Y_hat = torch.topk(logits, 1, dim = 1)[1]
        Y_prob = F.softmax(logits, dim = 1)
        if instance_eval:
            results_dict = {'instance_loss': total_inst_loss, 'inst_labels': np.array(all_targets),
            'inst_preds': np.array(all_preds)}
        else:
            results_dict = {}
        if return_features:
            results_dict.update({'features': M})
        return logits, Y_prob, Y_hat, A_raw, results_dict
| 9,447 | 43.990476 | 128 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_hierarchical_mil.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import numpy as np
from os.path import join
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.model_utils import *
import sys
sys.path.append('../HIPT_4K/')
from vision_transformer4k import vit4k_xs
######################################
# HIPT w/o Transformers #
######################################
class HIPT_None_FC(nn.Module):
    """HIPT variant without Transformers: two stacked gated-attention pooling
    stages (local 256-level, then global 4096-level) over patch features."""
    def __init__(self, path_input_dim=384, size_arg = "small", dropout=0.25, n_classes=2):
        """
        Args:
            path_input_dim (int): input patch feature dimension.
            size_arg (str): key selecting layer widths from size_dict_path.
            dropout (float): declared dropout rate (NOTE(review): the layers
                below hard-code 0.25 rather than using this value — confirm).
            n_classes (int): number of output classes.
        """
        super(HIPT_None_FC, self).__init__()
        self.size_dict_path = {"small": [path_input_dim, 256, 256], "big": [path_input_dim, 512, 384]}
        size = self.size_dict_path[size_arg]
        ### Local Aggregation
        self.local_phi = nn.Sequential(
            nn.Linear(size[0], size[1]), nn.ReLU(), nn.Dropout(0.25),
        )
        self.local_attn_pool = Attn_Net_Gated(L=size[1], D=size[1], dropout=0.25, n_classes=1)
        ### Global Aggregation
        self.global_phi = nn.Sequential(
            nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(0.25),
        )
        self.global_attn_pool = Attn_Net_Gated(L=size[1], D=size[1], dropout=0.25, n_classes=1)
        self.global_rho = nn.Sequential(*[nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(0.25)])
        self.classifier = nn.Linear(size[1], n_classes)
    def forward(self, h, **kwargs):
        """Two-stage attention pooling.

        Assumes h is a 3-D tensor (regions, 256-level tokens per region,
        feature dim) — TODO confirm against the caller.

        Returns:
            (logits, Y_prob, Y_hat, None, None) — trailing Nones keep the
            signature aligned with the other MIL models in this repo.
        """
        x_256 = h
        ### Local: attention-pool 256-level tokens within each region.
        h_256 = self.local_phi(x_256)
        A_256, h_256 = self.local_attn_pool(h_256)
        A_256 = A_256.squeeze(dim=2) # A = torch.transpose(A, 1, 0)
        A_256 = F.softmax(A_256, dim=1)
        # Batched weighted sum -> one embedding per region.
        h_4096 = torch.bmm(A_256.unsqueeze(dim=1), h_256).squeeze(dim=1)
        ### Global: attention-pool region embeddings into a slide embedding.
        h_4096 = self.global_phi(h_4096)
        A_4096, h_4096 = self.global_attn_pool(h_4096)
        A_4096 = torch.transpose(A_4096, 1, 0)
        A_4096 = F.softmax(A_4096, dim=1)
        h_path = torch.mm(A_4096, h_4096)
        h_path = self.global_rho(h_path)
        logits = self.classifier(h_path)
        Y_hat = torch.topk(logits, 1, dim = 1)[1]
        Y_prob = F.softmax(logits, dim = 1)
        return logits, Y_prob, Y_hat, None, None
######################################
# 3-Stage HIPT Implementation (With Local-Global Pretraining) #
######################################
class HIPT_LGP_FC(nn.Module):
    """3-stage HIPT: pretrained local ViT over 256-level tokens, then a small
    Transformer encoder + gated-attention pooling at the slide level."""
    def __init__(self, path_input_dim=384, size_arg = "small", dropout=0.25, n_classes=4,
     pretrain_4k='None', freeze_4k=False, pretrain_WSI='None', freeze_WSI=False):
        """
        Args:
            path_input_dim (int): input patch feature dimension.
            size_arg (str): key selecting layer widths.
            dropout (float): declared dropout rate (NOTE(review): layers below
                hard-code 0.25 — confirm).
            n_classes (int): number of output classes.
            pretrain_4k (str): checkpoint name for the local ViT ('None' = scratch).
            freeze_4k (bool): freeze the local ViT parameters.
            pretrain_WSI (str): checkpoint name for a pretrained slide-level
                encoder; 'None' builds the phi/transformer/pool head instead.
            freeze_WSI (bool): unused in this implementation.
        """
        super(HIPT_LGP_FC, self).__init__()
        self.size_dict_path = {"small": [384, 192, 192], "big": [1024, 512, 384]}
        #self.fusion = fusion
        size = self.size_dict_path[size_arg]
        ### Local Aggregation
        self.local_vit = vit4k_xs()
        if pretrain_4k != 'None':
            print("Loading Pretrained Local VIT model...",)
            # DINO-style checkpoint: take the teacher weights and strip wrapper prefixes.
            state_dict = torch.load('../../HIPT_4K/Checkpoints/%s.pth' % pretrain_4k, map_location='cpu')['teacher']
            state_dict = {k.replace('module.', ""): v for k, v in state_dict.items()}
            state_dict = {k.replace('backbone.', ""): v for k, v in state_dict.items()}
            missing_keys, unexpected_keys = self.local_vit.load_state_dict(state_dict, strict=False)
            print("Done!")
        if freeze_4k:
            print("Freezing Pretrained Local VIT model")
            for param in self.local_vit.parameters():
                param.requires_grad = False
            print("Done")
        ### Global Aggregation
        self.pretrain_WSI = pretrain_WSI
        if pretrain_WSI != 'None':
            # NOTE(review): expects self.global_vit to be provided/loaded
            # elsewhere when a pretrained WSI encoder is requested — confirm.
            pass
        else:
            self.global_phi = nn.Sequential(nn.Linear(192, 192), nn.ReLU(), nn.Dropout(0.25))
            self.global_transformer = nn.TransformerEncoder(
                nn.TransformerEncoderLayer(
                    d_model=192, nhead=3, dim_feedforward=192, dropout=0.25, activation='relu'
                ),
                num_layers=2
            )
            self.global_attn_pool = Attn_Net_Gated(L=size[1], D=size[1], dropout=0.25, n_classes=1)
            self.global_rho = nn.Sequential(*[nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(0.25)])
        self.classifier = nn.Linear(size[1], n_classes)
    def forward(self, x_256, **kwargs):
        """Encode 256-level token features per region, then pool to a slide
        prediction. Returns (logits, Y_prob, Y_hat, None, None)."""
        ### Local: unfold token sequence into a grid the local ViT expects.
        h_4096 = self.local_vit(x_256.unfold(1, 16, 16).transpose(1,2))
        ### Global
        if self.pretrain_WSI != 'None':
            h_WSI = self.global_vit(h_4096.unsqueeze(dim=0))
        else:
            h_4096 = self.global_phi(h_4096)
            # Transformer expects (seq, batch, dim); treat regions as the sequence.
            h_4096 = self.global_transformer(h_4096.unsqueeze(1)).squeeze(1)
            A_4096, h_4096 = self.global_attn_pool(h_4096)
            A_4096 = torch.transpose(A_4096, 1, 0)
            A_4096 = F.softmax(A_4096, dim=1)
            h_path = torch.mm(A_4096, h_4096)
            h_WSI = self.global_rho(h_path)
        logits = self.classifier(h_WSI)
        Y_hat = torch.topk(logits, 1, dim = 1)[1]
        return logits, F.softmax(logits, dim=1), Y_hat, None, None
    def relocate(self):
        # Wrap the heavy local ViT in DataParallel and move heads to the device.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if torch.cuda.device_count() >= 1:
            device_ids = list(range(torch.cuda.device_count()))
            self.local_vit = nn.DataParallel(self.local_vit, device_ids=device_ids).to('cuda:0')
            if self.pretrain_WSI != 'None':
                self.global_vit = nn.DataParallel(self.global_vit, device_ids=device_ids).to('cuda:0')
        if self.pretrain_WSI == 'None':
            self.global_phi = self.global_phi.to(device)
            self.global_transformer = self.global_transformer.to(device)
            self.global_attn_pool = self.global_attn_pool.to(device)
            self.global_rho = self.global_rho.to(device)
        self.classifier = self.classifier.to(device)
######################################
# HIPT Implementation (With Local-Global Pretraining) #
######################################
class HIPT_GP_FC(nn.Module):
    """HIPT with global pretraining only: region embeddings are assumed to be
    precomputed; a small Transformer + gated-attention pooling produces the
    slide-level prediction."""
    def __init__(self, path_input_dim=384, size_arg = "small", dropout=0.25, n_classes=4,
        pretrain_WSI='None', freeze_WSI=False):
        """
        Args:
            path_input_dim (int): input region feature dimension.
            size_arg (str): key selecting layer widths.
            dropout (float): declared dropout rate (NOTE(review): layers below
                hard-code 0.25 — confirm).
            n_classes (int): number of output classes.
            pretrain_WSI (str): 'None' builds the phi/transformer/pool head;
                otherwise a pretrained slide encoder (self.global_vit) is
                expected to be provided elsewhere.
            freeze_WSI (bool): unused in this implementation.
        """
        super(HIPT_GP_FC, self).__init__()
        self.size_dict_path = {"small": [384, 192, 192], "big": [1024, 512, 384]}
        size = self.size_dict_path[size_arg]
        ### Global Aggregation
        self.pretrain_WSI = pretrain_WSI
        if pretrain_WSI != 'None':
            pass
        else:
            self.global_phi = nn.Sequential(nn.Linear(192, 192), nn.ReLU(), nn.Dropout(0.25))
            self.global_transformer = nn.TransformerEncoder(
                nn.TransformerEncoderLayer(
                    d_model=192, nhead=3, dim_feedforward=192, dropout=0.25, activation='relu'
                ),
                num_layers=2
            )
            self.global_attn_pool = Attn_Net_Gated(L=size[1], D=size[1], dropout=0.25, n_classes=1)
            self.global_rho = nn.Sequential(*[nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(0.25)])
        self.classifier = nn.Linear(size[1], n_classes)
    def forward(self, h_4096, **kwargs):
        """Pool precomputed region embeddings into a slide prediction.

        BUGFIX: removed a leftover `import pdb; pdb.set_trace()` that froze
        every forward pass at a debugger prompt.

        Returns:
            (logits, Y_prob, Y_hat, None, None)
        """
        ### Global
        if self.pretrain_WSI != 'None':
            h_WSI = self.global_vit(h_4096.unsqueeze(dim=0))
        else:
            h_4096 = self.global_phi(h_4096)
            # Transformer expects (seq, batch, dim); treat regions as the sequence.
            h_4096 = self.global_transformer(h_4096.unsqueeze(1)).squeeze(1)
            A_4096, h_4096 = self.global_attn_pool(h_4096)
            A_4096 = torch.transpose(A_4096, 1, 0)
            A_4096 = F.softmax(A_4096, dim=1)
            h_path = torch.mm(A_4096, h_4096)
            h_WSI = self.global_rho(h_path)
        logits = self.classifier(h_WSI)
        Y_hat = torch.topk(logits, 1, dim = 1)[1]
        return logits, F.softmax(logits, dim=1), Y_hat, None, None
    def relocate(self):
        # Move submodules to the available device (DataParallel for a
        # pretrained slide encoder, if any).
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if torch.cuda.device_count() >= 1:
            device_ids = list(range(torch.cuda.device_count()))
            if self.pretrain_WSI != 'None':
                self.global_vit = nn.DataParallel(self.global_vit, device_ids=device_ids).to('cuda:0')
        if self.pretrain_WSI == 'None':
            self.global_phi = self.global_phi.to(device)
            self.global_transformer = self.global_transformer.to(device)
            self.global_attn_pool = self.global_attn_pool.to(device)
            self.global_rho = self.global_rho.to(device)
        self.classifier = self.classifier.to(device)
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_dsmil.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FCLayer(nn.Module):
    """Per-instance linear scoring head (DSMIL instance classifier).

    Forward returns the untouched input features together with their scores.
    """

    def __init__(self, in_size, out_size=1):
        super(FCLayer, self).__init__()
        # Wrapped in a Sequential so checkpoint keys stay under 'fc.0'.
        self.fc = nn.Sequential(nn.Linear(in_size, out_size))

    def forward(self, feats, **kwargs):
        scores = self.fc(feats)
        return feats, scores
class IClassifier(nn.Module):
    """Instance-level classifier: a feature extractor followed by one linear
    head, applied independently to every instance in the bag."""

    def __init__(self, feature_extractor, feature_size, output_class):
        super(IClassifier, self).__init__()
        self.feature_extractor = feature_extractor
        self.fc = nn.Linear(feature_size, output_class)

    def forward(self, x, **kwargs):
        embeddings = self.feature_extractor(x)
        # Flatten each instance to a vector before classification.
        flat = embeddings.view(embeddings.shape[0], -1)  # N x K
        logits = self.fc(flat)                           # N x C
        return flat, logits
class BClassifier(nn.Module):
    """DSMIL bag classifier: dual-stream attention over instance features,
    keyed on the highest-scoring ("critical") instance of each class."""
    def __init__(self, input_size, output_class, dropout_v=0.0): # K, L, N
        """
        Args:
            input_size (int): instance feature dimension K.
            output_class (int): number of classes C.
            dropout_v (float): dropout rate applied before the value projection.
        """
        super(BClassifier, self).__init__()
        # Query projection to a fixed 128-d space.
        self.q = nn.Linear(input_size, 128)
        # Value projection keeps the original feature dimension.
        self.v = nn.Sequential(
            nn.Dropout(dropout_v),
            nn.Linear(input_size, input_size)
        )
        ### 1D convolutional layer that can handle multiple class (including binary)
        self.fcc = nn.Conv1d(output_class, output_class, kernel_size=input_size)
    def forward(self, feats, c, **kwargs): # N x K, N x C
        """Aggregate instance features into per-class bag logits.

        Args:
            feats: N x K instance features.
            c: N x C per-instance class scores (from the instance classifier).

        Returns:
            (C logits as 1 x C, attention A as N x C, bag representation B as C x K)
        """
        device = feats.device
        V = self.v(feats) # N x V, unsorted
        Q = self.q(feats).view(feats.shape[0], -1) # N x Q, unsorted
        # handle multiple classes without for loop
        _, m_indices = torch.sort(c, 0, descending=True) # sort class scores along the instance dimension, m_indices in shape N x C
        m_feats = torch.index_select(feats, dim=0, index=m_indices[0, :]) # select critical instances, m_feats in shape C x K
        q_max = self.q(m_feats) # compute queries of critical instances, q_max in shape C x Q
        A = torch.mm(Q, q_max.transpose(0, 1)) # compute inner product of Q to each entry of q_max, A in shape N x C, each column contains unnormalized attention scores
        # Scaled-dot-product style normalization over instances (dim 0).
        A = F.softmax( A / torch.sqrt(torch.tensor(Q.shape[1], dtype=torch.float32, device=device)), 0) # normalize attention scores, A in shape N x C,
        B = torch.mm(A.transpose(0, 1), V) # compute bag representation, B in shape C x V
        B = B.view(1, B.shape[0], B.shape[1]) # 1 x C x V
        # Conv1d with kernel_size == feature dim reduces each class row to one logit.
        C = self.fcc(B) # 1 x C x 1
        C = C.view(1, -1)
        return C, A, B
class MILNet(nn.Module):
    """DSMIL wrapper: combines an instance-level and a bag-level classifier.

    The final logits average the bag prediction with the strongest per-class
    instance score.
    """

    def __init__(self, i_classifier, b_classifier):
        super(MILNet, self).__init__()
        self.i_classifier = i_classifier
        self.b_classifier = b_classifier

    def forward(self, x, **kwargs):
        feats, instance_logits = self.i_classifier(x)
        bag_logits, attention, bag_repr = self.b_classifier(feats, instance_logits)
        # Best per-class instance score across the bag.
        top_instance_logits = torch.max(instance_logits, 0)[0]
        logits = 0.5 * (bag_logits + top_instance_logits)
        Y_hat = torch.topk(logits, 1, dim=1)[1]
        Y_prob = F.softmax(logits, dim=1)
        # Trailing zeros are placeholders so the return signature matches the
        # other MIL models in this repo.
        return logits, Y_prob, Y_hat, 0, 0
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/model_cluster.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import numpy as np
from os.path import join
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
######################################
# Deep Attention MISL Implementation #
######################################
class MIL_Cluster_FC(nn.Module):
    def __init__(self, path_input_dim=1024, num_clusters=10, size_arg = "small", dropout=0.25, n_classes=4):
        r"""
        Deep Attention MISL: per-cluster FC stacks + average pooling, followed
        by gated-attention MIL aggregation over the cluster embeddings.

        Args:
            path_input_dim (int): Dimension size of the input patch features.
            num_clusters (int): Number of phenotype clusters.
            size_arg (str): Size of NN architecture (Choices: small or big)
            dropout (float): Dropout rate
            n_classes (int): Output shape of NN

        NOTE(review): relies on ``Attn_Net_Gated`` being resolvable in this
        module's namespace (defined in models.model_utils) — confirm import.
        """
        super(MIL_Cluster_FC, self).__init__()
        self.size_dict_path = {"small": [path_input_dim, 512, 256], "big": [path_input_dim, 512, 384]}
        self.size_dict_omic = {'small': [256, 256]}
        self.num_clusters = num_clusters
        ### FC Cluster layers + Pooling
        size = self.size_dict_path[size_arg]
        if path_input_dim == 384:
            # ViT-S/16 style features: keep the 384-d width through phi.
            size = [path_input_dim, path_input_dim, 256]
        phis = []
        for phenotype_i in range(num_clusters):
            phi = [nn.Linear(size[0], size[1]), nn.ReLU(), nn.Dropout(dropout),
                   nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(dropout)]
            phis.append(nn.Sequential(*phi))
        self.phis = nn.ModuleList(phis)
        self.pool1d = nn.AdaptiveAvgPool1d(1)
        ### WSI Attention MIL Construction
        fc = [nn.Linear(size[1], size[1]), nn.ReLU(), nn.Dropout(dropout)]
        attention_net = Attn_Net_Gated(L=size[1], D=size[2], dropout=dropout, n_classes=1)
        fc.append(attention_net)
        self.attention_net = nn.Sequential(*fc)
        self.rho = nn.Sequential(*[nn.Linear(size[1], size[2]), nn.ReLU(), nn.Dropout(dropout)])
        self.classifier = nn.Linear(size[2], n_classes)
    def relocate(self):
        # Move submodules to GPU when available (DataParallel for attention).
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if torch.cuda.device_count() >= 1:
            device_ids = list(range(torch.cuda.device_count()))
            self.attention_net = nn.DataParallel(self.attention_net, device_ids=device_ids).to('cuda:0')
        else:
            self.attention_net = self.attention_net.to(device)
        self.phis = self.phis.to(device)
        self.pool1d = self.pool1d.to(device)
        self.rho = self.rho.to(device)
        self.classifier = self.classifier.to(device)
    def forward(self, data, **kwargs):
        """Forward one slide.

        Args:
            data: N x D patch feature matrix.
            kwargs['cluster_id']: length-N tensor assigning each patch to a
                phenotype cluster.

        Returns:
            (logits, Y_prob, Y_hat, None, None)
        """
        x_path = data
        cluster_id = kwargs['cluster_id'].detach().cpu().numpy()
        ### FC Cluster layers + Pooling
        h_cluster = []
        for i in range(self.num_clusters):
            h_cluster_i = self.phis[i](x_path[cluster_id==i])
            if h_cluster_i.shape[0] == 0:
                # Empty cluster: substitute a zero embedding. BUGFIX: derive
                # the width from the phi output and the device from the input
                # instead of hard-coding 384 / CUDA (the old code broke on CPU
                # and whenever size[1] != 384).
                h_cluster_i = torch.zeros((1, h_cluster_i.shape[1]), device=x_path.device)
            h_cluster.append(self.pool1d(h_cluster_i.T.unsqueeze(0)).squeeze(2))
        h_cluster = torch.stack(h_cluster, dim=1).squeeze(0)
        ### Attention MIL over the num_clusters cluster embeddings
        A, h_path = self.attention_net(h_cluster)
        A = torch.transpose(A, 1, 0)
        A_raw = A
        A = F.softmax(A, dim=1)
        h_path = torch.mm(A, h_path)
        h = self.rho(h_path).squeeze()
        logits = self.classifier(h).unsqueeze(0)
        Y_hat = torch.topk(logits, 1, dim = 1)[1]
        Y_prob = F.softmax(logits, dim = 1)
        return logits, Y_prob, Y_hat, None, None
| 3,697 | 37.520833 | 108 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/models/resnet_custom.py | # modified from Pytorch official resnet.py
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
from torchsummary import summary
import torch.nn.functional as F
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class Bottleneck_Baseline(nn.Module):
    """Standard ResNet bottleneck block (1x1 -> 3x3 -> 1x1 convolutions with
    BatchNorm) plus an identity or projected shortcut."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck_Baseline, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection to match the main branch's shape/stride.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet_Baseline(nn.Module):
    """Truncated ResNet (stem + layers 1-3, no layer4/fc) that emits a pooled
    feature vector per image, for use as a patch feature extractor."""

    def __init__(self, block, layers):
        self.inplanes = 64
        super(ResNet_Baseline, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # He init for convs; BN scales/offsets start at identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the first may downsample/project."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stages = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        stages.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stages)

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3):
            x = stage(x)
        x = self.avgpool(x)
        return x.view(x.size(0), -1)
def resnet50_baseline(pretrained=False):
    """Constructs a Modified ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    Returns:
        ResNet_Baseline: truncated ResNet-50 emitting 1024-d pooled features.
    """
    model = ResNet_Baseline(Bottleneck_Baseline, [3, 4, 6, 3])
    if not pretrained:
        return model
    return load_pretrained_weights(model, 'resnet50')
def load_pretrained_weights(model, name):
    """Load the ImageNet checkpoint `model_urls[name]` into `model` in place.

    strict=False so layers absent from the truncated architecture
    (layer4, fc) are silently skipped. Returns the same model object.
    """
    pretrained_dict = model_zoo.load_url(model_urls[name])
    model.load_state_dict(pretrained_dict, strict=False)
    return model
| 4,314 | 32.976378 | 90 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/datasets/dataset_generic.py | from __future__ import print_function, division
import os
import torch
import numpy as np
import pandas as pd
import math
import re
import pdb
import pickle
from scipy import stats
from torch.utils.data import Dataset
import h5py
from utils.utils import generate_split, nth
def save_splits(split_datasets, column_keys, filename, boolean_style=False):
    """Persist the slide ids of each split to a CSV file.

    Args:
        split_datasets: datasets exposing ``slide_data['slide_id']`` (and
            ``__len__`` when boolean_style is used).
        column_keys: column names for the wide (one column per split) layout.
        filename: destination CSV path.
        boolean_style: if True, write one row per slide with train/val/test
            membership flags instead of one column per split.
    """
    slide_ids = [dset.slide_data['slide_id'] for dset in split_datasets]
    if boolean_style:
        stacked = pd.concat(slide_ids, ignore_index=True, axis=0)
        # Row i of the repeated one-hot marks which split slide i belongs to.
        one_hot = np.eye(len(split_datasets)).astype(bool)
        membership = np.repeat(one_hot, [len(dset) for dset in split_datasets], axis=0)
        df = pd.DataFrame(membership, index=stacked.values.tolist(),
                          columns=['train', 'val', 'test'])
    else:
        df = pd.concat(slide_ids, ignore_index=True, axis=1)
        df.columns = column_keys
    df.to_csv(filename)
    print()
class Generic_WSI_Classification_Dataset(Dataset):
def __init__(self,
csv_path = 'dataset_csv/ccrcc_clean.csv',
shuffle = False,
seed = 7,
print_info = True,
label_dict = {},
filter_dict = {},
ignore=[],
patient_strat=False,
label_col = None,
patient_voting = 'max',
mode = 'path',
prop = 1.0,
):
"""
Args:
csv_file (string): Path to the csv file with annotations.
shuffle (boolean): Whether to shuffle
seed (int): random seed for shuffling the data
print_info (boolean): Whether to print a summary of the dataset
label_dict (dict): Dictionary with key, value pairs for converting str labels to int
ignore (list): List containing class labels to ignore
"""
self.label_dict = label_dict
self.num_classes = len(set(self.label_dict.values()))
self.seed = seed
self.print_info = print_info
self.patient_strat = patient_strat
self.train_ids, self.val_ids, self.test_ids = (None, None, None)
self.data_dir = None
if not label_col:
label_col = 'label'
self.label_col = label_col
self.mode = prop
self.prop = prop
slide_data = pd.read_csv(csv_path)
slide_data = self.filter_df(slide_data, filter_dict)
slide_data = self.df_prep(slide_data, self.label_dict, ignore, self.label_col)
###shuffle data
if shuffle:
np.random.seed(seed)
np.random.shuffle(slide_data)
self.slide_data = slide_data
self.patient_data_prep(patient_voting)
self.cls_ids_prep()
if print_info:
self.summarize()
def cls_ids_prep(self):
# store ids corresponding each class at the patient or case level
self.patient_cls_ids = [[] for i in range(self.num_classes)]
for i in range(self.num_classes):
self.patient_cls_ids[i] = np.where(self.patient_data['label'] == i)[0]
# store ids corresponding each class at the slide level
self.slide_cls_ids = [[] for i in range(self.num_classes)]
for i in range(self.num_classes):
self.slide_cls_ids[i] = np.where(self.slide_data['label'] == i)[0]
def patient_data_prep(self, patient_voting='max'):
patients = np.unique(np.array(self.slide_data['case_id'])) # get unique patients
patient_labels = []
for p in patients:
locations = self.slide_data[self.slide_data['case_id'] == p].index.tolist()
assert len(locations) > 0
label = self.slide_data['label'][locations].values
if patient_voting == 'max':
label = label.max() # get patient label (MIL convention)
elif patient_voting == 'maj':
label = stats.mode(label)[0]
else:
raise NotImplementedError
patient_labels.append(label)
self.patient_data = {'case_id':patients, 'label':np.array(patient_labels)}
@staticmethod
def df_prep(data, label_dict, ignore, label_col):
if label_col != 'label':
data['label'] = data[label_col].copy()
mask = data['label'].isin(ignore)
data = data[~mask]
data.reset_index(drop=True, inplace=True)
for i in data.index:
key = data.loc[i, 'label']
data.at[i, 'label'] = label_dict[key]
return data
def filter_df(self, df, filter_dict={}):
if len(filter_dict) > 0:
filter_mask = np.full(len(df), True, bool)
# assert 'label' not in filter_dict.keys()
for key, val in filter_dict.items():
mask = df[key].isin(val)
filter_mask = np.logical_and(filter_mask, mask)
df = df[filter_mask]
return df
def __len__(self):
if self.patient_strat:
return len(self.patient_data['case_id'])
else:
return len(self.slide_data)
def summarize(self):
print("label column: {}".format(self.label_col))
print("label dictionary: {}".format(self.label_dict))
print("number of classes: {}".format(self.num_classes))
print("slide-level counts: ", '\n', self.slide_data['label'].value_counts(sort = False))
for i in range(self.num_classes):
print('Patient-LVL; Number of samples registered in class %d: %d' % (i, self.patient_cls_ids[i].shape[0]))
print('Slide-LVL; Number of samples registered in class %d: %d' % (i, self.slide_cls_ids[i].shape[0]))
def create_splits(self, k = 3, val_num = (25, 25), test_num = (40, 40), label_frac = 1.0, custom_test_ids = None):
settings = {
'n_splits' : k,
'val_num' : val_num,
'test_num': test_num,
'label_frac': label_frac,
'seed': self.seed,
'custom_test_ids': custom_test_ids
}
if self.patient_strat:
settings.update({'cls_ids' : self.patient_cls_ids, 'samples': len(self.patient_data['case_id'])})
else:
settings.update({'cls_ids' : self.slide_cls_ids, 'samples': len(self.slide_data)})
self.split_gen = generate_split(**settings)
def set_splits(self,start_from=None):
if start_from:
ids = nth(self.split_gen, start_from)
else:
ids = next(self.split_gen)
if self.patient_strat:
slide_ids = [[] for i in range(len(ids))]
for split in range(len(ids)):
for idx in ids[split]:
case_id = self.patient_data['case_id'][idx]
slide_indices = self.slide_data[self.slide_data['case_id'] == case_id].index.tolist()
slide_ids[split].extend(slide_indices)
self.train_ids, self.val_ids, self.test_ids = slide_ids[0], slide_ids[1], slide_ids[2]
else:
self.train_ids, self.val_ids, self.test_ids = ids
def get_split_from_df(self, all_splits, split_key='train'):
split = all_splits[split_key].str.rstrip('.svs')
split = split.dropna().reset_index(drop=True)
if len(split) > 0:
mask = self.slide_data['slide_id'].isin(split.tolist())
df_slice = self.slide_data[mask].reset_index(drop=True)
if split_key == 'train' and self.prop != 1.0:
df_slice = df_slice.sample(frac=self.prop, random_state=self.seed).reset_index(drop=True)
if split_key == 'train':
print(df_slice.head())
print("Traing Data Size ({%0.2f}): %d" % (self.prop, df_slice.shape[0]))
split = Generic_Split(df_slice, data_dir=self.data_dir, mode=self.mode, prop=self.prop, num_classes=self.num_classes)
else:
split = None
return split
def get_merged_split_from_df(self, all_splits, split_keys=['train']):
merged_split = []
for split_key in split_keys:
split = all_splits[split_key]
split = split.dropna().reset_index(drop=True).tolist()
merged_split.extend(split)
if len(split) > 0:
mask = self.slide_data['slide_id'].isin(merged_split)
df_slice = self.slide_data[mask].reset_index(drop=True)
split = Generic_Split(df_slice, data_dir=self.data_dir, mode=self.mode, prop=self.prop, num_classes=self.num_classes)
else:
split = None
return split
def return_splits(self, from_id=True, csv_path=None):
    """Return (train, val, test) ``Generic_Split`` objects.

    With ``from_id=True`` the splits come from the index lists already stored
    on the instance (``self.train_ids`` etc.); otherwise they are read from
    the CSV at ``csv_path``. Empty splits come back as ``None``.
    """
    if from_id:
        splits = []
        for ids in (self.train_ids, self.val_ids, self.test_ids):
            if len(ids) > 0:
                frame = self.slide_data.loc[ids].reset_index(drop=True)
                splits.append(Generic_Split(frame, data_dir=self.data_dir, num_classes=self.num_classes))
            else:
                splits.append(None)
        train_split, val_split, test_split = splits
    else:
        assert csv_path
        all_splits = pd.read_csv(csv_path)
        train_split = self.get_split_from_df(all_splits, 'train')
        val_split = self.get_split_from_df(all_splits, 'val')
        test_split = self.get_split_from_df(all_splits, 'test')
    return train_split, val_split, test_split
def get_list(self, ids):
    """Return the slide_id entries at the given row indices."""
    slide_column = self.slide_data['slide_id']
    return slide_column[ids]
def getlabel(self, ids):
    """Return the label entries at the given row indices."""
    label_column = self.slide_data['label']
    return label_column[ids]
def __getitem__(self, idx):
    # Deliberate stub: this base class does not load features itself.
    # Subclasses (e.g. Generic_MIL_Dataset below) override __getitem__
    # to read feature bags from disk.
    return None
def test_split_gen(self, return_descriptor=False):
    """Print per-class sample counts for each split and sanity-check that
    the train/val/test index sets are pairwise disjoint.

    When ``return_descriptor`` is True, also return a DataFrame with one
    row per class and one column per split holding the counts.
    """
    if return_descriptor:
        # Recover class names from label_dict by inverting the value->key mapping.
        index = [list(self.label_dict.keys())[list(self.label_dict.values()).index(i)] for i in range(self.num_classes)]
        columns = ['train', 'val', 'test']
        df = pd.DataFrame(np.full((len(index), len(columns)), 0, dtype=np.int32), index= index,
                          columns= columns)
    count = len(self.train_ids)
    print('\nnumber of training samples: {}'.format(count))
    labels = self.getlabel(self.train_ids)
    unique, counts = np.unique(labels, return_counts=True)
    for u in range(len(unique)):
        print('number of samples in cls {}: {}'.format(unique[u], counts[u]))
        if return_descriptor:
            df.loc[index[u], 'train'] = counts[u]
    count = len(self.val_ids)
    print('\nnumber of val samples: {}'.format(count))
    labels = self.getlabel(self.val_ids)
    unique, counts = np.unique(labels, return_counts=True)
    for u in range(len(unique)):
        print('number of samples in cls {}: {}'.format(unique[u], counts[u]))
        if return_descriptor:
            df.loc[index[u], 'val'] = counts[u]
    count = len(self.test_ids)
    print('\nnumber of test samples: {}'.format(count))
    labels = self.getlabel(self.test_ids)
    unique, counts = np.unique(labels, return_counts=True)
    for u in range(len(unique)):
        print('number of samples in cls {}: {}'.format(unique[u], counts[u]))
        if return_descriptor:
            df.loc[index[u], 'test'] = counts[u]
    # The three splits must never share a slide index.
    assert len(np.intersect1d(self.train_ids, self.test_ids)) == 0
    assert len(np.intersect1d(self.train_ids, self.val_ids)) == 0
    assert len(np.intersect1d(self.val_ids, self.test_ids)) == 0
    if return_descriptor:
        return df
def save_split(self, filename):
    """Write the current train/val/test slide ids to ``filename`` as a CSV
    with one column per split (columns padded with NaN to equal length).
    """
    per_split = {
        'train': self.get_list(self.train_ids),
        'val': self.get_list(self.val_ids),
        'test': self.get_list(self.test_ids),
    }
    frames = [pd.DataFrame({name: values}) for name, values in per_split.items()]
    pd.concat(frames, axis=1).to_csv(filename, index=False)
class Generic_MIL_Dataset(Generic_WSI_Classification_Dataset):
    """MIL dataset that loads pre-extracted feature bags from disk.

    ``mode`` selects what __getitem__ returns:
      - 'path' / 'local_region_features': a single .pt feature bag
      - 'pyramid': a .pt feature bag (no dino slicing)
      - 'cluster': (features, cluster_ids, label), cluster ids from self.fname2ids
      - 'graph': (BatchWSI graph, label)
    """
    def __init__(self,
        data_dir,
        mode='path',
        prop=1.0,
        **kwargs):
        super(Generic_MIL_Dataset, self).__init__(**kwargs)
        self.data_dir = data_dir
        self.use_h5 = False
        self.mode = mode
        # Fraction of training data to use (consumed by splitting code).
        self.prop = prop
        # Normalize slide ids: strip the .svs extension once up front.
        self.slide_data['slide_id'] = self.slide_data['slide_id'].apply(lambda x: x.replace(".svs", ""))
        print("Slide data in geneic mil", self.slide_data)

    def load_from_h5(self, toggle):
        # Switch between .pt tensors and raw h5 bags (only False path implemented below).
        self.use_h5 = toggle

    def __getitem__(self, idx):
        slide_id = self.slide_data['slide_id'][idx]
        label = self.slide_data['label'][idx]
        if type(self.data_dir) == dict:
            # Multi-source setup: pick the directory for this slide's source.
            source = self.slide_data['source'][idx]
            data_dir = self.data_dir[source]
        else:
            data_dir = self.data_dir
        if not self.use_h5:
            if self.mode == 'path' or self.mode == 'local_region_features':
                full_path = os.path.join(data_dir, '{}.pt'.format(slide_id.replace(".svs","")))
                features = torch.load(full_path)
                if 'dino' in full_path:
                    if 'vits_tcga_pancancer_dino' in full_path:
                        pass
                    else:
                        # Slice out one feature block of the concatenated dino
                        # embedding. NOTE(review): magic indices 1152:1536 —
                        # presumably a 384-dim sub-embedding; confirm layout.
                        features = features[:,1152:1536]
                return features, label
            elif self.mode == 'pyramid':
                full_path = os.path.join(data_dir, '{}.pt'.format(slide_id.replace(".svs","")))
                features = torch.load(full_path)
                return features, label
            elif self.mode == 'cluster':
                path_features = []
                cluster_ids = []
                wsi_path = os.path.join(data_dir, '{}.pt'.format(slide_id.replace(".svs","")))
                wsi_bag = torch.load(wsi_path)
                if 'dino' in wsi_path:
                    wsi_bag = wsi_bag[:,1152:1536]
                path_features.append(wsi_bag)
                # self.fname2ids is loaded by Generic_Split from fast_cluster_ids.pkl.
                cluster_ids.extend(self.fname2ids[slide_id+'.pt'])
                path_features = torch.cat(path_features, dim=0)
                cluster_ids = torch.Tensor(cluster_ids)
                return (path_features, cluster_ids, label)
            elif self.mode == 'graph':
                path_features = []
                # Local import: torch_geometric is only needed for graph mode.
                from datasets.BatchWSI import BatchWSI
                # Graph features live in a sibling directory of data_dir.
                wsi_path = os.path.join("/".join(data_dir.split("/")[0:-1]), 'vits_tcga_pancancer_dino_h5_graph_features', '{}.pt'.format(slide_id.replace('.svs','')))
                wsi_bag = torch.load(wsi_path)
                path_features.append(wsi_bag)
                path_features = BatchWSI.from_data_list(path_features, update_cat_dims={'edge_latent': 1})
                return (path_features, label)
        else:
            # h5 loading path not implemented in this class.
            return None
class Generic_Split(Generic_MIL_Dataset):
    """A lightweight view over one train/val/test slice of slide_data.

    Does NOT call the parent __init__ (no CSV re-parsing); it only sets the
    attributes the parent __getitem__ and the loaders rely on.
    """
    def __init__(self, slide_data, data_dir=None, mode='path', prop=1.0, num_classes=2):
        self.use_h5 = False
        self.slide_data = slide_data
        self.data_dir = data_dir
        self.mode = mode
        self.prop = prop
        self.num_classes = num_classes
        # Row indices per class, used for class-weighted sampling.
        self.slide_cls_ids = [np.where(self.slide_data['label'] == i)[0]
                              for i in range(self.num_classes)]
        # Optional patch-cluster assignments (used by mode='cluster').
        # Fixed: previously left unset when the pickle was missing, producing a
        # confusing AttributeError later instead of a clear None.
        self.fname2ids = None
        # Fixed: the default data_dir=None crashed on data_dir.split("/").
        if data_dir is not None:
            cluster_dir = "/".join(data_dir.split("/")[0:-1])
            cluster_path = os.path.join(cluster_dir, 'fast_cluster_ids.pkl')
            if os.path.isfile(cluster_path):
                with open(cluster_path, 'rb') as handle:
                    self.fname2ids = pickle.load(handle)
            else:
                print("Cluster file missing")

    def __len__(self):
        return len(self.slide_data)
| 16,504 | 38.204276 | 167 | py |
from __future__ import print_function, division
import os
import torch
import numpy as np
import pandas as pd
import math
import re
import pdb
import pickle
from torch.utils.data import Dataset, DataLoader, sampler
from torchvision import transforms, utils, models
import torch.nn.functional as F
from PIL import Image
import h5py
from random import randrange
def eval_transforms(pretrained=False):
    """Return the evaluation-time transform pipeline (ToTensor + Normalize).

    With ``pretrained=True`` the ImageNet mean/std are used; otherwise a
    symmetric 0.5 normalization.
    """
    if pretrained:
        mean, std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
    else:
        mean, std = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std),
    ])
class Whole_Slide_Bag(Dataset):
    def __init__(self,
        file_path,
        pretrained=False,
        custom_transforms=None,
        target_patch_size=-1,
        ):
        """
        Args:
            file_path (string): Path to the .h5 file containing patched data.
            pretrained (bool): Use ImageNet transforms
            custom_transforms (callable, optional): Optional transform to be applied on a sample
            target_patch_size (int): resize patches to this square size; <=0 disables resizing
        """
        self.pretrained = pretrained
        if target_patch_size > 0:
            self.target_patch_size = (target_patch_size, target_patch_size)
        else:
            self.target_patch_size = None

        if not custom_transforms:
            self.roi_transforms = eval_transforms(pretrained=pretrained)
        else:
            self.roi_transforms = custom_transforms

        self.file_path = file_path
        # Open only to read the length; workers reopen the file per item.
        with h5py.File(self.file_path, "r") as f:
            dset = f['imgs']
            self.length = len(dset)
        self.summary()

    def __len__(self):
        return self.length

    def summary(self):
        """Print the h5 attributes and transform settings."""
        # Fixed: the file handle was previously opened and never closed (leak).
        with h5py.File(self.file_path, "r") as hdf5_file:
            dset = hdf5_file['imgs']
            for name, value in dset.attrs.items():
                print(name, value)
        print('pretrained:', self.pretrained)
        print('transformations:', self.roi_transforms)
        if self.target_patch_size is not None:
            print('target_size: ', self.target_patch_size)

    def __getitem__(self, idx):
        with h5py.File(self.file_path, 'r') as hdf5_file:
            img = hdf5_file['imgs'][idx]
            coord = hdf5_file['coords'][idx]
        img = Image.fromarray(img)
        if self.target_patch_size is not None:
            img = img.resize(self.target_patch_size)
        img = self.roi_transforms(img).unsqueeze(0)
        return img, coord
class Whole_Slide_Bag_FP(Dataset):
    def __init__(self,
        file_path,
        wsi,
        pretrained=False,
        custom_transforms=None,
        custom_downsample=1,
        target_patch_size=-1
        ):
        """
        Args:
            file_path (string): Path to the .h5 file containing patched data.
            wsi: an open whole-slide image object providing read_region()
            pretrained (bool): Use ImageNet transforms
            custom_transforms (callable, optional): Optional transform to be applied on a sample
            custom_downsample (int): Custom defined downscale factor (overruled by target_patch_size)
            target_patch_size (int): Custom defined image size before embedding
        """
        self.pretrained = pretrained
        self.wsi = wsi
        if not custom_transforms:
            self.roi_transforms = eval_transforms(pretrained=pretrained)
        else:
            self.roi_transforms = custom_transforms

        self.file_path = file_path
        # Read patch metadata once; patches themselves are cut from the WSI.
        with h5py.File(self.file_path, "r") as f:
            dset = f['coords']
            self.patch_level = f['coords'].attrs['patch_level']
            self.patch_size = f['coords'].attrs['patch_size']
            self.length = len(dset)
            if target_patch_size > 0:
                self.target_patch_size = (target_patch_size, ) * 2
            elif custom_downsample > 1:
                self.target_patch_size = (self.patch_size // custom_downsample, ) * 2
            else:
                self.target_patch_size = None
        self.summary()

    def __len__(self):
        return self.length

    def summary(self):
        """Print the coords attributes and feature-extraction settings."""
        # Fixed: the file handle was previously opened and never closed (leak).
        with h5py.File(self.file_path, "r") as hdf5_file:
            dset = hdf5_file['coords']
            for name, value in dset.attrs.items():
                print(name, value)
        print('\nfeature extraction settings')
        print('target patch size: ', self.target_patch_size)
        print('pretrained: ', self.pretrained)
        print('transformations: ', self.roi_transforms)

    def __getitem__(self, idx):
        with h5py.File(self.file_path, 'r') as hdf5_file:
            coord = hdf5_file['coords'][idx]
        img = self.wsi.read_region(coord, self.patch_level, (self.patch_size, self.patch_size)).convert('RGB')
        if self.target_patch_size is not None:
            img = img.resize(self.target_patch_size)
        img = self.roi_transforms(img).unsqueeze(0)
        return img, coord
class Dataset_All_Bags(Dataset):
    """Dataset over a CSV listing of slides; each item is one slide_id."""

    def __init__(self, csv_path):
        self.df = pd.read_csv(csv_path)

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, idx):
        slide_ids = self.df['slide_id']
        return slide_ids[idx]
| 4,426 | 24.738372 | 104 | py |
import torch_geometric
from typing import List
import torch
from torch import Tensor
from torch_sparse import SparseTensor, cat
import torch_geometric
from torch_geometric.data import Data
class BatchWSI(torch_geometric.data.Batch):
    """torch_geometric Batch variant whose from_data_list accepts
    ``update_cat_dims`` to override the concatenation dimension for
    selected keys (e.g. stacking 'edge_latent' along dim 1)."""

    def __init__(self):
        super(BatchWSI, self).__init__()
        pass

    @classmethod
    def from_data_list(cls, data_list, follow_batch=[], exclude_keys=[], update_cat_dims={}):
        r"""Constructs a batch object from a python list holding
        :class:`torch_geometric.data.Data` objects.
        The assignment vector :obj:`batch` is created on the fly.
        Additionally, creates assignment batch vectors for each key in
        :obj:`follow_batch`.
        Will exclude any keys given in :obj:`exclude_keys`."""

        keys = list(set(data_list[0].keys) - set(exclude_keys))
        assert 'batch' not in keys and 'ptr' not in keys

        batch = cls()
        # Mirror all (non-dunder) attributes of the Data class on the batch.
        for key in data_list[0].__dict__.keys():
            if key[:2] != '__' and key[-2:] != '__':
                batch[key] = None

        batch.__num_graphs__ = len(data_list)
        batch.__data_class__ = data_list[0].__class__
        for key in keys + ['batch']:
            batch[key] = []
        batch['ptr'] = [0]

        # Bookkeeping: per-key concat dim, running offsets, slice boundaries.
        cat_dims = {}
        device = None
        slices = {key: [0] for key in keys}
        cumsum = {key: [0] for key in keys}
        num_nodes_list = []
        for i, data in enumerate(data_list):
            for key in keys:
                item = data[key]

                # Increase values by `cumsum` value.
                cum = cumsum[key][-1]
                if isinstance(item, Tensor) and item.dtype != torch.bool:
                    if not isinstance(cum, int) or cum != 0:
                        item = item + cum
                elif isinstance(item, SparseTensor):
                    value = item.storage.value()
                    if value is not None and value.dtype != torch.bool:
                        if not isinstance(cum, int) or cum != 0:
                            value = value + cum
                        item = item.set_value(value, layout='coo')
                elif isinstance(item, (int, float)):
                    item = item + cum

                # Gather the size of the `cat` dimension.
                size = 1
                # update_cat_dims overrides the Data class's own __cat_dim__.
                if key in update_cat_dims.keys():
                    cat_dim = update_cat_dims[key]
                else:
                    cat_dim = data.__cat_dim__(key, data[key])
                    # 0-dimensional tensors have no dimension along which to
                    # concatenate, so we set `cat_dim` to `None`.
                    if isinstance(item, Tensor) and item.dim() == 0:
                        cat_dim = None
                cat_dims[key] = cat_dim

                # Add a batch dimension to items whose `cat_dim` is `None`:
                if isinstance(item, Tensor) and cat_dim is None:
                    cat_dim = 0  # Concatenate along this new batch dimension.
                    item = item.unsqueeze(0)
                    device = item.device
                elif isinstance(item, Tensor):
                    size = item.size(cat_dim)
                    device = item.device
                elif isinstance(item, SparseTensor):
                    size = torch.tensor(item.sizes())[torch.tensor(cat_dim)]
                    device = item.device()

                batch[key].append(item)  # Append item to the attribute list.

                slices[key].append(size + slices[key][-1])
                inc = data.__inc__(key, item)
                if isinstance(inc, (tuple, list)):
                    inc = torch.tensor(inc)
                cumsum[key].append(inc + cumsum[key][-1])

                if key in follow_batch:
                    if isinstance(size, Tensor):
                        # NOTE: loop variable deliberately reuses/shadows `size`
                        # (matches upstream torch_geometric implementation).
                        for j, size in enumerate(size.tolist()):
                            tmp = f'{key}_{j}_batch'
                            batch[tmp] = [] if i == 0 else batch[tmp]
                            batch[tmp].append(
                                torch.full((size, ), i, dtype=torch.long,
                                           device=device))
                    else:
                        tmp = f'{key}_batch'
                        batch[tmp] = [] if i == 0 else batch[tmp]
                        batch[tmp].append(
                            torch.full((size, ), i, dtype=torch.long,
                                       device=device))

            if hasattr(data, '__num_nodes__'):
                num_nodes_list.append(data.__num_nodes__)
            else:
                num_nodes_list.append(None)

            num_nodes = data.num_nodes
            if num_nodes is not None:
                # Per-node graph assignment vector and cumulative node pointer.
                item = torch.full((num_nodes, ), i, dtype=torch.long,
                                  device=device)
                batch.batch.append(item)
                batch.ptr.append(batch.ptr[-1] + num_nodes)

        batch.batch = None if len(batch.batch) == 0 else batch.batch
        batch.ptr = None if len(batch.ptr) == 1 else batch.ptr
        batch.__slices__ = slices
        batch.__cumsum__ = cumsum
        batch.__cat_dims__ = cat_dims
        batch.__num_nodes_list__ = num_nodes_list

        ref_data = data_list[0]
        for key in batch.keys:
            items = batch[key]
            item = items[0]

            ### <--- Updating Cat Dim
            if key in update_cat_dims.keys():
                cat_dim = update_cat_dims[key]
            else:
                cat_dim = ref_data.__cat_dim__(key, item)
                cat_dim = 0 if cat_dim is None else cat_dim
            ### ---?
            # Finally concatenate the per-graph pieces for each key.
            if isinstance(item, Tensor):
                batch[key] = torch.cat(items, cat_dim)
            elif isinstance(item, SparseTensor):
                batch[key] = cat(items, cat_dim)
            elif isinstance(item, (int, float)):
                batch[key] = torch.tensor(items)

        if torch_geometric.is_debug_enabled():
            batch.debug()

        return batch.contiguous()
import numpy as np
import torch
import torch.nn.functional as F
from utils.utils import *
import os
import torch.nn.functional as F
from datasets.dataset_generic import save_splits
from models.model_dsmil import *
from models.model_mil import MIL_fc, MIL_fc_mc
from models.model_dgcn import DeepGraphConv
from models.model_clam import CLAM_MB, CLAM_SB
from models.model_cluster import MIL_Cluster_FC
from models.model_hierarchical_mil import HIPT_None_FC, HIPT_LGP_FC, HIPT_GP_FC
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.metrics import auc as calc_auc
import sys
#from utils.gpu_utils import gpu_profile, print_gpu_mem
#os.environ['GPU_DEBUG']='0'
class Accuracy_Logger(object):
    """Accumulates per-class prediction counts and reports accuracy."""

    def __init__(self, n_classes):
        super(Accuracy_Logger, self).__init__()
        self.n_classes = n_classes
        self.initialize()

    def initialize(self):
        # One {count, correct} record per class, indexed by true label.
        self.data = [{"count": 0, "correct": 0} for _ in range(self.n_classes)]

    def log(self, Y_hat, Y):
        """Record a single prediction Y_hat against true label Y."""
        Y_hat, Y = int(Y_hat), int(Y)
        record = self.data[Y]
        record["count"] += 1
        record["correct"] += int(Y_hat == Y)

    def log_batch(self, Y_hat, Y):
        """Record arrays of predictions and true labels at once."""
        Y_hat = np.array(Y_hat).astype(int)
        Y = np.array(Y).astype(int)
        for cls in np.unique(Y):
            mask = Y == cls
            self.data[cls]["count"] += mask.sum()
            self.data[cls]["correct"] += (Y_hat[mask] == Y[mask]).sum()

    def get_summary(self, c):
        """Return (accuracy-or-None, correct, count) for class c."""
        record = self.data[c]
        count, correct = record["count"], record["correct"]
        acc = None if count == 0 else float(correct) / count
        return acc, correct, count
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""

    def __init__(self, patience=20, stop_epoch=50, verbose=False):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 20
            stop_epoch (int): Earliest epoch possible for stopping
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
        """
        self.patience = patience
        self.stop_epoch = stop_epoch
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        self.val_loss_min = np.Inf

    def __call__(self, epoch, val_loss, model, ckpt_name = 'checkpoint.pt'):
        score = -val_loss
        # An equal-or-better score counts as an improvement; it saves a
        # checkpoint and resets the patience counter.
        improved = self.best_score is None or score >= self.best_score
        if improved:
            self.best_score = score
            self.save_checkpoint(val_loss, model, ckpt_name)
            self.counter = 0
        else:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            # Only stop once both the patience budget is spent AND we are
            # past the minimum epoch.
            if self.counter >= self.patience and epoch > self.stop_epoch:
                self.early_stop = True

    def save_checkpoint(self, val_loss, model, ckpt_name):
        '''Saves model when validation loss decrease.'''
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        torch.save(model.state_dict(), ckpt_name)
        self.val_loss_min = val_loss
def train(datasets, cur, args):
    """
    train for a single fold

    Args:
        datasets: (train_split, val_split, test_split) dataset objects
        cur: fold index (used for output paths / checkpoint names)
        args: experiment namespace (model_type, losses, loader settings, ...)

    Returns:
        (results_dict, test_auc, val_auc, test_acc, val_acc)
    """
    print('\nTraining Fold {}!'.format(cur))
    writer_dir = os.path.join(args.results_dir, str(cur))
    if not os.path.isdir(writer_dir):
        os.mkdir(writer_dir)

    # Optional TensorBoard logging (imported lazily).
    if args.log_data:
        from tensorboardX import SummaryWriter
        writer = SummaryWriter(writer_dir, flush_secs=15)
    else:
        writer = None

    print('\nInit train/val/test splits...', end=' ')
    train_split, val_split, test_split = datasets
    save_splits(datasets, ['train', 'val', 'test'], os.path.join(args.results_dir, 'splits_{}.csv'.format(cur)))
    print('Done!')
    print("Training on {} samples".format(len(train_split)))
    print("Validating on {} samples".format(len(val_split)))
    print("Testing on {} samples".format(len(test_split)))

    # --- Bag-level loss --------------------------------------------------
    print('\nInit loss function...', end=' ')
    if args.bag_loss == 'svm':
        from topk import SmoothTop1SVM
        loss_fn = SmoothTop1SVM(n_classes = args.n_classes)
        # `device` here is the module-level device (imported via utils.utils).
        if device.type == 'cuda':
            loss_fn = loss_fn.cuda()
    else:
        loss_fn = nn.CrossEntropyLoss()
    print('Done!')

    # --- Model construction, dispatched on args.model_type ---------------
    print('\nInit Model...', end=' ')
    model_dict = {'path_input_dim': args.path_input_dim, "dropout": args.drop_out, 'n_classes': args.n_classes}
    if args.model_type == 'clam' and args.subtyping:
        model_dict.update({'subtyping': True})
    if args.model_size is not None and args.model_type != 'mil':
        model_dict.update({"size_arg": args.model_size})

    if args.model_type in ['clam_sb', 'clam_mb']:
        if args.subtyping:
            model_dict.update({'subtyping': True})
        if args.B > 0:
            model_dict.update({'k_sample': args.B})
        # CLAM additionally needs an instance-level clustering loss.
        if args.inst_loss == 'svm':
            from topk import SmoothTop1SVM
            instance_loss_fn = SmoothTop1SVM(n_classes = 2)
            if device.type == 'cuda':
                instance_loss_fn = instance_loss_fn.cuda()
        else:
            instance_loss_fn = nn.CrossEntropyLoss()
        if args.model_type =='clam_sb':
            model = CLAM_SB(**model_dict, instance_loss_fn=instance_loss_fn)
        elif args.model_type == 'clam_mb':
            model = CLAM_MB(**model_dict, instance_loss_fn=instance_loss_fn)
        else:
            raise NotImplementedError
    elif 'hipt' in args.model_type:
        if args.model_type == 'hipt_n':
            model = HIPT_None_FC(**model_dict)
        elif args.model_type == 'hipt_lgp':
            model = HIPT_LGP_FC(**model_dict, freeze_4k=args.freeze_4k, pretrain_4k=args.pretrain_4k, freeze_WSI=args.freeze_WSI, pretrain_WSI=args.pretrain_WSI)
        elif args.model_type == 'hipt_gp':
            model = HIPT_GP_FC(**model_dict, freeze_WSI=args.freeze_WSI, pretrain_WSI=args.pretrain_WSI)
    elif args.model_type == 'dsmil':
        i_classifier = FCLayer(in_size=args.path_input_dim, out_size=model_dict['n_classes'])
        b_classifier = BClassifier(input_size=args.path_input_dim, output_class=model_dict['n_classes'], dropout_v=0.0)
        model = MILNet(i_classifier, b_classifier)
    elif args.model_type == 'dgcn':
        # NOTE: model_dict is rebuilt here, discarding dropout/n_classes keys.
        model_dict = {'path_input_dim': args.path_input_dim}
        model = DeepGraphConv(num_features=model_dict['path_input_dim'], n_classes=args.n_classes)
    elif args.model_type == 'mi_fcn':
        model = MIL_Cluster_FC(path_input_dim=args.path_input_dim, n_classes=args.n_classes)
    else: # args.model_type == 'mil'
        if args.n_classes > 2:
            model = MIL_fc_mc(**model_dict)
        else:
            model = MIL_fc(**model_dict)

    # Move the model to GPU(s): prefer the model's own relocate() if defined.
    if hasattr(model, "relocate"):
        model.relocate()
    else:
        model = model.to(torch.device('cuda'))
    print('Done!')
    print_network(model)

    print('\nInit optimizer ...', end=' ')
    optimizer = get_optim(model, args)
    print('Done!')

    print('\nInit Loaders...', end=' ')
    train_loader = get_split_loader(train_split, training=True, testing = args.testing, weighted = args.weighted_sample, mode=args.mode)
    val_loader = get_split_loader(val_split,  testing = args.testing, mode=args.mode)
    test_loader = get_split_loader(test_split, testing = args.testing, mode=args.mode)
    print('Done!')

    print('\nSetup EarlyStopping...', end=' ')
    if args.early_stopping:
        early_stopping = EarlyStopping(patience = 20, stop_epoch=50, verbose = True)
    else:
        early_stopping = None
    print('Done!')

    # --- Training loop: CLAM uses its dedicated loops, all else the generic ones.
    for epoch in range(args.max_epochs):
        if args.model_type in ['clam_sb', 'clam_mb'] and not args.no_inst_cluster:
            train_loop_clam(epoch, model, train_loader, optimizer, args.n_classes, args.bag_weight, writer, loss_fn, dropinput=args.dropinput)
            stop = validate_clam(cur, epoch, model, val_loader, args.n_classes,
                early_stopping, writer, loss_fn, args.results_dir)
        else:
            train_loop(epoch, model, train_loader, optimizer, args.n_classes, writer, loss_fn)
            stop = validate(cur, epoch, model, val_loader, args.n_classes,
                early_stopping, writer, loss_fn, args.results_dir)
        if stop:
            break

    # Restore the best checkpoint (early stopping) or persist the final weights.
    if args.early_stopping:
        model.load_state_dict(torch.load(os.path.join(args.results_dir, "s_{}_checkpoint.pt".format(cur))))
    else:
        torch.save(model.state_dict(), os.path.join(args.results_dir, "s_{}_checkpoint.pt".format(cur)))

    # --- Final evaluation on val and test --------------------------------
    _, val_error, val_auc, _= summary(model, val_loader, args.n_classes)
    print('Val error: {:.4f}, ROC AUC: {:.4f}'.format(val_error, val_auc))

    results_dict, test_error, test_auc, acc_logger = summary(model, test_loader, args.n_classes)
    print('Test error: {:.4f}, ROC AUC: {:.4f}'.format(test_error, test_auc))

    for i in range(args.n_classes):
        acc, correct, count = acc_logger.get_summary(i)
        print('class {}: acc {}, correct {}/{}'.format(i, acc, correct, count))
        if writer:
            writer.add_scalar('final/test_class_{}_acc'.format(i), acc, 0)

    if writer:
        writer.add_scalar('final/val_error', val_error, 0)
        writer.add_scalar('final/val_auc', val_auc, 0)
        writer.add_scalar('final/test_error', test_error, 0)
        writer.add_scalar('final/test_auc', test_auc, 0)
        writer.close()
    return results_dict, test_auc, val_auc, 1-test_error, 1-val_error
def train_loop(epoch, model, loader, optimizer, n_classes, writer = None, loss_fn = None, gc=32):
    """One training epoch for non-CLAM models (batch size 1 bag per step).

    NOTE(review): the loss is divided by `gc` (=32) but optimizer.step() runs
    every batch — this looks like leftover gradient-accumulation code and
    effectively just scales the learning rate by 1/gc. Confirm intent before
    changing.
    """
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.train()
    acc_logger = Accuracy_Logger(n_classes=n_classes)
    train_loss = 0.
    train_error = 0.

    print('\n')
    for batch_idx, batch in enumerate(loader):
        # Cluster-based models (e.g. MIL_Cluster_FC) receive cluster ids too.
        if hasattr(model, "num_clusters"):
            data, cluster_id, label = batch
            data, cluster_id, label = data.to(device, non_blocking=True), cluster_id, label.to(device, non_blocking=True)
        else:
            data, label = batch
            data, label = data.to(device, non_blocking=True), label.to(device, non_blocking=True)
            cluster_id = None

        logits, Y_prob, Y_hat, _, _ = model(data, cluster_id=cluster_id)
        #logits, Y_prob, Y_hat, _, _ = model(x_path=data)

        acc_logger.log(Y_hat, label)
        loss = loss_fn(logits, label)
        loss_value = loss.item()

        train_loss += loss_value
        if (batch_idx + 1) % 20 == 0:
            print('batch {}, loss: {:.4f}, label: {}, bag_size: {}'.format(batch_idx, loss_value, label.item(), data.size(0)))

        error = calculate_error(Y_hat, label)
        train_error += error

        loss = loss / gc
        loss.backward()
        # step
        optimizer.step()
        optimizer.zero_grad()

    # calculate loss and error for epoch
    train_loss /= len(loader)
    train_error /= len(loader)

    print('Epoch: {}, train_loss: {:.4f}, train_error: {:.4f}'.format(epoch, train_loss, train_error))
    for i in range(n_classes):
        acc, correct, count = acc_logger.get_summary(i)
        print('class {}: acc {}, correct {}/{}'.format(i, acc, correct, count))
        if writer:
            writer.add_scalar('train/class_{}_acc'.format(i), acc, epoch)

    if writer:
        writer.add_scalar('train/loss', train_loss, epoch)
        writer.add_scalar('train/error', train_error, epoch)
def validate(cur, epoch, model, loader, n_classes, early_stopping = None, writer = None, loss_fn = None, results_dir=None):
    """One validation pass for non-CLAM models.

    Returns True when early stopping triggers (caller breaks the epoch loop),
    False otherwise.
    """
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()
    acc_logger = Accuracy_Logger(n_classes=n_classes)
    # loader.dataset.update_mode(True)
    val_loss = 0.
    val_error = 0.

    # One row of class probabilities per bag (loader yields one bag per batch).
    prob = np.zeros((len(loader), n_classes))
    labels = np.zeros(len(loader))

    with torch.no_grad():
        for batch_idx, batch in enumerate(loader):
            if hasattr(model, "num_clusters"):
                data, cluster_id, label = batch
                data, cluster_id, label = data.to(device, non_blocking=True), cluster_id, label.to(device, non_blocking=True)
            else:
                data, label = batch
                data, label = data.to(device, non_blocking=True), label.to(device, non_blocking=True)
                cluster_id = None

            logits, Y_prob, Y_hat, _, _ = model(data, cluster_id=cluster_id)
            #logits, Y_prob, Y_hat, _, _ = model(x_path=data)

            acc_logger.log(Y_hat, label)

            loss = loss_fn(logits, label)

            prob[batch_idx] = Y_prob.cpu().numpy()
            labels[batch_idx] = label.item()

            val_loss += loss.item()
            error = calculate_error(Y_hat, label)
            val_error += error

    val_error /= len(loader)
    val_loss /= len(loader)

    # Binary: AUC on the positive-class column; multiclass: one-vs-rest AUC.
    if n_classes == 2:
        auc = roc_auc_score(labels, prob[:, 1])
    else:
        auc = roc_auc_score(labels, prob, multi_class='ovr')

    if writer:
        writer.add_scalar('val/loss', val_loss, epoch)
        writer.add_scalar('val/auc', auc, epoch)
        writer.add_scalar('val/error', val_error, epoch)

    print('\nVal Set, val_loss: {:.4f}, val_error: {:.4f}, auc: {:.4f}'.format(val_loss, val_error, auc))
    for i in range(n_classes):
        acc, correct, count = acc_logger.get_summary(i)
        print('class {}: acc {}, correct {}/{}'.format(i, acc, correct, count))

    if early_stopping:
        assert results_dir
        # Early stopping tracks val_loss and writes the best checkpoint itself.
        early_stopping(epoch, val_loss, model, ckpt_name = os.path.join(results_dir, "s_{}_checkpoint.pt".format(cur)))

        if early_stopping.early_stop:
            print("Early stopping")
            return True

    return False
def train_loop_clam(epoch, model, loader, optimizer, n_classes, bag_weight, writer = None, loss_fn = None, dropinput=0.0):
    """One training epoch for CLAM models (bag loss + instance clustering loss).

    The total objective is `bag_weight * bag_loss + (1-bag_weight) * instance_loss`.
    `dropinput` optionally applies feature dropout to the input bag.
    """
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.train()
    acc_logger = Accuracy_Logger(n_classes=n_classes)
    inst_logger = Accuracy_Logger(n_classes=n_classes)

    train_loss = 0.
    train_error = 0.
    train_inst_loss = 0.
    inst_count = 0

    print('\n')
    for batch_idx, batch in enumerate(loader):
        if hasattr(model, "num_clusters"):
            data, cluster_id, label = batch
            data, cluster_id, label = data.to(device), cluster_id, label.to(device)
        else:
            data, label = batch
            data, label = data.to(device), label.to(device)
            cluster_id = None

        # Optional feature dropout on the raw input bag (regularization).
        if dropinput > 0:
            data = F.dropout(data, p=dropinput)

        logits, Y_prob, Y_hat, _, instance_dict = model(h=data, cluster_id=cluster_id, label=label, instance_eval=True)

        acc_logger.log(Y_hat, label)
        loss = loss_fn(logits, label)
        loss_value = loss.item()

        instance_loss = instance_dict['instance_loss']
        inst_count+=1
        instance_loss_value = instance_loss.item()
        train_inst_loss += instance_loss_value

        # Weighted combination of bag-level and instance-level objectives.
        total_loss = bag_weight * loss + (1-bag_weight) * instance_loss

        inst_preds = instance_dict['inst_preds']
        inst_labels = instance_dict['inst_labels']
        inst_logger.log_batch(inst_preds, inst_labels)

        train_loss += loss_value
        if (batch_idx + 1) % 20 == 0:
            print('batch {}, loss: {:.4f}, instance_loss: {:.4f}, weighted_loss: {:.4f}, '.format(batch_idx, loss_value, instance_loss_value, total_loss.item()) +
                'label: {}, bag_size: {}'.format(label.item(), data.size(0)))

        error = calculate_error(Y_hat, label)
        train_error += error

        # backward pass
        total_loss.backward()
        # step
        optimizer.step()
        optimizer.zero_grad()

    # calculate loss and error for epoch
    train_loss /= len(loader)
    train_error /= len(loader)

    if inst_count > 0:
        train_inst_loss /= inst_count
        print('\n')
        # CLAM's instance classifier is binary (in/out of class) — report 2 classes.
        for i in range(2):
            acc, correct, count = inst_logger.get_summary(i)
            print('class {} clustering acc {}: correct {}/{}'.format(i, acc, correct, count))

    print('Epoch: {}, train_loss: {:.4f}, train_clustering_loss:  {:.4f}, train_error: {:.4f}'.format(epoch, train_loss, train_inst_loss, train_error))
    for i in range(n_classes):
        acc, correct, count = acc_logger.get_summary(i)
        print('class {}: acc {}, correct {}/{}'.format(i, acc, correct, count))
        if writer and acc is not None:
            writer.add_scalar('train/class_{}_acc'.format(i), acc, epoch)

    if writer:
        writer.add_scalar('train/loss', train_loss, epoch)
        writer.add_scalar('train/error', train_error, epoch)
        writer.add_scalar('train/clustering_loss', train_inst_loss, epoch)
def validate_clam(cur, epoch, model, loader, n_classes, early_stopping = None, writer = None, loss_fn = None, results_dir = None):
    """One validation pass for CLAM models (tracks bag and instance losses).

    Returns True when early stopping triggers, False otherwise.
    """
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()
    acc_logger = Accuracy_Logger(n_classes=n_classes)
    inst_logger = Accuracy_Logger(n_classes=n_classes)
    val_loss = 0.
    val_error = 0.

    val_inst_loss = 0.
    val_inst_acc = 0.
    inst_count=0

    prob = np.zeros((len(loader), n_classes))
    labels = np.zeros(len(loader))
    # NOTE(review): sample_size is unused below — looks vestigial.
    sample_size = model.k_sample
    with torch.no_grad():
        for batch_idx, batch in enumerate(loader):
            if hasattr(model, "num_clusters"):
                data, cluster_id, label = batch
                data, cluster_id, label = data.to(device), cluster_id, label.to(device)
            else:
                data, label = batch
                data, label = data.to(device), label.to(device)
                cluster_id = None

            logits, Y_prob, Y_hat, _, instance_dict = model(h=data, cluster_id=cluster_id, label=label, instance_eval=True)
            acc_logger.log(Y_hat, label)

            loss = loss_fn(logits, label)
            val_loss += loss.item()

            instance_loss = instance_dict['instance_loss']

            inst_count+=1
            instance_loss_value = instance_loss.item()
            val_inst_loss += instance_loss_value

            inst_preds = instance_dict['inst_preds']
            inst_labels = instance_dict['inst_labels']
            inst_logger.log_batch(inst_preds, inst_labels)

            prob[batch_idx] = Y_prob.cpu().numpy()
            labels[batch_idx] = label.item()

            error = calculate_error(Y_hat, label)
            val_error += error

    val_error /= len(loader)
    val_loss /= len(loader)

    # Binary: plain AUC; multiclass: macro average of per-class one-vs-rest
    # AUCs, skipping (NaN) classes absent from the validation labels.
    if n_classes == 2:
        auc = roc_auc_score(labels, prob[:, 1])
        aucs = []
    else:
        aucs = []
        binary_labels = label_binarize(labels, classes=[i for i in range(n_classes)])
        for class_idx in range(n_classes):
            if class_idx in labels:
                fpr, tpr, _ = roc_curve(binary_labels[:, class_idx], prob[:, class_idx])
                aucs.append(calc_auc(fpr, tpr))
            else:
                aucs.append(float('nan'))

        auc = np.nanmean(np.array(aucs))

    print('\nVal Set, val_loss: {:.4f}, val_error: {:.4f}, auc: {:.4f}'.format(val_loss, val_error, auc))
    if inst_count > 0:
        val_inst_loss /= inst_count
        for i in range(2):
            acc, correct, count = inst_logger.get_summary(i)
            print('class {} clustering acc {}: correct {}/{}'.format(i, acc, correct, count))

    if writer:
        writer.add_scalar('val/loss', val_loss, epoch)
        writer.add_scalar('val/auc', auc, epoch)
        writer.add_scalar('val/error', val_error, epoch)
        writer.add_scalar('val/inst_loss', val_inst_loss, epoch)

    for i in range(n_classes):
        acc, correct, count = acc_logger.get_summary(i)
        print('class {}: acc {}, correct {}/{}'.format(i, acc, correct, count))

        if writer and acc is not None:
            writer.add_scalar('val/class_{}_acc'.format(i), acc, epoch)

    if early_stopping:
        assert results_dir
        early_stopping(epoch, val_loss, model, ckpt_name = os.path.join(results_dir, "s_{}_checkpoint.pt".format(cur)))

        if early_stopping.early_stop:
            print("Early stopping")
            return True

    return False
def summary(model, loader, n_classes):
    """Evaluate `model` on `loader` and collect per-slide results.

    Returns:
        (patient_results, test_error, auc, acc_logger) where patient_results
        maps slide_id -> {'slide_id', 'prob', 'label'}.
    """
    device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
    acc_logger = Accuracy_Logger(n_classes=n_classes)
    model.eval()
    test_loss = 0.
    test_error = 0.

    all_probs = np.zeros((len(loader), n_classes))
    all_labels = np.zeros(len(loader))

    # Assumes the loader iterates the dataset sequentially, so batch_idx
    # lines up with slide_data rows.
    slide_ids = loader.dataset.slide_data['slide_id']
    patient_results = {}

    for batch_idx, batch in enumerate(loader):
        if hasattr(model, "num_clusters"):
            data, cluster_id, label = batch
            data, cluster_id, label = data.to(device), cluster_id, label.to(device)
        else:
            data, label = batch
            data, label = data.to(device), label.to(device)
            cluster_id = None

        #data, label = data.to(device), label.to(device)
        slide_id = slide_ids.iloc[batch_idx]
        with torch.no_grad():
            logits, Y_prob, Y_hat, _, _ = model(data, cluster_id=cluster_id)
            #logits, Y_prob, Y_hat, _, _ = model(data)

        acc_logger.log(Y_hat, label)
        probs = Y_prob.cpu().numpy()
        all_probs[batch_idx] = probs
        all_labels[batch_idx] = label.item()

        patient_results.update({slide_id: {'slide_id': np.array(slide_id), 'prob': probs, 'label': label.item()}})
        error = calculate_error(Y_hat, label)
        test_error += error

    test_error /= len(loader)

    # Binary: plain AUC; multiclass: macro average of per-class one-vs-rest
    # AUCs (NaN for classes missing from the test labels).
    if n_classes == 2:
        auc = roc_auc_score(all_labels, all_probs[:, 1])
        aucs = []
    else:
        aucs = []
        binary_labels = label_binarize(all_labels, classes=[i for i in range(n_classes)])
        for class_idx in range(n_classes):
            if class_idx in all_labels:
                fpr, tpr, _ = roc_curve(binary_labels[:, class_idx], all_probs[:, class_idx])
                print(calc_auc(fpr, tpr))
                aucs.append(calc_auc(fpr, tpr))
            else:
                print('nan')
                aucs.append(float('nan'))

        auc = np.nanmean(np.array(aucs))

    return patient_results, test_error, auc, acc_logger
| 23,019 | 36.986799 | 163 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/utils/utils.py | import pickle
import torch
import numpy as np
import torch.nn as nn
import pdb
import torch
import numpy as np
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader, Sampler, WeightedRandomSampler, RandomSampler, SequentialSampler, sampler
import torch.optim as optim
import pdb
import torch.nn.functional as F
import math
from itertools import islice
import collections
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
class SubsetSequentialSampler(Sampler):
    """Iterate over a fixed list of dataset indices in the given order.

    Arguments:
        indices (sequence): the indices to yield, in order, without replacement.
    """

    def __init__(self, indices):
        self.indices = indices

    def __len__(self):
        return len(self.indices)

    def __iter__(self):
        return iter(self.indices)
def collate_MIL(batch):
    """Collate (features, label) pairs: concatenate patch features along dim 0,
    stack labels into a LongTensor."""
    features = torch.cat([sample[0] for sample in batch], dim=0)
    labels = torch.LongTensor([sample[1] for sample in batch])
    return [features, labels]
def collate_MIL_cluster(batch):
    """Collate (features, cluster_ids, label) triples: concatenate features and
    cluster ids along dim 0 (ids cast to LongTensor), stack labels."""
    features = torch.cat([sample[0] for sample in batch], dim=0)
    clusters = torch.cat([sample[1] for sample in batch], dim=0).type(torch.LongTensor)
    labels = torch.LongTensor([sample[2] for sample in batch])
    return [features, clusters, labels]
def collate_MIL_graph(batch):
    """Collate for graph-bag batches: pass the first sample's graph through
    unchanged, stack all labels into a LongTensor."""
    graph = batch[0][0]
    labels = torch.LongTensor([sample[1] for sample in batch])
    return [graph, labels]
def collate_features(batch):
    """Collate (features, coords) pairs: concatenate feature tensors along dim 0,
    vertically stack coordinate arrays."""
    features = torch.cat([sample[0] for sample in batch], dim=0)
    coordinates = np.vstack([sample[1] for sample in batch])
    return [features, coordinates]
def get_simple_loader(dataset, batch_size=1, num_workers=1):
    """Sequential DataLoader over `dataset` using the MIL collate function.

    Args:
        dataset: map-style dataset yielding (features, label) items.
        batch_size: slides per batch (features are concatenated by collate_MIL).
        num_workers: worker processes, used only when CUDA is available.
    """
    # BUG FIX: the kwargs dict previously spelled 'num_workers' twice
    # ({'num_workers': 4, 'pin_memory': False, 'num_workers': num_workers});
    # the first value was silently discarded. Keep the effective value only.
    kwargs = {'pin_memory': False, 'num_workers': num_workers} if device.type == "cuda" else {}
    loader = DataLoader(dataset, batch_size=batch_size,
                        sampler=sampler.SequentialSampler(dataset),
                        collate_fn=collate_MIL, **kwargs)
    return loader
def get_split_loader(split_dataset, training = False, testing = False, weighted = False, mode='cluster'):
    """Return a DataLoader for one train/val/test split.

    Args:
        split_dataset: dataset holding the split.
        training: if True, sample randomly (optionally class-balanced).
        testing: if True, load a random ~10% subset (debug mode).
        weighted: if True (and training), draw samples with
            inverse-class-frequency weights.
        mode: selects the collate function ('cluster', 'graph', or plain MIL).
    """
    if mode == 'cluster':
        collate = collate_MIL_cluster
    elif mode == 'graph':
        collate = collate_MIL_graph
    else:
        collate = collate_MIL

    kwargs = {'num_workers': 8} if device.type == "cuda" else {}
    if not testing:
        if training:
            if weighted:
                weights = make_weights_for_balanced_classes_split(split_dataset)
                loader = DataLoader(split_dataset, batch_size=1, sampler=WeightedRandomSampler(weights, len(weights)), collate_fn=collate, **kwargs)
            else:
                loader = DataLoader(split_dataset, batch_size=1, sampler=RandomSampler(split_dataset), collate_fn=collate, **kwargs)
        else:
            loader = DataLoader(split_dataset, batch_size=1, sampler=SequentialSampler(split_dataset), collate_fn=collate, **kwargs)
    else:
        # BUG FIX: the original passed the sample count as np.arange's *stop*
        # argument — np.arange(len(ds), 0.1*len(ds)) is empty, so
        # np.random.choice raised. Sample 10% of indices from the full range.
        ids = np.random.choice(np.arange(len(split_dataset)), int(len(split_dataset) * 0.1), replace=False)
        loader = DataLoader(split_dataset, batch_size=1, sampler=SubsetSequentialSampler(ids), collate_fn=collate, **kwargs)
    return loader
def get_optim(model, args):
    """Build the optimizer named by args.opt ('adam' or 'sgd') over the
    model's trainable parameters, using args.lr and args.reg (weight decay)."""
    trainable = filter(lambda p: p.requires_grad, model.parameters())
    if args.opt == "adam":
        return optim.Adam(trainable, lr=args.lr, weight_decay=args.reg)
    if args.opt == 'sgd':
        return optim.SGD(trainable, lr=args.lr, momentum=0.9, weight_decay=args.reg)
    raise NotImplementedError
def print_network(net):
    """Print the module structure followed by total / trainable parameter counts."""
    print(net)
    total = 0
    trainable = 0
    for p in net.parameters():
        count = p.numel()
        total += count
        if p.requires_grad:
            trainable += count
    print('Total number of parameters: %d' % total)
    print('Total number of trainable parameters: %d' % trainable)
def generate_split(cls_ids, val_num, test_num, samples, n_splits = 5,
    seed = 7, label_frac = 1.0, custom_test_ids = None):
    """Yield (train_ids, val_ids, test_ids) index lists for `n_splits` folds.

    cls_ids gives per-class arrays of sample indices; val_num / test_num give
    the per-class validation / test counts. If label_frac < 1, only the first
    ceil(frac * n) of each class's remaining training ids are kept. If
    custom_test_ids is given, it is used as a fixed test split for every fold.

    NOTE: the RNG is seeded once before the loop, so folds are reproducible
    for a given seed but differ from one another.
    """
    indices = np.arange(samples).astype(int)

    if custom_test_ids is not None:
        # Custom test ids are excluded from the candidate pool entirely.
        indices = np.setdiff1d(indices, custom_test_ids)

    np.random.seed(seed)
    for i in range(n_splits):
        all_val_ids = []
        all_test_ids = []
        sampled_train_ids = []

        if custom_test_ids is not None: # pre-built test split, do not need to sample
            all_test_ids.extend(custom_test_ids)

        for c in range(len(val_num)):
            possible_indices = np.intersect1d(cls_ids[c], indices) #all indices of this class
            val_ids = np.random.choice(possible_indices, val_num[c], replace = False) # validation ids

            remaining_ids = np.setdiff1d(possible_indices, val_ids) #indices of this class left after validation
            all_val_ids.extend(val_ids)

            if custom_test_ids is None: # sample test split
                test_ids = np.random.choice(remaining_ids, test_num[c], replace = False)
                remaining_ids = np.setdiff1d(remaining_ids, test_ids)
                all_test_ids.extend(test_ids)

            if label_frac == 1:
                sampled_train_ids.extend(remaining_ids)
            else:
                # Keep a deterministic prefix of the remaining ids (no resampling).
                sample_num = math.ceil(len(remaining_ids) * label_frac)
                slice_ids = np.arange(sample_num)
                sampled_train_ids.extend(remaining_ids[slice_ids])

        yield sampled_train_ids, all_val_ids, all_test_ids
def nth(iterator, n, default=None):
    """Return the n-th item of `iterator`, or `default` if it is exhausted.

    With n=None the iterator is fully consumed and an empty deque is returned
    (the classic itertools "consume" recipe).
    """
    if n is None:
        return collections.deque(iterator, maxlen=0)
    return next(islice(iterator, n, None), default)
def calculate_error(Y_hat, Y):
    """Fraction of predictions in Y_hat that differ from the targets Y."""
    accuracy = Y_hat.float().eq(Y.float()).float().mean().item()
    return 1. - accuracy
def make_weights_for_balanced_classes_split(dataset):
    """Per-sample weights inverse to class frequency, for WeightedRandomSampler.

    Each sample's weight is N / |class(sample)|, so every class contributes
    equally in expectation when sampling with replacement.
    """
    total = float(len(dataset))
    class_weight = [total / len(ids) for ids in dataset.slide_cls_ids]
    sample_weights = [class_weight[dataset.getlabel(idx)] for idx in range(len(dataset))]
    return torch.DoubleTensor(sample_weights)
def initialize_weights(module):
    """Xavier-init all Linear layers (bias zeroed); BatchNorm1d gets
    unit scale and zero shift."""
    for layer in module.modules():
        if isinstance(layer, nn.Linear):
            nn.init.xavier_normal_(layer.weight)
            layer.bias.data.zero_()
        elif isinstance(layer, nn.BatchNorm1d):
            nn.init.constant_(layer.weight, 1)
            nn.init.constant_(layer.bias, 0)
| 6,214 | 32.413978 | 197 | py |
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/utils/file_utils.py | import pickle
import h5py
def save_pkl(filename, save_object):
    """Pickle `save_object` to `filename`.

    Uses a context manager so the file handle is closed even if pickling
    raises (the original leaked the handle on error).
    """
    with open(filename, 'wb') as writer:
        pickle.dump(save_object, writer)
def load_pkl(filename):
    """Load and return the pickled object stored at `filename`.

    Uses a context manager so the file handle is closed even if
    unpickling raises (the original leaked the handle on error).
    """
    with open(filename, 'rb') as loader:
        return pickle.load(loader)
def save_hdf5(output_path, asset_dict, attr_dict= None, mode='a'):
    """Write/append the arrays in `asset_dict` to an HDF5 file.

    Each key becomes a dataset chunked one row at a time and resizable along
    axis 0; a later call with mode='a' appends new rows to existing datasets.
    `attr_dict`, if given, maps the same keys to {attr_name: attr_value}
    metadata, written only when a dataset is first created. Returns
    `output_path`.
    """
    file = h5py.File(output_path, mode)
    for key, val in asset_dict.items():
        data_shape = val.shape
        if key not in file:
            # First write for this key: create an appendable dataset
            # (maxshape None on axis 0 means unlimited growth).
            data_type = val.dtype
            chunk_shape = (1, ) + data_shape[1:]
            maxshape = (None, ) + data_shape[1:]
            dset = file.create_dataset(key, shape=data_shape, maxshape=maxshape, chunks=chunk_shape, dtype=data_type)
            dset[:] = val
            if attr_dict is not None:
                if key in attr_dict.keys():
                    for attr_key, attr_val in attr_dict[key].items():
                        dset.attrs[attr_key] = attr_val
        else:
            # Subsequent write: grow the dataset and copy the new rows
            # into the freshly added tail.
            dset = file[key]
            dset.resize(len(dset) + data_shape[0], axis=0)
            dset[-data_shape[0]:] = val
    file.close()
    return output_path
HIPT | HIPT-master/2-Weakly-Supervised-Subtyping/utils/eval_utils.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.model_mil import MIL_fc, MIL_fc_mc
from models.model_clam import CLAM_SB, CLAM_MB
import pdb
import os
import pandas as pd
from utils.utils import *
from utils.core_utils import Accuracy_Logger
from sklearn.metrics import roc_auc_score, roc_curve, auc
from sklearn.preprocessing import label_binarize
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
def initiate_model(args, ckpt_path):
    """Instantiate the model selected by `args`, load `ckpt_path` weights,
    relocate it to the configured device, and put it in eval mode."""
    model_kwargs = {"dropout": args.drop_out, 'n_classes': args.n_classes}
    if args.model_size is not None and args.model_type in ['clam_sb', 'clam_mb']:
        model_kwargs["size_arg"] = args.model_size

    if args.model_type == 'clam_sb':
        model = CLAM_SB(**model_kwargs)
    elif args.model_type == 'clam_mb':
        model = CLAM_MB(**model_kwargs)
    elif args.n_classes > 2:
        model = MIL_fc_mc(**model_kwargs)
    else:
        model = MIL_fc(**model_kwargs)

    ckpt = torch.load(ckpt_path)
    # Drop instance-loss buffers and strip any '.module' DataParallel prefix.
    ckpt_clean = {k.replace('.module', ''): v
                  for k, v in ckpt.items() if 'instance_loss_fn' not in k}
    model.load_state_dict(ckpt_clean, strict=True)
    model.relocate()
    model.eval()
    return model
def eval(dataset, args, ckpt_path):
    """Evaluate the checkpoint at `ckpt_path` on `dataset`.

    Builds the model, runs `summary` over a sequential loader, prints the
    error and AUC, and returns (model, patient_results, test_error, auc, df),
    where df holds per-slide labels, predictions and class probabilities.

    (Dead `import pdb` and commented-out debug code removed.)
    """
    model = initiate_model(args, ckpt_path)
    loader = get_simple_loader(dataset)
    patient_results, test_error, auc, df, _ = summary(model, loader, args)
    print('test_error: ', test_error)
    print('auc: ', auc)
    return model, patient_results, test_error, auc, df
def summary(model, loader, args):
    """Run inference over every slide in `loader` and compute slide-level metrics.

    Returns:
        patient_results: dict slide_id -> {'slide_id', 'prob', 'label'}
        test_error: mean classification error over slides
        auc_score: binary ROC-AUC, macro/micro multi-class AUC, or -1 when
            only one label class is present (AUC undefined)
        df: per-slide DataFrame with labels, predictions and probabilities
        acc_logger: per-class accuracy tallies
    """
    acc_logger = Accuracy_Logger(n_classes=args.n_classes)
    model.eval()
    test_loss = 0.
    test_error = 0.

    # Pre-allocated per-slide outputs; the loader yields one slide per batch.
    all_probs = np.zeros((len(loader), args.n_classes))
    all_labels = np.zeros(len(loader))
    all_preds = np.zeros(len(loader))

    slide_ids = loader.dataset.slide_data['slide_id']
    patient_results = {}
    from tqdm import tqdm
    for batch_idx, (data, label) in tqdm(enumerate(loader)):
        data, label = data.to(device), label.to(device)
        slide_id = slide_ids.iloc[batch_idx]
        with torch.no_grad():
            logits, Y_prob, Y_hat, _, results_dict = model(data)

        acc_logger.log(Y_hat, label)

        probs = Y_prob.cpu().numpy()
        all_probs[batch_idx] = probs
        all_labels[batch_idx] = label.item()
        all_preds[batch_idx] = Y_hat.item()

        patient_results.update({slide_id: {'slide_id': np.array(slide_id), 'prob': probs, 'label': label.item()}})

        error = calculate_error(Y_hat, label)
        test_error += error
        # Free the (potentially large) feature bag before the next slide.
        del data

    test_error /= len(loader)

    aucs = []
    if len(np.unique(all_labels)) == 1:
        # AUC is undefined with a single class; -1 is the sentinel value.
        auc_score = -1

    else:
        if args.n_classes == 2:
            import pdb
            auc_score = roc_auc_score(all_labels, all_probs[:, 1])
            # Youden's J statistic: pick the cutoff maximizing TPR - FPR,
            # then derive a confusion matrix at that operating point.
            fpr, tpr, thresholds = roc_curve(all_labels, all_probs[:,1])
            J = tpr - fpr
            ix = np.argmax(J)
            cutoff = thresholds[ix]
            y_pred = np.array(all_probs[:,1] > cutoff).astype(int)
            tn, fp, fn, tp = confusion_matrix(all_labels, y_pred).ravel()
            # NOTE(review): the classification report is computed but its
            # return value is discarded — confirm whether it should be printed.
            classification_report(all_labels, y_pred)
        else:
            # One-vs-rest AUC per class present in the labels.
            binary_labels = label_binarize(all_labels, classes=[i for i in range(args.n_classes)])
            for class_idx in range(args.n_classes):
                if class_idx in all_labels:
                    fpr, tpr, _ = roc_curve(binary_labels[:, class_idx], all_probs[:, class_idx])
                    aucs.append(auc(fpr, tpr))
                else:
                    aucs.append(float('nan'))
            if args.micro_average:
                binary_labels = label_binarize(all_labels, classes=[i for i in range(args.n_classes)])
                fpr, tpr, _ = roc_curve(binary_labels.ravel(), all_probs.ravel())
                auc_score = auc(fpr, tpr)
            else:
                auc_score = np.nanmean(np.array(aucs))

    results_dict = {'slide_id': slide_ids, 'Y': all_labels, 'Y_hat': all_preds}
    for c in range(args.n_classes):
        results_dict.update({'p_{}'.format(c): all_probs[:,c]})
    import pdb
    df = pd.DataFrame(results_dict)
    return patient_results, test_error, auc_score, df, acc_logger
benchmarking_graph | benchmarking_graph-main/src/md.py | from functools import partial
import jax
import jax.numpy as jnp
from jax import jit, lax, value_and_grad
from jax.experimental import optimizers
from .nve import nve, nve2, nve3
# ===============================
# ===============================
def dynamics_generator(ensemble, force_fn, shift_fn, params, dt, mass,):
    """Bind an ensemble integrator to a parameterized force function.

    Returns (init, run): `init` builds the initial state, `run(state, runs,
    stride)` integrates and returns the recorded trajectory.
    """
    bound_force = partial(force_fn, mass=mass)
    init, apply = ensemble(lambda R, V: bound_force(R, V, params), shift_fn, dt)

    def run(state, runs=100, stride=10):
        return solve_dynamics(state, apply, runs=runs, stride=stride)

    return init, run
def predition(R, V, params, force_fn, shift_fn, dt, mass, runs=1000, stride=10):
    """Integrate an NVE trajectory from initial (R, V); return the recorded states."""
    forces = partial(force_fn, mass=mass)
    init, apply = nve(lambda pos, vel: forces(pos, vel, params), shift_fn, dt)
    initial_state = init(R, V, mass)
    return solve_dynamics(initial_state, apply, runs=runs, stride=stride)
def predition4(R, V, params, force_fn, shift_fn, dt, mass, runs=1000, stride=10):
    """Integrate a trajectory with the `nve4` integrator.

    NOTE(review): `nve4` is never imported in this module (the top-level
    import brings in only nve, nve2 and nve3), so calling this function
    raises NameError as written. Either import nve4 from .nve or remove
    this variant — confirm intent.
    """
    func = partial(force_fn, mass=mass)
    init, apply = nve4(lambda R, V: func(R, V, params), shift_fn, dt)
    state = init(R, V, mass)
    states = solve_dynamics(state, apply, runs=runs, stride=stride)
    return states
# def predition(R, V, params, force_fn, shift_fn, dt, mass, runs=1000, stride=10):
# func = partial(force_fn, mass=mass)
# init, apply = nve(lambda R, V: func(R, V, params), shift_fn, dt)
# state = init(R, V, mass)
# states = solve_dynamics(state, apply, runs=runs, stride=stride)
# return states
def predition2(R, V, params, change_R_V, dt, mass, runs=1000, stride=10):
    """Roll out a trajectory with the nve2 integrator, recording every step."""
    init, step = nve2(params, change_R_V, dt)
    initial_state = init(R, V, mass)
    return solve_dynamics2(initial_state, step, runs=runs, stride=stride)
def predition3(R, V, params, change_Acc, dt, mass, runs=1000, stride=10):
    """Roll out a trajectory with the nve3 integrator (stride steps per frame)."""
    init, step = nve3(params, change_Acc, dt)
    initial_state = init(R, V, mass)
    return solve_dynamics(initial_state, step, runs=runs, stride=stride)
def solve_dynamics(init_state, apply, runs=100, stride=10):
    """Advance `init_state` for runs*stride steps of `apply`, recording one
    frame every `stride` steps. Returns the stacked trajectory (`runs` frames)."""
    single_step = jit(lambda _, s: apply(s))

    def advance(state, _):
        nxt = jax.lax.fori_loop(0, stride, single_step, state)
        return nxt, nxt

    @jit
    def scan_all(state0):
        return jax.lax.scan(advance, state0, jnp.array(range(runs)))

    _, trajectory = scan_all(init_state)
    return trajectory
def solve_dynamics2(init_state, apply, runs=100, stride=10):
    """Advance `init_state` with `apply`, recording every state.

    Unlike solve_dynamics, `apply` is called exactly once per recorded frame;
    `stride` is accepted for API symmetry but has no effect here.
    Returns the stacked trajectory of `runs` states.

    (The original bound an unused jitted `step` closure; removed.)
    """
    def advance(state, _):
        nxt = apply(state)
        return nxt, nxt

    @jit
    def scan_all(state0):
        return jax.lax.scan(advance, state0, jnp.array(range(runs)))

    final_state, trajectory = scan_all(init_state)
    return trajectory
# def solve_dynamics(state, apply, runs=100, stride=10):
# step = jit(lambda i, state: apply(state))
# states = [state]
# for i in range(runs):
# state = lax.fori_loop(0, stride, step, state)
# states += [state]
# return states
# def solve_dynamics(state, apply, runs=100, stride=10):
# step = jit(lambda i, state: apply(state))
# states = [state]
# for i in range(runs):
# state = lax.fori_loop(0, stride, step, state)
# states += [state]
# return states
def minimize(R, params, shift, pot_energy_fn, steps=10, gtol=1.0e-7, lr=1.0e-3):
    """Relax positions R with Adam to a local minimum of pot_energy_fn.

    Gradients are NaN-sanitized and clipped to [-1, 1]; every 100 steps the
    energy and squared-gradient tolerance are printed, and the loop stops
    early once the tolerance drops below `gtol`. Returns the relaxed R.
    """
    opt_init, opt_update, get_params = optimizers.adam(lr)
    opt_state = opt_init(R)

    energy_and_grad = value_and_grad(lambda pos: pot_energy_fn(pos, params))

    print(f"Step\tPot. Eng.\t\tTolerance")
    for step_idx in range(steps):
        energy, raw_grad = energy_and_grad(R)
        grad = jnp.clip(jnp.nan_to_num(raw_grad), a_min=-1.0, a_max=1.0)
        opt_state = opt_update(0, grad, opt_state)
        proposed = get_params(opt_state)
        # Apply the boundary handler to the proposed displacement.
        R, _ = shift(R, proposed - R, R)
        if step_idx % 100 == 0:
            _tol = jnp.square(grad).sum()
            print(f"{step_idx}\t{energy}\t\t{_tol}")
            if _tol < gtol:
                print(f"gtol reached: {_tol} which is < {gtol}")
                break
    return R
def _reflective(R, dR, V, _min=0.0, _max=4.0):
V_ = V
R_ = R
dR_ = jnp.maximum(jnp.minimum(dR, (_max-_min)/2), -(_max-_min)/2)
V_ = jnp.where(R + dR_ < _min, -V, V)
V_ = jnp.where(R + dR_ > _max, -V, V_)
R_ = jnp.where(R + dR_ < _min, 2*_min - (R+dR_), R+dR_)
R_ = jnp.where(R + dR_ > _max, 2*_max - (R+dR_), R_)
return R_, V_
def _periodic(R, dR, V, _min=0.0, _max=4.0):
V_ = V
R_ = R
dR_ = jnp.maximum(jnp.minimum(dR, (_max-_min)/2), -(_max-_min)/2)
R_ = jnp.where(R + dR_ < _min, _max - _min + (R+dR_), R+dR_)
R_ = jnp.where(R + dR_ > _max, _min - _max + (R+dR_), R_)
return R_, V_
def _open(R, dR, V):
"""R -> R + dR
V -> V
:param R: Position
:type R: array
:param dR: Displacement
:type dR: array
:param V: Velocity
:type V: array
:return: (R+dR, V)
:rtype: tuple
"""
return R+dR, V
# Default boundary handler for this module: open (non-periodic) boundaries.
shift = _open
def displacement(a, b):
    """A - B
    :param a: Vector A
    :type a: array
    :param b: Vector B
    :type b: array
    :return: a-b
    :rtype: array
    """
    return a-b
| 5,251 | 27.699454 | 83 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.