repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/dqn_algos/demo.py | import numpy as np
from arguments import get_args
from models import net
import torch
from rl_utils.env_wrapper.atari_wrapper import make_atari, wrap_deepmind
def get_tensors(obs):
    """Convert a single HWC frame-stacked observation into a (1, C, H, W) float32 tensor."""
    chw = np.transpose(obs, (2, 0, 1))
    batched = np.expand_dims(chw, 0)
    return torch.tensor(batched, dtype=torch.float32)
if __name__ == '__main__':
    args = get_args()
    # build the wrapped Atari environment with frame stacking
    env = make_atari(args.env_name)
    env = wrap_deepmind(env, frame_stack=True)
    # build the Q-network and restore the trained weights on CPU
    net = net(env.action_space.n, args.use_dueling)
    model_path = args.save_dir + args.env_name + '/model.pt'
    net.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
    # run a greedy rollout for a fixed number of frames
    obs = env.reset()
    for _ in range(2000):
        env.render()
        with torch.no_grad():
            q_values = net(get_tensors(obs))
        # greedy action: argmax over the Q-values
        action = torch.argmax(q_values.squeeze()).item()
        obs, reward, done, _ = env.step(action)
        if done:
            obs = env.reset()
    env.close()
| 1,134 | 30.527778 | 90 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/dqn_algos/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# the convolution layer of deepmind
class deepmind(nn.Module):
    """Nature-DQN convolutional torso: stacked 4x84x84 frames -> flat 32*7*7 features."""
    def __init__(self):
        super(deepmind, self).__init__()
        self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
        # orthogonal weights (relu gain) and zero biases for every conv layer
        relu_gain = nn.init.calculate_gain('relu')
        for conv in (self.conv1, self.conv2, self.conv3):
            nn.init.orthogonal_(conv.weight.data, gain=relu_gain)
            nn.init.constant_(conv.bias.data, 0)

    def forward(self, x):
        """Apply the three relu conv layers and flatten to (N, 32 * 7 * 7)."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        return x.view(-1, 32 * 7 * 7)
# in the initial, just the nature CNN
class net(nn.Module):
    """Q-value head on top of the deepmind torso, with an optional dueling architecture."""
    def __init__(self, num_actions, use_dueling=False):
        super(net, self).__init__()
        self.use_dueling = use_dueling
        # shared convolutional feature extractor
        self.cnn_layer = deepmind()
        if not self.use_dueling:
            # single stream: features -> hidden -> Q-values
            self.fc1 = nn.Linear(32 * 7 * 7, 256)
            self.action_value = nn.Linear(256, num_actions)
        else:
            # dueling: separate advantage and state-value streams
            self.action_fc = nn.Linear(32 * 7 * 7, 256)
            self.state_value_fc = nn.Linear(32 * 7 * 7, 256)
            self.action_value = nn.Linear(256, num_actions)
            self.state_value = nn.Linear(256, 1)

    def forward(self, inputs):
        """Return per-action Q-values; pixel inputs are rescaled from [0, 255] to [0, 1]."""
        features = self.cnn_layer(inputs / 255.0)
        if not self.use_dueling:
            return self.action_value(F.relu(self.fc1(features)))
        # advantage stream, centered so the mean advantage is zero
        advantage = self.action_value(F.relu(self.action_fc(features)))
        advantage = advantage - torch.mean(advantage, dim=1, keepdim=True)
        # state-value stream broadcast across actions: Q = V + (A - mean A)
        state_value = self.state_value(F.relu(self.state_value_fc(features)))
        return state_value + advantage
| 2,596 | 37.761194 | 88 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/dqn_algos/train.py | import sys
from arguments import get_args
from rl_utils.env_wrapper.create_env import create_single_env
from rl_utils.logger import logger, bench
from rl_utils.seeds.seeds import set_seeds
from dqn_agent import dqn_agent
import os
import numpy as np
if __name__ == '__main__':
    # parse the command line arguments
    args = get_args()
    # build the training environment and seed everything for reproducibility
    env = create_single_env(args)
    set_seeds(args)
    # run the DQN training loop, then shut the environment down
    dqn_trainer = dqn_agent(env, args)
    dqn_trainer.learn()
    env.close()
| 589 | 24.652174 | 61 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/dqn_algos/dqn_agent.py | import sys
import numpy as np
from models import net
from utils import linear_schedule, select_actions, reward_recorder
from rl_utils.experience_replay.experience_replay import replay_buffer
import torch
from datetime import datetime
import os
import copy
# define the dqn agent
# define the dqn agent
class dqn_agent:
    """Deep Q-learning trainer with a replay buffer and a periodically-synced target net.

    Supports the double-DQN target (args.use_double_net) and a dueling head
    (args.use_dueling, handled inside the `net` model).
    """
    def __init__(self, env, args):
        # environment handle and hyper-parameters
        self.env = env
        self.args = args
        # online Q-network
        self.net = net(self.env.action_space.n, self.args.use_dueling)
        # target network starts as a deep copy of the online network
        self.target_net = copy.deepcopy(self.net)
        # make sure the target net has the same weights as the network
        self.target_net.load_state_dict(self.net.state_dict())
        if self.args.cuda:
            self.net.cuda()
            self.target_net.cuda()
        # define the optimizer
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=self.args.lr)
        # define the replay memory
        self.buffer = replay_buffer(self.args.buffer_size)
        # epsilon is annealed linearly over the first exploration_fraction of training
        self.exploration_schedule = linear_schedule(int(self.args.total_timesteps * self.args.exploration_fraction), \
                                                    self.args.final_ratio, self.args.init_ratio)
        # create the folder to save the models
        if not os.path.exists(self.args.save_dir):
            os.mkdir(self.args.save_dir)
        # per-environment subfolder for checkpoints
        self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)
    # start to do the training
    def learn(self):
        """Run the main training loop for args.total_timesteps environment steps."""
        # running statistics of per-episode rewards
        episode_reward = reward_recorder()
        obs = np.array(self.env.reset())
        td_loss = 0
        for timestep in range(self.args.total_timesteps):
            explore_eps = self.exploration_schedule.get_value(timestep)
            with torch.no_grad():
                obs_tensor = self._get_tensors(obs)
                action_value = self.net(obs_tensor)
            # epsilon-greedy action selection
            action = select_actions(action_value, explore_eps)
            # excute actions
            obs_, reward, done, _ = self.env.step(action)
            obs_ = np.array(obs_)
            # store the transition; done is stored as a float mask
            self.buffer.add(obs, action, reward, obs_, float(done))
            obs = obs_
            # add the rewards
            episode_reward.add_rewards(reward)
            if done:
                obs = np.array(self.env.reset())
                # start new episode to store rewards
                episode_reward.start_new_episode()
            if timestep > self.args.learning_starts and timestep % self.args.train_freq == 0:
                # sample a minibatch from the replay buffer and take a TD step
                batch_samples = self.buffer.sample(self.args.batch_size)
                td_loss = self._update_network(batch_samples)
            if timestep > self.args.learning_starts and timestep % self.args.target_network_update_freq == 0:
                # sync the target network with the online network
                self.target_net.load_state_dict(self.net.state_dict())
            if done and episode_reward.num_episodes % self.args.display_interval == 0:
                print('[{}] Frames: {}, Episode: {}, Mean: {:.3f}, Loss: {:.3f}'.format(datetime.now(), timestep, episode_reward.num_episodes, \
                    episode_reward.mean, td_loss))
                torch.save(self.net.state_dict(), self.model_path + '/model.pt')
    # update the network
    def _update_network(self, samples):
        """One TD update from a sampled minibatch; returns the scalar loss value."""
        obses, actions, rewards, obses_next, dones = samples
        # convert the data to tensor
        obses = self._get_tensors(obses)
        actions = torch.tensor(actions, dtype=torch.int64).unsqueeze(-1)
        rewards = torch.tensor(rewards, dtype=torch.float32).unsqueeze(-1)
        obses_next = self._get_tensors(obses_next)
        # dones are inverted here: 1.0 for non-terminal, 0.0 for terminal states
        dones = torch.tensor(1 - dones, dtype=torch.float32).unsqueeze(-1)
        # convert into gpu
        if self.args.cuda:
            actions = actions.cuda()
            rewards = rewards.cuda()
            dones = dones.cuda()
        # calculate the target value
        with torch.no_grad():
            # if use the double network architecture
            if self.args.use_double_net:
                # double DQN: online net picks the argmax action, target net evaluates it
                q_value_ = self.net(obses_next)
                action_max_idx = torch.argmax(q_value_, dim=1, keepdim=True)
                target_action_value = self.target_net(obses_next)
                target_action_max_value = target_action_value.gather(1, action_max_idx)
            else:
                target_action_value = self.target_net(obses_next)
                target_action_max_value, _ = torch.max(target_action_value, dim=1, keepdim=True)
            # TD target: r + gamma * max_a' Q_target(s', a'), masked at terminals
            expected_value = rewards + self.args.gamma * target_action_max_value * dones
        # Q-value of the action actually taken
        action_value = self.net(obses)
        real_value = action_value.gather(1, actions)
        loss = (expected_value - real_value).pow(2).mean()
        # start to update
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()
    # get tensors
    def _get_tensors(self, obs):
        """Convert HWC (single) or NHWC (batched) observations to NCHW float32 tensors."""
        if obs.ndim == 3:
            obs = np.transpose(obs, (2, 0, 1))
            obs = np.expand_dims(obs, 0)
        elif obs.ndim == 4:
            obs = np.transpose(obs, (0, 3, 1, 2))
        obs = torch.tensor(obs, dtype=torch.float32)
        if self.args.cuda:
            obs = obs.cuda()
        return obs
| 5,646 | 43.81746 | 144 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/trpo/trpo_agent.py | import torch
import numpy as np
import os
from models import network
from rl_utils.running_filter.running_filter import ZFilter
from utils import select_actions, eval_actions, conjugated_gradient, line_search, set_flat_params_to
from datetime import datetime
class trpo_agent:
    """Trust-region policy optimization (TRPO) trainer for continuous-control tasks.

    The actor is updated with a natural-gradient step (conjugate gradient +
    backtracking line search under a KL constraint); the critic is fit by Adam.
    """
    def __init__(self, env, args):
        self.env = env
        self.args = args
        # define the actor-critic network and a frozen copy for the old policy
        self.net = network(self.env.observation_space.shape[0], self.env.action_space.shape[0])
        self.old_net = network(self.env.observation_space.shape[0], self.env.action_space.shape[0])
        # make sure the net and old net have the same parameters
        self.old_net.load_state_dict(self.net.state_dict())
        # only the critic is trained by gradient descent; the actor uses TRPO steps
        self.optimizer = torch.optim.Adam(self.net.critic.parameters(), lr=self.args.lr)
        # running mean/std filter for observations
        self.running_state = ZFilter((self.env.observation_space.shape[0],), clip=5)
        if not os.path.exists(self.args.save_dir):
            os.mkdir(self.args.save_dir)
        self.model_path = self.args.save_dir + self.args.env_name + '/'
        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)

    def learn(self):
        """Collect nsteps of experience per update and run TRPO updates until
        args.total_timesteps frames have been consumed."""
        num_updates = self.args.total_timesteps // self.args.nsteps
        obs = self.running_state(self.env.reset())
        final_reward = 0
        episode_reward = 0
        self.dones = False
        for update in range(num_updates):
            mb_obs, mb_rewards, mb_actions, mb_dones, mb_values = [], [], [], [], []
            for step in range(self.args.nsteps):
                with torch.no_grad():
                    obs_tensor = self._get_tensors(obs)
                    value, pi = self.net(obs_tensor)
                # sample an action from the Gaussian policy
                actions = select_actions(pi)
                # store informations
                mb_obs.append(np.copy(obs))
                mb_actions.append(actions)
                mb_dones.append(self.dones)
                mb_values.append(value.detach().numpy().squeeze())
                # start to execute actions in the environment
                obs_, reward, done, _ = self.env.step(actions)
                self.dones = done
                mb_rewards.append(reward)
                if done:
                    obs_ = self.env.reset()
                obs = self.running_state(obs_)
                episode_reward += reward
                # final_reward tracks the most recently finished episode's return
                mask = 0.0 if done else 1.0
                final_reward *= mask
                final_reward += (1 - mask) * episode_reward
                episode_reward *= mask
            # to process the rollouts
            mb_obs = np.asarray(mb_obs, dtype=np.float32)
            mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
            mb_actions = np.asarray(mb_actions, dtype=np.float32)
            # fix: np.bool was removed in NumPy 1.24 -- use the builtin bool
            mb_dones = np.asarray(mb_dones, dtype=bool)
            mb_values = np.asarray(mb_values, dtype=np.float32)
            # compute the last state value for bootstrapping
            with torch.no_grad():
                obs_tensor = self._get_tensors(obs)
                last_value, _ = self.net(obs_tensor)
                last_value = last_value.detach().numpy().squeeze()
            # generalized advantage estimation (GAE-lambda, tau = lambda)
            mb_returns = np.zeros_like(mb_rewards)
            mb_advs = np.zeros_like(mb_rewards)
            lastgaelam = 0
            for t in reversed(range(self.args.nsteps)):
                if t == self.args.nsteps - 1:
                    nextnonterminal = 1.0 - self.dones
                    nextvalues = last_value
                else:
                    nextnonterminal = 1.0 - mb_dones[t + 1]
                    nextvalues = mb_values[t + 1]
                delta = mb_rewards[t] + self.args.gamma * nextvalues * nextnonterminal - mb_values[t]
                mb_advs[t] = lastgaelam = delta + self.args.gamma * self.args.tau * nextnonterminal * lastgaelam
            mb_returns = mb_advs + mb_values
            # normalize the advantages
            mb_advs = (mb_advs - mb_advs.mean()) / (mb_advs.std() + 1e-5)
            # before the update, make the old network has the parameter of the current network
            self.old_net.load_state_dict(self.net.state_dict())
            # start to update the network
            policy_loss, value_loss = self._update_network(mb_obs, mb_actions, mb_returns, mb_advs)
            torch.save([self.net.state_dict(), self.running_state], self.model_path + 'model.pt')
            print('[{}] Update: {} / {}, Frames: {}, Reward: {:.3f}, VL: {:.3f}, PL: {:.3f}'.format(datetime.now(), update, \
                    num_updates, (update + 1)*self.args.nsteps, final_reward, value_loss, policy_loss))

    # start to update network
    def _update_network(self, mb_obs, mb_actions, mb_returns, mb_advs):
        """One TRPO update: a natural-gradient actor step followed by several
        minibatch epochs on the critic.  Returns (policy_loss, value_loss)."""
        mb_obs_tensor = torch.tensor(mb_obs, dtype=torch.float32)
        mb_actions_tensor = torch.tensor(mb_actions, dtype=torch.float32)
        mb_returns_tensor = torch.tensor(mb_returns, dtype=torch.float32).unsqueeze(1)
        mb_advs_tensor = torch.tensor(mb_advs, dtype=torch.float32).unsqueeze(1)
        # try to get the old policy and current policy
        values, _ = self.net(mb_obs_tensor)
        with torch.no_grad():
            _, pi_old = self.old_net(mb_obs_tensor)
        # get the surrogate loss
        surr_loss = self._get_surrogate_loss(mb_obs_tensor, mb_advs_tensor, mb_actions_tensor, pi_old)
        # compute the surrogate gradient -> g, then solve Ax = g where A is the Fisher matrix
        surr_grad = torch.autograd.grad(surr_loss, self.net.actor.parameters())
        flat_surr_grad = torch.cat([grad.view(-1) for grad in surr_grad]).data
        # use the conjugated gradient to calculate the scaled direction vector (natural gradient)
        nature_grad = conjugated_gradient(self._fisher_vector_product, -flat_surr_grad, 10, mb_obs_tensor, pi_old)
        # scale the step so the quadratic KL estimate equals max_kl
        non_scale_kl = 0.5 * (nature_grad * self._fisher_vector_product(nature_grad, mb_obs_tensor, pi_old)).sum(0, keepdim=True)
        scale_ratio = torch.sqrt(non_scale_kl / self.args.max_kl)
        final_nature_grad = nature_grad / scale_ratio[0]
        # calculate the expected improvement rate for the line search
        expected_improve = (-flat_surr_grad * nature_grad).sum(0, keepdim=True) / scale_ratio[0]
        # get the flat param ...
        prev_params = torch.cat([param.data.view(-1) for param in self.net.actor.parameters()])
        # start to do the line search
        success, new_params = line_search(self.net.actor, self._get_surrogate_loss, prev_params, final_nature_grad, \
                            expected_improve, mb_obs_tensor, mb_advs_tensor, mb_actions_tensor, pi_old)
        set_flat_params_to(self.net.actor, new_params)
        # then trying to update the critic network
        inds = np.arange(mb_obs.shape[0])
        for _ in range(self.args.vf_itrs):
            np.random.shuffle(inds)
            for start in range(0, mb_obs.shape[0], self.args.batch_size):
                end = start + self.args.batch_size
                mbinds = inds[start:end]
                mini_obs = mb_obs[mbinds]
                mini_returns = mb_returns[mbinds]
                # put things in the tensor
                mini_obs = torch.tensor(mini_obs, dtype=torch.float32)
                mini_returns = torch.tensor(mini_returns, dtype=torch.float32).unsqueeze(1)
                values, _ = self.net(mini_obs)
                v_loss = (mini_returns - values).pow(2).mean()
                self.optimizer.zero_grad()
                v_loss.backward()
                self.optimizer.step()
        return surr_loss.item(), v_loss.item()

    # get the surrogate loss
    def _get_surrogate_loss(self, obs, adv, actions, pi_old):
        """Importance-weighted policy objective: -E[exp(logpi - logpi_old) * adv]."""
        _, pi = self.net(obs)
        log_prob = eval_actions(pi, actions)
        old_log_prob = eval_actions(pi_old, actions).detach()
        surr_loss = -torch.exp(log_prob - old_log_prob) * adv
        return surr_loss.mean()

    # the product of the fisher information matrix and the natural gradient -> Ax
    def _fisher_vector_product(self, v, obs, pi_old):
        """Return (A + damping * I) v via double backprop through the mean KL."""
        kl = self._get_kl(obs, pi_old)
        kl = kl.mean()
        # first-order gradient of the KL, kept in the graph for the second pass
        kl_grads = torch.autograd.grad(kl, self.net.actor.parameters(), create_graph=True)
        flat_kl_grads = torch.cat([grad.view(-1) for grad in kl_grads])
        # fix: torch.autograd.Variable is deprecated -- tensors participate directly
        kl_v = (flat_kl_grads * v).sum()
        kl_second_grads = torch.autograd.grad(kl_v, self.net.actor.parameters())
        flat_kl_second_grads = torch.cat([grad.contiguous().view(-1) for grad in kl_second_grads]).data
        flat_kl_second_grads = flat_kl_second_grads + self.args.damping * v
        return flat_kl_second_grads

    # get the kl divergence between two distributions
    def _get_kl(self, obs, pi_old):
        """Per-sample KL(old || new) between the diagonal Gaussian policies."""
        mean_old, std_old = pi_old
        _, pi = self.net(obs)
        mean, std = pi
        # closed-form KL for diagonal Gaussians, summed over action dimensions
        kl = -torch.log(std / std_old) + (std.pow(2) + (mean - mean_old).pow(2)) / (2 * std_old.pow(2)) - 0.5
        return kl.sum(1, keepdim=True)

    # get the tensors
    def _get_tensors(self, obs):
        """Wrap a single observation in a float32 tensor with a batch axis."""
        return torch.tensor(obs, dtype=torch.float32).unsqueeze(0)
| 9,299 | 52.142857 | 129 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/trpo/arguments.py | import argparse
def get_args():
    """Build and parse the command-line arguments for TRPO training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gamma', type=float, default=0.99, help='the discount factor of the RL')
    parser.add_argument('--env-name', type=str, default='Walker2d-v2', help='the training environment')
    parser.add_argument('--seed', type=int, default=123, help='the random seed')
    parser.add_argument('--save-dir', type=str, default='saved_models/', help='the folder to save models')
    parser.add_argument('--total-timesteps', type=int, default=int(1e6), help='the total frames')
    parser.add_argument('--nsteps', type=int, default=1024, help='the steps to collect samples')
    parser.add_argument('--lr', type=float, default=3e-4)
    parser.add_argument('--batch-size', type=int, default=64, help='the mini batch size ot update the value function')
    parser.add_argument('--vf-itrs', type=int, default=5, help='the times to update the value network')
    parser.add_argument('--tau', type=float, default=0.95, help='the param to calculate the gae')
    parser.add_argument('--damping', type=float, default=0.1, help='the damping coeffificent')
    parser.add_argument('--max-kl', type=float, default=0.01, help='the max kl divergence')
    parser.add_argument('--cuda', action='store_true', help='if use gpu')
    parser.add_argument('--env-type', type=str, default='mujoco', help='the environment type')
    parser.add_argument('--log-dir', type=str, default='logs', help='folder to save log files')
    return parser.parse_args()
| 1,521 | 62.416667 | 117 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/trpo/utils.py | import numpy as np
import torch
from torch.distributions.normal import Normal
# select actions
def select_actions(pi):
    """Sample an action from the Gaussian policy (mean, std) and return it as a numpy array."""
    mean, std = pi
    sample = Normal(mean, std).sample()
    return sample.detach().numpy().squeeze()
# evaluate the actions
def eval_actions(pi, actions):
    """Return the per-sample log-probability of `actions`, summed over action dims, shape (N, 1)."""
    mean, std = pi
    dist = Normal(mean, std)
    return dist.log_prob(actions).sum(dim=1, keepdim=True)
# conjugated gradient
def conjugated_gradient(fvp, b, update_steps, obs, pi_old, residual_tol=1e-10):
    """Solve A x = b with the conjugate-gradient method, where the
    matrix-vector product A p is supplied by `fvp(p, obs, pi_old)`.

    Starts from x = 0 and stops after `update_steps` iterations or when the
    squared residual drops below `residual_tol`.
    """
    x = torch.zeros(b.size(), dtype=torch.float32)
    residual = b.clone()
    direction = b.clone()
    rdotr = torch.dot(residual, residual)
    for _ in range(update_steps):
        Ap = fvp(direction, obs, pi_old)
        step = rdotr / torch.dot(direction, Ap)
        x = x + step * direction
        residual = residual - step * Ap
        new_rdotr = torch.dot(residual, residual)
        direction = residual + (new_rdotr / rdotr) * direction
        rdotr = new_rdotr
        # converged: residual is small enough
        if rdotr < residual_tol:
            break
    return x
# line search
def line_search(model, loss_fn, x, full_step, expected_rate, obs, adv, actions, pi_old, max_backtracks=10, accept_ratio=0.1):
    """Backtracking line search along `full_step` from the flat parameters `x`.

    Halves the step up to `max_backtracks` times and accepts the first point
    whose actual/expected improvement ratio exceeds `accept_ratio`.
    Returns (True, new_params) on success, otherwise (False, x).
    """
    fval = loss_fn(obs, adv, actions, pi_old).data
    for stepfrac in 0.5 ** np.arange(max_backtracks):
        xnew = x + stepfrac * full_step
        set_flat_params_to(model, xnew)
        new_fval = loss_fn(obs, adv, actions, pi_old).data
        actual_improve = fval - new_fval
        expected_improve = expected_rate * stepfrac
        ratio = actual_improve / expected_improve
        if ratio.item() > accept_ratio and actual_improve.item() > 0:
            return True, xnew
    # no acceptable step found; model is left at the last tried parameters
    return False, x
def set_flat_params_to(model, flat_params):
    """Copy a flat parameter vector back into a model, one parameter slice at a time."""
    offset = 0
    for param in model.parameters():
        numel = int(np.prod(list(param.size())))
        param.data.copy_(flat_params[offset:offset + numel].view(param.size()))
        offset += numel
| 2,026 | 33.355932 | 125 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/trpo/demo.py | import numpy as np
import torch
import gym
from arguments import get_args
from models import network
def denormalize(x, mean, std, clip=10):
    """Standardize an observation with the training-time running statistics.

    NOTE: despite the name, this applies (x - mean) / std -- the same
    transform the ZFilter performed during training.  Unlike the previous
    in-place version (`x -= mean`), the caller's array is left untouched;
    a new array clipped to [-clip, clip] is returned.
    """
    x = (x - mean) / (std + 1e-8)
    return np.clip(x, -clip, clip)
def get_tensors(x):
    """Wrap an observation in a float32 tensor with a leading batch axis."""
    tensor = torch.tensor(x, dtype=torch.float32)
    return tensor.unsqueeze(0)
if __name__ == '__main__':
    args = get_args()
    env = gym.make(args.env_name)
    # build the policy network and restore the trained weights + observation filter
    net = network(env.observation_space.shape[0], env.action_space.shape[0])
    model_path = args.save_dir + args.env_name + '/model.pt'
    network_model, filters = torch.load(model_path, map_location=lambda storage, loc: storage)
    net.load_state_dict(network_model)
    net.eval()
    # roll out a few episodes using the deterministic (mean) action
    for _ in range(10):
        obs = denormalize(env.reset(), filters.rs.mean, filters.rs.std)
        reward_total = 0
        for _ in range(10000):
            env.render()
            with torch.no_grad():
                _, (mean, _) = net(get_tensors(obs))
            action = mean.numpy().squeeze()
            obs, reward, done, _ = env.step(action)
            reward_total += reward
            obs = denormalize(obs, filters.rs.mean, filters.rs.std)
            if done:
                break
        print('the reward of this episode is: {}'.format(reward_total))
    env.close()
| 1,383 | 31.952381 | 94 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/trpo/models.py | import torch
from torch import nn
from torch.nn import functional as F
class network(nn.Module):
    """Actor-critic container: a state-value head and a Gaussian policy head."""
    def __init__(self, num_states, num_actions):
        super(network, self).__init__()
        # value function and policy are kept as separate sub-modules so the
        # agent can optimize them independently
        self.critic = critic(num_states)
        self.actor = actor(num_states, num_actions)

    def forward(self, x):
        """Return (state_value, (mean, std)) for a batch of observations."""
        return self.critic(x), self.actor(x)
class critic(nn.Module):
    """State-value function V(s): two tanh hidden layers of 64 units."""
    def __init__(self, num_states):
        super(critic, self).__init__()
        self.fc1 = nn.Linear(num_states, 64)
        self.fc2 = nn.Linear(64, 64)
        self.value = nn.Linear(64, 1)

    def forward(self, x):
        """Return V(s) with shape (N, 1)."""
        # torch.tanh replaces the deprecated F.tanh (same math, no warning)
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        value = self.value(x)
        return value
class actor(nn.Module):
    """Diagonal-Gaussian policy: state -> (mean, std); log-std is a learned constant."""
    def __init__(self, num_states, num_actions):
        super(actor, self).__init__()
        self.fc1 = nn.Linear(num_states, 64)
        self.fc2 = nn.Linear(64, 64)
        self.action_mean = nn.Linear(64, num_actions)
        # state-independent log standard deviation, starts at 0 (std = 1)
        self.sigma_log = nn.Parameter(torch.zeros(1, num_actions))

    def forward(self, x):
        """Return the policy parameters (mean, sigma), each of shape (N, num_actions)."""
        # torch.tanh replaces the deprecated F.tanh (same math, no warning)
        x = torch.tanh(self.fc1(x))
        x = torch.tanh(self.fc2(x))
        mean = self.action_mean(x)
        sigma_log = self.sigma_log.expand_as(mean)
        sigma = torch.exp(sigma_log)
        pi = (mean, sigma)
        return pi
| 1,376 | 28.297872 | 66 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/trpo/train.py | from arguments import get_args
from rl_utils.seeds.seeds import set_seeds
from rl_utils.env_wrapper.create_env import create_single_env
from trpo_agent import trpo_agent
if __name__ == '__main__':
    args = get_args()
    # build the environment and seed everything
    env = create_single_env(args)
    set_seeds(args)
    # train with TRPO, then shut the environment down
    trpo_trainer = trpo_agent(env, args)
    trpo_trainer.learn()
    env.close()
| 461 | 26.176471 | 61 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/a2c/a2c_agent.py | import numpy as np
import torch
from models import net
from datetime import datetime
from utils import select_actions, evaluate_actions, discount_with_dones
import os
class a2c_agent:
    """Synchronous advantage actor-critic (A2C) trainer for vectorized Atari envs."""
    def __init__(self, envs, args):
        self.envs = envs
        self.args = args
        # define the network
        self.net = net(self.envs.action_space.n)
        if self.args.cuda:
            self.net.cuda()
        # define the optimizer
        self.optimizer = torch.optim.RMSprop(self.net.parameters(), lr=self.args.lr, eps=self.args.eps, alpha=self.args.alpha)
        if not os.path.exists(self.args.save_dir):
            os.mkdir(self.args.save_dir)
        # per-environment subfolder for checkpoints
        self.model_path = self.args.save_dir + self.args.env_name + '/'
        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)
        # rollout buffers are reshaped into (workers * nsteps, *obs_shape) batches
        self.batch_ob_shape = (self.args.num_workers * self.args.nsteps,) + self.envs.observation_space.shape
        self.obs = np.zeros((self.args.num_workers,) + self.envs.observation_space.shape, dtype=self.envs.observation_space.dtype.name)
        self.obs[:] = self.envs.reset()
        self.dones = [False for _ in range(self.args.num_workers)]

    # train the network..
    def learn(self):
        """Run the A2C training loop for args.total_frames environment frames."""
        num_updates = self.args.total_frames // (self.args.num_workers * self.args.nsteps)
        # per-worker running and last-finished-episode rewards
        episode_rewards = np.zeros((self.args.num_workers, ), dtype=np.float32)
        final_rewards = np.zeros((self.args.num_workers, ), dtype=np.float32)
        # start to update
        for update in range(num_updates):
            mb_obs, mb_rewards, mb_actions, mb_dones = [], [], [], []
            for step in range(self.args.nsteps):
                with torch.no_grad():
                    input_tensor = self._get_tensors(self.obs)
                    _, pi = self.net(input_tensor)
                # sample actions from the categorical policy
                actions = select_actions(pi)
                cpu_actions = actions.squeeze(1).cpu().numpy()
                # start to store the information
                mb_obs.append(np.copy(self.obs))
                mb_actions.append(cpu_actions)
                mb_dones.append(self.dones)
                # step
                obs, rewards, dones, _ = self.envs.step(cpu_actions)
                # start to store the rewards
                self.dones = dones
                mb_rewards.append(rewards)
                # zero out the stale observation of any worker that just finished
                for n, done in enumerate(dones):
                    if done:
                        self.obs[n] = self.obs[n]*0
                self.obs = obs
                episode_rewards += rewards
                # final_rewards keeps each worker's last completed episode return
                masks = np.array([0.0 if done else 1.0 for done in dones], dtype=np.float32)
                final_rewards *= masks
                final_rewards += (1 - masks) * episode_rewards
                episode_rewards *= masks
            # append the terminal dones so each worker has nsteps + 1 flags
            mb_dones.append(self.dones)
            # process the rollouts
            mb_obs = np.asarray(mb_obs, dtype=np.uint8).swapaxes(1, 0).reshape(self.batch_ob_shape)
            mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
            mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
            # fix: np.bool was removed in NumPy 1.24 -- use the builtin bool
            mb_dones = np.asarray(mb_dones, dtype=bool).swapaxes(1, 0)
            mb_masks = mb_dones[:, :-1]
            mb_dones = mb_dones[:, 1:]
            # calculate the last value for bootstrapping
            with torch.no_grad():
                input_tensor = self._get_tensors(self.obs)
                last_values, _ = self.net(input_tensor)
            # compute n-step discounted returns, bootstrapping when not terminal
            for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values.detach().cpu().numpy().squeeze())):
                rewards = rewards.tolist()
                dones = dones.tolist()
                if dones[-1] == 0:
                    rewards = discount_with_dones(rewards+[value], dones+[0], self.args.gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, dones, self.args.gamma)
                mb_rewards[n] = rewards
            mb_rewards = mb_rewards.flatten()
            mb_actions = mb_actions.flatten()
            # start to update network
            vl, al, ent = self._update_network(mb_obs, mb_rewards, mb_actions)
            if update % self.args.log_interval == 0:
                print('[{}] Update: {}/{}, Frames: {}, Rewards: {:.1f}, VL: {:.3f}, PL: {:.3f}, Ent: {:.2f}, Min: {}, Max:{}'.format(\
                    datetime.now(), update, num_updates, (update+1)*(self.args.num_workers * self.args.nsteps),\
                    final_rewards.mean(), vl, al, ent, final_rewards.min(), final_rewards.max()))
                torch.save(self.net.state_dict(), self.model_path + 'model.pt')

    # update_network
    def _update_network(self, obs, returns, actions):
        """One gradient step on the combined policy/value/entropy loss.

        Returns (value_loss, action_loss, entropy) as Python floats.
        """
        # evaluate the actions
        input_tensor = self._get_tensors(obs)
        values, pi = self.net(input_tensor)
        # define the tensor of actions, returns
        returns = torch.tensor(returns, dtype=torch.float32).unsqueeze(1)
        actions = torch.tensor(actions, dtype=torch.int64).unsqueeze(1)
        if self.args.cuda:
            returns = returns.cuda()
            actions = actions.cuda()
        # evaluate actions
        action_log_probs, dist_entropy = evaluate_actions(pi, actions)
        # calculate advantages...
        advantages = returns - values
        # get the value loss
        value_loss = advantages.pow(2).mean()
        # policy gradient uses detached advantages so the critic is not updated through it
        action_loss = -(advantages.detach() * action_log_probs).mean()
        # total loss
        total_loss = action_loss + self.args.value_loss_coef * value_loss - self.args.entropy_coef * dist_entropy
        # start to update
        self.optimizer.zero_grad()
        total_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.max_grad_norm)
        self.optimizer.step()
        return value_loss.item(), action_loss.item(), dist_entropy.item()

    # get the tensors...
    def _get_tensors(self, obs):
        """Convert a batch of NHWC frames into an NCHW float32 tensor (on GPU if enabled)."""
        input_tensor = torch.tensor(np.transpose(obs, (0, 3, 1, 2)), dtype=torch.float32)
        if self.args.cuda:
            input_tensor = input_tensor.cuda()
        return input_tensor
| 6,370 | 47.633588 | 135 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/a2c/arguments.py | import argparse
def get_args():
    """Build and parse the command-line arguments for A2C training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gamma', type=float, default=0.99, help='the discount factor of RL')
    parser.add_argument('--seed', type=int, default=123, help='the random seeds')
    parser.add_argument('--env-name', type=str, default='BreakoutNoFrameskip-v4', help='the environment name')
    parser.add_argument('--lr', type=float, default=7e-4, help='learning rate of the algorithm')
    parser.add_argument('--value-loss-coef', type=float, default=0.5, help='the coefficient of value loss')
    parser.add_argument('--tau', type=float, default=0.95, help='gae coefficient')
    parser.add_argument('--cuda', action='store_true', help='use cuda do the training')
    parser.add_argument('--total-frames', type=int, default=20000000, help='the total frames for training')
    parser.add_argument('--eps', type=float, default=1e-5, help='param for adam optimizer')
    parser.add_argument('--save-dir', type=str, default='saved_models/', help='the folder to save models')
    parser.add_argument('--nsteps', type=int, default=5, help='the steps to update the network')
    parser.add_argument('--num-workers', type=int, default=16, help='the number of cpu you use')
    parser.add_argument('--entropy-coef', type=float, default=0.01, help='entropy-reg')
    parser.add_argument('--log-interval', type=int, default=100, help='the log interval')
    parser.add_argument('--alpha', type=float, default=0.99, help='the alpha coe of RMSprop')
    parser.add_argument('--max-grad-norm', type=float, default=0.5, help='the grad clip')
    parser.add_argument('--use-gae', action='store_true', help='use-gae')
    parser.add_argument('--log-dir', type=str, default='logs', help='log dir')
    parser.add_argument('--env-type', type=str, default='atari', help='the type of the environment')
    return parser.parse_args()
| 1,881 | 66.214286 | 109 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/a2c/utils.py | import torch
import numpy as np
from torch.distributions.categorical import Categorical
# select - actions
def select_actions(pi, deterministic=False):
    """Pick actions from categorical probabilities `pi`.

    Returns a greedy Python int when deterministic, otherwise a sampled
    (N, 1) tensor of action indices.
    """
    dist = Categorical(pi)
    if deterministic:
        return torch.argmax(pi, dim=1).item()
    return dist.sample().unsqueeze(-1)
# get the action log prob and entropy...
def evaluate_actions(pi, actions):
    """Return (log-probs of `actions` shaped (N, 1), mean entropy of the policy)."""
    dist = Categorical(pi)
    log_probs = dist.log_prob(actions.squeeze(-1)).unsqueeze(-1)
    return log_probs, dist.entropy().mean()
def discount_with_dones(rewards, dones, gamma):
    """Discounted returns over one rollout; a done flag resets the running sum."""
    out = []
    running = 0
    for reward, done in zip(reversed(rewards), reversed(dones)):
        running = reward + gamma * running * (1. - done)
        out.append(running)
    out.reverse()
    return out
| 749 | 29 | 92 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/a2c/demo.py | from arguments import get_args
from models import net
import torch
from utils import select_actions
import cv2
import numpy as np
from rl_utils.env_wrapper.frame_stack import VecFrameStack
from rl_utils.env_wrapper.atari_wrapper import make_atari, wrap_deepmind
# update the current observation
def get_tensors(obs):
    """Turn a single HWC frame into a (1, C, H, W) float32 tensor."""
    chw = np.transpose(obs, (2, 0, 1))
    return torch.tensor(chw, dtype=torch.float32).unsqueeze(0)
if __name__ == "__main__":
    # load arguments and build the frame-stacked atari environment
    args = get_args()
    env = wrap_deepmind(make_atari(args.env_name), frame_stack=True)
    # restore the trained policy onto the CPU
    model_path = args.save_dir + args.env_name + '/model.pt'
    network = net(env.action_space.n)
    network.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
    obs = env.reset()
    # render forever, always acting greedily
    while True:
        env.render()
        with torch.no_grad():
            _, pi = network(get_tensors(obs))
        actions = select_actions(pi, True)
        obs, reward, done, _ = env.step([actions])
    env.close()  # unreachable: the render loop above never exits
| 1,193 | 33.114286 | 95 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/a2c/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# the convolution layer of deepmind
class deepmind(nn.Module):
    """DeepMind-style convolutional feature extractor: (4, 84, 84) -> 512."""

    def __init__(self):
        super(deepmind, self).__init__()
        self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
        self.fc1 = nn.Linear(32 * 7 * 7, 512)
        # orthogonal weight init with relu gain, zero biases
        relu_gain = nn.init.calculate_gain('relu')
        for layer in (self.conv1, self.conv2, self.conv3, self.fc1):
            nn.init.orthogonal_(layer.weight.data, gain=relu_gain)
        for layer in (self.conv1, self.conv2, self.conv3, self.fc1):
            nn.init.constant_(layer.bias.data, 0)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.relu(self.conv2(out))
        out = F.relu(self.conv3(out))
        out = F.relu(self.fc1(out.view(-1, 32 * 7 * 7)))
        return out
# in the initial, just the nature CNN
class net(nn.Module):
    """Actor-critic heads on top of the deepmind feature extractor."""

    def __init__(self, num_actions):
        super(net, self).__init__()
        self.cnn_layer = deepmind()
        self.critic = nn.Linear(512, 1)
        self.actor = nn.Linear(512, num_actions)
        # value head: plain orthogonal init, zero bias
        nn.init.orthogonal_(self.critic.weight.data)
        nn.init.constant_(self.critic.bias.data, 0)
        # policy head: small-gain orthogonal init keeps the initial policy near-uniform
        nn.init.orthogonal_(self.actor.weight.data, gain=0.01)
        nn.init.constant_(self.actor.bias.data, 0)

    def forward(self, inputs):
        # pixels are scaled from [0, 255] down to [0, 1] before the conv stack
        features = self.cnn_layer(inputs / 255.0)
        value = self.critic(features)
        pi = F.softmax(self.actor(features), dim=1)
        return value, pi
| 1,959 | 37.431373 | 88 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/a2c/train.py | from arguments import get_args
from a2c_agent import a2c_agent
from rl_utils.env_wrapper.create_env import create_multiple_envs
from rl_utils.seeds.seeds import set_seeds
from a2c_agent import a2c_agent
import os
if __name__ == '__main__':
    # keep BLAS single-threaded so the parallel workers don't oversubscribe cores
    os.environ['OMP_NUM_THREADS'] = '1'
    os.environ['MKL_NUM_THREADS'] = '1'
    # parse args, build the vectorized envs and seed everything
    args = get_args()
    envs = create_multiple_envs(args)
    set_seeds(args)
    # run the a2c training loop, then shut the envs down
    a2c_trainer = a2c_agent(envs, args)
    a2c_trainer.learn()
    envs.close()
envs.close()
| 612 | 25.652174 | 64 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ddpg/arguments.py | import argparse
def get_args():
parse = argparse.ArgumentParser(description='ddpg')
parse.add_argument('--env-name', type=str, default='Pendulum-v0', help='the training environment')
parse.add_argument('--lr-actor', type=float, default=1e-4, help='the lr of the actor')
parse.add_argument('--lr-critic', type=float, default=1e-3, help='the lr of the critic')
parse.add_argument('--critic-l2-reg', type=float, default=1e-2, help='the critic reg')
parse.add_argument('--gamma', type=float, default=0.99, help='the discount factor')
parse.add_argument('--nb-epochs', type=int, default=500, help='the epochs to train the network')
parse.add_argument('--nb-cycles', type=int, default=20)
parse.add_argument('--nb-train', type=int, default=50, help='number to train the agent')
parse.add_argument('--nb-rollout-steps', type=int, default=100, help='steps to collect samples')
parse.add_argument('--nb-test-rollouts', type=int, default=10, help='the number of test')
parse.add_argument('--batch-size', type=int, default=128, help='the batch size to update network')
parse.add_argument('--replay-size', type=int, default=int(1e6), help='the size of the replay buffer')
parse.add_argument('--clip-range', type=float, default=5, help='clip range of the observation')
parse.add_argument('--save-dir', type=str, default='saved_models/', help='the place save the models')
parse.add_argument('--polyak', type=float, default=0.95, help='the expoential weighted coefficient.')
parse.add_argument('--total-frames', type=int, default=int(1e6), help='total frames')
parse.add_argument('--log-dir', type=str, default='logs', help='place to save log files')
parse.add_argument('--env-type', type=str, default=None, help='environment type')
parse.add_argument('--seed', type=int, default=123, help='random seed')
parse.add_argument('--display-interval', type=int, default=10, help='interval to display')
# ddpg not support gpu
parse.add_argument('--cuda', action='store_true', help='if use GPU')
args = parse.parse_args()
return args
| 2,113 | 69.466667 | 105 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ddpg/utils.py | import numpy as np
import torch
# add ounoise here
class ounoise():
    """Ornstein-Uhlenbeck process noise for exploration in continuous control.

    Generates temporally-correlated noise that reverts toward `mean` at rate
    `theta`, with diffusion scale `std` and step size `dt`.
    """
    def __init__(self, std, action_dim, mean=0, theta=0.15, dt=1e-2, x0=None):
        self.std = std
        self.mean = mean
        self.action_dim = action_dim
        self.theta = theta
        self.dt = dt
        self.x0 = x0
        # start from a well-defined state so noise() is safe even if the
        # caller forgets to call reset() first (previously x_prev was unset
        # until the first reset(), making noise() raise AttributeError)
        self.reset()

    # reset the noise process state
    def reset(self):
        """Reset the process state to x0 (or zeros when x0 is None)."""
        self.x_prev = self.x0 if self.x0 is not None else np.zeros(self.action_dim)

    # generate one noise sample
    def noise(self):
        """Advance the OU process one step and return the new noise vector."""
        x = self.x_prev + self.theta * (self.mean - self.x_prev) * self.dt + \
            self.std * np.sqrt(self.dt) * np.random.normal(size=self.action_dim)
        self.x_prev = x
        return x
| 686 | 27.625 | 84 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ddpg/demo.py | from arguments import get_args
import gym
from models import actor
import torch
import numpy as np
def normalize(obs, mean, std, clip):
    """Z-score the observation with the saved stats and clip to [-clip, clip]."""
    scaled = (obs - mean) / std
    return np.clip(scaled, -clip, clip)
if __name__ == '__main__':
    args = get_args()
    env = gym.make(args.env_name)
    # environment dimensions and the action scale
    obs_dims = env.observation_space.shape[0]
    action_dims = env.action_space.shape[0]
    action_max = env.action_space.high[0]
    # rebuild the actor and restore its weights plus the observation statistics
    actor_net = actor(obs_dims, action_dims)
    model_path = args.save_dir + args.env_name + '/model.pt'
    model, mean, std = torch.load(model_path, map_location=lambda storage, loc: storage)
    actor_net.load_state_dict(model)
    # play ten evaluation episodes
    for ep in range(10):
        obs = env.reset()
        reward_sum = 0
        while True:
            env.render()
            with torch.no_grad():
                norm_obs = normalize(obs, mean, std, args.clip_range)
                obs_tensor = torch.tensor(norm_obs, dtype=torch.float32).unsqueeze(0)
                actions = actor_net(obs_tensor).detach().numpy().squeeze()
            # a squeezed 1-d action collapses to a scalar; restore the array shape
            if action_dims == 1:
                actions = np.array([actions])
            obs_, reward, done, _ = env.step(action_max * actions)
            reward_sum += reward
            if done:
                break
            obs = obs_
        print('the episode is: {}, the reward is: {}'.format(ep, reward_sum))
    env.close()
| 1,518 | 34.325581 | 90 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ddpg/ddpg_agent.py | import numpy as np
from models import actor, critic
import torch
import os
from datetime import datetime
from mpi4py import MPI
from rl_utils.mpi_utils.normalizer import normalizer
from rl_utils.mpi_utils.utils import sync_networks, sync_grads
from rl_utils.experience_replay.experience_replay import replay_buffer
from utils import ounoise
import copy
import gym
"""
ddpg algorithms - revised baseline version
support MPI training
"""
class ddpg_agent:
    """MPI-parallel DDPG trainer.

    Collects rollouts with OU-noise exploration, stores them in a replay
    buffer, keeps a running observation normalizer, updates the actor/critic
    plus their polyak-averaged targets, and lets rank 0 log and save models.
    """
    def __init__(self, env, args):
        """Set up networks, optimizers, replay buffer, normalizer and eval env."""
        self.env = env
        self.args = args
        # get the dims and action max of the environment
        obs_dims = self.env.observation_space.shape[0]
        self.action_dims = self.env.action_space.shape[0]
        self.action_max = self.env.action_space.high[0]
        # define the network
        self.actor_net = actor(obs_dims, self.action_dims)
        self.critic_net = critic(obs_dims, self.action_dims)
        # sync the weights across the mpi ranks so every worker starts identical
        sync_networks(self.actor_net)
        sync_networks(self.critic_net)
        # build the target network as a frozen copy of the online network
        self.actor_target_net = copy.deepcopy(self.actor_net)
        self.critic_target_net = copy.deepcopy(self.critic_net)
        # create the optimizer (the critic additionally gets L2 weight decay)
        self.actor_optim = torch.optim.Adam(self.actor_net.parameters(), self.args.lr_actor)
        self.critic_optim = torch.optim.Adam(self.critic_net.parameters(), self.args.lr_critic, weight_decay=self.args.critic_l2_reg)
        # create the replay buffer
        self.replay_buffer = replay_buffer(self.args.replay_size)
        # create the normalizer
        self.o_norm = normalizer(obs_dims, default_clip_range=self.args.clip_range)
        # create the noise generator
        self.noise_generator = ounoise(std=0.2, action_dim=self.action_dims)
        # only rank 0 creates the dir to save models
        # NOTE(review): self.model_path is only defined on rank 0; learn() also
        # only saves on rank 0, so this is consistent
        if MPI.COMM_WORLD.Get_rank() == 0:
            if not os.path.exists(self.args.save_dir):
                os.mkdir(self.args.save_dir)
            self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
            if not os.path.exists(self.model_path):
                os.mkdir(self.model_path)
        # create a separate evaluation environment
        self.eval_env = gym.make(self.args.env_name)
        # seed it differently per rank
        self.eval_env.seed(self.args.seed * 2 + MPI.COMM_WORLD.Get_rank())

    def learn(self):
        """Main training loop.

        For each epoch: run `nb_cycles` cycles of `nb_rollout_steps` env steps
        (feeding the replay buffer and the normalizer) followed by `nb_train`
        gradient updates and a soft target-network update; then evaluate and,
        on rank 0, log and save the model.
        """
        self.actor_net.train()
        # reset the environment first
        obs = self.env.reset()
        self.noise_generator.reset()
        # get the number of epochs
        nb_epochs = self.args.total_frames // (self.args.nb_rollout_steps * self.args.nb_cycles)
        for epoch in range(nb_epochs):
            for _ in range(self.args.nb_cycles):
                # used to update the normalizer
                ep_obs = []
                for _ in range(self.args.nb_rollout_steps):
                    with torch.no_grad():
                        inputs_tensor = self._preproc_inputs(obs)
                        pi = self.actor_net(inputs_tensor)
                        action = self._select_actions(pi)
                    # feed actions into the environment (scaled to the env's range)
                    obs_, reward, done, _ = self.env.step(self.action_max * action)
                    # append the rollout information into the memory
                    self.replay_buffer.add(obs, action, reward, obs_, float(done))
                    ep_obs.append(obs.copy())
                    obs = obs_
                    # if done, reset the environment
                    if done:
                        obs = self.env.reset()
                        self.noise_generator.reset()
                # then start to do the update of the normalizer
                ep_obs = np.array(ep_obs)
                self.o_norm.update(ep_obs)
                self.o_norm.recompute_stats()
                # then start to update the network
                for _ in range(self.args.nb_train):
                    a_loss, c_loss = self._update_network()
                # soft-update the target networks
                self._soft_update_target_network(self.actor_target_net, self.actor_net)
                self._soft_update_target_network(self.critic_target_net, self.critic_net)
            # start to do the evaluation
            success_rate = self._eval_agent()
            # back to train mode (eval switched the actor to eval mode)
            self.actor_net.train()
            if epoch % self.args.display_interval == 0:
                if MPI.COMM_WORLD.Get_rank() == 0:
                    print('[{}] Epoch: {} / {}, Frames: {}, Rewards: {:.3f}, Actor loss: {:.3f}, Critic Loss: {:.3f}'.format(datetime.now(), \
                        epoch, nb_epochs, (epoch+1) * self.args.nb_rollout_steps * self.args.nb_cycles, success_rate, a_loss, c_loss))
                    torch.save([self.actor_net.state_dict(), self.o_norm.mean, self.o_norm.std], self.model_path + '/model.pt')

    # normalize the observation and convert it to a batched tensor
    def _preproc_inputs(self, obs):
        """Normalize `obs` with the running stats and add a batch dimension."""
        obs_norm = self.o_norm.normalize(obs)
        inputs_tensor = torch.tensor(obs_norm, dtype=torch.float32).unsqueeze(0)
        return inputs_tensor

    # this function will choose action for the agent and do the exploration
    def _select_actions(self, pi):
        """Turn the actor output into a numpy action with OU exploration noise."""
        action = pi.cpu().numpy().squeeze()
        # TODO: Noise type now - only support ounoise
        # add the gaussian noise
        #action = action + np.random.normal(0, 0.1, self.action_dims)
        # add ou noise, then clip to the normalized action range
        action = action + self.noise_generator.noise()
        action = np.clip(action, -1, 1)
        return action

    # update the network
    def _update_network(self):
        """Do one DDPG update (critic TD regression + deterministic policy
        gradient) on a minibatch from the replay buffer.

        Returns (actor_loss, critic_loss) as python floats.
        """
        # sample the samples from the replay buffer
        samples = self.replay_buffer.sample(self.args.batch_size)
        obses, actions, rewards, obses_next, dones = samples
        # normalize observations with the running stats
        norm_obses = self.o_norm.normalize(obses)
        norm_obses_next = self.o_norm.normalize(obses_next)
        # transfer them into tensors
        norm_obses_tensor = torch.tensor(norm_obses, dtype=torch.float32)
        norm_obses_next_tensor = torch.tensor(norm_obses_next, dtype=torch.float32)
        actions_tensor = torch.tensor(actions, dtype=torch.float32)
        rewards_tensor = torch.tensor(rewards, dtype=torch.float32).unsqueeze(1)
        dones_tensor = torch.tensor(dones, dtype=torch.float32).unsqueeze(1)
        # bootstrap target from the target networks (no gradients needed)
        with torch.no_grad():
            actions_next = self.actor_target_net(norm_obses_next_tensor)
            q_next_value = self.critic_target_net(norm_obses_next_tensor, actions_next)
            target_q_value = rewards_tensor + (1 - dones_tensor) * self.args.gamma * q_next_value
        # the real q value
        real_q_value = self.critic_net(norm_obses_tensor, actions_tensor)
        critic_loss = (real_q_value - target_q_value).pow(2).mean()
        # the actor loss: maximize Q of the actor's own actions
        actions_real = self.actor_net(norm_obses_tensor)
        actor_loss = -self.critic_net(norm_obses_tensor, actions_real).mean()
        # update the actor network (gradients averaged across mpi ranks)
        self.actor_optim.zero_grad()
        actor_loss.backward()
        sync_grads(self.actor_net)
        self.actor_optim.step()
        # update the critic network
        self.critic_optim.zero_grad()
        critic_loss.backward()
        sync_grads(self.critic_net)
        self.critic_optim.step()
        return actor_loss.item(), critic_loss.item()

    # soft update the target network...
    def _soft_update_target_network(self, target, source):
        """Polyak-average `source` into `target`: t = polyak*t + (1-polyak)*s."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_((1 - self.args.polyak) * param.data + self.args.polyak * target_param.data)

    # do the evaluation
    def _eval_agent(self):
        """Run deterministic evaluation episodes and return the episode-return
        mean averaged across all mpi ranks."""
        self.actor_net.eval()
        total_success_rate = []
        for _ in range(self.args.nb_test_rollouts):
            per_success_rate = []
            obs = self.eval_env.reset()
            while True:
                with torch.no_grad():
                    inputs_tensor = self._preproc_inputs(obs)
                    pi = self.actor_net(inputs_tensor)
                    actions = pi.detach().cpu().numpy().squeeze()
                # a squeezed 1-d action collapses to a scalar; restore the shape
                if self.action_dims == 1:
                    actions = np.array([actions])
                obs_, reward, done, _ = self.eval_env.step(actions * self.action_max)
                per_success_rate.append(reward)
                obs = obs_
                if done:
                    break
            total_success_rate.append(np.sum(per_success_rate))
        local_success_rate = np.mean(total_success_rate)
        global_success_rate = MPI.COMM_WORLD.allreduce(local_success_rate, op=MPI.SUM)
        return global_success_rate / MPI.COMM_WORLD.Get_size()
| 8,833 | 45.494737 | 142 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ddpg/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# define the actor network
class actor(nn.Module):
    """Deterministic policy network mapping observations to tanh-squashed actions."""

    def __init__(self, obs_dims, action_dims):
        super(actor, self).__init__()
        self.fc1 = nn.Linear(obs_dims, 400)
        self.fc2 = nn.Linear(400, 300)
        self.action_out = nn.Linear(300, action_dims)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        # tanh keeps actions in [-1, 1]; the caller rescales to the env range
        return torch.tanh(self.action_out(hidden))
class critic(nn.Module):
    """Q-network scoring (observation, action) pairs; actions join at layer 2."""

    def __init__(self, obs_dims, action_dims):
        super(critic, self).__init__()
        self.fc1 = nn.Linear(obs_dims, 400)
        self.fc2 = nn.Linear(400 + action_dims, 300)
        self.q_out = nn.Linear(300, 1)

    def forward(self, x, actions):
        hidden = F.relu(self.fc1(x))
        # concatenate the action vector with the first hidden layer's output
        hidden = F.relu(self.fc2(torch.cat([hidden, actions], dim=1)))
        return self.q_out(hidden)
| 950 | 28.71875 | 53 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ddpg/train.py | from ddpg_agent import ddpg_agent
from arguments import get_args
from rl_utils.seeds.seeds import set_seeds
from rl_utils.env_wrapper.create_env import create_single_env
from mpi4py import MPI
import os
if __name__ == '__main__':
    # single-threaded BLAS + mark that we are running under MPI
    os.environ['OMP_NUM_THREADS'] = '1'
    os.environ['MKL_NUM_THREADS'] = '1'
    os.environ['IN_MPI'] = '1'
    args = get_args()
    # per-rank environment and per-rank seeding
    rank = MPI.COMM_WORLD.Get_rank()
    env = create_single_env(args, rank)
    set_seeds(args, rank)
    # run DDPG training, then clean up
    ddpg_trainer = ddpg_agent(env, args)
    ddpg_trainer.learn()
    env.close()
| 717 | 28.916667 | 61 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ppo/arguments.py | import argparse
def get_args():
    """Parse and return the command-line options for PPO training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gamma', type=float, default=0.99, help='the discount factor of RL')
    parser.add_argument('--seed', type=int, default=123, help='the random seeds')
    parser.add_argument('--num-workers', type=int, default=8, help='the number of workers to collect samples')
    parser.add_argument('--env-name', type=str, default='PongNoFrameskip-v4', help='the environment name')
    parser.add_argument('--batch-size', type=int, default=4, help='the batch size of updating')
    parser.add_argument('--lr', type=float, default=2.5e-4, help='learning rate of the algorithm')
    parser.add_argument('--epoch', type=int, default=4, help='the epoch during training')
    parser.add_argument('--nsteps', type=int, default=128, help='the steps to collect samples')
    parser.add_argument('--vloss-coef', type=float, default=0.5, help='the coefficient of value loss')
    parser.add_argument('--ent-coef', type=float, default=0.01, help='the entropy loss coefficient')
    parser.add_argument('--tau', type=float, default=0.95, help='gae coefficient')
    parser.add_argument('--cuda', action='store_true', help='use cuda do the training')
    parser.add_argument('--total-frames', type=int, default=20000000, help='the total frames for training')
    parser.add_argument('--dist', type=str, default='gauss', help='the distributions for sampling actions')
    parser.add_argument('--eps', type=float, default=1e-5, help='param for adam optimizer')
    parser.add_argument('--clip', type=float, default=0.1, help='the ratio clip param')
    parser.add_argument('--save-dir', type=str, default='saved_models/', help='the folder to save models')
    parser.add_argument('--lr-decay', action='store_true', help='if using the learning rate decay during decay')
    parser.add_argument('--max-grad-norm', type=float, default=0.5, help='grad norm')
    parser.add_argument('--display-interval', type=int, default=10, help='the interval that display log information')
    parser.add_argument('--env-type', type=str, default='atari', help='the type of the environment')
    parser.add_argument('--log-dir', type=str, default='logs', help='the folders to save the log files')
    return parser.parse_args()
| 2,274 | 72.387097 | 116 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ppo/utils.py | import numpy as np
import torch
from torch.distributions.normal import Normal
from torch.distributions.beta import Beta
from torch.distributions.categorical import Categorical
import random
def select_actions(pi, dist_type, env_type):
    """Sample actions from the policy output `pi` and return a squeezed numpy array.

    Atari uses a categorical head; continuous control uses either a Gaussian
    (mean, std) or a Beta (alpha, beta) head depending on `dist_type`.
    """
    if env_type == 'atari':
        sampled = Categorical(pi).sample()
    elif dist_type == 'gauss':
        mean, std = pi
        sampled = Normal(mean, std).sample()
    elif dist_type == 'beta':
        alpha, beta = pi
        sampled = Beta(alpha.detach().cpu(), beta.detach().cpu()).sample()
    return sampled.detach().cpu().numpy().squeeze()
def evaluate_actions(pi, actions, dist_type, env_type):
    """Return (log_prob, mean entropy) of `actions` under the policy output `pi`."""
    if env_type == 'atari':
        dist = Categorical(pi)
        log_prob = dist.log_prob(actions).unsqueeze(-1)
        entropy = dist.entropy().mean()
    elif dist_type == 'gauss':
        mean, std = pi
        dist = Normal(mean, std)
        # sum per-dimension log-probs to get the joint action log-prob
        log_prob = dist.log_prob(actions).sum(dim=1, keepdim=True)
        entropy = dist.entropy().mean()
    elif dist_type == 'beta':
        alpha, beta = pi
        dist = Beta(alpha, beta)
        log_prob = dist.log_prob(actions).sum(dim=1, keepdim=True)
        entropy = dist.entropy().mean()
    return log_prob, entropy
| 1,370 | 35.078947 | 78 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ppo/demo.py | from arguments import get_args
from models import cnn_net, mlp_net
import torch
import cv2
import numpy as np
import gym
from rl_utils.env_wrapper.frame_stack import VecFrameStack
from rl_utils.env_wrapper.atari_wrapper import make_atari, wrap_deepmind
# denormalize
def normalize(x, mean, std, clip=10):
    """Z-score `x` with the saved running mean/std and clip to [-clip, clip].

    Returns a new array and does NOT mutate the input: the previous
    implementation used in-place `-=` / `/=`, which silently modified the
    caller's observation array.
    """
    return np.clip((x - mean) / (std + 1e-8), -clip, clip)
# get tensors for the agent
def get_tensors(obs, env_type, filters=None):
    """Build a batched float32 tensor for the network from a raw observation."""
    if env_type == 'atari':
        # HWC frame -> CHW before batching
        prepared = np.transpose(obs, (2, 0, 1))
    elif env_type == 'mujoco':
        # mujoco states go through the saved running-mean filter
        prepared = normalize(obs, filters.rs.mean, filters.rs.std)
    return torch.tensor(prepared, dtype=torch.float32).unsqueeze(0)
if __name__ == '__main__':
    args = get_args()
    # build the environment for the selected benchmark
    if args.env_type == 'atari':
        env = wrap_deepmind(make_atari(args.env_name), frame_stack=True)
    elif args.env_type == 'mujoco':
        env = gym.make(args.env_name)
    model_path = args.save_dir + args.env_name + '/model.pt'
    # rebuild the network and restore the saved weights
    # (mujoco checkpoints also carry the observation filter)
    if args.env_type == 'atari':
        network = cnn_net(env.action_space.n)
        network.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
        filters = None
    elif args.env_type == 'mujoco':
        network = mlp_net(env.observation_space.shape[0], env.action_space.shape[0], args.dist)
        net_models, filters = torch.load(model_path, map_location=lambda storage, loc: storage)
        network.load_state_dict(net_models)
    # roll out a single greedy episode
    obs = env.reset()
    reward_total = 0
    while True:
        env.render()
        with torch.no_grad():
            _, pi = network(get_tensors(obs, args.env_type, filters))
        # pick the greedy action for the current policy head
        if args.env_type == 'atari':
            actions = torch.argmax(pi, dim=1).item()
        elif args.env_type == 'mujoco':
            if args.dist == 'gauss':
                mean, _ = pi
                actions = mean.numpy().squeeze()
            elif args.dist == 'beta':
                alpha, beta = pi
                # mode of Beta(alpha, beta), then rescale from [0, 1] to [-1, 1]
                actions = ((alpha - 1) / (alpha + beta - 2)).numpy().squeeze()
                actions = -1 + 2 * actions
        obs_, reward, done, _ = env.step(actions)
        reward_total += reward
        if done:
            break
        obs = obs_
    print('the rewrads is: {}'.format(reward_total))
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ppo/models.py | import torch
from torch import nn
from torch.nn import functional as F
"""
this network also include gaussian distribution and beta distribution
"""
class mlp_net(nn.Module):
    """Two-tower MLP actor-critic for continuous control.

    Separate 64-64 tanh towers produce the state value and the policy
    parameters; the policy head is either Gaussian (mean + learned log-std)
    or Beta (alpha, beta), selected by `dist_type`.
    """
    def __init__(self, state_size, num_actions, dist_type):
        super(mlp_net, self).__init__()
        self.dist_type = dist_type
        # value tower
        self.fc1_v = nn.Linear(state_size, 64)
        self.fc2_v = nn.Linear(64, 64)
        # policy tower
        self.fc1_a = nn.Linear(state_size, 64)
        self.fc2_a = nn.Linear(64, 64)
        if self.dist_type == 'gauss':
            # state-independent log standard deviation
            self.sigma_log = nn.Parameter(torch.zeros(1, num_actions))
            self.action_mean = nn.Linear(64, num_actions)
            self._shrink(self.action_mean)
        elif self.dist_type == 'beta':
            self.action_alpha = nn.Linear(64, num_actions)
            self.action_beta = nn.Linear(64, num_actions)
            self._shrink(self.action_alpha)
            self._shrink(self.action_beta)
        # scalar state-value head
        self.value = nn.Linear(64, 1)
        self._shrink(self.value)

    @staticmethod
    def _shrink(layer):
        # shrink initial weights and zero the bias so early outputs stay small
        layer.weight.data.mul_(0.1)
        layer.bias.data.zero_()

    def forward(self, x):
        """Return (state_value, pi) where pi is (mean, sigma) or (alpha, beta)."""
        hidden_v = torch.tanh(self.fc2_v(torch.tanh(self.fc1_v(x))))
        state_value = self.value(hidden_v)
        hidden_a = torch.tanh(self.fc2_a(torch.tanh(self.fc1_a(x))))
        if self.dist_type == 'gauss':
            mean = self.action_mean(hidden_a)
            sigma = torch.exp(self.sigma_log.expand_as(mean))
            pi = (mean, sigma)
        elif self.dist_type == 'beta':
            # softplus + 1 keeps both concentration parameters above 1
            alpha = F.softplus(self.action_alpha(hidden_a)) + 1
            beta = F.softplus(self.action_beta(hidden_a)) + 1
            pi = (alpha, beta)
        return state_value, pi
# the convolution layer of deepmind
class deepmind(nn.Module):
    """Nature-DQN convolutional trunk: a stacked (4, 84, 84) frame -> 512 features."""

    def __init__(self):
        super(deepmind, self).__init__()
        self.conv1 = nn.Conv2d(4, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 32, 3, stride=1)
        self.fc1 = nn.Linear(32 * 7 * 7, 512)
        # orthogonal weights with the relu gain, biases zeroed
        gain = nn.init.calculate_gain('relu')
        for module in (self.conv1, self.conv2, self.conv3, self.fc1):
            nn.init.orthogonal_(module.weight.data, gain=gain)
        for module in (self.conv1, self.conv2, self.conv3, self.fc1):
            nn.init.constant_(module.bias.data, 0)

    def forward(self, x):
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        # flatten the final conv map before the fully-connected layer
        h = F.relu(self.fc1(h.view(-1, 32 * 7 * 7)))
        return h
# in the initial, just the nature CNN
class cnn_net(nn.Module):
    """Actor-critic heads on top of the deepmind conv trunk (atari benchmark)."""

    def __init__(self, num_actions):
        super(cnn_net, self).__init__()
        self.cnn_layer = deepmind()
        self.critic = nn.Linear(512, 1)
        self.actor = nn.Linear(512, num_actions)
        # value head: plain orthogonal init, zero bias
        nn.init.orthogonal_(self.critic.weight.data)
        nn.init.constant_(self.critic.bias.data, 0)
        # policy head: tiny gain keeps the initial policy close to uniform
        nn.init.orthogonal_(self.actor.weight.data, gain=0.01)
        nn.init.constant_(self.actor.bias.data, 0)

    def forward(self, inputs):
        # pixel values are rescaled from [0, 255] to [0, 1] first
        features = self.cnn_layer(inputs / 255.0)
        value = self.critic(features)
        pi = F.softmax(self.actor(features), dim=1)
        return value, pi
| 3,913 | 37 | 88 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ppo/ppo_agent.py | import numpy as np
import torch
from torch import optim
from rl_utils.running_filter.running_filter import ZFilter
from models import cnn_net, mlp_net
from utils import select_actions, evaluate_actions
from datetime import datetime
import os
import copy
class ppo_agent:
    def __init__(self, envs, args):
        """Build the policy network, optimizer, observation buffer and save dirs."""
        self.envs = envs
        self.args = args
        # build the network that matches the benchmark type
        if self.args.env_type == 'atari':
            self.net = cnn_net(envs.action_space.n)
        elif self.args.env_type == 'mujoco':
            self.net = mlp_net(envs.observation_space.shape[0], envs.action_space.shape[0], self.args.dist)
        # frozen copy used to compute the old-policy log-probs for the PPO ratio
        self.old_net = copy.deepcopy(self.net)
        # if use the cuda...
        if self.args.cuda:
            self.net.cuda()
            self.old_net.cuda()
        # define the optimizer...
        self.optimizer = optim.Adam(self.net.parameters(), self.args.lr, eps=self.args.eps)
        # running mean/std filter for mujoco observations
        if self.args.env_type == 'mujoco':
            num_states = self.envs.observation_space.shape[0]
            self.running_state = ZFilter((num_states, ), clip=5)
        # check saving folder..
        if not os.path.exists(self.args.save_dir):
            os.mkdir(self.args.save_dir)
        # env folder..
        self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)
        # persistent observation buffer, one row per worker
        self.batch_ob_shape = (self.args.num_workers * self.args.nsteps, ) + self.envs.observation_space.shape
        self.obs = np.zeros((self.args.num_workers, ) + self.envs.observation_space.shape, dtype=self.envs.observation_space.dtype.name)
        if self.args.env_type == 'mujoco':
            # mujoco runs a single env; filter the reset obs and broadcast it
            self.obs[:] = np.expand_dims(self.running_state(self.envs.reset()), 0)
        else:
            self.obs[:] = self.envs.reset()
        self.dones = [False for _ in range(self.args.num_workers)]
# start to train the network...
    def learn(self):
        """Main PPO training loop.

        Each update collects `nsteps` transitions per worker, computes GAE
        advantages and returns, snapshots the current policy into old_net,
        then runs the clipped-surrogate minibatch updates and logs/saves.
        """
        num_updates = self.args.total_frames // (self.args.nsteps * self.args.num_workers)
        # get the reward to calculate other informations
        episode_rewards = np.zeros((self.args.num_workers, ), dtype=np.float32)
        final_rewards = np.zeros((self.args.num_workers, ), dtype=np.float32)
        for update in range(num_updates):
            mb_obs, mb_rewards, mb_actions, mb_dones, mb_values = [], [], [], [], []
            if self.args.lr_decay:
                self._adjust_learning_rate(update, num_updates)
            for step in range(self.args.nsteps):
                with torch.no_grad():
                    # get tensors
                    obs_tensor = self._get_tensors(self.obs)
                    values, pis = self.net(obs_tensor)
                # select actions
                actions = select_actions(pis, self.args.dist, self.args.env_type)
                if self.args.env_type == 'atari':
                    input_actions = actions
                else:
                    if self.args.dist == 'gauss':
                        input_actions = actions.copy()
                    elif self.args.dist == 'beta':
                        # beta samples live in [0, 1]; map them to [-1, 1]
                        input_actions = -1 + 2 * actions
                # start to store information
                mb_obs.append(np.copy(self.obs))
                mb_actions.append(actions)
                mb_dones.append(self.dones)
                mb_values.append(values.detach().cpu().numpy().squeeze())
                # start to execute the actions in the environment
                obs, rewards, dones, _ = self.envs.step(input_actions)
                # mujoco runs a single env, so wrap scalars into length-1 arrays
                if self.args.env_type == 'mujoco':
                    dones = np.array([dones])
                    rewards = np.array([rewards])
                self.dones = dones
                mb_rewards.append(rewards)
                # clear the observation of finished workers
                for n, done in enumerate(dones):
                    if done:
                        self.obs[n] = self.obs[n] * 0
                        if self.args.env_type == 'mujoco':
                            # reset the environment
                            obs = self.envs.reset()
                self.obs = obs if self.args.env_type == 'atari' else np.expand_dims(self.running_state(obs), 0)
                # process the rewards part -- display the rewards on the screen
                episode_rewards += rewards
                masks = np.array([0.0 if done_ else 1.0 for done_ in dones], dtype=np.float32)
                final_rewards *= masks
                final_rewards += (1 - masks) * episode_rewards
                episode_rewards *= masks
            # process the rollouts
            mb_obs = np.asarray(mb_obs, dtype=np.float32)
            mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
            mb_actions = np.asarray(mb_actions, dtype=np.float32)
            # NOTE(review): np.bool is removed in NumPy >= 1.24 -- this line
            # needs `bool` or `np.bool_` on modern NumPy
            mb_dones = np.asarray(mb_dones, dtype=np.bool)
            mb_values = np.asarray(mb_values, dtype=np.float32)
            if self.args.env_type == 'mujoco':
                mb_values = np.expand_dims(mb_values, 1)
            # compute the last state value for bootstrapping
            with torch.no_grad():
                obs_tensor = self._get_tensors(self.obs)
                last_values, _ = self.net(obs_tensor)
                last_values = last_values.detach().cpu().numpy().squeeze()
            # start to compute advantages (GAE, walking backwards in time)
            mb_returns = np.zeros_like(mb_rewards)
            mb_advs = np.zeros_like(mb_rewards)
            lastgaelam = 0
            for t in reversed(range(self.args.nsteps)):
                if t == self.args.nsteps - 1:
                    nextnonterminal = 1.0 - self.dones
                    nextvalues = last_values
                else:
                    nextnonterminal = 1.0 - mb_dones[t + 1]
                    nextvalues = mb_values[t + 1]
                delta = mb_rewards[t] + self.args.gamma * nextvalues * nextnonterminal - mb_values[t]
                mb_advs[t] = lastgaelam = delta + self.args.gamma * self.args.tau * nextnonterminal * lastgaelam
            mb_returns = mb_advs + mb_values
            # after compute the returns, let's process the rollouts
            mb_obs = mb_obs.swapaxes(0, 1).reshape(self.batch_ob_shape)
            if self.args.env_type == 'atari':
                mb_actions = mb_actions.swapaxes(0, 1).flatten()
            mb_returns = mb_returns.swapaxes(0, 1).flatten()
            mb_advs = mb_advs.swapaxes(0, 1).flatten()
            # before update the network, the old network will try to load the weights
            self.old_net.load_state_dict(self.net.state_dict())
            # start to update the network
            pl, vl, ent = self._update_network(mb_obs, mb_actions, mb_returns, mb_advs)
            # display the training information
            if update % self.args.display_interval == 0:
                print('[{}] Update: {} / {}, Frames: {}, Rewards: {:.3f}, Min: {:.3f}, Max: {:.3f}, PL: {:.3f},'\
                    'VL: {:.3f}, Ent: {:.3f}'.format(datetime.now(), update, num_updates, (update + 1)*self.args.nsteps*self.args.num_workers, \
                    final_rewards.mean(), final_rewards.min(), final_rewards.max(), pl, vl, ent))
                # save the model
                if self.args.env_type == 'atari':
                    torch.save(self.net.state_dict(), self.model_path + '/model.pt')
                else:
                    # for the mujoco, we also need to keep the running mean filter!
                    torch.save([self.net.state_dict(), self.running_state], self.model_path + '/model.pt')
# update the network
    def _update_network(self, obs, actions, returns, advantages):
        """Run the PPO clipped-surrogate update over the collected rollout.

        Performs ``args.epoch`` passes over the data, each time shuffling and
        splitting it into minibatches, and optimizes the combined
        policy + value + entropy loss with gradient clipping.

        Args:
            obs: flattened rollout observations, shape (N, ...).
            actions: actions taken during the rollout, shape (N, ...).
            returns: GAE-based returns (advantages + values), shape (N, ...).
            advantages: GAE advantage estimates, shape (N, ...).

        Returns:
            Tuple of floats ``(policy_loss, value_loss, entropy_loss)`` from
            the last minibatch update.
        """
        inds = np.arange(obs.shape[0])
        # NOTE(review): despite its name, args.batch_size acts as the NUMBER of
        # minibatches here — nbatch_train is the per-minibatch sample count.
        # Confirm against arguments.py before renaming anything.
        nbatch_train = obs.shape[0] // self.args.batch_size
        for _ in range(self.args.epoch):
            np.random.shuffle(inds)
            for start in range(0, obs.shape[0], nbatch_train):
                # slice out one minibatch of indices
                end = start + nbatch_train
                mbinds = inds[start:end]
                mb_obs = obs[mbinds]
                mb_actions = actions[mbinds]
                mb_returns = returns[mbinds]
                mb_advs = advantages[mbinds]
                # convert minibatches to tensors (obs handles atari transpose / cuda)
                mb_obs = self._get_tensors(mb_obs)
                mb_actions = torch.tensor(mb_actions, dtype=torch.float32)
                mb_returns = torch.tensor(mb_returns, dtype=torch.float32).unsqueeze(1)
                mb_advs = torch.tensor(mb_advs, dtype=torch.float32).unsqueeze(1)
                # normalize advantages per-minibatch for stable gradients
                mb_advs = (mb_advs - mb_advs.mean()) / (mb_advs.std() + 1e-8)
                if self.args.cuda:
                    mb_actions = mb_actions.cuda()
                    mb_returns = mb_returns.cuda()
                    mb_advs = mb_advs.cuda()
                # current value estimates and policy distribution parameters
                mb_values, pis = self.net(mb_obs)
                # value loss: MSE against the computed returns
                value_loss = (mb_returns - mb_values).pow(2).mean()
                # old-policy log probs (frozen copy loaded before the update)
                with torch.no_grad():
                    _, old_pis = self.old_net(mb_obs)
                    # get the old log probs
                    old_log_prob, _ = evaluate_actions(old_pis, mb_actions, self.args.dist, self.args.env_type)
                    old_log_prob = old_log_prob.detach()
                # evaluate the current policy on the same actions
                log_prob, ent_loss = evaluate_actions(pis, mb_actions, self.args.dist, self.args.env_type)
                prob_ratio = torch.exp(log_prob - old_log_prob)
                # clipped surrogate objective (PPO): min of unclipped / clipped
                surr1 = prob_ratio * mb_advs
                surr2 = torch.clamp(prob_ratio, 1 - self.args.clip, 1 + self.args.clip) * mb_advs
                policy_loss = -torch.min(surr1, surr2).mean()
                # total loss: policy + weighted value - entropy bonus
                total_loss = policy_loss + self.args.vloss_coef * value_loss - ent_loss * self.args.ent_coef
                # clear the grad buffer
                self.optimizer.zero_grad()
                total_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.net.parameters(), self.args.max_grad_norm)
                # update
                self.optimizer.step()
        return policy_loss.item(), value_loss.item(), ent_loss.item()
# convert the numpy array to tensors
def _get_tensors(self, obs):
if self.args.env_type == 'atari':
obs_tensor = torch.tensor(np.transpose(obs, (0, 3, 1, 2)), dtype=torch.float32)
else:
obs_tensor = torch.tensor(obs, dtype=torch.float32)
# decide if put the tensor on the GPU
if self.args.cuda:
obs_tensor = obs_tensor.cuda()
return obs_tensor
# adjust the learning rate
def _adjust_learning_rate(self, update, num_updates):
lr_frac = 1 - (update / num_updates)
adjust_lr = self.args.lr * lr_frac
for param_group in self.optimizer.param_groups:
param_group['lr'] = adjust_lr
| 11,143 | 50.592593 | 144 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/ppo/train.py | from arguments import get_args
from ppo_agent import ppo_agent
from rl_utils.env_wrapper.create_env import create_multiple_envs, create_single_env
from rl_utils.seeds.seeds import set_seeds
import os
if __name__ == '__main__':
    # force single-threaded BLAS so the worker processes don't oversubscribe cores
    os.environ['OMP_NUM_THREADS'] = '1'
    os.environ['MKL_NUM_THREADS'] = '1'
    # parse command-line arguments
    args = get_args()
    # build the environment(s): atari uses vectorized workers, mujoco a single env
    if args.env_type == 'atari':
        envs = create_multiple_envs(args)
    elif args.env_type == 'mujoco':
        envs = create_single_env(args)
    else:
        raise NotImplementedError
    # create the PPO trainer
    ppo_trainer = ppo_agent(envs, args)
    # run the full training loop
    ppo_trainer.learn()
    # release environment resources
    envs.close()
| 757 | 28.153846 | 83 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/sac/arguments.py | import argparse
# define the arguments that will be used in the SAC
def get_args():
    """Parse and return the command-line arguments for SAC training.

    Fixes over the original: typos in user-facing help strings
    ('lenght' -> 'length', 'inital' -> 'initial', 'entorpy' -> 'entropy').
    All argument names, types and defaults are unchanged.
    """
    parse = argparse.ArgumentParser()
    parse.add_argument('--env-name', type=str, default='HalfCheetah-v2', help='the environment name')
    parse.add_argument('--cuda', action='store_true', help='use GPU do the training')
    parse.add_argument('--seed', type=int, default=123, help='the random seed to reproduce results')
    parse.add_argument('--hidden-size', type=int, default=256, help='the size of the hidden layer')
    parse.add_argument('--train-loop-per-epoch', type=int, default=1, help='the training loop per epoch')
    parse.add_argument('--q-lr', type=float, default=3e-4, help='the learning rate')
    parse.add_argument('--p-lr', type=float, default=3e-4, help='the learning rate of the actor')
    parse.add_argument('--n-epochs', type=int, default=int(3e3), help='the number of total epochs')
    parse.add_argument('--epoch-length', type=int, default=int(1e3), help='the length of each epoch')
    parse.add_argument('--n-updates', type=int, default=int(1e3), help='the number of training updates execute')
    parse.add_argument('--init-exploration-steps', type=int, default=int(1e3), help='the steps of the initial exploration')
    parse.add_argument('--init-exploration-policy', type=str, default='gaussian', help='the initial exploration policy')
    parse.add_argument('--buffer-size', type=int, default=int(1e6), help='the size of the replay buffer')
    parse.add_argument('--batch-size', type=int, default=256, help='the batch size of samples for training')
    parse.add_argument('--reward-scale', type=float, default=1, help='the reward scale')
    parse.add_argument('--gamma', type=float, default=0.99, help='the discount factor')
    parse.add_argument('--log-std-max', type=float, default=2, help='the maximum log std value')
    parse.add_argument('--log-std-min', type=float, default=-20, help='the minimum log std value')
    parse.add_argument('--entropy-weights', type=float, default=0.2, help='the entropy weights')
    parse.add_argument('--tau', type=float, default=5e-3, help='the soft update coefficient')
    parse.add_argument('--target-update-interval', type=int, default=1, help='the interval to update target network')
    parse.add_argument('--update-cycles', type=int, default=int(1e3), help='how many updates apply in the update')
    parse.add_argument('--eval-episodes', type=int, default=10, help='the episodes that used for evaluation')
    parse.add_argument('--display-interval', type=int, default=1, help='the display interval')
    parse.add_argument('--save-dir', type=str, default='saved_models/', help='the place to save models')
    parse.add_argument('--reg', type=float, default=1e-3, help='the reg term')
    parse.add_argument('--auto-ent-tuning', action='store_true', help='tune the entropy automatically')
    parse.add_argument('--log-dir', type=str, default='logs', help='dir to save log information')
    parse.add_argument('--env-type', type=str, default=None, help='environment type')
    return parse.parse_args()
| 3,080 | 82.27027 | 123 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/sac/utils.py | import numpy as np
import torch
from torch.distributions.normal import Normal
from torch.distributions import Distribution
"""
the tanhnormal distributions from rlkit may not stable
"""
class tanh_normal(Distribution):
    """Distribution of X = tanh(Z) where Z ~ Normal(normal_mean, normal_std).

    Used to produce bounded (-1, 1) actions from a gaussian policy head.
    NOTE(review): super().__init__() is never called, so base-class
    Distribution machinery (batch/event shape validation) is unused here.
    """
    def __init__(self, normal_mean, normal_std, epsilon=1e-6, cuda=False):
        # parameters of the underlying (pre-tanh) gaussian
        self.normal_mean = normal_mean
        self.normal_std = normal_std
        self.cuda = cuda
        self.normal = Normal(normal_mean, normal_std)
        # numerical fudge term that keeps log(1 - tanh^2) finite at the bounds
        self.epsilon = epsilon
    def sample_n(self, n, return_pre_tanh_value=False):
        # NOTE(review): Normal.sample_n is deprecated in newer torch versions
        # in favour of sample(sample_shape) — verify against the installed torch.
        z = self.normal.sample_n(n)
        if return_pre_tanh_value:
            return torch.tanh(z), z
        else:
            return torch.tanh(z)
    def log_prob(self, value, pre_tanh_value=None):
        """
        :param value: some value, x
        :param pre_tanh_value: arctanh(x)
        :return: log density of x under the tanh-gaussian (change-of-variables
            correction log(1 - x^2 + epsilon) subtracted from the gaussian term)
        """
        if pre_tanh_value is None:
            # atanh(x) = 0.5 * log((1+x)/(1-x)); diverges as |x| -> 1
            pre_tanh_value = torch.log((1 + value) / (1 - value)) / 2
        return self.normal.log_prob(pre_tanh_value) - torch.log(1 - value * value + self.epsilon)
    def sample(self, return_pretanh_value=False):
        """
        Gradients will and should *not* pass through this operation.
        See https://github.com/pytorch/pytorch/issues/4620 for discussion.
        """
        z = self.normal.sample().detach()
        if return_pretanh_value:
            return torch.tanh(z), z
        else:
            return torch.tanh(z)
    def rsample(self, return_pretanh_value=False):
        """
        Sampling in the reparameterization case.
        """
        # draw standard-normal noise of the right shape/device, then shift/scale
        # by mean/std so gradients flow back into the policy parameters
        sample_mean = torch.zeros(self.normal_mean.size(), dtype=torch.float32, device='cuda' if self.cuda else 'cpu')
        sample_std = torch.ones(self.normal_std.size(), dtype=torch.float32, device='cuda' if self.cuda else 'cpu')
        z = (self.normal_mean + self.normal_std * Normal(sample_mean, sample_std).sample())
        # NOTE(review): requires_grad_() is only valid on leaf tensors; when
        # normal_mean already requires grad z is a non-leaf — confirm this call
        # doesn't raise / is a no-op on the torch version in use.
        z.requires_grad_()
        if return_pretanh_value:
            return torch.tanh(z), z
        else:
            return torch.tanh(z)
# get action_infos
class get_action_info:
    """Wraps the actor's (mean, std) output and exposes sampling helpers."""

    def __init__(self, pis, cuda=False):
        # unpack the gaussian parameters produced by the actor network
        self.mean, self.std = pis
        self.dist = tanh_normal(normal_mean=self.mean, normal_std=self.std, cuda=cuda)

    def select_actions(self, exploration=True, reparameterize=True):
        """Draw actions from the tanh-gaussian.

        exploration=False returns the deterministic squashed mean;
        reparameterize=True additionally returns the pre-tanh value so
        gradients can flow through the sample.
        """
        if not exploration:
            # deterministic action for evaluation: squash the mean
            return torch.tanh(self.mean)
        if reparameterize:
            squashed, raw = self.dist.rsample(return_pretanh_value=True)
            return squashed, raw
        return self.dist.sample()

    def get_log_prob(self, actions, pre_tanh_value):
        """Log-density of the squashed actions, summed over action dims."""
        per_dim = self.dist.log_prob(actions, pre_tanh_value=pre_tanh_value)
        return per_dim.sum(dim=1, keepdim=True)
| 2,841 | 34.08642 | 118 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/sac/demo.py | from arguments import get_args
import gym
import torch
import numpy as np
from models import tanh_gaussian_actor
if __name__ == '__main__':
    args = get_args()
    env = gym.make(args.env_name)
    # read environment dimensions and the action bound
    obs_dims = env.observation_space.shape[0]
    action_dims = env.action_space.shape[0]
    action_max = env.action_space.high[0]
    # build the policy network with the same sizes used during training
    actor_net = tanh_gaussian_actor(obs_dims, action_dims, args.hidden_size, args.log_std_min, args.log_std_max)
    # path of the trained model
    model_path = args.save_dir + args.env_name + '/model.pt'
    # load the network weights (CPU-mapped so no GPU is required)
    actor_net.load_state_dict(torch.load(model_path, map_location='cpu'))
    for ep in range(5):
        obs = env.reset()
        reward_sum = 0
        # cap each demo episode at 1000 timesteps
        for _ in range(1000):
            env.render()
            with torch.no_grad():
                obs_tensor = torch.tensor(obs, dtype=torch.float32).unsqueeze(0)
                mean, std = actor_net(obs_tensor)
                # deterministic action: squashed mean, std ignored at demo time
                actions = torch.tanh(mean).detach().numpy().squeeze()
                # squeeze() collapses a 1-dim action to a scalar; restore the array
                if action_dims == 1:
                    actions = np.array([actions])
            obs_, reward, done, _ = env.step(action_max * actions)
            reward_sum += reward
            if done:
                break
            obs = obs_
        print('the episode is: {}, the reward is: {}'.format(ep, reward_sum))
    env.close()
| 1,433 | 35.769231 | 112 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/sac/sac_agent.py | import numpy as np
import torch
from models import flatten_mlp, tanh_gaussian_actor
from rl_utils.experience_replay.experience_replay import replay_buffer
from utils import get_action_info
from datetime import datetime
import copy
import os
import gym
"""
2019-Nov-12 - start to add the automatically tempature tuning
2019-JUN-05
author: Tianhong Dai
"""
# the soft-actor-critic agent
class sac_agent:
    """Soft Actor-Critic agent.

    Holds twin Q-networks with target copies, a tanh-gaussian policy, a
    learned entropy temperature (log_alpha) and a uniform replay buffer.
    """
    def __init__(self, env, args):
        self.args = args
        self.env = env
        # separate environment (different seed) used only for evaluation
        self.eval_env = gym.make(self.args.env_name)
        self.eval_env.seed(args.seed * 2)
        # twin Q-networks (clipped double-Q)
        self.qf1 = flatten_mlp(self.env.observation_space.shape[0], self.args.hidden_size, self.env.action_space.shape[0])
        self.qf2 = flatten_mlp(self.env.observation_space.shape[0], self.args.hidden_size, self.env.action_space.shape[0])
        # target Q-networks start as exact copies and track via soft updates
        self.target_qf1 = copy.deepcopy(self.qf1)
        self.target_qf2 = copy.deepcopy(self.qf2)
        # tanh-gaussian policy network
        self.actor_net = tanh_gaussian_actor(self.env.observation_space.shape[0], self.env.action_space.shape[0], self.args.hidden_size, \
                self.args.log_std_min, self.args.log_std_max)
        # optimizers for the critics
        self.qf1_optim = torch.optim.Adam(self.qf1.parameters(), lr=self.args.q_lr)
        self.qf2_optim = torch.optim.Adam(self.qf2.parameters(), lr=self.args.q_lr)
        # optimizer for the policy network
        self.actor_optim = torch.optim.Adam(self.actor_net.parameters(), lr=self.args.p_lr)
        # entropy target: -|A| (negative action dimensionality), per the SAC paper
        self.target_entropy = -np.prod(self.env.action_space.shape).item()
        # learnable log temperature for automatic entropy tuning
        self.log_alpha = torch.zeros(1, requires_grad=True, device='cuda' if self.args.cuda else 'cpu')
        # optimizer for the temperature parameter
        self.alpha_optim = torch.optim.Adam([self.log_alpha], lr=self.args.p_lr)
        # uniform replay buffer
        self.buffer = replay_buffer(self.args.buffer_size)
        # scalar action bound used to rescale tanh-squashed actions
        self.action_max = self.env.action_space.high[0]
        # move networks onto the GPU when requested
        if self.args.cuda:
            self.actor_net.cuda()
            self.qf1.cuda()
            self.qf2.cuda()
            self.target_qf1.cuda()
            self.target_qf2.cuda()
        # automatically create the folders to save models
        if not os.path.exists(self.args.save_dir):
            os.mkdir(self.args.save_dir)
        self.model_path = os.path.join(self.args.save_dir, self.args.env_name)
        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)
    def learn(self):
        """Main training loop: collect transitions, then run gradient updates.

        Each epoch interleaves environment rollouts with ``update_cycles``
        network updates, evaluates periodically and saves the actor weights.
        """
        global_timesteps = 0
        # fill the replay buffer before any training step
        self._initial_exploration(exploration_policy=self.args.init_exploration_policy)
        # reset the environment
        obs = self.env.reset()
        for epoch in range(self.args.n_epochs):
            for _ in range(self.args.train_loop_per_epoch):
                # collect epoch_length transitions with the current policy
                for t in range(self.args.epoch_length):
                    with torch.no_grad():
                        obs_tensor = self._get_tensor_inputs(obs)
                        pi = self.actor_net(obs_tensor)
                        # stochastic (non-reparameterized) action for exploration
                        action = get_action_info(pi, cuda=self.args.cuda).select_actions(reparameterize=False)
                        action = action.cpu().numpy()[0]
                    # step the environment with the rescaled action
                    obs_, reward, done, _ = self.env.step(self.action_max * action)
                    # store the transition (done stored as float mask)
                    self.buffer.add(obs, action, reward, obs_, float(done))
                    # reassign the observations
                    obs = obs_
                    if done:
                        # reset the environment
                        obs = self.env.reset()
                # after collecting samples, run the gradient updates
                for _ in range(self.args.update_cycles):
                    qf1_loss, qf2_loss, actor_loss, alpha, alpha_loss = self._update_newtork()
                    # soft-update the target critics every target_update_interval updates
                    if global_timesteps % self.args.target_update_interval == 0:
                        self._update_target_network(self.target_qf1, self.qf1)
                        self._update_target_network(self.target_qf2, self.qf2)
                    global_timesteps += 1
            # periodically evaluate and log progress
            if epoch % self.args.display_interval == 0:
                # deterministic evaluation over eval_episodes episodes
                mean_rewards = self._evaluate_agent()
                print('[{}] Epoch: {} / {}, Frames: {}, Rewards: {:.3f}, QF1: {:.3f}, QF2: {:.3f}, AL: {:.3f}, Alpha: {:.5f}, AlphaL: {:.5f}'.format(datetime.now(), \
                    epoch, self.args.n_epochs, (epoch + 1) * self.args.epoch_length, mean_rewards, qf1_loss, qf2_loss, actor_loss, alpha, alpha_loss))
                # save the actor weights
                torch.save(self.actor_net.state_dict(), self.model_path + '/model.pt')
    def _initial_exploration(self, exploration_policy='gaussian'):
        """Pre-fill the replay buffer with init_exploration_steps transitions.

        Only the 'gaussian' policy (sampling from the untrained actor) is
        implemented; 'uniform' raises NotImplementedError.
        """
        obs = self.env.reset()
        for _ in range(self.args.init_exploration_steps):
            if exploration_policy == 'uniform':
                raise NotImplementedError
            elif exploration_policy == 'gaussian':
                # the sac does not need normalize?
                with torch.no_grad():
                    obs_tensor = self._get_tensor_inputs(obs)
                    # sample a stochastic action from the (untrained) policy
                    pi = self.actor_net(obs_tensor)
                    action = get_action_info(pi).select_actions(reparameterize=False)
                    action = action.cpu().numpy()[0]
                # step the environment with the rescaled action
                obs_, reward, done, _ = self.env.step(self.action_max * action)
                # store the transition
                self.buffer.add(obs, action, reward, obs_, float(done))
                obs = obs_
                if done:
                    # if done, reset the environment
                    obs = self.env.reset()
        print("Initial exploration has been finished!")
    def _get_tensor_inputs(self, obs):
        """Convert one observation to a (1, obs_dim) float32 tensor on the right device."""
        obs_tensor = torch.tensor(obs, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu').unsqueeze(0)
        return obs_tensor
    def _update_newtork(self):
        """One SAC gradient step: temperature, critics, then the actor.

        NOTE(review): the method name typo ('newtork') is kept because
        learn() calls it by this name.

        Returns:
            (qf1_loss, qf2_loss, actor_loss, alpha, alpha_loss) as floats.
        """
        # sample a batch of transitions from the replay buffer
        obses, actions, rewards, obses_, dones = self.buffer.sample(self.args.batch_size)
        # convert the batch to tensors on the chosen device
        obses = torch.tensor(obses, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu')
        actions = torch.tensor(actions, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu')
        rewards = torch.tensor(rewards, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu').unsqueeze(-1)
        obses_ = torch.tensor(obses_, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu')
        # (1 - done) mask: zeroes the bootstrap term at terminal states
        inverse_dones = torch.tensor(1 - dones, dtype=torch.float32, device='cuda' if self.args.cuda else 'cpu').unsqueeze(-1)
        # reparameterized actions + log-probs from the current policy
        pis = self.actor_net(obses)
        actions_info = get_action_info(pis, cuda=self.args.cuda)
        actions_, pre_tanh_value = actions_info.select_actions(reparameterize=True)
        log_prob = actions_info.get_log_prob(actions_, pre_tanh_value)
        # temperature loss for automatic entropy tuning (log_prob detached)
        alpha_loss = -(self.log_alpha * (log_prob + self.target_entropy).detach()).mean()
        self.alpha_optim.zero_grad()
        alpha_loss.backward()
        self.alpha_optim.step()
        # current temperature value (still differentiable w.r.t. log_alpha)
        alpha = self.log_alpha.exp()
        # clipped double-Q value of the freshly sampled actions
        q_actions_ = torch.min(self.qf1(obses, actions_), self.qf2(obses, actions_))
        actor_loss = (alpha * log_prob - q_actions_).mean()
        # critic values for the actions actually stored in the buffer
        q1_value = self.qf1(obses, actions)
        q2_value = self.qf2(obses, actions)
        with torch.no_grad():
            # soft Bellman backup target using the target critics
            pis_next = self.actor_net(obses_)
            actions_info_next = get_action_info(pis_next, cuda=self.args.cuda)
            actions_next_, pre_tanh_value_next = actions_info_next.select_actions(reparameterize=True)
            log_prob_next = actions_info_next.get_log_prob(actions_next_, pre_tanh_value_next)
            target_q_value_next = torch.min(self.target_qf1(obses_, actions_next_), self.target_qf2(obses_, actions_next_)) - alpha * log_prob_next
            target_q_value = self.args.reward_scale * rewards + inverse_dones * self.args.gamma * target_q_value_next
        qf1_loss = (q1_value - target_q_value).pow(2).mean()
        qf2_loss = (q2_value - target_q_value).pow(2).mean()
        # update critic 1
        self.qf1_optim.zero_grad()
        qf1_loss.backward()
        self.qf1_optim.step()
        # update critic 2
        self.qf2_optim.zero_grad()
        qf2_loss.backward()
        self.qf2_optim.step()
        # update the actor
        self.actor_optim.zero_grad()
        actor_loss.backward()
        self.actor_optim.step()
        return qf1_loss.item(), qf2_loss.item(), actor_loss.item(), alpha.item(), alpha_loss.item()
    def _update_target_network(self, target, source):
        """Polyak-average source parameters into the target network (rate tau)."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(self.args.tau * param.data + (1 - self.args.tau) * target_param.data)
    def _evaluate_agent(self):
        """Run eval_episodes deterministic episodes; return the mean episode reward."""
        total_reward = 0
        for _ in range(self.args.eval_episodes):
            obs = self.eval_env.reset()
            episode_reward = 0
            while True:
                with torch.no_grad():
                    obs_tensor = self._get_tensor_inputs(obs)
                    pi = self.actor_net(obs_tensor)
                    # deterministic action (tanh of the mean) for evaluation
                    action = get_action_info(pi, cuda=self.args.cuda).select_actions(exploration=False, reparameterize=False)
                    action = action.detach().cpu().numpy()[0]
                # step the evaluation environment
                obs_, reward, done, _ = self.eval_env.step(self.action_max * action)
                episode_reward += reward
                if done:
                    break
                obs = obs_
            total_reward += episode_reward
        return total_reward / self.args.eval_episodes
| 10,871 | 49.803738 | 166 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/sac/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# the flatten mlp
class flatten_mlp(nn.Module):
    """Two-hidden-layer ReLU MLP producing a scalar Q-value.

    When ``action_dims`` is given, the action is concatenated onto the
    observation before the first layer (state-action critic); otherwise the
    network maps observations alone to a value.
    """
    # TODO: add the initialization method for it
    def __init__(self, input_dims, hidden_size, action_dims=None):
        super(flatten_mlp, self).__init__()
        if action_dims is None:
            first_in = input_dims
        else:
            first_in = input_dims + action_dims
        self.fc1 = nn.Linear(first_in, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.q_value = nn.Linear(hidden_size, 1)

    def forward(self, obs, action=None):
        if action is None:
            x = obs
        else:
            x = torch.cat([obs, action], dim=1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.q_value(x)
# define the policy network - tanh gaussian policy network
# TODO: Not use the log std
class tanh_gaussian_actor(nn.Module):
    """Gaussian policy head: maps observations to (mean, std) of a gaussian.

    The log-std output is clamped to [log_std_min, log_std_max] before being
    exponentiated. The tanh squashing itself is applied by the caller.
    """
    # TODO: Not use the log std
    def __init__(self, input_dims, action_dims, hidden_size, log_std_min, log_std_max):
        super(tanh_gaussian_actor, self).__init__()
        self.fc1 = nn.Linear(input_dims, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.mean = nn.Linear(hidden_size, action_dims)
        self.log_std = nn.Linear(hidden_size, action_dims)
        # clamp bounds that keep the std numerically sane
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max

    def forward(self, obs):
        hidden = F.relu(self.fc1(obs))
        hidden = F.relu(self.fc2(hidden))
        mu = self.mean(hidden)
        # clamp the log-std, then exponentiate to get a positive std
        raw_log_std = self.log_std(hidden)
        clamped = torch.clamp(raw_log_std, min=self.log_std_min, max=self.log_std_max)
        return (mu, torch.exp(clamped))
| 1,745 | 38.681818 | 130 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_algorithms/sac/train.py | from arguments import get_args
from sac_agent import sac_agent
from rl_utils.seeds.seeds import set_seeds
from rl_utils.env_wrapper.create_env import create_single_env
if __name__ == '__main__':
    args = get_args()
    # build a single (non-vectorized) environment for SAC
    env = create_single_env(args)
    # seed numpy / random / torch for reproducibility
    set_seeds(args)
    # create the agent and run the full training loop
    sac_trainer = sac_agent(env, args)
    sac_trainer.learn()
    # release environment resources
    env.close()
| 450 | 25.529412 | 61 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/__init__.py | 0 | 0 | 0 | py | |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/seeds/seeds.py | import numpy as np
import random
import torch
# set random seeds for the pytorch, numpy and random
def set_seeds(args, rank=0):
    """Seed numpy, random and torch (plus CUDA when enabled).

    The worker ``rank`` is added to ``args.seed`` so parallel workers get
    distinct but reproducible random streams.
    """
    seed = args.seed + rank
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    if args.cuda:
        torch.cuda.manual_seed(seed)
| 407 | 26.2 | 52 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/experience_replay/experience_replay.py | import numpy as np
import random
"""
define the replay buffer and corresponding algorithms like PER
"""
class replay_buffer:
    """Fixed-capacity FIFO transition store with uniform random sampling."""

    def __init__(self, memory_size):
        # NOTE: attribute name 'storge' (sic) kept for compatibility with callers
        self.storge = []
        self.memory_size = memory_size
        self.next_idx = 0

    def add(self, obs, action, reward, obs_, done):
        """Insert one transition, overwriting the oldest slot once full."""
        transition = (obs, action, reward, obs_, done)
        if self.next_idx < len(self.storge):
            self.storge[self.next_idx] = transition
        else:
            self.storge.append(transition)
        # advance the write cursor with wraparound
        self.next_idx = (self.next_idx + 1) % self.memory_size

    def _encode_sample(self, idx):
        """Stack the transitions at the given indices into numpy batches."""
        obses, actions, rewards, obses_, dones = [], [], [], [], []
        for i in idx:
            obs, action, reward, obs_, done = self.storge[i]
            obses.append(np.array(obs, copy=False))
            actions.append(np.array(action, copy=False))
            rewards.append(reward)
            obses_.append(np.array(obs_, copy=False))
            dones.append(done)
        return np.array(obses), np.array(actions), np.array(rewards), np.array(obses_), np.array(dones)

    def sample(self, batch_size):
        """Sample batch_size transitions uniformly at random (with replacement)."""
        idxes = [random.randint(0, len(self.storge) - 1) for _ in range(batch_size)]
        return self._encode_sample(idxes)
| 1,380 | 31.880952 | 103 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/logger/logger.py | import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
    """Interface for sinks that consume key/value diagnostic dicts."""
    def writekvs(self, kvs):
        """Write one dict of diagnostics; concrete subclasses must override."""
        raise NotImplementedError
class SeqWriter(object):
    """Interface for sinks that consume sequences of log fragments."""
    def writeseq(self, seq):
        """Write one sequence of strings; concrete subclasses must override."""
        raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
    """Writes key/value tables and message sequences in human-readable form."""
    def __init__(self, filename_or_file):
        # accept either a path (which we open and own) or an open file-like object
        if isinstance(filename_or_file, str):
            self.file = open(filename_or_file, 'wt')
            self.own_file = True
        else:
            # NOTE(review): checks 'read' although the object is only written to —
            # works for sys.stdout/StringIO but looks like it should be 'write'.
            assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s'%filename_or_file
            self.file = filename_or_file
            self.own_file = False
    def writekvs(self, kvs):
        """Render kvs as an ASCII table with aligned, truncated columns."""
        # Create strings for printing
        key2str = {}
        for (key, val) in sorted(kvs.items()):
            # anything float-convertible gets compact %g formatting
            if hasattr(val, '__float__'):
                valstr = '%-8.3g' % val
            else:
                valstr = str(val)
            key2str[self._truncate(key)] = self._truncate(valstr)
        # Find max widths
        if len(key2str) == 0:
            print('WARNING: tried to write empty key-value dict')
            return
        else:
            keywidth = max(map(len, key2str.keys()))
            valwidth = max(map(len, key2str.values()))
        # Write out the data
        dashes = '-' * (keywidth + valwidth + 7)
        lines = [dashes]
        for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
            lines.append('| %s%s | %s%s |' % (
                key,
                ' ' * (keywidth - len(key)),
                val,
                ' ' * (valwidth - len(val)),
            ))
        lines.append(dashes)
        self.file.write('\n'.join(lines) + '\n')
        # Flush the output to the file
        self.file.flush()
    def _truncate(self, s):
        # cap cell width at 30 chars, marking truncation with '...'
        maxlen = 30
        return s[:maxlen-3] + '...' if len(s) > maxlen else s
    def writeseq(self, seq):
        """Write the sequence elements space-separated on one line."""
        seq = list(seq)
        for (i, elem) in enumerate(seq):
            self.file.write(elem)
            if i < len(seq) - 1: # add space unless this is the last one
                self.file.write(' ')
        self.file.write('\n')
        self.file.flush()
    def close(self):
        # only close handles we opened ourselves (never e.g. sys.stdout)
        if self.own_file:
            self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
    """Writes diagnostics rows to CSV, rewriting the file when new keys appear."""
    def __init__(self, filename):
        # 'w+t' so the file can be re-read when the header needs extending
        self.file = open(filename, 'w+t')
        self.keys = []
        self.sep = ','
    def writekvs(self, kvs):
        """Append one row; if kvs has unseen keys, rewrite header and pad old rows."""
        # Add our current row to the history
        extra_keys = list(kvs.keys() - self.keys)
        extra_keys.sort()
        if extra_keys:
            self.keys.extend(extra_keys)
            # rewrite the whole file: new header first, then the old rows
            self.file.seek(0)
            lines = self.file.readlines()
            self.file.seek(0)
            for (i, k) in enumerate(self.keys):
                if i > 0:
                    self.file.write(',')
                self.file.write(k)
            self.file.write('\n')
            for line in lines[1:]:
                # strip the trailing newline, pad with empty cells for new keys
                self.file.write(line[:-1])
                self.file.write(self.sep * len(extra_keys))
                self.file.write('\n')
        # write the current row in header order; missing keys become empty cells
        for (i, k) in enumerate(self.keys):
            if i > 0:
                self.file.write(',')
            v = kvs.get(k)
            if v is not None:
                self.file.write(str(v))
        self.file.write('\n')
        self.file.flush()
    def close(self):
        self.file.close()
class TensorBoardOutputFormat(KVWriter):
    """
    Dumps key/value pairs into TensorBoard's numeric format.
    """
    def __init__(self, dir):
        os.makedirs(dir, exist_ok=True)
        self.dir = dir
        self.step = 1
        prefix = 'events'
        path = osp.join(osp.abspath(dir), prefix)
        # imported lazily so tensorflow is only required when this format is used
        import tensorflow as tf
        from tensorflow.python import pywrap_tensorflow
        from tensorflow.core.util import event_pb2
        from tensorflow.python.util import compat
        self.tf = tf
        self.event_pb2 = event_pb2
        self.pywrap_tensorflow = pywrap_tensorflow
        # low-level events writer; writes TF event protobufs directly
        self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
    def writekvs(self, kvs):
        """Write every (key, value) as a scalar summary at the current step."""
        def summary_val(k, v):
            kwargs = {'tag': k, 'simple_value': float(v)}
            return self.tf.Summary.Value(**kwargs)
        summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
        event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
        event.step = self.step # is there any reason why you'd want to specify the step?
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.step += 1
    def close(self):
        if self.writer:
            self.writer.Close()
            self.writer = None
def make_output_format(format, ev_dir, log_suffix=''):
    """Create the output writer for *format*, placing its files under *ev_dir*.

    Supported formats: 'stdout', 'log', 'json', 'csv', 'tensorboard'.
    Raises ValueError for anything else.
    """
    os.makedirs(ev_dir, exist_ok=True)
    if format == 'stdout':
        return HumanOutputFormat(sys.stdout)
    if format == 'log':
        return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
    if format == 'json':
        return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
    if format == 'csv':
        return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
    if format == 'tensorboard':
        return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
    raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    If called many times, last value will be used.
    """
    get_current().logkv(key, val)
def logkv_mean(key, val):
    """
    The same as logkv(), but if called many times, values averaged.
    """
    get_current().logkv_mean(key, val)
def logkvs(d):
    """
    Log a dictionary of key-value pairs
    """
    for (k, v) in d.items():
        logkv(k, v)
def dumpkvs():
    """
    Write all of the diagnostics from the current iteration
    """
    return get_current().dumpkvs()
def getkvs():
    """Return the dict of values logged so far this iteration."""
    return get_current().name2val
def log(*args, level=INFO):
    """
    Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
    """
    get_current().log(*args, level=level)
def debug(*args):
    """Log at DEBUG level."""
    log(*args, level=DEBUG)
def info(*args):
    """Log at INFO level."""
    log(*args, level=INFO)
def warn(*args):
    """Log at WARN level."""
    log(*args, level=WARN)
def error(*args):
    """Log at ERROR level."""
    log(*args, level=ERROR)
def set_level(level):
    """
    Set logging threshold on current logger.
    """
    get_current().set_level(level)
def set_comm(comm):
    """Attach an MPI communicator for cross-process stat averaging."""
    get_current().set_comm(comm)
def get_dir():
    """
    Get directory that log files are being written to.
    will be None if there is no output directory (i.e., if you didn't call start)
    """
    return get_current().get_dir()
# backwards-compatible aliases for the older logger API
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
    """Context manager that accumulates wall time under 'wait_<scopename>'."""
    logkey = 'wait_' + scopename
    tstart = time.time()
    try:
        yield
    finally:
        # accumulate (not overwrite) so nested/repeated scopes sum up
        get_current().name2val[logkey] += time.time() - tstart
def profile(n):
    """
    Usage:
    @profile("my_func")
    def my_func(): code
    """
    def decorator_with_name(func):
        def func_wrapper(*args, **kwargs):
            with profile_kv(n):
                return func(*args, **kwargs)
        return func_wrapper
    return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
    """Return the active Logger, lazily configuring the default one."""
    if Logger.CURRENT is None:
        _configure_default_logger()
    return Logger.CURRENT
class Logger(object):
    """Collects key/value diagnostics and forwards them to output formats."""
    DEFAULT = None  # A logger with no output files. (See right below class definition)
                    # So that you can still log to the terminal without setting up any output files
    CURRENT = None  # Current logger being used by the free functions above
    def __init__(self, dir, output_formats, comm=None):
        self.name2val = defaultdict(float)  # values this iteration
        self.name2cnt = defaultdict(int)    # sample counts for logkv_mean
        self.level = INFO
        self.dir = dir
        self.output_formats = output_formats
        self.comm = comm
    # Logging API, forwarded
    # ----------------------------------------
    def logkv(self, key, val):
        # last write wins within an iteration
        self.name2val[key] = val
    def logkv_mean(self, key, val):
        # incremental running mean over repeated calls within one iteration
        oldval, cnt = self.name2val[key], self.name2cnt[key]
        self.name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)
        self.name2cnt[key] = cnt + 1
    def dumpkvs(self):
        """Flush the accumulated diagnostics to every output format.

        With an MPI comm, stats are weighted-averaged across ranks first.
        Returns a copy of the dict that was written (for unit testing).
        """
        if self.comm is None:
            d = self.name2val
        else:
            from baselines.common import mpi_util
            d = mpi_util.mpi_weighted_mean(self.comm,
                {name : (val, self.name2cnt.get(name, 1))
                    for (name, val) in self.name2val.items()})
            if self.comm.rank != 0:
                d['dummy'] = 1 # so we don't get a warning about empty dict
        out = d.copy() # Return the dict for unit testing purposes
        for fmt in self.output_formats:
            if isinstance(fmt, KVWriter):
                fmt.writekvs(d)
        self.name2val.clear()
        self.name2cnt.clear()
        return out
    def log(self, *args, level=INFO):
        # only emit when the message level passes the logger's threshold
        if self.level <= level:
            self._do_log(args)
    # Configuration
    # ----------------------------------------
    def set_level(self, level):
        self.level = level
    def set_comm(self, comm):
        self.comm = comm
    def get_dir(self):
        return self.dir
    def close(self):
        for fmt in self.output_formats:
            fmt.close()
    # Misc
    # ----------------------------------------
    def _do_log(self, args):
        # free-form messages go only to sequence-capable writers
        for fmt in self.output_formats:
            if isinstance(fmt, SeqWriter):
                fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
    """Return the MPI rank from environment variables, defaulting to 0.

    Reads the launcher-set variables instead of importing mpi4py, so that
    merely importing this module never triggers MPI_Init().
    """
    for varname in ('PMI_RANK', 'OMPI_COMM_WORLD_RANK'):
        value = os.environ.get(varname)
        if value is not None:
            return int(value)
    return 0
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
    """
    If comm is provided, average all numerical stats across that comm

    Directory resolution order: explicit *dir*, else $OPENAI_LOGDIR, else a
    timestamped folder under the system temp dir. Non-zero MPI ranks get a
    "-rank%03i" filename suffix and, by default, only the 'log' format.
    Installs the new logger as Logger.CURRENT.
    """
    if dir is None:
        dir = os.getenv('OPENAI_LOGDIR')
    if dir is None:
        dir = osp.join(tempfile.gettempdir(),
            datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
    assert isinstance(dir, str)
    dir = os.path.expanduser(dir)
    os.makedirs(os.path.expanduser(dir), exist_ok=True)
    rank = get_rank_without_mpi_import()
    if rank > 0:
        log_suffix = log_suffix + "-rank%03i" % rank
    if format_strs is None:
        if rank == 0:
            format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
        else:
            format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
    # drop empty strings (e.g. from a trailing comma in the env var)
    format_strs = filter(None, format_strs)
    output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
    Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
    if output_formats:
        log('Logging to %s'%dir)
def _configure_default_logger():
    # build the fallback logger once and remember it so reset() can restore it
    configure()
    Logger.DEFAULT = Logger.CURRENT
def reset():
    """Close any custom logger installed by configure() and restore the default."""
    if Logger.CURRENT is not Logger.DEFAULT:
        Logger.CURRENT.close()
        Logger.CURRENT = Logger.DEFAULT
        log('Reset logger')
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
    """Context manager: use a freshly configured logger inside the block,
    then close it and restore whatever logger was active before."""
    prevlogger = Logger.CURRENT
    configure(dir=dir, format_strs=format_strs, comm=comm)
    try:
        yield
    finally:
        Logger.CURRENT.close()
        Logger.CURRENT = prevlogger
# ================================================================
def _demo():
    """Manual smoke test: exercises the free-function logging API against
    /tmp/testlogging (removing any previous contents first)."""
    info("hi")
    debug("shouldn't appear")
    set_level(DEBUG)
    debug("should appear")
    dir = "/tmp/testlogging"
    if os.path.exists(dir):
        shutil.rmtree(dir)
    configure(dir=dir)
    logkv("a", 3)
    logkv("b", 2.5)
    dumpkvs()
    logkv("b", -2.5)
    logkv("a", 5.5)
    dumpkvs()
    info("^^^ should see a = 5.5")
    logkv_mean("b", -22.5)
    logkv_mean("b", -44.4)
    logkv("a", 5.5)
    dumpkvs()
    info("^^^ should see b = -33.3")
    logkv("b", -2.5)
    dumpkvs()
    logkv("a", "longasslongasslongasslongasslongasslongassvalue")
    dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
    """Read a JSON-lines file (one JSON object per line) into a DataFrame."""
    import pandas
    with open(fname, 'rt') as fh:
        records = [json.loads(line) for line in fh]
    return pandas.DataFrame(records)
def read_csv(fname):
    """Read a monitor-style CSV into a DataFrame; '#'-prefixed lines (the
    JSON header written by ResultsWriter) are treated as comments."""
    import pandas
    return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
    """
    path : a tensorboard file OR a directory, where we will find all TB files
           of the form events.*

    Returns a DataFrame with one column per scalar tag (sorted) and one row
    per global step; cells with no recorded value are NaN. Steps <= 0 are
    ignored.
    NOTE(review): relies on tf.train.summary_iterator, a TF1-era API; under
    TF2 this lives at tf.compat.v1.train.summary_iterator -- confirm the
    installed TensorFlow version before use.
    """
    import pandas
    import numpy as np
    from glob import glob
    import tensorflow as tf
    if osp.isdir(path):
        fnames = glob(osp.join(path, "events.*"))
    elif osp.basename(path).startswith("events."):
        fnames = [path]
    else:
        raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
    tag2pairs = defaultdict(list)
    maxstep = 0
    for fname in fnames:
        for summary in tf.train.summary_iterator(fname):
            if summary.step > 0:
                for v in summary.summary.value:
                    pair = (summary.step, v.simple_value)
                    tag2pairs[v.tag].append(pair)
                maxstep = max(summary.step, maxstep)
    data = np.empty((maxstep, len(tag2pairs)))
    data[:] = np.nan
    tags = sorted(tag2pairs.keys())
    for (colidx,tag) in enumerate(tags):
        pairs = tag2pairs[tag]
        for (step, value) in pairs:
            # steps are 1-based in the event files; rows are 0-based
            data[step-1, colidx] = value
    return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
| 14,802 | 28.429423 | 122 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/logger/bench.py | __all__ = ['Monitor', 'get_monitor_files', 'load_results']
from gym.core import Wrapper
import time
from glob import glob
import csv
import os.path as osp
import json
class Monitor(Wrapper):
    """Gym env wrapper that records per-episode reward/length/time.

    Stats are kept in memory (episode_rewards/lengths/times) and, when a
    filename is given, appended to a `*.monitor.csv` file via ResultsWriter.
    Episode info is also injected into step() info dicts under 'episode'.
    """
    EXT = "monitor.csv"
    f = None
    def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
        Wrapper.__init__(self, env=env)
        self.tstart = time.time()
        if filename:
            self.results_writer = ResultsWriter(filename,
                header={"t_start": time.time(), 'env_id' : env.spec and env.spec.id},
                extra_keys=reset_keywords + info_keywords
                )
        else:
            # no file output; only in-memory stats are kept
            self.results_writer = None
        self.reset_keywords = reset_keywords
        self.info_keywords = info_keywords
        self.allow_early_resets = allow_early_resets
        self.rewards = None
        self.needs_reset = True
        self.episode_rewards = []
        self.episode_lengths = []
        self.episode_times = []
        self.total_steps = 0
        self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
    def reset(self, **kwargs):
        """Reset the env; every reset_keyword MUST be passed as a kwarg."""
        self.reset_state()
        for k in self.reset_keywords:
            v = kwargs.get(k)
            if v is None:
                raise ValueError('Expected you to pass kwarg %s into reset'%k)
            self.current_reset_info[k] = v
        return self.env.reset(**kwargs)
    def reset_state(self):
        # guard against resetting mid-episode unless explicitly allowed
        if not self.allow_early_resets and not self.needs_reset:
            raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
        self.rewards = []
        self.needs_reset = False
    def step(self, action):
        if self.needs_reset:
            raise RuntimeError("Tried to step environment that needs reset")
        ob, rew, done, info = self.env.step(action)
        self.update(ob, rew, done, info)
        return (ob, rew, done, info)
    def update(self, ob, rew, done, info):
        """Accumulate reward; on episode end, record stats and write the row."""
        self.rewards.append(rew)
        if done:
            self.needs_reset = True
            eprew = sum(self.rewards)
            eplen = len(self.rewards)
            epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
            for k in self.info_keywords:
                epinfo[k] = info[k]
            self.episode_rewards.append(eprew)
            self.episode_lengths.append(eplen)
            self.episode_times.append(time.time() - self.tstart)
            epinfo.update(self.current_reset_info)
            if self.results_writer:
                self.results_writer.write_row(epinfo)
            # NOTE: the isinstance check below is redundant given the assert
            assert isinstance(info, dict)
            if isinstance(info, dict):
                info['episode'] = epinfo
        self.total_steps += 1
    def close(self):
        # class attribute f defaults to None; only closes if a subclass set it
        if self.f is not None:
            self.f.close()
    def get_total_steps(self):
        return self.total_steps
    def get_episode_rewards(self):
        return self.episode_rewards
    def get_episode_lengths(self):
        return self.episode_lengths
    def get_episode_times(self):
        return self.episode_times
class LoadMonitorResultsError(Exception):
    """Raised by load_results() when a directory contains no monitor files."""
    pass
class ResultsWriter(object):
    """Writes per-episode stats to a `*.monitor.csv` file.

    The file starts with a '#'-prefixed JSON header line (when *header* is a
    dict) followed by CSV rows with columns r (reward), l (length), t (time)
    plus any *extra_keys*. Rows are flushed immediately so partial runs are
    still readable.
    """
    def __init__(self, filename, header='', extra_keys=()):
        self.extra_keys = extra_keys
        assert filename is not None
        # normalize the path so it always ends with Monitor.EXT
        if not filename.endswith(Monitor.EXT):
            if osp.isdir(filename):
                filename = osp.join(filename, Monitor.EXT)
            else:
                filename = filename + "." + Monitor.EXT
        self.f = open(filename, "wt")
        if isinstance(header, dict):
            header = '# {} \n'.format(json.dumps(header))
        self.f.write(header)
        self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+tuple(extra_keys))
        self.logger.writeheader()
        self.f.flush()
    def write_row(self, epinfo):
        """Append one episode-info dict as a CSV row and flush to disk."""
        if self.logger:
            self.logger.writerow(epinfo)
            self.f.flush()
    def close(self):
        """Close the underlying file handle.

        Bug fix: previously the file opened in __init__ was never closed,
        leaking a file descriptor for the life of the process.
        """
        self.f.close()
def get_monitor_files(dir):
    """Return the paths of all `*.monitor.csv` files directly inside *dir*."""
    return glob(osp.join(dir, "*" + Monitor.EXT))
def load_results(dir):
    """Load every monitor file in *dir* into one DataFrame.

    Supports both the current CSV format ('#'-JSON header + CSV body) and
    the deprecated JSON-lines format. Rows from all files are concatenated,
    sorted by wall-clock time 't', and 't' is rebased to the earliest
    t_start across files. Raises LoadMonitorResultsError if no files match.
    """
    import pandas
    monitor_files = (
        glob(osp.join(dir, "*monitor.json")) +
        glob(osp.join(dir, "*monitor.csv"))) # get both csv and (old) json files
    if not monitor_files:
        raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
    dfs = []
    headers = []
    for fname in monitor_files:
        with open(fname, 'rt') as fh:
            if fname.endswith('csv'):
                firstline = fh.readline()
                if not firstline:
                    # empty file: skip it entirely
                    continue
                assert firstline[0] == '#'
                header = json.loads(firstline[1:])
                df = pandas.read_csv(fh, index_col=None)
                headers.append(header)
            elif fname.endswith('json'): # Deprecated json format
                episodes = []
                lines = fh.readlines()
                header = json.loads(lines[0])
                headers.append(header)
                for line in lines[1:]:
                    episode = json.loads(line)
                    episodes.append(episode)
                df = pandas.DataFrame(episodes)
            else:
                assert 0, 'unreachable'
            # convert per-file relative time to absolute wall-clock time
            df['t'] += header['t_start']
        dfs.append(df)
    df = pandas.concat(dfs)
    df.sort_values('t', inplace=True)
    df.reset_index(inplace=True)
    df['t'] -= min(header['t_start'] for header in headers)
    df.headers = headers # HACK to preserve backwards compatibility
    return df
| 5,704 | 34 | 174 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/logger/plot.py | import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from rl_utils.bench import load_results
sns.set(style="dark")
sns.set_context("poster", font_scale=2, rc={"lines.linewidth": 2})
sns.set(rc={"figure.figsize": (15, 8)})
colors = sns.color_palette(palette='muted')
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 150
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue']
def rolling_window(a, window):
    """Return a zero-copy sliding-window view over the last axis of *a*.

    Output shape is a.shape[:-1] + (a.shape[-1] - window + 1, window).
    The view shares memory with *a*; do not write through it.
    """
    out_shape = (*a.shape[:-1], a.shape[-1] - window + 1, window)
    out_strides = (*a.strides, a.strides[-1])
    return np.lib.stride_tricks.as_strided(a, shape=out_shape, strides=out_strides)
def window_func(x, y, window, func):
    """Apply *func* over a sliding window of *y*; return the aligned tail
    of *x* together with the reduced values."""
    windowed = rolling_window(y, window)
    reduced = func(windowed, axis=-1)
    return x[window - 1:], reduced
def ts2xy(ts, xaxis):
    """Convert a monitor DataFrame *ts* into (x, y) arrays for plotting.

    xaxis selects the x quantity: cumulative timesteps, episode index, or
    wall-clock hours; y is always the per-episode reward column.
    """
    if xaxis == X_TIMESTEPS:
        return np.cumsum(ts.l.values), ts.r.values
    if xaxis == X_EPISODES:
        return np.arange(len(ts)), ts.r.values
    if xaxis == X_WALLTIME:
        return ts.t.values / 3600., ts.r.values
    raise NotImplementedError
def plot_curves(xy_list, xaxis, title, plt_order, beta=False):
    """Plot smoothed (EPISODES_WINDOW moving-average) reward curves.

    xy_list  : list of (x, y) arrays from ts2xy()
    xaxis    : x-axis label
    title    : subplot title
    plt_order: matplotlib subplot position code (e.g. 231)
    beta     : algorithm tag ('dqn'/'ddqn'/'dueling') selecting the legend label

    Bug fix: `label` used to be assigned only when beta was one of the three
    known tags, so the default beta=False raised NameError at `label[i]`;
    plotting more curves than labels raised IndexError. Both cases now fall
    back to generic per-curve labels.
    """
    maxx = max(xy[0][-1] for xy in xy_list)
    minx = 0
    if beta == 'dqn':
        label = ['DQN']
    elif beta == 'ddqn':
        label = ['Double-DQN']
    elif beta == 'dueling':
        label = ['Dueling-DQN']
    else:
        label = []
    # pad with generic names so label[i] is always valid
    label = label + ['curve-{}'.format(i) for i in range(len(label), len(xy_list))]
    psub = plt.subplot(plt_order)
    plt.tight_layout()
    plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
    for (i, (x, y)) in enumerate(xy_list):
        # average of the last EPISODES_WINDOW episodes
        x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean)
        psub.plot(x, y_mean, label=label[i])
        psub.set_xlim([minx, maxx])
    psub.set_title(title)
    psub.legend(loc='best')
    psub.set_xlabel(xaxis)
    psub.set_ylabel("rewards")
def plot_results(dirs, num_timesteps, xaxis, task_name, plt_order, beta=False):
    """Load the monitor logs from each directory (truncated to the first
    *num_timesteps* env steps) and plot their smoothed reward curves into
    subplot *plt_order*."""
    tslist = []
    for dir in dirs:
        ts = load_results(dir)
        # keep only episodes that finished within the timestep budget
        ts = ts[ts.l.cumsum() <= num_timesteps]
        tslist.append(ts)
    xy_list = [ts2xy(ts, xaxis) for ts in tslist]
    plot_curves(xy_list, xaxis, task_name, plt_order, beta)
def main():
    """Plot DQN / Double-DQN / Dueling-DQN reward curves for six Atari games
    (one subplot each, 2x3 grid) and save the figure as dueling.png."""
    import argparse
    import os
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dirs', help='List of log directories', nargs = '*', default='logs_dqn/')
    parser.add_argument('--num_timesteps', type=int, default=int(2e7))
    parser.add_argument('--xaxis', help = 'Varible on X-axis', default = X_TIMESTEPS)
    parser.add_argument('--task_name', help = 'Title of plot', default = 'BreakoutNoFrameskip-v4')
    args = parser.parse_args()
    env_name = ['BankHeistNoFrameskip-v4', 'BreakoutNoFrameskip-v4', 'KangarooNoFrameskip-v4', \
                'PongNoFrameskip-v4', 'SeaquestNoFrameskip-v4', 'SpaceInvadersNoFrameskip-v4']
    # one subplot per game (231..236); curves for the three algorithms overlay
    dirs = [os.path.abspath(args.dirs + name) for name in env_name]
    for idx in range(len(dirs)):
        plot_results([dirs[idx]], args.num_timesteps, args.xaxis, env_name[idx], 231+idx, beta='dqn')
    double_dirs = [os.path.abspath('logs_ddqn/' + name) for name in env_name]
    for idx in range(len(dirs)):
        plot_results([double_dirs[idx]], args.num_timesteps, args.xaxis, env_name[idx], 231+idx, beta='ddqn')
    dueling_dirs = [os.path.abspath('logs/' + name) for name in env_name]
    for idx in range(len(dirs)):
        plot_results([dueling_dirs[idx]], args.num_timesteps, args.xaxis, env_name[idx], 231+idx, beta='dueling')
    plt.savefig("dueling.png")
if __name__ == '__main__':
main()
| 3,962 | 38.237624 | 115 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/logger/__init__.py | 0 | 0 | 0 | py | |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/mpi_utils/normalizer.py | import threading
import numpy as np
from mpi4py import MPI
class normalizer:
    """Running mean/std normalizer whose statistics are averaged across MPI
    workers.

    update() accumulates local sums under a thread lock; recompute_stats()
    Allreduce-averages them across ranks and folds them into the totals.
    """
    def __init__(self, size, eps=1e-2, default_clip_range=np.inf):
        self.size = size
        self.eps = eps  # lower bound on std to avoid division blow-up
        self.default_clip_range = default_clip_range
        # per-worker accumulators since the last recompute_stats()
        self.local_sum = np.zeros(self.size, np.float32)
        self.local_sumsq = np.zeros(self.size, np.float32)
        self.local_count = np.zeros(1, np.float32)
        # lifetime totals (count starts at 1 to avoid division by zero)
        self.total_sum = np.zeros(self.size, np.float32)
        self.total_sumsq = np.zeros(self.size, np.float32)
        self.total_count = np.ones(1, np.float32)
        # current statistics used by normalize()
        self.mean = np.zeros(self.size, np.float32)
        self.std = np.ones(self.size, np.float32)
        # protects the local accumulators (update() may run in a worker thread)
        self.lock = threading.Lock()
    # update the parameters of the normalizer
    def update(self, v):
        v = v.reshape(-1, self.size)
        # accumulate sums under the lock
        with self.lock:
            self.local_sum += v.sum(axis=0)
            self.local_sumsq += (np.square(v)).sum(axis=0)
            self.local_count[0] += v.shape[0]
    # sync the parameters across the cpus
    def sync(self, local_sum, local_sumsq, local_count):
        local_sum[...] = self._mpi_average(local_sum)
        local_sumsq[...] = self._mpi_average(local_sumsq)
        local_count[...] = self._mpi_average(local_count)
        return local_sum, local_sumsq, local_count
    def recompute_stats(self):
        # snapshot and reset the local accumulators atomically
        with self.lock:
            local_count = self.local_count.copy()
            local_sum = self.local_sum.copy()
            local_sumsq = self.local_sumsq.copy()
            # reset
            self.local_count[...] = 0
            self.local_sum[...] = 0
            self.local_sumsq[...] = 0
        # sync the stats across MPI workers (outside the lock: MPI call blocks)
        sync_sum, sync_sumsq, sync_count = self.sync(local_sum, local_sumsq, local_count)
        # fold into the lifetime totals
        self.total_sum += sync_sum
        self.total_sumsq += sync_sumsq
        self.total_count += sync_count
        # mean = E[x]; std = sqrt(max(eps^2, E[x^2] - E[x]^2))
        self.mean = self.total_sum / self.total_count
        self.std = np.sqrt(np.maximum(np.square(self.eps), (self.total_sumsq / self.total_count) - np.square(self.total_sum / self.total_count)))
    # average across the cpu's data
    def _mpi_average(self, x):
        buf = np.zeros_like(x)
        MPI.COMM_WORLD.Allreduce(x, buf, op=MPI.SUM)
        buf /= MPI.COMM_WORLD.Get_size()
        return buf
    # normalize the observation
    def normalize(self, v, clip_range=None):
        """Return (v - mean) / std clipped to [-clip_range, clip_range]."""
        if clip_range is None:
            clip_range = self.default_clip_range
        return np.clip((v - self.mean) / (self.std), -clip_range, clip_range)
| 2,777 | 38.126761 | 145 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/mpi_utils/utils.py | from mpi4py import MPI
import numpy as np
import torch
# sync_networks across the different cores
def sync_networks(network):
    """Broadcast rank 0's parameters to every MPI worker so all copies of
    *network* start identical."""
    comm = MPI.COMM_WORLD
    flat_params = _get_flat_params_or_grads(network, mode='params')
    comm.Bcast(flat_params, root=0)
    # set the flat params back to the network
    _set_flat_params_or_grads(network, flat_params, mode='params')
def sync_grads(network):
    """Sum gradients across all MPI workers (Allreduce) and write the result
    back, so every worker applies the same update.

    NOTE(review): the summed grads are NOT divided by the world size here,
    so the effective step scales with the number of workers -- confirm this
    is intended by the callers' learning-rate choice.
    """
    flat_grads = _get_flat_params_or_grads(network, mode='grads')
    comm = MPI.COMM_WORLD
    global_grads = np.zeros_like(flat_grads)
    comm.Allreduce(flat_grads, global_grads, op=MPI.SUM)
    _set_flat_params_or_grads(network, global_grads, mode='grads')
# get the flat grads or params
def _get_flat_params_or_grads(network, mode='params'):
"""
include two kinds: grads and params
"""
attr = 'data' if mode == 'params' else 'grad'
return np.concatenate([getattr(param, attr).cpu().numpy().flatten() for param in network.parameters()])
def _set_flat_params_or_grads(network, flat_params, mode='params'):
"""
include two kinds: grads and params
"""
attr = 'data' if mode == 'params' else 'grad'
# the pointer
pointer = 0
for param in network.parameters():
getattr(param, attr).copy_(torch.tensor(flat_params[pointer:pointer + param.data.numel()]).view_as(param.data))
pointer += param.data.numel()
| 1,427 | 31.454545 | 119 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/mpi_utils/__init__.py | 0 | 0 | 0 | py | |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/running_filter/__init__.py | 0 | 0 | 0 | py | |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/running_filter/running_filter.py | from collections import deque
import numpy as np
# this is from the https://github.com/ikostrikov/pytorch-trpo/blob/master/running_state.py
# from https://github.com/joschu/modular_rl
# http://www.johndcook.com/blog/standard_deviation/
class RunningStat(object):
    """Running mean/variance via Welford's online algorithm.

    From https://github.com/joschu/modular_rl
    (see http://www.johndcook.com/blog/standard_deviation/).
    """
    def __init__(self, shape):
        self._n = 0
        self._M = np.zeros(shape)  # running mean
        self._S = np.zeros(shape)  # running sum of squared deviations

    def push(self, x):
        """Fold one sample into the running statistics."""
        x = np.asarray(x)
        assert x.shape == self._M.shape
        self._n += 1
        if self._n == 1:
            self._M[...] = x
        else:
            prev_mean = self._M.copy()
            self._M[...] = prev_mean + (x - prev_mean) / self._n
            self._S[...] = self._S + (x - prev_mean) * (x - self._M)

    @property
    def n(self):
        return self._n

    @property
    def mean(self):
        return self._M

    @property
    def var(self):
        # with fewer than two samples, fall back to mean**2 (original convention)
        return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)

    @property
    def std(self):
        return np.sqrt(self.var)

    @property
    def shape(self):
        return self._M.shape
class ZFilter:
    """Normalize inputs with running statistics: y = (x - mean) / std.

    Optionally de-means, de-stds, and clips the result to [-clip, clip].
    """
    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        self.demean = demean
        self.destd = destd
        self.clip = clip
        self.rs = RunningStat(shape)

    def __call__(self, x, update=True):
        """Normalize *x*; when *update* is true, fold it into the stats first."""
        if update:
            self.rs.push(x)
        if self.demean:
            x = x - self.rs.mean
        if self.destd:
            x = x / (self.rs.std + 1e-8)
        if self.clip:
            x = np.clip(x, -self.clip, self.clip)
        return x

    def output_shape(self, input_space):
        """The filter does not change the input shape."""
        return input_space.shape
| 1,715 | 23.169014 | 90 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/env_wrapper/create_env.py | from rl_utils.env_wrapper.atari_wrapper import make_atari, wrap_deepmind
from rl_utils.env_wrapper.multi_envs_wrapper import SubprocVecEnv
from rl_utils.env_wrapper.frame_stack import VecFrameStack
from rl_utils.logger import logger, bench
import os
import gym
"""
this functions is to create the environments
"""
def create_single_env(args, rank=0):
    """Build one monitored environment for the given experiment args.

    Atari envs get the deepmind wrappers (frame stacking included); other
    envs are created via gym.make with early resets allowed. Only rank 0
    sets up the log directory.
    NOTE(review): for rank > 0 logger.configure() is never called here, so
    logger.get_dir() may be unset/stale -- confirm callers configure it first.
    """
    # setup the log files
    if rank == 0:
        # os.mkdir fails for nested paths; assumes log_dir's parent exists
        if not os.path.exists(args.log_dir):
            os.mkdir(args.log_dir)
        log_path = args.log_dir + '/{}/'.format(args.env_name)
        logger.configure(log_path)
    # start to create environment
    if args.env_type == 'atari':
        # create the environment
        env = make_atari(args.env_name)
        # the monitor
        env = bench.Monitor(env, logger.get_dir())
        # use the deepmind environment wrapper
        env = wrap_deepmind(env, frame_stack=True)
    else:
        env = gym.make(args.env_name)
        # add log information
        env = bench.Monitor(env, logger.get_dir(), allow_early_resets=True)
    # seed per rank so parallel workers don't collect identical trajectories
    env.seed(args.seed + rank)
    return env
# create multiple environments - for multiple
def create_multiple_envs(args):
    """Build args.num_workers Atari envs running in subprocesses, monitored,
    deepmind-wrapped, and frame-stacked (4 frames). Only Atari is supported."""
    # now only support the atari games
    if args.env_type == 'atari':
        def make_env(rank):
            # returns a thunk so env construction happens inside the subprocess
            def _thunk():
                if not os.path.exists(args.log_dir):
                    os.mkdir(args.log_dir)
                log_path = args.log_dir + '/{}/'.format(args.env_name)
                logger.configure(log_path)
                env = make_atari(args.env_name)
                # set the seed for the environment (per-rank for diversity)
                env.seed(args.seed + rank)
                # set loggler
                env = bench.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
                # use the deepmind environment wrapper
                env = wrap_deepmind(env)
                return env
            return _thunk
        # put into sub processing
        envs = SubprocVecEnv([make_env(i) for i in range(args.num_workers)])
        # then, frame stack
        envs = VecFrameStack(envs, 4)
    else:
        raise NotImplementedError
    return envs
| 2,239 | 34.555556 | 104 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/env_wrapper/multi_envs_wrapper.py | import multiprocessing as mp
import numpy as np
from rl_utils.env_wrapper import VecEnv, CloudpickleWrapper, clear_mpi_env_vars
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop: build the env and serve (cmd, data) requests from
    the pipe until 'close' is received.

    Protocol: 'step' auto-resets on done and replies (ob, reward, done,
    info); 'reset' replies ob; 'render' replies an rgb_array; 
    'get_spaces_spec' replies (observation_space, action_space, spec).
    """
    parent_remote.close()
    env = env_fn_wrapper.x()
    try:
        while True:
            cmd, data = remote.recv()
            if cmd == 'step':
                ob, reward, done, info = env.step(data)
                if done:
                    # auto-reset so the parent always receives a fresh obs
                    ob = env.reset()
                remote.send((ob, reward, done, info))
            elif cmd == 'reset':
                ob = env.reset()
                remote.send(ob)
            elif cmd == 'render':
                remote.send(env.render(mode='rgb_array'))
            elif cmd == 'close':
                remote.close()
                break
            elif cmd == 'get_spaces_spec':
                remote.send((env.observation_space, env.action_space, env.spec))
            else:
                raise NotImplementedError
    except KeyboardInterrupt:
        print('SubprocVecEnv worker: got KeyboardInterrupt')
    finally:
        env.close()
class SubprocVecEnv(VecEnv):
    """
    VecEnv that runs multiple environments in parallel in subproceses and communicates with them via pipes.
    Recommended to use when num_envs > 1 and step() can be a bottleneck.
    """
    def __init__(self, env_fns, spaces=None, context='spawn'):
        """
        Arguments:
        env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        ctx = mp.get_context(context)
        # one pipe per env: parent keeps `remotes`, workers get `work_remotes`
        self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(nenvs)])
        self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            with clear_mpi_env_vars():
                p.start()
        # close the worker ends in the parent so EOF propagates correctly
        for remote in self.work_remotes:
            remote.close()
        # query the first env for the (shared) spaces and spec
        self.remotes[0].send(('get_spaces_spec', None))
        observation_space, action_space, self.spec = self.remotes[0].recv()
        self.viewer = None
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    def step_async(self, actions):
        """Dispatch one action to each worker without waiting for results."""
        self._assert_not_closed()
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        """Collect the results of the last step_async() from every worker."""
        self._assert_not_closed()
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos
    def reset(self):
        self._assert_not_closed()
        for remote in self.remotes:
            remote.send(('reset', None))
        return _flatten_obs([remote.recv() for remote in self.remotes])
    def close_extras(self):
        self.closed = True
        if self.waiting:
            # drain pending step replies before asking workers to exit
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
    def get_images(self):
        """Fetch an rgb_array render from every worker."""
        self._assert_not_closed()
        for pipe in self.remotes:
            pipe.send(('render', None))
        imgs = [pipe.recv() for pipe in self.remotes]
        return imgs
    def _assert_not_closed(self):
        assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
    def __del__(self):
        if not self.closed:
            self.close()
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
| 4,074 | 34.12931 | 128 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/env_wrapper/frame_stack.py | from rl_utils.env_wrapper import VecEnvWrapper
import numpy as np
from gym import spaces
class VecFrameStack(VecEnvWrapper):
    """VecEnv wrapper that stacks the last *nstack* observations along the
    final axis (e.g. channel axis for image observations)."""
    def __init__(self, venv, nstack):
        self.venv = venv
        self.nstack = nstack
        wos = venv.observation_space # wrapped ob space
        # replicate the bounds nstack times along the last axis
        low = np.repeat(wos.low, self.nstack, axis=-1)
        high = np.repeat(wos.high, self.nstack, axis=-1)
        self.stackedobs = np.zeros((venv.num_envs,) + low.shape, low.dtype)
        observation_space = spaces.Box(low=low, high=high, dtype=venv.observation_space.dtype)
        VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
    def step_wait(self):
        obs, rews, news, infos = self.venv.step_wait()
        # shift old frames left; newest observation occupies the tail slots
        self.stackedobs = np.roll(self.stackedobs, shift=-1, axis=-1)
        for (i, new) in enumerate(news):
            if new:
                # episode boundary: clear that env's history
                self.stackedobs[i] = 0
        self.stackedobs[..., -obs.shape[-1]:] = obs
        return self.stackedobs, rews, news, infos
    def reset(self):
        obs = self.venv.reset()
        self.stackedobs[...] = 0
        self.stackedobs[..., -obs.shape[-1]:] = obs
        return self.stackedobs
| 1,162 | 36.516129 | 94 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/env_wrapper/__init__.py | import os
from abc import ABC, abstractmethod
import contextlib
class AlreadySteppingError(Exception):
    """Raised when step_async() is called while an async step is already running."""

    def __init__(self):
        super().__init__('already running an async step')
class NotSteppingError(Exception):
    """Raised when step_wait() is called but no async step is in flight."""

    def __init__(self):
        super().__init__('not running an async step')
class VecEnv(ABC):
    """
    An abstract asynchronous, vectorized environment.
    Used to batch data from multiple copies of an environment, so that
    each observation becomes a batch of observations, and the expected
    action is a batch of actions to be applied per-environment.
    """
    closed = False
    viewer = None
    metadata = {
        'render.modes': ['human', 'rgb_array']
    }
    def __init__(self, num_envs, observation_space, action_space):
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space
    @abstractmethod
    def reset(self):
        """
        Reset all the environments and return an array of
        observations, or a dict of observation arrays.
        If step_async is still doing work, that work will
        be cancelled and step_wait() should not be called
        until step_async() is invoked again.
        """
        pass
    @abstractmethod
    def step_async(self, actions):
        """
        Tell all the environments to start taking a step
        with the given actions.
        Call step_wait() to get the results of the step.
        You should not call this if a step_async run is
        already pending.
        """
        pass
    @abstractmethod
    def step_wait(self):
        """
        Wait for the step taken with step_async().
        Returns (obs, rews, dones, infos):
        - obs: an array of observations, or a dict of
               arrays of observations.
        - rews: an array of rewards
        - dones: an array of "episode done" booleans
        - infos: a sequence of info objects
        """
        pass
    def close_extras(self):
        """
        Clean up the extra resources, beyond what's in this base class.
        Only runs when not self.closed.
        """
        pass
    def close(self):
        """Idempotently close the viewer and any subclass resources."""
        if self.closed:
            return
        if self.viewer is not None:
            self.viewer.close()
        self.close_extras()
        self.closed = True
    def step(self, actions):
        """
        Step the environments synchronously.
        This is available for backwards compatibility.
        """
        self.step_async(actions)
        return self.step_wait()
    def render(self, mode='human'):
        raise NotImplementedError
    def get_images(self):
        """
        Return RGB images from each environment
        """
        raise NotImplementedError
    @property
    def unwrapped(self):
        # walk through wrapper layers to the innermost VecEnv
        if isinstance(self, VecEnvWrapper):
            return self.venv.unwrapped
        else:
            return self
    def get_viewer(self):
        # lazily create the image viewer on first use
        if self.viewer is None:
            from gym.envs.classic_control import rendering
            self.viewer = rendering.SimpleImageViewer()
        return self.viewer
class VecEnvWrapper(VecEnv):
    """
    An environment wrapper that applies to an entire batch
    of environments at once. Unknown public attributes are
    forwarded to the wrapped VecEnv via __getattr__.
    """
    def __init__(self, venv, observation_space=None, action_space=None):
        self.venv = venv
        super().__init__(num_envs=venv.num_envs,
                        observation_space=observation_space or venv.observation_space,
                        action_space=action_space or venv.action_space)
    def step_async(self, actions):
        self.venv.step_async(actions)
    @abstractmethod
    def reset(self):
        pass
    @abstractmethod
    def step_wait(self):
        pass
    def close(self):
        return self.venv.close()
    def render(self, mode='human'):
        return self.venv.render(mode=mode)
    def get_images(self):
        return self.venv.get_images()
    def __getattr__(self, name):
        # delegate unknown public attributes to the wrapped env;
        # private names are blocked to avoid masking genuine bugs
        if name.startswith('_'):
            raise AttributeError("attempted to get missing private attribute '{}'".format(name))
        return getattr(self.venv, name)
class VecEnvObservationWrapper(VecEnvWrapper):
    """VecEnvWrapper that transforms every batched observation via process()."""
    @abstractmethod
    def process(self, obs):
        """Map a batched observation to its transformed version."""
        pass
    def reset(self):
        obs = self.venv.reset()
        return self.process(obs)
    def step_wait(self):
        obs, rews, dones, infos = self.venv.step_wait()
        return self.process(obs), rews, dones, infos
class CloudpickleWrapper(object):
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries
    to use pickle, which cannot handle lambdas/closures such as env factories).
    """

    def __init__(self, x):
        self.x = x

    def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x)

    def __setstate__(self, state):
        # cloudpickle output is a regular pickle stream, so plain pickle loads it
        import pickle
        self.x = pickle.loads(state)
@contextlib.contextmanager
def clear_mpi_env_vars():
    """Temporarily strip MPI launcher environment variables (OMPI_*, PMI_*).

    `from mpi4py import MPI` calls MPI_Init() on import; a child process that
    inherits MPI env vars may believe it is an MPI rank and hang. Wrap
    multiprocessing Process startup in this; the variables are restored on exit.
    """
    saved = {}
    for name, value in list(os.environ.items()):
        if name.startswith(('OMPI_', 'PMI_')):
            saved[name] = value
            del os.environ[name]
    try:
        yield
    finally:
        os.environ.update(saved)
| 5,877 | 26.596244 | 219 | py |
reinforcement-learning-algorithms | reinforcement-learning-algorithms-master/rl_utils/env_wrapper/atari_wrapper.py | import numpy as np
import os
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
"""
the wrapper is taken from the openai baselines
"""
class NoopResetEnv(gym.Wrapper):
    """On reset, take a random number of no-op actions so episodes start
    from varied initial states (deepmind Atari convention)."""
    def __init__(self, env, noop_max=30):
        """Sample initial states by taking random number of no-ops on reset.
        No-op is assumed to be action 0.
        """
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        self.override_num_noops = None  # set externally to force a fixed count
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
    def reset(self, **kwargs):
        """ Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                # episode ended during the no-ops; start over
                obs = self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
    """Press FIRE (and one extra action) after reset, for games that stay
    frozen until the player fires (e.g. Breakout)."""
    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def reset(self, **kwargs):
        self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset(**kwargs)
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset(**kwargs)
        return obs
    def step(self, ac):
        return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
    """Signal done on each life lost, but only truly reset on game over."""
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        self.was_real_done = True  # True when the underlying game actually ended
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    """Repeat each action *skip* times, sum the rewards, and return the
    pixel-wise max of the last two frames (removes Atari sprite flicker)."""
    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
        self._skip = skip
    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            # keep only the last two raw frames for max-pooling
            if i == self._skip - 2: self._obs_buffer[0] = obs
            if i == self._skip - 1: self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info
    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
    """Clip every reward to one of {-1, 0, +1}."""
    def __init__(self, env):
        super(ClipRewardEnv, self).__init__(env)
    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
        """
        Warp frames to 84x84 as done in the Nature paper and later work.
        If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
        observation should be warped.
        """
        super().__init__(env)
        self._width = width
        self._height = height
        self._grayscale = grayscale
        self._key = dict_space_key
        if self._grayscale:
            num_colors = 1
        else:
            num_colors = 3
        # observation space after the resize/grayscale transform
        new_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(self._height, self._width, num_colors),
            dtype=np.uint8,
        )
        if self._key is None:
            original_space = self.observation_space
            self.observation_space = new_space
        else:
            # only replace the selected sub-space of the Dict observation
            original_space = self.observation_space.spaces[self._key]
            self.observation_space.spaces[self._key] = new_space
        # the wrapped space must be an HxWxC uint8 image
        assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
    def observation(self, obs):
        if self._key is None:
            frame = obs
        else:
            frame = obs[self._key]
        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(
            frame, (self._width, self._height), interpolation=cv2.INTER_AREA
        )
        if self._grayscale:
            # cvtColor drops the channel axis; restore it so shape stays HxWx1
            frame = np.expand_dims(frame, -1)
        if self._key is None:
            obs = frame
        else:
            # copy so the caller's dict is not mutated in place
            obs = obs.copy()
            obs[self._key] = frame
        return obs
class FrameStack(gym.Wrapper):
    """Stack the `k` most recent frames along the channel axis.

    Observations are returned as LazyFrames, which share storage between
    overlapping observations instead of copying (see LazyFrames).
    """
    def __init__(self, env, k):
        super(FrameStack, self).__init__(env)
        self.k = k
        self.frames = deque([], maxlen=k)
        obs_shape = env.observation_space.shape
        stacked_shape = obs_shape[:-1] + (obs_shape[-1] * k,)
        self.observation_space = spaces.Box(low=0, high=255, shape=stacked_shape, dtype=env.observation_space.dtype)
    def reset(self):
        """Reset and fill the stack with k copies of the first frame."""
        first_frame = self.env.reset()
        for _ in range(self.k):
            self.frames.append(first_frame)
        return self._get_ob()
    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info
    def _get_ob(self):
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
    """Convert uint8 observations to float32 scaled into [0, 1]."""
    def __init__(self, env):
        super(ScaledFloatFrame, self).__init__(env)
        self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
    def observation(self, observation):
        # careful! This materializes LazyFrames and undoes their memory
        # optimization -- use with smaller replay buffers only.
        scaled = np.array(observation).astype(np.float32)
        return scaled / 255.0
class LazyFrames(object):
    """Memory-saving lazy view over a list of frames.

    Shared frames between overlapping observations are stored only once,
    which matters for DQN-style 1M-frame replay buffers.  The frames are
    concatenated along the last axis only on first access; convert to a
    numpy array right before feeding a model.
    """
    def __init__(self, frames):
        self._frames = frames
        self._out = None
    def _force(self):
        # Concatenate once, then drop the per-frame references.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=-1)
            self._frames = None
        return self._out
    def __array__(self, dtype=None):
        stacked = self._force()
        return stacked if dtype is None else stacked.astype(dtype)
    def __len__(self):
        return len(self._force())
    def __getitem__(self, i):
        return self._force()[i]
    def count(self):
        """Number of stacked channels (size of the last axis)."""
        stacked = self._force()
        return stacked.shape[stacked.ndim - 1]
    def frame(self, i):
        """Return the i-th channel slice of the stacked array."""
        return self._force()[..., i]
def make_atari(env_id, max_episode_steps=None):
    """Create a raw Atari env with noop resets and 4-frame max+skip.

    `env_id` must be a NoFrameskip variant so frame skipping is handled by
    MaxAndSkipEnv rather than inside ALE.  An optional step limit wraps the
    result in TimeLimit.
    """
    env = gym.make(env_id)
    assert 'NoFrameskip' in env.spec.id
    env = MaxAndSkipEnv(NoopResetEnv(env, noop_max=30), skip=4)
    if max_episode_steps is not None:
        env = TimeLimit(env, max_episode_steps=max_episode_steps)
    return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
    """Configure environment for DeepMind-style Atari.

    Applies, in order: episodic-life termination, fire-on-reset (when the
    game supports FIRE), 84x84 grayscale warping, optional [0,1] scaling,
    optional sign reward clipping, and optional 4-frame stacking.
    """
    if episode_life:
        env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env)
    if scale:
        env = ScaledFloatFrame(env)
    if clip_rewards:
        env = ClipRewardEnv(env)
    if frame_stack:
        env = FrameStack(env, 4)
    return env
# time limit
class TimeLimit(gym.Wrapper):
    """Episode-length limiter: forces `done` after `max_episode_steps` steps.

    Mirrors gym's own TimeLimit wrapper: when the limit is hit, `done` is set
    and `info['TimeLimit.truncated']` marks the episode as truncated rather
    than genuinely finished.
    """
    def __init__(self, env, max_episode_steps=None):
        super(TimeLimit, self).__init__(env)
        self._max_episode_steps = max_episode_steps
        self._elapsed_steps = 0
    def step(self, ac):
        observation, reward, done, info = self.env.step(ac)
        self._elapsed_steps += 1
        # Bug fix: the original compared `self._elapsed_steps >= None` when no
        # limit was configured, which raises TypeError on Python 3.  Only
        # enforce the limit when one was actually given.
        if self._max_episode_steps is not None and self._elapsed_steps >= self._max_episode_steps:
            done = True
            info['TimeLimit.truncated'] = True
        return observation, reward, done, info
    def reset(self, **kwargs):
        self._elapsed_steps = 0
        return self.env.reset(**kwargs)
| 10,334 | 32.125 | 130 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/eval.py | """
Run evaluation with saved models.
"""
import random
import argparse
from tqdm import tqdm
import torch
from data.loader import DataLoader
from model.trainer import GCNTrainer
from utils import torch_utils, scorer, constant, helper
from utils.vocab import Vocab
# Command-line interface for evaluating a saved GCN relation-extraction model.
parser = argparse.ArgumentParser()
parser.add_argument('model_dir', type=str, help='Directory of the model.')
parser.add_argument('--model', type=str, default='best_model.pt', help='Name of the model file.')
parser.add_argument('--data_dir', type=str, default='dataset/tacred')
parser.add_argument('--dataset', type=str, default='test', help="Evaluate on dev or test.")
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true')
args = parser.parse_args()

torch.manual_seed(args.seed)
# NOTE(review): random is seeded with the literal 1234, not args.seed -- the
# --seed flag does not affect the `random` module; confirm this is intended.
random.seed(1234)
if args.cpu:
    args.cuda = False
elif args.cuda:
    torch.cuda.manual_seed(args.seed)

# load opt
model_file = args.model_dir + '/' + args.model
print("Loading model from {}".format(model_file))
opt = torch_utils.load_config(model_file)
trainer = GCNTrainer(opt)
trainer.load(model_file)

# load vocab
vocab_file = args.model_dir + '/vocab.pkl'
vocab = Vocab(vocab_file, load=True)
assert opt['vocab_size'] == vocab.size, "Vocab size must match that in the saved model."

# load data
data_file = opt['data_dir'] + '/{}.json'.format(args.dataset)
print("Loading data from {} with batch size {}...".format(data_file, opt['batch_size']))
batch = DataLoader(data_file, opt['batch_size'], opt, vocab, evaluation=True)

helper.print_config(opt)
label2id = constant.LABEL_TO_ID
id2label = dict([(v,k) for k,v in label2id.items()])

# run prediction over all batches, collecting label ids and probabilities
predictions = []
all_probs = []
batch_iter = tqdm(batch)
for i, b in enumerate(batch_iter):
    preds, probs, _ = trainer.predict(b)
    predictions += preds
    all_probs += probs

# map ids back to label strings and score against the gold labels
predictions = [id2label[p] for p in predictions]
p, r, f1 = scorer.score(batch.gold(), predictions, verbose=True)
print("{} set evaluate result: {:.2f}\t{:.2f}\t{:.2f}".format(args.dataset,p,r,f1))
print("Evaluation ended.")
| 2,130 | 30.80597 | 97 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/prepare_vocab.py | """
Prepare vocabulary and initial word vectors.
"""
import json
import pickle
import argparse
import numpy as np
from collections import Counter
from utils import vocab, constant, helper
def parse_args():
    """Build and parse the command-line arguments for vocab preparation."""
    parser = argparse.ArgumentParser(description='Prepare vocab for relation extraction.')
    parser.add_argument('data_dir', help='TACRED directory.')
    parser.add_argument('vocab_dir', help='Output vocab directory.')
    parser.add_argument('--glove_dir', default='dataset/glove', help='GloVe directory.')
    parser.add_argument('--wv_file', default='glove.840B.300d.txt', help='GloVe vector file.')
    parser.add_argument('--wv_dim', type=int, default=300, help='GloVe vector dimension.')
    parser.add_argument('--min_freq', type=int, default=0, help='If > 0, use min_freq as the cutoff.')
    parser.add_argument('--lower', action='store_true', help='If specified, lowercase all words.')
    return parser.parse_args()
def main():
    """Build the vocab and GloVe embedding matrix for a TACRED dataset dir."""
    args = parse_args()

    # input files
    train_file = args.data_dir + '/train.json'
    dev_file = args.data_dir + '/dev.json'
    test_file = args.data_dir + '/test.json'
    wv_file = args.glove_dir + '/' + args.wv_file
    wv_dim = args.wv_dim

    # output files
    helper.ensure_dir(args.vocab_dir)
    vocab_file = args.vocab_dir + '/vocab.pkl'
    emb_file = args.vocab_dir + '/embedding.npy'

    # load files
    print("loading files...")
    train_tokens = load_tokens(train_file)
    dev_tokens = load_tokens(dev_file)
    test_tokens = load_tokens(test_file)
    if args.lower:
        train_tokens, dev_tokens, test_tokens = [[t.lower() for t in tokens] for tokens in\
            (train_tokens, dev_tokens, test_tokens)]

    # load glove
    print("loading glove...")
    glove_vocab = vocab.load_glove_vocab(wv_file, wv_dim)
    print("{} words loaded from glove.".format(len(glove_vocab)))

    # the vocab is built from train tokens only; dev/test are used for OOV stats
    print("building vocab...")
    v = build_vocab(train_tokens, glove_vocab, args.min_freq)

    print("calculating oov...")
    datasets = {'train': train_tokens, 'dev': dev_tokens, 'test': test_tokens}
    for dname, d in datasets.items():
        total, oov = count_oov(d, v)
        print("{} oov: {}/{} ({:.2f}%)".format(dname, oov, total, oov*100.0/total))

    print("building embeddings...")
    embedding = vocab.build_embedding(wv_file, v, wv_dim)
    print("embedding size: {} x {}".format(*embedding.shape))

    # persist the vocab (pickle) and embedding matrix (numpy)
    print("dumping to files...")
    with open(vocab_file, 'wb') as outfile:
        pickle.dump(v, outfile)
    np.save(emb_file, embedding)
    print("all done.")
def load_tokens(filename):
    """Read a TACRED json file and return its tokens as a flat list, with
    subject/object entity spans masked out (entity words never enter the
    vocab)."""
    with open(filename) as infile:
        examples = json.load(infile)
    tokens = []
    for example in examples:
        words = example['token']
        subj_lo, subj_hi = example['subj_start'], example['subj_end']
        obj_lo, obj_hi = example['obj_start'], example['obj_end']
        # do not create vocab for entity words: blank out both spans
        words[subj_lo:subj_hi+1] = ['<PAD>'] * (subj_hi - subj_lo + 1)
        words[obj_lo:obj_hi+1] = ['<PAD>'] * (obj_hi - obj_lo + 1)
        tokens.extend(w for w in words if w != '<PAD>')
    print("{} tokens from {} examples loaded from {}.".format(len(tokens), len(examples), filename))
    return tokens
def build_vocab(tokens, glove_vocab, min_freq):
    """ build vocab from tokens and glove words. """
    counter = Counter(tokens)
    if min_freq > 0:
        # keep every token seen at least min_freq times
        kept = [t for t in counter if counter.get(t) >= min_freq]
    else:
        # otherwise keep every token that has a GloVe vector
        kept = [t for t in counter if t in glove_vocab]
    v = sorted(kept, key=counter.get, reverse=True)
    # special tokens and entity mask tokens always come first
    v = constant.VOCAB_PREFIX + entity_masks() + v
    print("vocab built with {}/{} words.".format(len(v), len(counter)))
    return v
def count_oov(tokens, vocab):
    """Return (total token count, count of tokens not covered by `vocab`)."""
    counts = Counter(tokens)
    total = sum(counts.values())
    covered = sum(counts[w] for w in vocab)
    return total, total - covered
def entity_masks():
    """ Get all entity mask tokens as a list. """
    # skip the first two entries (PAD/UNK) of each NER-to-id mapping
    subj_types = list(constant.SUBJ_NER_TO_ID.keys())[2:]
    obj_types = list(constant.OBJ_NER_TO_ID.keys())[2:]
    return ["SUBJ-" + t for t in subj_types] + ["OBJ-" + t for t in obj_types]
# script entry point
if __name__ == '__main__':
    main()
| 4,313 | 35.252101 | 102 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/train.py | """
Train a model on TACRED.
"""
import os
import sys
from datetime import datetime
import time
import numpy as np
import random
import argparse
from shutil import copyfile
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from data.loader import DataLoader
from model.trainer import GCNTrainer
from utils import torch_utils, scorer, constant, helper
from utils.vocab import Vocab
# Command-line interface for training the GCN relation-extraction model.
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='dataset/tacred')
parser.add_argument('--vocab_dir', type=str, default='dataset/vocab')
parser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')
parser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')
parser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')
parser.add_argument('--hidden_dim', type=int, default=200, help='RNN hidden state size.')
parser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')
parser.add_argument('--input_dropout', type=float, default=0.5, help='Input dropout rate.')
parser.add_argument('--gcn_dropout', type=float, default=0.5, help='GCN layer dropout rate.')
parser.add_argument('--word_dropout', type=float, default=0.04, help='The rate at which randomly set a word to UNK.')
parser.add_argument('--topn', type=int, default=1e10, help='Only finetune top N word embeddings.')
parser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')
parser.add_argument('--no-lower', dest='lower', action='store_false')
parser.set_defaults(lower=False)

parser.add_argument('--prune_k', default=-1, type=int, help='Prune the dependency tree to <= K distance off the dependency path; set to -1 for no pruning.')
parser.add_argument('--conv_l2', type=float, default=0, help='L2-weight decay on conv layers only.')
parser.add_argument('--pooling', choices=['max', 'avg', 'sum'], default='max', help='Pooling function type. Default max.')
parser.add_argument('--pooling_l2', type=float, default=0, help='L2-penalty for all pooling output.')
parser.add_argument('--mlp_layers', type=int, default=2, help='Number of output mlp layers.')
parser.add_argument('--no_adj', dest='no_adj', action='store_true', help="Zero out adjacency matrix for ablation.")

parser.add_argument('--no-rnn', dest='rnn', action='store_false', help='Do not use RNN layer.')
parser.add_argument('--rnn_hidden', type=int, default=200, help='RNN hidden state size.')
parser.add_argument('--rnn_layers', type=int, default=1, help='Number of RNN layers.')
parser.add_argument('--rnn_dropout', type=float, default=0.5, help='RNN dropout rate.')

parser.add_argument('--lr', type=float, default=1.0, help='Applies to sgd and adagrad.')
parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate decay rate.')
parser.add_argument('--decay_epoch', type=int, default=5, help='Decay learning rate after this epoch.')
parser.add_argument('--optim', choices=['sgd', 'adagrad', 'adam', 'adamax'], default='sgd', help='Optimizer: sgd, adagrad, adam or adamax.')
parser.add_argument('--num_epoch', type=int, default=100, help='Number of total training epochs.')
parser.add_argument('--batch_size', type=int, default=50, help='Training batch size.')
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--log', type=str, default='logs.txt', help='Write training log to file.')
parser.add_argument('--save_epoch', type=int, default=100, help='Save model checkpoints every k epochs.')
parser.add_argument('--save_dir', type=str, default='./saved_models', help='Root dir for saving models.')
parser.add_argument('--id', type=str, default='00', help='Model ID under which to save models.')
parser.add_argument('--info', type=str, default='', help='Optional info for the experiment.')

parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')

parser.add_argument('--load', dest='load', action='store_true', help='Load pretrained model.')
parser.add_argument('--model_file', type=str, help='Filename of the pretrained model.')

args = parser.parse_args()

# seed everything for reproducibility
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# NOTE(review): random is seeded with the literal 1234, not args.seed -- the
# --seed flag does not affect the `random` module; confirm this is intended.
random.seed(1234)
if args.cpu:
    args.cuda = False
elif args.cuda:
    torch.cuda.manual_seed(args.seed)
init_time = time.time()

# make opt
opt = vars(args)
label2id = constant.LABEL_TO_ID
opt['num_class'] = len(label2id)

# load vocab
vocab_file = opt['vocab_dir'] + '/vocab.pkl'
vocab = Vocab(vocab_file, load=True)
opt['vocab_size'] = vocab.size
emb_file = opt['vocab_dir'] + '/embedding.npy'
emb_matrix = np.load(emb_file)
assert emb_matrix.shape[0] == vocab.size
assert emb_matrix.shape[1] == opt['emb_dim']

# load data
print("Loading data from {} with batch size {}...".format(opt['data_dir'], opt['batch_size']))
train_batch = DataLoader(opt['data_dir'] + '/train.json', opt['batch_size'], opt, vocab, evaluation=False)
dev_batch = DataLoader(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab, evaluation=True)

# model save directory, e.g. ./saved_models/00
model_id = opt['id'] if len(opt['id']) > 1 else '0' + opt['id']
model_save_dir = opt['save_dir'] + '/' + model_id
opt['model_save_dir'] = model_save_dir
helper.ensure_dir(model_save_dir, verbose=True)

# save config
helper.save_config(opt, model_save_dir + '/config.json', verbose=True)
vocab.save(model_save_dir + '/vocab.pkl')
file_logger = helper.FileLogger(model_save_dir + '/' + opt['log'], header="# epoch\ttrain_loss\tdev_loss\tdev_score\tbest_dev_score")

# print model info
helper.print_config(opt)

# model
if not opt['load']:
    trainer = GCNTrainer(opt, emb_matrix=emb_matrix)
else:
    # load pretrained model
    model_file = opt['model_file']
    print("Loading model from {}".format(model_file))
    model_opt = torch_utils.load_config(model_file)
    model_opt['optim'] = opt['optim']
    trainer = GCNTrainer(model_opt)
    trainer.load(model_file)

id2label = dict([(v,k) for k,v in label2id.items()])
dev_score_history = []
current_lr = opt['lr']

global_step = 0
global_start_time = time.time()
format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
max_steps = len(train_batch) * opt['num_epoch']

# start training
for epoch in range(1, opt['num_epoch']+1):
    train_loss = 0
    for i, batch in enumerate(train_batch):
        start_time = time.time()
        global_step += 1
        loss = trainer.update(batch)
        train_loss += loss
        if global_step % opt['log_step'] == 0:
            duration = time.time() - start_time
            print(format_str.format(datetime.now(), global_step, max_steps, epoch,\
                opt['num_epoch'], loss, duration, current_lr))

    # eval on dev
    print("Evaluating on dev set...")
    predictions = []
    dev_loss = 0
    for i, batch in enumerate(dev_batch):
        preds, _, loss = trainer.predict(batch)
        predictions += preds
        dev_loss += loss
    predictions = [id2label[p] for p in predictions]

    train_loss = train_loss / train_batch.num_examples * opt['batch_size'] # avg loss per batch
    dev_loss = dev_loss / dev_batch.num_examples * opt['batch_size']

    dev_p, dev_r, dev_f1 = scorer.score(dev_batch.gold(), predictions)
    print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}".format(epoch,\
            train_loss, dev_loss, dev_f1))
    dev_score = dev_f1
    file_logger.log("{}\t{:.6f}\t{:.6f}\t{:.4f}\t{:.4f}".format(epoch, train_loss, dev_loss, dev_score, max([dev_score] + dev_score_history)))

    # save checkpoint for this epoch; copy to best_model.pt on improvement
    model_file = model_save_dir + '/checkpoint_epoch_{}.pt'.format(epoch)
    trainer.save(model_file, epoch)
    if epoch == 1 or dev_score > max(dev_score_history):
        copyfile(model_file, model_save_dir + '/best_model.pt')
        print("new best model saved.")
        file_logger.log("new best model saved at epoch {}: {:.2f}\t{:.2f}\t{:.2f}"\
                .format(epoch, dev_p*100, dev_r*100, dev_score*100))
    if epoch % opt['save_epoch'] != 0:
        os.remove(model_file)

    # lr schedule: decay when the dev score stops improving (sgd-family only)
    if len(dev_score_history) > opt['decay_epoch'] and dev_score <= dev_score_history[-1] and \
            opt['optim'] in ['sgd', 'adagrad', 'adadelta']:
        current_lr *= opt['lr_decay']
        trainer.update_lr(current_lr)

    dev_score_history += [dev_score]
    print("")

print("Training ended with {} epochs.".format(epoch))
| 8,638 | 44.708995 | 156 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/utils/constant.py | """
Define constants.
"""
# half-width of the uniform init range for embeddings
EMB_INIT_RANGE = 1.0

# vocab
PAD_TOKEN = '<PAD>'
PAD_ID = 0
UNK_TOKEN = '<UNK>'
UNK_ID = 1
VOCAB_PREFIX = [PAD_TOKEN, UNK_TOKEN]

# hard-coded mappings from fields to ids
# subject / object entity types (ids 0/1 reserved for PAD/UNK)
SUBJ_NER_TO_ID = {PAD_TOKEN: 0, UNK_TOKEN: 1, 'ORGANIZATION': 2, 'PERSON': 3}

OBJ_NER_TO_ID = {PAD_TOKEN: 0, UNK_TOKEN: 1, 'PERSON': 2, 'ORGANIZATION': 3, 'DATE': 4, 'NUMBER': 5, 'TITLE': 6, 'COUNTRY': 7, 'LOCATION': 8, 'CITY': 9, 'MISC': 10, 'STATE_OR_PROVINCE': 11, 'DURATION': 12, 'NATIONALITY': 13, 'CAUSE_OF_DEATH': 14, 'CRIMINAL_CHARGE': 15, 'RELIGION': 16, 'URL': 17, 'IDEOLOGY': 18}

# token-level NER tags
NER_TO_ID = {PAD_TOKEN: 0, UNK_TOKEN: 1, 'O': 2, 'PERSON': 3, 'ORGANIZATION': 4, 'LOCATION': 5, 'DATE': 6, 'NUMBER': 7, 'MISC': 8, 'DURATION': 9, 'MONEY': 10, 'PERCENT': 11, 'ORDINAL': 12, 'TIME': 13, 'SET': 14}

# Penn Treebank POS tags
POS_TO_ID = {PAD_TOKEN: 0, UNK_TOKEN: 1, 'NNP': 2, 'NN': 3, 'IN': 4, 'DT': 5, ',': 6, 'JJ': 7, 'NNS': 8, 'VBD': 9, 'CD': 10, 'CC': 11, '.': 12, 'RB': 13, 'VBN': 14, 'PRP': 15, 'TO': 16, 'VB': 17, 'VBG': 18, 'VBZ': 19, 'PRP$': 20, ':': 21, 'POS': 22, '\'\'': 23, '``': 24, '-RRB-': 25, '-LRB-': 26, 'VBP': 27, 'MD': 28, 'NNPS': 29, 'WP': 30, 'WDT': 31, 'WRB': 32, 'RP': 33, 'JJR': 34, 'JJS': 35, '$': 36, 'FW': 37, 'RBR': 38, 'SYM': 39, 'EX': 40, 'RBS': 41, 'WP$': 42, 'PDT': 43, 'LS': 44, 'UH': 45, '#': 46}

# Universal Dependencies relation labels
DEPREL_TO_ID = {PAD_TOKEN: 0, UNK_TOKEN: 1, 'punct': 2, 'compound': 3, 'case': 4, 'nmod': 5, 'det': 6, 'nsubj': 7, 'amod': 8, 'conj': 9, 'dobj': 10, 'ROOT': 11, 'cc': 12, 'nmod:poss': 13, 'mark': 14, 'advmod': 15, 'appos': 16, 'nummod': 17, 'dep': 18, 'ccomp': 19, 'aux': 20, 'advcl': 21, 'acl:relcl': 22, 'xcomp': 23, 'cop': 24, 'acl': 25, 'auxpass': 26, 'nsubjpass': 27, 'nmod:tmod': 28, 'neg': 29, 'compound:prt': 30, 'mwe': 31, 'parataxis': 32, 'root': 33, 'nmod:npmod': 34, 'expl': 35, 'csubj': 36, 'cc:preconj': 37, 'iobj': 38, 'det:predet': 39, 'discourse': 40, 'csubjpass': 41}

NEGATIVE_LABEL = 'no_relation'

# TACRED relation labels; id 0 is the negative class
LABEL_TO_ID = {'no_relation': 0, 'per:title': 1, 'org:top_members/employees': 2, 'per:employee_of': 3, 'org:alternate_names': 4, 'org:country_of_headquarters': 5, 'per:countries_of_residence': 6, 'org:city_of_headquarters': 7, 'per:cities_of_residence': 8, 'per:age': 9, 'per:stateorprovinces_of_residence': 10, 'per:origin': 11, 'org:subsidiaries': 12, 'org:parents': 13, 'per:spouse': 14, 'org:stateorprovince_of_headquarters': 15, 'per:children': 16, 'per:other_family': 17, 'per:alternate_names': 18, 'org:members': 19, 'per:siblings': 20, 'per:schools_attended': 21, 'per:parents': 22, 'per:date_of_death': 23, 'org:member_of': 24, 'org:founded_by': 25, 'org:website': 26, 'per:cause_of_death': 27, 'org:political/religious_affiliation': 28, 'org:founded': 29, 'per:city_of_death': 30, 'org:shareholders': 31, 'org:number_of_employees/members': 32, 'per:date_of_birth': 33, 'per:city_of_birth': 34, 'per:charges': 35, 'per:stateorprovince_of_death': 36, 'per:religion': 37, 'per:stateorprovince_of_birth': 38, 'per:country_of_birth': 39, 'org:dissolved': 40, 'per:country_of_death': 41}

INFINITY_NUMBER = 1e12
gcn-over-pruned-trees | gcn-over-pruned-trees-master/utils/scorer.py | #!/usr/bin/env python
"""
Score the predictions with gold labels, using precision, recall and F1 metrics.
"""
import argparse
import sys
from collections import Counter
NO_RELATION = "no_relation"
def parse_arguments():
    """Parse the gold/prediction file paths from the command line."""
    parser = argparse.ArgumentParser(description='Score a prediction file using the gold labels.')
    parser.add_argument('gold_file', help='The gold relation file; one relation per line')
    parser.add_argument('pred_file', help='A prediction file; one relation per line, in the same order as the gold file.')
    return parser.parse_args()
def score(key, prediction, verbose=False):
    """Micro-averaged P/R/F1 over all non-NO_RELATION decisions.

    `key` and `prediction` are parallel lists of relation-label strings.
    Returns (precision, recall, f1); with verbose=True also prints
    per-relation statistics to stdout.
    """
    correct_by_relation = Counter()
    guessed_by_relation = Counter()
    gold_by_relation = Counter()
    # Loop over the data to compute a score
    for row in range(len(key)):
        gold = key[row]
        guess = prediction[row]
        # NO_RELATION pairs are ignored; only positive guesses/golds count
        if gold == NO_RELATION and guess == NO_RELATION:
            pass
        elif gold == NO_RELATION and guess != NO_RELATION:
            guessed_by_relation[guess] += 1
        elif gold != NO_RELATION and guess == NO_RELATION:
            gold_by_relation[gold] += 1
        elif gold != NO_RELATION and guess != NO_RELATION:
            guessed_by_relation[guess] += 1
            gold_by_relation[gold] += 1
            if gold == guess:
                correct_by_relation[guess] += 1
    # Print verbose information
    if verbose:
        print("Per-relation statistics:")
        relations = gold_by_relation.keys()
        longest_relation = 0
        for relation in sorted(relations):
            longest_relation = max(len(relation), longest_relation)
        for relation in sorted(relations):
            # (compute the score)
            correct = correct_by_relation[relation]
            guessed = guessed_by_relation[relation]
            gold = gold_by_relation[relation]
            prec = 1.0
            if guessed > 0:
                prec = float(correct) / float(guessed)
            recall = 0.0
            if gold > 0:
                recall = float(correct) / float(gold)
            f1 = 0.0
            if prec + recall > 0:
                f1 = 2.0 * prec * recall / (prec + recall)
            # (print the score)
            # extra spaces pad sub-10%/sub-100% values so columns line up
            sys.stdout.write(("{:<" + str(longest_relation) + "}").format(relation))
            sys.stdout.write("  P: ")
            if prec < 0.1: sys.stdout.write(' ')
            if prec < 1.0: sys.stdout.write(' ')
            sys.stdout.write("{:.2%}".format(prec))
            sys.stdout.write("  R: ")
            if recall < 0.1: sys.stdout.write(' ')
            if recall < 1.0: sys.stdout.write(' ')
            sys.stdout.write("{:.2%}".format(recall))
            sys.stdout.write("  F1: ")
            if f1 < 0.1: sys.stdout.write(' ')
            if f1 < 1.0: sys.stdout.write(' ')
            sys.stdout.write("{:.2%}".format(f1))
            sys.stdout.write("  #: %d" % gold)
            sys.stdout.write("\n")
        print("")
    # Print the aggregate score
    if verbose:
        print("Final Score:")
    prec_micro = 1.0
    if sum(guessed_by_relation.values()) > 0:
        prec_micro = float(sum(correct_by_relation.values())) / float(sum(guessed_by_relation.values()))
    recall_micro = 0.0
    if sum(gold_by_relation.values()) > 0:
        recall_micro = float(sum(correct_by_relation.values())) / float(sum(gold_by_relation.values()))
    f1_micro = 0.0
    if prec_micro + recall_micro > 0.0:
        f1_micro = 2.0 * prec_micro * recall_micro / (prec_micro + recall_micro)
    print( "Precision (micro): {:.3%}".format(prec_micro) )
    print( "   Recall (micro): {:.3%}".format(recall_micro) )
    print( "       F1 (micro): {:.3%}".format(f1_micro) )
    return prec_micro, recall_micro, f1_micro
# script entry point: score a prediction file against a gold file
if __name__ == "__main__":
    # Parse the arguments from stdin
    args = parse_arguments()
    # one relation label per line in each file
    key = [str(line).rstrip('\n') for line in open(str(args.gold_file))]
    prediction = [str(line).rstrip('\n') for line in open(str(args.pred_file))]
    # Check that the lengths match
    if len(prediction) != len(key):
        print("Gold and prediction file must have same number of elements: %d in gold vs %d in prediction" % (len(key), len(prediction)))
        exit(1)
    # Score the predictions
    score(key, prediction, verbose=True)
| 4,324 | 37.616071 | 137 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/utils/helper.py | """
Helper functions.
"""
import os
import subprocess
import json
import argparse
### IO
def check_dir(d):
    """Exit the program if directory `d` does not exist."""
    if os.path.exists(d):
        return
    print("Directory {} does not exist. Exit.".format(d))
    exit(1)
def check_files(files):
    """Exit the program if any non-None path in `files` is missing."""
    for path in files:
        if path is None or os.path.exists(path):
            continue
        print("File {} does not exist. Exit.".format(path))
        exit(1)
def ensure_dir(d, verbose=True):
    """Create directory `d` (and any missing parents) if it does not exist."""
    if os.path.exists(d):
        return
    if verbose:
        print("Directory {} do not exist; creating...".format(d))
    os.makedirs(d)
def save_config(config, path, verbose=True):
    """Serialize `config` to `path` as indented JSON; returns `config`."""
    with open(path, 'w') as outfile:
        outfile.write(json.dumps(config, indent=2))
    if verbose:
        print("Config saved to file {}".format(path))
    return config
def load_config(path, verbose=True):
    """Read a JSON config file from `path` and return the parsed object."""
    with open(path) as f:
        config = json.load(f)
    if verbose:
        print("Config loaded from file {}".format(path))
    return config
def print_config(config):
    """Pretty-print every key/value pair of `config` to stdout."""
    lines = ["Running with the following configs:\n"]
    for key, value in config.items():
        lines.append("\t{} : {}\n".format(key, str(value)))
    print("\n" + "".join(lines) + "\n")
    return
class FileLogger(object):
    """
    Append-only logger that re-opens its file on every write, so the log is
    always flushed to disk.  Any previous log at the same path is removed.
    """
    def __init__(self, filename, header=None):
        self.filename = filename
        if os.path.exists(filename):
            # start fresh: drop the log from any previous run
            os.remove(filename)
        if header is not None:
            with open(filename, 'w') as out:
                out.write("{}\n".format(header))
    def log(self, message):
        """Append `message` (plus a newline) to the log file."""
        with open(self.filename, 'a') as out:
            out.write("{}\n".format(message))
| 1,698 | 24.742424 | 69 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/utils/vocab.py | """
A class for basic vocab operations.
"""
from __future__ import print_function
import os
import random
import numpy as np
import pickle
from utils import constant
random.seed(1234)
np.random.seed(1234)
def build_embedding(wv_file, vocab, wv_dim):
    """Build a |vocab| x wv_dim embedding matrix: rows are filled from the
    GloVe file where available, random uniform in [-1, 1] otherwise."""
    emb = np.random.uniform(-1, 1, (len(vocab), wv_dim))
    emb[constant.PAD_ID] = 0 # <pad> should be all 0
    w2id = {w: i for i, w in enumerate(vocab)}
    with open(wv_file, encoding="utf8") as f:
        for line in f:
            parts = line.split()
            # everything before the trailing wv_dim numbers is the token
            word = ''.join(parts[:-wv_dim])
            if word in w2id:
                emb[w2id[word]] = [float(x) for x in parts[-wv_dim:]]
    return emb
def load_glove_vocab(file, wv_dim):
    """
    Collect the set of all tokens appearing in a GloVe vector file.
    """
    vocab = set()
    with open(file, encoding='utf8') as f:
        for line in f:
            parts = line.split()
            # the token is everything except the trailing wv_dim numbers
            vocab.add(''.join(parts[:-wv_dim]))
    return vocab
class Vocab(object):
    """Token/id mapping, either loaded from a pickle file or built from a
    word counter and saved to one.  Ids 0 and 1 are PAD and UNK."""
    def __init__(self, filename, load=False, word_counter=None, threshold=0):
        """Load the vocab from `filename` when `load` is True; otherwise
        build it from `word_counter` (dropping words with count < `threshold`
        when threshold > 1) and save it to `filename`."""
        if load:
            assert os.path.exists(filename), "Vocab file does not exist at " + filename
            # load from file and ignore all other params
            self.id2word, self.word2id = self.load(filename)
            self.size = len(self.id2word)
            print("Vocab size {} loaded from file".format(self.size))
        else:
            print("Creating vocab from scratch...")
            assert word_counter is not None, "word_counter is not provided for vocab creation."
            self.word_counter = word_counter
            if threshold > 1:
                # remove words that occur less than thres
                self.word_counter = dict([(k,v) for k,v in self.word_counter.items() if v >= threshold])
            # most frequent words get the smallest ids
            self.id2word = sorted(self.word_counter, key=lambda k:self.word_counter[k], reverse=True)
            # add special tokens to the beginning
            self.id2word = [constant.PAD_TOKEN, constant.UNK_TOKEN] + self.id2word
            self.word2id = dict([(self.id2word[idx],idx) for idx in range(len(self.id2word))])
            self.size = len(self.id2word)
            self.save(filename)
            print("Vocab size {} saved to file {}".format(self.size, filename))

    def load(self, filename):
        """Unpickle the id->word list from `filename`; returns (id2word, word2id)."""
        with open(filename, 'rb') as infile:
            id2word = pickle.load(infile)
        word2id = dict([(id2word[idx], idx) for idx in range(len(id2word))])
        return id2word, word2id

    def save(self, filename):
        """Pickle the id->word list to `filename`, replacing any old file."""
        if os.path.exists(filename):
            print("Overwriting old vocab file at " + filename)
            os.remove(filename)
        with open(filename, 'wb') as outfile:
            pickle.dump(self.id2word, outfile)
        return

    def map(self, token_list):
        """
        Map a list of tokens to their ids; unknown tokens map to the UNK id.
        """
        # Bug fix: this used constant.VOCAB_UNK_ID, which is not defined in
        # utils/constant.py (it defines UNK_ID), so any out-of-vocab token
        # raised AttributeError instead of mapping to UNK.
        return [self.word2id[w] if w in self.word2id else constant.UNK_ID for w in token_list]

    def unmap(self, idx_list):
        """
        Unmap ids back to tokens.
        """
        return [self.id2word[idx] for idx in idx_list]

    def get_embeddings(self, word_vectors=None, dim=100):
        """Build a random-uniform embedding matrix in
        [-EMB_INIT_RANGE, EMB_INIT_RANGE], overwriting rows for any word
        present in `word_vectors` (a dict word -> vector of length `dim`)."""
        self.embeddings = 2 * constant.EMB_INIT_RANGE * np.random.rand(self.size, dim) - constant.EMB_INIT_RANGE
        if word_vectors is not None:
            assert len(list(word_vectors.values())[0]) == dim, \
                    "Word vectors does not have required dimension {}.".format(dim)
            for w, idx in self.word2id.items():
                if w in word_vectors:
                    self.embeddings[idx] = np.asarray(word_vectors[w])
        return self.embeddings
| 3,714 | 35.782178 | 112 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/utils/torch_utils.py | """
Utility functions for torch.
"""
import torch
from torch import nn, optim
from torch.optim import Optimizer
### class
class MyAdagrad(Optimizer):
    """My modification of the Adagrad optimizer that allows to specify an initial
    accumulator value. This mimics the behavior of the default Adagrad implementation
    in Tensorflow. The default PyTorch Adagrad uses 0 for the initial accumulator value.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        init_accu_value (float, optional): initial accumulator value.
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    """
    def __init__(self, params, lr=1e-2, lr_decay=0, init_accu_value=0.1, weight_decay=0):
        defaults = dict(lr=lr, lr_decay=lr_decay, init_accu_value=init_accu_value, \
                weight_decay=weight_decay)
        super(MyAdagrad, self).__init__(params, defaults)

        # pre-fill each parameter's squared-gradient accumulator with the
        # constant init_accu_value (instead of PyTorch's default of 0)
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = 0
                state['sum'] = torch.ones(p.data.size()).type_as(p.data) *\
                        init_accu_value

    def share_memory(self):
        # move the accumulators into shared memory (for multiprocessing)
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['sum'].share_memory_()

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad.data
                state = self.state[p]

                state['step'] += 1

                if group['weight_decay'] != 0:
                    if p.grad.data.is_sparse:
                        raise RuntimeError("weight_decay option is not compatible with sparse gradients ")
                    grad = grad.add(group['weight_decay'], p.data)

                # effective lr decays with the step count, as in classic Adagrad
                clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])

                if p.grad.data.is_sparse:
                    grad = grad.coalesce() # the update is non-linear so indices must be unique
                    grad_indices = grad._indices()
                    grad_values = grad._values()
                    size = torch.Size([x for x in grad.size()])

                    def make_sparse(values):
                        constructor = type(p.grad.data)
                        if grad_indices.dim() == 0 or values.dim() == 0:
                            return constructor()
                        return constructor(grad_indices, values, size)
                    # accumulate squared gradients only on the touched indices
                    state['sum'].add_(make_sparse(grad_values.pow(2)))
                    # NOTE(review): `_sparse_mask` and the `add_(scalar, tensor)`
                    # call signature are legacy/private torch APIs -- this code
                    # presumably targets an old PyTorch release; verify against
                    # the installed version.
                    std = state['sum']._sparse_mask(grad)
                    std_values = std._values().sqrt_().add_(1e-10)
                    p.data.add_(-clr, make_sparse(grad_values / std_values))
                else:
                    # dense case: sum += grad^2; p -= clr * grad / sqrt(sum)
                    state['sum'].addcmul_(1, grad, grad)
                    std = state['sum'].sqrt().add_(1e-10)
                    p.data.addcdiv_(-clr, grad, std)

        return loss
### torch specific functions
def get_optimizer(name, parameters, lr, l2=0):
    """Build an optimizer by name.

    Args:
        name: one of 'sgd', 'adagrad'/'myadagrad', 'adam', 'adamax', 'adadelta'.
        parameters: iterable of parameters to optimize.
        lr: learning rate (ignored by adam/adamax, which use their defaults).
        l2: weight decay (L2 penalty).
    Raises:
        Exception: for an unrecognised optimizer name.
    """
    if name == 'sgd':
        return torch.optim.SGD(parameters, lr=lr, weight_decay=l2)
    if name in ('adagrad', 'myadagrad'):
        # use my own adagrad to allow for init accumulator value
        return MyAdagrad(parameters, lr=lr, init_accu_value=0.1, weight_decay=l2)
    if name == 'adam':
        return torch.optim.Adam(parameters, weight_decay=l2)  # use default lr
    if name == 'adamax':
        return torch.optim.Adamax(parameters, weight_decay=l2)  # use default lr
    if name == 'adadelta':
        return torch.optim.Adadelta(parameters, lr=lr, weight_decay=l2)
    raise Exception("Unsupported optimizer: {}".format(name))
def change_lr(optimizer, new_lr):
    """Set the learning rate of every parameter group of *optimizer* to *new_lr*."""
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def flatten_indices(seq_lens, width):
    """Flatten (row, col) positions into offsets row * width + col.

    For each row i, positions 0..seq_lens[i]-1 are emitted, in row order.
    """
    return [row * width + col
            for row, length in enumerate(seq_lens)
            for col in range(length)]
def set_cuda(var, cuda):
    """Move *var* to the GPU when *cuda* is truthy; otherwise return it unchanged."""
    return var.cuda() if cuda else var
def keep_partial_grad(grad, topk):
    """
    Keep only the topk rows of grads; zero out the rest in place.
    """
    num_rows = grad.size(0)
    assert topk < num_rows
    grad.data[topk:].zero_()
    return grad
### model IO
def save(model, optimizer, opt, filename):
    """Serialise model/optimizer state dicts plus the config dict to *filename*.

    Failures are reported with a warning rather than raised.
    """
    checkpoint = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'config': opt,
    }
    try:
        torch.save(checkpoint, filename)
    except BaseException:
        print("[ Warning: model saving failed. ]")
def load(model, optimizer, filename):
    """Load a checkpoint saved by :func:`save` into *model* and *optimizer*.

    Either argument may be None to skip restoring it.
    Returns ``(model, optimizer, opt)`` where ``opt`` is the saved config.
    Raises whatever ``torch.load`` raised if the file cannot be read.
    """
    try:
        dump = torch.load(filename)
    except BaseException:
        print("[ Fail: model loading failed. ]")
        # Bug fix: the original fell through here and then crashed with a
        # confusing NameError on the undefined `dump`; re-raise the real error.
        raise
    if model is not None:
        model.load_state_dict(dump['model'])
    if optimizer is not None:
        optimizer.load_state_dict(dump['optimizer'])
    opt = dump['config']
    return model, optimizer, opt
def load_config(filename):
    """Load only the 'config' entry of a checkpoint saved by :func:`save`.

    Raises whatever ``torch.load`` raised if the file cannot be read.
    """
    try:
        dump = torch.load(filename)
    except BaseException:
        print("[ Fail: model loading failed. ]")
        # Bug fix: the original continued and hit NameError on the undefined
        # `dump`; re-raise the underlying loading error instead.
        raise
    return dump['config']
| 5,681 | 33.858896 | 106 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/data/loader.py | """
Data loader for TACRED json files.
"""
import json
import random
import torch
import numpy as np
from utils import constant, helper, vocab
class DataLoader(object):
    """
    Load data from json files, preprocess and prepare batches.

    Each processed example is a 10-tuple of id sequences plus the relation
    label; batches are stored pre-chunked in ``self.data`` and returned as
    padded LongTensors by ``__getitem__``.
    """
    def __init__(self, filename, batch_size, opt, vocab, evaluation=False):
        self.batch_size = batch_size
        self.opt = opt
        self.vocab = vocab
        self.eval = evaluation
        self.label2id = constant.LABEL_TO_ID
        with open(filename) as infile:
            data = json.load(infile)
        self.raw_data = data
        data = self.preprocess(data, vocab, opt)
        # shuffle for training
        if not evaluation:
            indices = list(range(len(data)))
            random.shuffle(indices)
            data = [data[i] for i in indices]
        self.id2label = dict([(v,k) for k,v in self.label2id.items()])
        # gold labels, in the (possibly shuffled) order of self.data
        self.labels = [self.id2label[d[-1]] for d in data]
        self.num_examples = len(data)
        # chunk into batches
        data = [data[i:i+batch_size] for i in range(0, len(data), batch_size)]
        self.data = data
        print("{} batches created for {}".format(len(data), filename))
    def preprocess(self, data, vocab, opt):
        """ Preprocess the data and convert to ids. """
        processed = []
        for d in data:
            tokens = list(d['token'])
            if opt['lower']:
                tokens = [t.lower() for t in tokens]
            # anonymize tokens: replace entity spans with SUBJ-/OBJ-<NER type>
            ss, se = d['subj_start'], d['subj_end']
            os, oe = d['obj_start'], d['obj_end']
            tokens[ss:se+1] = ['SUBJ-'+d['subj_type']] * (se-ss+1)
            tokens[os:oe+1] = ['OBJ-'+d['obj_type']] * (oe-os+1)
            tokens = map_to_ids(tokens, vocab.word2id)
            pos = map_to_ids(d['stanford_pos'], constant.POS_TO_ID)
            ner = map_to_ids(d['stanford_ner'], constant.NER_TO_ID)
            deprel = map_to_ids(d['stanford_deprel'], constant.DEPREL_TO_ID)
            head = [int(x) for x in d['stanford_head']]
            # every parse must contain exactly one root (head index 0)
            assert any([x == 0 for x in head])
            l = len(tokens)
            subj_positions = get_positions(d['subj_start'], d['subj_end'], l)
            obj_positions = get_positions(d['obj_start'], d['obj_end'], l)
            subj_type = [constant.SUBJ_NER_TO_ID[d['subj_type']]]
            obj_type = [constant.OBJ_NER_TO_ID[d['obj_type']]]
            relation = self.label2id[d['relation']]
            processed += [(tokens, pos, ner, deprel, head, subj_positions, obj_positions, subj_type, obj_type, relation)]
        return processed
    def gold(self):
        """ Return gold labels as a list. """
        return self.labels
    def __len__(self):
        """Number of batches (not examples)."""
        return len(self.data)
    def __getitem__(self, key):
        """ Get a batch with index. """
        if not isinstance(key, int):
            raise TypeError
        if key < 0 or key >= len(self.data):
            raise IndexError
        batch = self.data[key]
        batch_size = len(batch)
        batch = list(zip(*batch))
        assert len(batch) == 10
        # sort all fields by lens for easy RNN operations
        lens = [len(x) for x in batch[0]]
        batch, orig_idx = sort_all(batch, lens)
        # word dropout (training only)
        if not self.eval:
            words = [word_dropout(sent, self.opt['word_dropout']) for sent in batch[0]]
        else:
            words = batch[0]
        # convert to tensors; masks are True at PAD positions (word id 0)
        words = get_long_tensor(words, batch_size)
        masks = torch.eq(words, 0)
        pos = get_long_tensor(batch[1], batch_size)
        ner = get_long_tensor(batch[2], batch_size)
        deprel = get_long_tensor(batch[3], batch_size)
        head = get_long_tensor(batch[4], batch_size)
        subj_positions = get_long_tensor(batch[5], batch_size)
        obj_positions = get_long_tensor(batch[6], batch_size)
        subj_type = get_long_tensor(batch[7], batch_size)
        obj_type = get_long_tensor(batch[8], batch_size)
        rels = torch.LongTensor(batch[9])
        return (words, masks, pos, ner, deprel, head, subj_positions, obj_positions, subj_type, obj_type, rels, orig_idx)
    def __iter__(self):
        """Iterate over batches in order."""
        for i in range(self.__len__()):
            yield self.__getitem__(i)
def map_to_ids(tokens, vocab):
    """Convert *tokens* to ids via the *vocab* dict, using UNK_ID for OOV tokens."""
    return [vocab[tok] if tok in vocab else constant.UNK_ID for tok in tokens]
def get_positions(start_idx, end_idx, length):
    """ Get subj/obj position sequence: negative before the span, 0 inside, positive after. """
    before = list(range(-start_idx, 0))
    inside = [0] * (end_idx - start_idx + 1)
    after = list(range(1, length - end_idx))
    return before + inside + after
def get_long_tensor(tokens_list, batch_size):
    """ Convert list of list of tokens to a padded LongTensor (pad value PAD_ID). """
    max_len = max(len(seq) for seq in tokens_list)
    padded = torch.LongTensor(batch_size, max_len).fill_(constant.PAD_ID)
    for row, seq in enumerate(tokens_list):
        padded[row, :len(seq)] = torch.LongTensor(seq)
    return padded
def sort_all(batch, lens):
    """ Sort all fields by descending order of lens, and return the original indices. """
    # prepend the sort key and the position index, sort rows, then split back
    keyed = [lens, range(len(lens))] + list(batch)
    sorted_cols = [list(col) for col in zip(*sorted(zip(*keyed), reverse=True))]
    return sorted_cols[2:], sorted_cols[1]
def word_dropout(tokens, dropout):
    """ Randomly dropout tokens (IDs) and replace them with <UNK> tokens. """
    return [constant.UNK_ID
            if tok != constant.UNK_ID and np.random.random() < dropout
            else tok
            for tok in tokens]
| 5,487 | 36.848276 | 121 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/model/tree.py | """
Basic operations on trees.
"""
import numpy as np
from collections import defaultdict
class Tree(object):
    """
    Reused tree object from stanfordnlp/treelstm.

    A node holds a (possibly empty) list of children; ``size`` and ``depth``
    are computed lazily and cached on first use.
    """
    def __init__(self):
        self.parent = None
        self.num_children = 0
        self.children = list()

    def add_child(self, child):
        """Attach *child* under this node and set its parent pointer."""
        child.parent = self
        self.num_children += 1
        self.children.append(child)

    def size(self):
        """Number of nodes in the subtree rooted at this node (cached)."""
        # Bug fix: `getattr(self, '_size')` with no default raised
        # AttributeError before the cache existed, so size() never worked.
        if getattr(self, '_size', None) is not None:
            return self._size
        count = 1
        # Bug fix: `xrange` does not exist in Python 3.
        for i in range(self.num_children):
            count += self.children[i].size()
        self._size = count
        return self._size

    def depth(self):
        """Length of the longest downward path from this node (0 for a leaf, cached)."""
        # Bug fix: same missing-default getattr and xrange issues as size().
        if getattr(self, '_depth', None) is not None:
            return self._depth
        count = 0
        if self.num_children > 0:
            for i in range(self.num_children):
                child_depth = self.children[i].depth()
                if child_depth > count:
                    count = child_depth
            count += 1
        self._depth = count
        return self._depth

    def __iter__(self):
        """Pre-order traversal over this node and all descendants."""
        yield self
        for c in self.children:
            for x in c:
                yield x
def head_to_tree(head, tokens, len_, prune, subj_pos, obj_pos):
    """
    Convert a sequence of head indexes into a tree object.

    head: per-token 1-based head index (0 marks the root).
    len_: true (unpadded) sentence length.
    prune: if < 0 keep the full tree; otherwise keep only nodes within
        `prune` hops of the subject-object dependency path.
    subj_pos / obj_pos: position encodings where 0 marks entity tokens.
    """
    tokens = tokens[:len_].tolist()
    head = head[:len_].tolist()
    root = None
    if prune < 0:
        # no pruning: build the complete dependency tree
        nodes = [Tree() for _ in head]
        for i in range(len(nodes)):
            h = head[i]
            nodes[i].idx = i
            nodes[i].dist = -1 # just a filler
            if h == 0:
                root = nodes[i]
            else:
                nodes[h-1].add_child(nodes[i])
    else:
        # find dependency path
        # token indices of the subject and object entities
        subj_pos = [i for i in range(len_) if subj_pos[i] == 0]
        obj_pos = [i for i in range(len_) if obj_pos[i] == 0]
        cas = None
        # collect all ancestors of subject tokens; `cas` accumulates the
        # common ancestors shared by every subject token
        subj_ancestors = set(subj_pos)
        for s in subj_pos:
            h = head[s]
            tmp = [s]
            while h > 0:
                tmp += [h-1]
                subj_ancestors.add(h-1)
                h = head[h-1]
            if cas is None:
                cas = set(tmp)
            else:
                cas.intersection_update(tmp)
        # same for object tokens; `cas` becomes ancestors common to both entities
        obj_ancestors = set(obj_pos)
        for o in obj_pos:
            h = head[o]
            tmp = [o]
            while h > 0:
                tmp += [h-1]
                obj_ancestors.add(h-1)
                h = head[h-1]
            cas.intersection_update(tmp)
        # find lowest common ancestor
        if len(cas) == 1:
            lca = list(cas)[0]
        else:
            child_count = {k:0 for k in cas}
            for ca in cas:
                if head[ca] > 0 and head[ca] - 1 in cas:
                    child_count[head[ca] - 1] += 1
            # the LCA has no child in the CA set
            for ca in cas:
                if child_count[ca] == 0:
                    lca = ca
                    break
        # nodes on the subject->LCA->object dependency path
        path_nodes = subj_ancestors.union(obj_ancestors).difference(cas)
        path_nodes.add(lca)
        # compute distance to path_nodes
        dist = [-1 if i not in path_nodes else 0 for i in range(len_)]
        for i in range(len_):
            if dist[i] < 0:
                # walk up the heads until we hit the path (or fall off the tree)
                stack = [i]
                while stack[-1] >= 0 and stack[-1] not in path_nodes:
                    stack.append(head[stack[-1]] - 1)
                if stack[-1] in path_nodes:
                    for d, j in enumerate(reversed(stack)):
                        dist[j] = d
                else:
                    for j in stack:
                        if j >= 0 and dist[j] < 0:
                            dist[j] = int(1e4) # aka infinity
        # keep only nodes within `prune` hops of the path; the LCA becomes the root
        highest_node = lca
        nodes = [Tree() if dist[i] <= prune else None for i in range(len_)]
        for i in range(len(nodes)):
            if nodes[i] is None:
                continue
            h = head[i]
            nodes[i].idx = i
            nodes[i].dist = dist[i]
            if h > 0 and i != highest_node:
                assert nodes[h-1] is not None
                nodes[h-1].add_child(nodes[i])
        root = nodes[highest_node]
    assert root is not None
    return root
def tree_to_adj(sent_len, tree, directed=True, self_loop=False):
    """
    Convert a tree object to an (numpy) adjacency matrix.

    Edges run parent -> child; with directed=False the matrix is symmetrised,
    and with self_loop=True each visited node gets a diagonal entry.
    """
    adj = np.zeros((sent_len, sent_len), dtype=np.float32)
    visited = []
    frontier = [tree]
    while frontier:
        node = frontier.pop(0)  # BFS over the tree
        visited.append(node.idx)
        for child in node.children:
            adj[node.idx, child.idx] = 1
        frontier.extend(node.children)
    if not directed:
        adj = adj + adj.T
    if self_loop:
        for i in visited:
            adj[i, i] = 1
    return adj
def tree_to_dist(sent_len, tree):
    """Collect per-node ``dist`` values into a length-*sent_len* int array (-1 = absent)."""
    dist = np.full(sent_len, -1, dtype=np.int64)
    for node in tree:
        dist[node.idx] = node.dist
    return dist
| 4,951 | 25.623656 | 75 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/model/gcn.py | """
GCN model for relation extraction.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from model.tree import Tree, head_to_tree, tree_to_adj
from utils import constant, torch_utils
class GCNClassifier(nn.Module):
    """ A wrapper classifier for GCNRelationModel. """
    def __init__(self, opt, emb_matrix=None):
        super().__init__()
        self.gcn_model = GCNRelationModel(opt, emb_matrix=emb_matrix)
        self.classifier = nn.Linear(opt['hidden_dim'], opt['num_class'])
        self.opt = opt

    def conv_l2(self):
        """L2 penalty over the GCN convolution weights."""
        return self.gcn_model.gcn.conv_l2()

    def forward(self, inputs):
        """Return (class logits, pooled sentence representation)."""
        hidden, pooling_output = self.gcn_model(inputs)
        logits = self.classifier(hidden)
        return logits, pooling_output
class GCNRelationModel(nn.Module):
    """GCN-based relation model: embeddings -> (pruned-tree) GCN -> pooled MLP."""
    def __init__(self, opt, emb_matrix=None):
        super().__init__()
        self.opt = opt
        self.emb_matrix = emb_matrix
        # create embedding layers
        self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=constant.PAD_ID)
        self.pos_emb = nn.Embedding(len(constant.POS_TO_ID), opt['pos_dim']) if opt['pos_dim'] > 0 else None
        self.ner_emb = nn.Embedding(len(constant.NER_TO_ID), opt['ner_dim']) if opt['ner_dim'] > 0 else None
        embeddings = (self.emb, self.pos_emb, self.ner_emb)
        self.init_embeddings()
        # gcn layer
        self.gcn = GCN(opt, embeddings, opt['hidden_dim'], opt['num_layers'])
        # output mlp layers; input is [sentence; subject; object] pooled vectors
        in_dim = opt['hidden_dim']*3
        layers = [nn.Linear(in_dim, opt['hidden_dim']), nn.ReLU()]
        for _ in range(self.opt['mlp_layers']-1):
            layers += [nn.Linear(opt['hidden_dim'], opt['hidden_dim']), nn.ReLU()]
        self.out_mlp = nn.Sequential(*layers)
    def init_embeddings(self):
        """Initialise word embeddings (random or pretrained) and set finetuning policy."""
        if self.emb_matrix is None:
            self.emb.weight.data[1:,:].uniform_(-1.0, 1.0)
        else:
            self.emb_matrix = torch.from_numpy(self.emb_matrix)
            self.emb.weight.data.copy_(self.emb_matrix)
        # decide finetuning: freeze all, finetune only the top-n most frequent
        # rows (via a gradient hook), or finetune everything
        if self.opt['topn'] <= 0:
            print("Do not finetune word embedding layer.")
            self.emb.weight.requires_grad = False
        elif self.opt['topn'] < self.opt['vocab_size']:
            print("Finetune top {} word embeddings.".format(self.opt['topn']))
            self.emb.weight.register_hook(lambda x: \
                    torch_utils.keep_partial_grad(x, self.opt['topn']))
        else:
            print("Finetune all embeddings.")
    def forward(self, inputs):
        words, masks, pos, ner, deprel, head, subj_pos, obj_pos, subj_type, obj_type = inputs # unpack
        # per-sentence true lengths (masks is True at PAD positions)
        l = (masks.data.cpu().numpy() == 0).astype(np.int64).sum(1)
        maxlen = max(l)
        def inputs_to_tree_reps(head, words, l, prune, subj_pos, obj_pos):
            # build one pruned dependency tree per sentence and stack their
            # symmetric adjacency matrices into a (batch, maxlen, maxlen) tensor
            head, words, subj_pos, obj_pos = head.cpu().numpy(), words.cpu().numpy(), subj_pos.cpu().numpy(), obj_pos.cpu().numpy()
            trees = [head_to_tree(head[i], words[i], l[i], prune, subj_pos[i], obj_pos[i]) for i in range(len(l))]
            adj = [tree_to_adj(maxlen, tree, directed=False, self_loop=False).reshape(1, maxlen, maxlen) for tree in trees]
            adj = np.concatenate(adj, axis=0)
            adj = torch.from_numpy(adj)
            return Variable(adj.cuda()) if self.opt['cuda'] else Variable(adj)
        adj = inputs_to_tree_reps(head.data, words.data, l, self.opt['prune_k'], subj_pos.data, obj_pos.data)
        h, pool_mask = self.gcn(adj, inputs)
        # pooling
        subj_mask, obj_mask = subj_pos.eq(0).eq(0).unsqueeze(2), obj_pos.eq(0).eq(0).unsqueeze(2) # invert mask
        pool_type = self.opt['pooling']
        h_out = pool(h, pool_mask, type=pool_type)
        subj_out = pool(h, subj_mask, type=pool_type)
        obj_out = pool(h, obj_mask, type=pool_type)
        outputs = torch.cat([h_out, subj_out, obj_out], dim=1)
        outputs = self.out_mlp(outputs)
        return outputs, h_out
class GCN(nn.Module):
    """ A GCN/Contextualized GCN module operated on dependency graphs. """
    def __init__(self, opt, embeddings, mem_dim, num_layers):
        super(GCN, self).__init__()
        self.opt = opt
        self.layers = num_layers
        self.use_cuda = opt['cuda']
        self.mem_dim = mem_dim
        self.in_dim = opt['emb_dim'] + opt['pos_dim'] + opt['ner_dim']
        self.emb, self.pos_emb, self.ner_emb = embeddings
        # rnn layer: optional contextualising BiLSTM before the GCN stack
        if self.opt.get('rnn', False):
            input_size = self.in_dim
            self.rnn = nn.LSTM(input_size, opt['rnn_hidden'], opt['rnn_layers'], batch_first=True, \
                    dropout=opt['rnn_dropout'], bidirectional=True)
            self.in_dim = opt['rnn_hidden'] * 2
            self.rnn_drop = nn.Dropout(opt['rnn_dropout']) # use on last layer output
        self.in_drop = nn.Dropout(opt['input_dropout'])
        self.gcn_drop = nn.Dropout(opt['gcn_dropout'])
        # gcn layer: one Linear per graph-convolution layer
        self.W = nn.ModuleList()
        for layer in range(self.layers):
            input_dim = self.in_dim if layer == 0 else self.mem_dim
            self.W.append(nn.Linear(input_dim, self.mem_dim))
    def conv_l2(self):
        """Sum of squared weights/biases over all GCN layers (L2 penalty term)."""
        conv_weights = []
        for w in self.W:
            conv_weights += [w.weight, w.bias]
        return sum([x.pow(2).sum() for x in conv_weights])
    def encode_with_rnn(self, rnn_inputs, masks, batch_size):
        """Run the BiLSTM over packed sequences and return padded outputs."""
        # number of non-pad tokens per sentence (assumes PAD_ID == 0 — the
        # masks tensor is boolean with True at pad positions)
        seq_lens = list(masks.data.eq(constant.PAD_ID).long().sum(1).squeeze())
        h0, c0 = rnn_zero_state(batch_size, self.opt['rnn_hidden'], self.opt['rnn_layers'])
        rnn_inputs = nn.utils.rnn.pack_padded_sequence(rnn_inputs, seq_lens, batch_first=True)
        rnn_outputs, (ht, ct) = self.rnn(rnn_inputs, (h0, c0))
        rnn_outputs, _ = nn.utils.rnn.pad_packed_sequence(rnn_outputs, batch_first=True)
        return rnn_outputs
    def forward(self, adj, inputs):
        words, masks, pos, ner, deprel, head, subj_pos, obj_pos, subj_type, obj_type = inputs # unpack
        word_embs = self.emb(words)
        embs = [word_embs]
        if self.opt['pos_dim'] > 0:
            embs += [self.pos_emb(pos)]
        if self.opt['ner_dim'] > 0:
            embs += [self.ner_emb(ner)]
        embs = torch.cat(embs, dim=2)
        embs = self.in_drop(embs)
        # rnn layer
        if self.opt.get('rnn', False):
            gcn_inputs = self.rnn_drop(self.encode_with_rnn(embs, masks, words.size()[0]))
        else:
            gcn_inputs = embs
        # gcn layer
        # denom: node degree + 1 (for the self loop); mask marks isolated nodes
        denom = adj.sum(2).unsqueeze(2) + 1
        mask = (adj.sum(2) + adj.sum(1)).eq(0).unsqueeze(2)
        # zero out adj for ablation
        if self.opt.get('no_adj', False):
            adj = torch.zeros_like(adj)
        for l in range(self.layers):
            Ax = adj.bmm(gcn_inputs)
            AxW = self.W[l](Ax)
            AxW = AxW + self.W[l](gcn_inputs) # self loop
            AxW = AxW / denom
            gAxW = F.relu(AxW)
            gcn_inputs = self.gcn_drop(gAxW) if l < self.layers - 1 else gAxW
        return gcn_inputs, mask
def pool(h, mask, type='max'):
    """Pool hidden states over the sequence dimension (dim 1).

    *mask* marks positions to exclude (True = masked out). ``type`` selects
    max-, average- or (default fall-through) sum-pooling.
    """
    if type == 'max':
        masked = h.masked_fill(mask, -constant.INFINITY_NUMBER)
        return torch.max(masked, 1)[0]
    if type == 'avg':
        masked = h.masked_fill(mask, 0)
        return masked.sum(1) / (mask.size(1) - mask.float().sum(1))
    masked = h.masked_fill(mask, 0)
    return masked.sum(1)
def rnn_zero_state(batch_size, hidden_dim, num_layers, bidirectional=True, use_cuda=True):
    """Return zeroed (h0, c0) LSTM initial states, optionally moved to the GPU."""
    directions = 2 if bidirectional else 1
    shape = (num_layers * directions, batch_size, hidden_dim)
    h0 = c0 = Variable(torch.zeros(*shape), requires_grad=False)
    if use_cuda:
        return h0.cuda(), c0.cuda()
    return h0, c0
| 7,886 | 39.239796 | 131 | py |
gcn-over-pruned-trees | gcn-over-pruned-trees-master/model/trainer.py | """
A trainer class.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from model.gcn import GCNClassifier
from utils import constant, torch_utils
class Trainer(object):
    """Abstract training wrapper; subclasses supply update/predict."""
    def __init__(self, opt, emb_matrix=None):
        raise NotImplementedError

    def update(self, batch):
        raise NotImplementedError

    def predict(self, batch):
        raise NotImplementedError

    def update_lr(self, new_lr):
        """Set a new learning rate on the wrapped optimizer."""
        torch_utils.change_lr(self.optimizer, new_lr)

    def load(self, filename):
        """Restore model weights and config from *filename*; exit on failure."""
        try:
            checkpoint = torch.load(filename)
        except BaseException:
            print("Cannot load model from {}".format(filename))
            exit()
        self.model.load_state_dict(checkpoint['model'])
        self.opt = checkpoint['config']

    def save(self, filename, epoch):
        """Persist model weights and config to *filename* (epoch is unused)."""
        state = {
            'model': self.model.state_dict(),
            'config': self.opt,
        }
        try:
            torch.save(state, filename)
            print("model saved to {}".format(filename))
        except BaseException:
            print("[Warning: Saving failed... continuing anyway.]")
def unpack_batch(batch, cuda):
    """Split a 12-field batch into model inputs, labels and bookkeeping fields.

    Fields 0-9 become the model inputs, field 10 the labels; tokens/head/
    subj_pos/obj_pos are returned untouched for later analysis, and lens is
    the per-sentence count of non-pad positions derived from the mask field.
    """
    if cuda:
        inputs = [Variable(field.cuda()) for field in batch[:10]]
        labels = Variable(batch[10].cuda())
    else:
        inputs = [Variable(field) for field in batch[:10]]
        labels = Variable(batch[10])
    tokens, head, subj_pos, obj_pos = batch[0], batch[5], batch[6], batch[7]
    lens = batch[1].eq(0).long().sum(1).squeeze()
    return inputs, labels, tokens, head, subj_pos, obj_pos, lens
class GCNTrainer(Trainer):
    """Concrete Trainer for GCNClassifier: cross-entropy plus optional L2 terms."""
    def __init__(self, opt, emb_matrix=None):
        self.opt = opt
        self.emb_matrix = emb_matrix
        self.model = GCNClassifier(opt, emb_matrix=emb_matrix)
        self.criterion = nn.CrossEntropyLoss()
        # only optimize parameters that require grad (frozen embeddings excluded)
        self.parameters = [p for p in self.model.parameters() if p.requires_grad]
        if opt['cuda']:
            self.model.cuda()
            self.criterion.cuda()
        self.optimizer = torch_utils.get_optimizer(opt['optim'], self.parameters, opt['lr'])
    def update(self, batch):
        """Run one optimization step on *batch*; returns the scalar loss value."""
        inputs, labels, tokens, head, subj_pos, obj_pos, lens = unpack_batch(batch, self.opt['cuda'])
        # step forward
        self.model.train()
        self.optimizer.zero_grad()
        logits, pooling_output = self.model(inputs)
        loss = self.criterion(logits, labels)
        # l2 decay on all conv layers
        if self.opt.get('conv_l2', 0) > 0:
            loss += self.model.conv_l2() * self.opt['conv_l2']
        # l2 penalty on output representations
        if self.opt.get('pooling_l2', 0) > 0:
            loss += self.opt['pooling_l2'] * (pooling_output ** 2).sum(1).mean()
        loss_val = loss.item()
        # backward, with gradient clipping
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.opt['max_grad_norm'])
        self.optimizer.step()
        return loss_val
    def predict(self, batch, unsort=True):
        """Evaluate on *batch*; returns (predictions, probs, loss).

        With unsort=True, predictions/probs are restored to the example order
        before the length-sorting done by the data loader.
        """
        inputs, labels, tokens, head, subj_pos, obj_pos, lens = unpack_batch(batch, self.opt['cuda'])
        orig_idx = batch[11]
        # forward
        self.model.eval()
        logits, _ = self.model(inputs)
        loss = self.criterion(logits, labels)
        probs = F.softmax(logits, 1).data.cpu().numpy().tolist()
        predictions = np.argmax(logits.data.cpu().numpy(), axis=1).tolist()
        if unsort:
            _, predictions, probs = [list(t) for t in zip(*sorted(zip(orig_idx,\
                    predictions, probs)))]
        return predictions, probs, loss.item()
| 3,659 | 32.888889 | 101 | py |
SwinMR | SwinMR-main/main_test_swinmr_CC.py | '''
# -----------------------------------------
Main Program for Testing
SwinMR for MRI_Recon
Dataset: CC
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
import argparse
import cv2
import csv
import sys
import numpy as np
from collections import OrderedDict
import os
import torch
from utils import utils_option as option
from torch.utils.data import DataLoader
from models.network_swinmr import SwinIR as net
from utils import utils_image as util
from data.select_dataset import define_Dataset
import time
from math import ceil
import lpips
import shutil
def main(json_path):
    """Run SwinMR inference over the CC test set: reconstruct every slice,
    save ZF/GT/Recon/difference images, and write per-image plus averaged
    SSIM/PSNR/LPIPS/FID metrics to CSV files under the results directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--opt', type=str, default=json_path, help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=False)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # device = 'cpu'
    # set up model
    if os.path.exists(opt['model_path']):
        print(f"loading model from {opt['model_path']}")
    else:
        print('can\'t find model.')
    model = define_model(opt)
    model.eval()
    model = model.to(device)
    # setup folder and path
    save_dir, border, window_size = setup(opt)
    os.makedirs(save_dir, exist_ok=True)
    # per-image metric accumulators (plain and zero-filled baselines)
    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []
    test_results['lpips'] = []
    test_results['zf_psnr'] = []
    test_results['zf_ssim'] = []
    test_results['zf_lpips'] = []
    # create the CSV files with header rows (appended to later per image)
    with open(os.path.join(save_dir, 'results.csv'), 'w') as cf:
        writer = csv.writer(cf)
        writer.writerow(['METHOD', 'MASK', 'SSIM', 'PSNR', 'LPIPS'])
    with open(os.path.join(save_dir, 'results_ave.csv'), 'w') as cf:
        writer = csv.writer(cf)
        writer.writerow(['METHOD', 'MASK',
                         'SSIM', 'SSIM_STD',
                         'PSNR', 'PSNR_STD',
                         'LPIPS', 'LPIPS_STD',
                         'FID'])
    with open(os.path.join(save_dir, 'zf_results.csv'), 'w') as cf:
        writer = csv.writer(cf)
        writer.writerow(['METHOD', 'MASK', 'SSIM', 'PSNR', 'LPIPS'])
    with open(os.path.join(save_dir, 'zf_results_ave.csv'), 'w') as cf:
        writer = csv.writer(cf)
        writer.writerow(['METHOD', 'MASK',
                         'SSIM', 'SSIM_STD',
                         'PSNR', 'PSNR_STD',
                         'LPIPS', 'LPIPS_STD',
                         'FID'])
    # ----------------------------------------
    # return None for missing key
    # ----------------------------------------
    opt = option.dict_to_nonedict(opt)
    dataset_opt = opt['datasets']['test']
    test_set = define_Dataset(dataset_opt)
    test_loader = DataLoader(test_set, batch_size=1,
                             shuffle=False, num_workers=1,
                             drop_last=False, pin_memory=True)
    loss_fn_alex = lpips.LPIPS(net='alex').to(device)
    for idx, test_data in enumerate(test_loader):
        img_gt = test_data['H'].to(device)
        img_lq = test_data['L'].to(device)
        # inference
        with torch.no_grad():
            # pad input image to be a multiple of window_size
            _, _, h_old, w_old = img_lq.size()
            # old_size = img_lq.size()
            #
            # h_pad = ceil(h_old / (window_size * 8)) * (window_size * 8) - h_old
            # w_pad = ceil(w_old / (window_size * 8)) * (window_size * 8) - w_old
            #
            # img_lq = torch.cat([img_lq, torch.flip(img_lq, [2])], 2)[:, :, :h_old + h_pad, :]
            # img_lq = torch.cat([img_lq, torch.flip(img_lq, [3])], 3)[:, :, :, :w_old + w_pad]
            #
            # img_gt = torch.cat([img_gt, torch.flip(img_gt, [2])], 2)[:, :, :h_old + h_pad, :]
            # img_gt = torch.cat([img_gt, torch.flip(img_gt, [3])], 3)[:, :, :, :w_old + w_pad]
            #
            # print('Padding: {} --> {}; GPU RAM USED: {:2f} G; GPU RAM MAX USED {:2f} G'
            #       .format(old_size, img_lq.size(), torch.cuda.memory_allocated()*1e-9, torch.cuda.max_memory_allocated()*1e-9))
            time_start = time.time()
            img_gen = model(img_lq)
            time_end = time.time()
            time_c = time_end - time_start  # time used
            print('time cost', time_c, 's')
            # crop back to the original spatial size
            img_lq = img_lq[..., :h_old * opt['scale'], :w_old * opt['scale']]
            img_gt = img_gt[..., :h_old * opt['scale'], :w_old * opt['scale']]
            img_gen = img_gen[..., :h_old * opt['scale'], :w_old * opt['scale']]
            # x10-amplified absolute error maps for visualisation
            diff_gen_x10 = torch.mul(torch.abs(torch.sub(img_gt, img_gen)), 10)
            diff_lq_x10 = torch.mul(torch.abs(torch.sub(img_gt, img_lq)), 10)
        # evaluate lpips
        lpips_ = util.calculate_lpips_single(loss_fn_alex, img_gt, img_gen)
        lpips_ = lpips_.data.squeeze().float().cpu().numpy()
        test_results['lpips'].append(lpips_)
        # evaluate lpips zf
        zf_lpips_ = util.calculate_lpips_single(loss_fn_alex, img_gt, img_lq)
        zf_lpips_ = zf_lpips_.data.squeeze().float().cpu().numpy()
        test_results['zf_lpips'].append(zf_lpips_)
        # save image
        img_lq = img_lq.data.squeeze().float().cpu().numpy()
        img_gt = img_gt.data.squeeze().float().cpu().numpy()
        img_gen = img_gen.data.squeeze().float().cpu().numpy()
        diff_gen_x10 = diff_gen_x10.data.squeeze().float().cpu().clamp_(0, 1).numpy()
        diff_lq_x10 = diff_lq_x10.data.squeeze().float().cpu().clamp_(0, 1).numpy()
        # evaluate psnr/ssim
        psnr = util.calculate_psnr_single(img_gt, img_gen, border=border)
        ssim = util.calculate_ssim_single(img_gt, img_gen, border=border)
        test_results['psnr'].append(psnr)
        test_results['ssim'].append(ssim)
        print('Testing {:d} - PSNR: {:.2f} dB; SSIM: {:.4f}; LPIPS: {:.4f} '.format(idx, psnr, ssim, lpips_))
        with open(os.path.join(save_dir, 'results.csv'), 'a') as cf:
            writer = csv.writer(cf)
            writer.writerow(['SwinMR', dataset_opt['mask'],
                             test_results['ssim'][idx], test_results['psnr'][idx], test_results['lpips'][idx]])
        # evaluate psnr/ssim zf
        zf_psnr = util.calculate_psnr_single(img_gt, img_lq, border=border)
        zf_ssim = util.calculate_ssim_single(img_gt, img_lq, border=border)
        test_results['zf_psnr'].append(zf_psnr)
        test_results['zf_ssim'].append(zf_ssim)
        print('ZF Testing {:d} - PSNR: {:.2f} dB; SSIM: {:.4f}; LPIPS: {:.4f} '.format(idx, zf_psnr, zf_ssim, zf_lpips_))
        with open(os.path.join(save_dir, 'zf_results.csv'), 'a') as cf:
            writer = csv.writer(cf)
            writer.writerow(['ZF', dataset_opt['mask'],
                             test_results['zf_ssim'][idx], test_results['zf_psnr'][idx], test_results['zf_lpips'][idx]])
        # convert float [0,1] images to uint8 for writing to disk
        img_lq = (np.clip(img_lq, 0, 1) * 255.0).round().astype(np.uint8)  # float32 to uint8
        img_gt = (np.clip(img_gt, 0, 1) * 255.0).round().astype(np.uint8)  # float32 to uint8
        img_gen = (np.clip(img_gen, 0, 1) * 255.0).round().astype(np.uint8)  # float32 to uint8
        diff_gen_x10 = (diff_gen_x10 * 255.0).round().astype(np.uint8)  # float32 to uint8
        diff_lq_x10 = (diff_lq_x10 * 255.0).round().astype(np.uint8)  # float32 to uint8
        isExists = os.path.exists(os.path.join(save_dir, 'ZF'))
        if not isExists:
            os.makedirs(os.path.join(save_dir, 'ZF'))
        isExists = os.path.exists(os.path.join(save_dir, 'GT'))
        if not isExists:
            os.makedirs(os.path.join(save_dir, 'GT'))
        isExists = os.path.exists(os.path.join(save_dir, 'Recon'))
        if not isExists:
            os.makedirs(os.path.join(save_dir, 'Recon'))
        isExists = os.path.exists(os.path.join(save_dir, 'Different'))
        if not isExists:
            os.makedirs(os.path.join(save_dir, 'Different'))
        cv2.imwrite(os.path.join(save_dir, 'ZF', 'ZF_{:05d}.png'.format(idx)), img_lq)
        cv2.imwrite(os.path.join(save_dir, 'GT', 'GT_{:05d}.png'.format(idx)), img_gt)
        cv2.imwrite(os.path.join(save_dir, 'Recon', 'Recon_{:05d}.png'.format(idx)), img_gen)
        # colour-mapped error maps (JET) for easier visual inspection
        diff_gen_x10_color = cv2.applyColorMap(diff_gen_x10, cv2.COLORMAP_JET)
        diff_lq_x10_color = cv2.applyColorMap(diff_lq_x10, cv2.COLORMAP_JET)
        cv2.imwrite(os.path.join(save_dir, 'Different', 'Diff_Recon_{:05d}.png'.format(idx)), diff_gen_x10_color)
        cv2.imwrite(os.path.join(save_dir, 'Different', 'Diff_ZF_{:05d}.png'.format(idx)), diff_lq_x10_color)
    # summarize psnr/ssim
    ave_psnr = np.mean(test_results['psnr'])
    std_psnr = np.std(test_results['psnr'], ddof=1)
    ave_ssim = np.mean(test_results['ssim'])
    std_ssim = np.std(test_results['ssim'], ddof=1)
    ave_lpips = np.mean(test_results['lpips'])
    std_lpips = np.std(test_results['lpips'], ddof=1)
    print('\n{} \n-- Average PSNR {:.2f} dB ({:.4f} dB)\n-- Average SSIM {:.4f} ({:.6f})\n-- Average LPIPS {:.4f} ({:.6f})'
          .format(save_dir, ave_psnr, std_psnr, ave_ssim, std_ssim, ave_lpips, std_lpips))
    # summarize psnr/ssim zf
    zf_ave_psnr = np.mean(test_results['zf_psnr'])
    zf_std_psnr = np.std(test_results['zf_psnr'], ddof=1)
    zf_ave_ssim = np.mean(test_results['zf_ssim'])
    zf_std_ssim = np.std(test_results['zf_ssim'], ddof=1)
    zf_ave_lpips = np.mean(test_results['zf_lpips'])
    zf_std_lpips = np.std(test_results['zf_lpips'], ddof=1)
    print('\n{} \n-- ZF Average PSNR {:.2f} dB ({:.4f} dB)\n-- ZF Average SSIM {:.4f} ({:.6f})\n-- ZF Average LPIPS {:.4f} ({:.6f})'
          .format(save_dir, zf_ave_psnr, zf_std_psnr, zf_ave_ssim, zf_std_ssim, zf_ave_lpips, zf_std_lpips))
    # FID: computed by shelling out to pytorch_fid over the saved image folders
    log = os.popen("{} -m pytorch_fid {} {} ".format(
        sys.executable,
        os.path.join(save_dir, 'GT'),
        os.path.join(save_dir, 'Recon'))).read()
    print(log)
    # NOTE(review): eval() on subprocess output is fragile/unsafe — any extra
    # text in the tool's stdout breaks this; prefer parsing the number with a
    # regex and float().
    fid = eval(log.replace('FID: ', ''))
    with open(os.path.join(save_dir, 'results_ave.csv'), 'a') as cf:
        writer = csv.writer(cf)
        writer.writerow(['SwinMR', dataset_opt['mask'],
                         ave_ssim, std_ssim,
                         ave_psnr, std_psnr,
                         ave_lpips, std_lpips,
                         fid])
    # FID ZF
    log = os.popen("{} -m pytorch_fid {} {} ".format(
        sys.executable,
        os.path.join(save_dir, 'GT'),
        os.path.join(save_dir, 'ZF'))).read()
    print(log)
    # NOTE(review): same eval() fragility as above.
    zf_fid = eval(log.replace('FID: ', ''))
    with open(os.path.join(save_dir, 'zf_results_ave.csv'), 'a') as cf:
        writer = csv.writer(cf)
        writer.writerow(['ZF', dataset_opt['mask'],
                         zf_ave_ssim, zf_std_ssim,
                         zf_ave_psnr, zf_std_psnr,
                         zf_ave_lpips, zf_std_lpips,
                         zf_fid])
def define_model(args):
    """Build the SwinIR network and load pretrained weights from args['model_path'].

    Accepts checkpoints saved either as {'params': state_dict} or as a bare
    state_dict.
    """
    model = net(upscale=1, in_chans=1, img_size=256, window_size=8,
                img_range=1., depths=[6, 6, 6, 6, 6, 6],
                embed_dim=args['netG']['embed_dim'],
                num_heads=[6, 6, 6, 6, 6, 6],
                mlp_ratio=2, upsampler='', resi_connection='1conv')
    param_key_g = 'params'
    checkpoint = torch.load(args['model_path'])
    state = checkpoint[param_key_g] if param_key_g in checkpoint.keys() else checkpoint
    model.load_state_dict(state, strict=True)
    return model
def setup(args):
save_dir = f"results/{args['task']}/{args['model_name']}"
border = 0
window_size = 8
return save_dir, border, window_size
if __name__ == '__main__':
    # NOTE(review): main() takes a required `json_path` argument with no
    # default, so running this script directly raises TypeError — confirm the
    # intended default option-file path and pass it here.
    main()
| 11,599 | 40.281139 | 134 | py |
SwinMR | SwinMR-main/main_train_swinmr.py | '''
# -----------------------------------------
Main Program for Training
SwinMR for MRI_Recon
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
import os
import sys
import math
import argparse
import random
import cv2
import numpy as np
import logging
import time
import torch
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from utils import utils_logger
from utils import utils_image as util
from utils import utils_option as option
from utils.utils_dist import get_dist_info, init_dist
from utils import utils_early_stopping
from data.select_dataset import define_Dataset
from models.select_model import define_Model
from tensorboardX import SummaryWriter
from collections import OrderedDict
from skimage.transform import resize
import lpips
def main(json_path=''):
    """Train SwinMR: parse options, build dataloaders, create the model, then
    run the train / log / save / validate loop until the process is killed.

    Args:
        json_path (str): default path to the option JSON file; overridable on
            the command line via ``--opt``.
    """
    '''
    # ----------------------------------------
    # Step--1 (prepare opt)
    # ----------------------------------------
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('--opt', type=str, default=json_path, help='Path to option JSON file.')
    parser.add_argument('--launcher', default='pytorch', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    # parser.add_argument('--dist', default=False)
    opt = option.parse(parser.parse_args().opt, is_train=True)
    # opt['dist'] = parser.parse_args().dist

    # distributed settings
    if opt['dist']:
        init_dist('pytorch')
    opt['rank'], opt['world_size'] = get_dist_info()

    # only rank 0 creates output directories (pretrained-model paths excluded)
    if opt['rank'] == 0:
        util.mkdirs((path for key, path in opt['path'].items() if 'pretrained' not in key))

    # update opt: resume from the latest checkpoint found on disk (0 if none)
    init_iter_G, init_path_G = option.find_last_checkpoint(opt['path']['models'], net_type='G')
    init_iter_E, init_path_E = option.find_last_checkpoint(opt['path']['models'], net_type='E')
    opt['path']['pretrained_netG'] = init_path_G
    opt['path']['pretrained_netE'] = init_path_E
    init_iter_optimizerG, init_path_optimizerG = option.find_last_checkpoint(opt['path']['models'], net_type='optimizerG')
    opt['path']['pretrained_optimizerG'] = init_path_optimizerG
    current_step = max(init_iter_G, init_iter_E, init_iter_optimizerG)

    # save opt to a '../option.json' file
    if opt['rank'] == 0:
        option.save(opt)

    # return None for missing key
    opt = option.dict_to_nonedict(opt)

    # configure logger (rank 0 only; other ranks stay silent)
    if opt['rank'] == 0:
        # logger
        logger_name = 'train'
        utils_logger.logger_info(logger_name, os.path.join(opt['path']['log'], logger_name+'.log'))
        logger = logging.getLogger(logger_name)
        logger.info(option.dict2str(opt))
        # tensorboardX log
        logger_tensorboard = SummaryWriter(os.path.join(opt['path']['log']))

    # set seed (from options if given, otherwise random) for reproducibility
    seed = opt['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    print('Random seed: {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    '''
    # ----------------------------------------
    # Step--2 (creat dataloader)
    # ----------------------------------------
    '''
    # ----------------------------------------
    # 1) create_dataset
    # 2) creat_dataloader for train and test
    # ----------------------------------------
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = define_Dataset(dataset_opt)
            train_size = int(math.ceil(len(train_set) / dataset_opt['dataloader_batch_size']))
            if opt['rank'] == 0:
                logger.info('Number of train images: {:,d}, iters: {:,d}'.format(len(train_set), train_size))
            if opt['dist']:
                # DDP: the global batch size / worker count is split evenly across GPUs
                train_sampler = DistributedSampler(train_set, shuffle=dataset_opt['dataloader_shuffle'], drop_last=True, seed=seed)
                train_loader = DataLoader(train_set,
                                          batch_size=dataset_opt['dataloader_batch_size']//opt['num_gpu'],
                                          shuffle=False,
                                          num_workers=dataset_opt['dataloader_num_workers']//opt['num_gpu'],
                                          drop_last=True,
                                          pin_memory=False,
                                          sampler=train_sampler)
            else:
                train_loader = DataLoader(train_set,
                                          batch_size=dataset_opt['dataloader_batch_size'],
                                          shuffle=dataset_opt['dataloader_shuffle'],
                                          num_workers=dataset_opt['dataloader_num_workers'],
                                          drop_last=True,
                                          pin_memory=False)
        elif phase == 'test':
            test_set = define_Dataset(dataset_opt)
            test_loader = DataLoader(test_set, batch_size=1,
                                     shuffle=False, num_workers=1,
                                     drop_last=False, pin_memory=False)
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)

    '''
    # ----------------------------------------
    # Step--3 (initialize model)
    # ----------------------------------------
    '''
    # define model
    model = define_Model(opt)
    model.init_train()

    # define LPIPS function (AlexNet backbone, used for validation metric only)
    loss_fn_alex = lpips.LPIPS(net='alex').to(model.device)

    # define early stopping (currently unused: the call site below is commented out)
    if opt['train']['is_early_stopping']:
        early_stopping = utils_early_stopping.EarlyStopping(patience=opt['train']['early_stopping_num'])

    # record
    if opt['rank'] == 0:
        logger.info(model.info_network())
        logger.info(model.info_params())

    '''
    # ----------------------------------------
    # Step--4 (main training)
    # ----------------------------------------
    '''
    for epoch in range(100000000):  # keep running; stopped externally
        if opt['dist']:
            train_sampler.set_epoch(epoch)

        for i, train_data in enumerate(train_loader):
            current_step += 1

            # -------------------------------
            # 1) update learning rate
            # -------------------------------
            model.update_learning_rate(current_step)

            # -------------------------------
            # 2) feed patch pairs
            # -------------------------------
            model.feed_data(train_data)

            # -------------------------------
            # 3) optimize parameters
            # -------------------------------
            model.optimize_parameters(current_step)

            # -------------------------------
            # 4) training information
            # -------------------------------
            if current_step % opt['train']['checkpoint_print'] == 0 and opt['rank'] == 0:
                logs = model.current_log()  # such as loss
                message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(epoch, current_step, model.current_learning_rate())
                for k, v in logs.items():
                    message += '{:s}: {:.3e} '.format(k, v)
                logger.info(message)
                # record train loss ('preceptual' key spelling matches the model's log dict)
                logger_tensorboard.add_scalar('Learning Rate', model.current_learning_rate(), global_step=current_step)
                logger_tensorboard.add_scalar('TRAIN Generator LOSS/G_loss', logs['G_loss'], global_step=current_step)
                if 'G_loss_image' in logs.keys():
                    logger_tensorboard.add_scalar('TRAIN Generator LOSS/G_loss_image', logs['G_loss_image'], global_step=current_step)
                if 'G_loss_frequency' in logs.keys():
                    logger_tensorboard.add_scalar('TRAIN Generator LOSS/G_loss_frequency', logs['G_loss_frequency'], global_step=current_step)
                if 'G_loss_preceptual' in logs.keys():
                    logger_tensorboard.add_scalar('TRAIN Generator LOSS/G_loss_preceptual', logs['G_loss_preceptual'], global_step=current_step)

            # -------------------------------
            # 5) save model
            # -------------------------------
            if current_step % opt['train']['checkpoint_save'] == 0 and opt['rank'] == 0:
                logger.info('Saving the model.')
                model.save(current_step)

            # -------------------------------
            # 6) testing
            # -------------------------------
            if current_step % opt['train']['checkpoint_test'] == 0 and opt['rank'] == 0:
                # create folder for FID (GT / reconstruction / zero-filled dumps)
                img_dir_tmp_H = os.path.join(opt['path']['images'], 'tempH')
                util.mkdir(img_dir_tmp_H)
                img_dir_tmp_E = os.path.join(opt['path']['images'], 'tempE')
                util.mkdir(img_dir_tmp_E)
                img_dir_tmp_L = os.path.join(opt['path']['images'], 'tempL')
                util.mkdir(img_dir_tmp_L)

                # create result dict
                test_results = OrderedDict()
                test_results['psnr'] = []
                test_results['ssim'] = []
                test_results['lpips'] = []
                test_results['G_loss'] = []
                test_results['G_loss_image'] = []
                test_results['G_loss_frequency'] = []
                test_results['G_loss_preceptual'] = []

                for idx, test_data in enumerate(test_loader):
                    with torch.no_grad():
                        img_info = test_data['img_info'][0]
                        img_dir = os.path.join(opt['path']['images'], img_info)

                        # testing and adjust resolution (pad/crop so H,W divide the window size)
                        model.feed_data(test_data)
                        model.check_windowsize()
                        model.test()
                        model.recover_windowsize()

                        # acquire test result
                        results = model.current_results_gpu()

                        # calculate LPIPS (GPU | torch.tensor)
                        # L: zero-filled input, E: estimate/reconstruction, H: ground truth
                        L_img = results['L']
                        E_img = results['E']
                        H_img = results['H']
                        current_lpips = util.calculate_lpips_single(loss_fn_alex, H_img, E_img).data.squeeze().float().cpu().numpy()

                        # calculate PSNR SSIM (CPU | np.float)
                        L_img = util.tensor2float(L_img)
                        E_img = util.tensor2float(E_img)
                        H_img = util.tensor2float(H_img)
                        current_psnr = util.calculate_psnr_single(H_img, E_img, border=0)
                        current_ssim = util.calculate_ssim_single(H_img, E_img, border=0)

                        # record metrics
                        test_results['psnr'].append(current_psnr)
                        test_results['ssim'].append(current_ssim)
                        test_results['lpips'].append(current_lpips)

                        # save samples (only the first 5 slices, to limit disk use)
                        if idx < 5:
                            util.mkdir(img_dir)
                            cv2.imwrite(os.path.join(img_dir, 'ZF_{:05d}.png'.format(current_step)), np.clip(L_img, 0, 1) * 255)
                            cv2.imwrite(os.path.join(img_dir, 'Recon_{:05d}.png'.format(current_step)), np.clip(E_img, 0, 1) * 255)
                            cv2.imwrite(os.path.join(img_dir, 'GT_{:05d}.png'.format(current_step)), np.clip(H_img, 0, 1) * 255)

                        # dump every slice (optionally resized) for the FID computation below
                        if opt['datasets']['test']['resize_for_fid']:
                            resize_for_fid = opt['datasets']['test']['resize_for_fid']
                            cv2.imwrite(os.path.join(img_dir_tmp_L, 'ZF_{:05d}.png'.format(idx)), resize(np.clip(L_img, 0, 1), (resize_for_fid[0], resize_for_fid[1])) * 255)
                            cv2.imwrite(os.path.join(img_dir_tmp_E, 'Recon_{:05d}.png'.format(idx)), resize(np.clip(E_img, 0, 1), (resize_for_fid[0], resize_for_fid[1])) * 255)
                            cv2.imwrite(os.path.join(img_dir_tmp_H, 'GT_{:05d}.png'.format(idx)), resize(np.clip(H_img, 0, 1), (resize_for_fid[0], resize_for_fid[1])) * 255)
                        else:
                            cv2.imwrite(os.path.join(img_dir_tmp_L, 'ZF_{:05d}.png'.format(idx)), np.clip(L_img, 0, 1) * 255)
                            cv2.imwrite(os.path.join(img_dir_tmp_E, 'Recon_{:05d}.png'.format(idx)), np.clip(E_img, 0, 1) * 255)
                            cv2.imwrite(os.path.join(img_dir_tmp_H, 'GT_{:05d}.png'.format(idx)), np.clip(H_img, 0, 1) * 255)

                # summarize psnr/ssim/lpips
                ave_psnr = np.mean(test_results['psnr'])
                # std_psnr = np.std(test_results['psnr'], ddof=1)
                ave_ssim = np.mean(test_results['ssim'])
                # std_ssim = np.std(test_results['ssim'], ddof=1)
                ave_lpips = np.mean(test_results['lpips'])
                # std_lpips = np.std(test_results['lpips'], ddof=1)

                # calculate FID by shelling out to pytorch_fid on the dumped PNGs
                if opt['dist']:
                    # DistributedDataParallel (If multiple GPUs are used to train, use the 2nd GPU for FID calculation.)
                    log = os.popen("{} -m pytorch_fid {} {} ".format(
                        sys.executable,
                        img_dir_tmp_H,
                        img_dir_tmp_E)).read()
                else:
                    # DataParallel (If multiple GPUs are used to train, use the 2nd GPU for FID calculation for unbalance of GPU menory use.)
                    if len(opt['gpu_ids']) > 1:
                        log = os.popen("{} -m pytorch_fid --device cuda:1 {} {} ".format(
                            sys.executable,
                            img_dir_tmp_H,
                            img_dir_tmp_E)).read()
                    else:
                        log = os.popen("{} -m pytorch_fid {} {} ".format(
                            sys.executable,
                            img_dir_tmp_H,
                            img_dir_tmp_E)).read()
                print(log)
                # NOTE(review): eval() on subprocess output is fragile (and unsafe if the
                # output format ever changes) — float(...) would be preferable.
                fid = eval(log.replace('FID: ', ''))

                # testing log
                logger.info('<epoch:{:3d}, iter:{:8,d}, Average PSNR : {:<.2f}; Average Average SSIM : {:<.4f}; LPIPS : {:<.4f}; FID : {:<.2f}'
                            .format(epoch, current_step, ave_psnr, ave_ssim, ave_lpips, fid))
                logger_tensorboard.add_scalar('VALIDATION PSNR', ave_psnr, global_step=current_step)
                logger_tensorboard.add_scalar('VALIDATION SSIM', ave_ssim, global_step=current_step)
                logger_tensorboard.add_scalar('VALIDATION LPIPS', ave_lpips, global_step=current_step)
                logger_tensorboard.add_scalar('VALIDATION FID', fid, global_step=current_step)

                # # early stopping
                # if opt['train']['is_early_stopping']:
                #     early_stopping(ave_psnr, model, epoch, current_step)
                #     if early_stopping.is_save:
                #         logger.info('Saving the model by early stopping')
                #         model.save(f'best_{current_step}')
                #     if early_stopping.early_stop:
                #         print("Early stopping!")
                #         break

    # unreachable in practice (the epoch loop above is effectively infinite)
    print("Training Stop")
# Script entry point: start training when invoked directly.
if __name__ == '__main__':
    main()
| 15,434 | 43.353448 | 176 | py |
SwinMR | SwinMR-main/models/model_base.py | import os
import torch
import torch.nn as nn
from utils.utils_bnorm import merge_bn, tidy_sequential
from torch.nn.parallel import DataParallel, DistributedDataParallel
class ModelBase():
    """Base class for train/test models.

    Holds the option dict, device, and LR schedulers, and provides shared
    utilities: device/parallel wrapping, network description, checkpoint
    save/load, EMA update, and batch-norm merging. Subclasses override the
    empty hook methods (init_train, feed_data, optimize_parameters, ...).
    """

    def __init__(self, opt):
        self.opt = opt                         # opt
        self.save_dir = opt['path']['models']  # save models
        self.device = torch.device('cuda' if opt['gpu_ids'] is not None else 'cpu')
        self.is_train = opt['is_train']        # training or not
        self.schedulers = []                   # schedulers

    """
    # ----------------------------------------
    # Preparation before training with data
    # Save model during training
    # ----------------------------------------
    """

    # The following hooks are intentionally empty; concrete models implement them.
    def init_train(self):
        pass

    def load(self):
        pass

    def save(self, label):
        pass

    def define_loss(self):
        pass

    def define_optimizer(self):
        pass

    def define_scheduler(self):
        pass

    """
    # ----------------------------------------
    # Optimization during training with data
    # Testing/evaluation
    # ----------------------------------------
    """

    def feed_data(self, data):
        pass

    def optimize_parameters(self):
        pass

    def current_visuals(self):
        pass

    def current_losses(self):
        pass

    def update_learning_rate(self, n):
        # n: current iteration, forwarded to every scheduler
        for scheduler in self.schedulers:
            scheduler.step(n)

    def current_learning_rate(self):
        # LR of the first scheduler is taken as representative
        return self.schedulers[0].get_last_lr()[0]

    def requires_grad(self, model, flag=True):
        # freeze (flag=False) or unfreeze (flag=True) all parameters of `model`
        for p in model.parameters():
            p.requires_grad = flag

    """
    # ----------------------------------------
    # Information of net
    # ----------------------------------------
    """

    def print_network(self):
        pass

    def info_network(self):
        pass

    def print_params(self):
        pass

    def info_params(self):
        pass

    def get_bare_model(self, network):
        """Get bare model, especially under wrapping with
        DistributedDataParallel or DataParallel.
        """
        if isinstance(network, (DataParallel, DistributedDataParallel)):
            network = network.module
        return network

    def model_to_device(self, network):
        """Model to device. It also warps models with DistributedDataParallel
        or DataParallel.
        Args:
            network (nn.Module)
        """
        network = network.to(self.device)
        if self.opt['dist']:
            find_unused_parameters = self.opt['find_unused_parameters']
            network = DistributedDataParallel(network, device_ids=[torch.cuda.current_device()], find_unused_parameters=find_unused_parameters)
        else:
            network = DataParallel(network)
        return network

    # ----------------------------------------
    # network name and number of parameters
    # ----------------------------------------
    def describe_network(self, network):
        network = self.get_bare_model(network)
        msg = '\n'
        msg += 'Networks name: {}'.format(network.__class__.__name__) + '\n'
        msg += 'Params number: {}'.format(sum(map(lambda x: x.numel(), network.parameters()))) + '\n'
        msg += 'Net structure:\n{}'.format(str(network)) + '\n'
        return msg

    # ----------------------------------------
    # parameters description
    # ----------------------------------------
    def describe_params(self, network):
        # tabulates mean/min/max/std/shape for every state-dict entry
        network = self.get_bare_model(network)
        msg = '\n'
        msg += ' | {:^6s} | {:^6s} | {:^6s} | {:^6s} || {:<20s}'.format('mean', 'min', 'max', 'std', 'shape', 'param_name') + '\n'
        for name, param in network.state_dict().items():
            if not 'num_batches_tracked' in name:
                v = param.data.clone().float()
                msg += ' | {:>6.3f} | {:>6.3f} | {:>6.3f} | {:>6.3f} | {} || {:s}'.format(v.mean(), v.min(), v.max(), v.std(), v.shape, name) + '\n'
        return msg

    """
    # ----------------------------------------
    # Save prameters
    # Load prameters
    # ----------------------------------------
    """

    # ----------------------------------------
    # save the state_dict of the network
    # ----------------------------------------
    def save_network(self, save_dir, network, network_label, iter_label):
        save_filename = '{}_{}.pth'.format(iter_label, network_label)
        save_path = os.path.join(save_dir, save_filename)
        network = self.get_bare_model(network)
        state_dict = network.state_dict()
        # move tensors to CPU so the checkpoint loads on any device
        for key, param in state_dict.items():
            state_dict[key] = param.cpu()
        torch.save(state_dict, save_path)

    # ----------------------------------------
    # load the state_dict of the network
    # ----------------------------------------
    def load_network(self, load_path, network, strict=True, param_key='params'):
        network = self.get_bare_model(network)
        if strict:
            state_dict = torch.load(load_path)
            if param_key in state_dict.keys():
                state_dict = state_dict[param_key]
            network.load_state_dict(state_dict, strict=strict)
        else:
            # NOTE(review): the loose path copies parameters *positionally* via zip,
            # ignoring key names — it assumes both state dicts enumerate parameters
            # in the same order. Verify before using across architecture changes.
            state_dict_old = torch.load(load_path)
            if param_key in state_dict_old.keys():
                state_dict_old = state_dict_old[param_key]
            state_dict = network.state_dict()
            for ((key_old, param_old), (key, param)) in zip(state_dict_old.items(), state_dict.items()):
                state_dict[key] = param_old
            network.load_state_dict(state_dict, strict=True)
            del state_dict_old, state_dict

    # ----------------------------------------
    # save the state_dict of the optimizer
    # ----------------------------------------
    def save_optimizer(self, save_dir, optimizer, optimizer_label, iter_label):
        save_filename = '{}_{}.pth'.format(iter_label, optimizer_label)
        save_path = os.path.join(save_dir, save_filename)
        torch.save(optimizer.state_dict(), save_path)

    # ----------------------------------------
    # load the state_dict of the optimizer
    # ----------------------------------------
    def load_optimizer(self, load_path, optimizer):
        # map optimizer state onto the GPU currently used by this process
        optimizer.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage.cuda(torch.cuda.current_device())))

    def update_E(self, decay=0.999):
        # exponential moving average: netE <- decay * netE + (1 - decay) * netG
        netG = self.get_bare_model(self.netG)
        netG_params = dict(netG.named_parameters())
        netE_params = dict(self.netE.named_parameters())
        for k in netG_params.keys():
            netE_params[k].data.mul_(decay).add_(netG_params[k].data, alpha=1-decay)

    """
    # ----------------------------------------
    # Merge Batch Normalization for training
    # Merge Batch Normalization for testing
    # ----------------------------------------
    """

    # ----------------------------------------
    # merge bn during training
    # ----------------------------------------
    def merge_bnorm_train(self):
        merge_bn(self.netG)
        tidy_sequential(self.netG)
        # optimizer/scheduler must be rebuilt since the parameter set changed
        self.define_optimizer()
        self.define_scheduler()

    # ----------------------------------------
    # merge bn before testing
    # ----------------------------------------
    def merge_bnorm_test(self):
        merge_bn(self.netG)
        tidy_sequential(self.netG)
| 7,442 | 33.299539 | 148 | py |
SwinMR | SwinMR-main/models/select_network.py | '''
# -----------------------------------------
Define Training Network
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
import functools
import torch
import torchvision.models
from torch.nn import init
# --------------------------------------------
# Recon Generator, netG, G
# --------------------------------------------
def define_G(opt):
    """Build the reconstruction generator netG from opt['netG'].

    Args:
        opt (dict): full option dict; reads opt['netG'] (architecture
            hyper-parameters, with 'net_type' selecting the backbone) and
            opt['is_train'] (whether to run weight initialization).

    Returns:
        nn.Module: the constructed generator.

    Raises:
        NotImplementedError: if opt['netG']['net_type'] is not recognized.
    """
    opt_net = opt['netG']
    net_type = opt_net['net_type']

    # ----------------------------------------
    # SwinIR (for SwinMR)
    # ----------------------------------------
    if net_type == 'swinir':
        from models.network_swinmr import SwinIR as net
        netG = net(img_size=opt_net['img_size'],
                   in_chans=opt_net['in_chans'],
                   out_chans=opt_net['out_chans'],
                   embed_dim=opt_net['embed_dim'],
                   depths=opt_net['depths'],
                   num_heads=opt_net['num_heads'],
                   window_size=opt_net['window_size'],
                   mlp_ratio=opt_net['mlp_ratio'],
                   upscale=opt_net['upscale'],
                   img_range=opt_net['img_range'],
                   upsampler=opt_net['upsampler'],
                   resi_connection=opt_net['resi_connection'])
    else:
        # Previously an unknown net_type fell through and crashed later with a
        # NameError on `netG`; fail fast with a clear message instead.
        raise NotImplementedError('netG [{:s}] is not found.'.format(net_type))

    # ----------------------------------------
    # initialize weights
    # ----------------------------------------
    if opt['is_train']:
        init_weights(netG,
                     init_type=opt_net['init_type'],
                     init_bn_type=opt_net['init_bn_type'],
                     gain=opt_net['init_gain'])

    return netG
# --------------------------------------------
# VGGfeature, netF, F
# --------------------------------------------
def define_F(opt, use_bn=False):
    """Build a frozen VGG feature extractor (used for perceptual loss).

    Args:
        opt (dict): reads opt['gpu_ids'] to pick the device.
        use_bn (bool): use the batch-norm variant of VGG19 if True.

    Returns:
        nn.Module: extractor in eval mode (no training; gradients still flow
        back to its input).
    """
    device = torch.device('cuda' if opt['gpu_ids'] else 'cpu')
    from models.network_feature import VGGFeatureExtractor
    # pytorch pretrained VGG19-54, before ReLU.
    feature_layer = 49 if use_bn else 34
    netF = VGGFeatureExtractor(feature_layer=feature_layer,
                               use_bn=use_bn,
                               use_input_norm=True,
                               device=device)
    netF.eval()  # No need to train, but need BP to input
    return netF
# --------------------------------------------
# weights initialization
# --------------------------------------------
def init_weights(net, init_type='xavier_uniform', init_bn_type='uniform', gain=1):
    """
    # Kai Zhang, https://github.com/cszn/KAIR
    #
    # Args:
    #   init_type:
    #       default, none: pass init_weights
    #       normal; normal; xavier_normal; xavier_uniform;
    #       kaiming_normal; kaiming_uniform; orthogonal
    #   init_bn_type:
    #       uniform; constant
    #   gain:
    #       0.2
    """

    def _apply_init(m, init_type='xavier_uniform', init_bn_type='uniform', gain=1):
        # dispatched over every submodule by net.apply() below
        classname = m.__class__.__name__

        if classname.find('Conv') != -1 or classname.find('Linear') != -1:
            # --- conv / linear weights ---
            if init_type == 'normal':
                init.normal_(m.weight.data, 0, 0.1)
                m.weight.data.clamp_(-1, 1).mul_(gain)
            elif init_type == 'uniform':
                init.uniform_(m.weight.data, -0.2, 0.2)
                m.weight.data.mul_(gain)
            elif init_type == 'xavier_normal':
                init.xavier_normal_(m.weight.data, gain=gain)
                m.weight.data.clamp_(-1, 1)
            elif init_type == 'xavier_uniform':
                init.xavier_uniform_(m.weight.data, gain=gain)
            elif init_type == 'kaiming_normal':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in', nonlinearity='relu')
                m.weight.data.clamp_(-1, 1).mul_(gain)
            elif init_type == 'kaiming_uniform':
                init.kaiming_uniform_(m.weight.data, a=0, mode='fan_in', nonlinearity='relu')
                m.weight.data.mul_(gain)
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('Initialization method [{:s}] is not implemented'.format(init_type))
            # biases always start at zero
            if m.bias is not None:
                m.bias.data.zero_()

        elif classname.find('BatchNorm2d') != -1:
            # --- batch-norm affine parameters ---
            if init_bn_type == 'uniform':  # preferred
                if m.affine:
                    init.uniform_(m.weight.data, 0.1, 1.0)
                    init.constant_(m.bias.data, 0.0)
            elif init_bn_type == 'constant':
                if m.affine:
                    init.constant_(m.weight.data, 1.0)
                    init.constant_(m.bias.data, 0.0)
            else:
                raise NotImplementedError('Initialization method [{:s}] is not implemented'.format(init_bn_type))

    if init_type in ['default', 'none']:
        # network defined its own initialization; nothing to do
        print('Pass this initialization! Initialization was done during network defination!')
    else:
        print('Initialization method [{:s} + {:s}], gain is [{:.2f}]'.format(init_type, init_bn_type, gain))
        net.apply(functools.partial(_apply_init, init_type=init_type, init_bn_type=init_bn_type, gain=gain))
| 5,220 | 35.006897 | 113 | py |
SwinMR | SwinMR-main/models/network_swinmr.py | '''
# -----------------------------------------
Network
SwinMR m.1.3
by Jiahao Huang (j.huang21@imperial.ac.uk)
Thanks:
https://github.com/JingyunLiang/SwinIR
https://github.com/microsoft/Swin-Transformer
# -----------------------------------------
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
'''
Multilayer Perceptron
'''
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> Linear, with
    dropout after each linear layer. Hidden/output widths default to the
    input width when not given."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        # submodule names (fc1/act/fc2/drop) are part of the state-dict layout
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # fc1 -> act -> drop -> fc2 -> drop, as in the reference implementation
        for stage in (self.fc1, self.act, self.drop, self.fc2, self.drop):
            x = stage(x)
        return x
def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    rows, cols = H // window_size, W // window_size
    # tile the feature map, then flatten (batch, row-block, col-block) into one axis
    tiled = x.view(B, rows, window_size, cols, window_size, C)
    windows = tiled.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size, window_size, C)
    return windows
def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    rows, cols = H // window_size, W // window_size
    # recover batch size from total window count (inverse of window_partition)
    B = windows.shape[0] // (rows * cols)
    x = windows.view(B, rows, cols, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, H, W, -1)
    return x
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads  # number of head 6
        head_dim = dim // num_heads  # head_dim: 180//6=30
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias
        # one learnable bias per (relative offset, head); offsets range over
        # [-(Wh-1), Wh-1] x [-(Ww-1), Ww-1]
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        # buffer (not a parameter): fixed lookup from token pair -> bias-table row
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)

        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape  # B_: 576 number of Windows * Batch_size in a GPU  N: 64 patch number in a window  C: 180 embedding channel
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
        # q,k,v (576,6,64,30) (number of Windows * Batch_size in a GPU, number of head, patch number in a window, head_dim)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        # add the learned relative position bias to every head's attention logits
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # shifted-window case: add the (0/-100) mask per window before softmax
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'

    def flops(self, N):
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        # x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops

    def params(self):
        # calculate params for 1 window with token length of N
        # (counts only the linear-layer weights; biases and the bias table are omitted)
        params = 0
        # qkv = self.qkv(x)
        params += self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        params += 0
        # x = (attn @ v)
        params += 0
        # x = self.proj(x)
        params += self.dim * self.dim
        return params
class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resulotion.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if self.shift_size > 0:
            # mask is precomputed for the nominal resolution; recomputed in
            # forward() when the actual input size differs
            attn_mask = self.calculate_mask(self.input_resolution)
        else:
            attn_mask = None

        self.register_buffer("attn_mask", attn_mask)

    def calculate_mask(self, x_size):
        # calculate attention mask for SW-MSA: tokens from different shifted
        # regions get -100 so softmax effectively zeroes their interaction
        H, W = x_size
        img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
        h_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        w_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1

        mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))

        return attn_mask

    def forward(self, x, x_size):
        H, W = x_size  # x (4,9216,180) (batch_in_each_GPU, H*W, embedding_channel)  x_size (96,96)
        B, L, C = x.shape  # B:4 C:180 L:9216
        assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)  # x (4,9216,180) (batch_in_each_GPU, H*W, embedding_channel)
        x = x.view(B, H, W, C)  # x (4,96,96,180) (batch_in_each_GPU, embedding_channel, H, W)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x  # shifted_x (4,96,96,180) (batch_in_each_GPU, embedding_channel, H, W)

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # (576,8,8,180) (nW*B, window_size, window_size, C)  nW:number of Windows
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # (576,64,180) (nW*B, window_size*window_size, C)  nW:number of Windows

        # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
        if self.input_resolution == x_size:
            attn_windows = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C
        else:
            attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))  # (576,64,180) (nW*B, window_size*window_size, C)  nW:number of Windows

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)  # (576,8,8,180) (nW*B, window_size, window_size, C)  nW:number of Windows
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C  shifted_x (4,96,96,180) (batch_in_each_GPU, embedding_channel, H, W)

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)  # x (4,9216,180) (batch_in_each_GPU, H*W, embedding_channel)  x_size (96,96)

        # FFN (both residual branches use stochastic depth via drop_path)
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x  # x (4,9216,180) (batch_in_each_GPU, H*W, embedding_channel)  x_size (96,96)

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"

    def flops(self):
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops

    def params(self):
        params = 0
        # norm1
        params += self.dim * 2
        # W-MSA/SW-MSA
        params += self.attn.params()
        # mlp
        params += 2 * self.dim * self.dim * self.mlp_ratio
        # norm2
        params += self.dim * 2
        return params
class PatchMerging(nn.Module):
    r"""Downsample a token grid 2x by merging each 2x2 group of patches.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        # creation order (reduction, then norm) kept so RNG-based init matches
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """Map (B, H*W, C) tokens to (B, H/2*W/2, 2C)."""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."

        grid = x.view(B, H, W, C)
        # gather the four interleaved sub-grids in (row, col) offset order
        # (0,0), (1,0), (0,1), (1,1) and stack them along the channel axis
        corners = [grid[:, r::2, c::2, :] for c in (0, 1) for r in (0, 1)]
        merged = torch.cat(corners, -1).view(B, -1, 4 * C)  # B H/2*W/2 4*C

        return self.reduction(self.norm(merged))

    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

    def flops(self):
        """FLOPs of the norm plus the 4C -> 2C reduction."""
        H, W = self.input_resolution
        return H * W * self.dim + (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim

    def params(self):
        """Parameter count of the norm plus the reduction linear."""
        return 2 * self.dim + 4 * self.dim * 2 * self.dim
class BasicLayer(nn.Module):
    """One stage of a Swin Transformer: ``depth`` blocks, alternating regular
    and shifted windows, with an optional downsampling layer at the end.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        blocks = []
        for idx in range(depth):
            # per-block stochastic-depth rate (list) or a shared scalar
            rate = drop_path[idx] if isinstance(drop_path, list) else drop_path
            blocks.append(SwinTransformerBlock(
                dim=dim, input_resolution=input_resolution,
                num_heads=num_heads, window_size=window_size,
                # even blocks use plain windows, odd blocks shifted windows
                shift_size=(window_size // 2) if idx % 2 else 0,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop, attn_drop=attn_drop,
                drop_path=rate,
                norm_layer=norm_layer))
        self.blocks = nn.ModuleList(blocks)

        if downsample is None:
            self.downsample = None
        else:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)

    def forward(self, x, x_size):
        """Apply every block (optionally gradient-checkpointed), then downsample."""
        for blk in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x, x_size)
            else:
                x = blk(x, x_size)
        if self.downsample is not None:
            x = self.downsample(x)
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def flops(self):
        total = sum(blk.flops() for blk in self.blocks)
        if self.downsample is not None:
            total += self.downsample.flops()
        return total

    def params(self):
        total = sum(blk.params() for blk in self.blocks)
        if self.downsample is not None:
            total += self.downsample.params()
        return total
class RSTB(nn.Module):
    """Residual Swin Transformer Block (RSTB).

    A BasicLayer (group of Swin blocks) followed by a convolution, wrapped in a
    residual connection. Tokens are unembedded to a feature map for the conv
    and re-embedded afterwards.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
        img_size: Input image size.
        patch_size: Patch size.
        resi_connection: The convolutional block before residual connection ('1conv' or '3conv').
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
                 img_size=224, patch_size=4, resi_connection='1conv'):
        super(RSTB, self).__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.residual_group = BasicLayer(dim=dim,
                                         input_resolution=input_resolution,
                                         depth=depth,
                                         num_heads=num_heads,
                                         window_size=window_size,
                                         mlp_ratio=mlp_ratio,
                                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                                         drop=drop, attn_drop=attn_drop,
                                         drop_path=drop_path,
                                         norm_layer=norm_layer,
                                         downsample=downsample,
                                         use_checkpoint=use_checkpoint)
        if resi_connection == '1conv':
            self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
        elif resi_connection == '3conv':
            # to save parameters and memory: 3x3 -> 1x1 -> 3x3 with a dim//4 bottleneck
            self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                      nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
                                      nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                      nn.Conv2d(dim // 4, dim, 3, 1, 1))
        # NOTE(review): in_chans=0 mirrors the upstream SwinIR code; neither
        # PatchEmbed nor PatchUnEmbed actually uses in_chans — confirm intentional.
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)

    def forward(self, x, x_size):
        # residual around: swin blocks -> unembed to map -> conv -> re-embed
        return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x

    def flops(self):
        """FLOPs of the swin group, the 3x3 conv, and the (un)embed steps."""
        flops = 0
        flops += self.residual_group.flops()
        H, W = self.input_resolution
        flops += H * W * self.dim * self.dim * 9
        flops += self.patch_embed.flops()
        flops += self.patch_unembed.flops()
        return flops

    def params(self):
        """Parameter count of the swin group plus the 3x3 conv (bias ignored)."""
        params = 0
        params += self.residual_group.params()
        params += self.dim * self.dim * 9
        params += self.patch_embed.params()
        params += self.patch_unembed.params()
        return params
class PatchEmbed(nn.Module):
    r"""Flatten a (B, C, H, W) feature map into patch tokens (B, Ph*Pw, C).

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.num_patches = self.patches_resolution[0] * self.patches_resolution[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None

    def forward(self, x):
        """Map (B, C, H, W) -> (B, H*W, C), optionally normalized."""
        tokens = x.flatten(2).transpose(1, 2)
        if self.norm is not None:
            tokens = self.norm(tokens)
        return tokens

    def flops(self):
        """Only the optional norm contributes FLOPs (flatten is free)."""
        H, W = self.img_size
        return H * W * self.embed_dim if self.norm is not None else 0

    def params(self):
        """Only the optional norm has parameters."""
        return 2 * self.embed_dim if self.norm is not None else 0
class PatchUnEmbed(nn.Module):
    r"""Fold a sequence of patch tokens (B, H*W, C) back into a feature map.

    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.num_patches = self.patches_resolution[0] * self.patches_resolution[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim

    def forward(self, x, x_size):
        """Map (B, H*W, C) tokens to a (B, C, H, W) feature map."""
        batch = x.shape[0]
        return x.transpose(1, 2).view(batch, self.embed_dim, x_size[0], x_size[1])

    def flops(self):
        """Pure reshape — zero FLOPs."""
        return 0

    def params(self):
        """No learnable parameters."""
        return 0
class Upsample(nn.Sequential):
    """Pixel-shuffle upsampler.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """

    def __init__(self, scale, num_feat):
        layers = []
        if (scale & (scale - 1)) == 0:
            # power of two: chain log2(scale) conv + PixelShuffle(2) stages
            for _ in range(int(math.log(scale, 2))):
                layers.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
                layers.append(nn.PixelShuffle(2))
        elif scale == 3:
            layers.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
            layers.append(nn.PixelShuffle(3))
        else:
            raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
        super(Upsample, self).__init__(*layers)
class UpsampleOneStep(nn.Sequential):
    """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
    Used in lightweight SR to save parameters.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
        num_out_ch (int): Number of output image channels.
        input_resolution (tuple[int] | None): (H, W) used only by flops().
    """

    def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
        m = [nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1),
             nn.PixelShuffle(scale)]
        super(UpsampleOneStep, self).__init__(*m)
        # stored after super().__init__() so nn.Module bookkeeping is ready
        self.num_feat = num_feat
        self.num_out_ch = num_out_ch
        self.scale = scale
        self.input_resolution = input_resolution

    def flops(self):
        """Conv FLOPs = H * W * Cin * Cout * k^2 with Cout = scale^2 * num_out_ch.

        Fix: the previous estimate hard-coded the output channels as 3, which
        is wrong whenever scale^2 * num_out_ch != 3 (e.g. single-channel MRI).
        """
        H, W = self.input_resolution
        return H * W * self.num_feat * (self.scale ** 2) * self.num_out_ch * 9

    def params(self):
        """Conv weight parameters (bias ignored, matching the other estimators)."""
        return self.num_feat * (self.scale ** 2) * self.num_out_ch * 9
class SwinIR(nn.Module):
    r""" SwinIR
        A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.

    Args:
        img_size (int | tuple(int)): Input image size. Default 64
        patch_size (int | tuple(int)): Patch size. Default: 1
        in_chans (int): Number of input image channels. Default: 3
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
        img_range: Image range. 1. or 255.
        upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
        resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
    """

    def __init__(self, img_size=64, patch_size=1, in_chans=1,
                 embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
                 window_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, upscale=1, img_range=1., upsampler='', resi_connection='1conv',
                 **kwargs):
        super(SwinIR, self).__init__()
        num_in_ch = in_chans
        num_out_ch = in_chans
        num_feat = 64
        self.img_range = img_range
        # per-channel mean used to normalize the input; zero for non-RGB input
        if in_chans == 3:
            rgb_mean = (0.4488, 0.4371, 0.4040)
            self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
        else:
            self.mean = torch.zeros(1, 1, 1, 1)
        self.upscale = upscale
        self.upsampler = upsampler
        self.window_size = window_size
        #####################################################################################################
        ################################### 1, shallow feature extraction ###################################
        self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
        #####################################################################################################
        ################################### 2, deep feature extraction ######################################
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.num_features = embed_dim
        self.mlp_ratio = mlp_ratio
        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution
        # merge non-overlapping patches into image
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        # absolute position embedding (off by default for restoration)
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth: linearly increasing drop-path rate per block
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        # build Residual Swin Transformer blocks (RSTB)
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = RSTB(dim=embed_dim,
                         input_resolution=(patches_resolution[0],
                                           patches_resolution[1]),
                         depth=depths[i_layer],
                         num_heads=num_heads[i_layer],
                         window_size=window_size,
                         mlp_ratio=self.mlp_ratio,
                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                         drop=drop_rate, attn_drop=attn_drop_rate,
                         drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],  # no impact on SR results
                         norm_layer=norm_layer,
                         downsample=None,
                         use_checkpoint=use_checkpoint,
                         img_size=img_size,
                         patch_size=patch_size,
                         resi_connection=resi_connection
                         )
            self.layers.append(layer)
        self.norm = norm_layer(self.num_features)
        # build the last conv layer in deep feature extraction
        if resi_connection == '1conv':
            self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
        elif resi_connection == '3conv':
            # to save parameters and memory
            self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
        #####################################################################################################
        ################################ 3, high quality image reconstruction ################################
        if self.upsampler == 'pixelshuffle':
            # for classical SR
            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                                                      nn.LeakyReLU(inplace=True))
            self.upsample = Upsample(upscale, num_feat)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR (to save parameters)
            self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
                                            (patches_resolution[0], patches_resolution[1]))
        elif self.upsampler == 'nearest+conv':
            # for real-world SR (less artifacts)
            assert self.upscale == 4, 'only support x4 now.'
            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                                                      nn.LeakyReLU(inplace=True))
            self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
            self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        else:
            # for image denoising and JPEG compression artifact reduction
            self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for linears, standard init for LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names excluded from weight decay."""
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        """Parameter-name keywords excluded from weight decay."""
        return {'relative_position_bias_table'}

    def check_image_size(self, x):
        """Reflect-pad H and W up to a multiple of the window size."""
        _, _, h, w = x.size()
        mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
        mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
        x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
        return x

    def forward_features(self, x):
        """Deep feature extraction: embed -> RSTB stack -> norm -> unembed.

        Input and output are (B, embed_dim, H, W) feature maps.
        """
        x_size = (x.shape[2], x.shape[3])
        x = self.patch_embed(x)  # (B, H*W, embed_dim)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for layer in self.layers:
            x = layer(x, x_size)
        x = self.norm(x)  # B L C
        x = self.patch_unembed(x, x_size)  # back to (B, embed_dim, H, W)
        return x

    def forward(self, x):
        """Restore an image; output is cropped back to the input H*W times upscale."""
        H, W = x.shape[2:]
        x = self.check_image_size(x)
        self.mean = self.mean.type_as(x)
        x = (x - self.mean) * self.img_range
        if self.upsampler == 'pixelshuffle':
            # for classical SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.conv_last(self.upsample(x))
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.upsample(x)
        elif self.upsampler == 'nearest+conv':
            # for real-world SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.conv_last(self.lrelu(self.conv_hr(x)))
        else:
            # for image denoising and JPEG compression artifact reduction:
            # global residual — the network predicts a correction to the input
            x_first = self.conv_first(x)
            res = self.conv_after_body(self.forward_features(x_first)) + x_first
            x = x + self.conv_last(res)
        x = x / self.img_range + self.mean
        return x[:, :, :H*self.upscale, :W*self.upscale]

    def flops(self):
        """Rough FLOPs estimate at self.patches_resolution (single-channel I/O)."""
        flops = 0
        H, W = self.patches_resolution
        flops += H * W * 1 * self.embed_dim * 9  # conv_first
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        flops += H * W * self.embed_dim * self.embed_dim * 9  # conv_after_body
        flops += H * W * self.embed_dim * 1 * 9  # conv_last
        return flops

    def params(self):
        """Rough parameter-count estimate (biases and norms partly ignored)."""
        params = 0
        params += 1 * self.embed_dim * 9  # conv_first
        params += self.patch_embed.params()
        for i, layer in enumerate(self.layers):
            params += layer.params()
        params += self.embed_dim * self.embed_dim * 9  # conv_after_body
        params += self.embed_dim * 1 * 9  # conv_last
        return params
if __name__ == '__main__':
    # Standalone smoke test: build the SwinMR backbone (SwinIR with 1-channel
    # input, no upsampler) and push one random 256x256 image through it.
    from thop import profile
    from thop import clever_format
    import os
    batch = 1
    height = 256
    width = 256
    device = 'cuda'
    torch.cuda.empty_cache()
    # NOTE(review): CUDA_VISIBLE_DEVICES is set *after* torch.cuda was touched
    # above; if CUDA is already initialised this has no effect — confirm GPU pick.
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    print('swinmr')
    model = SwinIR(upscale=1,
                   in_chans=1,
                   img_size=[256, 256],
                   window_size=8,
                   img_range=1.0,
                   depths=[6, 6, 6, 6, 6, 6],
                   embed_dim=180,
                   num_heads=[6, 6, 6, 6, 6, 6],
                   mlp_ratio=2.0,
                   upsampler='',
                   resi_connection='1conv',).to(device)
    # print(model)
    # print('FLOPs: {}G'.format(round((model.flops() * 1e-9),3)))
    # print('PARAMs: {}M'.format(round((model.params() * 1e-6), 3)))
    x = torch.randn((batch, 1, height, width)).to(device)
    print(f'Input shape: {x.shape}')
    with torch.no_grad():
        x = model(x)
    print(f'Output shape: {x.shape}')
    print('-------------------------------')
    # macs, params = profile(model, inputs=(x, ))
    # macs, params = clever_format([macs, params], "%.3f")
    # print(macs)
    # print(params)
SwinMR | SwinMR-main/models/loss.py | import torch
import torch.nn as nn
import torchvision
from torch.nn import functional as F
from torch import autograd as autograd
import math
"""
Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace)
(2*): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace)
(7*): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace)
(16*): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(17): ReLU(inplace)
(18): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(19): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace)
(23): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(24): ReLU(inplace)
(25*): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(26): ReLU(inplace)
(27): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace)
(30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(31): ReLU(inplace)
(32): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(33): ReLU(inplace)
(34*): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(35): ReLU(inplace)
(36): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
"""
# --------------------------------------------
# Perceptual loss
# --------------------------------------------
class VGGFeatureExtractor(nn.Module):
    """Frozen, pretrained VGG19 feature extractor used by the perceptual loss.

    Returns either a single feature map (int feature_layer) or a list of
    feature maps taken after the given layer indices (list feature_layer).
    """
    def __init__(self, feature_layer=[2,7,16,25,34], use_input_norm=True, use_range_norm=False):
        super(VGGFeatureExtractor, self).__init__()
        '''
        feature_layer: VGG19 `features` indices at which to tap activations.
        use_input_norm: If True, x: [0, 1] --> (x - mean) / std  (ImageNet stats)
        use_range_norm: If True, x: [-1, 1] --> x: [0, 1]  (applied first)
        '''
        model = torchvision.models.vgg19(pretrained=True)
        self.use_input_norm = use_input_norm
        self.use_range_norm = use_range_norm
        if self.use_input_norm:
            # ImageNet channel statistics, registered as buffers so they follow .to(device)
            mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
            std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
            self.register_buffer('mean', mean)
            self.register_buffer('std', std)
        self.list_outputs = isinstance(feature_layer, list)
        if self.list_outputs:
            # slice VGG's `features` into consecutive chunks ending at each tap index
            self.features = nn.Sequential()
            feature_layer = [-1] + feature_layer
            for i in range(len(feature_layer)-1):
                self.features.add_module('child'+str(i), nn.Sequential(*list(model.features.children())[(feature_layer[i]+1):(feature_layer[i+1]+1)]))
        else:
            self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])
        print(self.features)
        # No need to BP to variable: freeze the VGG weights
        for k, v in self.features.named_parameters():
            v.requires_grad = False
    def forward(self, x):
        """Extract frozen VGG features from x; see __init__ for normalization."""
        if self.use_range_norm:
            x = (x + 1.0) / 2.0
        if self.use_input_norm:
            x = (x - self.mean) / self.std
        if self.list_outputs:
            # run each chunk in sequence, collecting the activation after each
            output = []
            for child_model in self.features.children():
                x = child_model(x)
                output.append(x.clone())
            return output
        else:
            return self.features(x)
class PerceptualLoss(nn.Module):
    """VGG perceptual loss: weighted feature-space distance between x and gt."""

    def __init__(self, feature_layer=[2,7,16,25,34], weights=[0.1,0.1,1.0,1.0,1.0], lossfn_type='l1', use_input_norm=True, use_range_norm=False):
        super(PerceptualLoss, self).__init__()
        self.vgg = VGGFeatureExtractor(feature_layer=feature_layer,
                                       use_input_norm=use_input_norm,
                                       use_range_norm=use_range_norm)
        self.lossfn_type = lossfn_type
        self.weights = weights
        self.lossfn = nn.L1Loss() if self.lossfn_type == 'l1' else nn.MSELoss()
        print(f'feature_layer: {feature_layer} with weights: {weights}')

    def forward(self, x, gt):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).
            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        x_feats = self.vgg(x)
        gt_feats = self.vgg(gt.detach())
        loss = 0.0
        if isinstance(x_feats, list):
            # one weighted term per tapped VGG layer
            for idx, feat in enumerate(x_feats):
                loss += self.weights[idx] * self.lossfn(feat, gt_feats[idx])
        else:
            loss += self.lossfn(x_feats, gt_feats.detach())
        return loss
# --------------------------------------------
# GAN loss: gan, ragan
# --------------------------------------------
class GANLoss(nn.Module):
    """Adversarial objective wrapper.

    Supports 'gan'/'ragan' (BCE-with-logits), 'lsgan' (MSE), 'wgan' and
    'softplusgan'. For the Wasserstein variants the "label" is simply the
    real/fake boolean.
    """

    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
        super(GANLoss, self).__init__()
        self.gan_type = gan_type.lower()
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val
        if self.gan_type in ('gan', 'ragan'):
            self.loss = nn.BCEWithLogitsLoss()
        elif self.gan_type == 'lsgan':
            self.loss = nn.MSELoss()
        elif self.gan_type == 'wgan':
            self.loss = self._wgan_loss
        elif self.gan_type == 'softplusgan':
            self.loss = self._softplusgan_loss
        else:
            raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))

    @staticmethod
    def _wgan_loss(input, target):
        # target is boolean: minimize -D(real), maximize -D(fake)
        return -1 * input.mean() if target else input.mean()

    @staticmethod
    def _softplusgan_loss(input, target):
        # target is boolean
        return F.softplus(-input).mean() if target else F.softplus(input).mean()

    def get_target_label(self, input, target_is_real):
        """Boolean for the W-GAN variants, otherwise a filled label tensor."""
        if self.gan_type in ['wgan', 'softplusgan']:
            return target_is_real
        fill = self.real_label_val if target_is_real else self.fake_label_val
        return torch.empty_like(input).fill_(fill)

    def forward(self, input, target_is_real):
        return self.loss(input, self.get_target_label(input, target_is_real))
# --------------------------------------------
# TV loss
# --------------------------------------------
class TVLoss(nn.Module):
    """Total variation loss (squared differences of neighbouring pixels).

    https://github.com/jxgu1016/Total_Variation_Loss.pytorch

    Args:
        tv_loss_weight (int): scale factor applied to the final value.
    """

    def __init__(self, tv_loss_weight=1):
        super(TVLoss, self).__init__()
        self.tv_loss_weight = tv_loss_weight

    def forward(self, x):
        batch, _, height, width = x.size()
        # vertical and horizontal neighbour differences
        diff_h = x[:, :, 1:, :] - x[:, :, :height - 1, :]
        diff_w = x[:, :, :, 1:] - x[:, :, :, :width - 1]
        # normalise each term by the number of difference elements per sample
        count_h = self.tensor_size(x[:, :, 1:, :])
        count_w = self.tensor_size(x[:, :, :, 1:])
        h_term = diff_h.pow(2).sum() / count_h
        w_term = diff_w.pow(2).sum() / count_w
        return self.tv_loss_weight * 2 * (h_term + w_term) / batch

    @staticmethod
    def tensor_size(t):
        return t.size()[1] * t.size()[2] * t.size()[3]
# --------------------------------------------
# Charbonnier loss
# --------------------------------------------
class CharbonnierLoss(nn.Module):
    """Charbonnier loss: a smooth, differentiable-at-zero variant of L1."""

    def __init__(self, eps=1e-9):
        super(CharbonnierLoss, self).__init__()
        self.eps = eps  # smoothing constant; keeps sqrt away from zero

    def forward(self, x, y):
        residual = x - y
        return torch.sqrt(residual * residual + self.eps).mean()
def r1_penalty(real_pred, real_img):
    """R1 regularization for the discriminator.

    Penalizes the squared gradient norm of the discriminator output with
    respect to *real* images only, which stabilizes GAN training near the
    data manifold.

    Ref:
        Eq. 9 in "Which Training Methods for GANs do actually Converge?".
    """
    grad = autograd.grad(
        outputs=real_pred.sum(), inputs=real_img, create_graph=True)[0]
    # squared L2 norm per sample, then averaged over the batch
    per_sample = grad.pow(2).view(grad.shape[0], -1).sum(1)
    return per_sample.mean()
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
noise = torch.randn_like(fake_img) / math.sqrt(
fake_img.shape[2] * fake_img.shape[3])
grad = autograd.grad(
outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)[0]
path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
path_mean = mean_path_length + decay * (
path_lengths.mean() - mean_path_length)
path_penalty = (path_lengths - path_mean).pow(2).mean()
return path_penalty, path_lengths.detach().mean(), path_mean.detach()
def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None):
    """Calculate gradient penalty for wgan-gp.

    Args:
        discriminator (nn.Module): Network for the discriminator.
        real_data (Tensor): Real input data.
        fake_data (Tensor): Fake input data.
        weight (Tensor): Weight tensor. Default: None.

    Returns:
        Tensor: A tensor for gradient penalty.
    """
    batch_size = real_data.size(0)
    alpha = real_data.new_tensor(torch.rand(batch_size, 1, 1, 1))

    # interpolate between real_data and fake_data
    interpolates = alpha * real_data + (1. - alpha) * fake_data
    # `torch.autograd.Variable` is deprecated; mark the interpolates as a
    # gradient leaf directly instead.
    interpolates = interpolates.detach().requires_grad_(True)

    disc_interpolates = discriminator(interpolates)
    gradients = autograd.grad(
        outputs=disc_interpolates,
        inputs=interpolates,
        grad_outputs=torch.ones_like(disc_interpolates),
        create_graph=True,
        retain_graph=True,
        only_inputs=True)[0]

    if weight is not None:
        gradients = gradients * weight

    # penalize deviation of the per-location gradient norm from 1
    gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean()
    if weight is not None:
        gradients_penalty /= torch.mean(weight)

    return gradients_penalty
# PyTorch
class BinaryDiceLoss(nn.Module):
    """Dice loss for binary predictions.

    Args:
        smooth: A float number to smooth loss, and avoid NaN error, default: 1
        p: Denominator value: \sum{x^p} + \sum{y^p}, default: 2
        reduction: 'mean' | 'sum' | 'none' over the per-sample losses

    Forward inputs:
        predict: A tensor of shape [N, *]
        target: A tensor of shape same with predict

    Raise:
        Exception if reduction is not one of the supported modes
    """

    def __init__(self, smooth=1, p=2, reduction='mean'):
        super(BinaryDiceLoss, self).__init__()
        self.smooth = smooth
        self.p = p
        self.reduction = reduction

    def forward(self, predict, target):
        assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
        flat_pred = predict.contiguous().view(predict.shape[0], -1)
        flat_tgt = target.contiguous().view(target.shape[0], -1)

        intersection = (flat_pred * flat_tgt).sum(dim=1) + self.smooth
        denominator = (flat_pred.pow(self.p) + flat_tgt.pow(self.p)).sum(dim=1) + self.smooth
        loss = 1 - intersection / denominator

        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        if self.reduction == 'none':
            return loss
        raise Exception('Unexpected reduction {}'.format(self.reduction))
def mask_to_onehot(net_output, gt):
    """Convert an integer label mask to one-hot, matching `net_output`'s layout.

    net_output must be (b, c, x, y(, z))); gt may be (b, 1, x, y(, z)),
    (b, x, y(, z)), or already one-hot (same shape as net_output, in which
    case it is returned unchanged).
    """
    output_shape = net_output.shape
    gt_shape = gt.shape
    with torch.no_grad():
        if len(output_shape) != len(gt_shape):
            # insert the missing channel axis: (b, spatial...) -> (b, 1, spatial...)
            gt = gt.view((gt_shape[0], 1, *gt_shape[1:]))

        if all(i == j for i, j in zip(net_output.shape, gt.shape)):
            # shapes already match -> gt is probably already a one-hot encoding
            one_hot = gt
        else:
            labels = gt.long()
            one_hot = torch.zeros(output_shape)
            if net_output.device.type == "cuda":
                one_hot = one_hot.cuda(net_output.device.index)
            # scatter a 1 into the channel given by each label
            one_hot.scatter_(1, labels, 1)
    return one_hot
class DiceLoss(nn.Module):
    """Multi-class Dice loss; expects one-hot encoded input.

    Args:
        weight: An array of shape [num_classes,] with per-class loss weights.
        ignore_index: class index to exclude from the loss.
        Other kwargs are passed through to BinaryDiceLoss.

    Shape:
        predict: [N, C, *]; target: same shape as predict (one-hot).

    Returns:
        Same as BinaryDiceLoss, averaged over the C classes.
    """
    def __init__(self, weight=None, ignore_index=None, **kwargs):
        super(DiceLoss, self).__init__()
        self.kwargs = kwargs
        self.weight = weight
        self.ignore_index = ignore_index

    def forward(self, predict, target):
        assert predict.shape == target.shape, 'predict & target shape do not match'
        dice = BinaryDiceLoss(**self.kwargs)
        total_loss = 0
        # NOTE: inputs are used as-is; apply softmax/sigmoid beforehand if needed
        for i in range(target.shape[1]):
            if i != self.ignore_index:
                dice_loss = dice(predict[:, i], target[:, i])
                if self.weight is not None:
                    assert self.weight.shape[0] == target.shape[1], \
                        'Expect weight shape [{}], get[{}]'.format(target.shape[1], self.weight.shape[0])
                    # fixed: was `self.weights[i]`, which raised AttributeError
                    # whenever a weight tensor was supplied
                    dice_loss *= self.weight[i]
                total_loss += dice_loss
        # averaged over ALL channels, including an ignored one (original behavior)
        return total_loss / target.shape[1]
SwinMR | SwinMR-main/models/network_feature.py | import torch
import torch.nn as nn
import torchvision
"""
# --------------------------------------------
# VGG Feature Extractor
# --------------------------------------------
"""
# --------------------------------------------
# VGG features
# Assume input range is [0, 1]
# --------------------------------------------
class VGGFeatureExtractor(nn.Module):
    """Truncated, frozen (pretrained) VGG19 used as a fixed feature extractor.

    Assumes input range is [0, 1] when `use_input_norm` is True.

    Args:
        feature_layer (int): index of the last `model.features` layer to keep
            (the slice is `[:feature_layer + 1]`). Default: 34.
        use_bn (bool): use the batch-norm variant of VGG19. Default: False.
        use_input_norm (bool): normalize inputs with ImageNet mean/std before
            the forward pass. Default: True.
        device (torch.device): device for the normalization constants.
    """
    def __init__(self,
                 feature_layer=34,
                 use_bn=False,
                 use_input_norm=True,
                 device=torch.device('cpu')):
        super(VGGFeatureExtractor, self).__init__()
        if use_bn:
            model = torchvision.models.vgg19_bn(pretrained=True)
        else:
            model = torchvision.models.vgg19(pretrained=True)
        self.use_input_norm = use_input_norm
        if self.use_input_norm:
            # ImageNet statistics; inputs are expected in [0, 1]
            mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
            # [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]
            std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
            # [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]
            # buffers move with .to()/state_dict but are not trained
            self.register_buffer('mean', mean)
            self.register_buffer('std', std)
        self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])
        # No need to BP to variable: freeze all extractor weights
        for k, v in self.features.named_parameters():
            v.requires_grad = False

    def forward(self, x):
        # normalize first (if enabled), then run the truncated VGG features
        if self.use_input_norm:
            x = (x - self.mean) / self.std
        output = self.features(x)
        return output
| 1,594 | 32.93617 | 93 | py |
SwinMR | SwinMR-main/models/basicblock.py | from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
# --------------------------------------------
# Advanced nn.Sequential
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
def sequential(*args):
    """Advanced nn.Sequential: flattens nested nn.Sequential arguments.

    Args:
        *args: nn.Sequential and/or nn.Module instances.

    Returns:
        nn.Module: the single argument unchanged when exactly one is given,
        otherwise an nn.Sequential over all (flattened) modules.

    Raises:
        NotImplementedError: if a single OrderedDict is passed.
    """
    if len(args) == 1:
        if isinstance(args[0], OrderedDict):
            raise NotImplementedError('sequential does not support OrderedDict input.')
        # a single module needs no wrapping
        return args[0]
    flattened = []
    for item in args:
        if isinstance(item, nn.Sequential):
            # splice children in so the result is a flat Sequential
            flattened.extend(item.children())
        elif isinstance(item, nn.Module):
            flattened.append(item)
    return nn.Sequential(*flattened)
'''
# --------------------------------------------
# Useful blocks
# https://github.com/xinntao/BasicSR
# --------------------------------
# conv + normaliation + relu (conv)
# (PixelUnShuffle)
# (ConditionalBatchNorm2d)
# concat (ConcatBlock)
# sum (ShortcutBlock)
# resblock (ResBlock)
# Channel Attention (CA) Layer (CALayer)
# Residual Channel Attention Block (RCABlock)
# Residual Channel Attention Group (RCAGroup)
# Residual Dense Block (ResidualDenseBlock_5C)
# Residual in Residual Dense Block (RRDB)
# --------------------------------------------
'''
# --------------------------------------------
# return nn.Sequantial of (Conv + BN + ReLU)
# --------------------------------------------
def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CBR', negative_slope=0.2):
    """Build a layer pipeline from `mode`, one layer per character.

    Codes: 'C' Conv2d, 'T' ConvTranspose2d, 'B' BatchNorm2d, 'I' InstanceNorm2d,
    'R'/'r' ReLU (inplace / not), 'L'/'l' LeakyReLU (inplace / not),
    '2'/'3'/'4' PixelShuffle x2/x3/x4, 'U'/'u'/'v' nearest Upsample x2/x3/x4,
    'M' MaxPool2d, 'A' AvgPool2d (both pooling layers use padding=0).

    Returns:
        The result of `sequential(...)`: a single module or an nn.Sequential.

    Raises:
        NotImplementedError: for an unknown mode character.
    """
    L = []
    for t in mode:
        if t == 'C':
            L.append(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
        elif t == 'T':
            L.append(nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
        elif t == 'B':
            L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=1e-04, affine=True))
        elif t == 'I':
            L.append(nn.InstanceNorm2d(out_channels, affine=True))
        elif t == 'R':
            L.append(nn.ReLU(inplace=True))
        elif t == 'r':
            L.append(nn.ReLU(inplace=False))
        elif t == 'L':
            L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=True))
        elif t == 'l':
            L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=False))
        elif t == '2':
            L.append(nn.PixelShuffle(upscale_factor=2))
        elif t == '3':
            L.append(nn.PixelShuffle(upscale_factor=3))
        elif t == '4':
            L.append(nn.PixelShuffle(upscale_factor=4))
        elif t == 'U':
            L.append(nn.Upsample(scale_factor=2, mode='nearest'))
        elif t == 'u':
            L.append(nn.Upsample(scale_factor=3, mode='nearest'))
        elif t == 'v':
            L.append(nn.Upsample(scale_factor=4, mode='nearest'))
        elif t == 'M':
            L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=0))
        elif t == 'A':
            L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=0))
        else:
            # fixed: the format placeholder was missing ('Undefined type: '.format(t)),
            # so the offending character never appeared in the error message
            raise NotImplementedError('Undefined type: {}'.format(t))
    return sequential(*L)
# --------------------------------------------
# inverse of pixel_shuffle
# --------------------------------------------
def pixel_unshuffle(input, upscale_factor):
    r"""Rearrange a :math:`(*, C, rH, rW)` tensor into :math:`(*, r^2C, H, W)`.

    Inverse of nn.PixelShuffle: every non-overlapping r-by-r spatial patch is
    folded into the channel dimension (channel order: c, then row offset,
    then column offset).

    Authors:
        Zhaoyi Yan, https://github.com/Zhaoyi-Yan
        Kai Zhang, https://github.com/cszn/FFDNet
    """
    batch, channels, height, width = input.size()
    out_h = height // upscale_factor
    out_w = width // upscale_factor

    # (b, c, H, rh, W, rw): expose the intra-patch offsets as separate axes
    grouped = input.contiguous().view(
        batch, channels, out_h, upscale_factor,
        out_w, upscale_factor)

    # move both offset axes next to the channel axis, then merge them into it
    shuffled = grouped.permute(0, 1, 3, 5, 2, 4).contiguous()
    return shuffled.view(batch, channels * upscale_factor ** 2, out_h, out_w)
class PixelUnShuffle(nn.Module):
    r"""Module wrapper around :func:`pixel_unshuffle`.

    Rearranges a :math:`(*, C, rH, rW)` tensor into :math:`(*, r^2C, H, W)`.

    Authors:
        Zhaoyi Yan, https://github.com/Zhaoyi-Yan
        Kai Zhang, https://github.com/cszn/FFDNet
    """

    def __init__(self, upscale_factor):
        super(PixelUnShuffle, self).__init__()
        # downscaling factor r applied to both spatial dimensions
        self.upscale_factor = upscale_factor

    def forward(self, input):
        return pixel_unshuffle(input, self.upscale_factor)

    def extra_repr(self):
        # shown inside repr(module), mirroring nn.PixelShuffle
        return 'upscale_factor={}'.format(self.upscale_factor)
# --------------------------------------------
# conditional batch norm
# https://github.com/pytorch/pytorch/issues/8985#issuecomment-405080775
# --------------------------------------------
class ConditionalBatchNorm2d(nn.Module):
def __init__(self, num_features, num_classes):
super().__init__()
self.num_features = num_features
self.bn = nn.BatchNorm2d(num_features, affine=False)
self.embed = nn.Embedding(num_classes, num_features * 2)
self.embed.weight.data[:, :num_features].normal_(1, 0.02) # Initialise scale at N(1, 0.02)
self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0
def forward(self, x, y):
out = self.bn(x)
gamma, beta = self.embed(y).chunk(2, 1)
out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1)
return out
# --------------------------------------------
# Concat the output of a submodule to its input
# --------------------------------------------
class ConcatBlock(nn.Module):
    """Run a submodule and concatenate its output with the input along channels."""
    def __init__(self, submodule):
        super(ConcatBlock, self).__init__()
        self.sub = submodule

    def forward(self, x):
        return torch.cat((x, self.sub(x)), dim=1)

    def __repr__(self):
        # tag the submodule's repr so the concat wiring is visible when printed
        return self.sub.__repr__() + 'concat'
# --------------------------------------------
# sum the output of a submodule to its input
# --------------------------------------------
class ShortcutBlock(nn.Module):
    """Add a submodule's output to its input (identity shortcut)."""
    def __init__(self, submodule):
        super(ShortcutBlock, self).__init__()
        self.sub = submodule

    def forward(self, x):
        return x + self.sub(x)

    def __repr__(self):
        # indent the submodule's repr with '|' to show it sits on a branch
        body = self.sub.__repr__().replace('\n', '\n|')
        return 'Identity + \n|' + body
# --------------------------------------------
# Res Block: x + conv(relu(conv(x)))
# --------------------------------------------
class ResBlock(nn.Module):
    """Residual block: x + body(x), where the body is built by `conv` from `mode`."""
    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', negative_slope=0.2):
        super(ResBlock, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        # a leading activation is lower-cased -> non-inplace, so the residual
        # input is not overwritten (see the 'r'/'l' codes in `conv`)
        if mode[0] in ['R', 'L']:
            mode = mode[0].lower() + mode[1:]
        self.res = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode, negative_slope)

    def forward(self, x):
        return x + self.res(x)
# --------------------------------------------
# simplified information multi-distillation block (IMDB)
# x + conv1(concat(split(relu(conv(x)))x3))
# --------------------------------------------
class IMDBlock(nn.Module):
    """Simplified Information Multi-Distillation Block (IMDB).

    At each of three stages the conv output is split along channels into a
    'distilled' part (d_nc, kept aside) and a 'remaining' part (r_nc,
    processed further); a fourth conv produces the last distilled chunk.
    The four distilled chunks are concatenated, fused by a 1x1 conv and
    added back to the input: x + conv1x1(concat(split(act(conv(x))) x3)).

    @inproceedings{hui2019lightweight,
      title={Lightweight Image Super-Resolution with Information Multi-distillation Network},
      author={Hui, Zheng and Gao, Xinbo and Yang, Yunchu and Wang, Xiumei},
      booktitle={Proceedings of the 27th ACM International Conference on Multimedia (ACM MM)},
      pages={2024--2032},
      year={2019}
    }
    @inproceedings{zhang2019aim,
      title={AIM 2019 Challenge on Constrained Super-Resolution: Methods and Results},
      author={Kai Zhang and Shuhang Gu and Radu Timofte and others},
      booktitle={IEEE International Conference on Computer Vision Workshops},
      year={2019}
    }
    """
    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CL', d_rate=0.25, negative_slope=0.05):
        super(IMDBlock, self).__init__()
        self.d_nc = int(in_channels * d_rate)  # distilled channels per stage
        self.r_nc = int(in_channels - self.d_nc)  # remaining channels per stage
        assert mode[0] == 'C', 'convolutional layer first'
        self.conv1 = conv(in_channels, in_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv2 = conv(self.r_nc, in_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv3 = conv(self.r_nc, in_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        # last stage: conv only (mode[0]), and all of its output is distilled
        self.conv4 = conv(self.r_nc, self.d_nc, kernel_size, stride, padding, bias, mode[0], negative_slope)
        # 1x1 fusion of the four distilled chunks back to out_channels
        self.conv1x1 = conv(self.d_nc*4, out_channels, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0], negative_slope=negative_slope)

    def forward(self, x):
        d1, r1 = torch.split(self.conv1(x), (self.d_nc, self.r_nc), dim=1)
        d2, r2 = torch.split(self.conv2(r1), (self.d_nc, self.r_nc), dim=1)
        d3, r3 = torch.split(self.conv3(r2), (self.d_nc, self.r_nc), dim=1)
        d4 = self.conv4(r3)
        res = self.conv1x1(torch.cat((d1, d2, d3, d4), dim=1))
        return x + res
# --------------------------------------------
# Enhanced Spatial Attention (ESA)
# --------------------------------------------
class ESA(nn.Module):
    """Enhanced Spatial Attention (ESA).

    Predicts a per-pixel gate in [0, 1] from a channel-squeezed copy of the
    input (strided conv + max-pool bottleneck, then bilinear upsampling and
    a 1x1 skip) and multiplies the input by it.

    Args:
        channel: number of input/output channels.
        reduction: channel squeeze factor for the attention branch.
        bias: unused; kept for signature compatibility.
    """
    def __init__(self, channel=64, reduction=4, bias=True):
        super(ESA, self).__init__()
        self.r_nc = channel // reduction
        self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
        self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)
        self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=2, padding=0)
        self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
        self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
        self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        squeezed = self.conv1(x)
        # coarse branch: stride-2 conv then 7x7/stride-3 max-pool (~1/6 resolution)
        att = F.max_pool2d(self.conv2(squeezed), kernel_size=7, stride=3)
        att = self.relu(self.conv3(att))
        att = self.relu(self.conv4(att))
        # back to the input resolution, plus a 1x1 skip from the squeezed features
        att = F.interpolate(self.conv5(att), (x.size(2), x.size(3)), mode='bilinear', align_corners=False)
        att = self.conv6(att + self.conv21(squeezed))
        return x.mul(self.sigmoid(att))
class CFRB(nn.Module):
    """Feature-distillation residual block with ESA attention.

    Three stages each emit a 'distilled' 1x1 branch (d_nc channels) while the
    main branch is refined residually (conv + skip + activation); a final conv
    yields the last distilled chunk. The concatenated chunks are activated,
    fused by a 1x1 conv and gated by an ESA module.
    """
    def __init__(self, in_channels=50, out_channels=50, kernel_size=3, stride=1, padding=1, bias=True, mode='CL', d_rate=0.5, negative_slope=0.05):
        super(CFRB, self).__init__()
        self.d_nc = int(in_channels * d_rate)  # distilled channels per stage
        self.r_nc = in_channels  # int(in_channels - self.d_nc)
        assert mode[0] == 'C', 'convolutional layer first'
        self.conv1_d = conv(in_channels, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        self.conv1_r = conv(in_channels, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        self.conv2_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        self.conv2_r = conv(self.r_nc, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        self.conv3_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        self.conv3_r = conv(self.r_nc, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        self.conv4_d = conv(self.r_nc, self.d_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        self.conv1x1 = conv(self.d_nc*4, out_channels, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        # shared activation (mode[-1]) applied after each residual sum and the concat
        self.act = conv(mode=mode[-1], negative_slope=negative_slope)
        self.esa = ESA(in_channels, reduction=4, bias=True)

    def forward(self, x):
        d1 = self.conv1_d(x)
        x = self.act(self.conv1_r(x)+x)  # residual refinement, stage 1
        d2 = self.conv2_d(x)
        x = self.act(self.conv2_r(x)+x)  # stage 2
        d3 = self.conv3_d(x)
        x = self.act(self.conv3_r(x)+x)  # stage 3
        x = self.conv4_d(x)  # final distilled chunk
        x = self.act(torch.cat([d1, d2, d3, x], dim=1))
        x = self.esa(self.conv1x1(x))
        return x
# --------------------------------------------
# Channel Attention (CA) Layer
# --------------------------------------------
class CALayer(nn.Module):
    """Channel Attention (CA) layer.

    Squeeze-and-excitation style gating: global average pooling followed by a
    bottleneck of two 1x1 convs and a sigmoid, producing one scale per channel.
    """
    def __init__(self, channel=64, reduction=16):
        super(CALayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_fc = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
            nn.Sigmoid()
        )

    def forward(self, x):
        # (b, c, h, w) -> (b, c, 1, 1) channel descriptor -> per-channel gate
        gate = self.conv_fc(self.avg_pool(x))
        return x * gate
# --------------------------------------------
# Residual Channel Attention Block (RCAB)
# --------------------------------------------
class RCABlock(nn.Module):
    """Residual Channel Attention Block (RCAB): x + CA(body(x))."""
    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', reduction=16, negative_slope=0.2):
        super(RCABlock, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        # leading activation becomes non-inplace so the residual input survives
        if mode[0] in ['R','L']:
            mode = mode[0].lower() + mode[1:]
        self.res = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        self.ca = CALayer(out_channels, reduction)

    def forward(self, x):
        return x + self.ca(self.res(x))
# --------------------------------------------
# Residual Channel Attention Group (RG)
# --------------------------------------------
class RCAGroup(nn.Module):
    """Residual Group (RG): `nb` RCABlocks plus a conv, under a long skip."""
    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', reduction=16, nb=12, negative_slope=0.2):
        super(RCAGroup, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        if mode[0] in ['R','L']:
            mode = mode[0].lower() + mode[1:]
        blocks = []
        for _ in range(nb):
            blocks.append(RCABlock(in_channels, out_channels, kernel_size, stride,
                                   padding, bias, mode, reduction, negative_slope))
        # trailing conv before the long skip connection
        blocks.append(conv(out_channels, out_channels, mode='C'))
        self.rg = nn.Sequential(*blocks)

    def forward(self, x):
        return x + self.rg(x)
# --------------------------------------------
# Residual Dense Block
# style: 5 convs
# --------------------------------------------
class ResidualDenseBlock_5C(nn.Module):
    """Residual Dense Block with 5 convolutions.

    Each conv sees the concatenation of the block input and all previous
    conv outputs; the final output is scaled by 0.2 and added back to the
    input (residual scaling).
    """
    def __init__(self, nc=64, gc=32, kernel_size=3, stride=1, padding=1, bias=True, mode='CR', negative_slope=0.2):
        super(ResidualDenseBlock_5C, self).__init__()
        # gc: growth channel, added to the concat input of every later conv
        self.conv1 = conv(nc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv2 = conv(nc+gc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv3 = conv(nc+2*gc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv4 = conv(nc+3*gc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        # last conv drops the trailing activation (mode[:-1]) and maps back to nc
        self.conv5 = conv(nc+4*gc, nc, kernel_size, stride, padding, bias, mode[:-1], negative_slope)

    def forward(self, x):
        x1 = self.conv1(x)
        x2 = self.conv2(torch.cat((x, x1), 1))
        x3 = self.conv3(torch.cat((x, x1, x2), 1))
        x4 = self.conv4(torch.cat((x, x1, x2, x3), 1))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        # residual scaling by 0.2 (in-place on the conv5 output)
        return x5.mul_(0.2) + x
# --------------------------------------------
# Residual in Residual Dense Block
# 3x5c
# --------------------------------------------
class RRDB(nn.Module):
    """Residual in Residual Dense Block: three chained RDBs plus a scaled long skip."""
    def __init__(self, nc=64, gc=32, kernel_size=3, stride=1, padding=1, bias=True, mode='CR', negative_slope=0.2):
        super(RRDB, self).__init__()
        self.RDB1 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.RDB2 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.RDB3 = ResidualDenseBlock_5C(nc, gc, kernel_size, stride, padding, bias, mode, negative_slope)

    def forward(self, x):
        out = x
        for dense_block in (self.RDB1, self.RDB2, self.RDB3):
            out = dense_block(out)
        # residual scaling by 0.2 before the long skip
        return out.mul_(0.2) + x
"""
# --------------------------------------------
# Upsampler
# Kai Zhang, https://github.com/cszn/KAIR
# --------------------------------------------
# upsample_pixelshuffle
# upsample_upconv
# upsample_convtranspose
# --------------------------------------------
"""
# --------------------------------------------
# conv + subp (+ relu)
# --------------------------------------------
def upsample_pixelshuffle(in_channels=64, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True, mode='2R', negative_slope=0.2):
    """Conv expanding channels by r^2, then PixelShuffle (+ optional norm/act)."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    scale = int(mode[0])
    # 'C' + mode: conv first, then the shuffle (and any trailing codes)
    return conv(in_channels, out_channels * (scale ** 2), kernel_size, stride, padding,
                bias, mode='C'+mode, negative_slope=negative_slope)
# --------------------------------------------
# nearest_upsample + conv (+ R)
# --------------------------------------------
def upsample_upconv(in_channels=64, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True, mode='2R', negative_slope=0.2):
    """Nearest-neighbour upsample followed by a conv (+ optional norm/act)."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR'
    # map the scale digit to its upsample code ('U'/'u'/'v' = x2/x3/x4) plus conv
    upsample_codes = {'2': 'UC', '3': 'uC', '4': 'vC'}
    mode = mode.replace(mode[0], upsample_codes[mode[0]])
    return conv(in_channels, out_channels, kernel_size, stride, padding, bias,
                mode=mode, negative_slope=negative_slope)
# --------------------------------------------
# convTranspose (+ relu)
# --------------------------------------------
def upsample_convtranspose(in_channels=64, out_channels=3, kernel_size=2, stride=2, padding=0, bias=True, mode='2R', negative_slope=0.2):
    """Transposed-conv upsampler; kernel and stride are forced to the scale factor."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    kernel_size = int(mode[0])
    stride = int(mode[0])
    # 'T' selects ConvTranspose2d in `conv`
    mode = mode.replace(mode[0], 'T')
    return conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode, negative_slope)
'''
# --------------------------------------------
# Downsampler
# Kai Zhang, https://github.com/cszn/KAIR
# --------------------------------------------
# downsample_strideconv
# downsample_maxpool
# downsample_avgpool
# --------------------------------------------
'''
# --------------------------------------------
# strideconv (+ relu)
# --------------------------------------------
def downsample_strideconv(in_channels=64, out_channels=64, kernel_size=2, stride=2, padding=0, bias=True, mode='2R', negative_slope=0.2):
    """Strided-conv downsampler; kernel and stride are forced to the scale factor."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    kernel_size = int(mode[0])
    stride = int(mode[0])
    # 'C' selects a plain Conv2d in `conv`
    mode = mode.replace(mode[0], 'C')
    return conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode, negative_slope)
# --------------------------------------------
# maxpooling + conv (+ relu)
# --------------------------------------------
def downsample_maxpool(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=0, bias=True, mode='2R', negative_slope=0.2):
    """MaxPool2d downsampling followed by a conv block built from the rest of `mode`."""
    assert len(mode) < 4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    pool_size = int(mode[0])
    pool_stride = int(mode[0])
    # first code drives the pooling layer ('M'), the remainder builds the conv tail
    mode = mode.replace(mode[0], 'MC')
    pool = conv(kernel_size=pool_size, stride=pool_stride, mode=mode[0], negative_slope=negative_slope)
    pool_tail = conv(in_channels, out_channels, kernel_size, stride, padding, bias,
                     mode=mode[1:], negative_slope=negative_slope)
    return sequential(pool, pool_tail)
# --------------------------------------------
# averagepooling + conv (+ relu)
# --------------------------------------------
def downsample_avgpool(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='2R', negative_slope=0.2):
    """AvgPool2d downsampling followed by a conv block built from the rest of `mode`."""
    assert len(mode) < 4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    pool_size = int(mode[0])
    pool_stride = int(mode[0])
    # first code drives the pooling layer ('A'), the remainder builds the conv tail
    mode = mode.replace(mode[0], 'AC')
    pool = conv(kernel_size=pool_size, stride=pool_stride, mode=mode[0], negative_slope=negative_slope)
    pool_tail = conv(in_channels, out_channels, kernel_size, stride, padding, bias,
                     mode=mode[1:], negative_slope=negative_slope)
    return sequential(pool, pool_tail)
'''
# --------------------------------------------
# NonLocalBlock2D:
# embedded_gaussian
# +W(softmax(thetaXphi)Xg)
# --------------------------------------------
'''
# --------------------------------------------
# non-local block with embedded_gaussian
# https://github.com/AlexHex7/Non-local_pytorch
# --------------------------------------------
class NonLocalBlock2D(nn.Module):
    """Non-local block with the embedded-Gaussian pairwise function.

    Computes z = W(softmax(theta(x) phi(x)^T) g(x)) + x over flattened
    spatial positions. https://github.com/AlexHex7/Non-local_pytorch
    """
    def __init__(self, nc=64, kernel_size=1, stride=1, padding=0, bias=True, act_mode='B', downsample=False, downsample_mode='maxpool', negative_slope=0.2):
        super(NonLocalBlock2D, self).__init__()

        inter_nc = nc // 2
        self.inter_nc = inter_nc
        # W projects back to nc channels (act_mode adds e.g. a norm layer)
        self.W = conv(inter_nc, nc, kernel_size, stride, padding, bias, mode='C'+act_mode)
        self.theta = conv(nc, inter_nc, kernel_size, stride, padding, bias, mode='C')

        if downsample:
            # phi/g work on spatially downsampled features to cut the cost
            # of the pairwise affinity matrix
            if downsample_mode == 'avgpool':
                downsample_block = downsample_avgpool
            elif downsample_mode == 'maxpool':
                downsample_block = downsample_maxpool
            elif downsample_mode == 'strideconv':
                downsample_block = downsample_strideconv
            else:
                raise NotImplementedError('downsample mode [{:s}] is not found'.format(downsample_mode))
            self.phi = downsample_block(nc, inter_nc, kernel_size, stride, padding, bias, mode='2')
            self.g = downsample_block(nc, inter_nc, kernel_size, stride, padding, bias, mode='2')
        else:
            self.phi = conv(nc, inter_nc, kernel_size, stride, padding, bias, mode='C')
            self.g = conv(nc, inter_nc, kernel_size, stride, padding, bias, mode='C')

    def forward(self, x):
        '''
        :param x: (b, c, h, w)
        :return: (b, c, h, w), non-local response added to the input
        '''
        batch_size = x.size(0)

        # flatten spatial dims: (b, inter_nc, h*w) -> (b, h*w, inter_nc)
        g_x = self.g(x).view(batch_size, self.inter_nc, -1)
        g_x = g_x.permute(0, 2, 1)

        theta_x = self.theta(x).view(batch_size, self.inter_nc, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_nc, -1)
        # pairwise affinity between positions, softmax-normalized (embedded Gaussian)
        f = torch.matmul(theta_x, phi_x)
        f_div_C = F.softmax(f, dim=-1)

        # aggregate g features with the affinity weights, restore spatial layout
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_nc, *x.size()[2:])
        W_y = self.W(y)
        z = W_y + x  # residual connection

        return z
| 24,138 | 39.775338 | 160 | py |
SwinMR | SwinMR-main/models/select_model.py | '''
# -----------------------------------------
Define Training Model
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
def define_Model(opt):
    """Instantiate the training model named by ``opt['model']``.

    Imports are deferred until the matching model is requested, so only the
    selected module is loaded.

    Raises:
        NotImplementedError: if the model name is unknown.
    """
    def _load_swinmr_pi():
        from models.model_swinmr_pi import MRI_SwinMR_PI
        return MRI_SwinMR_PI

    def _load_swinmr_npi():
        from models.model_swinmr import MRI_SwinMR_NPI
        return MRI_SwinMR_NPI

    # --------------------------------------------------------
    # SwinMR (with / without parallel imaging)
    # --------------------------------------------------------
    loaders = {
        'swinmr_pi': _load_swinmr_pi,
        'swinmr_npi': _load_swinmr_npi,
    }

    model = opt['model']
    if model not in loaders:
        raise NotImplementedError('Model [{:s}] is not defined.'.format(model))

    m = loaders[model]()(opt)
    print('Training model [{:s}] is created.'.format(m.__class__.__name__))
    return m
| 731 | 26.111111 | 79 | py |
SwinMR | SwinMR-main/models/select_mask.py | '''
# -----------------------------------------
Define Undersampling Mask
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
import os
import scipy
import scipy.fftpack
from scipy.io import loadmat
import cv2
import numpy as np
def define_Mask(opt):
    """Load the undersampling mask selected by ``opt['mask']``.

    Supported names (prefix + sampling percentage):
        G1D10..G1D50  Gaussian 1D .mat masks (key 'maskRS1')
        G2D10..G2D50  Gaussian 2D .mat masks (key 'maskRS2')
        P2D10..P2D50  Poisson 2D .mat masks (key 'population_matrix')
        R10..R90      radial .tif masks (stored fft-shifted on disk)
        S10..S90      spiral .tif masks (stored fft-shifted on disk)

    Returns:
        The (un-shifted) 256x256 k-space undersampling mask.

    Raises:
        NotImplementedError: if the mask name is not one of the above.
    """
    mask_name = opt['mask']

    # .mat masks: prefix -> (sub-directory, file-name template, key inside the .mat)
    mat_specs = {
        'G1D': ('Gaussian1D', 'GaussianDistribution1DMask_{}.mat', 'maskRS1'),
        'G2D': ('Gaussian2D', 'GaussianDistribution2DMask_{}.mat', 'maskRS2'),
        'P2D': ('Poisson2D', 'PoissonDistributionMask_{}.mat', 'population_matrix'),
    }
    # image masks: prefix -> (sub-directory, file-name template)
    img_specs = {
        'R': ('radial', 'radial_{}.tif'),
        'S': ('spiral', 'spiral_{}.tif'),
    }
    mat_ratios = ('10', '20', '30', '40', '50')
    img_ratios = ('10', '20', '30', '40', '50', '60', '70', '80', '90')

    mask = None
    for prefix, (folder, template, key) in mat_specs.items():
        ratio = mask_name[len(prefix):]
        if mask_name.startswith(prefix) and ratio in mat_ratios:
            mask = loadmat(os.path.join('mask', folder, template.format(ratio)))[key]
            break

    if mask is None:
        for prefix, (folder, template) in img_specs.items():
            ratio = mask_name[len(prefix):]
            if mask_name.startswith(prefix) and ratio in img_ratios:
                # .tif masks are stored fft-shifted; undo the shift here
                mask_shift = cv2.imread(os.path.join('mask', folder, template.format(ratio)), 0) / 255
                mask = scipy.fftpack.fftshift(mask_shift)
                break

    if mask is None:
        # fixed message: this function selects a mask, not a model
        raise NotImplementedError('Mask [{:s}] is not defined.'.format(mask_name))

    print('Training mask [{:s}] is created.'.format(mask_name))
    return mask
| 5,773 | 47.521008 | 112 | py |
SwinMR | SwinMR-main/models/model_swinmr_pi.py | '''
# -----------------------------------------
Model
SwinMR (PI) m.1.3
by Jiahao Huang (j.huang21@imperial.ac.uk)
Thanks:
https://github.com/JingyunLiang/SwinIR
https://github.com/microsoft/Swin-Transformer
# -----------------------------------------
'''
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
from torch.optim import Adam, AdamW
from models.select_network import define_G
from models.model_base import ModelBase
from models.loss import CharbonnierLoss, PerceptualLoss
from models.loss_ssim import SSIMLoss
from utils.utils_model import test_mode
from utils.utils_regularizers import regularizer_orth, regularizer_clip
from utils.utils_swinmr import *
import matplotlib.pyplot as plt
import einops
from math import ceil
import copy
class MRI_SwinMR_PI(ModelBase):
    """SwinMR (parallel imaging) training/testing wrapper.

    Bundles the generator network (netG), its composite loss
    (image + frequency + perceptual), optimizer, scheduler, optional EMA
    copy (netE) and logging. Batches are dicts with keys 'H' (ground
    truth), 'L' (undersampled input) and 'SM' (coil sensitivity maps;
    the loss expands single-channel images to 12 coil channels).
    """

    def __init__(self, opt):
        super(MRI_SwinMR_PI, self).__init__(opt)
        # ------------------------------------
        # define network
        # ------------------------------------
        self.opt_train = self.opt['train']          # training option
        self.opt_dataset = self.opt['datasets']
        self.netG = define_G(opt)
        self.netG = self.model_to_device(self.netG)
        if self.opt_train['freeze_patch_embedding']:
            # stop gradient flow through the patch-embedding layer
            for para in self.netG.module.patch_embed.parameters():
                para.requires_grad = False
            print("Patch Embedding Frozen (Requires Grad)!")
        if self.opt_train['E_decay'] > 0:
            # netE: exponential-moving-average copy of netG, eval-only
            self.netE = define_G(opt).to(self.device).eval()

    """
    # ----------------------------------------
    # Preparation before training with data
    # Save model during training
    # ----------------------------------------
    """

    # ----------------------------------------
    # initialize training
    # ----------------------------------------
    def init_train(self):
        """Load weights/optimizer state and set up loss, optimizer, scheduler."""
        self.load()                           # load model
        self.netG.train()                     # set training mode,for BN
        self.define_loss()                    # define loss
        self.define_optimizer()               # define optimizer
        self.load_optimizers()                # load optimizer
        self.define_scheduler()               # define scheduler
        self.log_dict = OrderedDict()         # log

    # ----------------------------------------
    # load pre-trained G and E model
    # ----------------------------------------
    def load(self):
        load_path_G = self.opt['path']['pretrained_netG']
        if load_path_G is not None:
            print('Loading model for G [{:s}] ...'.format(load_path_G))
            self.load_network(load_path_G, self.netG, strict=self.opt_train['G_param_strict'], param_key='params')
        load_path_E = self.opt['path']['pretrained_netE']
        if self.opt_train['E_decay'] > 0:
            if load_path_E is not None:
                print('Loading model for E [{:s}] ...'.format(load_path_E))
                self.load_network(load_path_E, self.netE, strict=self.opt_train['E_param_strict'], param_key='params_ema')
            else:
                # no checkpoint for E: start the EMA from the current G weights
                print('Copying model for E ...')
                self.update_E(0)
            self.netE.eval()

    # ----------------------------------------
    # load optimizer
    # ----------------------------------------
    def load_optimizers(self):
        load_path_optimizerG = self.opt['path']['pretrained_optimizerG']
        if load_path_optimizerG is not None and self.opt_train['G_optimizer_reuse']:
            print('Loading optimizerG [{:s}] ...'.format(load_path_optimizerG))
            self.load_optimizer(load_path_optimizerG, self.G_optimizer)

    # ----------------------------------------
    # save model / optimizer(optional)
    # ----------------------------------------
    def save(self, iter_label):
        self.save_network(self.save_dir, self.netG, 'G', iter_label)
        if self.opt_train['E_decay'] > 0:
            self.save_network(self.save_dir, self.netE, 'E', iter_label)
        if self.opt_train['G_optimizer_reuse']:
            self.save_optimizer(self.save_dir, self.G_optimizer, 'optimizerG', iter_label)

    # ----------------------------------------
    # define loss
    # ----------------------------------------
    def define_loss(self):
        """Build the pixel loss selected by opt and the fixed perceptual loss."""
        G_lossfn_type = self.opt_train['G_lossfn_type']
        if G_lossfn_type == 'l1':
            self.G_lossfn = nn.L1Loss().to(self.device)
        elif G_lossfn_type == 'l2':
            self.G_lossfn = nn.MSELoss().to(self.device)
        elif G_lossfn_type == 'l2sum':
            self.G_lossfn = nn.MSELoss(reduction='sum').to(self.device)
        elif G_lossfn_type == 'ssim':
            self.G_lossfn = SSIMLoss().to(self.device)
        elif G_lossfn_type == 'charbonnier':
            self.G_lossfn = CharbonnierLoss(self.opt_train['G_charbonnier_eps']).to(self.device)
        else:
            raise NotImplementedError('Loss type [{:s}] is not found.'.format(G_lossfn_type))
        self.G_lossfn_weight = self.opt_train['G_lossfn_weight']
        self.perceptual_lossfn = PerceptualLoss().to(self.device)

    def total_loss(self):
        """Weighted sum: alpha*image + beta*k-space + gamma*perceptual.

        Image and k-space terms are computed on coil-weighted (sensitivity-map
        multiplied) images; the perceptual term uses the coil-combined images.
        """
        self.alpha = self.opt_train['alpha']
        self.beta = self.opt_train['beta']
        self.gamma = self.opt_train['gamma']
        # H (1,1,256,256) ---> (1,12,256,256)
        # SM (1,12,256,256)
        # H_multi (1,12,256,256)
        self.H_multi = torch.mul(self.SM, self.H.repeat(1, 12, 1, 1))
        self.E_multi = torch.mul(self.SM, self.E.repeat(1, 12, 1, 1))
        self.H_k_real, self.H_k_imag = fft_map(self.H_multi)
        self.E_k_real, self.E_k_imag = fft_map(self.E_multi)
        self.loss_image = self.G_lossfn(self.E_multi, self.H_multi)
        self.loss_freq = (self.G_lossfn(self.E_k_real, self.H_k_real) + self.G_lossfn(self.E_k_imag, self.H_k_imag)) / 2
        self.loss_perc = self.perceptual_lossfn(self.E, self.H)
        return self.alpha * self.loss_image + self.beta * self.loss_freq + self.gamma * self.loss_perc

    # ----------------------------------------
    # define optimizer
    # ----------------------------------------
    def define_optimizer(self):
        G_optim_params = []
        for k, v in self.netG.named_parameters():
            if v.requires_grad:
                G_optim_params.append(v)
            else:
                print('Params [{:s}] will not optimize.'.format(k))
        if self.opt_train['G_optimizer_type'] == 'adam':
            if self.opt_train['freeze_patch_embedding']:
                self.G_optimizer = Adam(filter(lambda p: p.requires_grad, G_optim_params), lr=self.opt_train['G_optimizer_lr'], weight_decay=self.opt_train['G_optimizer_wd'])
                print("Patch Embedding Frozen (Optimizer)!")
            else:
                self.G_optimizer = Adam(G_optim_params, lr=self.opt_train['G_optimizer_lr'], weight_decay=self.opt_train['G_optimizer_wd'])
        elif self.opt_train['G_optimizer_type'] == 'adamw':
            if self.opt_train['freeze_patch_embedding']:
                self.G_optimizer = AdamW(filter(lambda p: p.requires_grad, G_optim_params), lr=self.opt_train['G_optimizer_lr'], weight_decay=self.opt_train['G_optimizer_wd'])
                print("Patch Embedding Frozen (Optimizer)!")
            else:
                self.G_optimizer = AdamW(G_optim_params, lr=self.opt_train['G_optimizer_lr'], weight_decay=self.opt_train['G_optimizer_wd'])
        else:
            raise NotImplementedError

    # ----------------------------------------
    # define scheduler, only "MultiStepLR"
    # ----------------------------------------
    def define_scheduler(self):
        self.schedulers.append(lr_scheduler.MultiStepLR(self.G_optimizer,
                                                        self.opt_train['G_scheduler_milestones'],
                                                        self.opt_train['G_scheduler_gamma']
                                                        ))

    """
    # ----------------------------------------
    # Optimization during training with data
    # Testing/evaluation
    # ----------------------------------------
    """

    # ----------------------------------------
    # feed L/H data
    # ----------------------------------------
    def feed_data(self, data, need_H=True):
        self.H = data['H'].to(self.device)
        self.L = data['L'].to(self.device)
        self.SM = data['SM'].to(self.device)
        # self.mask = data['mask'].to(self.device)

    # ----------------------------------------
    # feed L to netG
    # ----------------------------------------
    def netG_forward(self):
        self.E = self.netG(self.L)

    # ----------------------------------------
    # update parameters and get loss
    # ----------------------------------------
    def optimize_parameters(self, current_step):
        """One training step: forward, backward, (optional) clip, step, log."""
        self.current_step = current_step
        self.G_optimizer.zero_grad()
        self.netG_forward()
        G_loss = self.G_lossfn_weight * self.total_loss()
        G_loss.backward()

        # ------------------------------------
        # clip_grad
        # ------------------------------------
        # `clip_grad_norm` helps prevent the exploding gradient problem.
        G_optimizer_clipgrad = self.opt_train['G_optimizer_clipgrad'] if self.opt_train['G_optimizer_clipgrad'] else 0
        if G_optimizer_clipgrad > 0:
            # FIX: clip the generator's gradients. The original called
            # `self.parameters()`, but this wrapper does not appear to be an
            # nn.Module, so enabling clipgrad raised AttributeError.
            torch.nn.utils.clip_grad_norm_(self.netG.parameters(), max_norm=self.opt_train['G_optimizer_clipgrad'], norm_type=2)

        self.G_optimizer.step()

        # ------------------------------------
        # regularizer
        # ------------------------------------
        G_regularizer_orthstep = self.opt_train['G_regularizer_orthstep'] if self.opt_train['G_regularizer_orthstep'] else 0
        if G_regularizer_orthstep > 0 and current_step % G_regularizer_orthstep == 0 and current_step % self.opt['train']['checkpoint_save'] != 0:
            self.netG.apply(regularizer_orth)
        G_regularizer_clipstep = self.opt_train['G_regularizer_clipstep'] if self.opt_train['G_regularizer_clipstep'] else 0
        if G_regularizer_clipstep > 0 and current_step % G_regularizer_clipstep == 0 and current_step % self.opt['train']['checkpoint_save'] != 0:
            self.netG.apply(regularizer_clip)

        # ------------------------------------
        # record log
        # ------------------------------------
        # (log key spelling 'preceptual' kept for backward compatibility)
        self.log_dict['G_loss'] = G_loss.item()
        self.log_dict['G_loss_image'] = self.loss_image.item()
        self.log_dict['G_loss_frequency'] = self.loss_freq.item()
        self.log_dict['G_loss_preceptual'] = self.loss_perc.item()

        if self.opt_train['E_decay'] > 0:
            self.update_E(self.opt_train['E_decay'])

    def record_loss_for_val(self):
        """Recompute the loss (no backward) and store it in the log dict."""
        G_loss = self.G_lossfn_weight * self.total_loss()
        self.log_dict['G_loss'] = G_loss.item()
        self.log_dict['G_loss_image'] = self.loss_image.item()
        self.log_dict['G_loss_frequency'] = self.loss_freq.item()
        self.log_dict['G_loss_preceptual'] = self.loss_perc.item()

    def check_windowsize(self):
        """Mirror-pad H and L so both sides are multiples of the window size."""
        self.window_size = self.opt['netG']['window_size']
        _, _, h_old, w_old = self.H.size()
        h_pad = ceil(h_old / self.window_size) * self.window_size - h_old  # downsampling for 3 times (2^3=8)
        w_pad = ceil(w_old / self.window_size) * self.window_size - w_old
        self.h_old = h_old
        self.w_old = w_old
        self.H = torch.cat([self.H, torch.flip(self.H, [2])], 2)[:, :, :h_old + h_pad, :]
        self.H = torch.cat([self.H, torch.flip(self.H, [3])], 3)[:, :, :, :w_old + w_pad]
        self.L = torch.cat([self.L, torch.flip(self.L, [2])], 2)[:, :, :h_old + h_pad, :]
        self.L = torch.cat([self.L, torch.flip(self.L, [3])], 3)[:, :, :, :w_old + w_pad]

    def recover_windowsize(self):
        """Crop L/H/E back to the original (pre-padding) spatial size."""
        self.L = self.L[..., :self.h_old, :self.w_old]
        self.H = self.H[..., :self.h_old, :self.w_old]
        self.E = self.E[..., :self.h_old, :self.w_old]

    # ----------------------------------------
    # test / inference
    # ----------------------------------------
    def test(self):
        self.netG.eval()
        with torch.no_grad():
            self.netG_forward()
        self.netG.train()

    # ----------------------------------------
    # get log_dict
    # ----------------------------------------
    def current_log(self):
        return self.log_dict

    # ----------------------------------------
    # get L, E, H image
    # ----------------------------------------
    def current_visuals(self, need_H=True):
        # first sample of the batch, moved to CPU
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach()[0].float().cpu()
        out_dict['E'] = self.E.detach()[0].float().cpu()
        if need_H:
            out_dict['H'] = self.H.detach()[0].float().cpu()
        return out_dict

    def current_visuals_gpu(self, need_H=True):
        # first sample of the batch, kept on the current device
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach()[0].float()
        out_dict['E'] = self.E.detach()[0].float()
        if need_H:
            out_dict['H'] = self.H.detach()[0].float()
        return out_dict

    # ----------------------------------------
    # get L, E, H batch images
    # ----------------------------------------
    def current_results(self, need_H=True):
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach().float().cpu()
        out_dict['E'] = self.E.detach().float().cpu()
        if need_H:
            out_dict['H'] = self.H.detach().float().cpu()
        return out_dict

    def current_results_gpu(self, need_H=True):
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach().float()
        out_dict['E'] = self.E.detach().float()
        if need_H:
            out_dict['H'] = self.H.detach().float()
        return out_dict

    """
    # ----------------------------------------
    # Information of netG
    # ----------------------------------------
    """

    # ----------------------------------------
    # print network
    # ----------------------------------------
    def print_network(self):
        msg = self.describe_network(self.netG)
        print(msg)

    # ----------------------------------------
    # print params
    # ----------------------------------------
    def print_params(self):
        msg = self.describe_params(self.netG)
        print(msg)

    # ----------------------------------------
    # network information
    # ----------------------------------------
    def info_network(self):
        msg = self.describe_network(self.netG)
        return msg

    # ----------------------------------------
    # params information
    # ----------------------------------------
    def info_params(self):
        msg = self.describe_params(self.netG)
        return msg
| 14,836 | 39.649315 | 176 | py |
SwinMR | SwinMR-main/models/model_swinmr.py | '''
# -----------------------------------------
Model
SwinMR m.1.3
by Jiahao Huang (j.huang21@imperial.ac.uk)
Thanks:
https://github.com/JingyunLiang/SwinIR
https://github.com/microsoft/Swin-Transformer
# -----------------------------------------
'''
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
from torch.optim import Adam, AdamW
from models.select_network import define_G
from models.model_base import ModelBase
from models.loss import CharbonnierLoss, PerceptualLoss
from models.loss_ssim import SSIMLoss
from utils.utils_model import test_mode
from utils.utils_regularizers import regularizer_orth, regularizer_clip
from utils.utils_swinmr import *
import matplotlib.pyplot as plt
import einops
from math import ceil
import copy
class MRI_SwinMR_NPI(ModelBase):
    """SwinMR (no parallel imaging) training/testing wrapper.

    Bundles the generator network (netG), its composite loss
    (image + frequency + perceptual), optimizer, scheduler, optional EMA
    copy (netE) and logging. Batches are dicts with keys 'H' (ground
    truth) and 'L' (undersampled input); no sensitivity maps are used.
    """

    def __init__(self, opt):
        super(MRI_SwinMR_NPI, self).__init__(opt)
        # ------------------------------------
        # define network
        # ------------------------------------
        self.opt_train = self.opt['train']          # training option
        self.opt_dataset = self.opt['datasets']
        self.netG = define_G(opt)
        self.netG = self.model_to_device(self.netG)
        if self.opt_train['freeze_patch_embedding']:
            # stop gradient flow through the patch-embedding layer
            for para in self.netG.module.patch_embed.parameters():
                para.requires_grad = False
            print("Patch Embedding Frozen (Requires Grad)!")
        if self.opt_train['E_decay'] > 0:
            # netE: exponential-moving-average copy of netG, eval-only
            self.netE = define_G(opt).to(self.device).eval()

    """
    # ----------------------------------------
    # Preparation before training with data
    # Save model during training
    # ----------------------------------------
    """

    # ----------------------------------------
    # initialize training
    # ----------------------------------------
    def init_train(self):
        """Load weights/optimizer state and set up loss, optimizer, scheduler."""
        self.load()                           # load model
        self.netG.train()                     # set training mode,for BN
        self.define_loss()                    # define loss
        self.define_optimizer()               # define optimizer
        self.load_optimizers()                # load optimizer
        self.define_scheduler()               # define scheduler
        self.log_dict = OrderedDict()         # log

    # ----------------------------------------
    # load pre-trained G and E model
    # ----------------------------------------
    def load(self):
        load_path_G = self.opt['path']['pretrained_netG']
        if load_path_G is not None:
            print('Loading model for G [{:s}] ...'.format(load_path_G))
            self.load_network(load_path_G, self.netG, strict=self.opt_train['G_param_strict'], param_key='params')
        load_path_E = self.opt['path']['pretrained_netE']
        if self.opt_train['E_decay'] > 0:
            if load_path_E is not None:
                print('Loading model for E [{:s}] ...'.format(load_path_E))
                self.load_network(load_path_E, self.netE, strict=self.opt_train['E_param_strict'], param_key='params_ema')
            else:
                # no checkpoint for E: start the EMA from the current G weights
                print('Copying model for E ...')
                self.update_E(0)
            self.netE.eval()

    # ----------------------------------------
    # load optimizer
    # ----------------------------------------
    def load_optimizers(self):
        load_path_optimizerG = self.opt['path']['pretrained_optimizerG']
        if load_path_optimizerG is not None and self.opt_train['G_optimizer_reuse']:
            print('Loading optimizerG [{:s}] ...'.format(load_path_optimizerG))
            self.load_optimizer(load_path_optimizerG, self.G_optimizer)

    # ----------------------------------------
    # save model / optimizer(optional)
    # ----------------------------------------
    def save(self, iter_label):
        self.save_network(self.save_dir, self.netG, 'G', iter_label)
        if self.opt_train['E_decay'] > 0:
            self.save_network(self.save_dir, self.netE, 'E', iter_label)
        if self.opt_train['G_optimizer_reuse']:
            self.save_optimizer(self.save_dir, self.G_optimizer, 'optimizerG', iter_label)

    # ----------------------------------------
    # define loss
    # ----------------------------------------
    def define_loss(self):
        """Build the pixel loss selected by opt and the fixed perceptual loss."""
        G_lossfn_type = self.opt_train['G_lossfn_type']
        if G_lossfn_type == 'l1':
            self.G_lossfn = nn.L1Loss().to(self.device)
        elif G_lossfn_type == 'l2':
            self.G_lossfn = nn.MSELoss().to(self.device)
        elif G_lossfn_type == 'l2sum':
            self.G_lossfn = nn.MSELoss(reduction='sum').to(self.device)
        elif G_lossfn_type == 'ssim':
            self.G_lossfn = SSIMLoss().to(self.device)
        elif G_lossfn_type == 'charbonnier':
            self.G_lossfn = CharbonnierLoss(self.opt_train['G_charbonnier_eps']).to(self.device)
        else:
            raise NotImplementedError('Loss type [{:s}] is not found.'.format(G_lossfn_type))
        self.G_lossfn_weight = self.opt_train['G_lossfn_weight']
        self.perceptual_lossfn = PerceptualLoss().to(self.device)

    def total_loss(self):
        """Weighted sum: alpha*image + beta*k-space + gamma*perceptual."""
        self.alpha = self.opt_train['alpha']
        self.beta = self.opt_train['beta']
        self.gamma = self.opt_train['gamma']
        # H HR, E Recon, L LR
        self.H_k_real, self.H_k_imag = fft_map(self.H)
        self.E_k_real, self.E_k_imag = fft_map(self.E)
        self.loss_image = self.G_lossfn(self.E, self.H)
        self.loss_freq = (self.G_lossfn(self.E_k_real, self.H_k_real) + self.G_lossfn(self.E_k_imag, self.H_k_imag)) / 2
        self.loss_perc = self.perceptual_lossfn(self.E, self.H)
        return self.alpha * self.loss_image + self.beta * self.loss_freq + self.gamma * self.loss_perc

    # ----------------------------------------
    # define optimizer
    # ----------------------------------------
    def define_optimizer(self):
        G_optim_params = []
        for k, v in self.netG.named_parameters():
            if v.requires_grad:
                G_optim_params.append(v)
            else:
                print('Params [{:s}] will not optimize.'.format(k))
        if self.opt_train['G_optimizer_type'] == 'adam':
            if self.opt_train['freeze_patch_embedding']:
                self.G_optimizer = Adam(filter(lambda p: p.requires_grad, G_optim_params), lr=self.opt_train['G_optimizer_lr'], weight_decay=self.opt_train['G_optimizer_wd'])
                print("Patch Embedding Frozen (Optimizer)!")
            else:
                self.G_optimizer = Adam(G_optim_params, lr=self.opt_train['G_optimizer_lr'], weight_decay=self.opt_train['G_optimizer_wd'])
        elif self.opt_train['G_optimizer_type'] == 'adamw':
            if self.opt_train['freeze_patch_embedding']:
                self.G_optimizer = AdamW(filter(lambda p: p.requires_grad, G_optim_params), lr=self.opt_train['G_optimizer_lr'], weight_decay=self.opt_train['G_optimizer_wd'])
                print("Patch Embedding Frozen (Optimizer)!")
            else:
                self.G_optimizer = AdamW(G_optim_params, lr=self.opt_train['G_optimizer_lr'], weight_decay=self.opt_train['G_optimizer_wd'])
        else:
            raise NotImplementedError

    # ----------------------------------------
    # define scheduler, only "MultiStepLR"
    # ----------------------------------------
    def define_scheduler(self):
        self.schedulers.append(lr_scheduler.MultiStepLR(self.G_optimizer,
                                                        self.opt_train['G_scheduler_milestones'],
                                                        self.opt_train['G_scheduler_gamma']
                                                        ))

    """
    # ----------------------------------------
    # Optimization during training with data
    # Testing/evaluation
    # ----------------------------------------
    """

    # ----------------------------------------
    # feed L/H data
    # ----------------------------------------
    def feed_data(self, data, need_H=True):
        self.H = data['H'].to(self.device)
        self.L = data['L'].to(self.device)
        # self.mask = data['mask'].to(self.device)

    # ----------------------------------------
    # feed L to netG
    # ----------------------------------------
    def netG_forward(self):
        self.E = self.netG(self.L)

    # ----------------------------------------
    # update parameters and get loss
    # ----------------------------------------
    def optimize_parameters(self, current_step):
        """One training step: forward, backward, (optional) clip, step, log."""
        self.current_step = current_step
        self.G_optimizer.zero_grad()
        self.netG_forward()
        G_loss = self.G_lossfn_weight * self.total_loss()
        G_loss.backward()

        # ------------------------------------
        # clip_grad
        # ------------------------------------
        # `clip_grad_norm` helps prevent the exploding gradient problem.
        G_optimizer_clipgrad = self.opt_train['G_optimizer_clipgrad'] if self.opt_train['G_optimizer_clipgrad'] else 0
        if G_optimizer_clipgrad > 0:
            # FIX: clip the generator's gradients. The original called
            # `self.parameters()`, but this wrapper does not appear to be an
            # nn.Module, so enabling clipgrad raised AttributeError.
            torch.nn.utils.clip_grad_norm_(self.netG.parameters(), max_norm=self.opt_train['G_optimizer_clipgrad'], norm_type=2)

        self.G_optimizer.step()

        # ------------------------------------
        # regularizer
        # ------------------------------------
        G_regularizer_orthstep = self.opt_train['G_regularizer_orthstep'] if self.opt_train['G_regularizer_orthstep'] else 0
        if G_regularizer_orthstep > 0 and current_step % G_regularizer_orthstep == 0 and current_step % self.opt['train']['checkpoint_save'] != 0:
            self.netG.apply(regularizer_orth)
        G_regularizer_clipstep = self.opt_train['G_regularizer_clipstep'] if self.opt_train['G_regularizer_clipstep'] else 0
        if G_regularizer_clipstep > 0 and current_step % G_regularizer_clipstep == 0 and current_step % self.opt['train']['checkpoint_save'] != 0:
            self.netG.apply(regularizer_clip)

        # ------------------------------------
        # record log
        # ------------------------------------
        # (log key spelling 'preceptual' kept for backward compatibility)
        self.log_dict['G_loss'] = G_loss.item()
        self.log_dict['G_loss_image'] = self.loss_image.item()
        self.log_dict['G_loss_frequency'] = self.loss_freq.item()
        self.log_dict['G_loss_preceptual'] = self.loss_perc.item()

        if self.opt_train['E_decay'] > 0:
            self.update_E(self.opt_train['E_decay'])

    def record_loss_for_val(self):
        """Recompute the loss (no backward) and store it in the log dict."""
        G_loss = self.G_lossfn_weight * self.total_loss()
        self.log_dict['G_loss'] = G_loss.item()
        self.log_dict['G_loss_image'] = self.loss_image.item()
        self.log_dict['G_loss_frequency'] = self.loss_freq.item()
        self.log_dict['G_loss_preceptual'] = self.loss_perc.item()

    def check_windowsize(self):
        """Mirror-pad H and L so both sides are multiples of the window size."""
        self.window_size = self.opt['netG']['window_size']
        _, _, h_old, w_old = self.H.size()
        h_pad = ceil(h_old / self.window_size) * self.window_size - h_old  # downsampling for 3 times (2^3=8)
        w_pad = ceil(w_old / self.window_size) * self.window_size - w_old
        self.h_old = h_old
        self.w_old = w_old
        self.H = torch.cat([self.H, torch.flip(self.H, [2])], 2)[:, :, :h_old + h_pad, :]
        self.H = torch.cat([self.H, torch.flip(self.H, [3])], 3)[:, :, :, :w_old + w_pad]
        self.L = torch.cat([self.L, torch.flip(self.L, [2])], 2)[:, :, :h_old + h_pad, :]
        self.L = torch.cat([self.L, torch.flip(self.L, [3])], 3)[:, :, :, :w_old + w_pad]

    def recover_windowsize(self):
        """Crop L/H/E back to the original (pre-padding) spatial size."""
        self.L = self.L[..., :self.h_old, :self.w_old]
        self.H = self.H[..., :self.h_old, :self.w_old]
        self.E = self.E[..., :self.h_old, :self.w_old]

    # ----------------------------------------
    # test / inference
    # ----------------------------------------
    def test(self):
        self.netG.eval()
        with torch.no_grad():
            self.netG_forward()
        self.netG.train()

    # ----------------------------------------
    # get log_dict
    # ----------------------------------------
    def current_log(self):
        return self.log_dict

    # ----------------------------------------
    # get L, E, H image
    # ----------------------------------------
    def current_visuals(self, need_H=True):
        # first sample of the batch, moved to CPU
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach()[0].float().cpu()
        out_dict['E'] = self.E.detach()[0].float().cpu()
        if need_H:
            out_dict['H'] = self.H.detach()[0].float().cpu()
        return out_dict

    def current_visuals_gpu(self, need_H=True):
        # first sample of the batch, kept on the current device
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach()[0].float()
        out_dict['E'] = self.E.detach()[0].float()
        if need_H:
            out_dict['H'] = self.H.detach()[0].float()
        return out_dict

    # ----------------------------------------
    # get L, E, H batch images
    # ----------------------------------------
    def current_results(self, need_H=True):
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach().float().cpu()
        out_dict['E'] = self.E.detach().float().cpu()
        if need_H:
            out_dict['H'] = self.H.detach().float().cpu()
        return out_dict

    def current_results_gpu(self, need_H=True):
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach().float()
        out_dict['E'] = self.E.detach().float()
        if need_H:
            out_dict['H'] = self.H.detach().float()
        return out_dict

    """
    # ----------------------------------------
    # Information of netG
    # ----------------------------------------
    """

    # ----------------------------------------
    # print network
    # ----------------------------------------
    def print_network(self):
        msg = self.describe_network(self.netG)
        print(msg)

    # ----------------------------------------
    # print params
    # ----------------------------------------
    def print_params(self):
        msg = self.describe_params(self.netG)
        print(msg)

    # ----------------------------------------
    # network information
    # ----------------------------------------
    def info_network(self):
        msg = self.describe_network(self.netG)
        return msg

    # ----------------------------------------
    # params information
    # ----------------------------------------
    def info_params(self):
        msg = self.describe_params(self.netG)
        return msg
| 14,546 | 39.520891 | 176 | py |
SwinMR | SwinMR-main/utils/utils_early_stopping.py | """
# --------------------------------------------
# Early Stopping
# --------------------------------------------
# Jiahao Huang (j.huang21@imperial.uk.ac)
# 30/Jan/2022
# --------------------------------------------
"""
class EarlyStopping:
    """Track validation PSNR and flag a stop after `patience` checks without improvement.

    Note: despite the classic "validation loss" phrasing, the tracked metric is
    PSNR, where higher is better. `delta` is the minimum gain that counts as an
    improvement. `__call__` returns 1 when the current model should be saved
    (new best score), otherwise 0; `early_stop` becomes True once `patience`
    consecutive non-improving evaluations have been seen.
    """

    def __init__(self, patience=10, delta = 0):
        self.patience = patience        # allowed consecutive non-improvements
        self.counter = 0                # current non-improvement streak
        self.best_score = None          # best PSNR seen so far
        self.early_stop = False         # set once patience is exhausted
        self.delta = delta              # minimum improvement to reset the streak
        self.is_save = 0                # 1 if the latest model should be saved

    def __call__(self, psnr, model, epoch, step):
        # psnr(0 --> +) score(0 --> +)
        if self.best_score is None:
            # first evaluation: just record the baseline, nothing to save yet
            self.best_score = psnr
            self.is_save = 0
        elif psnr >= self.best_score + self.delta:
            # improvement: remember it, mark for saving, restart the countdown
            self.best_score = psnr
            self.is_save = 1
            self.counter = 0
        else:
            # no improvement: extend the streak, possibly trigger the stop
            self.is_save = 0
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
        print(f'EarlyStopping counter of epoch {epoch} step {step} : {self.counter} out of {self.patience}')
        return self.is_save
| 1,297 | 27.217391 | 108 | py |
SwinMR | SwinMR-main/utils/utils_image.py | import os
import math
import random
import numpy as np
import torch
import cv2
from numpy import Inf
from torchvision.utils import make_grid
from datetime import datetime
# import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import skimage.metrics
import SimpleITK as sitk
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import medpy.metric
'''
# --------------------------------------------
Jiahao Huang (j.huang21@imperial.ac.uk)
https://github.com/JiahaoHuang99/MRI_Recon
# --------------------------------------------
# https://github.com/cszn
# https://github.com/twhui/SRGAN-pyTorch
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
# Recognised image / array file extensions (case-sensitive).
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG',
                  '.png', '.PNG',
                  '.ppm', '.PPM',
                  '.bmp', '.BMP',
                  '.tif', '.npy', '.mat']


def is_image_file(filename):
    """Return True if `filename` ends with one of the recognised extensions."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def get_timestamp():
    """Current local time formatted as 'yymmdd-HHMMSS' (used to tag archives)."""
    now = datetime.now()
    return now.strftime('%y%m%d-%H%M%S')
def imshow(x, title=None, cbar=False, figsize=None):
    """Display `x` as a grayscale image with matplotlib (singleton dims squeezed)."""
    plt.figure(figsize=figsize)
    plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
    if title:
        plt.title(title)
    if cbar:
        plt.colorbar()
    plt.show()
def surf(Z, cmap='rainbow', figsize=None):
    """Render the 2-D array `Z` as a 3-D surface plot with matplotlib."""
    plt.figure(figsize=figsize)
    ax3 = plt.axes(projection='3d')
    w, h = Z.shape[:2]
    # build the (X, Y) grid that matches Z's first two dimensions
    xx = np.arange(0, w, 1)
    yy = np.arange(0, h, 1)
    X, Y = np.meshgrid(xx, yy)
    ax3.plot_surface(X, Y, Z, cmap=cmap)
    # ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
    plt.show()
'''
# --------------------------------------------
# get image pathes
# --------------------------------------------
'''
def get_image_paths(dataroot):
    """Collect sorted image paths from a directory or a list of directories.

    Returns None when `dataroot` is neither a string nor a list (including
    None itself), preserving the original behaviour.
    """
    if isinstance(dataroot, str):
        return sorted(_get_paths_from_images(dataroot))
    if isinstance(dataroot, list):
        collected = []
        for root in dataroot:
            collected += sorted(_get_paths_from_images(root))
        return collected
    return None  # e.g. dataroot is None
def _get_paths_from_images(path):
    """Recursively list image files under `path` (deterministic walk order).

    Raises AssertionError when `path` is not a directory or contains no
    recognised image files.
    """
    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
    images = [
        os.path.join(dirpath, fname)
        for dirpath, _, fnames in sorted(os.walk(path))
        for fname in sorted(fnames)
        if is_image_file(fname)
    ]
    assert images, '{:s} has no valid image file'.format(path)
    return images
'''
# --------------------------------------------
# split large images into small images
# --------------------------------------------
'''
def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
    """Split a large image into overlapping square patches.

    Images whose first two dimensions are both larger than `p_max` are cut
    into `p_size` x `p_size` patches with `p_overlap` pixels of overlap
    (plus edge-aligned patches so the borders are fully covered); smaller
    images are returned unchanged as a single-element list.

    Args:
        img: H x W x C numpy array.
        p_size: side length of each square patch.
        p_overlap: overlap (in pixels) between neighbouring patches.
        p_max: images with either side <= p_max are kept whole.

    Returns:
        List of numpy array views into `img`.
    """
    w, h = img.shape[:2]
    patches = []
    if w > p_max and h > p_max:
        # FIX: `np.int` was removed in NumPy 1.24 -- use the builtin `int`.
        w1 = list(np.arange(0, w - p_size, p_size - p_overlap, dtype=int))
        h1 = list(np.arange(0, h - p_size, p_size - p_overlap, dtype=int))
        # always include the last edge-aligned start so the border is covered
        w1.append(w - p_size)
        h1.append(h - p_size)
        # print(w1)
        # print(h1)
        for i in w1:
            for j in h1:
                patches.append(img[i:i + p_size, j:j + p_size, :])
    else:
        patches.append(img)
    return patches
def imssave(imgs, img_path):
    """
    Save each image in `imgs` next to `img_path`, suffixed _0000, _0001, ...

    imgs: list, N images of size WxHxC (3-channel images are assumed RGB and
    converted to BGR for cv2.imwrite; output is always .png)
    """
    img_name, ext = os.path.splitext(os.path.basename(img_path))
    for i, img in enumerate(imgs):
        if img.ndim == 3:
            img = img[:, :, [2, 1, 0]]  # RGB -> BGR for OpenCV
        new_path = os.path.join(os.path.dirname(img_path), img_name + str('_{:04d}'.format(i)) + '.png')
        cv2.imwrite(new_path, img)
def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=512, p_overlap=96, p_max=800):
    """
    split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size),
    and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max)
    will be splitted.

    Args:
        original_dataroot:
        taget_dataroot:
        n_channels: channels to read each source image with (1 or 3)
        p_size: size of small images
        p_overlap: patch size in training is a good choice
        p_max: images with smaller size than (p_max)x(p_max) keep unchanged.
    """
    paths = get_image_paths(original_dataroot)
    for img_path in paths:
        # img_name, ext = os.path.splitext(os.path.basename(img_path))
        img = imread_uint(img_path, n_channels=n_channels)
        patches = patches_from_image(img, p_size, p_overlap, p_max)
        # patches are written as <name>_0000.png, <name>_0001.png, ...
        imssave(patches, os.path.join(taget_dataroot, os.path.basename(img_path)))
        # if original_dataroot == taget_dataroot:
        # del img_path
'''
# --------------------------------------------
# makedir
# --------------------------------------------
'''
def mkdir(path):
    """Create `path` (and intermediate directories); no-op if it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def mkdirs(paths):
    """Create one directory (str) or several (iterable of str) via mkdir()."""
    if isinstance(paths, str):
        mkdir(paths)
        return
    for p in paths:
        mkdir(p)
def mkdir_and_rename(path):
    """Create `path`, first archiving any existing directory with a timestamp suffix."""
    if os.path.exists(path):
        archived = path + '_archived_' + get_timestamp()
        print('Path already exists. Rename it to [{:s}]'.format(archived))
        os.rename(path, archived)
    os.makedirs(path)
'''
# --------------------------------------------
# read image from path
# opencv is fast, but read BGR numpy image
# --------------------------------------------
'''
# --------------------------------------------
# get uint8 image of size HxWxn_channles (RGB)
# --------------------------------------------
def imread_uint(path, n_channels=3):
    """Read an image from `path` as uint8 with `n_channels` channels, RGB order.

    n_channels == 1 -> HxWx1 grayscale; n_channels == 3 -> HxWx3 RGB
    (grayscale sources are replicated across the three channels).
    """
    # input: path
    # output: HxWx3(RGB or GGG), or HxWx1 (G)
    # NOTE(review): any n_channels other than 1 or 3 leaves `img` unbound and
    # the return raises NameError -- confirm callers only pass 1 or 3.
    if n_channels == 1:
        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or G
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # GGG
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB
    return img
# --------------------------------------------
# matlab's imwrite
# --------------------------------------------
def imsave(img, img_path):
    """Write `img` (HxW grayscale or HxWx3 RGB) to `img_path` via OpenCV."""
    img = np.squeeze(img)
    if img.ndim == 3:
        img = img[:, :, [2, 1, 0]]  # RGB -> BGR expected by cv2.imwrite
    cv2.imwrite(img_path, img)
def imwrite(img, img_path):
    """Write `img` (HxW grayscale or HxWx3 RGB) to `img_path` via OpenCV.

    NOTE(review): byte-for-byte identical to imsave() above -- apparently kept
    as an alias for API compatibility.
    """
    img = np.squeeze(img)
    if img.ndim == 3:
        img = img[:, :, [2, 1, 0]]  # RGB -> BGR expected by cv2.imwrite
    cv2.imwrite(img_path, img)
# --------------------------------------------
# get single image of size HxWxn_channles (BGR)
# --------------------------------------------
def read_img(path):
    """Read an image with cv2 and scale it to float32 [0, 1], HxWxC (BGR).

    Grayscale images become HxWx1; a fourth (alpha) channel is dropped.
    NOTE(review): divides by 255 regardless of bit depth -- 16-bit sources
    would not land in [0, 1]; confirm inputs are 8-bit.
    """
    # read image by cv2
    # return: Numpy float32, HWC, BGR, [0,1]
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # cv2.IMREAD_GRAYSCALE
    img = img.astype(np.float32) / 255.
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    # some images have 4 channels
    if img.shape[2] > 3:
        img = img[:, :, :3]
    return img
'''
# --------------------------------------------
# image format conversion
# --------------------------------------------
# numpy(single) <---> numpy(uint)
# numpy(single) <---> tensor
# numpy(uint) <---> tensor
# --------------------------------------------
'''
# --------------------------------------------
# numpy(single) [0, 1] <---> numpy(uint)
# --------------------------------------------
def uint2single(img):
    """uint8 [0, 255] -> float32 [0, 1]."""
    scaled = img / 255.
    return np.float32(scaled)
def single2uint(img):
    """float [0, 1] -> uint8 [0, 255]; values outside [0, 1] are clipped."""
    clipped = img.clip(0, 1)
    return np.uint8((clipped * 255.).round())
def uint162single(img):
    """uint16 [0, 65535] -> float32 [0, 1]."""
    scaled = img / 65535.
    return np.float32(scaled)
def single2uint16(img):
    """Map a float image (clipped to [0, 1]) to uint16 in [0, 65535]."""
    scaled = img.clip(0, 1) * 65535.
    return np.uint16(scaled.round())
# --------------------------------------------
# numpy(uint) (HxWxC or HxW) <---> tensor
# --------------------------------------------
# convert uint to 4-dimensional torch tensor
def uint2tensor4(img):
    """uint8 HxWxC (or HxW) image -> 1xCxHxW float tensor in [0, 1]."""
    arr = np.expand_dims(img, axis=2) if img.ndim == 2 else img
    tens = torch.from_numpy(np.ascontiguousarray(arr))
    return tens.permute(2, 0, 1).float().div(255.).unsqueeze(0)
# convert uint to 3-dimensional torch tensor
def uint2tensor3(img):
    """uint8 HxWxC (or HxW) image -> CxHxW float tensor in [0, 1]."""
    arr = np.expand_dims(img, axis=2) if img.ndim == 2 else img
    tens = torch.from_numpy(np.ascontiguousarray(arr))
    return tens.permute(2, 0, 1).float().div(255.)
# convert float0~1 to 3-dimensional torch tensor
def float2tensor3(img):
    """float HxWxC (or HxW) image -> CxHxW float tensor (values unscaled)."""
    arr = np.expand_dims(img, axis=2) if img.ndim == 2 else img
    return torch.from_numpy(np.ascontiguousarray(arr)).permute(2, 0, 1).float()
# convert 2/3/4-dimensional torch tensor to uint0~255
def tensor2uint(img):
    """2/3/4-D tensor in [0, 1] -> uint8 numpy image (HxW or HxWxC)."""
    arr = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))  # CHW -> HWC
    return np.uint8((arr * 255.0).round())
# convert 2/3/4-dimensional torch tensor to float0~1
def tensor2float(img):
    """2/3/4-D tensor -> float numpy image clamped to [0, 1] (HxW or HxWxC)."""
    arr = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    return np.transpose(arr, (1, 2, 0)) if arr.ndim == 3 else arr
# --------------------------------------------
# numpy(single) (HxWxC) <---> tensor
# --------------------------------------------
# convert single (HxWxC) to 3-dimensional torch tensor
def single2tensor3(img):
    """float HxWxC numpy image -> CxHxW float tensor."""
    contiguous = np.ascontiguousarray(img)
    return torch.from_numpy(contiguous).permute(2, 0, 1).float()
# convert single (HxWxC) to 4-dimensional torch tensor
def single2tensor4(img):
    """float HxWxC numpy image -> 1xCxHxW float tensor."""
    contiguous = np.ascontiguousarray(img)
    return torch.from_numpy(contiguous).permute(2, 0, 1).float().unsqueeze(0)
# convert torch tensor to single
def tensor2single(img):
    """Tensor -> float numpy array; CHW becomes HWC, no clamping."""
    arr = img.data.squeeze().float().cpu().numpy()
    return np.transpose(arr, (1, 2, 0)) if arr.ndim == 3 else arr
# convert torch tensor to single
def tensor2single3(img):
    """Tensor -> float numpy array with a guaranteed channel axis (HxWxC)."""
    arr = img.data.squeeze().float().cpu().numpy()
    if arr.ndim == 3:
        return np.transpose(arr, (1, 2, 0))
    if arr.ndim == 2:
        return np.expand_dims(arr, axis=2)
    return arr
def single2tensor5(img):
    """float HxWxCxT numpy array -> 1xCxHxWxT float tensor."""
    tens = torch.from_numpy(np.ascontiguousarray(img)).float()
    return tens.permute(2, 0, 1, 3).unsqueeze(0)
def single32tensor5(img):
    """float HxWxC numpy array -> 1x1xHxWxC float tensor."""
    tens = torch.from_numpy(np.ascontiguousarray(img)).float()
    return tens.unsqueeze(0).unsqueeze(0)
def single42tensor4(img):
    """float HxWxCxT numpy array -> CxHxWxT float tensor."""
    tens = torch.from_numpy(np.ascontiguousarray(img)).float()
    return tens.permute(2, 0, 1, 3)
# from skimage.io import imread, imsave
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array of BGR channel order
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    '''
    low, high = min_max
    t = tensor.squeeze().float().cpu().clamp_(low, high)  # squeeze first, then clamp
    t = (t - low) / (high - low)  # normalise to [0, 1]
    n_dim = t.dim()
    if n_dim == 4:
        grid = make_grid(t, nrow=int(math.sqrt(len(t))), normalize=False).numpy()
        img_np = np.transpose(grid[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = np.transpose(t.numpy()[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        img_np = t.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        # Important. Unlike matlab, numpy.uint8() WILL NOT round by default.
        img_np = (img_np * 255.0).round()
    return img_np.astype(out_type)
'''
# --------------------------------------------
# Augmentation, flipe and/or rotate
# --------------------------------------------
# The following two are enough.
# (1) augmet_img: numpy image of WxHxC or WxH
# (2) augment_img_tensor4: tensor image 1xCxWxH
# --------------------------------------------
'''
def augment_img_no_rot(img, mode=0):
    """Identity (mode 0) or vertical flip (mode 1); Kai Zhang (github.com/cszn)."""
    if mode == 1:
        return np.flipud(img)
    if mode == 0:
        return img
def augment_img(img, mode=0):
    """Apply one of 8 flip/rotate combinations; Kai Zhang (github.com/cszn)."""
    transforms = {
        0: lambda a: a,
        1: lambda a: np.flipud(np.rot90(a)),
        2: lambda a: np.flipud(a),
        3: lambda a: np.rot90(a, k=3),
        4: lambda a: np.flipud(np.rot90(a, k=2)),
        5: lambda a: np.rot90(a),
        6: lambda a: np.rot90(a, k=2),
        7: lambda a: np.flipud(np.rot90(a, k=3)),
    }
    op = transforms.get(mode)
    return op(img) if op is not None else None
def augment_img_tensor4(img, mode=0):
    """Same 8 flip/rotate combos on a BxCxHxW tensor; Kai Zhang (github.com/cszn)."""
    transforms = {
        0: lambda a: a,
        1: lambda a: a.rot90(1, [2, 3]).flip([2]),
        2: lambda a: a.flip([2]),
        3: lambda a: a.rot90(3, [2, 3]),
        4: lambda a: a.rot90(2, [2, 3]).flip([2]),
        5: lambda a: a.rot90(1, [2, 3]),
        6: lambda a: a.rot90(2, [2, 3]),
        7: lambda a: a.rot90(3, [2, 3]).flip([2]),
    }
    op = transforms.get(mode)
    return op(img) if op is not None else None
def augment_img_tensor(img, mode=0):
    """Apply `augment_img` to a CxHxW or BxCxHxW tensor via a numpy round-trip;
    Kai Zhang (github.com/cszn)."""
    ndim = len(img.size())
    arr = img.data.cpu().numpy()
    if ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))       # CHW -> HWC for augment_img
    elif ndim == 4:
        arr = np.transpose(arr, (2, 3, 1, 0))    # BCHW -> HWCB
    out = torch.from_numpy(np.ascontiguousarray(augment_img(arr, mode=mode)))
    if ndim == 3:
        out = out.permute(2, 0, 1)
    elif ndim == 4:
        out = out.permute(3, 2, 0, 1)
    return out.type_as(img)
def augment_img_np3(img, mode=0):
    """Same 8 augmentations on an HxWxC array, expressed as axis flips plus an
    optional H/W transpose."""
    if mode not in range(8):
        return None
    out = img
    if mode in (4, 5, 6, 7):   # horizontal flip (axis 1)
        out = out[:, ::-1, :]
    if mode in (2, 3, 6, 7):   # vertical flip (axis 0)
        out = out[::-1, :, :]
    if mode in (1, 3, 5, 7):   # transpose H and W
        out = out.transpose(1, 0, 2)
    return out
def augment_imgs(img_list, hflip=True, rot=True):
    """Apply one randomly drawn flip/rotate combination to every image in the
    list (all images get the same transform)."""
    # draw the three coin flips once, in the same order as before
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5

    def _apply(img):
        out = img
        if do_hflip:
            out = out[:, ::-1, :]
        if do_vflip:
            out = out[::-1, :, :]
        if do_rot90:
            out = out.transpose(1, 0, 2)
        return out

    return [_apply(img) for img in img_list]
'''
# --------------------------------------------
# modcrop and shave
# --------------------------------------------
'''
def modcrop(img_in, scale):
    """Crop a numpy HWC/HW image so H and W are divisible by `scale`."""
    img = np.copy(img_in)
    if img.ndim not in (2, 3):
        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
    H, W = img.shape[0], img.shape[1]
    return img[:H - H % scale, :W - W % scale, ...]
def shave(img_in, border=0):
    """Trim `border` pixels from every side of a numpy HWC/HW image (on a copy)."""
    img = np.copy(img_in)
    h, w = img.shape[:2]
    return img[border:h - border, border:w - border]
'''
# --------------------------------------------
# image processing process on numpy image
# channel_convert(in_c, tar_type, img_list):
# rgb2ycbcr(img, only_y=True):
# bgr2ycbcr(img, only_y=True):
# ycbcr2rgb(img):
# --------------------------------------------
'''
def rgb2ycbcr(img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    Output: same dtype as the input (uint8 rounded, float rescaled to [0, 1])
    '''
    in_img_type = img.dtype
    # Work on a float copy. The previous code called `img.astype(np.float32)`
    # without assigning the result (a no-op) and then did `img *= 255.`,
    # mutating the caller's array in place for float inputs.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
def ycbcr2rgb(img):
    '''same as matlab ycbcr2rgb
    Input:
        uint8, [0, 255]
        float, [0, 1]
    Output: same dtype as the input (uint8 rounded, float rescaled to [0, 1])
    '''
    in_img_type = img.dtype
    # Work on a float copy. The previous code called `img.astype(np.float32)`
    # without assigning the result (a no-op) and then did `img *= 255.`,
    # mutating the caller's array in place for float inputs.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
    rlt = np.clip(rlt, 0, 255)
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
def bgr2ycbcr(img, only_y=True):
    '''bgr version of rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    Output: same dtype as the input (uint8 rounded, float rescaled to [0, 1])
    '''
    in_img_type = img.dtype
    # Work on a float copy. The previous code called `img.astype(np.float32)`
    # without assigning the result (a no-op) and then did `img *= 255.`,
    # mutating the caller's array in place for float inputs.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    if only_y:
        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
def channel_convert(in_c, tar_type, img_list):
    """Convert a list of images among BGR, single-channel gray and Y; unmatched
    combinations are returned unchanged."""
    if in_c == 3 and tar_type == 'gray':  # BGR to gray
        return [np.expand_dims(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), axis=2)
                for img in img_list]
    if in_c == 3 and tar_type == 'y':  # BGR to y
        return [np.expand_dims(bgr2ycbcr(img, only_y=True), axis=2)
                for img in img_list]
    if in_c == 1 and tar_type == 'RGB':  # gray/y to BGR
        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
    return img_list
'''
# --------------------------------------------
# metric, PSNR, SSIM and PSNRB
# --------------------------------------------
'''
# --------------------------------------------
# PSNR
# --------------------------------------------
# def calculate_psnr(img1, img2, border=0):
# # img1 and img2 have range [0, 255]
# # img1 = img1.squeeze()
# # img2 = img2.squeeze()
# if not img1.shape == img2.shape:
# raise ValueError('Input images must have the same dimensions.')
# h, w = img1.shape[:2]
# img1 = img1[border:h - border, border:w - border]
# img2 = img2[border:h - border, border:w - border]
#
# img1 = img1.astype(np.float64)
# img2 = img2.astype(np.float64)
# mse = np.mean((img1 - img2) ** 2)
# if mse == 0:
# return float('inf')
# return 20 * math.log10(255.0 / math.sqrt(mse))
def calculate_psnr_single(img1, img2, border=0):
    """PSNR between two same-shaped images, optionally cropping `border`
    pixels from each edge first (delegates to skimage)."""
    if img1.shape != img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    crop1 = img1[border:h - border, border:w - border]
    crop2 = img2[border:h - border, border:w - border]
    # order is (ground truth, reconstruction)
    return skimage.metrics.peak_signal_noise_ratio(crop1, crop2)
# --------------------------------------------
# SSIM
# --------------------------------------------
# def ssim(img1, img2):
# C1 = (0.01 * 255)**2
# C2 = (0.03 * 255)**2
#
# img1 = img1.astype(np.float64)
# img2 = img2.astype(np.float64)
# kernel = cv2.getGaussianKernel(11, 1.5)
# window = np.outer(kernel, kernel.transpose())
#
# mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
# mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
# mu1_sq = mu1**2
# mu2_sq = mu2**2
# mu1_mu2 = mu1 * mu2
# sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
# sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
# sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
#
# ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
# (sigma1_sq + sigma2_sq + C2))
# return ssim_map.mean()
# def calculate_ssim(img1, img2, border=0):
#
# #img1 = img1.squeeze()
# #img2 = img2.squeeze()
# if not img1.shape == img2.shape:
# raise ValueError('Input images must have the same dimensions.')
# h, w = img1.shape[:2]
# img1 = img1[border:h-border, border:w-border]
# img2 = img2[border:h-border, border:w-border]
#
# if img1.ndim == 2:
# return ssim(img1, img2)
# elif img1.ndim == 3:
# if img1.shape[2] == 3:
# ssims = []
# for i in range(3):
# ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
# return np.array(ssims).mean()
# elif img1.shape[2] == 1:
# return ssim(np.squeeze(img1), np.squeeze(img2))
# else:
# raise ValueError('Wrong input image dimensions.')
def calculate_ssim_single(img1, img2, border=0):
    """SSIM between two same-shaped images, optionally cropping `border`
    pixels from each edge first (delegates to skimage)."""
    if img1.shape != img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    crop1 = img1[border:h - border, border:w - border].astype(np.float64)
    crop2 = img2[border:h - border, border:w - border].astype(np.float64)
    return skimage.metrics.structural_similarity(crop1, crop2)
# --------------------------------------------
# LPIPS
# --------------------------------------------
def calculate_lpips_single(func, img1, img2):
    """Apply the LPIPS callable `func` after mapping [0, 1] images to [-1, 1]."""
    if img1.shape != img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    return func(img1 * 2 - 1, img2 * 2 - 1)
def _blocking_effect_factor(im):
    """Blocking-effect factor (BEF) used by `calculate_psnrb`.

    Compares mean squared differences across 8x8 block boundaries against
    those at non-boundary positions; the factor is non-zero only when the
    boundary differences dominate. `im` is a Bx1xHxW tensor (see caller);
    returns one value per batch element.
    """
    block_size = 8
    # column/row indices just left of / above each 8-pixel block boundary
    block_horizontal_positions = torch.arange(7, im.shape[3] - 1, 8)
    block_vertical_positions = torch.arange(7, im.shape[2] - 1, 8)
    # squared differences across block boundaries, summed per batch element
    horizontal_block_difference = (
            (im[:, :, :, block_horizontal_positions] - im[:, :, :, block_horizontal_positions + 1]) ** 2).sum(
        3).sum(2).sum(1)
    vertical_block_difference = (
            (im[:, :, block_vertical_positions, :] - im[:, :, block_vertical_positions + 1, :]) ** 2).sum(3).sum(
        2).sum(1)
    # all remaining (within-block) neighbour positions
    nonblock_horizontal_positions = np.setdiff1d(torch.arange(0, im.shape[3] - 1), block_horizontal_positions)
    nonblock_vertical_positions = np.setdiff1d(torch.arange(0, im.shape[2] - 1), block_vertical_positions)
    horizontal_nonblock_difference = (
            (im[:, :, :, nonblock_horizontal_positions] - im[:, :, :, nonblock_horizontal_positions + 1]) ** 2).sum(
        3).sum(2).sum(1)
    vertical_nonblock_difference = (
            (im[:, :, nonblock_vertical_positions, :] - im[:, :, nonblock_vertical_positions + 1, :]) ** 2).sum(
        3).sum(2).sum(1)
    # normalise by the number of boundary / non-boundary pixel pairs
    n_boundary_horiz = im.shape[2] * (im.shape[3] // block_size - 1)
    n_boundary_vert = im.shape[3] * (im.shape[2] // block_size - 1)
    boundary_difference = (horizontal_block_difference + vertical_block_difference) / (
            n_boundary_horiz + n_boundary_vert)
    n_nonboundary_horiz = im.shape[2] * (im.shape[3] - 1) - n_boundary_horiz
    n_nonboundary_vert = im.shape[3] * (im.shape[2] - 1) - n_boundary_vert
    nonboundary_difference = (horizontal_nonblock_difference + vertical_nonblock_difference) / (
            n_nonboundary_horiz + n_nonboundary_vert)
    # log scaling relative to image size; clamp negative factors to zero
    scaler = np.log2(block_size) / np.log2(min([im.shape[2], im.shape[3]]))
    bef = scaler * (boundary_difference - nonboundary_difference)
    bef[boundary_difference <= nonboundary_difference] = 0
    return bef
def calculate_psnrb(img1, img2, border=0):
    """Calculate PSNR-B (Peak Signal-to-Noise Ratio).
    Ref: Quality assessment of deblocked images, for JPEG image deblocking evaluation
    # https://gitlab.com/Queuecumber/quantization-guided-ac/-/blob/master/metrics/psnrb.py
    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the PSNR calculation.
        test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
    Returns:
        float: psnr result.
    """
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    if img1.ndim == 2:
        img1, img2 = np.expand_dims(img1, 2), np.expand_dims(img2, 2)
    h, w = img1.shape[:2]
    img1 = img1[border:h - border, border:w - border]
    img2 = img2[border:h - border, border:w - border]
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    # follow https://gitlab.com/Queuecumber/quantization-guided-ac/-/blob/master/metrics/psnrb.py
    # rescale to [0, 1] NCHW tensors for the torch-based computation below
    img1 = torch.from_numpy(img1).permute(2, 0, 1).unsqueeze(0) / 255.
    img2 = torch.from_numpy(img2).permute(2, 0, 1).unsqueeze(0) / 255.
    total = 0
    # per-channel PSNR-B: MSE is penalised by the blocking-effect factor of
    # the (reference) first image, then results are averaged over channels
    for c in range(img1.shape[1]):
        mse = torch.nn.functional.mse_loss(img1[:, c:c + 1, :, :], img2[:, c:c + 1, :, :], reduction='none')
        bef = _blocking_effect_factor(img1[:, c:c + 1, :, :])
        mse = mse.view(mse.shape[0], -1).mean(1)
        total += 10 * torch.log10(1 / (mse + bef))
    return float(total) / img1.shape[1]
'''
# --------------------------------------------
# matlab's bicubic imresize (numpy and torch) [0, 1]
# --------------------------------------------
'''
# matlab 'imresize' function, now only support 'bicubic'
def cubic(x):
    """Bicubic interpolation kernel (a = -0.5), evaluated elementwise on a tensor."""
    a = torch.abs(x)
    a2 = a ** 2
    a3 = a ** 3
    near = (1.5 * a3 - 2.5 * a2 + 1) * ((a <= 1).type_as(a))       # |x| <= 1
    far = (-0.5 * a3 + 2.5 * a2 - 4 * a + 2) * (((a > 1) * (a <= 2)).type_as(a))  # 1 < |x| <= 2
    return near + far
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    """For each of the `out_length` output pixels, compute the input indices
    and normalized cubic weights used by `imresize`/`imresize_np`, plus the
    symmetric-padding lengths needed at the start and end of the input axis.
    Returns (weights, indices, sym_len_s, sym_len_e)."""
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale
    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)
    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)
    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)
    # What is the maximum number of pixels that can be involved in the
    # computation? Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2
    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)
    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)
    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # padding amounts so that out-of-range indices fall on mirrored pixels
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
# --------------------------------------------
# imresize for tensor image [0, 1]
# --------------------------------------------
def imresize(img, scale, antialiasing=True):
    """MATLAB-compatible bicubic resize for a CHW (or HW) torch tensor in [0, 1].

    Resizes H then W using the weights/indices from `calculate_weights_indices`
    with symmetric (mirror) boundary handling. No rounding is applied.
    NOTE(review): `unsqueeze_`/`squeeze_` mutate the caller's tensor in place
    when a 2-D input is passed.
    """
    # Now the scale should be the same for H and W
    # input: img: pytorch tensor, CHW or HW [0,1]
    # output: CHW or HW [0,1] w/o round
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(0)
    in_C, in_H, in_W = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Return the desired dimension order for performing the resize. The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    # each output row is a weighted combination of kernel_width input rows
    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    # each output column is a weighted combination of kernel_width input columns
    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2
# --------------------------------------------
# imresize for numpy image [0, 1]
# --------------------------------------------
def imresize_np(img, scale, antialiasing=True):
    """MATLAB-compatible bicubic resize for a numpy HWC (or HW) image in [0, 1].

    Same algorithm as `imresize` but with the channel axis last; resizes H
    then W with symmetric (mirror) boundary handling. No rounding is applied.
    """
    # Now the scale should be the same for H and W
    # input: img: Numpy, HWC or HW [0,1]
    # output: HWC or HW [0,1] w/o round
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(2)
    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Return the desired dimension order for performing the resize. The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    # each output row is a weighted combination of kernel_width input rows
    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    # each output column is a weighted combination of kernel_width input columns
    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2.numpy()
# ------------------------------------------
# Segmentation Metrics
# ------------------------------------------
def get_dice_medpy(res, ref):
    """Dice coefficient via medpy; 1 when both masks are empty, 0 when only one is."""
    # TODO: SUPPORT 3D DATA
    res = np.uint8(res)
    ref = np.uint8(ref)
    res_empty = res.sum() == 0
    ref_empty = ref.sum() == 0
    if res_empty and ref_empty:
        return 1
    if res_empty or ref_empty:
        return 0
    return medpy.metric.binary.dc(res, ref)
def get_hd_medpy(res, ref):
    """Hausdorff distance via medpy; 0 when both masks are empty, Inf when only one is."""
    # TODO: SUPPORT 3D DATA
    res = np.uint8(res)
    ref = np.uint8(ref)
    res_empty = res.sum() == 0
    ref_empty = ref.sum() == 0
    if res_empty and ref_empty:
        return 0
    if res_empty or ref_empty:
        return Inf  # NOTE(review): `Inf` must be provided by a module-level import — verify
    return medpy.metric.binary.hd(res, ref)
# def get_dice_2d(pred, gt):
# EPSILON = 1e-7
# labelPred = sitk.GetImageFromArray(pred + EPSILON, isVector=False)
# labelTrue = sitk.GetImageFromArray(gt + EPSILON, isVector=False)
#
# dicecomputer = sitk.LabelOverlapMeasuresImageFilter()
# # dicecomputer.Execute(labelTrue, labelPred)
# dicecomputer.Execute(labelTrue > 0.5, labelPred > 0.5)
# dice = dicecomputer.GetDiceCoefficient()
#
# return dice
#
# def get_dice_3d(pred, gt):
# EPSILON = 1e-7
# labelPred = sitk.GetImageFromArray(pred + EPSILON, isVector=False)
# labelTrue = sitk.GetImageFromArray(gt + EPSILON, isVector=False)
#
# dicecomputer = sitk.LabelOverlapMeasuresImageFilter()
# # dicecomputer.Execute(labelTrue, labelPred)
# dicecomputer.Execute(labelTrue > 0.5, labelPred > 0.5)
# dice = dicecomputer.GetDiceCoefficient()
#
# return dice
#
# def get_iou_2d(pred, gt):
# EPSILON = 1e-7
# dims = (0, *range(1, len(pred.shape)))
# intersection = pred * gt
# union = pred + gt - intersection
# iou = (np.sum(intersection) + EPSILON) / (np.sum(union) + EPSILON)
#
# return iou
#
# def get_iou_3d(pred, gt):
# EPSILON = 1e-7
# dims = (0, *range(1, len(pred.shape)))
# intersection = pred * gt
# union = pred + gt - intersection
# iou = (np.sum(intersection) + EPSILON) / (np.sum(union) + EPSILON)
#
# return iou
#
#
# def get_hausdorff_2d(pred, gt):
# EPSILON = 1e-7
# labelPred = sitk.GetImageFromArray(pred + EPSILON, isVector=False)
# labelTrue = sitk.GetImageFromArray(gt + EPSILON, isVector=False)
#
# hausdorffcomputer = sitk.HausdorffDistanceImageFilter()
# # hausdorffcomputer.Execute(labelTrue, labelPred)
# hausdorffcomputer.Execute(labelTrue > 0.5, labelPred > 0.5)
# avgHausdorff = hausdorffcomputer.GetAverageHausdorffDistance()
# Hausdorff = hausdorffcomputer.GetHausdorffDistance()
#
# return avgHausdorff, Hausdorff
#
# def get_hausdorff_3d(pred, gt):
# EPSILON = 1e-7
# labelPred = sitk.GetImageFromArray(pred + EPSILON, isVector=False)
# labelTrue = sitk.GetImageFromArray(gt + EPSILON, isVector=False)
#
# hausdorffcomputer = sitk.HausdorffDistanceImageFilter()
# # hausdorffcomputer.Execute(labelTrue, labelPred)
# hausdorffcomputer.Execute(labelTrue > 0.5, labelPred > 0.5)
# avgHausdorff = hausdorffcomputer.GetAverageHausdorffDistance()
# Hausdorff = hausdorffcomputer.GetHausdorffDistance()
#
# return avgHausdorff, Hausdorff
# ad-hoc smoke test: run this module directly to exercise imread_uint (and,
# when uncommented, the resize/augmentation helpers) on a local sample image
if __name__ == '__main__':
    img = imread_uint('test.bmp', 3)
    # img = uint2single(img)
    # img_bicubic = imresize_np(img, 1/4)
    # imshow(single2uint(img_bicubic))
    #
    # img_tensor = single2tensor4(img)
    # for i in range(8):
    # imshow(np.concatenate((augment_img(img, i), tensor2single(augment_img_tensor4(img_tensor, i))), 1))
    # patches = patches_from_image(img, p_size=128, p_overlap=0, p_max=200)
    # imssave(patches,'a.png')
| 38,657 | 31.595278 | 120 | py |
SwinMR | SwinMR-main/utils/utils_dist.py | # Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py # noqa: E501
import functools
import os
import subprocess
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
# ----------------------------------
# init
# ----------------------------------
def init_dist(launcher, backend='nccl', **kwargs):
    """Initialise torch.distributed for the given launcher ('pytorch' or 'slurm')."""
    # the multiprocessing start method must be set before any process-group work
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    setups = {'pytorch': _init_dist_pytorch, 'slurm': _init_dist_slurm}
    if launcher not in setups:
        raise ValueError(f'Invalid launcher type: {launcher}')
    setups[launcher](backend, **kwargs)
def _init_dist_pytorch(backend, **kwargs):
    """Initialise from torchrun-style env vars; RANK picks the local GPU."""
    rank = int(os.environ['RANK'])
    torch.cuda.set_device(rank % torch.cuda.device_count())
    dist.init_process_group(backend=backend, **kwargs)
def _init_dist_slurm(backend, port=None):
    """Initialize slurm distributed training environment.
    If argument ``port`` is not specified, then the master port will be system
    environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system
    environment variable, then a default port ``29500`` will be used.
    Args:
        backend (str): Backend of torch.distributed.
        port (int, optional): Master port. Defaults to None.
    """
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    # bind this process to one of the node's GPUs
    torch.cuda.set_device(proc_id % num_gpus)
    # resolve the first hostname of the allocation as the rendezvous address
    addr = subprocess.getoutput(
        f'scontrol show hostname {node_list} | head -n1')
    # specify master port
    if port is not None:
        os.environ['MASTER_PORT'] = str(port)
    elif 'MASTER_PORT' in os.environ:
        pass # use MASTER_PORT in the environment variable
    else:
        # 29500 is torch.distributed default port
        os.environ['MASTER_PORT'] = '29500'
    # export the variables torch.distributed's env:// init method expects
    os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend=backend)
# ----------------------------------
# get rank and world_size
# ----------------------------------
def get_dist_info():
    """Return (rank, world_size); (0, 1) outside of a distributed run."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank(), dist.get_world_size()
    return 0, 1
def get_rank():
    """Process rank, or 0 outside of a distributed run."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def get_world_size():
    """Number of processes, or 1 outside of a distributed run."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def master_only(func):
    """Decorator: run `func` only on rank 0 (other ranks return None)."""
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        if get_dist_info()[0] != 0:
            return None
        return func(*args, **kwargs)
    return wrapped
# ----------------------------------
# operation across ranks
# ----------------------------------
def reduce_sum(tensor):
    """All-reduce (SUM) a clone of `tensor`; a no-op outside distributed runs."""
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    summed = tensor.clone()
    dist.all_reduce(summed, op=dist.ReduceOp.SUM)
    return summed
def gather_grad(params):
    """Average the gradients of `params` across ranks (no-op on one process)."""
    world_size = get_world_size()
    if world_size == 1:
        return
    for p in params:
        if p.grad is None:
            continue
        dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
        p.grad.data.div_(world_size)
def all_gather(data):
    """Gather an arbitrary picklable object from every rank into a list.

    Returns ``[data]`` immediately on a single process. Otherwise each rank
    pickles its object into a CUDA byte tensor; tensors are right-padded to
    the largest size (``dist.all_gather`` requires equal shapes), exchanged,
    and unpickled back into Python objects (one list entry per rank).
    """
    import pickle  # fix: `pickle` is used below but was never imported at module level

    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # serialize the payload into a CUDA byte tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to('cuda')
    # exchange payload sizes so every rank can pad to the common maximum
    local_size = torch.IntTensor([tensor.numel()]).to('cuda')
    size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda')
        tensor = torch.cat((tensor, padding), 0)
    dist.all_gather(tensor_list, tensor)
    # strip each rank's padding before unpickling
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_loss_dict(loss_dict):
    """Average a dict of scalar loss tensors across ranks; only rank 0 holds
    the divided (mean) values. Returned unchanged on a single process."""
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        # sort keys so every rank stacks the losses in the same order
        names = sorted(loss_dict.keys())
        stacked = torch.stack([loss_dict[k] for k in names], 0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0:
            stacked /= world_size
        reduced = dict(zip(names, stacked))
    return reduced
| 5,275 | 25.118812 | 102 | py |
SwinMR | SwinMR-main/utils/utils_option.py | import os
from collections import OrderedDict
from datetime import datetime
import json
import re
import glob
'''
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
def get_timestamp():
    """Return a filename-friendly timestamp suffix, e.g. '_210131_235959'."""
    now = datetime.now()
    return now.strftime('_%y%m%d_%H%M%S')
def parse(opt_path, is_train=True):
    """Load a '//'-commented JSON option file and fill in all defaults.

    Args:
        opt_path: path to the .json configuration file.
        is_train: True for training (adds 'models'/'images' output dirs),
            False for testing (adds 'test_images' instead).

    Returns:
        OrderedDict with defaults populated. Also sets the
        CUDA_VISIBLE_DEVICES environment variable from opt['gpu_ids'].
    """
    # ----------------------------------------
    # remove comments starting with '//'
    # ----------------------------------------
    json_str = ''
    with open(opt_path, 'r') as f:
        for line in f:
            line = line.split('//')[0] + '\n'
            json_str += line

    # ----------------------------------------
    # initialize opt (ordered, so logs mirror the file)
    # ----------------------------------------
    opt = json.loads(json_str, object_pairs_hook=OrderedDict)

    opt['opt_path'] = opt_path
    opt['is_train'] = is_train

    # ----------------------------------------
    # set default
    # ----------------------------------------
    if 'merge_bn' not in opt:
        opt['merge_bn'] = False
        opt['merge_bn_startpoint'] = -1

    if 'scale' not in opt:
        opt['scale'] = 1

    # ----------------------------------------
    # datasets: broadcast shared settings into every phase entry
    # ----------------------------------------
    for phase, dataset in opt['datasets'].items():
        phase = phase.split('_')[0]
        dataset['phase'] = phase
        dataset['scale'] = opt['scale']  # broadcast
        dataset['n_channels'] = opt['n_channels']  # broadcast
        if 'dataroot_H' in dataset and dataset['dataroot_H'] is not None:
            dataset['dataroot_H'] = os.path.expanduser(dataset['dataroot_H'])
        if 'dataroot_L' in dataset and dataset['dataroot_L'] is not None:
            dataset['dataroot_L'] = os.path.expanduser(dataset['dataroot_L'])

    # ----------------------------------------
    # path: expand '~' and derive per-task output folders
    # ----------------------------------------
    for key, path in opt['path'].items():
        if path and key in opt['path']:
            opt['path'][key] = os.path.expanduser(path)

    path_task = os.path.join(opt['path']['root'], opt['task'])
    opt['path']['task'] = path_task
    opt['path']['log'] = path_task
    opt['path']['options'] = os.path.join(path_task, 'options')

    if is_train:
        opt['path']['models'] = os.path.join(path_task, 'models')
        opt['path']['images'] = os.path.join(path_task, 'images')
    else:  # test
        opt['path']['images'] = os.path.join(path_task, 'test_images')

    # ----------------------------------------
    # network
    # ----------------------------------------
    opt['netG']['scale'] = opt['scale'] if 'scale' in opt else 1

    # ----------------------------------------
    # GPU devices
    # ----------------------------------------
    gpu_list = ','.join(str(x) for x in opt['gpu_ids'])
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
    print('export CUDA_VISIBLE_DEVICES=' + gpu_list)

    # ----------------------------------------
    # default setting for distributeddataparallel
    # ----------------------------------------
    if 'find_unused_parameters' not in opt:
        opt['find_unused_parameters'] = True
    if 'dist' not in opt:
        opt['dist'] = False
    opt['num_gpu'] = len(opt['gpu_ids'])
    print('number of GPUs is: ' + str(opt['num_gpu']))

    # ----------------------------------------
    # default setting for perceptual loss
    # ----------------------------------------
    if 'F_feature_layer' not in opt['train']:
        opt['train']['F_feature_layer'] = 34  # 25; [2,7,16,25,34]
    if 'F_weights' not in opt['train']:
        opt['train']['F_weights'] = 1.0  # 1.0; [0.1,0.1,1.0,1.0,1.0]
    if 'F_lossfn_type' not in opt['train']:
        opt['train']['F_lossfn_type'] = 'l1'
    if 'F_use_input_norm' not in opt['train']:
        opt['train']['F_use_input_norm'] = True
    if 'F_use_range_norm' not in opt['train']:
        opt['train']['F_use_range_norm'] = False

    # ----------------------------------------
    # default setting for optimizer
    # ----------------------------------------
    if 'G_optimizer_reuse' not in opt['train']:
        opt['train']['G_optimizer_reuse'] = False
    if 'netD' in opt and 'D_optimizer_reuse' not in opt['train']:
        opt['train']['D_optimizer_reuse'] = False

    # ----------------------------------------
    # default setting of strict for model loading
    # ----------------------------------------
    if 'G_param_strict' not in opt['train']:
        opt['train']['G_param_strict'] = True
    # BUGFIX: these two flags were previously looked up in opt['path'],
    # which never contains them, so user-provided values in opt['train']
    # were silently overwritten with True.
    if 'netD' in opt and 'D_param_strict' not in opt['train']:
        opt['train']['D_param_strict'] = True
    if 'E_param_strict' not in opt['train']:
        opt['train']['E_param_strict'] = True

    # ----------------------------------------
    # Exponential Moving Average
    # ----------------------------------------
    if 'E_decay' not in opt['train']:
        opt['train']['E_decay'] = 0

    # ----------------------------------------
    # default setting for discriminator
    # ----------------------------------------
    if 'netD' in opt:
        if 'net_type' not in opt['netD']:
            opt['netD']['net_type'] = 'discriminator_patchgan'  # discriminator_unet
        if 'in_nc' not in opt['netD']:
            opt['netD']['in_nc'] = 3
        if 'base_nc' not in opt['netD']:
            opt['netD']['base_nc'] = 64
        if 'n_layers' not in opt['netD']:
            opt['netD']['n_layers'] = 3
        if 'norm_type' not in opt['netD']:
            opt['netD']['norm_type'] = 'spectral'

    return opt
def find_last_checkpoint(save_dir, net_type='G'):
    """
    Args:
        save_dir: model folder
        net_type: 'G' or 'D' or 'optimizerG' or 'optimizerD'

    Return:
        init_iter: latest saved iteration number (0 when no checkpoint exists)
        init_path: path of that checkpoint (None when no checkpoint exists)
    """
    candidates = glob.glob(os.path.join(save_dir, '*_{}.pth'.format(net_type)))
    if not candidates:
        return 0, None
    # checkpoint files are named '<iteration>_<net_type>.pth'
    iters = [int(re.findall(r"(\d+)_{}.pth".format(net_type), name)[0]) for name in candidates]
    last = max(iters)
    return last, os.path.join(save_dir, '{}_{}.pth'.format(last, net_type))
'''
# --------------------------------------------
# convert the opt into json file
# --------------------------------------------
'''
def save(opt):
    """Dump `opt` as pretty-printed JSON into opt['path']['options'],
    with a timestamp appended to the original option file name."""
    dirname, filename_ext = os.path.split(opt['opt_path'])
    filename, ext = os.path.splitext(filename_ext)
    dump_path = os.path.join(opt['path']['options'], filename + get_timestamp() + ext)
    with open(dump_path, 'w') as dump_file:
        json.dump(opt, dump_file, indent=2)
'''
# --------------------------------------------
# dict to string for logger
# --------------------------------------------
'''
def dict2str(opt, indent_l=1):
    """Render a (possibly nested) option dict as an indented multi-line
    string for logging; nested dicts are bracketed with ':[' ... ']'."""
    pad = ' ' * (indent_l * 2)
    msg = ''
    for key, value in opt.items():
        if isinstance(value, dict):
            msg += pad + key + ':[\n'
            msg += dict2str(value, indent_l + 1)
            msg += pad + ']\n'
        else:
            msg += pad + key + ': ' + str(value) + '\n'
    return msg
'''
# --------------------------------------------
# convert OrderedDict to NoneDict,
# return None for missing key
# --------------------------------------------
'''
class NoneDict(dict):
    """dict subclass that yields None (instead of raising KeyError)
    for missing keys."""
    def __missing__(self, key):
        return None


def dict_to_nonedict(opt):
    """Recursively convert dicts (including dicts inside lists) into
    NoneDict, so missing option keys read as None."""
    if isinstance(opt, dict):
        return NoneDict(**{key: dict_to_nonedict(value) for key, value in opt.items()})
    if isinstance(opt, list):
        return [dict_to_nonedict(item) for item in opt]
    return opt
| 7,982 | 31.583673 | 84 | py |
SwinMR | SwinMR-main/utils/utils_logger.py | import sys
import datetime
import logging
'''
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
def log(*args, **kwargs):
    """print() with a 'YYYY-mm-dd HH:MM:SS:' timestamp prefix."""
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S:")
    print(stamp, *args, **kwargs)
'''
# --------------------------------------------
# logger
# --------------------------------------------
'''
def logger_info(logger_name, log_path='default_logger.log'):
''' set up logger
modified by Kai Zhang (github: https://github.com/cszn)
'''
log = logging.getLogger(logger_name)
if log.hasHandlers():
print('LogHandlers exist!')
else:
print('LogHandlers setup!')
level = logging.INFO
formatter = logging.Formatter('%(asctime)s.%(msecs)03d : %(message)s', datefmt='%y-%m-%d %H:%M:%S')
fh = logging.FileHandler(log_path, mode='a')
fh.setFormatter(formatter)
log.setLevel(level)
log.addHandler(fh)
# print(len(log.handlers))
sh = logging.StreamHandler()
sh.setFormatter(formatter)
log.addHandler(sh)
'''
# --------------------------------------------
# print to file and std_out simultaneously
# --------------------------------------------
'''
class logger_print(object):
    '''Tee object: every write() goes to both stdout and an append-mode
    log file. Assign an instance to sys.stdout to capture prints.'''

    def __init__(self, log_path="default.log"):
        self.terminal = sys.stdout
        self.log = open(log_path, 'a')

    def write(self, message):
        # echo to the console first, then append to the log file
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # no-op: keeps the file-like interface complete for callers
        pass
| 1,686 | 24.179104 | 107 | py |
SwinMR | SwinMR-main/utils/utils_swinmr.py | import torch
from torch import nn
import os
import cv2
import gc
import numpy as np
from scipy.io import *
from scipy.fftpack import *
"""
# --------------------------------------------
# Jiahao Huang (j.huang21@imperial.uk.ac)
# 30/Jan/2022
# --------------------------------------------
"""
# Fourier Transform
def fft_map(x):
    """Return the (real, imaginary) parts of the n-dimensional FFT of x."""
    spectrum = torch.fft.fftn(x)
    return spectrum.real, spectrum.imag
| 455 | 15.888889 | 46 | py |
SwinMR | SwinMR-main/utils/utils_model.py | # -*- coding: utf-8 -*-
import numpy as np
import torch
from utils import utils_image as util
import re
import glob
import os
'''
# --------------------------------------------
# Model
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
'''
def find_last_checkpoint(save_dir, net_type='G'):
    """
    # ---------------------------------------
    # Kai Zhang (github: https://github.com/cszn)
    # 03/Mar/2019
    # ---------------------------------------
    Args:
        save_dir: model folder
        net_type: 'G' or 'D' or 'optimizerG' or 'optimizerD'

    Return:
        init_iter: latest iteration number (0 when nothing is saved)
        init_path: model path (None when nothing is saved)
    # ---------------------------------------
    """
    matches = glob.glob(os.path.join(save_dir, '*_{}.pth'.format(net_type)))
    if matches:
        # files follow the '<iteration>_<net_type>.pth' convention
        pattern = r"(\d+)_{}.pth".format(net_type)
        newest = max(int(re.findall(pattern, m)[0]) for m in matches)
        return newest, os.path.join(save_dir, '{}_{}.pth'.format(newest, net_type))
    return 0, None
def test_mode(model, L, mode=0, refield=32, min_size=256, sf=1, modulo=1):
    '''
    # ---------------------------------------
    # Kai Zhang (github: https://github.com/cszn)
    # 03/Mar/2019
    # ---------------------------------------
    Dispatch to one of the inference strategies.

    Args:
        model: trained model (callable)
        L: input Low-quality image
        mode:
            (0) normal: test(model, L)
            (1) pad: test_pad(model, L, modulo=16)
            (2) split: test_split(model, L, refield=32, min_size=256, sf=1, modulo=1)
            (3) x8: test_x8(model, L, modulo=1) ^_^
            (4) split and x8: test_split_x8(model, L, refield=32, min_size=256, sf=1, modulo=1)
        refield: effective receptive filed of the network, 32 is enough
                 useful when split, i.e., mode=2, 4
        min_size: min_sizeXmin_size image, e.g., 256X256 image
                  useful when split, i.e., mode=2, 4
        sf: scale factor for super-resolution, otherwise 1
        modulo: 1 if split
                useful when pad, i.e., mode=1

    Returns:
        E: estimated image

    Raises:
        ValueError: if mode is not one of 0-4
    # ---------------------------------------
    '''
    if mode == 0:
        E = test(model, L)
    elif mode == 1:
        E = test_pad(model, L, modulo, sf)
    elif mode == 2:
        E = test_split(model, L, refield, min_size, sf, modulo)
    elif mode == 3:
        E = test_x8(model, L, modulo, sf)
    elif mode == 4:
        E = test_split_x8(model, L, refield, min_size, sf, modulo)
    else:
        # previously an unknown mode crashed with UnboundLocalError on E
        raise ValueError('unknown test mode: {}'.format(mode))
    return E
'''
# --------------------------------------------
# normal (0)
# --------------------------------------------
'''
def test(model, L):
E = model(L)
return E
'''
# --------------------------------------------
# pad (1)
# --------------------------------------------
'''
def test_pad(model, L, modulo=16, sf=1):
h, w = L.size()[-2:]
paddingBottom = int(np.ceil(h/modulo)*modulo-h)
paddingRight = int(np.ceil(w/modulo)*modulo-w)
L = torch.nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(L)
E = model(L)
E = E[..., :h*sf, :w*sf]
return E
'''
# --------------------------------------------
# split (function)
# --------------------------------------------
'''
def test_split_fn(model, L, refield=32, min_size=256, sf=1, modulo=1):
    """
    Recursively split the input into 4 overlapping quadrants until each
    piece is at most min_size x min_size, run the model on the pieces, and
    stitch the results back together.

    Args:
        model: trained model
        L: input Low-quality image
        refield: effective receptive filed of the network, 32 is enough
        min_size: min_sizeXmin_size image, e.g., 256X256 image
        sf: scale factor for super-resolution, otherwise 1
        modulo: 1 if split

    Returns:
        E: estimated result
    """
    h, w = L.size()[-2:]
    if h*w <= min_size**2:
        # small enough: pad to a multiple of `modulo`, run once, crop back
        L = torch.nn.ReplicationPad2d((0, int(np.ceil(w/modulo)*modulo-w), 0, int(np.ceil(h/modulo)*modulo-h)))(L)
        E = model(L)
        E = E[..., :h*sf, :w*sf]
    else:
        # quadrants extend `refield`-aligned past the midlines so that each
        # piece carries enough context for the network's receptive field
        top = slice(0, (h//2//refield+1)*refield)
        bottom = slice(h - (h//2//refield+1)*refield, h)
        left = slice(0, (w//2//refield+1)*refield)
        right = slice(w - (w//2//refield+1)*refield, w)
        Ls = [L[..., top, left], L[..., top, right], L[..., bottom, left], L[..., bottom, right]]
        if h * w <= 4*(min_size**2):
            Es = [model(Ls[i]) for i in range(4)]
        else:
            # still too large: recurse on each quadrant
            Es = [test_split_fn(model, Ls[i], refield=refield, min_size=min_size, sf=sf, modulo=modulo) for i in range(4)]
        b, c = Es[0].size()[:2]
        # stitch: keep only the non-overlapping quarter of each result
        E = torch.zeros(b, c, sf * h, sf * w).type_as(L)
        E[..., :h//2*sf, :w//2*sf] = Es[0][..., :h//2*sf, :w//2*sf]
        E[..., :h//2*sf, w//2*sf:w*sf] = Es[1][..., :h//2*sf, (-w + w//2)*sf:]
        E[..., h//2*sf:h*sf, :w//2*sf] = Es[2][..., (-h + h//2)*sf:, :w//2*sf]
        E[..., h//2*sf:h*sf, w//2*sf:w*sf] = Es[3][..., (-h + h//2)*sf:, (-w + w//2)*sf:]
    return E
'''
# --------------------------------------------
# split (2)
# --------------------------------------------
'''
def test_split(model, L, refield=32, min_size=256, sf=1, modulo=1):
    """Mode 2: thin wrapper over the recursive splitter test_split_fn."""
    return test_split_fn(model, L, refield=refield, min_size=min_size, sf=sf, modulo=modulo)
'''
# --------------------------------------------
# x8 (3)
# --------------------------------------------
'''
def test_x8(model, L, modulo=1, sf=1):
    """Mode 3: geometric self-ensemble — run the padded model on all 8
    flip/rotation variants, undo each transform, and average."""
    outputs = []
    for i in range(8):
        aug = util.augment_img_tensor4(L, mode=i)
        out = test_pad(model, aug, modulo=modulo, sf=sf)
        # apply the inverse transform (mode 8 - i for modes 3 and 5,
        # the same mode otherwise)
        inv = 8 - i if i in (3, 5) else i
        outputs.append(util.augment_img_tensor4(out, mode=inv))
    return torch.stack(outputs, dim=0).mean(dim=0, keepdim=False)
'''
# --------------------------------------------
# split and x8 (4)
# --------------------------------------------
'''
def test_split_x8(model, L, refield=32, min_size=256, sf=1, modulo=1):
    """Mode 4: split (test_split_fn) combined with x8 self-ensemble.

    Runs the splitter on all 8 flip/rotation variants of L, undoes each
    transform, and averages the results. Mirrors test_x8's loop shape
    (the previous `for k, i in enumerate(range(len(...)))` always had
    k == i and was needlessly confusing).
    """
    E_list = [test_split_fn(model, util.augment_img_tensor4(L, mode=i), refield=refield, min_size=min_size, sf=sf, modulo=modulo) for i in range(8)]
    for i in range(len(E_list)):
        # invert the augmentation (mode 8 - i for modes 3 and 5)
        if i == 3 or i == 5:
            E_list[i] = util.augment_img_tensor4(E_list[i], mode=8 - i)
        else:
            E_list[i] = util.augment_img_tensor4(E_list[i], mode=i)
    output_cat = torch.stack(E_list, dim=0)
    E = output_cat.mean(dim=0, keepdim=False)
    return E
return E
'''
# ^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-
# _^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^
# ^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-
'''
'''
# --------------------------------------------
# print
# --------------------------------------------
'''
# --------------------------------------------
# print model
# --------------------------------------------
def print_model(model):
    """Print the model summary produced by describe_model()."""
    print(describe_model(model))
# --------------------------------------------
# print params
# --------------------------------------------
def print_params(model):
    """Print the per-parameter statistics table from describe_params()."""
    print(describe_params(model))
'''
# --------------------------------------------
# information
# --------------------------------------------
'''
# --------------------------------------------
# model inforation
# --------------------------------------------
def info_model(model):
    """Return the model summary string (see describe_model)."""
    return describe_model(model)
# --------------------------------------------
# params inforation
# --------------------------------------------
def info_params(model):
    """Return the per-parameter statistics string (see describe_params)."""
    return describe_params(model)
'''
# --------------------------------------------
# description
# --------------------------------------------
'''
# --------------------------------------------
# model name and total number of parameters
# --------------------------------------------
def describe_model(model):
    """Summarize a model: class name, total parameter count, and structure.
    DataParallel wrappers are unwrapped first."""
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    n_params = sum(p.numel() for p in model.parameters())
    lines = [
        '',
        'models name: {}'.format(model.__class__.__name__),
        'Params number: {}'.format(n_params),
        'Net structure:\n{}'.format(str(model)),
    ]
    return '\n'.join(lines) + '\n'
# --------------------------------------------
# parameters description
# --------------------------------------------
def describe_params(model):
    """Return a table of per-parameter statistics (mean/min/max/std/shape/name).
    'num_batches_tracked' buffers are skipped; DataParallel is unwrapped."""
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    rows = ['']
    rows.append(' | {:^6s} | {:^6s} | {:^6s} | {:^6s} || {:<20s}'.format('mean', 'min', 'max', 'std', 'shape', 'param_name'))
    for name, param in model.state_dict().items():
        if 'num_batches_tracked' in name:
            continue
        v = param.data.clone().float()
        rows.append(' | {:>6.3f} | {:>6.3f} | {:>6.3f} | {:>6.3f} | {} || {:s}'.format(v.mean(), v.min(), v.max(), v.std(), v.shape, name))
    return '\n'.join(rows) + '\n'
if __name__ == '__main__':
    # Smoke test: a one-layer conv net pushed through all five test modes.
    class Net(torch.nn.Module):
        def __init__(self, in_channels=3, out_channels=3):
            super(Net, self).__init__()
            # 3x3 conv with padding=1 keeps spatial size unchanged
            self.conv = torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1)
        def forward(self, x):
            x = self.conv(x)
            return x
    # CUDA timing events (created but not used below; requires a CUDA build)
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    model = Net()
    model = model.eval()
    print_model(model)
    print_params(model)
    # 401x401 is deliberately not a multiple of common modulos, exercising
    # the pad/split paths
    x = torch.randn((2,3,401,401))
    torch.cuda.empty_cache()
    with torch.no_grad():
        for mode in range(5):
            y = test_mode(model, x, mode, refield=32, min_size=256, sf=1, modulo=1)
            print(y.shape)
    # run utils/utils_model.py
| 9,837 | 28.902736 | 148 | py |
SwinMR | SwinMR-main/utils/utils_regularizers.py | import torch
import torch.nn as nn
'''
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
'''
# --------------------------------------------
# SVD Orthogonal Regularization
# --------------------------------------------
def regularizer_orth(m):
    """
    # ----------------------------------------
    # SVD Orthogonal Regularization
    # ----------------------------------------
    # Nudges the singular values of conv weights toward [0.5, 1.5]:
    # values above 1.5 are reduced by 1e-4, values below 0.5 raised by 1e-4.
    # Intended to be applied via net.apply(regularizer_orth).
    # Non-Conv modules are left untouched.
    # ----------------------------------------
    """
    if m.__class__.__name__.find('Conv') == -1:
        return
    w = m.weight.data.clone()
    c_out, c_in, f1, f2 = w.size()
    # flatten to a 2-D matrix (spatial*in, out) for the SVD
    flat = w.permute(2, 3, 1, 0).contiguous().view(f1 * f2 * c_in, c_out)
    u, s, v = torch.svd(flat)
    s[s > 1.5] = s[s > 1.5] - 1e-4
    s[s < 0.5] = s[s < 0.5] + 1e-4
    flat = torch.mm(torch.mm(u, torch.diag(s)), v.t())
    m.weight.data = flat.view(f1, f2, c_in, c_out).permute(3, 2, 0, 1)
# --------------------------------------------
# SVD Orthogonal Regularization
# --------------------------------------------
def regularizer_orth2(m):
    """
    # ----------------------------------------
    # SVD Orthogonal Regularization (relative variant)
    # ----------------------------------------
    # Like regularizer_orth, but the clipping band is relative to the mean
    # singular value: values above 1.5*mean are reduced by 1e-4, values
    # below 0.5*mean raised by 1e-4.
    # usage: net.apply(regularizer_orth2)
    # ----------------------------------------
    """
    if m.__class__.__name__.find('Conv') == -1:
        return
    w = m.weight.data.clone()
    c_out, c_in, f1, f2 = w.size()
    flat = w.permute(2, 3, 1, 0).contiguous().view(f1 * f2 * c_in, c_out)
    u, s, v = torch.svd(flat)
    s_mean = s.mean()
    s[s > 1.5 * s_mean] = s[s > 1.5 * s_mean] - 1e-4
    s[s < 0.5 * s_mean] = s[s < 0.5 * s_mean] + 1e-4
    flat = torch.mm(torch.mm(u, torch.diag(s)), v.t())
    m.weight.data = flat.view(f1, f2, c_in, c_out).permute(3, 2, 0, 1)
def regularizer_clip(m):
    """
    # ----------------------------------------
    # usage: net.apply(regularizer_clip)
    # ----------------------------------------
    # Nudges Conv/Linear weights and biases that lie outside [-1.5, 1.5]
    # back toward the range by eps=1e-4 per application (a soft clip, not
    # a hard clamp). Other module types are left untouched.
    # ----------------------------------------
    """
    eps = 1e-4
    c_min, c_max = -1.5, 1.5
    name = m.__class__.__name__
    if name.find('Conv') == -1 and name.find('Linear') == -1:
        return
    w = m.weight.data.clone()
    w[w > c_max] -= eps
    w[w < c_min] += eps
    m.weight.data = w
    if m.bias is not None:
        b = m.bias.data.clone()
        b[b > c_max] -= eps
        b[b < c_min] += eps
        m.bias.data = b
# elif classname.find('BatchNorm2d') != -1:
#
# rv = m.running_var.data.clone()
# rm = m.running_mean.data.clone()
#
# if m.affine:
# m.weight.data
# m.bias.data
| 3,416 | 31.542857 | 87 | py |
SwinMR | SwinMR-main/utils/utils_bnorm.py | import torch
import torch.nn as nn
"""
# --------------------------------------------
# Batch Normalization
# --------------------------------------------
# Kai Zhang (cskaizhang@gmail.com)
# https://github.com/cszn
# 01/Jan/2019
# --------------------------------------------
"""
# --------------------------------------------
# remove/delete specified layer
# --------------------------------------------
def deleteLayer(model, layer_type=nn.BatchNorm2d):
    ''' Kai Zhang, 11/Jan/2019.
    Recursively remove every child module of `layer_type` from `model`,
    in place.
    '''
    for name, child in list(model.named_children()):
        if isinstance(child, layer_type):
            del model._modules[name]
        # recurse even into deleted children to match the original traversal
        deleteLayer(child, layer_type)
# --------------------------------------------
# merge bn, "conv+bn" --> "conv"
# --------------------------------------------
def merge_bn(model):
    ''' Kai Zhang, 11/Jan/2019.
    merge all 'Conv+BN' (or 'TConv+BN') into 'Conv' (or 'TConv')
    based on https://github.com/pytorch/pytorch/pull/901

    Folds each BatchNorm's running statistics (and affine parameters, if
    any) into the weights/bias of the immediately preceding Conv2d /
    Linear / ConvTranspose2d, then deletes the BatchNorm module. Recurses
    into children. Assumes the BN directly follows the layer in
    named_children() iteration order.
    '''
    prev_m = None
    for k, m in list(model.named_children()):
        if (isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d)) and (isinstance(prev_m, nn.Conv2d) or isinstance(prev_m, nn.Linear) or isinstance(prev_m, nn.ConvTranspose2d)):
            w = prev_m.weight.data

            if prev_m.bias is None:
                # the layer had no bias: create a zero bias so BN's shift
                # has somewhere to fold into
                zeros = torch.Tensor(prev_m.out_channels).zero_().type(w.type())
                prev_m.bias = nn.Parameter(zeros)
            b = prev_m.bias.data

            # w' = w / sqrt(var + eps);  b' = (b - mean) / sqrt(var + eps)
            invstd = m.running_var.clone().add_(m.eps).pow_(-0.5)
            if isinstance(prev_m, nn.ConvTranspose2d):
                # transposed conv stores weight as (in, out, kH, kW), so the
                # per-output-channel scale broadcasts over dim 1
                w.mul_(invstd.view(1, w.size(1), 1, 1).expand_as(w))
            else:
                # NOTE(review): this 4-D view assumes a conv weight; a
                # BatchNorm1d after Linear would need a 2-D view — confirm
                w.mul_(invstd.view(w.size(0), 1, 1, 1).expand_as(w))
            b.add_(-m.running_mean).mul_(invstd)

            if m.affine:
                # additionally fold BN's learned scale (gamma) and shift (beta)
                if isinstance(prev_m, nn.ConvTranspose2d):
                    w.mul_(m.weight.data.view(1, w.size(1), 1, 1).expand_as(w))
                else:
                    w.mul_(m.weight.data.view(w.size(0), 1, 1, 1).expand_as(w))
                b.mul_(m.weight.data).add_(m.bias.data)

            del model._modules[k]
        prev_m = m

        merge_bn(m)
# --------------------------------------------
# add bn, "conv" --> "conv+bn"
# --------------------------------------------
def add_bn(model):
    ''' Kai Zhang, 11/Jan/2019.
    Recursively wrap every Conv2d / Linear / ConvTranspose2d child as
    Sequential(child, BatchNorm2d), with BN scale initialized to 1.
    '''
    for name, child in list(model.named_children()):
        if isinstance(child, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
            bn = nn.BatchNorm2d(child.out_channels, momentum=0.1, affine=True)
            bn.weight.data.fill_(1)
            model._modules[name] = nn.Sequential(model._modules[name], bn)
        add_bn(child)
# --------------------------------------------
# tidy model after removing bn
# --------------------------------------------
def tidy_sequential(model):
    ''' Kai Zhang, 11/Jan/2019.
    Recursively unwrap single-child Sequential containers (typically left
    behind after removing BatchNorm layers).
    '''
    for name, child in list(model.named_children()):
        if isinstance(child, nn.Sequential) and len(child) == 1:
            model._modules[name] = child[0]
        tidy_sequential(child)
| 3,132 | 33.054348 | 187 | py |
SwinMR | SwinMR-main/data/dataset_CCsagpi.py | '''
# -----------------------------------------
Data Loader
CC-SAG-PI d.1.1
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
import random
import torch.utils.data as data
import utils.utils_image as util
from utils.utils_swinmr import *
from models.select_mask import define_Mask
class DatasetCCsagpi(data.Dataset):
def __init__(self, opt):
super(DatasetCCsagpi, self).__init__()
print('Get L/H for image-to-image mapping. Both "paths_L" and "paths_H" are needed.')
self.opt = opt
self.n_channels = self.opt['n_channels']
self.patch_size = self.opt['H_size']
self.is_noise = self.opt['is_noise']
self.noise_level = self.opt['noise_level']
self.noise_var = self.opt['noise_var']
self.is_mini_dataset = self.opt['is_mini_dataset']
self.mini_dataset_prec = self.opt['mini_dataset_prec']
# get data path of image & sensitivity map
self.paths_raw = util.get_image_paths(opt['dataroot_H'])
assert self.paths_raw, 'Error: Raw path is empty.'
self.paths_H = []
self.paths_SM = []
for path in self.paths_raw:
if 'imgGT' in path:
self.paths_H.append(path)
elif 'SensitivityMaps' in path:
self.paths_SM.append(path)
else:
raise ValueError('Error: Unknown filename is in raw path')
if self.is_mini_dataset:
pass
# get mask
self.mask = define_Mask(self.opt)
    def __getitem__(self, index):
        """Return one sample as a dict.

        Keys: 'L' zero-filled undersampled image, 'H' fully-sampled ground
        truth (both CHW float tensors), 'H_path' ground-truth file path,
        'mask' k-space undersampling mask, 'SM' coil sensitivity map,
        'img_info' file stem of the ground-truth image.
        """
        mask = self.mask
        is_noise = self.is_noise
        noise_level = self.noise_level
        noise_var = self.noise_var
        # get gt image and sensitivity map
        H_path = self.paths_H[index]
        SM_path = self.paths_SM[index]
        img_H, Sensitivity_Map = self.load_images(H_path, SM_path, isSM=True)
        # get zf image (simulated undersampled acquisition of the GT)
        img_L = self.undersample_kspace(img_H, mask, is_noise, noise_level, noise_var)
        # get image information
        image_name_ext = os.path.basename(H_path)
        img_name, ext = os.path.splitext(image_name_ext)
        # ------------------------------------
        # if train, get L/H patch pair
        # ------------------------------------
        if self.opt['phase'] == 'train':
            H, W, _ = img_H.shape
            # --------------------------------
            # randomly crop the patch
            # (same offsets for L/H/SM so they stay pixel-aligned)
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_L = img_L[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            patch_H = img_H[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            patch_SM = Sensitivity_Map[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            # --------------------------------
            # augmentation - flip and/or rotate
            # (one random mode shared by all three patches)
            # --------------------------------
            mode = random.randint(0, 7)
            patch_L, patch_H, patch_SM= util.augment_img(patch_L, mode=mode), \
                                        util.augment_img(patch_H, mode=mode), \
                                        util.augment_img(patch_SM, mode=mode)
            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_L, img_H, Sensitivity_Map = util.float2tensor3(patch_L), \
                                            util.float2tensor3(patch_H), \
                                            util.float2tensor3(patch_SM)
        else:
            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor (no crop/augment at test time)
            # --------------------------------
            img_L, img_H = util.float2tensor3(img_L), util.float2tensor3(img_H)
        return {'L': img_L, 'H': img_H, 'H_path': H_path, 'mask': mask, 'SM': Sensitivity_Map, 'img_info': img_name}
def __len__(self):
return len(self.paths_H)
def load_images(self, H_path, SM_path, isSM=True):
# load GT
gt = np.load(H_path).astype(np.float32)
gt = np.reshape(gt, (gt.shape[0], gt.shape[1], 1))
# # 0 ~ 1
gt = (gt - gt.min()) / (gt.max() - gt.min())
# load SM
if isSM == True:
sm = np.load(SM_path).astype(np.float32)[:, :, :, 1]
# sm = np.reshape(sm[:, :, :, 1], (256, 256, 12))
# 0 ~ 1
sm = (sm - sm.min()) / (sm.max() - sm.min())
return gt, sm
else:
return gt, 0
def undersample_kspace(self, x, mask, is_noise, noise_level, noise_var):
fft = fft2(x[:, :, 0])
fft = fftshift(fft)
fft = fft * mask
if is_noise:
fft = fft + self.generate_gaussian_noise(fft, noise_level, noise_var)
fft = ifftshift(fft)
xx = ifft2(fft)
xx = np.abs(xx)
x = xx[:, :, np.newaxis]
return x
    def generate_gaussian_noise(self, x, noise_level, noise_var):
        # Gaussian noise scaled so its power is noise_level/(1-noise_level)
        # times the mean signal power of x, with per-sample variance noise_var.
        spower = np.sum(x ** 2) / x.size
        npower = noise_level / (1 - noise_level) * spower
        noise = np.random.normal(0, noise_var ** 0.5, x.shape) * np.sqrt(npower)
        return noise | 5,361 | 34.045752 | 116 | py |
SwinMR | SwinMR-main/data/select_dataset.py | '''
# -----------------------------------------
Select Dataset
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
def define_Dataset(dataset_opt):
    """Instantiate the dataset class named by dataset_opt['dataset_type'].

    Raises NotImplementedError for unknown dataset types.
    """
    dataset_type = dataset_opt['dataset_type'].lower()
    # ------------------------------------------------
    # CC-359 Calgary Campinas Public Brain MR Dataset
    # ------------------------------------------------
    if dataset_type in ['ccsagnpi']:
        # CC-SAG-NPI d.1.1
        from data.dataset_CCsagnpi import DatasetCCsagnpi as D
    elif dataset_type in ['ccsagpi']:
        # CC-SAG-PI d.1.1
        from data.dataset_CCsagpi import DatasetCCsagpi as D
    else:
        raise NotImplementedError('Dataset [{:s}] is not found.'.format(dataset_type))
    dataset = D(dataset_opt)
    print('Dataset [{:s} - {:s}] is created.'.format(dataset.__class__.__name__, dataset_opt['name']))
    return dataset
| 904 | 30.206897 | 102 | py |
SwinMR | SwinMR-main/data/dataset_CCsagnpi.py | '''
# -----------------------------------------
Data Loader
CC-SAG-NPI d.1.1
by Jiahao Huang (j.huang21@imperial.ac.uk)
# -----------------------------------------
'''
import random
import torch.utils.data as data
import utils.utils_image as util
from utils.utils_swinmr import *
from models.select_mask import define_Mask
class DatasetCCsagnpi(data.Dataset):
def __init__(self, opt):
super(DatasetCCsagnpi, self).__init__()
print('Get L/H for image-to-image mapping. Both "paths_L" and "paths_H" are needed.')
self.opt = opt
self.n_channels = self.opt['n_channels']
self.patch_size = self.opt['H_size']
self.is_noise = self.opt['is_noise']
self.noise_level = self.opt['noise_level']
self.noise_var = self.opt['noise_var']
self.is_mini_dataset = self.opt['is_mini_dataset']
self.mini_dataset_prec = self.opt['mini_dataset_prec']
# get data path of image & sensitivity map
self.paths_raw = util.get_image_paths(opt['dataroot_H'])
assert self.paths_raw, 'Error: Raw path is empty.'
self.paths_H = []
self.paths_SM = []
for path in self.paths_raw:
if 'imgGT' in path:
self.paths_H.append(path)
elif 'SensitivityMaps' in path:
self.paths_SM.append(path)
else:
raise ValueError('Error: Unknown filename is in raw path')
if self.is_mini_dataset:
pass
# get mask
self.mask = define_Mask(self.opt)
    def __getitem__(self, index):
        """Return one sample as a dict.

        Keys: 'L' zero-filled undersampled image, 'H' fully-sampled ground
        truth (both CHW float tensors), 'H_path', 'mask', 'SM' (placeholder —
        no sensitivity map is loaded here), 'img_info' (file stem).
        """
        mask = self.mask
        is_noise = self.is_noise
        noise_level = self.noise_level
        noise_var = self.noise_var
        # get gt image (no sensitivity map in the non-parallel-imaging set)
        H_path = self.paths_H[index]
        img_H, _ = self.load_images(H_path, 0, isSM=False)
        # get zf image (simulated undersampled acquisition of the GT)
        img_L = self.undersample_kspace(img_H, mask, is_noise, noise_level, noise_var)
        # get image information
        image_name_ext = os.path.basename(H_path)
        img_name, ext = os.path.splitext(image_name_ext)
        # ------------------------------------
        # if train, get L/H patch pair
        # ------------------------------------
        if self.opt['phase'] == 'train':
            # NOTE(review): this unpack rebinds `_`, so in the train branch
            # the returned 'SM' is the channel count (1) rather than the 0
            # placeholder from load_images — confirm intended.
            H, W, _ = img_H.shape
            # --------------------------------
            # randomly crop the patch
            # (same offsets for L and H so they stay pixel-aligned)
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_L = img_L[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            patch_H = img_H[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            # --------------------------------
            # augmentation - flip and/or rotate
            # (one random mode shared by both patches)
            # --------------------------------
            mode = random.randint(0, 7)
            patch_L, patch_H = util.augment_img(patch_L, mode=mode), util.augment_img(patch_H, mode=mode)
            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_L, img_H = util.float2tensor3(patch_L), util.float2tensor3(patch_H)
        else:
            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor (no crop/augment at test time)
            # --------------------------------
            img_L, img_H = util.float2tensor3(img_L), util.float2tensor3(img_H)
        return {'L': img_L, 'H': img_H, 'H_path': H_path, 'mask': mask, 'SM': _, 'img_info': img_name}
def __len__(self):
return len(self.paths_H)
def load_images(self, H_path, SM_path, isSM=True):
# load GT
gt = np.load(H_path).astype(np.float32)
gt = np.reshape(gt, (gt.shape[0], gt.shape[1], 1))
# # 0 ~ 1
gt = (gt - gt.min()) / (gt.max() - gt.min())
# load SM
if isSM == True:
sm = np.load(SM_path).astype(np.float32)[:, :, :, 1]
# sm = np.reshape(sm[:, :, :, 1], (256, 256, 12))
# 0 ~ 1
sm = (sm - sm.min()) / (sm.max() - sm.min())
return gt, sm
else:
return gt, 0
def undersample_kspace(self, x, mask, is_noise, noise_level, noise_var):
fft = fft2(x[:, :, 0])
fft = fftshift(fft)
fft = fft * mask
if is_noise:
fft = fft + self.generate_gaussian_noise(fft, noise_level, noise_var)
fft = ifftshift(fft)
xx = ifft2(fft)
xx = np.abs(xx)
x = xx[:, :, np.newaxis]
return x
    def generate_gaussian_noise(self, x, noise_level, noise_var):
        """Gaussian noise for x, scaled so that noise power / (signal + noise) power == noise_level."""
        # mean signal power per element
        # NOTE(review): x is complex k-space when called from undersample_kspace,
        # so x ** 2 is not |x| ** 2 -- the power estimate can be complex; confirm intent.
        spower = np.sum(x ** 2) / x.size
        # noise power such that npower / (spower + npower) == noise_level
        npower = noise_level / (1 - noise_level) * spower
        # zero-mean Gaussian with variance noise_var, rescaled to the target power
        noise = np.random.normal(0, noise_var ** 0.5, x.shape) * np.sqrt(npower)
return noise | 4,898 | 32.554795 | 105 | py |
risk-slim | risk-slim-master/setup.py | #! /usr/bin/env python
#
# Copyright (C) 2017 Berk Ustun
import os
import sys
from setuptools import setup, find_packages, dist
from setuptools.extension import Extension
#resources
#setuptools http://setuptools.readthedocs.io/en/latest/setuptools.html
#setuptools + Cython: http://stackoverflow.com/questions/32528560/
DISTNAME = 'riskslim'
DESCRIPTION = "optimized risk scores on large-scale datasets"
AUTHOR = 'Berk Ustun'
AUTHOR_EMAIL = 'berk@seas.harvard.edu'
URL = 'https://github.com/ustunb/risk-slim'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'https://github.com/ustunb/risk-slim'
VERSION = '0.0.0'
# Install setup requirements
dist.Distribution().fetch_build_eggs(['Cython', 'numpy', 'scipy'])
# fail fast with a clear message if a build-time dependency is missing
try:
    import numpy
except ImportError:
    print('numpy is required for installation')
    sys.exit(1)
try:
    import scipy
except ImportError:
    print('scipy is required for installation')
    sys.exit(1)
try:
    from Cython.Build import cythonize
except ImportError:
    print('Cython is required for installation')
    sys.exit(1)
# Cython extensions for the compiled log-loss implementations.
def _log_loss_extension(module_name):
    # both extensions share include dirs, libm linkage, and fast-math flags
    return Extension(
        DISTNAME + ".loss_functions." + module_name,
        [DISTNAME + "/loss_functions/" + module_name + ".pyx"],
        include_dirs=[numpy.get_include(), scipy.get_include()],
        libraries=["m"],
        extra_compile_args=["-ffast-math"],
    )

extensions = [
    _log_loss_extension("fast_log_loss"),
    _log_loss_extension("lookup_log_loss"),
]
if __name__ == "__main__":
    # run the build from the directory containing setup.py so that relative
    # paths (requirements.txt, .pyx sources) resolve correctly
    old_path = os.getcwd()  # NOTE(review): saved but never restored or used
    local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    os.chdir(local_path)
    sys.path.insert(0, local_path)
    # runtime dependencies are listed one per line in requirements.txt
    with open('requirements.txt') as f:
        INSTALL_REQUIRES = [l.strip() for l in f.readlines() if l]
    setup(
        name=DISTNAME,
        packages=find_packages(),
        ext_modules=cythonize(extensions),
        author=AUTHOR,
        author_email=AUTHOR_EMAIL,
        description=DESCRIPTION,
        install_requires=INSTALL_REQUIRES,
        license=LICENSE,
        url=URL,
        version=VERSION,
        download_url=DOWNLOAD_URL,
        zip_safe=False,
    )
| 2,322 | 25.397727 | 70 | py |
risk-slim | risk-slim-master/examples/ex_02_advanced_options.py | import os
import numpy as np
import pprint
import riskslim
# data
data_name = "breastcancer" # name of the data
data_dir = os.getcwd() + '/examples/data/' # directory where datasets are stored
data_csv_file = data_dir + data_name + '_data.csv' # csv file for the dataset
sample_weights_csv_file = None # csv file of sample weights for the dataset (optional)
# problem parameters
max_coefficient = 5 # value of largest/smallest coefficient
max_L0_value = 5 # maximum model size
max_offset = 50 # maximum value of offset parameter (optional)
c0_value = 1e-6 # L0-penalty parameter such that c0_value > 0; larger values -> sparser models; we set to a small value (1e-6) so that we get a model with max_L0_value terms
w_pos = 1.00 # relative weight on examples with y = +1; w_neg = 1.00 (optional)
# load dataset
data = riskslim.load_data_from_csv(dataset_csv_file = data_csv_file, sample_weights_csv_file = sample_weights_csv_file)
N, P = data['X'].shape
# coefficient set
coef_set = riskslim.CoefficientSet(variable_names = data['variable_names'], lb=-max_coefficient, ub=max_coefficient, sign=0)
coef_set.update_intercept_bounds(X = data['X'], y = data['Y'], max_offset = max_offset)
# create constraint dictionary
N, P = data['X'].shape
trivial_L0_max = P - np.sum(coef_set.C_0j == 0)
max_L0_value = min(max_L0_value, trivial_L0_max)
constraints = {
'L0_min': 0,
'L0_max': max_L0_value,
'coef_set': coef_set,
}
# Run RiskSLIM
settings = {
#
'c0_value': c0_value,
'w_pos': w_pos,
#
# LCPA Settings
'max_runtime': 300.0, # max runtime for LCPA
'max_tolerance': np.finfo('float').eps, # tolerance to stop LCPA (set to 0 to return provably optimal solution)
'display_cplex_progress': True, # set to True to print CPLEX progress
'loss_computation': 'lookup', # how to compute the loss function ('normal','fast','lookup')
#
# Other LCPA Heuristics
'chained_updates_flag': True, # use chained updates
'add_cuts_at_heuristic_solutions': True, # add cuts at integer feasible solutions found using polishing/rounding
#
# LCPA Rounding Heuristic
'round_flag': False, # round continuous solutions with SeqRd
'polish_rounded_solutions': True, # polish solutions rounded with SeqRd using DCD
'rounding_tolerance': float('inf'), # only solutions with objective value < (1 + tol) are rounded
'rounding_start_cuts': 0, # cuts needed to start using rounding heuristic
'rounding_start_gap': float('inf'), # optimality gap needed to start using rounding heuristic
'rounding_stop_cuts': 20000, # cuts needed to stop using rounding heuristic
'rounding_stop_gap': 0.2, # optimality gap needed to stop using rounding heuristic
#
# LCPA Polishing Heuristic
'polish_flag': False, # polish integer feasible solutions with DCD
'polishing_tolerance': 0.1, # only solutions with objective value (1 + tol) are polished.
'polishing_max_runtime': 10.0, # max time to run polishing each time
'polishing_max_solutions': 5.0, # max # of solutions to polish each time
'polishing_start_cuts': 0, # cuts needed to start using polishing heuristic
'polishing_start_gap': float('inf'), # min optimality gap needed to start using polishing heuristic
'polishing_stop_cuts': float('inf'), # cuts needed to stop using polishing heuristic
'polishing_stop_gap': 0.0, # max optimality gap required to stop using polishing heuristic
#
# Initialization Procedure
'initialization_flag': True, # use initialization procedure
'init_display_progress': True, # show progress of initialization procedure
'init_display_cplex_progress': False, # show progress of CPLEX during intialization procedure
#
'init_max_runtime': 300.0, # max time to run CPA in initialization procedure
'init_max_iterations': 10000, # max # of cuts needed to stop CPA
'init_max_tolerance': 0.0001, # tolerance of solution to stop CPA
'init_max_runtime_per_iteration': 300.0, # max time per iteration of CPA
'init_max_cplex_time_per_iteration': 10.0, # max time per iteration to solve surrogate problem in CPA
#
'init_use_rounding': True, # use Rd in initialization procedure
'init_rounding_max_runtime': 30.0, # max runtime for Rd in initialization procedure
'init_rounding_max_solutions': 5, # max solutions to round using Rd
#
'init_use_sequential_rounding': True, # use SeqRd in initialization procedure
'init_sequential_rounding_max_runtime': 10.0, # max runtime for SeqRd in initialization procedure
'init_sequential_rounding_max_solutions': 5, # max solutions to round using SeqRd
#
'init_polishing_after': True, # polish after rounding
'init_polishing_max_runtime': 30.0, # max runtime for polishing
'init_polishing_max_solutions': 5, # max solutions to polish
#
# CPLEX Solver Parameters
'cplex_randomseed': 0, # random seed
'cplex_mipemphasis': 0, # cplex MIP strategy
}
# train the risk score model with lattice CPA
model_info, mip_info, lcpa_info = riskslim.run_lattice_cpa(data, constraints, settings)
# model_info contains the key results (solution, objective value, optimality gap, ...)
pprint.pprint(model_info)
riskslim.print_model(model_info['solution'], data)
# mip_info contains handles to access the underlying MIP
# NOTE: the next two lines are bare expressions shown for reference; they have no effect
mip_info['risk_slim_mip'] #CPLEX mip
mip_info['risk_slim_idx'] #indices of the relevant constraints
# lcpa_info contains detailed solver statistics from LCPA
pprint.pprint(lcpa_info)
| 6,526 | 53.848739 | 217 | py |
risk-slim | risk-slim-master/examples/ex_01_quickstart.py | import os
import pprint
import numpy as np
import riskslim
# data
data_name = "breastcancer" # name of the data
data_dir = os.getcwd() + '/examples/data/' # directory where datasets are stored
data_csv_file = data_dir + data_name + '_data.csv' # csv file for the dataset
sample_weights_csv_file = None # csv file of sample weights for the dataset (optional)
# problem parameters
max_coefficient = 5 # value of largest/smallest coefficient
max_L0_value = 5 # maximum model size (set as float(inf))
max_offset = 50 # maximum value of offset parameter (optional)
c0_value = 1e-6 # L0-penalty parameter such that c0_value > 0; larger values -> sparser models; we set to a small value (1e-6) so that we get a model with max_L0_value terms
# load data from disk
data = riskslim.load_data_from_csv(dataset_csv_file = data_csv_file, sample_weights_csv_file = sample_weights_csv_file)
# create coefficient set and set the value of the offset parameter
coef_set = riskslim.CoefficientSet(variable_names = data['variable_names'], lb = -max_coefficient, ub = max_coefficient, sign = 0)
coef_set.update_intercept_bounds(X = data['X'], y = data['Y'], max_offset = max_offset)
constraints = {
'L0_min': 0,
'L0_max': max_L0_value,
'coef_set':coef_set,
}
# major settings (see riskslim_ex_02_complete for full set of options)
settings = {
# Problem Parameters
'c0_value': c0_value,
#
# LCPA Settings
'max_runtime': 30.0, # max runtime for LCPA
'max_tolerance': np.finfo('float').eps, # tolerance to stop LCPA (set to 0 to return provably optimal solution)
'display_cplex_progress': True, # print CPLEX progress on screen
'loss_computation': 'fast', # how to compute the loss function ('normal','fast','lookup')
#
# LCPA Improvements
'round_flag': True, # round continuous solutions with SeqRd
'polish_flag': True, # polish integer feasible solutions with DCD
'chained_updates_flag': True, # use chained updates
'add_cuts_at_heuristic_solutions': True, # add cuts at integer feasible solutions found using polishing/rounding
#
# Initialization
'initialization_flag': True, # use initialization procedure
'init_max_runtime': 120.0, # max time to run CPA in initialization procedure
'init_max_coefficient_gap': 0.49,
#
# CPLEX Solver Parameters
'cplex_randomseed': 0, # random seed
'cplex_mipemphasis': 0, # cplex MIP strategy
}
# train the risk score model with lattice CPA
model_info, mip_info, lcpa_info = riskslim.run_lattice_cpa(data, constraints, settings)
# print the fitted scoring system
riskslim.print_model(model_info['solution'], data)
# model_info contains the key results (solution, objective value, optimality gap, ...)
pprint.pprint(model_info)
| 3,223 | 46.411765 | 217 | py |
risk-slim | risk-slim-master/examples/ex_03_constraints.py | import os
import numpy as np
import cplex as cplex
import pprint
import riskslim
# data
import riskslim.coefficient_set
data_name = "breastcancer" # name of the data
data_dir = os.getcwd() + '/examples/data/' # directory where datasets are stored
data_csv_file = data_dir + data_name + '_data.csv' # csv file for the dataset
sample_weights_csv_file = None # csv file of sample weights for the dataset (optional)
# problem parameters
max_coefficient = 5 # value of largest/smallest coefficient
max_L0_value = 5 # maximum model size
max_offset = 50 # maximum value of offset parameter (optional)
c0_value = 1e-6 # L0-penalty parameter such that c0_value > 0; larger values -> sparser models; we set to a small value (1e-6) so that we get a model with max_L0_value terms
w_pos = 1.00 # relative weight on examples with y = +1; w_neg = 1.00 (optional)
# load data from disk
data = riskslim.load_data_from_csv(dataset_csv_file = data_csv_file, sample_weights_csv_file = sample_weights_csv_file)
N, P = data['X'].shape
# create coefficient set and set the value of the offset parameter
coef_set = riskslim.CoefficientSet(variable_names=data['variable_names'], lb=-max_coefficient, ub=max_coefficient, sign=0)
coef_set.update_intercept_bounds(X = data['X'], y = data['Y'], max_offset = max_offset)
# create constraint
trivial_L0_max = P - np.sum(coef_set.C_0j == 0)
max_L0_value = min(max_L0_value, trivial_L0_max)
constraints = {
'L0_min': 0,
'L0_max': max_L0_value,
'coef_set':coef_set,
}
# major settings (see riskslim_ex_02_complete for full set of options)
settings = {
# Problem Parameters
'c0_value': c0_value,
'w_pos': w_pos,
#
# LCPA Settings
'max_runtime': 300.0, # max runtime for LCPA
'max_tolerance': np.finfo('float').eps, # tolerance to stop LCPA (set to 0 to return provably optimal solution)
'display_cplex_progress': True, # print CPLEX progress on screen
'loss_computation': 'normal', # how to compute the loss function ('normal','fast','lookup')
#
# RiskSLIM MIP settings
'drop_variables': False,
#
# LCPA Improvements
'round_flag': False, # round continuous solutions with SeqRd
'polish_flag': False, # polish integer feasible solutions with DCD
'chained_updates_flag': False, # use chained updates
'initialization_flag': False, # use initialization procedure
'init_max_runtime': 300.0, # max time to run CPA in initialization procedure
'add_cuts_at_heuristic_solutions': True, # add cuts at integer feasible solutions found using polishing/rounding
#
# CPLEX Solver Parameters
'cplex_randomseed': 0, # random seed
'cplex_mipemphasis': 0, # cplex MIP strategy
}
# turn on at your own risk
settings['round_flag'] = False
settings['polish_flag'] = False
settings['chained_updates_flag'] = False
settings['initialization_flag'] = False
# initialize MIP for lattice CPA (build only -- do not solve yet)
mip_objects = riskslim.setup_lattice_cpa(data, constraints, settings)
# add operational constraints directly on the CPLEX MIP
mip, indices = mip_objects['mip'], mip_objects['indices']
# alpha_j is the indicator variable "coefficient j is nonzero"; map a variable
# name to its alpha variable name in the MIP
get_alpha_name = lambda var_name: 'alpha_' + str(data['variable_names'].index(var_name))
get_alpha_ind = lambda var_names: [get_alpha_name(v) for v in var_names]
# to add a constraint like "either "CellSize" or "CellShape"
# you must formulate the constraint in terms of the alpha variables:
# alpha[cell_size] + alpha[cell_shape] <= 1
mip.linear_constraints.add(
        names = ["EitherOr_CellSize_or_CellShape"],
        lin_expr = [cplex.SparsePair(ind = get_alpha_ind(['UniformityOfCellSize', 'UniformityOfCellShape']),
                                     val = [1.0, 1.0])],
        senses = "L",
        rhs = [1.0])
mip_objects['mip'] = mip
# pass the modified MIP back to lattice CPA so that it will solve
model_info, mip_info, lcpa_info = riskslim.finish_lattice_cpa(data, constraints, mip_objects, settings)
# model_info contains the key results (solution, objective value, optimality gap, ...)
pprint.pprint(model_info)
riskslim.print_model(model_info['solution'], data)
# mip_info contains handles to access the underlying MIP
# NOTE: the next two lines are bare expressions shown for reference; they have no effect
mip_info['risk_slim_mip'] #CPLEX mip
mip_info['risk_slim_idx'] #indices of the relevant constraints
# lcpa_info contains detailed solver statistics from LCPA
pprint.pprint(lcpa_info)
| 4,811 | 41.584071 | 217 | py |
risk-slim | risk-slim-master/riskslim/lattice_cpa.py | import time
import numpy as np
from cplex.callbacks import HeuristicCallback, LazyConstraintCallback
from cplex.exceptions import CplexError
from .bound_tightening import chained_updates
from .defaults import DEFAULT_LCPA_SETTINGS
from .utils import print_log, validate_settings
from .heuristics import discrete_descent, sequential_rounding
from .initialization import initialize_lattice_cpa
from .mip import add_mip_starts, convert_to_risk_slim_cplex_solution, create_risk_slim, set_cplex_mip_parameters
from .setup_functions import get_loss_bounds, setup_loss_functions, setup_objective_functions, setup_penalty_parameters
from .solution_pool import SolutionPool, FastSolutionPool
# default brackets on the LCPA search region: the objective value, the loss
# value, and the L0-norm (model size) are each bounded by a [min, max] pair
DEFAULT_BOUNDS = {
    'objval_min': 0.0,
    'objval_max': float('inf'),
    'loss_min': 0.0,
    'loss_max': float('inf'),
    'L0_min': 0,
    'L0_max': float('inf'),
    }
# LATTICE CPA FUNCTIONS
def run_lattice_cpa(data, constraints, settings = DEFAULT_LCPA_SETTINGS):
    """Train a risk score model by running lattice CPA end-to-end.

    Parameters
    ----------
    data : dict of training data (should pass check_data)
    constraints : dict with 'L0_min', 'L0_max' and 'coef_set'
    settings : dict of LCPA settings (see DEFAULT_LCPA_SETTINGS)

    Returns
    -------
    (model_info, mip_info, lcpa_info) : model results, MIP handles, and
        detailed LCPA solver statistics
    """
    # build the MIP and warm-start objects, then hand them off to the solver
    mip_objects = setup_lattice_cpa(data, constraints, settings)
    return finish_lattice_cpa(data, constraints, mip_objects, settings)
def setup_lattice_cpa(data, constraints, settings = DEFAULT_LCPA_SETTINGS):
    """Build the RiskSLIM MIP and warm-start objects for lattice CPA.

    Parameters
    ----------
    data : dict
        training data; should pass check_data
    constraints : dict
        contains 'L0_min', 'L0_max' and 'coef_set' (a CoefficientSet)
    settings : dict
        LCPA settings; missing keys are filled from DEFAULT_LCPA_SETTINGS

    Returns
    -------
    mip_objects : dict
        with keys 'mip', 'indices', 'bounds', 'initial_pool', 'initial_cuts';
        pass to finish_lattice_cpa (optionally after adding operational constraints)
    """
    # process settings then split into manageable parts
    settings = validate_settings(settings, default_settings = DEFAULT_LCPA_SETTINGS)
    # strip prefixes by slicing rather than str.lstrip: lstrip removes a *character
    # set*, not a prefix (e.g. 'init_tolerance'.lstrip('init_') == 'olerance')
    init_settings = {k[len('init_'):]: settings[k] for k in settings if k.startswith('init_')}
    cplex_settings = {k[len('cplex_'):]: settings[k] for k in settings if k.startswith('cplex_')}
    lcpa_settings = {k: settings[k] for k in settings if not k.startswith(('init_', 'cplex_'))}

    # get handles for loss functions
    (Z,
     compute_loss,
     compute_loss_cut,
     compute_loss_from_scores,
     compute_loss_real,
     compute_loss_cut_real,
     compute_loss_from_scores_real) = setup_loss_functions(data = data,
                                                           coef_set = constraints['coef_set'],
                                                           L0_max = constraints['L0_max'],
                                                           loss_computation = settings['loss_computation'],
                                                           w_pos = settings['w_pos'])

    # data dimensions (N samples x P coefficients)
    N, P = Z.shape

    # trade-off parameters
    c0_value, C_0, L0_reg_ind, C_0_nnz = setup_penalty_parameters(c0_value = lcpa_settings['c0_value'],
                                                                  coef_set = constraints['coef_set'])

    # handles for the objective, L0-norm and L0-penalty
    (get_objval,
     get_L0_norm,
     get_L0_penalty,
     get_alpha,
     get_L0_penalty_from_alpha) = setup_objective_functions(compute_loss, L0_reg_ind, C_0_nnz)

    # box constraints on the coefficient vector and limits on model size
    rho_lb = np.array(constraints['coef_set'].lb)
    rho_ub = np.array(constraints['coef_set'].ub)
    L0_min = constraints['L0_min']
    L0_max = constraints['L0_max']

    def is_feasible(rho, L0_min = L0_min, L0_max = L0_max, rho_lb = rho_lb, rho_ub = rho_ub):
        # feasible = inside the box, and L0-norm of penalized coefficients in [L0_min, L0_max]
        return np.all(rho_ub >= rho) and np.all(rho_lb <= rho) and (L0_min <= np.count_nonzero(rho[L0_reg_ind]) <= L0_max)

    # compute bounds on objective value
    bounds = dict(DEFAULT_BOUNDS)
    bounds['L0_min'] = constraints['L0_min']
    bounds['L0_max'] = constraints['L0_max']
    bounds['loss_min'], bounds['loss_max'] = get_loss_bounds(Z, rho_ub, rho_lb, L0_reg_ind, L0_max)

    # initialize the solution pool and the initial set of cutting planes
    initial_pool = SolutionPool(P)
    initial_cuts = None

    # check if trivial solution (rho = 0) is feasible; if so add it to the pool and update bounds
    trivial_solution = np.zeros(P)
    if is_feasible(trivial_solution):
        trivial_objval = compute_loss(trivial_solution)
        if lcpa_settings['initial_bound_updates']:
            bounds['objval_max'] = min(bounds['objval_max'], trivial_objval)
            bounds['loss_max'] = min(bounds['loss_max'], trivial_objval)
            bounds = chained_updates(bounds, C_0_nnz)
        initial_pool = initial_pool.add(objvals = trivial_objval, solutions = trivial_solution)

    # setup risk_slim_lp and risk_slim_mip parameters
    risk_slim_settings = {
        'C_0': c0_value,
        'coef_set': constraints['coef_set'],
        'tight_formulation': lcpa_settings['tight_formulation'],
        'drop_variables': lcpa_settings['drop_variables'],
        'include_auxillary_variable_for_L0_norm': lcpa_settings['include_auxillary_variable_for_L0_norm'],
        'include_auxillary_variable_for_objval': lcpa_settings['include_auxillary_variable_for_objval'],
        }
    risk_slim_settings.update(bounds)

    # run initialization procedure (CPA + rounding/polishing) to warm-start LCPA
    if lcpa_settings['initialization_flag']:
        initial_pool, initial_cuts, initial_bounds = initialize_lattice_cpa(Z = Z,
                                                                            c0_value = lcpa_settings['c0_value'],
                                                                            constraints = constraints,
                                                                            bounds = bounds,
                                                                            settings = init_settings,
                                                                            risk_slim_settings = risk_slim_settings,
                                                                            cplex_settings = cplex_settings,
                                                                            compute_loss_from_scores = compute_loss_from_scores,
                                                                            compute_loss_real = compute_loss_real,
                                                                            compute_loss_cut_real = compute_loss_cut_real,
                                                                            compute_loss_from_scores_real = compute_loss_from_scores_real,
                                                                            get_objval = get_objval,
                                                                            get_L0_penalty = get_L0_penalty,
                                                                            is_feasible = is_feasible)
        if lcpa_settings['initial_bound_updates']:
            bounds.update(initial_bounds)
            risk_slim_settings.update(initial_bounds)

    # create risk_slim mip
    risk_slim_mip, risk_slim_indices = create_risk_slim(coef_set = constraints['coef_set'], input = risk_slim_settings)
    risk_slim_indices['C_0_nnz'] = C_0_nnz
    risk_slim_indices['L0_reg_ind'] = L0_reg_ind

    # package everything finish_lattice_cpa needs
    mip_objects = {
        'mip': risk_slim_mip,
        'indices': risk_slim_indices,
        'bounds': bounds,
        'initial_pool': initial_pool,
        'initial_cuts': initial_cuts,
        }
    return mip_objects
def finish_lattice_cpa(data, constraints, mip_objects, settings = DEFAULT_LCPA_SETTINGS):
    """Solve the RiskSLIM MIP built by setup_lattice_cpa using LCPA.

    Parameters
    ----------
    data : dict
        training data; should pass check_data
    constraints : dict
        contains 'L0_min', 'L0_max' and 'coef_set' (a CoefficientSet)
    mip_objects : dict
        output of setup_lattice_cpa
    settings : dict
        LCPA settings; missing keys are filled from DEFAULT_LCPA_SETTINGS

    Returns
    -------
    (model_info, mip_info, lcpa_info)
        model_info : solution, objective/loss values, optimality gap, timing
        mip_info : handles to the CPLEX MIP and its variable indices
        lcpa_info : detailed solver statistics (counters, bounds, settings)
    """
    # process settings then split into manageable parts
    settings = validate_settings(settings, default_settings = DEFAULT_LCPA_SETTINGS)
    # strip the prefix by slicing rather than str.lstrip (lstrip removes a character set, not a prefix)
    cplex_settings = {k[len('cplex_'):]: settings[k] for k in settings if k.startswith('cplex_')}
    lcpa_settings = {k: settings[k] for k in settings if not k.startswith(('init_', 'cplex_'))}

    # unpack mip_objects from setup_lattice_cpa
    risk_slim_mip = mip_objects['mip']
    indices = mip_objects['indices']
    bounds = mip_objects['bounds']
    initial_pool = mip_objects['initial_pool']
    initial_cuts = mip_objects['initial_cuts']

    # get handles for loss functions
    (Z,
     compute_loss,
     compute_loss_cut,
     compute_loss_from_scores,
     compute_loss_real,
     compute_loss_cut_real,
     compute_loss_from_scores_real) = setup_loss_functions(data = data,
                                                           coef_set = constraints['coef_set'],
                                                           L0_max = constraints['L0_max'],
                                                           loss_computation = settings['loss_computation'],
                                                           w_pos = settings['w_pos'])

    # data dimensions (N samples x P coefficients)
    N, P = Z.shape

    # trade-off parameter
    c0_value, C_0, L0_reg_ind, C_0_nnz = setup_penalty_parameters(c0_value = lcpa_settings['c0_value'],
                                                                  coef_set = constraints['coef_set'])

    # handles for the objective, L0-norm and L0-penalty
    (get_objval,
     get_L0_norm,
     get_L0_penalty,
     get_alpha,
     get_L0_penalty_from_alpha) = setup_objective_functions(compute_loss, L0_reg_ind, C_0_nnz)

    # constraints
    rho_lb = np.array(constraints['coef_set'].lb)
    rho_ub = np.array(constraints['coef_set'].ub)
    L0_min = constraints['L0_min']
    L0_max = constraints['L0_max']
    trivial_L0_max = np.sum(constraints['coef_set'].penalized_indices())

    def is_feasible(rho, L0_min = L0_min, L0_max = L0_max, rho_lb = rho_lb, rho_ub = rho_ub):
        # feasible = inside the box, and L0-norm of penalized coefficients in [L0_min, L0_max]
        return np.all(rho_ub >= rho) and np.all(rho_lb <= rho) and (L0_min <= np.count_nonzero(rho[L0_reg_ind]) <= L0_max)

    risk_slim_mip = set_cplex_mip_parameters(risk_slim_mip, cplex_settings, display_cplex_progress = lcpa_settings['display_cplex_progress'])
    risk_slim_mip.parameters.timelimit.set(lcpa_settings['max_runtime'])

    # shared state passed to (and mutated by) the CPLEX callbacks
    control = {
        'incumbent': np.repeat(np.nan, P),
        'upperbound': float('inf'),
        'bounds': dict(bounds),
        'lowerbound': 0.0,
        'relative_gap': float('inf'),
        'nodes_processed': 0,
        'nodes_remaining': 0,
        #
        'start_time': float('nan'),
        'total_run_time': 0.0,
        'total_cut_time': 0.0,
        'total_polish_time': 0.0,
        'total_round_time': 0.0,
        'total_round_then_polish_time': 0.0,
        #
        'cut_callback_times_called': 0,
        'heuristic_callback_times_called': 0,
        'total_cut_callback_time': 0.00,
        'total_heuristic_callback_time': 0.00,
        #
        # number of times solutions were updates
        'n_incumbent_updates': 0,
        'n_heuristic_updates': 0,
        'n_cuts': 0,
        'n_polished': 0,
        'n_rounded': 0,
        'n_rounded_then_polished': 0,
        #
        # total # of bound updates
        'n_update_bounds_calls': 0,
        'n_bound_updates': 0,
        'n_bound_updates_loss_min': 0,
        'n_bound_updates_loss_max': 0,
        'n_bound_updates_L0_min': 0,
        'n_bound_updates_L0_max': 0,
        'n_bound_updates_objval_min': 0,
        'n_bound_updates_objval_max': 0,
        }

    lcpa_cut_queue = FastSolutionPool(P)
    lcpa_polish_queue = FastSolutionPool(P)

    # register callbacks; the heuristic callback is only needed when rounding/polishing is on
    heuristic_flag = lcpa_settings['round_flag'] or lcpa_settings['polish_flag']
    if heuristic_flag:
        loss_cb = risk_slim_mip.register_callback(LossCallback)
        loss_cb.initialize(indices = indices,
                           control = control,
                           settings = lcpa_settings,
                           compute_loss_cut = compute_loss_cut,
                           get_alpha = get_alpha,
                           get_L0_penalty_from_alpha = get_L0_penalty_from_alpha,
                           initial_cuts = initial_cuts,
                           cut_queue = lcpa_cut_queue,
                           polish_queue = lcpa_polish_queue)

        heuristic_cb = risk_slim_mip.register_callback(PolishAndRoundCallback)
        active_set_flag = L0_max <= trivial_L0_max
        polishing_handle = lambda rho: discrete_descent(rho, Z, C_0, rho_ub, rho_lb, get_L0_penalty, compute_loss_from_scores, active_set_flag)
        rounding_handle = lambda rho, cutoff: sequential_rounding(rho, Z, C_0, compute_loss_from_scores_real, get_L0_penalty, cutoff)
        heuristic_cb.initialize(indices = indices,
                                control = control,
                                settings = lcpa_settings,
                                cut_queue = lcpa_cut_queue,
                                polish_queue = lcpa_polish_queue,
                                get_objval = get_objval,
                                get_L0_norm = get_L0_norm,
                                is_feasible = is_feasible,
                                polishing_handle = polishing_handle,
                                rounding_handle = rounding_handle)
    else:
        loss_cb = risk_slim_mip.register_callback(LossCallback)
        loss_cb.initialize(indices = indices,
                           control = control,
                           settings = lcpa_settings,
                           compute_loss_cut = compute_loss_cut,
                           get_alpha = get_alpha,
                           get_L0_penalty_from_alpha = get_L0_penalty_from_alpha,
                           initial_cuts = initial_cuts)

    # attach the warm-start solution pool
    if len(initial_pool) > 0:
        if lcpa_settings['polish_flag']:
            # initialize using the polish_queue when possible since the CPLEX MIPStart interface is tricky
            lcpa_polish_queue.add(initial_pool.objvals[0], initial_pool.solutions[0])
        else:
            risk_slim_mip = add_mip_starts(risk_slim_mip, indices, initial_pool, mip_start_effort_level = risk_slim_mip.MIP_starts.effort_level.repair)

        if lcpa_settings['add_cuts_at_heuristic_solutions'] and len(initial_pool) > 1:
            lcpa_cut_queue.add(initial_pool.objvals[1:], initial_pool.solutions[1:])

    # solve using lcpa
    control['start_time'] = time.time()
    risk_slim_mip.solve()
    control['total_run_time'] = time.time() - control['start_time']
    control.pop('start_time')

    # record mip solution statistics (CplexError here means no feasible solution was found)
    try:
        control['incumbent'] = np.array(risk_slim_mip.solution.get_values(indices['rho']))
        control['upperbound'] = risk_slim_mip.solution.get_objective_value()
        control['lowerbound'] = risk_slim_mip.solution.MIP.get_best_objective()
        control['relative_gap'] = risk_slim_mip.solution.MIP.get_mip_relative_gap()
        control['found_solution'] = True
    except CplexError:
        control['found_solution'] = False

    control['cplex_status'] = risk_slim_mip.solution.get_status_string()
    control['total_callback_time'] = control['total_cut_callback_time'] + control['total_heuristic_callback_time']
    control['total_solver_time'] = control['total_run_time'] - control['total_callback_time']
    control['total_data_time'] = control['total_cut_time'] + control['total_polish_time'] + control['total_round_time'] + control['total_round_then_polish_time']

    # Output for Model
    model_info = {
        'c0_value': c0_value,
        'w_pos': settings['w_pos'],
        #
        'solution': control['incumbent'],
        'objective_value': get_objval(control['incumbent']) if control['found_solution'] else float('inf'),
        'loss_value': compute_loss(control['incumbent']) if control['found_solution'] else float('inf'),
        'optimality_gap': control['relative_gap'] if control['found_solution'] else float('inf'),
        #
        'run_time': control['total_run_time'],
        'solver_time': control['total_solver_time'],
        'callback_time': control['total_callback_time'],
        'data_time': control['total_data_time'],
        'nodes_processed': control['nodes_processed'],
        }
    model_info.update(constraints)

    # Output for MIP
    mip_info = {
        'risk_slim_mip': risk_slim_mip,
        'risk_slim_idx': indices
        }

    # Output for LCPA
    lcpa_info = dict(control)
    lcpa_info['bounds'] = dict(bounds)
    lcpa_info['settings'] = dict(settings)

    return model_info, mip_info, lcpa_info
# CALLBACK FUNCTIONS
class LossCallback(LazyConstraintCallback):
"""
This callback has to be initialized after construnction with initialize().
LossCallback is called when CPLEX finds an integer feasible solution. By default, it will add a cut at this
solution to improve the cutting-plane approximation of the loss function. The cut is added as a 'lazy' constraint
into the surrogate LP so that it is evaluated only when necessary.
Optional functionality:
- add an initial set of cutting planes found by warm starting
requires initial_cuts
- pass integer feasible solutions to 'polish' queue so that they can be polished with DCD in the PolishAndRoundCallback
requires settings['polish_flag'] = True
- adds cuts at integer feasible solutions found by the PolishAndRoundCallback
requires settings['add_cuts_at_heuristic_solutions'] = True
- reduces overall search region by adding constraints on objval_max, l0_max, loss_min, loss_max
requires settings['chained_updates_flag'] = True
"""
    def initialize(self, indices, control, settings, compute_loss_cut, get_alpha, get_L0_penalty_from_alpha, initial_cuts = None, cut_queue = None, polish_queue = None):
        """Wire the callback to shared LCPA state; must be called after register_callback."""
        assert isinstance(indices, dict)
        assert isinstance(control, dict)
        assert isinstance(settings, dict)
        assert callable(compute_loss_cut)
        assert callable(get_alpha)
        assert callable(get_L0_penalty_from_alpha)
        self.settings = settings #store pointer to shared settings so that settings can be turned on/off during B&B
        self.control = control # dict containing information for flow
        # todo (validate initial cutting planes)
        self.initial_cuts = initial_cuts
        # variable indices in the surrogate MIP
        self.rho_idx = indices['rho']
        # a loss cut constrains the loss variable together with all rho variables
        self.cut_idx = indices['loss'] + indices['rho']
        self.alpha_idx = indices['alpha']
        self.L0_reg_ind = indices['L0_reg_ind']
        self.C_0_nnz = indices['C_0_nnz']
        # function handles supplied by the caller
        self.compute_loss_cut = compute_loss_cut
        self.get_alpha = get_alpha
        self.get_L0_penalty_from_alpha = get_L0_penalty_from_alpha
        # cplex has the ability to drop cutting planes that are not used. by default, we force CPLEX to use all cutting planes.
        self.loss_cut_purge_flag = self.use_constraint.purge if self.settings['purge_loss_cuts'] else self.use_constraint.force
        # setup pointer to cut_queue to receive cuts from PolishAndRoundCallback
        if self.settings['add_cuts_at_heuristic_solutions']:
            if cut_queue is None:
                self.cut_queue = FastSolutionPool(len(self.rho_idx))
            else:
                assert isinstance(cut_queue, FastSolutionPool)
                self.cut_queue = cut_queue
        # setup pointer to polish_queue to send integer solutions to PolishAndRoundCallback
        if self.settings['polish_flag']:
            if polish_queue is None:
                self.polish_queue = FastSolutionPool(len(self.rho_idx))
            else:
                assert isinstance(polish_queue, FastSolutionPool)
                self.polish_queue = polish_queue
        # setup indices for update bounds
        # NOTE(review): loss_cut_constraint uses indices['loss'] unwrapped while the
        # other two wrap their index in a list -- presumably indices['loss'] is
        # already a list; confirm against create_risk_slim.
        if self.settings['chained_updates_flag']:
            self.loss_cut_constraint = [indices['loss'], [1.0]]
            self.L0_cut_constraint = [[indices['L0_norm']], [1.0]]
            self.objval_cut_constraint = [[indices['objval']], [1.0]]
            self.bound_cut_purge_flag = self.use_constraint.purge if self.settings['purge_loss_cuts'] else self.use_constraint.force
        return
def add_loss_cut(self, rho):
loss_value, loss_slope = self.compute_loss_cut(rho)
self.add(constraint = [self.cut_idx, [1.0] + (-loss_slope).tolist()],
sense = "G",
rhs = float(loss_value - loss_slope.dot(rho)),
use = self.loss_cut_purge_flag)
return loss_value
def update_bounds(self):
bounds = chained_updates(bounds = self.control['bounds'],
C_0_nnz = self.C_0_nnz,
new_objval_at_relaxation = self.control['lowerbound'],
new_objval_at_feasible = self.control['upperbound'])
#add cuts if bounds need to be tighter
if bounds['loss_min'] > self.control['bounds']['loss_min']:
self.add(constraint = self.loss_cut_constraint, sense = "G", rhs = bounds['loss_min'], use = self.bound_cut_purge_flag)
self.control['bounds']['loss_min'] = bounds['loss_min']
self.control['n_bound_updates_loss_min'] += 1
if bounds['objval_min'] > self.control['bounds']['objval_min']:
self.add(constraint = self.objval_cut_constraint, sense = "G", rhs = bounds['objval_min'], use = self.bound_cut_purge_flag)
self.control['bounds']['objval_min'] = bounds['objval_min']
self.control['n_bound_updates_objval_min'] += 1
if bounds['L0_max'] < self.control['bounds']['L0_max']:
self.add(constraint = self.L0_cut_constraint, sense="L", rhs = bounds['L0_max'], use = self.bound_cut_purge_flag)
self.control['bounds']['L0_max'] = bounds['L0_max']
self.control['n_bound_updates_L0_max'] += 1
if bounds['loss_max'] < self.control['bounds']['loss_max']:
self.add(constraint = self.loss_cut_constraint, sense="L", rhs = bounds['loss_max'], use = self.bound_cut_purge_flag)
self.control['bounds']['loss_max'] = bounds['loss_max']
self.control['n_bound_updates_loss_max'] += 1
if bounds['objval_max'] < self.control['bounds']['objval_max']:
self.add(constraint = self.objval_cut_constraint, sense="L", rhs = bounds['objval_max'], use = self.bound_cut_purge_flag)
self.control['bounds']['objval_max'] = bounds['objval_max']
self.control['n_bound_updates_objval_max'] += 1
return
def __call__(self):
    """Lazy-cut callback body, invoked by CPLEX at integer-feasible solutions.

    Records progress metrics, adds loss cutting planes at the current integer
    solution (and at queued heuristic solutions), updates the incumbent and
    polishing queue, and optionally runs chained bound updates.
    """
    #print_log('in cut callback')
    callback_start_time = time.time()

    #record entry metrics
    self.control['cut_callback_times_called'] += 1
    self.control['lowerbound'] = self.get_best_objective_value()
    self.control['relative_gap'] = self.get_MIP_relative_gap()
    self.control['nodes_processed'] = self.get_num_nodes()
    self.control['nodes_remaining'] = self.get_num_remaining_nodes()

    # add initial cuts the first time the callback is used, then drop them
    if self.initial_cuts is not None:
        print_log('adding %1.0f initial cuts' % len(self.initial_cuts['lhs']))
        for cut, lhs in zip(self.initial_cuts['coefs'], self.initial_cuts['lhs']):
            self.add(constraint = cut, sense = "G", rhs = lhs, use = self.loss_cut_purge_flag)
        self.initial_cuts = None

    # get integer feasible solution
    rho = np.array(self.get_values(self.rho_idx))
    alpha = np.array(self.get_values(self.alpha_idx))

    # check that the CPLEX solution is actually integer; if not, recast as int
    # (CPLEX can return slightly fractional values at "integer" nodes)
    if not is_integer(rho):
        rho = cast_to_integer(rho)
        alpha = self.get_alpha(rho)

    # add cutting plane at integer feasible solution
    cut_start_time = time.time()
    loss_value = self.add_loss_cut(rho)
    cut_time = time.time() - cut_start_time
    cuts_added = 1

    # if solution updates incumbent, then add solution to queue for polishing
    current_upperbound = float(loss_value + self.get_L0_penalty_from_alpha(alpha))
    incumbent_update = current_upperbound < self.control['upperbound']

    if incumbent_update:
        self.control['incumbent'] = rho
        self.control['upperbound'] = current_upperbound
        self.control['n_incumbent_updates'] += 1
        if self.settings['polish_flag']:
            # only queue for polishing if within the polishing tolerance of the incumbent
            polishing_cutoff = self.control['upperbound'] * (1.0 + self.settings['polishing_tolerance'])
            if current_upperbound < polishing_cutoff:
                self.polish_queue.add(current_upperbound, rho)

    # add cutting planes at other integer feasible solutions in cut_queue
    if self.settings['add_cuts_at_heuristic_solutions']:
        if len(self.cut_queue) > 0:
            self.cut_queue.filter_sort_unique()
            cut_start_time = time.time()
            for cut_rho in self.cut_queue.solutions:
                self.add_loss_cut(cut_rho)
            cut_time += time.time() - cut_start_time
            cuts_added += len(self.cut_queue)
            self.cut_queue.clear()

    # update bounds when either bound crossed the stored chained-update bounds
    if self.settings['chained_updates_flag']:
        if (self.control['lowerbound'] > self.control['bounds']['objval_min']) or (self.control['upperbound'] < self.control['bounds']['objval_max']):
            self.control['n_update_bounds_calls'] += 1
            self.update_bounds()

    # record metrics at end
    self.control['n_cuts'] += cuts_added
    self.control['total_cut_time'] += cut_time
    self.control['total_cut_callback_time'] += time.time() - callback_start_time
    #print_log('left cut callback')
    return
class PolishAndRoundCallback(HeuristicCallback):
    """
    This callback has to be initialized after construction with initialize().

    HeuristicCallback is called intermittently during B&B by CPLEX. It runs several heuristics in a fast way and contains
    several options to stop early. Note: It is important for the callback to run quickly since it is called fairly often.
    If HeuristicCallback runs slowly, then it will slow down overall B&B progress.

    Heuristics include:

    - Runs sequential rounding on the continuous solution from the surrogate LP (only if there has been a change in the
      lower bound). Requires settings['round_flag'] = True. If settings['polish_after_rounding'] = True, then the
      rounded solutions are polished using DCD.

    - Polishes integer solutions in polish_queue using DCD. Requires settings['polish_flag'] = True.

    Optional:

    - Feasible solutions are passed to LazyCutConstraintCallback via cut_queue

    Known issues:

    - Sometimes CPLEX does not return an integer feasible solution (in which case we correct this manually)
    """

    def initialize(self, indices, control, settings, cut_queue, polish_queue, get_objval, get_L0_norm, is_feasible, polishing_handle, rounding_handle):
        """Wire the callback to the shared solver state and heuristic handles.

        Must be called once after CPLEX constructs the callback object and
        before the first heuristic invocation.
        """
        # basic sanity checks on shared state passed in from the solver
        assert isinstance(indices, dict)
        assert isinstance(control, dict)
        assert isinstance(settings, dict)
        assert isinstance(cut_queue, FastSolutionPool)
        assert isinstance(polish_queue, FastSolutionPool)
        assert callable(get_objval)
        assert callable(get_L0_norm)
        assert callable(is_feasible)
        assert callable(polishing_handle)
        assert callable(rounding_handle)

        self.rho_idx = indices['rho']
        self.L0_reg_ind = indices['L0_reg_ind']
        self.C_0_nnz = indices['C_0_nnz']
        self.indices = indices
        self.previous_lowerbound = 0.0
        self.control = control
        self.settings = settings

        self.round_flag = settings['round_flag']
        self.polish_rounded_solutions = settings['polish_rounded_solutions']
        self.polish_flag = settings['polish_flag']
        self.polish_queue = polish_queue
        self.cut_queue = cut_queue  # pointer to cut_queue shared with the cut callback

        # rounding / polishing windows: heuristics only run while the cut count
        # and relative gap are inside [start, stop] ranges
        self.rounding_tolerance = float(1.0 + settings['rounding_tolerance'])
        self.rounding_start_cuts = settings['rounding_start_cuts']
        self.rounding_stop_cuts = settings['rounding_stop_cuts']
        self.rounding_stop_gap = settings['rounding_stop_gap']
        self.rounding_start_gap = settings['rounding_start_gap']

        self.polishing_tolerance = float(1.0 + settings['polishing_tolerance'])
        self.polishing_start_cuts = settings['polishing_start_cuts']
        self.polishing_stop_cuts = settings['polishing_stop_cuts']
        self.polishing_stop_gap = settings['polishing_stop_gap']
        self.polishing_start_gap = settings['polishing_start_gap']
        self.polishing_max_solutions = settings['polishing_max_solutions']
        self.polishing_max_runtime = settings['polishing_max_runtime']

        self.get_objval = get_objval
        self.get_L0_norm = get_L0_norm
        self.is_feasible = is_feasible
        self.polishing_handle = polishing_handle
        self.rounding_handle = rounding_handle
        return

    def update_heuristic_flags(self, n_cuts, relative_gap):
        """Disable rounding / polishing once outside their configured windows.

        Flags are only ever switched off (monotone): `&=` means a heuristic
        that leaves its window never turns back on.
        """
        # keep on rounding?
        keep_rounding = (self.rounding_start_cuts <= n_cuts <= self.rounding_stop_cuts) and \
                        (self.rounding_stop_gap <= relative_gap <= self.rounding_start_gap)

        # keep on polishing?
        keep_polishing = (self.polishing_start_cuts <= n_cuts <= self.polishing_stop_cuts) and \
                         (self.polishing_stop_gap <= relative_gap <= self.polishing_start_gap)

        self.round_flag &= keep_rounding
        self.polish_flag &= keep_polishing
        self.polish_rounded_solutions &= self.round_flag
        return

    def __call__(self):
        """Heuristic callback body: round the LP solution and/or polish queued
        integer solutions, then propose the best feasible result to CPLEX."""
        # todo write rounding/polishing as separate function calls
        #print_log('in heuristic callback')

        # fast exit when both heuristics have been disabled
        if not (self.round_flag or self.polish_flag):
            return

        callback_start_time = time.time()
        self.control['heuristic_callback_times_called'] += 1
        self.control['upperbound'] = self.get_incumbent_objective_value()
        self.control['lowerbound'] = self.get_best_objective_value()
        self.control['relative_gap'] = self.get_MIP_relative_gap()

        # check if lower bound was updated since last call
        lowerbound_update = self.previous_lowerbound < self.control['lowerbound']
        if lowerbound_update:
            self.previous_lowerbound = self.control['lowerbound']

        # check if incumbent solution has been updated
        # if incumbent solution is not integer, then recast as integer and update objective value manually
        if self.has_incumbent():
            cplex_incumbent = np.array(self.get_incumbent_values(self.rho_idx))

            cplex_rounding_issue = not is_integer(cplex_incumbent)
            if cplex_rounding_issue:
                cplex_incumbent = cast_to_integer(cplex_incumbent)

            incumbent_update = not np.array_equal(cplex_incumbent, self.control['incumbent'])

            if incumbent_update:
                self.control['incumbent'] = cplex_incumbent
                self.control['n_incumbent_updates'] += 1
                if cplex_rounding_issue:
                    self.control['upperbound'] = self.get_objval(cplex_incumbent)

        # update flags on whether or not to keep rounding / polishing
        self.update_heuristic_flags(n_cuts = self.control['n_cuts'], relative_gap = self.control['relative_gap'])

        # variables to store best objective value / solution from heuristics
        best_objval = float('inf')
        best_solution = None

        # run sequential rounding if lower bound was updated since the last call
        if self.round_flag and lowerbound_update:

            rho_cts = np.array(self.get_values(self.rho_idx))

            # bound the L0 norm attainable by rounding: coefficients whose ceil
            # and floor are both non-zero must stay non-zero
            zero_idx_rho_ceil = np.equal(np.ceil(rho_cts), 0)
            zero_idx_rho_floor = np.equal(np.floor(rho_cts), 0)
            cannot_round_to_zero = np.logical_not(np.logical_or(zero_idx_rho_ceil, zero_idx_rho_floor))
            min_l0_norm = np.count_nonzero(cannot_round_to_zero[self.L0_reg_ind])
            max_l0_norm = np.count_nonzero(rho_cts[self.L0_reg_ind])

            rounded_solution_is_feasible = (min_l0_norm < self.control['bounds']['L0_max'] and max_l0_norm > self.control['bounds']['L0_min'])

            if rounded_solution_is_feasible:
                rounding_cutoff = self.rounding_tolerance * self.control['upperbound']
                rounding_start_time = time.time()
                rounded_solution, rounded_objval, early_stop = self.rounding_handle(rho_cts, rounding_cutoff)
                self.control['total_round_time'] += time.time() - rounding_start_time
                self.control['n_rounded'] += 1

                # use the rounded solution if sequential rounding did not stop early
                if not early_stop:

                    if self.settings['add_cuts_at_heuristic_solutions']:
                        self.cut_queue.add(rounded_objval, rounded_solution)

                    if self.is_feasible(rounded_solution, L0_min = self.control['bounds']['L0_min'], L0_max = self.control['bounds']['L0_max']):
                        best_solution = rounded_solution
                        best_objval = rounded_objval

                    if self.polish_rounded_solutions:
                        current_upperbound = min(rounded_objval, self.control['upperbound'])
                        polishing_cutoff = current_upperbound * self.polishing_tolerance

                        if rounded_objval < polishing_cutoff:
                            start_time = time.time()
                            polished_solution, _, polished_objval = self.polishing_handle(rounded_solution)
                            self.control['total_round_then_polish_time'] += time.time() - start_time
                            self.control['n_rounded_then_polished'] += 1

                            if self.settings['add_cuts_at_heuristic_solutions']:
                                self.cut_queue.add(polished_objval, polished_solution)

                            if self.is_feasible(polished_solution, L0_min=self.control['bounds']['L0_min'], L0_max = self.control['bounds']['L0_max']):
                                best_solution = polished_solution
                                best_objval = polished_objval

        # polish solutions in polish_queue or that were produced by rounding
        if self.polish_flag and len(self.polish_queue) > 0:

            # get current upperbound
            current_upperbound = min(best_objval, self.control['upperbound'])
            polishing_cutoff = self.polishing_tolerance * current_upperbound
            self.polish_queue.filter_sort_unique(max_objval = polishing_cutoff)

            if len(self.polish_queue) > 0:
                polished_queue = FastSolutionPool(self.polish_queue.P)
                polish_time = 0
                n_polished = 0
                for objval, solution in zip(self.polish_queue.objvals, self.polish_queue.solutions):

                    # queue is sorted, so everything after the first miss is above the cutoff
                    if objval >= polishing_cutoff:
                        break

                    polish_start_time = time.time()
                    polished_solution, _, polished_objval = self.polishing_handle(solution)
                    polish_time += time.time() - polish_start_time
                    n_polished += 1

                    # tighten the cutoff whenever polishing yields a feasible improvement
                    if self.is_feasible(polished_solution, L0_min = self.control['bounds']['L0_min'], L0_max = self.control['bounds']['L0_max']):
                        polished_queue.add(polished_objval, polished_solution)
                        # bug fix: was min(polished_objval, polished_objval), which
                        # ignored the running upper bound and could loosen the cutoff
                        current_upperbound = min(polished_objval, current_upperbound)
                        polishing_cutoff = self.polishing_tolerance * current_upperbound

                    if polish_time > self.polishing_max_runtime:
                        break

                    if n_polished > self.polishing_max_solutions:
                        break

                self.polish_queue.clear()
                self.control['total_polish_time'] += polish_time
                self.control['n_polished'] += n_polished

                if self.settings['add_cuts_at_heuristic_solutions']:
                    self.cut_queue.add(polished_queue.objvals, polished_queue.solutions)

                # check if the best polished solution will improve the queue
                polished_queue.filter_sort_unique(max_objval = best_objval)
                if len(polished_queue) > 0:
                    best_objval, best_solution = polished_queue.get_best_objval_and_solution()

        # if heuristics produced a better solution then propose it to CPLEX
        heuristic_update = best_objval < self.control['upperbound']
        if heuristic_update:
            self.control['n_heuristic_updates'] += 1
            proposed_solution, proposed_objval = convert_to_risk_slim_cplex_solution(indices = self.indices, rho = best_solution, objval = best_objval)
            self.set_solution(solution = proposed_solution, objective_value = proposed_objval)

        self.control['total_heuristic_callback_time'] += time.time() - callback_start_time
        #print_log('left heuristic callback')
        return
# DATA CONVERSION
def is_integer(x):
    """Return True when every entry of the numpy array *x* is integer-valued.

    Parameters
    ----------
    x : numpy.ndarray

    Returns
    -------
    bool
    """
    as_int = np.require(x, dtype = np.int_)
    return np.array_equal(x, as_int)
def cast_to_integer(x):
    """Truncate a numpy array to integer values, keeping its original dtype.

    Parameters
    ----------
    x : numpy.ndarray

    Returns
    -------
    numpy.ndarray with the same dtype as *x* and integer-valued entries
    """
    original_type = x.dtype
    integer_values = np.require(x, dtype = np.int_)
    return np.require(integer_values, dtype = original_type)
| 37,697 | 42.834884 | 169 | py |
risk-slim | risk-slim-master/riskslim/coefficient_set.py | import numpy as np
from prettytable import PrettyTable
from .defaults import INTERCEPT_NAME
class CoefficientSet(object):
    """
    Represents and manipulates constraints on individual coefficients,
    including upper bound, lower bound, variable type, and L0 regularization.
    A CoefficientSet is composed of one _CoefficientElement per variable.
    """

    _initialized = False
    _print_flag = True
    _check_flag = True
    _correct_flag = True
    _variable_names = None

    def __init__(self, variable_names, **kwargs):
        """
        Parameters
        ----------
        variable_names : list of unique strings, one name per coefficient
        **kwargs : scalar or per-variable values for 'ub', 'lb', 'c0', 'type',
                   plus boolean flags 'print_flag', 'check_flag', 'correct_flag'
        """
        # set flags using setter methods (before _initialized flips __setattr__ behavior)
        self.variable_names = list(variable_names)
        self.print_flag = kwargs.get('print_flag', self._print_flag)
        self.check_flag = kwargs.get('check_flag', self._check_flag)
        self.correct_flag = kwargs.get('correct_flag', self._correct_flag)

        ub = kwargs.get('ub', _CoefficientElement._DEFAULT_UB)
        lb = kwargs.get('lb', _CoefficientElement._DEFAULT_LB)
        c0 = kwargs.get('c0', _CoefficientElement._DEFAULT_c0)
        vtype = kwargs.get('type', _CoefficientElement._DEFAULT_TYPE)

        # broadcast scalar values so there is exactly one value per variable
        ub = self._expand_values(value = ub)
        lb = self._expand_values(value = lb)
        c0 = self._expand_values(value = c0)
        vtype = self._expand_values(value = vtype)

        self._coef_elements = dict()
        for name in variable_names:
            idx = variable_names.index(name)
            self._coef_elements[name] = _CoefficientElement(name = name, ub = ub[idx], lb = lb[idx], c0 = c0[idx], vtype = vtype[idx])

        self._check_rep()
        self._initialized = True

    @property
    def P(self):
        """Number of coefficients in the set."""
        return len(self._variable_names)

    @property
    def print_flag(self):
        return bool(self._print_flag)

    @print_flag.setter
    def print_flag(self, flag):
        self._print_flag = bool(flag)

    @property
    def correct_flag(self):
        return bool(self._correct_flag)

    @correct_flag.setter
    def correct_flag(self, flag):
        self._correct_flag = bool(flag)

    @property
    def check_flag(self):
        return self._check_flag

    @check_flag.setter
    def check_flag(self, flag):
        self._check_flag = bool(flag)

    @property
    def variable_names(self):
        return self._variable_names

    @variable_names.setter
    def variable_names(self, names):
        assert isinstance(names, list), 'variable_names must be a list'
        for name in names:
            assert isinstance(name, str), 'variable_names must be a list of strings'
        assert len(names) == len(set(names)), 'variable_names contain elements with unique names'
        if self._variable_names is not None:
            assert len(names) == len(self), 'variable_names must contain at least %d elements' % len(self)
        self._variable_names = list(names)

    def index(self, name):
        """Return the position of variable `name`; raise ValueError if absent."""
        assert isinstance(name, str)
        if name in self._variable_names:
            return self._variable_names.index(name)
        else:
            raise ValueError('no variable named %s in coefficient set' % name)

    def penalized_indices(self):
        """Boolean array marking which variables are subject to the L0 penalty."""
        return np.array(list(map(lambda v: self._coef_elements[v].penalized, self._variable_names)))

    def update_intercept_bounds(self, X, y, max_offset, max_L0_value = None):
        """
        uses data to set the lower and upper bound on the offset to a conservative value
        the value is guaranteed to avoid a loss in performance

        optimal_offset = max_abs_score + 1
        where max_abs_score is the largest absolute score that can be achieved using the coefficients in coef_set
        with the training data. note:
        when offset >= optimal_offset, then we predict y = +1 for every example
        when offset <= optimal_offset, then we predict y = -1 for every example
        thus, any feasible model should do better.

        Parameters
        ----------
        X
        y
        max_offset
        max_L0_value

        Returns
        -------
        None
        """
        if INTERCEPT_NAME not in self._coef_elements:
            raise ValueError("coef_set must contain a variable for the offset called %s" % INTERCEPT_NAME)

        e = self._coef_elements[INTERCEPT_NAME]

        # get idx of intercept/variables
        names = self.variable_names
        variable_names = list(names)
        variable_names.remove(INTERCEPT_NAME)
        variable_idx = np.array([names.index(n) for n in variable_names])

        # get max # of non-zero coefficients given model size limit
        # NOTE(review): trivial_L0_max counts all non-intercept variables rather
        # than only the penalized ones — confirm this is intended
        penalized_idx = [self._coef_elements[n].penalized for n in variable_names]
        trivial_L0_max = len(penalized_idx)

        if max_L0_value is None:
            max_L0_value = trivial_L0_max

        if max_L0_value > 0:
            max_L0_value = min(trivial_L0_max, max_L0_value)

        # per-feature extremes of Z = X * y
        Z = X * y
        Z_min = np.min(Z, axis = 0)
        Z_max = np.max(Z, axis = 0)

        # get regularized indices
        L0_reg_ind = np.isnan(self.C_0j)[variable_idx]

        # get smallest / largest achievable score
        s_min, s_max = get_score_bounds(Z_min = Z_min[variable_idx],
                                        Z_max = Z_max[variable_idx],
                                        rho_lb = self.lb[variable_idx],
                                        rho_ub = self.ub[variable_idx],
                                        L0_reg_ind = L0_reg_ind,
                                        L0_max = max_L0_value)

        # a conservative offset bound guarantees no loss in performance
        conservative_offset = max(abs(s_min), abs(s_max)) + 1
        max_offset = min(max_offset, conservative_offset)
        e.ub = max_offset
        e.lb = -max_offset

    def tabulate(self):
        """Return a pretty-printed table of all coefficient constraints."""
        t = PrettyTable()
        t.align = "r"
        t.add_column("variable_name", self._variable_names)
        t.add_column("vtype", self.vtype)
        t.add_column("sign", self.sign)
        t.add_column("lb", self.lb)
        t.add_column("ub", self.ub)
        t.add_column("c0", self.c0)
        return str(t)

    def __len__(self):
        return len(self._variable_names)

    def __str__(self):
        return self.tabulate()

    def __repr__(self):
        # bug fix: previously returned None (implicitly) when print_flag was
        # False, which raises TypeError since __repr__ must return a str
        return self.tabulate()

    def __getattr__(self, name):
        # vector access: coef_set.ub returns an array of per-element bounds;
        # 'C_0j' is an alias for the c0 vector
        if name == 'C_0j':
            name = 'c0'

        vals = [getattr(self._coef_elements[v], name) for v in self._variable_names]
        if name in ['ub', 'lb', 'c0', 'sign', 'vtype']:
            return np.array(vals)
        else:
            return list(vals)

    def __setattr__(self, name, value):
        # after initialization, attribute assignment broadcasts the value(s)
        # onto every _CoefficientElement; before it, behave normally
        if self._initialized:
            assert all(map(lambda e: hasattr(e, name), self._coef_elements.values()))
            attr_values = self._expand_values(value)
            for e, v in zip(self._coef_elements.values(), attr_values):
                setattr(e, name, v)
            self._check_rep()
        else:
            object.__setattr__(self, name, value)

    def __getitem__(self, key):
        if isinstance(key, int):
            assert 0 <= int(key) <= self.P
            return self._coef_elements[self._variable_names[key]]
        elif isinstance(key, str):
            return self._coef_elements[key]
        else:
            raise KeyError('invalid key')

    def __setitem__(self, key, value):
        if isinstance(key, int):
            assert 0 <= int(key) <= self.P
            key = self._variable_names[key]
        elif isinstance(key, str):
            assert isinstance(key, str)
            assert key in self._variable_names
            assert value.name == key
        else:
            raise KeyError('invalid key')
        assert isinstance(value, _CoefficientElement)
        self._coef_elements[key] = value

    def _check_rep(self):
        """Validate names/elements; optionally zero the intercept penalty."""
        if self._check_flag:
            assert len(self._variable_names) == len(set(self._variable_names))
            for name in self._variable_names:
                assert isinstance(name, str)
                assert len(name) >= 1
                assert self._coef_elements[name]._check_rep()

        if self._correct_flag:
            for name in self._variable_names:
                e = self._coef_elements[name]
                if name in {'Intercept', '(Intercept)', 'intercept', '(intercept)'}:
                    if e.c0 > 0 or np.isnan(e.c0):
                        if self._print_flag:
                            print("setting c0_value = 0.0 for %s to ensure that intercept is not penalized" % name)
                        e._c0 = 0.0

        return True

    def _expand_values(self, value):
        """Broadcast a scalar / length-1 value to a sequence of P values."""
        if isinstance(value, np.ndarray):
            if value.size == self.P:
                value_array = value
            elif value.size == 1:
                value_array = np.repeat(value, self.P)
            else:
                raise ValueError("length mismatch; need either 1 or %d values" % self.P)

        elif isinstance(value, list):
            if len(value) == self.P:
                value_array = value
            elif len(value) == 1:
                # bug fix: was [value] * self.P, which produced a list of lists
                value_array = value * self.P
            else:
                raise ValueError("length mismatch; need either 1 or %d values" % self.P)

        elif isinstance(value, str):
            value_array = [str(value)] * self.P

        elif isinstance(value, int):
            value_array = [int(value)] * self.P

        elif isinstance(value, float):
            value_array = [float(value)] * self.P

        else:
            # bug fix: format placeholder previously had no argument
            raise ValueError("unknown variable type %s" % type(value).__name__)

        return(value_array)
class _CoefficientElement(object):
_DEFAULT_UB = 5
_DEFAULT_LB = -5
_DEFAULT_c0 = float('nan')
_DEFAULT_TYPE = 'I'
_VALID_TYPES = ['I', 'C']
def _is_integer(self, x):
return np.array_equal(x, np.require(x, dtype = np.int_))
def __init__(self, name, ub = _DEFAULT_UB, lb = _DEFAULT_LB, c0 = _DEFAULT_c0, vtype = _DEFAULT_TYPE):
self._name = str(name)
self._ub = float(ub)
self._lb = float(lb)
self._c0 = float(c0)
self._vtype = vtype
assert self._check_rep()
@property
def name(self):
return self._name
@property
def vtype(self):
return self._vtype
@vtype.setter
def vtype(self, value):
assert isinstance(value, str)
assert value in self._VALID_TYPES
self._vtype = str(value)
@property
def ub(self):
return self._ub
@ub.setter
def ub(self, value):
if hasattr(value, '__len__'):
assert len(value) == 1
value = value[0]
assert value >= self._lb
self._ub = float(value)
@property
def lb(self):
return self._lb
@lb.setter
def lb(self, value):
if hasattr(value, '__len__'):
assert len(value) == 1
value = value[0]
assert value <= self._ub
self._lb = float(value)
@property
def c0(self):
return self._c0
@c0.setter
def c0(self, value):
if np.isnan(value):
self._c0 = float('nan')
else:
assert np.isfinite(value), 'L0 penalty for %s must either be NaN or a finite positive number' % self._name
assert value >= 0.0, 'L0 penalty for %s must either be NaN or a finite positive number' % self._name
self._c0 = float(value)
@property
def penalized(self):
return np.isnan(self._c0) or (self._c0 > 0.0)
@property
def sign(self):
if self._ub > 0.0 and self._lb >= 0.0:
return 1
elif self._ub <= 0.0 and self._lb < 0.0:
return -1
else:
return 0
@sign.setter
def sign(self, value):
if value > 0:
self._lb = 0.0
elif value < 0:
self._ub = 0.0
def _check_rep(self):
#name
assert isinstance(self._name, str)
assert len(self._name) >= 1
#bounds
assert np.isfinite(self.ub)
assert np.isfinite(self.lb)
assert self.ub >= self.lb
# value
assert self._vtype in self._VALID_TYPES
assert np.isnan(self.c0) or (self.c0 >= 0.0 and np.isfinite(self.c0))
return True
def __repr__(self):
return self.tabulate()
def __str__(self):
return self.tabulate()
def tabulate(self):
s = ['-' * 60,
'variable: %s' % self._name,
'-' * 60,
'%s: %1.1f' % ('ub', self._ub),
'%s: %1.1f' % ('lb', self._lb),
'%s: %1.2g' % ('c0', self._c0),
'%s: %1.0f' % ('sign', self.sign),
'%s: %s' % ('vtype', self._vtype)]
t = '\n' + '\n'.join(s) + '\n'
return t
def get_score_bounds(Z_min, Z_max, rho_lb, rho_ub, L0_reg_ind = None, L0_max = None):
    """Return (s_min, s_max), the smallest/largest achievable score Z * rho.

    Each coefficient attains its extreme contribution at a corner of
    [rho_lb, rho_ub] x [Z_min, Z_max]. When a model-size limit L0_max is given,
    only the L0_max most extreme penalized features contribute; unpenalized
    features always contribute.
    """
    corners = np.vstack([Z_min * rho_lb, Z_max * rho_lb, Z_min * rho_ub, Z_max * rho_ub])
    lo = np.min(corners, axis = 0)
    hi = np.max(corners, axis = 0)

    no_sparsity_limit = (L0_max is None) or (L0_reg_ind is None) or (L0_max == Z_min.shape[0])
    if no_sparsity_limit:
        return np.sum(lo), np.sum(hi)

    # sum contributions of the L0_max most negative / most positive penalized
    # features plus all unpenalized features
    s_min = np.sum(np.sort(lo[L0_reg_ind])[0:L0_max]) + np.sum(lo[~L0_reg_ind])
    s_max = np.sum(-np.sort(-hi[L0_reg_ind])[0:L0_max]) + np.sum(hi[~L0_reg_ind])
    return s_min, s_max
risk-slim | risk-slim-master/riskslim/utils.py | import logging
import sys
from pathlib import Path
import time
import warnings
import numpy as np
import pandas as pd
import prettytable as pt
from .defaults import INTERCEPT_NAME
# DATA
def load_data_from_csv(dataset_csv_file, sample_weights_csv_file = None, fold_csv_file = None, fold_num = 0):
    """
    Parameters
    ----------
    dataset_csv_file            csv file containing the training data
                                see /datasets/adult_data.csv for an example
                                training data stored as a table with N+1 rows and d+1 columns
                                column 1 is the outcome variable entries must be (-1,1) or (0,1)
                                column 2 to d+1 are the d input variables
                                row 1 contains unique names for the outcome variable, and the input variables

    sample_weights_csv_file     csv file containing sample weights for the training data
                                weights stored as a table with N rows and 1 column
                                all sample weights must be non-negative

    fold_csv_file               csv file containing indices of folds for K-fold cross validation
                                fold indices stored as a table with N rows and 1 column
                                folds must be integers between 1 to K
                                if fold_csv_file is None, then we do not use folds

    fold_num                    int between 0 to K, where K is set by the fold_csv_file
                                let fold_idx be the N x 1 index vector listed in fold_csv_file
                                samples where fold_idx == fold_num will be used to test
                                samples where fold_idx != fold_num will be used to train the model
                                fold_num = 0 means use "all" of the training data (since all values of fold_idx \in [1,K])
                                if fold_csv_file is None, then fold_num is set to 0

    Returns
    -------
    dictionary containing training data for a binary classification problem with the fields:

     - 'X' N x P matrix of features (numpy.ndarray) with a column of 1s for the INTERCEPT_NAME
     - 'Y' N x 1 vector of labels (+1/-1) (numpy.ndarray)
     - 'variable_names' list of strings containing the names of each feature (list)
     - 'outcome_name' string containing the name of the output
     - 'sample_weights' N x 1 vector of sample weights, must all be positive
    """
    dataset_csv_file = Path(dataset_csv_file)
    if not dataset_csv_file.exists():
        raise IOError('could not find dataset_csv_file: %s' % dataset_csv_file)

    df = pd.read_csv(dataset_csv_file, sep = ',')

    raw_data = df.to_numpy()
    data_headers = list(df.columns.values)
    N = raw_data.shape[0]

    # setup Y vector and Y_name (outcome is always the first column)
    Y_col_idx = [0]
    Y = raw_data[:, Y_col_idx]
    Y_name = data_headers[Y_col_idx[0]]
    # recode 0/1 labels to -1/+1
    Y[Y == 0] = -1

    # setup X and X_names
    X_col_idx = [j for j in range(raw_data.shape[1]) if j not in Y_col_idx]
    X = raw_data[:, X_col_idx]
    variable_names = [data_headers[j] for j in X_col_idx]

    # insert a column of ones to X for the intercept
    X = np.insert(arr=X, obj=0, values=np.ones(N), axis=1)
    variable_names.insert(0, INTERCEPT_NAME)

    if sample_weights_csv_file is None:
        sample_weights = np.ones(N)
    else:
        sample_weights_csv_file = Path(sample_weights_csv_file)
        if not sample_weights_csv_file.exists():
            raise IOError('could not find sample_weights_csv_file: %s' % sample_weights_csv_file)
        # NOTE(review): weights read from csv are returned as an (N, 1) array,
        # while the default above is (N,) — confirm downstream code accepts both
        sample_weights = pd.read_csv(sample_weights_csv_file, sep=',', header=None)
        sample_weights = sample_weights.to_numpy()

    data = {
        'X': X,
        'Y': Y,
        'variable_names': variable_names,
        'outcome_name': Y_name,
        'sample_weights': sample_weights,
        }

    # load folds and drop the test fold from the returned training data
    if fold_csv_file is not None:
        fold_csv_file = Path(fold_csv_file)
        if not fold_csv_file.exists():
            raise IOError('could not find fold_csv_file: %s' % fold_csv_file)

        fold_idx = pd.read_csv(fold_csv_file, sep=',', header=None)
        fold_idx = fold_idx.values.flatten()
        K = max(fold_idx)
        all_fold_nums = np.sort(np.unique(fold_idx))
        assert len(fold_idx) == N, "dimension mismatch: read %r fold indices (expected N = %r)" % (len(fold_idx), N)
        assert np.all(all_fold_nums == np.arange(1, K+1)), "folds should contain indices between 1 to %r" % K
        assert fold_num in np.arange(0, K+1), "fold_num should either be 0 or an integer between 1 to %r" % K
        if fold_num >= 1:
            #test_idx = fold_num == fold_idx
            train_idx = fold_num != fold_idx
            data['X'] = data['X'][train_idx,]
            data['Y'] = data['Y'][train_idx]
            data['sample_weights'] = data['sample_weights'][train_idx]

    assert check_data(data)
    return data
def check_data(data):
    """
    makes sure that 'data' contains training data that is suitable for binary classification problems

    'data' is a dictionary that must contain:

     - 'X' N x P matrix of features (numpy.ndarray) with a column of 1s for the INTERCEPT_NAME
     - 'Y' N x 1 vector of labels (+1/-1) (numpy.ndarray)
     - 'variable_names' list of strings containing the names of each feature (list)

    data can also contain:

     - 'outcome_name' string containing the name of the output (optional)
     - 'sample_weights' N x 1 vector of sample weights, must all be positive

    Returns
    -------
    True if data passes checks; raises AssertionError otherwise
    """
    # type checks
    assert type(data) is dict, "data should be a dict"

    assert 'X' in data, "data should contain X matrix"
    assert type(data['X']) is np.ndarray, "type(X) should be numpy.ndarray"

    assert 'Y' in data, "data should contain Y matrix"
    assert type(data['Y']) is np.ndarray, "type(Y) should be numpy.ndarray"

    assert 'variable_names' in data, "data should contain variable_names"
    assert type(data['variable_names']) is list, "variable_names should be a list"

    X = data['X']
    Y = data['Y']
    variable_names = data['variable_names']

    if 'outcome_name' in data:
        assert type(data['outcome_name']) is str, "outcome_name should be a str"

    # sizes and uniqueness
    N, P = X.shape
    assert N > 0, 'X matrix must have at least 1 row'
    assert P > 0, 'X matrix must have at least 1 column'
    assert len(Y) == N, 'dimension mismatch. Y must contain as many entries as X. Need len(Y) = N.'
    assert len(list(set(data['variable_names']))) == len(data['variable_names']), 'variable_names is not unique'
    assert len(data['variable_names']) == P, 'len(variable_names) should be same as # of cols in X'

    # feature matrix must be finite
    assert np.all(~np.isnan(X)), 'X has nan entries'
    assert np.all(~np.isinf(X)), 'X has inf entries'

    # offset (intercept) column in feature matrix
    if INTERCEPT_NAME in variable_names:
        assert all(X[:, variable_names.index(INTERCEPT_NAME)] == 1.0), "(Intercept)' column should only be composed of 1s"
    else:
        warnings.warn("there is no column named INTERCEPT_NAME in variable_names")

    # label values
    assert all((Y == 1) | (Y == -1)), 'Need Y[i] = [-1,1] for all i.'
    # bug fix: the two warning messages below were swapped — all(+1) labels
    # means there are no NEGATIVE examples, and vice versa
    if all(Y == 1):
        warnings.warn('Y does not contain any negative examples. Need Y[i] = -1 for at least 1 i.')
    if all(Y == -1):
        warnings.warn('Y does not contain any positive examples. Need Y[i] = +1 for at least 1 i.')

    if 'sample_weights' in data:
        sample_weights = data['sample_weights']
        # bug fix: this type check was a bare expression with no effect
        assert type(sample_weights) is np.ndarray, "type(sample_weights) should be numpy.ndarray"
        assert len(sample_weights) == N, 'sample_weights should contain N elements'
        assert all(sample_weights > 0.0), 'sample_weights[i] > 0 for all i '

        # by default, we set sample_weights as an N x 1 array of ones. if not, then sample weights is non-trivial
        if any(sample_weights != 1.0) and len(np.unique(sample_weights)) < 2:
            warnings.warn('note: sample_weights only has <2 unique values')

    return True
# MODEL PRINTING
def print_model(rho, data, show_omitted_variables = False):
    """Print a risk-slim scoring system as an ASCII table and return the table.

    Parameters
    ----------
    rho : coefficient vector aligned with data['variable_names']
    data : dict with 'variable_names', 'X', and optionally 'outcome_name'
    show_omitted_variables : if False, only non-zero coefficients are shown,
                             sorted from most positive to most negative

    Returns
    -------
    prettytable.PrettyTable that was printed
    """
    variable_names = data['variable_names']

    rho_values = np.copy(rho)
    rho_names = list(variable_names)

    # pull the intercept out of the table; it is folded into the prediction string
    if INTERCEPT_NAME in rho_names:
        intercept_ind = variable_names.index(INTERCEPT_NAME)
        intercept_val = int(rho[intercept_ind])
        rho_values = np.delete(rho_values, intercept_ind)
        rho_names.remove(INTERCEPT_NAME)
    else:
        intercept_val = 0

    # bug fix: these branches were swapped — the named-outcome format string was
    # used when 'outcome_name' was MISSING (raising KeyError) and vice versa
    if 'outcome_name' in data:
        predict_string = "Pr(%s = +1) = 1.0/(1.0 + exp(-(%d + score))" % (data['outcome_name'].upper(), intercept_val)
    else:
        predict_string = "Pr(Y = +1) = 1.0/(1.0 + exp(-(%d + score))" % intercept_val

    if not show_omitted_variables:
        selected_ind = np.flatnonzero(rho_values)
        rho_values = rho_values[selected_ind]
        rho_names = [rho_names[i] for i in selected_ind]
        # note: removed an unused rho_binary computation that was never read

        # sort from most positive to most negative coefficient
        sort_ind = np.argsort(-np.array(rho_values))
        rho_values = [rho_values[j] for j in sort_ind]
        rho_names = [rho_names[j] for j in sort_ind]
        rho_values = np.array(rho_values)

    rho_values_string = [str(int(i)) + " points" for i in rho_values]
    n_variable_rows = len(rho_values)
    total_string = "ADD POINTS FROM ROWS %d to %d" % (1, n_variable_rows)

    # default=0 keeps this from crashing on a model with no non-zero coefficients
    max_name_col_length = max(len(predict_string), len(total_string), max([len(s) for s in rho_names], default = 0)) + 2
    max_value_col_length = max(7, max([len(s) for s in rho_values_string], default = 0) + len("points")) + 2

    m = pt.PrettyTable()
    m.field_names = ["Variable", "Points", "Tally"]

    m.add_row([predict_string, "", ""])
    m.add_row(['=' * max_name_col_length, "=" * max_value_col_length, "========="])

    for name, value_string in zip(rho_names, rho_values_string):
        m.add_row([name, value_string, "+ ....."])

    m.add_row(['=' * max_name_col_length, "=" * max_value_col_length, "========="])
    m.add_row([total_string, "SCORE", "= ....."])
    m.header = False
    m.align["Variable"] = "l"
    m.align["Points"] = "r"
    m.align["Tally"] = "r"
    print(m)
    return m
# LOGGING
def setup_logging(logger, log_to_console = True, log_file = None):
    """
    Sets up logging to console and file on disk
    See https://docs.python.org/2/howto/logging-cookbook.html for details on how to customize

    Parameters
    ----------
    logger              logging.Logger object to configure
    log_to_console      set to True to print log messages to the console
                        (bugfix: the old docstring claimed True *disables* console logging)
    log_file            path to a file that receives log messages, or None for no file logging

    Returns
    -------
    Logger object that prints formatted messages to log_file and console
    """
    # quick return if no logging to console or file
    if log_to_console is False and log_file is None:
        logger.disabled = True
        return logger
    log_format = logging.Formatter(fmt='%(asctime)s | %(levelname)-8s | %(message)s', datefmt='%m-%d-%Y %I:%M:%S %p')
    # log to file
    if log_file is not None:
        fh = logging.FileHandler(filename=log_file)
        #fh.setLevel(logging.DEBUG)
        fh.setFormatter(log_format)
        logger.addHandler(fh)
    # log to console
    if log_to_console:
        ch = logging.StreamHandler()
        #ch.setLevel(logging.DEBUG)
        ch.setFormatter(log_format)
        logger.addHandler(ch)
    return logger
def print_log(msg, print_flag = True):
    """
    Print a message to stdout prefixed with a timestamp, then flush.

    Parameters
    ----------
    msg             message to print; strings are printed as-is, anything else via repr()
    print_flag      set to False to suppress all output
    """
    if not print_flag:
        return
    stamp = time.strftime("%m/%d/%y @ %I:%M %p", time.localtime())
    template = '%s | %s' if isinstance(msg, str) else '%s | %r'
    print(template % (stamp, msg))
    sys.stdout.flush()
def validate_settings(settings = None, default_settings = None):
    """
    Normalize a settings dict against a dict of default values.

    Returns a new dict; neither input is modified. When default_settings is
    provided, the result contains exactly its keys, preferring values found
    in settings. When default_settings is None, a copy of settings (or an
    empty dict) is returned.
    """
    if settings is None:
        merged = dict()
    else:
        assert isinstance(settings, dict)
        merged = dict(settings)
    if default_settings is not None:
        assert isinstance(default_settings, dict)
        merged = {key: merged.get(key, default_settings[key]) for key in default_settings}
    return merged
risk-slim | risk-slim-master/riskslim/defaults.py | import numpy as np
# column name used to identify the intercept term in data['variable_names']
INTERCEPT_NAME = '(Intercept)'
# Settings
# default settings for the Lattice Cutting Plane Algorithm (LCPA)
DEFAULT_LCPA_SETTINGS = {
    #
    'c0_value': 1e-6,       # trade-off parameter passed to setup_penalty_parameters (per-coefficient penalty)
    'w_pos': 1.00,          # class weight parameter; not used in this module -- semantics set elsewhere
    #
    # MIP Formulation
    'drop_variables': True,             #drop variables
    'tight_formulation': True,          #use a slightly tighter MIP formulation
    'include_auxillary_variable_for_objval': True,
    'include_auxillary_variable_for_L0_norm': True,
    #
    # LCPA Settings
    'max_runtime': 300.0,                               # max runtime for LCPA
    'max_tolerance': 0.000001,                          # tolerance to stop LCPA
    'display_cplex_progress': True,                     # setting to True shows CPLEX progress
    'loss_computation': 'normal',                       # type of loss computation to use ('normal','fast','lookup')
    'chained_updates_flag': True,                       # use chained updates
    'initialization_flag': False,                       # use initialization procedure
    'initial_bound_updates': True,                      # update bounds before solving
    'add_cuts_at_heuristic_solutions': True,            #add cuts at integer feasible solutions found using polishing/rounding
    #
    # LCPA Rounding Heuristic
    'round_flag': True,                                 # round continuous solutions with SeqRd
    'polish_rounded_solutions': True,                   # polish solutions rounded with SeqRd using DCD
    'rounding_tolerance': float('inf'),                 # only solutions with objective value < (1 + tol) are rounded
    'rounding_start_cuts': 0,                           # cuts needed to start using rounding heuristic
    'rounding_start_gap': float('inf'),                 # optimality gap needed to start using rounding heuristic
    'rounding_stop_cuts': 20000,                        # cuts needed to stop using rounding heuristic
    'rounding_stop_gap': 0.2,                           # optimality gap needed to stop using rounding heuristic
    #
    # LCPA Polishing Heuristic
    'polish_flag': True,                                # polish integer feasible solutions with DCD
    'polishing_tolerance': 0.1,                         # only solutions with objective value < (1 + polishing_tolerance) * upper bound are polished
    'polishing_max_runtime': 10.0,                      # max time to run polishing each time
    'polishing_max_solutions': 5.0,                     # max # of solutions to polish each time
    'polishing_start_cuts': 0,                          # cuts needed to start using polishing heuristic
    'polishing_start_gap': float('inf'),                # min optimality gap needed to start using polishing heuristic
    'polishing_stop_cuts': float('inf'),                # cuts needed to stop using polishing heuristic
    'polishing_stop_gap': 5.0,                          # max optimality gap required to stop using polishing heuristic
    #
    # Internal Parameters
    'purge_loss_cuts': False,
    'purge_bound_cuts': False,
    }
# default CPLEX solver parameters (merged into DEFAULT_LCPA_SETTINGS with a 'cplex_' prefix below)
DEFAULT_CPLEX_SETTINGS = {
    'randomseed': 0,                        # random seed
    'mipemphasis': 0,                       # cplex MIP strategy
    'mipgap': np.finfo('float').eps,        #
    'absmipgap': np.finfo('float').eps,     #
    'integrality_tolerance': np.finfo('float').eps,     #
    'repairtries': 20,                      # number of tries to repair user provided solutions
    'poolsize': 100,                        # number of feasible solutions to keep in solution pool
    'poolrelgap': float('nan'),             # discard if solutions
    'poolreplace': 2,                       # solution pool
    'n_cores': 1,                           # number of cores to use in B & B (must be 1)
    'nodefilesize': (120 * 1024) / 1,       # node file size
    }
# default settings for the standard cutting-plane algorithm (run_standard_cpa)
DEFAULT_CPA_SETTINGS = {
    #
    'type': 'cvx',                          # 'cvx' (LP relaxation) or 'ntree'; selects the bound-update routine
    'display_progress': True,               # print progress of initialization procedure
    'display_cplex_progress': False,        # print of CPLEX during intialization procedure
    'save_progress': False,                 # print progress of initialization procedure
    'update_bounds': True,
    #
    'max_runtime': 300.0,                   # max time to run CPA in initialization procedure
    'max_runtime_per_iteration': 15.0,      # max time per iteration of CPA
    #
    'max_coefficient_gap': 0.49,            # stopping tolerance for CPA (based on gap between consecutive solutions)
    'min_iterations_before_coefficient_gap_check': 250,
    #
    'max_iterations': 10000,                # max # of cuts needed to stop CPA
    'max_tolerance': 0.0001,                # stopping tolerance for CPA (based on optimality gap)
    }
# default settings for initialize_lattice_cpa (rounding + polishing of CPA solutions)
DEFAULT_INITIALIZATION_SETTINGS = {
    'type': 'cvx',
    'use_rounding': True,                           # use SeqRd in initialization procedure
    'rounding_max_runtime': 30.0,                   # max runtime for Rs in initialization procedure
    'rounding_max_solutions': 5,                    # max solutions to round using Rd
    #
    'use_sequential_rounding': True,                # use SeqRd in initialization procedure
    'sequential_rounding_max_runtime': 30.0,        # max runtime for SeqRd in initialization procedure
    'sequential_rounding_max_solutions': 5,         # max solutions to round using SeqRd
    #
    'polishing_after': True,                        # polish after rounding
    'polishing_max_runtime': 30.0,                  # max runtime for polishing
    'polishing_max_solutions': 5                    # max solutions to polish
    }
# Initialization Settings includes CPA Settings
DEFAULT_INITIALIZATION_SETTINGS.update(DEFAULT_CPA_SETTINGS)
# LCPA Settings includes Initialization and CPLEX settings
# (keys are namespaced with 'init_' / 'cplex_' prefixes so they can coexist in one dict)
DEFAULT_LCPA_SETTINGS.update({'init_%s' % k: v for k,v in DEFAULT_INITIALIZATION_SETTINGS.items()})
DEFAULT_LCPA_SETTINGS.update({'cplex_%s' % k: v for k,v in DEFAULT_CPLEX_SETTINGS.items()})
risk-slim | risk-slim-master/riskslim/initialization.py | import time
import numpy as np
from cplex import Cplex, SparsePair, infinity as CPX_INFINITY
from .setup_functions import setup_penalty_parameters
from .mip import create_risk_slim, set_cplex_mip_parameters
from .solution_pool import SolutionPool
from .bound_tightening import chained_updates, chained_updates_for_lp
from .heuristics import discrete_descent, sequential_rounding
from .defaults import DEFAULT_CPA_SETTINGS, DEFAULT_INITIALIZATION_SETTINGS
from .utils import print_log, validate_settings
def initialize_lattice_cpa(Z,
                           c0_value,
                           constraints,
                           bounds,
                           settings,
                           risk_slim_settings,
                           cplex_settings,
                           compute_loss_real,
                           compute_loss_cut_real,
                           compute_loss_from_scores_real,
                           compute_loss_from_scores,
                           get_objval,
                           get_L0_penalty,
                           is_feasible):
    """
    Initialization procedure for lattice CPA: solves the LP relaxation of the
    RiskSLIM MIP with a standard cutting-plane algorithm, then rounds and
    polishes the LP solutions to produce integer-feasible warm starts, loss
    cuts, and tightened bounds for the main solver.

    Parameters
    ----------
    Z                                   data matrix passed through to rounding/polishing handles
    c0_value                            trade-off parameter fed to setup_penalty_parameters
    constraints                         dict with 'coef_set', 'L0_min', 'L0_max'
    bounds                              dict of objective/loss/L0 bounds; tightened here
    settings                            initialization settings (see DEFAULT_INITIALIZATION_SETTINGS)
    risk_slim_settings                  settings used to build the RiskSLIM LP
    cplex_settings                      CPLEX parameter settings
    compute_loss_real                   loss handle for real-valued coefficients
    compute_loss_cut_real               loss-cut handle (value + slope) for real-valued coefficients
    compute_loss_from_scores_real       loss-from-scores handle, real-valued variant
    compute_loss_from_scores            loss-from-scores handle, integer variant
    get_objval                          handle returning the objective value of a solution
    get_L0_penalty                      handle returning the L0 penalty of a solution
    is_feasible                         handle that tests feasibility of an integer solution

    Returns
    -------
    (pool, cuts, bounds) -- NOTE: the solution pool comes first, then the cuts,
    then the updated bounds (the original docstring listed them in the wrong order)
    """
    #todo: recompute function handles here if required
    assert callable(compute_loss_real)
    assert callable(compute_loss_cut_real)
    assert callable(compute_loss_from_scores_real)
    assert callable(compute_loss_from_scores)
    assert callable(get_objval)
    assert callable(get_L0_penalty)
    assert callable(is_feasible)
    print_log('-' * 60)
    print_log('runnning initialization procedure')
    print_log('-' * 60)
    # trade-off parameter
    _, C_0, L0_reg_ind, C_0_nnz = setup_penalty_parameters(c0_value = c0_value, coef_set = constraints['coef_set'])
    settings = validate_settings(settings, default_settings = DEFAULT_INITIALIZATION_SETTINGS)
    settings['type'] = 'cvx'
    # create RiskSLIM LP (integer variables relaxed so standard CPA applies)
    risk_slim_settings = dict(risk_slim_settings)
    risk_slim_settings.update(bounds)
    risk_slim_settings['relax_integer_variables'] = True
    risk_slim_lp, risk_slim_lp_indices = create_risk_slim(coef_set = constraints['coef_set'], input = risk_slim_settings)
    risk_slim_lp = set_cplex_mip_parameters(risk_slim_lp, cplex_settings, display_cplex_progress = settings['display_cplex_progress'])
    # solve risk_slim_lp LP using standard CPA
    cpa_stats, cuts, cpa_pool = run_standard_cpa(cpx = risk_slim_lp,
                                                 cpx_indices = risk_slim_lp_indices,
                                                 compute_loss = compute_loss_real,
                                                 compute_loss_cut = compute_loss_cut_real,
                                                 settings = settings)
    # update bounds using the LP lower bound
    bounds = chained_updates(bounds, C_0_nnz, new_objval_at_relaxation = cpa_stats['lowerbound'])
    # NOTE(review): `cuts` is a dict with keys 'coefs'/'lhs', so len(cuts) is always 2;
    # len(cuts['coefs']) is probably what was intended here
    print_log('CPA produced %d cuts' % len(cuts))
    def rounded_model_size_is_ok(rho):
        # a continuous solution is only worth rounding if *some* rounding of it
        # can satisfy the model-size constraint L0_min <= ||rho||_0 <= L0_max
        zero_idx_rho_ceil = np.equal(np.ceil(rho), 0)
        zero_idx_rho_floor = np.equal(np.floor(rho), 0)
        cannot_round_to_zero = np.logical_not(np.logical_or(zero_idx_rho_ceil, zero_idx_rho_floor))
        rounded_rho_L0_min = np.count_nonzero(cannot_round_to_zero[L0_reg_ind])
        rounded_rho_L0_max = np.count_nonzero(rho[L0_reg_ind])
        return rounded_rho_L0_min >= constraints['L0_min'] and rounded_rho_L0_max <= constraints['L0_max']
    cpa_pool = cpa_pool.remove_infeasible(rounded_model_size_is_ok).distinct().sort()
    if len(cpa_pool) == 0:
        print_log('all CPA solutions are infeasible')
    pool = SolutionPool(cpa_pool.P)
    # round CPA solutions (naive rounding)
    if settings['use_rounding'] and len(cpa_pool) > 0:
        print_log('running naive rounding on %d solutions' % len(cpa_pool))
        print_log('best objective value: %1.4f' % np.min(cpa_pool.objvals))
        rnd_pool, _, _ = round_solution_pool(cpa_pool,
                                             constraints,
                                             max_runtime = settings['rounding_max_runtime'],
                                             max_solutions = settings['rounding_max_solutions'])
        # naive rounding does not compute objective values, so fill them in here
        rnd_pool = rnd_pool.compute_objvals(get_objval).remove_infeasible(is_feasible)
        print_log('rounding produced %d integer solutions' % len(rnd_pool))
        if len(rnd_pool) > 0:
            # append mutates `pool` in place (and also returns it)
            pool.append(rnd_pool)
            print_log('best objective value is %1.4f' % np.min(rnd_pool.objvals))
    # sequentially round CPA solutions
    if settings['use_sequential_rounding'] and len(cpa_pool) > 0:
        print_log('running sequential rounding on %d solutions' % len(cpa_pool))
        print_log('best objective value: %1.4f' % np.min(cpa_pool.objvals))
        sqrnd_pool, _, _ = sequential_round_solution_pool(pool = cpa_pool,
                                                          Z = Z,
                                                          C_0 = C_0,
                                                          compute_loss_from_scores_real = compute_loss_from_scores_real,
                                                          get_L0_penalty = get_L0_penalty,
                                                          max_runtime = settings['sequential_rounding_max_runtime'],
                                                          max_solutions = settings['sequential_rounding_max_solutions'],
                                                          objval_cutoff = bounds['objval_max'])
        sqrnd_pool = sqrnd_pool.remove_infeasible(is_feasible)
        print_log('sequential rounding produced %d integer solutions' % len(sqrnd_pool))
        if len(sqrnd_pool) > 0:
            pool = pool.append(sqrnd_pool)
            print_log('best objective value: %1.4f' % np.min(pool.objvals))
    # polish rounded solutions with discrete coordinate descent (DCD)
    if settings['polishing_after'] and len(pool) > 0:
        print_log('polishing %d solutions' % len(pool))
        print_log('best objective value: %1.4f' % np.min(pool.objvals))
        dcd_pool, _, _ = discrete_descent_solution_pool(pool = pool,
                                                        Z = Z,
                                                        C_0 = C_0,
                                                        constraints = constraints,
                                                        compute_loss_from_scores = compute_loss_from_scores,
                                                        get_L0_penalty = get_L0_penalty,
                                                        max_runtime = settings['polishing_max_runtime'],
                                                        max_solutions = settings['polishing_max_solutions'])
        dcd_pool = dcd_pool.remove_infeasible(is_feasible)
        if len(dcd_pool) > 0:
            print_log('polishing produced %d integer solutions' % len(dcd_pool))
            pool.append(dcd_pool)
    # remove solutions that are not feasible, not integer
    if len(pool) > 0:
        pool = pool.remove_nonintegral().distinct().sort()
    # update upper and lower bounds using the best feasible objective value
    print_log('initialization produced %1.0f feasible solutions' % len(pool))
    if len(pool) > 0:
        bounds = chained_updates(bounds, C_0_nnz, new_objval_at_feasible = np.min(pool.objvals))
        print_log('best objective value: %1.4f' % np.min(pool.objvals))
    print_log('-' * 60)
    print_log('completed initialization procedure')
    print_log('-' * 60)
    return pool, cuts, bounds
def run_standard_cpa(cpx,
                     cpx_indices,
                     compute_loss,
                     compute_loss_cut,
                     settings = DEFAULT_CPA_SETTINGS,
                     print_flag = False):
    """
    Run the standard cutting-plane algorithm on a RiskSLIM MIP/LP: repeatedly
    solve, add a supporting-hyperplane cut of the loss at the current solution,
    and tighten bounds until an optimality/time/iteration criterion is met.

    Parameters
    ----------
    cpx                 Cplex object for the RiskSLIM MIP/LP
    cpx_indices         dict of variable/constraint indices: 'rho', 'loss', 'alpha',
                        'objval', 'L0_norm', 'C_0_alpha', 'n_constraints'
    compute_loss        handle returning the loss at a coefficient vector
    compute_loss_cut    handle returning (loss_value, loss_slope) at a coefficient vector
    settings            CPA settings (see DEFAULT_CPA_SETTINGS)
    print_flag          set to True to print progress and the stop message

    Returns
    -------
    (stats, cuts, pool) where stats is a dict of run statistics (merged with the
    final bounds), cuts holds the added loss-cut constraints, and pool is a
    SolutionPool of all iterates
    """
    assert isinstance(cpx, Cplex)
    assert isinstance(cpx_indices, dict)
    assert callable(compute_loss)
    assert callable(compute_loss_cut)
    assert isinstance(settings, dict)
    settings = validate_settings(settings, default_settings = DEFAULT_CPA_SETTINGS)
    rho_idx = cpx_indices["rho"]
    loss_idx = cpx_indices["loss"]
    alpha_idx = cpx_indices["alpha"]
    # cut constraints act on [loss variable] + [coefficients]; build before loss_idx is unwrapped
    cut_idx = loss_idx + rho_idx
    objval_idx = cpx_indices["objval"]
    L0_idx = cpx_indices["L0_norm"]
    P = len(cpx_indices["rho"])
    C_0_alpha = np.array(cpx_indices['C_0_alpha'])
    C_0_nnz = C_0_alpha[np.flatnonzero(C_0_alpha)]
    # unwrap a singleton loss index so scalar bound getters/setters work
    if isinstance(loss_idx, list) and len(loss_idx) == 1:
        loss_idx = loss_idx[0]
    if len(alpha_idx) > 0:
        get_alpha = lambda: np.array(cpx.solution.get_values(alpha_idx))
    else:
        get_alpha = lambda: np.array([])
    bounds = {
        'loss_min': cpx.variables.get_lower_bounds(loss_idx),
        'loss_max': cpx.variables.get_upper_bounds(loss_idx),
        'objval_min': cpx.variables.get_lower_bounds(objval_idx),
        'objval_max': cpx.variables.get_upper_bounds(objval_idx),
        'L0_min': cpx.variables.get_lower_bounds(L0_idx),
        'L0_max': cpx.variables.get_upper_bounds(L0_idx),
        }
    # choose the bound-tightening routine based on problem type
    if settings['update_bounds'] and settings['type'] == 'cvx':
        update_bounds = lambda bounds, lb, ub: chained_updates_for_lp(bounds, C_0_nnz, ub, lb)
    elif settings['update_bounds'] and settings['type'] == 'ntree':
        update_bounds = lambda bounds, lb, ub: chained_updates(bounds, C_0_nnz, ub, lb)
    else:
        update_bounds = lambda bounds, lb, ub: bounds
    objval = 0.0
    upperbound = CPX_INFINITY
    lowerbound = 0.0
    n_iterations = 0
    n_simplex_iterations = 0
    max_runtime = float(settings['max_runtime'])
    max_cplex_time = float(settings['max_runtime_per_iteration'])
    remaining_total_time = max_runtime
    solutions = []
    objvals = []
    # bugfix: initialize values read after the loop so that `stats` is
    # well-defined even when the very first solve is infeasible
    # (previously these were unbound -> NameError)
    rho = np.repeat(np.nan, P)
    cut_time = 0.0
    total_time = 0.0
    progress_stats = {
        'upperbounds': [],
        'lowerbounds': [],
        'simplex_iterations': [],
        'cut_times': [],
        'total_times': []
        }
    run_start_time = time.time()
    while True:
        iteration_start_time = time.time()
        cpx.parameters.timelimit.set(min(remaining_total_time, max_cplex_time))
        cpx.solve()
        solution_status = cpx.solution.status[cpx.solution.get_status()]
        # stop if the solver did not return a usable solution
        if solution_status not in ('optimal', 'optimal_tolerance', 'MIP_optimal'):
            stop_reason = solution_status
            stop_msg = 'stopping CPA | solution is infeasible (status = %s)' % solution_status
            break
        # get solution
        rho = np.array(cpx.solution.get_values(rho_idx))
        alpha = get_alpha()
        simplex_iterations = int(cpx.solution.progress.get_num_iterations())
        # compute cut: loss(rho) >= loss_value + slope . (rho' - rho)
        cut_start_time = time.time()
        loss_value, loss_slope = compute_loss_cut(rho)
        cut_lhs = [float(loss_value - loss_slope.dot(rho))]
        cut_constraint = [SparsePair(ind = cut_idx, val = [1.0] + (-loss_slope).tolist())]
        cut_time = time.time() - cut_start_time
        # compute objective bounds
        objval = float(loss_value + alpha.dot(C_0_alpha))
        upperbound = min(upperbound, objval)
        lowerbound = cpx.solution.get_objective_value()
        relative_gap = (upperbound - lowerbound)/(upperbound + np.finfo('float').eps)
        bounds = update_bounds(bounds, lb = lowerbound, ub = upperbound)
        #store solutions
        solutions.append(rho)
        objvals.append(objval)
        # update run stats
        n_iterations += 1
        n_simplex_iterations += simplex_iterations
        current_time = time.time()
        total_time = current_time - run_start_time
        iteration_time = current_time - iteration_start_time
        remaining_total_time = max(max_runtime - total_time, 0.0)
        # print progress
        if print_flag and settings['display_progress']:
            print_log("cuts = %d \t UB = %.4f \t LB = %.4f \t GAP = %.4f%%\n" % (n_iterations, upperbound, lowerbound, 100.0 * relative_gap))
        # save progress
        if settings['save_progress']:
            progress_stats['upperbounds'].append(upperbound)
            progress_stats['lowerbounds'].append(lowerbound)
            progress_stats['total_times'].append(total_time)
            progress_stats['cut_times'].append(cut_time)
            progress_stats['simplex_iterations'].append(simplex_iterations)
        # check termination conditions
        if n_iterations >= settings['max_iterations']:
            stop_reason = 'aborted:reached_max_cuts'
            stop_msg = 'reached max iterations'
            break
        # NOTE: solutions[-2] assumes at least 2 iterates; guaranteed as long as
        # min_iterations_before_coefficient_gap_check >= 2 (default is 250)
        if n_iterations >= settings['min_iterations_before_coefficient_gap_check']:
            prior_rho = solutions[-2]
            coef_gap = np.abs(np.max(rho - prior_rho))
            if np.all(np.round(rho) == np.round(prior_rho)) and coef_gap < settings['max_coefficient_gap']:
                stop_reason = 'aborted:coefficient_gap_within_tolerance'
                stop_msg = 'stopping CPA | coef gap is within tolerance (%1.4f < %1.4f)' % (coef_gap, settings['max_coefficient_gap'])
                break
        if relative_gap < settings['max_tolerance']:
            stop_reason = 'converged:gap_within_tolerance'
            stop_msg = 'stopping CPA | optimality gap is within tolerance (%1.1f%% < %1.1f%%)' % (100 * settings['max_tolerance'], 100 * relative_gap)
            break
        if iteration_time > settings['max_runtime_per_iteration']:
            stop_reason = 'aborted:reached_max_train_time'
            stop_msg = 'stopping CPA (reached max training time per iteration of %1.0f secs)' % settings['max_runtime_per_iteration']
            break
        if (total_time > settings['max_runtime']) or (remaining_total_time == 0.0):
            stop_reason = 'aborted:reached_max_train_time'
            stop_msg = 'stopping CPA (reached max training time of %1.0f secs)' % settings['max_runtime']
            break
        # push tightened bounds back into the model before the next solve
        if settings['update_bounds']:
            cpx.variables.set_lower_bounds(L0_idx, bounds['L0_min'])
            cpx.variables.set_upper_bounds(L0_idx, bounds['L0_max'])
            cpx.variables.set_lower_bounds(loss_idx, bounds['loss_min'])
            cpx.variables.set_upper_bounds(loss_idx, bounds['loss_max'])
            cpx.variables.set_lower_bounds(objval_idx, bounds['objval_min'])
            cpx.variables.set_upper_bounds(objval_idx, bounds['objval_max'])
        # add loss cut
        cpx.linear_constraints.add(lin_expr = cut_constraint, senses = ["G"], rhs = cut_lhs)
    if print_flag:
        print_log(stop_msg)
    #collect stats
    stats = {
        'solution': rho,
        'stop_reason': stop_reason,
        'n_iterations': n_iterations,
        'n_simplex_iterations': n_simplex_iterations,
        'objval': objval,
        'upperbound': upperbound,
        'lowerbound': lowerbound,
        'cut_time': cut_time,
        'total_time': total_time,
        'cplex_time': total_time - cut_time,
        }
    stats.update(bounds)
    if settings['save_progress']:
        # bugfix: the per-iteration time lists live in progress_stats, not stats
        # (stats only has scalar 'total_time'/'cut_time'; the old code raised KeyError here)
        progress_stats['cplex_times'] = (np.array(progress_stats['total_times']) - np.array(progress_stats['cut_times'])).tolist()
        progress_stats['objvals'] = objvals
        progress_stats['solutions'] = solutions
        stats.update(progress_stats)
    #collect cuts (all constraints added beyond the original model)
    idx = list(range(cpx_indices['n_constraints'], cpx.linear_constraints.get_num(), 1))
    cuts = {
        'coefs': cpx.linear_constraints.get_rows(idx),
        'lhs': cpx.linear_constraints.get_rhs(idx)
        }
    #create solution pool
    pool = SolutionPool(P)
    if len(objvals) > 0:
        pool.add(objvals, solutions)
    return stats, cuts, pool
def round_solution_pool(pool,
                        constraints,
                        max_runtime = float('inf'),
                        max_solutions = float('inf')):
    """
    Naively round each continuous solution in `pool` to integers, keeping the
    largest-magnitude coefficients and at most constraints['L0_max'] non-zero
    regularized coefficients per solution.

    Parameters
    ----------
    pool            SolutionPool of (possibly continuous) solutions
    constraints     dict with 'coef_set' (whose c0 is NaN for L0-regularized coefficients)
                    and 'L0_max'
    max_runtime     stop once this much time (seconds) has been spent rounding
    max_solutions   stop once this many solutions have been rounded

    Returns
    -------
    (rounded_pool, total_runtime, total_rounded)
    """
    # quick return
    # bugfix: return the same 3-tuple as the non-empty path (and as the sibling
    # sequential_round_solution_pool); the old code returned just `pool`, which
    # breaks callers that unpack three values
    if len(pool) == 0:
        return pool, 0.0, 0
    pool = pool.distinct().sort()
    P = pool.P
    # coefficients with c0 = NaN are L0-regularized and count towards L0_max
    L0_reg_ind = np.isnan(constraints['coef_set'].c0)
    L0_max = constraints['L0_max']
    total_runtime = 0.0
    total_rounded = 0
    rounded_pool = SolutionPool(P)
    for rho in pool.solutions:
        start_time = time.time()
        # round from largest to smallest magnitude so the most influential
        # coefficients survive the L0_max cap
        feature_order = np.argsort([-abs(x) for x in rho])
        rounded_solution = np.zeros(shape = (1, P))
        l0_norm_count = 0
        for k in range(P):
            j = feature_order[k]
            if not L0_reg_ind[j]:
                # unregularized coefficients are always kept
                rounded_solution[0, j] = np.round(rho[j], 0)
            elif l0_norm_count < L0_max:
                # regularized coefficient: keep it and count it (even if it rounds to 0)
                rounded_solution[0, j] = np.round(rho[j], 0)
                l0_norm_count += 1
        total_runtime += time.time() - start_time
        total_rounded += 1
        # objective values are unknown at this point; callers fill them in later
        rounded_pool.add(objvals = np.nan, solutions = rounded_solution)
        if total_runtime > max_runtime or total_rounded >= max_solutions:
            break
    rounded_pool = rounded_pool.distinct().sort()
    return rounded_pool, total_runtime, total_rounded
def sequential_round_solution_pool(pool,
                                   Z,
                                   C_0,
                                   compute_loss_from_scores_real,
                                   get_L0_penalty,
                                   max_runtime = float('inf'),
                                   max_solutions = float('inf'),
                                   objval_cutoff = float('inf')):
    """
    Apply sequential rounding (SeqRd) to every solution in a solution pool.
    Can be stopped early via max_runtime or max_solutions.

    Parameters
    ----------
    pool                            SolutionPool of continuous solutions
    Z                               data matrix passed through to sequential_rounding
    C_0                             per-coefficient penalty vector
    compute_loss_from_scores_real   loss handle used during rounding
    get_L0_penalty                  handle returning the L0 penalty of a solution
    max_runtime                     stop once this much rounding time (seconds) has elapsed
    max_solutions                   stop once this many solutions have been rounded
    objval_cutoff                   rounding aborts early on solutions that exceed this value

    Returns
    -------
    (rounded_pool, total_runtime, total_rounded)
    """
    # nothing to round
    if len(pool) == 0:
        return pool, 0.0, 0
    assert callable(get_L0_penalty)
    assert callable(compute_loss_from_scores_real)
    pool = pool.distinct().sort()
    rounded_pool = SolutionPool(pool.P)
    elapsed = 0.0
    n_rounded = 0
    for rho in pool.solutions:
        tic = time.time()
        solution, objval, early_stop = sequential_rounding(rho = rho,
                                                           Z = Z,
                                                           C_0 = C_0,
                                                           compute_loss_from_scores_real = compute_loss_from_scores_real,
                                                           get_L0_penalty = get_L0_penalty,
                                                           objval_cutoff = objval_cutoff)
        elapsed += time.time() - tic
        n_rounded += 1
        # solutions whose rounding was cut off by objval_cutoff are discarded
        if not early_stop:
            rounded_pool = rounded_pool.add(objvals = objval, solutions = solution)
        if elapsed > max_runtime or n_rounded > max_solutions:
            break
    rounded_pool = rounded_pool.distinct().sort()
    return rounded_pool, elapsed, n_rounded
def discrete_descent_solution_pool(pool,
                                   Z,
                                   C_0,
                                   constraints,
                                   get_L0_penalty,
                                   compute_loss_from_scores,
                                   max_runtime = float('inf'),
                                   max_solutions = float('inf')):
    """
    Polish every integer solution in a solution pool with discrete coordinate
    descent (DCD). Can be stopped early via max_runtime or max_solutions.

    Parameters
    ----------
    pool                        SolutionPool; non-integral solutions are dropped first
    Z                           data matrix passed through to discrete_descent
    C_0                         per-coefficient penalty vector
    constraints                 dict with 'coef_set' providing coefficient lower/upper bounds
    get_L0_penalty              handle returning the L0 penalty of a solution
    compute_loss_from_scores    loss handle used during polishing
    max_runtime                 stop once this much wall time (seconds) has elapsed
    max_solutions               stop once this many solutions have been polished

    Returns
    -------
    (polished_pool, total_runtime, total_polished)
    """
    pool = pool.remove_nonintegral()
    if len(pool) == 0:
        return pool, 0.0, 0
    assert callable(get_L0_penalty)
    assert callable(compute_loss_from_scores)
    coef_ub = constraints['coef_set'].ub
    coef_lb = constraints['coef_set'].lb
    pool = pool.distinct().sort()
    polished_pool = SolutionPool(pool.P)
    n_polished = 0
    elapsed = 0.0
    # NOTE: elapsed time is measured from here (cumulative wall time), matching
    # the original implementation, rather than summing per-call handle time
    tic = time.time()
    for rho in pool.solutions:
        new_solution, _, new_objval = discrete_descent(rho,
                                                       Z = Z,
                                                       C_0 = C_0,
                                                       rho_ub = coef_ub,
                                                       rho_lb = coef_lb,
                                                       get_L0_penalty = get_L0_penalty,
                                                       compute_loss_from_scores = compute_loss_from_scores)
        elapsed = time.time() - tic
        n_polished += 1
        polished_pool = polished_pool.add(objvals = new_objval, solutions = new_solution)
        if elapsed > max_runtime or n_polished >= max_solutions:
            break
    polished_pool = polished_pool.distinct().sort()
    return polished_pool, elapsed, n_polished
| 21,391 | 37.613718 | 150 | py |
risk-slim | risk-slim-master/riskslim/bound_tightening.py | import numpy as np
def chained_updates(bounds, C_0_nnz, new_objval_at_feasible = None, new_objval_at_relaxation = None, MAX_CHAIN_COUNT = 20):
    """
    Tighten the objective/loss/L0 bounds by chaining implications between them.

    Parameters
    ----------
    bounds                      dict with keys 'objval_min', 'objval_max',
                                'loss_min', 'loss_max', 'L0_min', 'L0_max'
    C_0_nnz                     array of non-zero L0 penalty values
    new_objval_at_feasible      objective value of a feasible solution (new upper bound), or None
    new_objval_at_relaxation    objective value of a relaxation (new lower bound), or None
    MAX_CHAIN_COUNT             max number of tightening passes

    Returns
    -------
    new dict of (possibly tighter) bounds; the input dict is not modified
    """
    b = dict(bounds)
    # one-time tightening from the relaxation value (lower bound on objval)
    if new_objval_at_relaxation is not None and b['objval_min'] < new_objval_at_relaxation:
        b['objval_min'] = new_objval_at_relaxation
    # one-time tightening from a feasible solution (upper bound on objval)
    if new_objval_at_feasible is not None and b['objval_max'] > new_objval_at_feasible:
        b['objval_max'] = new_objval_at_feasible
    # bounds have crossed -> collapse onto the lower bound and return
    if b['objval_max'] <= b['objval_min']:
        b['objval_max'] = max(b['objval_max'], b['objval_min'])
        b['objval_min'] = min(b['objval_max'], b['objval_min'])
        b['loss_max'] = min(b['objval_max'], b['loss_max'])
        return b
    smallest_C0 = np.min(C_0_nnz)
    # chain tightening passes until a full pass makes no progress
    for _ in range(MAX_CHAIN_COUNT):
        improved = False
        # penalty of the L0_min cheapest / L0_max most expensive coefficients
        L0_penalty_min = np.sum(np.sort(C_0_nnz)[np.arange(int(b['L0_min']))])
        L0_penalty_max = np.sum(-np.sort(-C_0_nnz)[np.arange(int(b['L0_max']))])
        # loss_min >= objval_min - L0_penalty_max
        if b['objval_min'] > L0_penalty_max:
            candidate = b['objval_min'] - L0_penalty_max
            if candidate > b['loss_min']:
                b['loss_min'] = candidate
                improved = True
        # L0_min >= ceil((objval_min - loss_max) / min penalty)
        if b['objval_min'] > b['loss_max']:
            candidate = np.ceil((b['objval_min'] - b['loss_max']) / smallest_C0)
            if candidate > b['L0_min']:
                b['L0_min'] = candidate
                improved = True
        # NOTE(review): uses min(loss_min, L0_penalty_min), mirroring the original;
        # the original comment suggested loss_min + L0_penalty_min (a tighter but
        # different bound) -- behavior is kept as-is
        candidate = min(b['loss_min'], L0_penalty_min)
        if candidate > b['objval_min']:
            b['objval_min'] = candidate
            improved = True
        # loss_max <= objval_max - L0_penalty_min
        if b['objval_max'] > L0_penalty_min:
            candidate = b['objval_max'] - L0_penalty_min
            if candidate < b['loss_max']:
                b['loss_max'] = candidate
                improved = True
        # L0_max <= floor((objval_max - loss_min) / min penalty)
        if b['objval_max'] > b['loss_min']:
            candidate = np.floor((b['objval_max'] - b['loss_min']) / smallest_C0)
            if candidate < b['L0_max']:
                b['L0_max'] = candidate
                improved = True
        # objval_max <= loss_max + L0_penalty_max
        candidate = b['loss_max'] + L0_penalty_max
        if candidate < b['objval_max']:
            b['objval_max'] = candidate
            improved = True
        if not improved:
            break
    return b
def chained_updates_for_lp(bounds, C_0_nnz, new_objval_at_feasible = None, new_objval_at_relaxation = None, MAX_CHAIN_COUNT = 20):
    """
    LP-relaxation variant of chained_updates: L0_min / L0_max may be fractional,
    so the L0 penalty bounds are tracked incrementally (via the min/max penalty
    per coefficient) instead of being re-summed each pass. Same contract and
    return value as chained_updates.
    """
    b = dict(bounds)
    # one-time tightening from the relaxation value (lower bound on objval)
    if new_objval_at_relaxation is not None and b['objval_min'] < new_objval_at_relaxation:
        b['objval_min'] = new_objval_at_relaxation
    # one-time tightening from a feasible solution (upper bound on objval)
    if new_objval_at_feasible is not None and b['objval_max'] > new_objval_at_feasible:
        b['objval_max'] = new_objval_at_feasible
    # bounds have crossed -> collapse onto the lower bound and return
    if b['objval_max'] <= b['objval_min']:
        b['objval_max'] = max(b['objval_max'], b['objval_min'])
        b['objval_min'] = min(b['objval_max'], b['objval_min'])
        b['loss_max'] = min(b['objval_max'], b['loss_max'])
        return b
    C_0_min = np.min(C_0_nnz)
    C_0_max = np.max(C_0_nnz)
    L0_penalty_min = C_0_min * b['L0_min']
    L0_penalty_max = min(C_0_max * b['L0_max'], b['objval_max'])
    # chain tightening passes until a full pass makes no progress
    for _ in range(MAX_CHAIN_COUNT):
        improved = False
        # loss_min >= objval_min - L0_penalty_max
        if b['objval_min'] > L0_penalty_max:
            candidate = b['objval_min'] - L0_penalty_max
            if candidate > b['loss_min']:
                b['loss_min'] = candidate
                improved = True
        # L0_min (fractional, no ceil) and its penalty bound
        if b['objval_min'] > b['loss_max']:
            candidate = (b['objval_min'] - b['loss_max']) / C_0_min
            if candidate > b['L0_min']:
                b['L0_min'] = candidate
                L0_penalty_min = max(L0_penalty_min, C_0_min * candidate)
                improved = True
        # NOTE(review): min(), not loss_min + L0_penalty_min, mirroring the
        # non-LP version -- behavior is kept as-is
        candidate = min(b['loss_min'], L0_penalty_min)
        if candidate > b['objval_min']:
            b['objval_min'] = candidate
            improved = True
        # loss_max <= objval_max - L0_penalty_min
        if b['objval_max'] > L0_penalty_min:
            candidate = b['objval_max'] - L0_penalty_min
            if candidate < b['loss_max']:
                b['loss_max'] = candidate
                improved = True
        # L0_max (fractional, no floor) and its penalty bound
        if b['objval_max'] > b['loss_min']:
            candidate = (b['objval_max'] - b['loss_min']) / C_0_min
            if candidate < b['L0_max']:
                b['L0_max'] = candidate
                L0_penalty_max = min(L0_penalty_max, C_0_max * candidate)
                improved = True
        # objval_max <= loss_max + L0_penalty_max (also caps the penalty bound)
        candidate = b['loss_max'] + L0_penalty_max
        if candidate < b['objval_max']:
            b['objval_max'] = candidate
            L0_penalty_max = min(L0_penalty_max, candidate)
            improved = True
        if not improved:
            break
    return b
| 6,773 | 42.146497 | 130 | py |
risk-slim | risk-slim-master/riskslim/solution_pool.py | import numpy as np
import prettytable as pt
class SolutionPool(object):
    """
    Helper class used to store solutions to the risk slim optimization problem

    Stores a matrix of solutions (one row per solution, P columns) together
    with their objective values. Most mutating methods return self so that
    calls can be chained.
    """

    def __init__(self, obj):
        """
        Parameters
        ----------
        obj     int P >= 1 (creates an empty pool of P-dimensional solutions),
                another SolutionPool (shallow copy of its arrays), or a dict
                with exactly the keys 'objvals' and 'solutions'
        """
        if isinstance(obj, SolutionPool):
            self._P = obj.P
            self._objvals = obj.objvals
            self._solutions = obj.solutions
        elif isinstance(obj, int):
            assert obj >= 1
            self._P = int(obj)
            self._objvals = np.empty(0)
            self._solutions = np.empty(shape = (0, self._P))
        elif isinstance(obj, dict):
            assert len(obj) == 2
            # np.float64 replaces np.float_ (removed in NumPy 2.0); same dtype
            objvals = np.copy(obj['objvals']).flatten().astype(dtype = np.float64)
            solutions = np.copy(obj['solutions'])
            n = objvals.size
            if solutions.ndim == 2:
                assert n in solutions.shape
                # accept transposed input (P x n) and flip it to (n x P)
                if solutions.shape[1] == n and solutions.shape[0] != n:
                    solutions = np.transpose(solutions)
            elif solutions.ndim == 1:
                assert n == 1
                solutions = np.reshape(solutions, (1, solutions.size))
            else:
                raise ValueError('solutions has more than 2 dimensions')
            self._P = solutions.shape[1]
            self._objvals = objvals
            self._solutions = solutions
        else:
            raise ValueError('cannot initialize SolutionPool using %s object' % type(obj))

    def __len__(self):
        return len(self._objvals)

    @staticmethod
    def solution_string(solution, float_fmt = '%1.3f'):
        """Format a solution vector as a string; integral entries print as ints."""
        solution_string = ''
        for j in range(len(solution)):
            if SolutionPool.is_integral(solution[j]):
                solution_string += ' ' + str(int(solution[j]))
            else:
                solution_string += ((' ' + float_fmt) % solution[j])
        return solution_string

    def table(self):
        """Return a pretty-printed table of objective values and solutions."""
        x = pt.PrettyTable(align = 'r', float_format = '1.3', hrules = pt.ALL)
        x.add_column("objval", self._objvals.tolist())
        x.add_column("solution", list(map(self.solution_string, self._solutions)))
        return str(x)

    def __repr__(self):
        return self.table()

    def __str__(self):
        return self.table()

    def copy(self):
        """Return a new SolutionPool sharing this pool's current arrays."""
        return SolutionPool(self)

    @property
    def P(self):
        # dimension of each solution vector
        return int(self._P)

    @property
    def objvals(self):
        return self._objvals

    @property
    def solutions(self):
        return self._solutions

    @objvals.setter
    def objvals(self, objvals):
        if hasattr(objvals, "__len__"):
            if len(objvals) > 0:
                self._objvals = np.copy(list(objvals)).flatten().astype(dtype = np.float64)
            elif len(objvals) == 0:
                self._objvals = np.empty(0)
        else:
            # NOTE(review): a scalar is stored as a plain float, not a 1-element
            # array, which breaks len(self) afterwards -- confirm whether any
            # caller relies on this before changing it
            self._objvals = float(objvals)

    @solutions.setter
    def solutions(self, solutions):
        if solutions.ndim == 2:
            assert self._P in solutions.shape
            # accept transposed input (P x n) and flip it to (n x P)
            if solutions.shape[0] == self._P and solutions.shape[1] != self._P:
                solutions = np.transpose(solutions)
        elif solutions.ndim == 1:
            solutions = np.reshape(solutions, (1, solutions.size))
        else:
            raise ValueError('incorrect solution dimensions')
        self._solutions = np.copy(solutions)

    def append(self, pool):
        """Append another pool's solutions to this pool (in place); returns self."""
        if len(pool) == 0:
            return self
        else:
            return self.add(pool.objvals, pool.solutions)

    def add(self, objvals, solutions):
        """
        Add one or more (objval, solution) pairs to the pool (in place); returns self.
        `objvals` may be a scalar, list, or array; `solutions` must match in count.
        """
        if isinstance(objvals, np.ndarray) or isinstance(objvals, list):
            n = len(objvals)
            if n == 0:
                return self
            if isinstance(solutions, np.ndarray):
                if solutions.ndim == 2:
                    assert n in solutions.shape
                    assert self._P in solutions.shape
                    if solutions.shape[0] == self._P and solutions.shape[1] != self._P:
                        solutions = np.transpose(solutions)
                elif solutions.ndim == 1:
                    assert n == 1
                    solutions = np.reshape(solutions, (1, solutions.size))
                else:
                    raise ValueError('incorrect solution dimensions')
            elif isinstance(solutions, list):
                solutions = np.array(solutions)
                assert solutions.shape[0] == n
                assert solutions.shape[1] == self._P
            else:
                raise TypeError('incorrect solution type')
        else:
            objvals = float(objvals) #also assertion
            solutions = np.reshape(solutions, (1, self._P))
        self._objvals = np.append(self._objvals, objvals)
        self._solutions = np.append(self._solutions, solutions, axis = 0)
        return self

    def filter(self, filter_ind):
        """Keep only the solutions where filter_ind is True (in place); returns self."""
        idx = np.require(filter_ind, dtype = 'bool').flatten()
        if len(self) > 0 and any(idx == 0):
            self._objvals = self._objvals[idx]
            self._solutions = self._solutions[idx, :]
        return self

    def distinct(self):
        """Drop duplicate solutions (in place); returns self."""
        if len(self) > 0:
            _, idx = np.unique(self._solutions, return_index = True, axis = 0)
            self._objvals = self._objvals[idx]
            self._solutions = self._solutions[idx, :]
        return self

    def sort(self):
        """Sort by objective value, ascending (in place); returns self."""
        if len(self) > 0:
            idx = np.argsort(self._objvals)
            self._objvals = self._objvals[idx]
            self._solutions = self._solutions[idx, :]
        return self

    def map(self, mapfun, target = 'all'):
        """
        Apply `mapfun` across the pool and return a list of results.
        target = 'solutions' maps over solutions, 'objvals' over objective
        values, and 'all' over (objval, solution) pairs.
        """
        assert callable(mapfun), 'map function must be callable'
        # bugfix: compare strings with '==' instead of 'is' (identity comparison
        # against literals is fragile and a SyntaxWarning on modern Python)
        if target == 'solutions':
            return list(map(mapfun, self.solutions))
        elif target == 'objvals':
            return list(map(mapfun, self.objvals))
        elif target == 'all':
            return list(map(mapfun, self.objvals, self.solutions))
        else:
            raise ValueError('target must be either solutions, objvals, or all')

    @staticmethod
    def is_integral(solution):
        """Return True if every entry of `solution` is an integer value."""
        return np.all(solution == np.require(solution, dtype = np.int_))

    def remove_nonintegral(self):
        """Drop solutions with non-integer entries (in place); returns self."""
        return self.filter(list(map(self.is_integral, self.solutions)))

    def compute_objvals(self, get_objval):
        """Fill in NaN objective values using `get_objval` (in place); returns self."""
        compute_idx = np.flatnonzero(np.isnan(self._objvals))
        self._objvals[compute_idx] = np.array(list(map(get_objval, self._solutions[compute_idx, :])))
        return self

    def remove_suboptimal(self, objval_cutoff):
        """Drop solutions whose objective value exceeds `objval_cutoff`; returns self."""
        return self.filter(self.objvals <= objval_cutoff)

    def remove_infeasible(self, is_feasible):
        """Drop solutions for which `is_feasible` returns False; returns self."""
        return self.filter(list(map(is_feasible, self.solutions)))
class FastSolutionPool(object):
    """Lightweight queue of (objective value, solution) pairs.

    A stripped-down companion to SolutionPool that favors speed over
    features; it is primarily used by the callback functions in risk_slim.

    FIX: uses np.float64 instead of np.float_, which was removed in NumPy 2.0
    (the two name the same dtype on older NumPy).
    """

    def __init__(self, P):
        # P = number of coefficients in each solution vector
        self._P = int(P)
        self._objvals = np.empty(shape = 0)
        self._solutions = np.empty(shape = (0, P))

    def __len__(self):
        """Number of stored solutions."""
        return len(self._objvals)

    @property
    def P(self):
        """Number of coefficients in each solution vector."""
        return self._P

    @property
    def objvals(self):
        """Objective values, one per stored solution."""
        return self._objvals

    @property
    def solutions(self):
        """Matrix of stored solutions, one row per solution."""
        return self._solutions

    def add(self, new_objvals, new_solutions):
        """Append one or more objective values together with their solution rows."""
        if isinstance(new_objvals, (np.ndarray, list)):
            n = len(new_objvals)
            vals = np.array(new_objvals).astype(dtype = np.float64).flatten()
            self._objvals = np.append(self._objvals, vals)
        else:
            n = 1
            self._objvals = np.append(self._objvals, float(new_objvals))
        self._solutions = np.append(self._solutions, np.reshape(new_solutions, (n, self._P)), axis = 0)

    def get_best_objval_and_solution(self):
        """Return (best objval, its solution); empty arrays when the pool is empty."""
        if len(self) == 0:
            return np.empty(shape = 0), np.empty(shape = (0, self.P))
        best = np.argmin(self._objvals)
        return float(self._objvals[best]), np.copy(self._solutions[best,])

    def filter_sort_unique(self, max_objval = float('inf')):
        """Drop entries above max_objval, de-duplicate solutions, sort by objval; returns self."""
        if max_objval < float('inf'):
            keep = np.less_equal(self._objvals, max_objval)
            self._objvals = self._objvals[keep]
            self._solutions = self._solutions[keep,]
        if len(self._objvals) >= 2:
            # keep the first stored objval for each distinct solution row
            _, first_idx = np.unique(self._solutions, axis = 0, return_index = True)
            self._objvals = self._objvals[first_idx]
            self._solutions = self._solutions[first_idx,]
        if len(self._objvals) >= 2:
            order = np.argsort(self._objvals)
            self._objvals = self._objvals[order]
            self._solutions = self._solutions[order,]
        return self

    def clear(self):
        """Remove all stored solutions; returns self."""
        self._objvals = np.empty(shape = 0)
        self._solutions = np.empty(shape = (0, self._P))
        return self

    def table(self):
        """Pretty-printed table of the pool's contents."""
        x = pt.PrettyTable(align = 'r', float_format = '1.4', hrules = pt.ALL)
        x.add_column("objval", self._objvals.tolist())
        x.add_column("solution", list(map(self.solution_string, self._solutions)))
        return str(x)

    @staticmethod
    def solution_string(solution):
        """Format a coefficient vector as a space-separated string (ints shown without decimals)."""
        pieces = []
        for value in solution:
            if SolutionPool.is_integral(value):
                pieces.append(' ' + str(int(value)))
            else:
                pieces.append(' %1.4f' % value)
        return ''.join(pieces)

    def __repr__(self):
        return self.table()

    def __str__(self):
        return self.table()
risk-slim | risk-slim-master/riskslim/heuristics.py | import numpy as np
#todo: finish specifications
#todo: add input checking (with ability to turn off)
#todo: Cython implementation
def sequential_rounding(rho, Z, C_0, compute_loss_from_scores_real, get_L0_penalty, objval_cutoff = float('Inf')):
    """
    Round a continuous solution to an integer-feasible one, one component at a time.

    On each pass, every still-fractional component is tentatively rounded both up
    and down; the single (component, direction) move with the smallest objective
    value is committed, and the process repeats until no fractional components
    remain or the objective exceeds objval_cutoff.

    NOTE: rho is modified in place; the (possibly partially) rounded vector is
    also returned.

    Parameters
    ----------
    rho: P x 1 vector of continuous coefficients
    Z: N x P data matrix computed as X * Y
    C_0: P x 1 vector of L0 penalties. C_0[j] = L0 penalty for rho[j] for j = 0,..., P.
    compute_loss_from_scores_real: function handle to compute loss using N x 1 vector of scores, where scores = Z.dot(rho)
    get_L0_penalty: function handle to compute L0_penalty from rho
    objval_cutoff: objective value used for early stopping.
                   the procedure will stop if the objective value achieved by an intermediate solution exceeds objval_cutoff

    Returns
    -------
    rho: P x 1 vector of integer coefficients (if early_stop_flag = False, otherwise partially rounded)
    best_objval: objective value at the last committed rounding move
    early_stop_flag: True if procedure was stopped early (in which case rho is not integer feasible)
    """
    assert callable(compute_loss_from_scores_real)
    assert callable(get_L0_penalty)

    P = rho.shape[0]

    # floor/ceil candidate for every component, and the step needed to reach each
    rho_floor = np.floor(rho)
    floor_is_zero = np.equal(rho_floor, 0)
    dist_from_start_to_floor = rho_floor - rho

    rho_ceil = np.ceil(rho)
    ceil_is_zero = np.equal(rho_ceil, 0)
    dist_from_start_to_ceil = rho_ceil - rho

    # only components whose floor and ceil differ are fractional and need rounding
    dimensions_to_round = np.flatnonzero(np.not_equal(rho_floor, rho_ceil)).tolist()

    # scores are maintained incrementally so that scores == Z.dot(rho) throughout
    scores = Z.dot(rho)
    best_objval = compute_loss_from_scores_real(scores) + get_L0_penalty(rho)

    while len(dimensions_to_round) > 0 and best_objval < objval_cutoff:

        objvals_at_floor = np.repeat(np.nan, P)
        objvals_at_ceil = np.repeat(np.nan, P)
        current_penalty = get_L0_penalty(rho)

        for idx in dimensions_to_round:

            # scores go from center to ceil -> center + dist_from_start_to_ceil
            Z_dim = Z[:, idx]
            base_scores = scores + dist_from_start_to_ceil[idx] * Z_dim
            objvals_at_ceil[idx] = compute_loss_from_scores_real(base_scores)

            # move from ceil to floor => -1*Z_j (floor and ceil differ by exactly 1 here)
            base_scores -= Z_dim
            objvals_at_floor[idx] = compute_loss_from_scores_real(base_scores)

            # rounding a fractional coefficient to exactly 0 removes its L0
            # penalty, which is otherwise included in current_penalty below
            if ceil_is_zero[idx]:
                objvals_at_ceil[idx] -= C_0[idx]
            elif floor_is_zero[idx]:
                objvals_at_floor[idx] -= C_0[idx]

        # adjust for penalty value
        objvals_at_ceil += current_penalty
        objvals_at_floor += current_penalty

        # commit the single rounding move with the smallest objective value
        best_objval_at_ceil = np.nanmin(objvals_at_ceil)
        best_objval_at_floor = np.nanmin(objvals_at_floor)

        if best_objval_at_ceil <= best_objval_at_floor:
            best_objval = best_objval_at_ceil
            best_dim = np.nanargmin(objvals_at_ceil)
            rho[best_dim] += dist_from_start_to_ceil[best_dim]
            scores += dist_from_start_to_ceil[best_dim] * Z[:, best_dim]
        else:
            best_objval = best_objval_at_floor
            best_dim = np.nanargmin(objvals_at_floor)
            rho[best_dim] += dist_from_start_to_floor[best_dim]
            scores += dist_from_start_to_floor[best_dim] * Z[:, best_dim]

        dimensions_to_round.remove(best_dim)
        #assert(np.all(np.isclose(scores, Z.dot(rho))))

    early_stop_flag = best_objval > objval_cutoff
    return rho, best_objval, early_stop_flag
def discrete_descent(rho, Z, C_0, rho_ub, rho_lb, get_L0_penalty, compute_loss_from_scores, descent_dimensions = None, active_set_flag = True):
    """
    Given an initial feasible solution, rho, produces an improved solution that is 1-OPT
    (i.e. the objective value does not decrease by moving in any single dimension).
    At each iteration, the algorithm moves in the dimension that yields the greatest
    decrease in objective value; the best step size in each dimension is computed
    using a directional search strategy that saves computation.

    Parameters
    ----------
    rho: P x 1 vector of coefficients (truncated toward zero to integers on entry)
    Z: N x P data matrix computed as X * Y
    C_0: P x 1 vector of L0 penalties. C_0[j] = L0 penalty for rho[j] for j = 0,..., P.
    rho_ub: P x 1 vector of upper bounds on each coefficient
    rho_lb: P x 1 vector of lower bounds on each coefficient
    get_L0_penalty: function handle to compute L0_penalty from rho
    compute_loss_from_scores: function handle to compute loss using N x 1 vector of scores, where scores = Z.dot(rho)
    descent_dimensions: indices of the dimensions to search over (all P dimensions when None)
    active_set_flag: if True, restrict the search to dimensions where rho is currently non-zero

    Returns
    -------
    rho: 1-OPT coefficient vector
    base_loss: loss value at rho
    base_objval: objective value at rho (loss + L0 penalty)
    """
    assert callable(compute_loss_from_scores)
    assert callable(get_L0_penalty)

    # initialize key variables
    MAX_ITERATIONS = 500
    MIN_IMPROVEMENT_PER_STEP = float(1e-8)
    P = len(rho)

    # convert solution to integer (stored as floats for the score arithmetic)
    # NOTE(review): np.float_ was removed in NumPy 2.0 (np.float64 there) -- confirm target NumPy version
    rho = np.require(np.require(rho, dtype = np.int_), dtype = np.float_)

    # convert descent dimensions to integer values
    if descent_dimensions is None:
        descent_dimensions = np.arange(P)
    else:
        descent_dimensions = np.require(descent_dimensions, dtype = np.int_)

    if active_set_flag:
        descent_dimensions = np.intersect1d(np.flatnonzero(rho), descent_dimensions)

    descent_dimensions = descent_dimensions.tolist()

    # scores are maintained incrementally so that base_scores == Z.dot(rho)
    base_scores = Z.dot(rho)
    base_loss = compute_loss_from_scores(base_scores)
    base_objval = base_loss + get_L0_penalty(rho)

    n_iterations = 0
    # feasible integer values for each searchable dimension
    coefficient_values = {k: np.arange(int(rho_lb[k]), int(rho_ub[k]) + 1) for k in descent_dimensions}

    search_dimensions = descent_dimensions
    while n_iterations < MAX_ITERATIONS and len(search_dimensions) > 0:

        # compute the best objective value / step size in each dimension
        best_objval_by_dim = np.repeat(np.nan, P)
        best_coef_by_dim = np.repeat(np.nan, P)

        for k in search_dimensions:
            dim_objvals = _compute_objvals_at_dim(base_rho = rho,
                                                  base_scores = base_scores,
                                                  base_loss = base_loss,
                                                  dim_idx = k,
                                                  dim_coefs = coefficient_values[k],
                                                  Z = Z,
                                                  C_0 = C_0,
                                                  compute_loss_from_scores = compute_loss_from_scores)

            # mark points that will improve the current objective value by at least MIN_IMPROVEMENT_PER_STEP
            best_dim_idx = np.nanargmin(dim_objvals)
            best_objval_by_dim[k] = dim_objvals[best_dim_idx]
            best_coef_by_dim[k] = coefficient_values[k][best_dim_idx]

        # recompute base objective value/loss/scores
        best_idx = np.nanargmin(best_objval_by_dim)
        next_objval = best_objval_by_dim[best_idx]
        threshold_objval = base_objval - MIN_IMPROVEMENT_PER_STEP

        # stop when the best single-dimension move no longer improves enough (1-OPT)
        if next_objval >= threshold_objval:
            break

        # commit the move and update rho/objval/loss/scores incrementally
        best_step = best_coef_by_dim[best_idx] - rho[best_idx]
        rho[best_idx] += best_step
        base_objval = next_objval
        base_loss = base_objval - get_L0_penalty(rho)
        base_scores = base_scores + (best_step * Z[:, best_idx])

        # remove the current best direction from the set of directions to explore
        search_dimensions = list(descent_dimensions)
        search_dimensions.remove(best_idx)
        n_iterations += 1

    return rho, base_loss, base_objval
def _compute_objvals_at_dim(Z, C_0, base_rho, base_scores, base_loss, dim_coefs, dim_idx, compute_loss_from_scores):
    """
    Evaluate the objective value at each candidate value of rho[dim_idx] in dim_coefs.

    Starting from the current coefficient value, the search walks forward
    (increasing values) until the loss stops decreasing, then backward if the
    first forward step did not help; unvisited candidates keep NaN objective
    values.

    Parameters
    ----------
    Z: N x P data matrix computed as X * Y
    C_0: P x 1 vector of L0 penalties
    base_rho: P x 1 current coefficient vector
    base_scores: N x 1 vector with base_scores == Z.dot(base_rho)
    base_loss: loss value at base_rho
    dim_coefs: sorted candidate values for rho[dim_idx] (must contain the current value)
    dim_idx: index of the dimension being searched
    compute_loss_from_scores: function handle to compute loss from an N x 1 score vector

    Returns
    -------
    objval_at_coef_values: objective value at each entry of dim_coefs
                           (NaN for candidates the directional search never reached)
    """
    # copy stuff because ctypes
    scores = np.copy(base_scores)

    # initialize parameters
    P = base_rho.shape[0]
    base_coef_value = base_rho[dim_idx]
    base_index = np.flatnonzero(dim_coefs == base_coef_value)
    loss_at_coef_value = np.repeat(np.nan, len(dim_coefs))
    loss_at_coef_value[base_index] = float(base_loss)
    Z_dim = Z[:, dim_idx]

    # start by moving forward
    forward_indices = np.flatnonzero(base_coef_value <= dim_coefs)
    forward_step_sizes = np.diff(dim_coefs[forward_indices] - base_coef_value)
    n_forward_steps = len(forward_step_sizes)
    stop_after_first_forward_step = False

    best_loss = base_loss
    total_distance_from_base = 0

    for i in range(n_forward_steps):
        scores += forward_step_sizes[i] * Z_dim
        total_distance_from_base += forward_step_sizes[i]
        current_loss = compute_loss_from_scores(scores)
        # stop walking forward as soon as the loss stops improving
        if current_loss >= best_loss:
            stop_after_first_forward_step = i == 0
            break
        loss_at_coef_value[forward_indices[i + 1]] = current_loss
        best_loss = current_loss

    # if the first step forward didn't lead to a decrease in loss, then move backwards
    move_backward = stop_after_first_forward_step or n_forward_steps == 0

    if move_backward:

        # compute backward steps (indices run from the base value downward)
        backward_indices = np.flipud(np.where(dim_coefs <= base_coef_value)[0])
        backward_step_sizes = np.diff(dim_coefs[backward_indices] - base_coef_value)
        n_backward_steps = len(backward_step_sizes)

        # correct size of first backward step if you took 1 step forward
        if n_backward_steps > 0 and n_forward_steps > 0:
            backward_step_sizes[0] = backward_step_sizes[0] - forward_step_sizes[0]

        best_loss = base_loss

        for i in range(n_backward_steps):
            scores += backward_step_sizes[i] * Z_dim
            total_distance_from_base += backward_step_sizes[i]
            current_loss = compute_loss_from_scores(scores)
            if current_loss >= best_loss:
                break
            loss_at_coef_value[backward_indices[i + 1]] = current_loss
            best_loss = current_loss

    # at this point scores == base_scores + step_distance*Z_dim
    # assert(all(np.isclose(scores, base_scores + total_distance_from_base * Z_dim)))

    # compute objective values by adding penalty values to all other indices
    other_dim_idx = np.flatnonzero(dim_idx != np.arange(P))
    other_dim_penalty = np.sum(C_0[other_dim_idx] * (base_rho[other_dim_idx] != 0))
    objval_at_coef_values = loss_at_coef_value + other_dim_penalty

    if C_0[dim_idx] > 0.0:

        # increase objective value at every non-zero coefficient value by C_0j
        nonzero_coef_idx = np.flatnonzero(dim_coefs)
        objval_at_coef_values[nonzero_coef_idx] = objval_at_coef_values[nonzero_coef_idx] + C_0[dim_idx]

        # compute value at coef[j] == 0 if needed (zero never pays the C_0j penalty)
        zero_coef_idx = np.flatnonzero(dim_coefs == 0)
        if np.isnan(objval_at_coef_values[zero_coef_idx]):
            # steps_from_here_to_zero: step_from_here_to_base + step_from_base_to_zero
            # steps_from_here_to_zero: -step_from_base_to_here + -step_from_zero_to_base
            steps_to_zero = -(base_coef_value + total_distance_from_base)
            scores += steps_to_zero * Z_dim
            objval_at_coef_values[zero_coef_idx] = compute_loss_from_scores(scores) + other_dim_penalty
            # assert(all(np.isclose(scores, base_scores - base_coef_value * Z_dim)))

    # return objective value at feasible coefficients
    return objval_at_coef_values
| 11,885 | 39.155405 | 150 | py |
risk-slim | risk-slim-master/riskslim/setup_functions.py | import numpy as np
from .coefficient_set import CoefficientSet, get_score_bounds
from .utils import print_log
def setup_loss_functions(data, coef_set, L0_max = None, loss_computation = None, w_pos = 1.0):
    """
    Build the loss-function handles used by RiskSLIM.

    Selects among three implementations:
      * 'weighted' -- when sample weights are provided and not all equal to 1
      * 'lookup'   -- when coef_set is a CoefficientSet and Z is integer-valued
      * 'fast'     -- otherwise
    and also returns 'real' (non-lookup) variants for evaluating continuous
    solutions.

    NOTE(review): the requested loss_computation only affects the log message;
    the final choice is derived from the data, so the 'normal' branch below is
    never selected by this logic.

    Parameters
    ----------
    data: dict with 'X' (N x P feature matrix), 'Y' (N x 1 labels in {-1,+1}),
          and optionally 'sample_weights'
    coef_set: CoefficientSet (required for the lookup-table implementation)
    L0_max: max number of non-zero regularized coefficients; used to bound scores for the lookup table
    loss_computation: requested implementation; one of None, 'weighted', 'normal', 'fast', 'lookup'
    w_pos: relative weight on examples where Y = +1

    Returns
    -------
    (Z, compute_loss, compute_loss_cut, compute_loss_from_scores,
     compute_loss_real, compute_loss_cut_real, compute_loss_from_scores_real)
    """
    #todo check if fast/lookup loss is installed
    assert loss_computation in [None, 'weighted', 'normal', 'fast', 'lookup']

    # Z = X * Y (label-signed feature matrix; scores = Z.dot(rho))
    Z = data['X'] * data['Y']

    if 'sample_weights' in data:
        sample_weights = _setup_training_weights(Y = data['Y'], sample_weights = data['sample_weights'], w_pos = w_pos)
        use_weighted = not np.all(np.equal(sample_weights, 1.0))
    else:
        use_weighted = False

    # the lookup table needs integer scores, so Z must be integer-valued
    integer_data_flag = np.all(Z == np.require(Z, dtype = np.int_))
    use_lookup_table = isinstance(coef_set, CoefficientSet) and integer_data_flag

    if use_weighted:
        final_loss_computation = 'weighted'
    elif use_lookup_table:
        final_loss_computation = 'lookup'
    else:
        final_loss_computation = 'fast'

    if final_loss_computation != loss_computation:
        print_log("switching loss computation from %s to %s" % (loss_computation, final_loss_computation))

    if final_loss_computation == 'weighted':

        from riskslim.loss_functions.log_loss_weighted import \
            log_loss_value, \
            log_loss_value_and_slope, \
            log_loss_value_from_scores

        # C-contiguous layout for this implementation
        Z = np.require(Z, requirements = ['C'])
        total_sample_weights = np.sum(sample_weights)

        compute_loss = lambda rho: log_loss_value(Z, sample_weights, total_sample_weights, rho)
        compute_loss_cut = lambda rho: log_loss_value_and_slope(Z, sample_weights, total_sample_weights, rho)
        compute_loss_from_scores = lambda scores: log_loss_value_from_scores(sample_weights, total_sample_weights, scores)

    elif final_loss_computation == 'normal':

        # NOTE(review): unreachable -- final_loss_computation is never set to 'normal' above
        from riskslim.loss_functions.log_loss import \
            log_loss_value, \
            log_loss_value_and_slope, \
            log_loss_value_from_scores

        Z = np.require(Z, requirements=['C'])
        compute_loss = lambda rho: log_loss_value(Z, rho)
        compute_loss_cut = lambda rho: log_loss_value_and_slope(Z, rho)
        compute_loss_from_scores = lambda scores: log_loss_value_from_scores(scores)

    elif final_loss_computation == 'fast':

        from riskslim.loss_functions.fast_log_loss import \
            log_loss_value, \
            log_loss_value_and_slope, \
            log_loss_value_from_scores

        # Fortran-ordered layout -- presumably required by the compiled loss code; confirm
        Z = np.require(Z, requirements=['F'])
        compute_loss = lambda rho: log_loss_value(Z, rho)
        compute_loss_cut = lambda rho: log_loss_value_and_slope(Z, rho)
        compute_loss_from_scores = lambda scores: log_loss_value_from_scores(scores)

    elif final_loss_computation == 'lookup':

        from riskslim.loss_functions.lookup_log_loss import \
            get_loss_value_and_prob_tables, \
            log_loss_value, \
            log_loss_value_and_slope, \
            log_loss_value_from_scores

        # bound the achievable integer scores to size the lookup table
        s_min, s_max = get_score_bounds(Z_min = np.min(Z, axis=0),
                                        Z_max = np.max(Z, axis=0),
                                        rho_lb = coef_set.lb,
                                        rho_ub = coef_set.ub,
                                        L0_reg_ind = np.array(coef_set.c0) == 0.0,
                                        L0_max = L0_max)

        Z = np.require(Z, requirements=['F'], dtype = float)
        print_log("%d rows in lookup table" % (s_max - s_min + 1))
        loss_value_tbl, prob_value_tbl, tbl_offset = get_loss_value_and_prob_tables(s_min, s_max)

        compute_loss = lambda rho: log_loss_value(Z, rho, loss_value_tbl, tbl_offset)
        compute_loss_cut = lambda rho: log_loss_value_and_slope(Z, rho, loss_value_tbl, prob_value_tbl, tbl_offset)
        compute_loss_from_scores = lambda scores: log_loss_value_from_scores(scores, loss_value_tbl, tbl_offset)

    # real loss functions: the lookup table only covers integer solutions, so
    # continuous solutions are evaluated with the fast implementation instead
    if final_loss_computation == 'lookup':

        from riskslim.loss_functions.fast_log_loss import \
            log_loss_value as loss_value_real, \
            log_loss_value_and_slope as loss_value_and_slope_real,\
            log_loss_value_from_scores as loss_value_from_scores_real

        compute_loss_real = lambda rho: loss_value_real(Z, rho)
        compute_loss_cut_real = lambda rho: loss_value_and_slope_real(Z, rho)
        compute_loss_from_scores_real = lambda scores: loss_value_from_scores_real(scores)

    else:

        compute_loss_real = compute_loss
        compute_loss_cut_real = compute_loss_cut
        compute_loss_from_scores_real = compute_loss_from_scores

    return (Z,
            compute_loss,
            compute_loss_cut,
            compute_loss_from_scores,
            compute_loss_real,
            compute_loss_cut_real,
            compute_loss_from_scores_real)
def _setup_training_weights(Y, sample_weights = None, w_pos = 1.0, w_neg = 1.0, w_total_target = 2.0):
"""
Parameters
----------
Y - N x 1 vector with Y = -1,+1
sample_weights - N x 1 vector
w_pos - positive scalar showing relative weight on examples where Y = +1
w_neg - positive scalar showing relative weight on examples where Y = -1
Returns
-------
a vector of N training weights for all points in the training data
"""
# todo: throw warning if there is no positive/negative point in Y
# process class weights
assert w_pos > 0.0, 'w_pos must be strictly positive'
assert w_neg > 0.0, 'w_neg must be strictly positive'
assert np.isfinite(w_pos), 'w_pos must be finite'
assert np.isfinite(w_neg), 'w_neg must be finite'
w_total = w_pos + w_neg
w_pos = w_total_target * (w_pos / w_total)
w_neg = w_total_target * (w_neg / w_total)
# process case weights
Y = Y.flatten()
N = len(Y)
pos_ind = Y == 1
if sample_weights is None:
training_weights = np.ones(N)
else:
training_weights = sample_weights.flatten()
assert len(training_weights) == N
assert np.all(training_weights >= 0.0)
#todo: throw warning if any training weights = 0
#todo: throw warning if there are no effective positive/negative points in Y
# normalization
training_weights = N * (training_weights / sum(training_weights))
training_weights[pos_ind] *= w_pos
training_weights[~pos_ind] *= w_neg
return training_weights
def setup_penalty_parameters(coef_set, c0_value = 1e-6):
    """Extract L0-penalty parameters from a CoefficientSet.

    Coefficients whose c0 entry is NaN receive the default penalty c0_value;
    the rest keep their own (possibly zero) penalty.

    Parameters
    ----------
    coef_set: CoefficientSet with per-coefficient c0 penalties (NaN = use default)
    c0_value: strictly positive default L0 penalty

    Returns
    -------
    c0_value: default penalty, as a float
    C_0: vector of per-coefficient L0 penalties
    L0_reg_ind: boolean vector marking coefficients that use the default penalty
    C_0_nnz: penalties of the regularized coefficients only
    """
    assert isinstance(coef_set, CoefficientSet)
    assert c0_value > 0.0, 'default L0_parameter should be positive'
    c0_value = float(c0_value)
    penalties = np.array(coef_set.c0)
    default_ind = np.isnan(penalties)
    penalties[default_ind] = c0_value
    return c0_value, penalties, default_ind, penalties[default_ind]
def setup_objective_functions(compute_loss, L0_reg_ind, C_0_nnz):
    """Build closures for the RiskSLIM objective and its L0 components.

    Parameters
    ----------
    compute_loss: callable mapping a coefficient vector rho to its loss value
    L0_reg_ind: boolean vector marking the L0-regularized coefficients
    C_0_nnz: L0 penalty for each regularized coefficient

    Returns
    -------
    get_objval: rho -> loss + total L0 penalty
    get_L0_norm: rho -> number of non-zero regularized coefficients
    get_L0_penalty: rho -> total L0 penalty of rho
    get_alpha: rho -> 0/1 indicator vector over the regularized coefficients
    get_L0_penalty_from_alpha: alpha -> total L0 penalty implied by an indicator vector
    """
    get_objval = lambda rho: compute_loss(rho) + np.sum(C_0_nnz * (rho[L0_reg_ind] != 0.0))
    get_L0_norm = lambda rho: np.count_nonzero(rho[L0_reg_ind])
    get_L0_penalty = lambda rho: np.sum(C_0_nnz * (rho[L0_reg_ind] != 0.0))
    # np.float64 instead of np.float_: the latter alias was removed in NumPy 2.0
    get_alpha = lambda rho: np.array(abs(rho[L0_reg_ind]) > 0.0, dtype = np.float64)
    get_L0_penalty_from_alpha = lambda alpha: np.sum(C_0_nnz * alpha)
    return (get_objval, get_L0_norm, get_L0_penalty, get_alpha, get_L0_penalty_from_alpha)
def get_loss_bounds(Z, rho_ub, rho_lb, L0_reg_ind, L0_max = float('nan')):
    """Bound the mean logistic loss over all feasible coefficient vectors.

    The minimum loss is attained when every example receives its largest
    possible score, the maximum when every example receives its smallest
    possible score, with at most L0_max of the L0-regularized coefficients
    allowed to be non-zero.

    Parameters
    ----------
    Z: N x P data matrix computed as X * Y
    rho_ub: P x 1 vector of coefficient upper bounds
    rho_lb: P x 1 vector of coefficient lower bounds
    L0_reg_ind: boolean vector marking the L0-regularized coefficients
    L0_max: maximum number of non-zero regularized coefficients (NaN = no limit)

    Returns
    -------
    (min_loss, max_loss): bounds on the mean logistic loss
    """
    rho_lb = np.array(rho_lb)
    rho_ub = np.array(rho_ub)

    # maximum number of regularized coefficients that may be non-zero
    L0_max = Z.shape[0] if np.isnan(L0_max) else L0_max
    n_reg = min(L0_max, sum(L0_reg_ind))

    # largest / smallest score each example can receive from each coefficient
    scores_at_lb = Z * rho_lb
    scores_at_ub = Z * rho_ub
    max_scores_matrix = np.maximum(scores_at_ub, scores_at_lb)
    min_scores_matrix = np.minimum(scores_at_ub, scores_at_lb)
    assert (np.all(max_scores_matrix >= min_scores_matrix))

    def extreme_total(score_matrix, largest):
        # sum of the n_reg most extreme regularized scores plus all unregularized scores
        reg_part = score_matrix[:, L0_reg_ind]
        if largest:
            reg_part = -np.sort(-reg_part, axis = 1)
        else:
            reg_part = np.sort(reg_part, axis = 1)
        reg_total = np.sum(reg_part[:, 0:n_reg], axis = 1)
        free_total = np.sum(score_matrix[:, ~L0_reg_ind], axis = 1)
        return reg_total + free_total

    max_score = extreme_total(max_scores_matrix, largest = True)
    min_score = extreme_total(min_scores_matrix, largest = False)
    assert (np.all(max_score >= min_score))

    def mean_log_loss(score):
        # numerically stable mean of log(1 + exp(-score))
        pos = score > 0
        loss = np.empty_like(score)
        loss[pos] = np.log1p(np.exp(-score[pos]))
        loss[~pos] = np.log1p(np.exp(score[~pos])) - score[~pos]
        return loss.mean()

    # min loss at the largest scores, max loss at the smallest scores
    min_loss = mean_log_loss(max_score)
    max_loss = mean_log_loss(min_score)
    return min_loss, max_loss
| 10,101 | 35.469314 | 122 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.