# File: P3O-main/baselines/clip/defaults.py
def mujoco():
return dict(
nsteps=4096,
nminibatches=4096,
lam=0.95,
gamma=0.99,
noptepochs=5,
log_interval=1,
ent_coef=0.0,
lr=lambda f: 1e-4*f,
cliprange=0.2,
value_network='copy'
)
def mujoco_bak():
return dict(
nsteps=2048,
nminibatches=32,
lam=0.95,
gamma=0.99,
noptepochs=10,
log_interval=1,
ent_coef=0.0,
lr=lambda f: 1e-4*f,
cliprange=0.2,
value_network='copy'
)
def atari_bak():
return dict(
nsteps=128, nminibatches=4,
lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
ent_coef=.01,
lr=lambda f : f * 2.5e-4,
cliprange=0.1,
)
def atari():
return dict(
nsteps=5, nminibatches=5,
lam=0.95, gamma=0.99, noptepochs=3, log_interval=1,
ent_coef=.01,
lr=lambda f : f * 2.5e-4,
cliprange=0.1,
)
def retro():
return atari()
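
# Aside: the lr and cliprange entries above are schedules over training
# progress; the learner calls them with a fraction that starts at 1.0 and
# anneals toward 0.0. A self-contained sketch of how such a lambda behaves
# (illustrative only, not the learner's actual update loop):
def _lr_schedule_sketch():
    lr = lambda f: 1e-4 * f                      # same shape as lr in mujoco()
    nupdates = 4
    for update in range(1, nupdates + 1):
        frac = 1.0 - (update - 1.0) / nupdates   # 1.0, 0.75, 0.5, 0.25
        print(update, lr(frac))                  # learning rate decays linearly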

# File: P3O-main/baselines/clip/runner.py
import numpy as np
P3O | P3O-main/baselines/clip/runner.py | import numpy as np
from baselines.common.runners import AbstractEnvRunner
class Runner(AbstractEnvRunner):
"""
We use this object to make a mini batch of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch
"""
def __init__(self, *, env, model, nsteps, gamma, lam):
super().__init__(env=env, model=model, nsteps=nsteps)
        # Lambda used in GAE (Generalized Advantage Estimation)
self.lam = lam
# Discount rate
self.gamma = gamma
def run(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
# For n in range number of steps
for _ in range(self.nsteps):
            # Given observations, get actions, values and neglogpacs
            # We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, self.states, neglogpacs = self.model.step(self.obs, S=self.states, M=self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
            # Take actions in env and observe the results
            # Infos contains a ton of useful information
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
        # batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, S=self.states, M=self.dones)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
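
# Aside: a quick numpy check of sf01 above. Rollouts are collected time-major
# with shape (nsteps, nenv, ...); training wants a flat batch of nsteps * nenv
# rows. Illustrative sketch, assuming sf01 as defined in this file:
def _sf01_sketch():
    nsteps, nenv, obs_dim = 3, 2, 4
    batch = np.arange(nsteps * nenv * obs_dim).reshape(nsteps, nenv, obs_dim)
    flat = sf01(batch)
    assert flat.shape == (nsteps * nenv, obs_dim)
    # After the axis swap, rows are grouped per environment: all of env 0's
    # steps first, then all of env 1's.
    return flat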

# File: P3O-main/baselines/clip/__init__.py  (empty file)

# File: P3O-main/baselines/ppo2/ppo2.py
import os
import time
import numpy as np
import os.path as osp
from baselines import logger
from collections import deque
from baselines.common import explained_variance, set_global_seeds
from baselines.common.policies import build_policy
try:
from mpi4py import MPI
except ImportError:
MPI = None
from baselines.ppo2.runner import Runner
def constfn(val):
def f(_):
return val
return f
def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, beta=15, load_path=None, model_fn=None, update_fn=None, init_fn=None, mpi_rank_weight=1, comm=None, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of timesteps between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
should be smaller or equal than number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of timesteps between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0)
# Instantiate the model object (that creates act_model and train_model)
if model_fn is None:
from baselines.ppo2.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
if eval_env is not None:
eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
epinfobuf = deque(maxlen=100)
if eval_env is not None:
eval_epinfobuf = deque(maxlen=100)
if init_fn is not None:
init_fn()
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
# Start timer
tstart = time.perf_counter()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...')
# Get minibatch
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
if eval_env is not None:
eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
if update % log_interval == 0 and is_mpi_root: logger.info('Done.')
epinfobuf.extend(epinfos)
if eval_env is not None:
eval_epinfobuf.extend(eval_epinfos)
# Here what we're going to do is for each minibatch calculate the loss and append it.
mblossvals = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
# End timer
tnow = time.perf_counter()
# Calculate the fps (frame per second)
fps = int(nbatch / (tnow - tstart))
if update_fn is not None:
update_fn(update)
if update % log_interval == 0 or update == 1:
            # Calculates if the value function is a good predictor of the returns (ev close to 1)
            # or if it's just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, returns)
logger.logkv("misc/serial_timesteps", update*nsteps)
logger.logkv("misc/nupdates", update)
logger.logkv("misc/total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("misc/explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
if eval_env is not None:
logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
logger.logkv('misc/time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv('loss/' + lossname, lossval)
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and is_mpi_root:
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
return model
# Avoid division error when calculating the mean (if epinfos is empty, return np.nan instead of raising an error)
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
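
# Aside: the nonrecurrent minibatching above boils down to shuffling flat
# indices once per epoch and slicing fixed-size chunks. A self-contained sketch
# with toy sizes (illustrative only; the real loop feeds each slice to
# model.train):
def _minibatch_sketch():
    nbatch, nminibatches, noptepochs = 8, 2, 2
    nbatch_train = nbatch // nminibatches
    data = np.arange(nbatch) * 10              # stands in for obs/returns/etc.
    inds = np.arange(nbatch)
    for _ in range(noptepochs):
        np.random.shuffle(inds)
        for start in range(0, nbatch, nbatch_train):
            mbinds = inds[start:start + nbatch_train]
            minibatch = data[mbinds]           # one gradient step per slice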

# File: P3O-main/baselines/ppo2/microbatched_model.py
import tensorflow as tf
import numpy as np
from baselines.ppo2.model import Model
class MicrobatchedModel(Model):
"""
    Model that does training one microbatch at a time - for when gradient
    computation on the entire minibatch does not fit in memory
"""
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm, mpi_rank_weight, comm, microbatch_size):
self.nmicrobatches = nbatch_train // microbatch_size
self.microbatch_size = microbatch_size
assert nbatch_train % microbatch_size == 0, 'microbatch_size ({}) should divide nbatch_train ({}) evenly'.format(microbatch_size, nbatch_train)
super().__init__(
policy=policy,
ob_space=ob_space,
ac_space=ac_space,
nbatch_act=nbatch_act,
nbatch_train=microbatch_size,
nsteps=nsteps,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
mpi_rank_weight=mpi_rank_weight,
comm=comm)
self.grads_ph = [tf.placeholder(dtype=g.dtype, shape=g.shape) for g in self.grads]
grads_ph_and_vars = list(zip(self.grads_ph, self.var))
self._apply_gradients_op = self.trainer.apply_gradients(grads_ph_and_vars)
def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
assert states is None, "microbatches with recurrent models are not supported yet"
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# Returns = R + yV(s')
advs = returns - values
# Normalize the advantages
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
# Initialize empty list for per-microbatch stats like pg_loss, vf_loss, entropy, approxkl (whatever is in self.stats_list)
stats_vs = []
for microbatch_idx in range(self.nmicrobatches):
_sli = range(microbatch_idx * self.microbatch_size, (microbatch_idx+1) * self.microbatch_size)
td_map = {
self.train_model.X: obs[_sli],
self.A:actions[_sli],
self.ADV:advs[_sli],
self.R:returns[_sli],
self.CLIPRANGE:cliprange,
self.OLDNEGLOGPAC:neglogpacs[_sli],
self.OLDVPRED:values[_sli]
}
# Compute gradient on a microbatch (note that variables do not change here) ...
grad_v, stats_v = self.sess.run([self.grads, self.stats_list], td_map)
if microbatch_idx == 0:
sum_grad_v = grad_v
else:
# .. and add to the total of the gradients
for i, g in enumerate(grad_v):
sum_grad_v[i] += g
stats_vs.append(stats_v)
feed_dict = {ph: sum_g / self.nmicrobatches for ph, sum_g in zip(self.grads_ph, sum_grad_v)}
feed_dict[self.LR] = lr
# Update variables using average of the gradients
self.sess.run(self._apply_gradients_op, feed_dict)
# Return average of the stats
return np.mean(np.array(stats_vs), axis=0).tolist()
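
# Aside: the accumulation above is plain gradient averaging; because every
# microbatch has the same size, the mean of microbatch-mean gradients equals
# the minibatch-mean gradient. A numpy sketch of the same idea; grad_fn here
# is a hypothetical stand-in for sess.run(self.grads, td_map):
def _microbatch_sketch():
    def grad_fn(x):
        return (2.0 * x).mean()                # per-microbatch mean gradient
    minibatch = np.arange(8, dtype=np.float64)
    microbatch_size = 2
    nmicro = len(minibatch) // microbatch_size
    total = 0.0
    for i in range(nmicro):
        sli = minibatch[i * microbatch_size:(i + 1) * microbatch_size]
        total += grad_fn(sli)
    avg_grad = total / nmicro                  # equals grad_fn(minibatch)
    assert np.isclose(avg_grad, grad_fn(minibatch))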

# File: P3O-main/baselines/ppo2/test_microbatches.py
import gym
import tensorflow as tf
import numpy as np
from functools import partial
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.tf_util import make_session
from baselines.ppo2.ppo2 import learn
from baselines.ppo2.microbatched_model import MicrobatchedModel
def test_microbatches():
def env_fn():
env = gym.make('CartPole-v0')
env.seed(0)
return env
learn_fn = partial(learn, network='mlp', nsteps=32, total_timesteps=32, seed=0)
env_ref = DummyVecEnv([env_fn])
sess_ref = make_session(make_default=True, graph=tf.Graph())
learn_fn(env=env_ref)
vars_ref = {v.name: sess_ref.run(v) for v in tf.trainable_variables()}
env_test = DummyVecEnv([env_fn])
sess_test = make_session(make_default=True, graph=tf.Graph())
learn_fn(env=env_test, model_fn=partial(MicrobatchedModel, microbatch_size=2))
# learn_fn(env=env_test)
vars_test = {v.name: sess_test.run(v) for v in tf.trainable_variables()}
for v in vars_ref:
np.testing.assert_allclose(vars_ref[v], vars_test[v], atol=3e-3)
if __name__ == '__main__':
test_microbatches()

# File: P3O-main/baselines/ppo2/model.py
import tensorflow as tf
import functools
from baselines.common.tf_util import get_session, save_variables, load_variables
from baselines.common.tf_util import initialize
try:
from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer
from mpi4py import MPI
from baselines.common.mpi_util import sync_from_root
except ImportError:
MPI = None
class Model(object):
"""
We use this object to :
__init__:
- Creates the step_model
- Creates the train_model
train():
        - Make the training part (feedforward and backpropagation of gradients)
    save/load():
        - Save/load the model
"""
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm, mpi_rank_weight=1, comm=None, microbatch_size=None):
self.sess = sess = get_session()
if MPI is not None and comm is None:
comm = MPI.COMM_WORLD
with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE):
# CREATE OUR TWO MODELS
# act_model that is used for sampling
act_model = policy(nbatch_act, 1, sess)
# Train model for training
if microbatch_size is None:
train_model = policy(nbatch_train, nsteps, sess)
else:
train_model = policy(microbatch_size, nsteps, sess)
# CREATE THE PLACEHOLDERS
self.A = A = train_model.pdtype.sample_placeholder([None])
self.ADV = ADV = tf.placeholder(tf.float32, [None])
self.R = R = tf.placeholder(tf.float32, [None])
# Keep track of old actor
self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
# Keep track of old critic
self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None])
self.LR = LR = tf.placeholder(tf.float32, [])
# Cliprange
self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
# Calculate the entropy
# Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.
entropy = tf.reduce_mean(train_model.pd.entropy())
# CALCULATE THE LOSS
# Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss
# Clip the value to reduce variability during Critic training
# Get the predicted value
vpred = train_model.vf
vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
# Unclipped value
vf_losses1 = tf.square(vpred - R)
# Clipped value
vf_losses2 = tf.square(vpredclipped - R)
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
# Calculate ratio (pi current policy / pi old policy)
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
# Defining Loss = - J is equivalent to max J
pg_losses = -ADV * ratio
pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
# Final PG loss
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
# pg_loss = tf.reduce_mean(pg_losses2)
approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
rAt = tf.reduce_mean(-ADV * ratio)
# DEON metric
ptadv = (tf.math.sign(ADV) + 1) / 2
nta = (-1 * tf.math.sign(ratio -1) + 1) / 2
ntadv = (-1*tf.math.sign(ADV) + 1) / 2
pta = (tf.math.sign(ratio -1) + 1) / 2
unnormal_pt = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), 0)) * ptadv*nta)
unnormal_nt = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), 0)) * ntadv*pta)
# Total loss
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
# UPDATE THE PARAMETERS USING LOSS
# 1. Get the model parameters
params = tf.trainable_variables('ppo2_model')
# 2. Build our trainer
if comm is not None and comm.Get_size() > 1:
self.trainer = MpiAdamOptimizer(comm, learning_rate=LR, mpi_rank_weight=mpi_rank_weight, epsilon=1e-5)
else:
self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
# 3. Calculate the gradients
grads_and_var = self.trainer.compute_gradients(loss, params)
grads, var = zip(*grads_and_var)
if max_grad_norm is not None:
# Clip the gradients (normalize)
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads_and_var = list(zip(grads, var))
        # zip aggregates each gradient with its associated parameter
# For instance zip(ABCD, xyza) => Ax, By, Cz, Da
self.grads = grads
self.var = var
self._train_op = self.trainer.apply_gradients(grads_and_var)
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac', 'rAt',"unnormal_pt", 'unnormal_nt']
self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac, rAt, unnormal_pt, unnormal_nt]
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = functools.partial(save_variables, sess=sess)
self.load = functools.partial(load_variables, sess=sess)
initialize()
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
if MPI is not None:
sync_from_root(sess, global_variables, comm=comm) #pylint: disable=E1101
def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# Returns = R + yV(s')
advs = returns - values
# Normalize the advantages
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
td_map = {
self.train_model.X : obs,
self.A : actions,
self.ADV : advs,
self.R : returns,
self.LR : lr,
self.CLIPRANGE : cliprange,
self.OLDNEGLOGPAC : neglogpacs,
self.OLDVPRED : values
}
if states is not None:
td_map[self.train_model.S] = states
td_map[self.train_model.M] = masks
return self.sess.run(
self.stats_list + [self._train_op],
td_map
)[:-1]
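
# Aside: the clipped surrogate in Model above is easy to verify numerically.
# A numpy sketch, assuming precomputed probability ratios and advantages:
def _ppo_clip_sketch(cliprange=0.2):
    import numpy as np
    ratio = np.array([0.5, 1.0, 1.5])
    adv = np.array([1.0, 1.0, -1.0])
    pg1 = -adv * ratio                                             # unclipped
    pg2 = -adv * np.clip(ratio, 1.0 - cliprange, 1.0 + cliprange)  # clipped
    # Mirrors pg_loss above: the elementwise max is the pessimistic bound.
    return np.maximum(pg1, pg2).mean()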

# File: P3O-main/baselines/ppo2/defaults.py
def mujoco():
return dict(
nsteps=2048,
nminibatches=32,
lam=0.95,
gamma=0.99,
noptepochs=10,
log_interval=1,
ent_coef=0.0,
lr=lambda f: 3e-4*f,
cliprange=0.2,
value_network='copy'
)
def atari():
return dict(
nsteps=128, nminibatches=4,
lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
ent_coef=.01,
lr=lambda f : f * 2.5e-4,
cliprange=0.1,
)
def retro():
return atari()

# File: P3O-main/baselines/ppo2/runner.py
import numpy as np
from baselines.common.runners import AbstractEnvRunner
class Runner(AbstractEnvRunner):
"""
We use this object to make a mini batch of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch
"""
def __init__(self, *, env, model, nsteps, gamma, lam):
super().__init__(env=env, model=model, nsteps=nsteps)
        # Lambda used in GAE (Generalized Advantage Estimation)
self.lam = lam
# Discount rate
self.gamma = gamma
def run(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
# For n in range number of steps
for _ in range(self.nsteps):
            # Given observations, get actions, values and neglogpacs
            # We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, self.states, neglogpacs = self.model.step(self.obs, S=self.states, M=self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
            # Take actions in env and observe the results
            # Infos contains a ton of useful information
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
        # batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, S=self.states, M=self.dones)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
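
# Aside: the backward loop in run() above is Generalized Advantage Estimation.
# A numpy transcription for a single environment, using the same names as above
# (illustrative sketch, 1-D inputs):
def _gae_sketch(rewards, values, dones, last_value, last_done, gamma=0.99, lam=0.95):
    nsteps = len(rewards)
    advs = np.zeros(nsteps)
    lastgaelam = 0.0
    for t in reversed(range(nsteps)):
        nextnonterminal = (1.0 - last_done) if t == nsteps - 1 else (1.0 - dones[t + 1])
        nextvalues = last_value if t == nsteps - 1 else values[t + 1]
        delta = rewards[t] + gamma * nextvalues * nextnonterminal - values[t]
        advs[t] = lastgaelam = delta + gamma * lam * nextnonterminal * lastgaelam
    return advs, advs + values                 # advantages, returns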

# File: P3O-main/baselines/ppo2/__init__.py  (empty file)

# File: P3O-main/baselines/p3o/model.py
import tensorflow as tf
import functools
from baselines.common.tf_util import get_session, save_variables, load_variables
from baselines.common.tf_util import initialize
from baselines.common.input import observation_placeholder
try:
from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer
from mpi4py import MPI
from baselines.common.mpi_util import sync_from_root
except ImportError:
MPI = None
class Model(object):
"""
We use this object to :
__init__:
- Creates the step_model
- Creates the train_model
train():
        - Make the training part (feedforward and backpropagation of gradients)
    save/load():
        - Save/load the model
"""
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, kl_coef, vf_coef, max_grad_norm, mpi_rank_weight=1, comm=None, microbatch_size=None):
self.sess = sess = get_session()
if MPI is not None and comm is None:
comm = MPI.COMM_WORLD
# observation_placeholder(ob_space, batch_size=nbatch_train)
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
# CREATE OUR TWO MODELS
# act_model that is used for sampling
act_model = policy(nbatch_act, 1, sess)
# Train model for training
if microbatch_size is None:
train_model = policy(nbatch_train, nsteps, sess)
else:
train_model = policy(microbatch_size, nsteps, sess)
with tf.variable_scope("oldpi", reuse=tf.AUTO_REUSE):
oldpi = policy(nbatch_train, nsteps, sess,observ_placeholder=train_model.X)
# oldpi = policy(nbatch_train, nsteps, sess, observation_placeholder=)
# CREATE THE PLACEHOLDERS
self.A = A = train_model.pdtype.sample_placeholder([None])
self.ADV = ADV = tf.placeholder(tf.float32, [None])
self.R = R = tf.placeholder(tf.float32, [None])
# Keep track of old actor
self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
# Keep track of old critic
self.LR = LR = tf.placeholder(tf.float32, [])
# Cliprange
self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, [])
self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None])
self.BETA = BETA = tf.placeholder(tf.float32, [])
self.TAU = TAU = tf.placeholder(tf.float32, [])
self.assign = [tf.assign(oldv, newv)
for (oldv, newv) in zip(get_variables("oldpi"), get_variables("model"))]
neglogpac = train_model.pd.neglogp(A)
# Calculate the entropy
# Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.
entropy = tf.reduce_mean(train_model.pd.entropy())
# CALCULATE THE LOSS
# Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss
# Clip the value to reduce variability during Critic training
# Get the predicted value
vpred = train_model.vf
# vpred2 = train_model.vf2
vpredclipped = tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE) + OLDVPRED
        # Clipped value loss
        vf_clip_losses = tf.square(vpredclipped - R)
        # Unclipped value loss
        vf_losses = tf.square(vpred - R)
vf_loss = .1 * tf.reduce_mean(tf.maximum(vf_losses, vf_clip_losses))
# vf_loss = .5 * tf.reduce_mean(vf_losses)
# Calculate ratio (pi current policy / pi old policy)
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
fr_kl_loss = kl_coef*oldpi.pd.kl(train_model.pd)
pg_losses2 = -ADV*(4.0/self.TAU)*tf.sigmoid(ratio*self.TAU - self.TAU)
# gradient_r_scpi = tf.reduce_mean(-ADV*4*tf.sigmoid(4*ratio - 4)*(1-tf.sigmoid(4*ratio - 4)))
pg_loss = tf.reduce_mean(pg_losses2)
pg_loss += tf.reduce_mean(fr_kl_loss)
#static
approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
rAt = tf.reduce_mean(-ADV * ratio)
# tf.abs(ratio - 1.0) DEON metric
ptadv = (tf.math.sign(ADV) + 1) / 2
nta = (-1 * tf.math.sign(ratio -1) + 1) / 2
ntadv = (-1*tf.math.sign(ADV) + 1) / 2
pta = (tf.math.sign(ratio -1) + 1) / 2
unnormal_pt = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), 0)) * ptadv*nta)
unnormal_nt = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), 0)) * ntadv*pta)
# Total loss
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
# UPDATE THE PARAMETERS USING LOSS
# 1. Get the model parameters
params = tf.trainable_variables('model')
# 2. Build our trainer
if comm is not None and comm.Get_size() > 1:
self.trainer = MpiAdamOptimizer(comm, learning_rate=LR, mpi_rank_weight=mpi_rank_weight, epsilon=1e-5)
else:
self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
# self.trainer = tf.train.GradientDescentOptimizer(learning_rate=LR)
# 3. Calculate the gradients
grads_and_var = self.trainer.compute_gradients(loss, params)
grads, var = zip(*grads_and_var)
if max_grad_norm is not None:
# Clip the gradients (normalize)
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads_and_var = list(zip(grads, var))
        # zip aggregates each gradient with its associated parameter
# For instance zip(ABCD, xyza) => Ax, By, Cz, Da
self.grads = grads
self.var = var
self._train_op = self.trainer.apply_gradients(grads_and_var)
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac', 'rAt',"unnormal_pt", 'unnormal_nt']
self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac, rAt, unnormal_pt, unnormal_nt]
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = functools.partial(save_variables, sess=sess)
self.load = functools.partial(load_variables, sess=sess)
initialize()
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
if MPI is not None:
sync_from_root(sess, global_variables, comm=comm) #pylint: disable=E1101
def train(self, lr, tau, cliprange, beta, obs, returns, masks, actions, values, neglogpacs, states=None):
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# Returns = R + yV(s')
advs = returns - values
# Normalize the advantages
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
td_map = {
self.train_model.X: obs,
self.A: actions,
self.ADV : advs,
self.R : returns,
self.LR : lr,
self.CLIPRANGE: cliprange,
self.BETA: beta,
self.OLDNEGLOGPAC : neglogpacs,
self.OLDVPRED: values,
self.TAU: tau,
}
if states is not None:
td_map[self.train_model.S] = states
td_map[self.train_model.M] = masks
return self.sess.run(
self.stats_list + [self._train_op],
td_map
)[:-1]
def assign_v(self):
self.sess.run(self.assign)
def get_variables(scope):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope)
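
# Aside: the policy term in Model above replaces PPO's hard clip with a
# saturating sigmoid of the ratio, -ADV * (4/tau) * sigmoid(tau * (ratio - 1)).
# Its slope w.r.t. the ratio at ratio = 1 is exactly -ADV (matching the
# unclipped surrogate there) and flattens as the ratio drifts from 1. A numpy
# sketch of the curve, under those assumptions:
def _p3o_surrogate_sketch(tau=4.0, adv=1.0):
    import numpy as np
    sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
    ratios = np.linspace(0.0, 2.0, 9)
    # Same form as pg_losses2 above; saturates for ratios far from 1.
    return -adv * (4.0 / tau) * sigmoid(ratios * tau - tau)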

# File: P3O-main/baselines/p3o/defaults.py
def mujoco():
return dict(
nsteps=2048,
nminibatches=32,
lam=0.95,
gamma=0.99,
noptepochs=10,
log_interval=1,
ent_coef=0.01,
kl_coef=0.05,
lr=lambda f: 3e-4*f,
cliprange=0.2,
value_network='copy'
#random seed 4
)
def mujoco_bak():
return dict(
nsteps=2048,
nminibatches=32,
lam=0.95,
gamma=0.99,
noptepochs=10,
log_interval=1,
ent_coef=0.01,
kl_coef=1,
lr=lambda f: 3e-4 * f,
cliprange=0.2,
value_network='copy'
)
# def mujoco_original():
# return dict(
# nsteps=2048,
# nminibatches=32,
# lam=0.95,
# gamma=0.99,
# noptepochs=10,
# log_interval=1,
# ent_coef=0.0,
# kl_coef=1,
# lr=lambda f: 2.5e-4 * f,
# cliprange=0.2,
# value_network='copy'
# )
# 3e-4
# best_ctn 1e4_fix best_dst 1e4?
# 2.5e-4
def atari():
return dict(
nsteps=128, nminibatches=4,
lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
ent_coef=.01,
kl_coef=1,
lr=lambda f : f * 2.5e-4,
cliprange=0.1,
# value_network='copy'
)
def retro():
return atari()

# File: P3O-main/baselines/p3o/runner.py
import numpy as np
from baselines.common.runners import AbstractEnvRunner
class Runner(AbstractEnvRunner):
"""
We use this object to make a mini batch of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch
"""
def __init__(self, *, env, model, nsteps, gamma, lam):
super().__init__(env=env, model=model, nsteps=nsteps)
        # Lambda used in GAE (Generalized Advantage Estimation)
self.lam = lam
# Discount rate
self.gamma = gamma
def run(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
# For n in range number of steps
for _ in range(self.nsteps):
            # Given observations, get actions, values and neglogpacs
            # We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, self.states, neglogpacs = self.model.step(self.obs, S=self.states, M=self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
            # Take actions in env and observe the results
            # Infos contains a ton of useful information
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
# print(maybeepinfo)
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
        # batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, S=self.states, M=self.dones)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
# print(sf01())
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])

# File: P3O-main/baselines/p3o/__init__.py  (empty file)

# File: P3O-main/baselines/p3o/p3o.py
import os
import random
import time
import numpy as np
import os.path as osp
from baselines import logger
from collections import deque
from baselines.common import explained_variance, set_global_seeds
from baselines.common.policies import build_policy
try:
from mpi4py import MPI
except ImportError:
MPI = None
from baselines.p3o.runner import Runner
def constfn(val):
def f(_):
return val
return f
def betafn(val):
def f(frac):
return val*frac
return f
def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, kl_coef=1.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, beta=15,
save_interval=0, load_path=None, model_fn=None, update_fn=None, init_fn=None, mpi_rank_weight=1, comm=None, **network_kwargs):
'''
    Learn policy using the P3O algorithm
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of timesteps between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
should be smaller or equal than number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of timesteps between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
    if isinstance(beta, (int, float)): beta = betafn(beta)
else: assert callable(beta)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0)
# Instantiate the model object (that creates act_model and train_model)
if model_fn is None:
from baselines.p3o.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, kl_coef=kl_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
if eval_env is not None:
eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
epinfobuf = deque(maxlen=100)
if eval_env is not None:
eval_epinfobuf = deque(maxlen=100)
if init_fn is not None:
init_fn()
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
# Start timer
tstart = time.perf_counter()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# taunow = 2+2*(1-frac)
taunow = 4
# Calculate the cliprange
cliprangenow = cliprange(frac)
betanow = beta(frac)
if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...')
# Get minibatch
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
if eval_env is not None:
eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
if update % log_interval == 0 and is_mpi_root: logger.info('Done.')
epinfobuf.extend(epinfos)
# print(epinfobuf)
if eval_env is not None:
eval_epinfobuf.extend(eval_epinfos)
mblossvals = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
model.assign_v()
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
# print(lrnow, cliprangenow)
res = model.train(lrnow, taunow, cliprangenow, betanow, *slices)
# print(res[4].item())
mblossvals.append(res)
else: # recurrent version
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
                    mblossvals.append(model.train(lrnow, taunow, cliprangenow, betanow, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
# End timer
tnow = time.perf_counter()
# Calculate the fps (frame per second)
fps = int(nbatch / (tnow - tstart))
if update_fn is not None:
update_fn(update)
if update % log_interval == 0 or update == 1:
            # Calculates if the value function is a good predictor of the returns (ev close to 1)
            # or if it's just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, returns)
logger.logkv("misc/serial_timesteps", update*nsteps)
logger.logkv("misc/nupdates", update)
logger.logkv("misc/total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("misc/explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
if eval_env is not None:
logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
logger.logkv('misc/time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv('loss/' + lossname, lossval)
logger.dumpkvs()
return model
# Avoid division error when calculating the mean (if epinfos is empty, return np.nan instead of raising an error)
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
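
# Aside: unlike ppo2, this learner snapshots the policy once per update
# (model.assign_v()) so the KL term is taken against a frozen "oldpi". A
# minimal sketch of that copy-before-update pattern, with plain numpy arrays
# standing in for the two variable lists (hypothetical names):
def _assign_sketch():
    model_vars = [np.array([1.0, 2.0]), np.array([3.0])]
    oldpi_vars = [np.zeros(2), np.zeros(1)]
    # Mirrors self.assign in Model: copy every live variable into its frozen twin.
    for old, new in zip(oldpi_vars, model_vars):
        old[...] = new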

# File: P3O-main/baselines/a2c/a2c.py
import time
import functools
import tensorflow as tf
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.common import tf_util
from baselines.common.policies import build_policy
from baselines.a2c.utils import Scheduler, find_trainable_variables
from baselines.a2c.runner import Runner
from baselines.ppo2.ppo2 import safemean
from collections import deque
from tensorflow import losses
class Model(object):
"""
We use this class to :
__init__:
- Creates the step_model
- Creates the train_model
train():
        - Make the training part (feedforward and backpropagation of gradients)
    save/load():
        - Save/load the model
"""
def __init__(self, policy, env, nsteps,
ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,
alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear'):
sess = tf_util.get_session()
nenvs = env.num_envs
nbatch = nenvs*nsteps
with tf.variable_scope('a2c_model', reuse=tf.AUTO_REUSE):
# step_model is used for sampling
step_model = policy(nenvs, 1, sess)
# train_model is used to train our network
train_model = policy(nbatch, nsteps, sess)
A = tf.placeholder(train_model.action.dtype, train_model.action.shape)
ADV = tf.placeholder(tf.float32, [nbatch])
R = tf.placeholder(tf.float32, [nbatch])
LR = tf.placeholder(tf.float32, [])
# Calculate the loss
# Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss
# Policy loss
neglogpac = train_model.pd.neglogp(A)
# L = A(s,a) * -logpi(a|s)
pg_loss = tf.reduce_mean(ADV * neglogpac)
# Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.
entropy = tf.reduce_mean(train_model.pd.entropy())
# Value loss
vf_loss = losses.mean_squared_error(tf.squeeze(train_model.vf), R)
loss = pg_loss - entropy*ent_coef + vf_loss * vf_coef
# Update parameters using loss
# 1. Get the model parameters
params = find_trainable_variables("a2c_model")
# 2. Calculate the gradients
grads = tf.gradients(loss, params)
if max_grad_norm is not None:
# Clip the gradients (normalize)
grads, grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
        # zip aggregates each gradient with its associated parameter
# For instance zip(ABCD, xyza) => Ax, By, Cz, Da
# 3. Make op for one policy and value update step of A2C
trainer = tf.train.RMSPropOptimizer(learning_rate=LR, decay=alpha, epsilon=epsilon)
_train = trainer.apply_gradients(grads)
lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
def train(obs, states, rewards, masks, actions, values):
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# rewards = R + yV(s')
advs = rewards - values
for step in range(len(obs)):
cur_lr = lr.value()
td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, LR:cur_lr}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
policy_loss, value_loss, policy_entropy, _ = sess.run(
[pg_loss, vf_loss, entropy, _train],
td_map
)
return policy_loss, value_loss, policy_entropy
self.train = train
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.value = step_model.value
self.initial_state = step_model.initial_state
self.save = functools.partial(tf_util.save_variables, sess=sess)
self.load = functools.partial(tf_util.load_variables, sess=sess)
tf.global_variables_initializer().run(session=sess)
def learn(
network,
env,
seed=None,
nsteps=5,
total_timesteps=int(80e6),
vf_coef=0.5,
ent_coef=0.01,
max_grad_norm=0.5,
lr=7e-4,
lrschedule='linear',
epsilon=1e-5,
alpha=0.99,
gamma=0.99,
log_interval=100,
load_path=None,
**network_kwargs):
'''
Main entrypoint for A2C algorithm. Train a policy with given network architecture on a given environment using a2c algorithm.
Parameters:
-----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See baselines.common/policies.py/lstm for more details on using recurrent nets in policies
env: RL environment. Should implement interface similar to VecEnv (baselines.common/vec_env) or be wrapped with DummyVecEnv (baselines.common/vec_env/dummy_vec_env.py)
    seed: seed to make the random number sequence in the algorithm reproducible. By default it is None, which means seed from system noise generator (not reproducible)
nsteps: int, number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int, total number of timesteps to train on (default: 80M)
vf_coef: float, coefficient in front of value function loss in the total loss function (default: 0.5)
    ent_coef: float, coefficient in front of the policy entropy in the total loss function (default: 0.01)
    max_grad_norm: float, gradient is clipped to have global L2 norm no more than this value (default: 0.5)
lr: float, learning rate for RMSProp (current implementation has RMSProp hardcoded in) (default: 7e-4)
lrschedule: schedule of learning rate. Can be 'linear', 'constant', or a function [0..1] -> [0..1] that takes fraction of the training progress as input and
returns fraction of the learning rate (specified as lr) as output
epsilon: float, RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update) (default: 1e-5)
alpha: float, RMSProp decay parameter (default: 0.99)
gamma: float, reward discounting parameter (default: 0.99)
log_interval: int, specifies how frequently the logs are printed out (default: 100)
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
# Get the nb of env
nenvs = env.num_envs
policy = build_policy(env, network, **network_kwargs)
# Instantiate the model object (that creates step_model and train_model)
model = Model(policy=policy, env=env, nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env, model, nsteps=nsteps, gamma=gamma)
epinfobuf = deque(maxlen=100)
# Calculate the batch_size
nbatch = nenvs*nsteps
# Start total timer
tstart = time.time()
for update in range(1, total_timesteps//nbatch+1):
# Get mini batch of experiences
obs, states, rewards, masks, actions, values, epinfos = runner.run()
epinfobuf.extend(epinfos)
policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
nseconds = time.time()-tstart
# Calculate the fps (frame per second)
fps = int((update*nbatch)/nseconds)
if update % log_interval == 0 or update == 1:
            # Calculates if the value function is a good predictor of the returns (ev close to 1)
            # or if it's just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, rewards)
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", update*nbatch)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(ev))
logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.dump_tabular()
return model
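
# Aside: explained_variance above summarizes how well the value head predicts
# the returns: ev = 1 - Var(returns - values) / Var(returns), so 1.0 is perfect
# and <= 0 is no better than a constant. A numpy sketch of that formula (the
# baselines helper is equivalent up to edge-case handling):
def _explained_variance_sketch(ypred, y):
    import numpy as np
    vary = np.var(y)
    return np.nan if vary == 0 else 1.0 - np.var(y - ypred) / vary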

# File: P3O-main/baselines/a2c/utils.py
import os
import numpy as np
import tensorflow as tf
from collections import deque
def sample(logits):
noise = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(noise)), 1)
def cat_entropy(logits):
a0 = logits - tf.reduce_max(logits, 1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, 1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)
def cat_entropy_softmax(p0):
return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1)
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False):
if data_format == 'NHWC':
channel_ax = 3
strides = [1, stride, stride, 1]
bshape = [1, 1, 1, nf]
elif data_format == 'NCHW':
channel_ax = 1
strides = [1, 1, stride, stride]
bshape = [1, nf, 1, 1]
else:
raise NotImplementedError
bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
nin = x.get_shape()[channel_ax].value
wshape = [rf, rf, nin, nf]
with tf.variable_scope(scope):
w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
with tf.variable_scope(scope):
nin = x.get_shape()[1].value
w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
return tf.matmul(x, w)+b
def batch_to_seq(h, nbatch, nsteps, flat=False):
if flat:
h = tf.reshape(h, [nbatch, nsteps])
else:
h = tf.reshape(h, [nbatch, nsteps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def _ln(x, g, b, e=1e-5, axes=[1]):
u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
x = (x-u)/tf.sqrt(s+e)
x = x*g+b
return x
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def conv_to_fc(x):
nh = np.prod([v.value for v in x.get_shape()[1:]])
x = tf.reshape(x, [-1, nh])
return x
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r*(1.-done) # fixed off by one bug
discounted.append(r)
return discounted[::-1]
def find_trainable_variables(key):
return tf.trainable_variables(key)
def make_path(f):
return os.makedirs(f, exist_ok=True)
def constant(p):
return 1
def linear(p):
return 1-p
def middle_drop(p):
eps = 0.75
if 1-p<eps:
return eps*0.1
return 1-p
def double_linear_con(p):
p *= 2
eps = 0.125
if 1-p<eps:
return eps
return 1-p
def double_middle_drop(p):
eps1 = 0.75
eps2 = 0.25
if 1-p<eps1:
if 1-p<eps2:
return eps2*0.5
return eps1*0.1
return 1-p
schedules = {
'linear':linear,
'constant':constant,
'double_linear_con': double_linear_con,
'middle_drop': middle_drop,
'double_middle_drop': double_middle_drop
}
class Scheduler(object):
def __init__(self, v, nvalues, schedule):
self.n = 0.
self.v = v
self.nvalues = nvalues
self.schedule = schedules[schedule]
def value(self):
current_value = self.v*self.schedule(self.n/self.nvalues)
self.n += 1.
return current_value
def value_steps(self, steps):
return self.v*self.schedule(steps/self.nvalues)
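# Illustrative sketch (hypothetical helper, not part of the original module):
# Scheduler scales a base value by schedule(progress), where progress is the
# fraction of nvalues consumed; with 'linear' the value decays from v to 0.
def _demo_scheduler():
    sched = Scheduler(v=1e-3, nvalues=100, schedule='linear')
    assert abs(sched.value() - 1e-3) < 1e-12             # step 0: full value
    assert abs(sched.value_steps(50) - 5e-4) < 1e-12     # halfway: half value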
class EpisodeStats:
def __init__(self, nsteps, nenvs):
self.episode_rewards = []
for i in range(nenvs):
self.episode_rewards.append([])
self.lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
self.nsteps = nsteps
self.nenvs = nenvs
def feed(self, rewards, masks):
rewards = np.reshape(rewards, [self.nenvs, self.nsteps])
masks = np.reshape(masks, [self.nenvs, self.nsteps])
for i in range(0, self.nenvs):
for j in range(0, self.nsteps):
self.episode_rewards[i].append(rewards[i][j])
if masks[i][j]:
l = len(self.episode_rewards[i])
s = sum(self.episode_rewards[i])
self.lenbuffer.append(l)
self.rewbuffer.append(s)
self.episode_rewards[i] = []
def mean_length(self):
if self.lenbuffer:
return np.mean(self.lenbuffer)
else:
return 0 # on the first params dump, no episodes are finished
def mean_reward(self):
if self.rewbuffer:
return np.mean(self.rewbuffer)
else:
return 0
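# Illustrative sketch (hypothetical helper, not part of the original module):
# EpisodeStats accumulates per-env rewards and flushes an episode into the
# rolling buffers whenever the mask is set at that step.
def _demo_episode_stats():
    stats = EpisodeStats(nsteps=4, nenvs=1)
    stats.feed([1.0, 2.0, 3.0, 4.0], [False, False, True, False])
    assert stats.mean_length() == 3 and stats.mean_reward() == 6.0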
# For ACER
def get_by_index(x, idx):
assert(len(x.get_shape()) == 2)
assert(len(idx.get_shape()) == 1)
idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx
y = tf.gather(tf.reshape(x, [-1]), # flatten input
idx_flattened) # use flattened indices
return y
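# Illustrative sketch (hypothetical helper, not part of the original module):
# get_by_index picks x[i, idx[i]] per row by gathering from the flattened x.
def _demo_get_by_index():
    x = tf.constant([[10.0, 11.0], [20.0, 21.0]])
    idx = tf.constant([1, 0])
    return get_by_index(x, idx)   # evaluates to [11., 20.] in a session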
def check_shape(ts,shapes):
i = 0
for (t,shape) in zip(ts,shapes):
        assert t.get_shape().as_list() == shape, "id " + str(i) + " shape " + str(t.get_shape()) + " expected " + str(shape)
i += 1
def avg_norm(t):
return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1)))
def gradient_add(g1, g2, param):
print([g1, g2, param.name])
assert (not (g1 is None and g2 is None)), param.name
if g1 is None:
return g2
elif g2 is None:
return g1
else:
return g1 + g2
def q_explained_variance(qpred, q):
_, vary = tf.nn.moments(q, axes=[0, 1])
_, varpred = tf.nn.moments(q - qpred, axes=[0, 1])
check_shape([vary, varpred], [[]] * 2)
return 1.0 - (varpred / vary)
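# Illustrative sketch (hypothetical helper, not part of the original module):
# numpy reference for the statistic above, 1 - Var[q - qpred] / Var[q]; a
# constant-offset prediction leaves zero residual variance, so ev == 1.
def _demo_explained_variance():
    q = np.array([[1.0, 2.0], [3.0, 4.0]])
    qpred = q + 0.1
    ev = 1.0 - np.var(q - qpred) / np.var(q)
    assert abs(ev - 1.0) < 1e-12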
| 9,348 | 32.035336 | 107 | py |
P3O | P3O-main/baselines/a2c/runner.py | import numpy as np
from baselines.a2c.utils import discount_with_dones
from baselines.common.runners import AbstractEnvRunner
class Runner(AbstractEnvRunner):
"""
We use this class to generate batches of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch of experiences
"""
def __init__(self, env, model, nsteps=5, gamma=0.99):
super().__init__(env=env, model=model, nsteps=nsteps)
self.gamma = gamma
self.batch_action_shape = [x if x is not None else -1 for x in model.train_model.action.shape.as_list()]
self.ob_dtype = model.train_model.X.dtype.as_numpy_dtype
def run(self):
# We initialize the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
mb_states = self.states
epinfos = []
for n in range(self.nsteps):
# Given observations, take action and value (V(s))
            # We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, states, _ = self.model.step(self.obs, S=self.states, M=self.dones)
# Append the experiences
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_values.append(values)
mb_dones.append(self.dones)
            # Take actions in the env and look at the results
obs, rewards, dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
self.states = states
self.dones = dones
self.obs = obs
mb_rewards.append(rewards)
mb_dones.append(self.dones)
# Batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.ob_dtype).swapaxes(1, 0).reshape(self.batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=self.model.train_model.action.dtype.name).swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
if self.gamma > 0.0:
# Discount/bootstrap off value fn
last_values = self.model.value(self.obs, S=self.states, M=self.dones).tolist()
for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
rewards = rewards.tolist()
dones = dones.tolist()
if dones[-1] == 0:
rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
mb_rewards[n] = rewards
mb_actions = mb_actions.reshape(self.batch_action_shape)
mb_rewards = mb_rewards.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, epinfos
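# Illustrative sketch (hypothetical helper, not part of the original module):
# the bootstrap above appends V(s_T) as a pseudo-reward when the rollout ends
# mid-episode, discounts through it, then drops the appended entry.
def _demo_bootstrap():
    rewards, dones, last_value, gamma = [1.0, 1.0], [False, False], 2.0, 0.5
    disc = discount_with_dones(rewards + [last_value], dones + [0], gamma)[:-1]
    assert disc == [2.0, 2.0]   # step 1: 1 + 0.5*2; step 0: 1 + 0.5*2.0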
| 3,241 | 41.103896 | 112 | py |
P3O | P3O-main/baselines/a2c/__init__.py | 0 | 0 | 0 | py | |
P3O | P3O-main/baselines/test/test.py | import os
import random
import time
import numpy as np
import os.path as osp
from baselines import logger
from collections import deque
from baselines.common import explained_variance, set_global_seeds
from baselines.common.policies import build_policy
try:
from mpi4py import MPI
except ImportError:
MPI = None
from baselines.test.runner import Runner
def constfn(val):
def f(_):
return val
return f
def betafn(val):
def f(frac):
return val*frac
return f
def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, kl_coef=1.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2, beta=15,
save_interval=0, load_path=None, model_fn=None, update_fn=None, init_fn=None, mpi_rank_weight=1, comm=None, **network_kwargs):
'''
Learn policy using SPG algorithm
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
log_interval: int number of timesteps between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
                                    should be less than or equal to the number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int number of timesteps between saving events
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
    if isinstance(beta, (int, float)): beta = betafn(beta)
else: assert callable(beta)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0)
# Instantiate the model object (that creates act_model and train_model)
if model_fn is None:
from baselines.test.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, kl_coef=kl_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
if eval_env is not None:
eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
epinfobuf = deque(maxlen=100)
if eval_env is not None:
eval_epinfobuf = deque(maxlen=100)
if init_fn is not None:
init_fn()
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
R_buffer = []
V_buffer = []
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
# Start timer
tstart = time.perf_counter()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
        # taunow = 2 + 2 * (1 - frac)  # annealed tau schedule, currently disabled
        taunow = 4
# Calculate the cliprange
cliprangenow = cliprange(frac)
betanow = beta(frac)
if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...')
# Get minibatch
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
if eval_env is not None:
eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
if update % log_interval == 0 and is_mpi_root: logger.info('Done.')
epinfobuf.extend(epinfos)
# print(epinfobuf)
if eval_env is not None:
eval_epinfobuf.extend(eval_epinfos)
mblossvals = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
model.assign_v()
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
res = model.train(lrnow, taunow, cliprangenow, betanow, *slices)
mblossvals.append(res)
# slices = (obs, returns, masks, actions, values, neglogpacs)
# res = model.train(lrnow, taunow, cliprangenow, betanow, *slices)
# mblossvals.append(res)
# if R_buffer is None:
# advs = returns - values
#
# # Normalize the advantages
# advs = (advs - advs.mean()) / (advs.std() + 1e-8)
# R_buffer
else: # recurrent version
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
                    mblossvals.append(model.train(lrnow, taunow, cliprangenow, betanow, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
# End timer
tnow = time.perf_counter()
        # Calculate the fps (frames per second)
fps = int(nbatch / (tnow - tstart))
if update_fn is not None:
update_fn(update)
if update % log_interval == 0 or update == 1:
            # Calculates whether the value function is a good predictor of the returns (ev close to 1)
            # or worse than predicting nothing (ev <= 0)
ev = explained_variance(values, returns)
logger.logkv("misc/serial_timesteps", update*nsteps)
logger.logkv("misc/nupdates", update)
logger.logkv("misc/total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("misc/explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
if eval_env is not None:
logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
logger.logkv('misc/time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv('loss/' + lossname, lossval)
logger.dumpkvs()
return model
# Avoid a division error when calculating the mean (if epinfos is empty, return np.nan instead of raising an error)
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
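# Illustrative sketch (hypothetical usage, env construction assumed): learn()
# expects a vectorized env, e.g. one built with baselines.common.cmd_util.make_vec_env.
def _demo_learn(env):
    return learn(network='mlp', env=env, total_timesteps=10000,
                 nsteps=2048, nminibatches=32, noptepochs=10,
                 lr=lambda f: 3e-4 * f, kl_coef=0.05)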
| 10,621 | 42.532787 | 184 | py |
P3O | P3O-main/baselines/test/model.py | import tensorflow as tf
import functools
from baselines.common.tf_util import get_session, save_variables, load_variables
from baselines.common.tf_util import initialize
from baselines.common.input import observation_placeholder
try:
from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer
from mpi4py import MPI
from baselines.common.mpi_util import sync_from_root
except ImportError:
MPI = None
class Model(object):
"""
We use this object to :
__init__:
- Creates the step_model
- Creates the train_model
train():
- Make the training part (feedforward and retropropagation of gradients)
save/load():
- Save load the model
"""
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, kl_coef, vf_coef, max_grad_norm, mpi_rank_weight=1, comm=None, microbatch_size=None,boundaction=False):
self.sess = sess = get_session()
if MPI is not None and comm is None:
comm = MPI.COMM_WORLD
# observation_placeholder(ob_space, batch_size=nbatch_train)
with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
# CREATE OUR TWO MODELS
# act_model that is used for sampling
act_model = policy(nbatch_act, 1, sess)
# Train model for training
if microbatch_size is None:
train_model = policy(nbatch_train, nsteps, sess)
else:
train_model = policy(microbatch_size, nsteps, sess)
with tf.variable_scope("oldpi", reuse=tf.AUTO_REUSE):
oldpi = policy(nbatch_train, nsteps, sess,observ_placeholder=train_model.X)
# oldpi = policy(nbatch_train, nsteps, sess, observation_placeholder=)
# CREATE THE PLACEHOLDERS
self.A = A = train_model.pdtype.sample_placeholder([None])
self.ADV = ADV = tf.placeholder(tf.float32, [None])
self.R = R = tf.placeholder(tf.float32, [None])
# Keep track of old actor
self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
# Keep track of old critic
self.LR = LR = tf.placeholder(tf.float32, [])
# Cliprange
self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, [])
self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None])
self.BETA = BETA = tf.placeholder(tf.float32, [])
self.TAU = TAU = tf.placeholder(tf.float32, [])
self.assign = [tf.assign(oldv, newv)
for (oldv, newv) in zip(get_variables("oldpi"), get_variables("model"))]
neglogpac = train_model.pd.neglogp(A)
# Calculate the entropy
        # Entropy is used to improve exploration by limiting premature convergence to a suboptimal policy.
entropy = tf.reduce_mean(train_model.pd.entropy())
# CALCULATE THE LOSS
# Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss
# Clip the value to reduce variability during Critic training
# Get the predicted value
vpred1 = train_model.vf
# vpredclipped = tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE) + OLDVPRED
# Unclipped value
# vf_loss1 = tf.square(vpred2 - R)
vf_loss2 = tf.square(vpred1 - R)
# vf_maximum = tf.maximum(vf_loss1, vf_loss2)
vf_loss = .5 * tf.reduce_mean(vf_loss2)
# Calculate ratio (pi current policy / pi old policy)
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
fr_kl_loss = kl_coef*oldpi.pd.kl(train_model.pd)
actor_loss = -ADV*(4.0/self.TAU)*tf.sigmoid(ratio*self.TAU - self.TAU)
pg_loss = tf.reduce_mean(actor_loss) + tf.reduce_mean(fr_kl_loss) - entropy * ent_coef
#static
approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
rAt = tf.reduce_mean(-ADV * ratio)
# tf.abs(ratio - 1.0) DEON metric
ptadv = (tf.math.sign(ADV) + 1) / 2
nta = (-1 * tf.math.sign(ratio -1) + 1) / 2
ntadv = (-1*tf.math.sign(ADV) + 1) / 2
pta = (tf.math.sign(ratio -1) + 1) / 2
unnormal_pt = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), 0)) * ptadv*nta)
unnormal_nt = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), 0)) * ntadv*pta)
# Total loss
# UPDATE THE PARAMETERS USING LOSS
# 1. Get the model parameters
params = tf.trainable_variables('model')
# 2. Build our trainer
self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
# 3. Calculate the gradients
grads_and_var = self.trainer.compute_gradients(pg_loss, params)
grads, var = zip(*grads_and_var)
if max_grad_norm is not None:
# Clip the gradients (normalize)
# pass
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads_and_var = list(zip(grads, var))
# zip aggregate each gradient with parameters associated
# For instance zip(ABCD, xyza) => Ax, By, Cz, Da
self.grads = grads
self.var = var
self._train_op = self.trainer.apply_gradients(grads_and_var)
        self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac', 'rAt', 'unnormal_pt', 'unnormal_nt']
self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac, rAt, unnormal_pt, unnormal_nt]
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = functools.partial(save_variables, sess=sess)
self.load = functools.partial(load_variables, sess=sess)
initialize()
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
if MPI is not None:
sync_from_root(sess, global_variables, comm=comm) #pylint: disable=E1101
def train(self, lr, tau, cliprange, beta, obs, returns, masks, actions, values, neglogpacs, states=None):
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# Returns = R + yV(s')
advs = returns - values
# Normalize the advantages
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
td_map = {
self.train_model.X: obs,
self.A: actions,
self.ADV : advs,
self.R : returns,
self.LR : lr,
self.CLIPRANGE: cliprange,
self.BETA: beta,
self.OLDNEGLOGPAC : neglogpacs,
self.OLDVPRED: values,
self.TAU: tau,
}
if states is not None:
td_map[self.train_model.S] = states
td_map[self.train_model.M] = masks
return self.sess.run(
self.stats_list + [self._train_op],
td_map
)[:-1]
def assign_v(self):
self.sess.run(self.assign)
def get_variables(scope):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope)
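# Illustrative sketch (hypothetical helper, not part of the original module):
# the actor objective above replaces the raw ratio with the smooth surrogate
# f(r) = (4/tau) * sigmoid(tau * (r - 1)). Its slope at r = 1 is exactly 1,
# matching the vanilla policy gradient there, while it saturates for ratios
# far from 1, bounding the incentive for large policy updates.
def _demo_sigmoid_surrogate(tau=4.0):
    import numpy as np
    f = lambda r: (4.0 / tau) / (1.0 + np.exp(-tau * (r - 1.0)))
    eps = 1e-6
    slope_at_one = (f(1.0 + eps) - f(1.0 - eps)) / (2.0 * eps)
    assert abs(slope_at_one - 1.0) < 1e-4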
| 7,188 | 37.650538 | 137 | py |
P3O | P3O-main/baselines/test/defaults.py | def mujoco():
return dict(
nsteps=2048,
nminibatches=32,
lam=0.95,
gamma=0.99,
noptepochs=10,
log_interval=1,
ent_coef=0.01,
kl_coef=0.05,
lr=lambda f: 3e-4*f,
cliprange=0.2,
value_network='copy',
squash=False
#quan
)
def mujoco_bak():
return dict(
nsteps=2048,
nminibatches=32,
lam=0.95,
gamma=0.99,
noptepochs=10,
log_interval=1,
ent_coef=0.01,
kl_coef=1,
lr=lambda f: 3e-4 * f,
cliprange=0.2,
value_network='copy'
)
# def mujoco_original():
# return dict(
# nsteps=2048,
# nminibatches=32,
# lam=0.95,
# gamma=0.99,
# noptepochs=10,
# log_interval=1,
# ent_coef=0.0,
# kl_coef=1,
# lr=lambda f: 2.5e-4 * f,
# cliprange=0.2,
# value_network='copy'
# )
# 3e-4
# best_ctn 1e4_fix best_dst 1e4?
# 2.5e-4
def atari():
return dict(
nsteps=128, nminibatches=4,
lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
ent_coef=.01,
kl_coef=1,
lr=lambda f : f * 2.5e-4,
cliprange=0.1,
# value_network='copy'
)
def retro():
return atari()
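# Illustrative sketch (hypothetical helper, not part of the original module):
# these dicts are merged into learn()'s keyword arguments by the run script;
# the lr entry is a schedule over the remaining-training fraction f.
def _demo_defaults():
    kwargs = mujoco()
    assert abs(kwargs['lr'](1.0) - 3e-4) < 1e-12   # start of training
    assert kwargs['lr'](0.0) == 0.0                # end of training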
| 1,304 | 19.390625 | 59 | py |
P3O | P3O-main/baselines/test/runner.py | import numpy as np
from baselines.common.runners import AbstractEnvRunner
class Runner(AbstractEnvRunner):
"""
We use this object to make a mini batch of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch
"""
def __init__(self, *, env, model, nsteps, gamma, lam):
super().__init__(env=env, model=model, nsteps=nsteps)
# Lambda used in GAE (General Advantage Estimation)
self.lam = lam
# Discount rate
self.gamma = gamma
def run(self):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
# For n in range number of steps
for _ in range(self.nsteps):
            # Given observations, get action value and neglogpacs
            # We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, self.states, neglogpacs = self.model.step(self.obs, S=self.states, M=self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
            # Take actions in the env and look at the results
            # Infos contains a ton of useful information
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
# print(maybeepinfo)
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, S=self.states, M=self.dones)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
# print(sf01())
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
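# Illustrative sketch (hypothetical helper, not part of the original module):
# the GAE recursion used in run() above, for a 2-step rollout with no
# terminations: delta_t = r_t + gamma*V_{t+1} - V_t and
# A_t = delta_t + gamma*lam*A_{t+1}, with returns = advantages + values.
def _demo_gae():
    gamma, lam = 0.99, 0.95
    rewards = np.array([1.0, 1.0])
    values = np.array([0.5, 0.5])
    last_value = 0.5
    advs = np.zeros(2)
    lastgaelam = 0.0
    for t in reversed(range(2)):
        nextvalue = last_value if t == 1 else values[t + 1]
        delta = rewards[t] + gamma * nextvalue - values[t]
        advs[t] = lastgaelam = delta + gamma * lam * lastgaelam
    return advs + values   # the mb_returns fed to the value loss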
| 3,255 | 40.21519 | 109 | py |
P3O | P3O-main/baselines/test/__init__.py | 0 | 0 | 0 | py | |
P3O | P3O-main/baselines/acktr/acktr.py | import os.path as osp
import time
import functools
import tensorflow as tf
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.common.policies import build_policy
from baselines.common.tf_util import get_session, save_variables, load_variables
from baselines.a2c.runner import Runner
from baselines.a2c.utils import Scheduler, find_trainable_variables
from baselines.acktr import kfac
from baselines.ppo2.ppo2 import safemean
from collections import deque
class Model(object):
def __init__(self, policy, ob_space, ac_space, nenvs,total_timesteps, nprocs=32, nsteps=20,
ent_coef=0.01, vf_coef=0.5, vf_fisher_coef=1.0, lr=2.5e-4, max_grad_norm=0.5,
kfac_clip=0.001, lrschedule='linear', is_async=True):
self.sess = sess = get_session()
nbatch = nenvs * nsteps
with tf.variable_scope('acktr_model', reuse=tf.AUTO_REUSE):
self.model = step_model = policy(nenvs, 1, sess=sess)
self.model2 = train_model = policy(nenvs*nsteps, nsteps, sess=sess)
A = train_model.pdtype.sample_placeholder([None])
ADV = tf.placeholder(tf.float32, [nbatch])
R = tf.placeholder(tf.float32, [nbatch])
PG_LR = tf.placeholder(tf.float32, [])
VF_LR = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
self.logits = train_model.pi
##training loss
pg_loss = tf.reduce_mean(ADV*neglogpac)
entropy = tf.reduce_mean(train_model.pd.entropy())
pg_loss = pg_loss - ent_coef * entropy
vf_loss = tf.losses.mean_squared_error(tf.squeeze(train_model.vf), R)
train_loss = pg_loss + vf_coef * vf_loss
##Fisher loss construction
self.pg_fisher = pg_fisher_loss = -tf.reduce_mean(neglogpac)
sample_net = train_model.vf + tf.random_normal(tf.shape(train_model.vf))
self.vf_fisher = vf_fisher_loss = - vf_fisher_coef*tf.reduce_mean(tf.pow(train_model.vf - tf.stop_gradient(sample_net), 2))
self.joint_fisher = joint_fisher_loss = pg_fisher_loss + vf_fisher_loss
self.params=params = find_trainable_variables("acktr_model")
self.grads_check = grads = tf.gradients(train_loss,params)
with tf.device('/gpu:0'):
self.optim = optim = kfac.KfacOptimizer(learning_rate=PG_LR, clip_kl=kfac_clip,\
momentum=0.9, kfac_update=1, epsilon=0.01,\
stats_decay=0.99, is_async=is_async, cold_iter=10, max_grad_norm=max_grad_norm)
# update_stats_op = optim.compute_and_apply_stats(joint_fisher_loss, var_list=params)
optim.compute_and_apply_stats(joint_fisher_loss, var_list=params)
train_op, q_runner = optim.apply_gradients(list(zip(grads,params)))
self.q_runner = q_runner
self.lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
def train(obs, states, rewards, masks, actions, values):
advs = rewards - values
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
for step in range(len(obs)):
cur_lr = self.lr.value()
                # print(cur_lr)  # debug
td_map = {train_model.X:obs, A:actions, ADV:advs, R:rewards, PG_LR:cur_lr, VF_LR:cur_lr}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
policy_loss, value_loss, policy_entropy, _ = sess.run(
[pg_loss, vf_loss, entropy, train_op],
td_map
)
return policy_loss, value_loss, policy_entropy
self.train = train
self.save = functools.partial(save_variables, sess=sess)
self.load = functools.partial(load_variables, sess=sess)
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.value = step_model.value
self.initial_state = step_model.initial_state
tf.global_variables_initializer().run(session=sess)
def learn(network, env, seed, total_timesteps=int(40e6), gamma=0.99, log_interval=100, nprocs=32, nsteps=20,
ent_coef=0.01, vf_coef=0.5, vf_fisher_coef=1.0, lr=0.25, max_grad_norm=0.5,
kfac_clip=0.001, save_interval=None, lrschedule='linear', load_path=None, is_async=True, **network_kwargs):
set_global_seeds(seed)
if network == 'cnn':
network_kwargs['one_dim_bias'] = True
policy = build_policy(env, network, **network_kwargs)
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
make_model = lambda : Model(policy, ob_space, ac_space, nenvs, total_timesteps, nprocs=nprocs, nsteps
=nsteps, ent_coef=ent_coef, vf_coef=vf_coef, vf_fisher_coef=
vf_fisher_coef, lr=lr, max_grad_norm=max_grad_norm, kfac_clip=kfac_clip,
lrschedule=lrschedule, is_async=is_async)
if save_interval and logger.get_dir():
import cloudpickle
with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:
fh.write(cloudpickle.dumps(make_model))
model = make_model()
if load_path is not None:
model.load(load_path)
runner = Runner(env, model, nsteps=nsteps, gamma=gamma)
epinfobuf = deque(maxlen=100)
nbatch = nenvs*nsteps
tstart = time.time()
coord = tf.train.Coordinator()
if is_async:
enqueue_threads = model.q_runner.create_threads(model.sess, coord=coord, start=True)
else:
enqueue_threads = []
for update in range(1, total_timesteps//nbatch+1):
obs, states, rewards, masks, actions, values, epinfos = runner.run()
epinfobuf.extend(epinfos)
policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)
model.old_obs = obs
nseconds = time.time()-tstart
fps = int((update*nbatch)/nseconds)
if update % log_interval == 0 or update == 1:
ev = explained_variance(values, rewards)
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", update*nbatch)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("policy_loss", float(policy_loss))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(ev))
logger.record_tabular("eprewmean", safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.record_tabular("eplenmean", safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.dump_tabular()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir():
savepath = osp.join(logger.get_dir(), 'checkpoint%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
coord.request_stop()
coord.join(enqueue_threads)
return model
| 7,144 | 43.65625 | 131 | py |
P3O | P3O-main/baselines/acktr/kfac.py | import tensorflow as tf
import numpy as np
import re
# flake8: noqa F403, F405
from baselines.acktr.kfac_utils import *
from functools import reduce
KFAC_OPS = ['MatMul', 'Conv2D', 'BiasAdd']
KFAC_DEBUG = False
class KfacOptimizer():
# note that KfacOptimizer will be truly synchronous (and thus deterministic) only if a single-threaded session is used
def __init__(self, learning_rate=0.01, momentum=0.9, clip_kl=0.01, kfac_update=2, stats_accum_iter=60, full_stats_init=False, cold_iter=100, cold_lr=None, is_async=False, async_stats=False, epsilon=1e-2, stats_decay=0.95, blockdiag_bias=False, channel_fac=False, factored_damping=False, approxT2=False, use_float64=False, weight_decay_dict={},max_grad_norm=0.5):
self.max_grad_norm = max_grad_norm
self._lr = learning_rate
self._momentum = momentum
self._clip_kl = clip_kl
self._channel_fac = channel_fac
self._kfac_update = kfac_update
self._async = is_async
self._async_stats = async_stats
self._epsilon = epsilon
self._stats_decay = stats_decay
self._blockdiag_bias = blockdiag_bias
self._approxT2 = approxT2
self._use_float64 = use_float64
self._factored_damping = factored_damping
self._cold_iter = cold_iter
        if cold_lr is None:
            # good heuristics
            self._cold_lr = self._lr  # * 3.
else:
self._cold_lr = cold_lr
self._stats_accum_iter = stats_accum_iter
self._weight_decay_dict = weight_decay_dict
self._diag_init_coeff = 0.
self._full_stats_init = full_stats_init
if not self._full_stats_init:
self._stats_accum_iter = self._cold_iter
self.sgd_step = tf.Variable(0, name='KFAC/sgd_step', trainable=False)
self.global_step = tf.Variable(
0, name='KFAC/global_step', trainable=False)
self.cold_step = tf.Variable(0, name='KFAC/cold_step', trainable=False)
self.factor_step = tf.Variable(
0, name='KFAC/factor_step', trainable=False)
self.stats_step = tf.Variable(
0, name='KFAC/stats_step', trainable=False)
self.vFv = tf.Variable(0., name='KFAC/vFv', trainable=False)
self.factors = {}
self.param_vars = []
self.stats = {}
self.stats_eigen = {}
def getFactors(self, g, varlist):
graph = tf.get_default_graph()
factorTensors = {}
fpropTensors = []
bpropTensors = []
opTypes = []
fops = []
def searchFactors(gradient, graph):
            # hard coded search strategy
bpropOp = gradient.op
bpropOp_name = bpropOp.name
bTensors = []
fTensors = []
            # combining additive gradients; assume they are the same op type and
            # independent
if 'AddN' in bpropOp_name:
factors = []
for g in gradient.op.inputs:
factors.append(searchFactors(g, graph))
op_names = [item['opName'] for item in factors]
# TO-DO: need to check all the attribute of the ops as well
                if KFAC_DEBUG:
                    print(gradient.name)
                    print(op_names)
                    print(len(np.unique(op_names)))
assert len(np.unique(op_names)) == 1, gradient.name + \
' is shared among different computation OPs'
bTensors = reduce(lambda x, y: x + y,
[item['bpropFactors'] for item in factors])
if len(factors[0]['fpropFactors']) > 0:
fTensors = reduce(
lambda x, y: x + y, [item['fpropFactors'] for item in factors])
fpropOp_name = op_names[0]
fpropOp = factors[0]['op']
else:
fpropOp_name = re.search(
'gradientsSampled(_[0-9]+|)/(.+?)_grad', bpropOp_name).group(2)
fpropOp = graph.get_operation_by_name(fpropOp_name)
if fpropOp.op_def.name in KFAC_OPS:
# Known OPs
###
bTensor = [
i for i in bpropOp.inputs if 'gradientsSampled' in i.name][-1]
bTensorShape = fpropOp.outputs[0].get_shape()
if bTensor.get_shape()[0].value == None:
bTensor.set_shape(bTensorShape)
bTensors.append(bTensor)
###
if fpropOp.op_def.name == 'BiasAdd':
fTensors = []
else:
fTensors.append(
[i for i in fpropOp.inputs if param.op.name not in i.name][0])
fpropOp_name = fpropOp.op_def.name
else:
# unknown OPs, block approximation used
bInputsList = [i for i in bpropOp.inputs[
0].op.inputs if 'gradientsSampled' in i.name if 'Shape' not in i.name]
if len(bInputsList) > 0:
bTensor = bInputsList[0]
bTensorShape = fpropOp.outputs[0].get_shape()
if len(bTensor.get_shape()) > 0 and bTensor.get_shape()[0].value == None:
bTensor.set_shape(bTensorShape)
bTensors.append(bTensor)
fpropOp_name = opTypes.append('UNK-' + fpropOp.op_def.name)
return {'opName': fpropOp_name, 'op': fpropOp, 'fpropFactors': fTensors, 'bpropFactors': bTensors}
for t, param in zip(g, varlist):
if KFAC_DEBUG:
print(('get factor for '+param.name))
factors = searchFactors(t, graph)
factorTensors[param] = factors
########
# check associated weights and bias for homogeneous coordinate representation
        # and check redundant factors
# TO-DO: there may be a bug to detect associate bias and weights for
# forking layer, e.g. in inception models.
for param in varlist:
factorTensors[param]['assnWeights'] = None
factorTensors[param]['assnBias'] = None
for param in varlist:
if factorTensors[param]['opName'] == 'BiasAdd':
factorTensors[param]['assnWeights'] = None
for item in varlist:
if len(factorTensors[item]['bpropFactors']) > 0:
if (set(factorTensors[item]['bpropFactors']) == set(factorTensors[param]['bpropFactors'])) and (len(factorTensors[item]['fpropFactors']) > 0):
factorTensors[param]['assnWeights'] = item
factorTensors[item]['assnBias'] = param
factorTensors[param]['bpropFactors'] = factorTensors[
item]['bpropFactors']
########
########
# concatenate the additive gradients along the batch dimension, i.e.
# assuming independence structure
for key in ['fpropFactors', 'bpropFactors']:
for i, param in enumerate(varlist):
if len(factorTensors[param][key]) > 0:
if (key + '_concat') not in factorTensors[param]:
name_scope = factorTensors[param][key][0].name.split(':')[
0]
with tf.name_scope(name_scope):
factorTensors[param][
key + '_concat'] = tf.concat(factorTensors[param][key], 0)
else:
factorTensors[param][key + '_concat'] = None
for j, param2 in enumerate(varlist[(i + 1):]):
if (len(factorTensors[param][key]) > 0) and (set(factorTensors[param2][key]) == set(factorTensors[param][key])):
factorTensors[param2][key] = factorTensors[param][key]
factorTensors[param2][
key + '_concat'] = factorTensors[param][key + '_concat']
########
if KFAC_DEBUG:
for items in zip(varlist, fpropTensors, bpropTensors, opTypes):
                print((items[0].name, factorTensors[items[0]]))
self.factors = factorTensors
return factorTensors
def getStats(self, factors, varlist):
if len(self.stats) == 0:
# initialize stats variables on CPU because eigen decomp is
# computed on CPU
with tf.device('/cpu'):
tmpStatsCache = {}
# search for tensor factors and
# use block diag approx for the bias units
for var in varlist:
fpropFactor = factors[var]['fpropFactors_concat']
bpropFactor = factors[var]['bpropFactors_concat']
opType = factors[var]['opName']
if opType == 'Conv2D':
Kh = var.get_shape()[0]
Kw = var.get_shape()[1]
C = fpropFactor.get_shape()[-1]
Oh = bpropFactor.get_shape()[1]
Ow = bpropFactor.get_shape()[2]
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels do not support
# homogeneous coordinate
var_assnBias = factors[var]['assnBias']
if var_assnBias:
factors[var]['assnBias'] = None
factors[var_assnBias]['assnWeights'] = None
##
for var in varlist:
fpropFactor = factors[var]['fpropFactors_concat']
bpropFactor = factors[var]['bpropFactors_concat']
opType = factors[var]['opName']
self.stats[var] = {'opName': opType,
'fprop_concat_stats': [],
'bprop_concat_stats': [],
'assnWeights': factors[var]['assnWeights'],
'assnBias': factors[var]['assnBias'],
}
if fpropFactor is not None:
if fpropFactor not in tmpStatsCache:
if opType == 'Conv2D':
Kh = var.get_shape()[0]
Kw = var.get_shape()[1]
C = fpropFactor.get_shape()[-1]
Oh = bpropFactor.get_shape()[1]
Ow = bpropFactor.get_shape()[2]
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels
# assume independence between input channels and spatial
# 2K-1 x 2K-1 covariance matrix and C x C covariance matrix
# factorization along the channels do not
# support homogeneous coordinate, assnBias
# is always None
fpropFactor2_size = Kh * Kw
slot_fpropFactor_stats2 = tf.Variable(tf.diag(tf.ones(
[fpropFactor2_size])) * self._diag_init_coeff, name='KFAC_STATS/' + fpropFactor.op.name, trainable=False)
self.stats[var]['fprop_concat_stats'].append(
slot_fpropFactor_stats2)
fpropFactor_size = C
else:
# 2K-1 x 2K-1 x C x C covariance matrix
# assume BHWC
fpropFactor_size = Kh * Kw * C
else:
# D x D covariance matrix
fpropFactor_size = fpropFactor.get_shape()[-1]
# use homogeneous coordinate
if not self._blockdiag_bias and self.stats[var]['assnBias']:
fpropFactor_size += 1
slot_fpropFactor_stats = tf.Variable(tf.diag(tf.ones(
[fpropFactor_size])) * self._diag_init_coeff, name='KFAC_STATS/' + fpropFactor.op.name, trainable=False)
self.stats[var]['fprop_concat_stats'].append(
slot_fpropFactor_stats)
if opType != 'Conv2D':
tmpStatsCache[fpropFactor] = self.stats[
var]['fprop_concat_stats']
else:
self.stats[var][
'fprop_concat_stats'] = tmpStatsCache[fpropFactor]
if bpropFactor is not None:
# no need to collect backward stats for bias vectors if
# using homogeneous coordinates
if not((not self._blockdiag_bias) and self.stats[var]['assnWeights']):
if bpropFactor not in tmpStatsCache:
slot_bpropFactor_stats = tf.Variable(tf.diag(tf.ones([bpropFactor.get_shape(
)[-1]])) * self._diag_init_coeff, name='KFAC_STATS/' + bpropFactor.op.name, trainable=False)
self.stats[var]['bprop_concat_stats'].append(
slot_bpropFactor_stats)
tmpStatsCache[bpropFactor] = self.stats[
var]['bprop_concat_stats']
else:
self.stats[var][
'bprop_concat_stats'] = tmpStatsCache[bpropFactor]
return self.stats
def compute_and_apply_stats(self, loss_sampled, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
stats = self.compute_stats(loss_sampled, var_list=varlist)
return self.apply_stats(stats)
def compute_stats(self, loss_sampled, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
gs = tf.gradients(loss_sampled, varlist, name='gradientsSampled')
self.gs = gs
factors = self.getFactors(gs, varlist)
stats = self.getStats(factors, varlist)
updateOps = []
statsUpdates = {}
statsUpdates_cache = {}
for var in varlist:
opType = factors[var]['opName']
fops = factors[var]['op']
fpropFactor = factors[var]['fpropFactors_concat']
fpropStats_vars = stats[var]['fprop_concat_stats']
bpropFactor = factors[var]['bpropFactors_concat']
bpropStats_vars = stats[var]['bprop_concat_stats']
SVD_factors = {}
for stats_var in fpropStats_vars:
stats_var_dim = int(stats_var.get_shape()[0])
if stats_var not in statsUpdates_cache:
old_fpropFactor = fpropFactor
B = (tf.shape(fpropFactor)[0]) # batch size
if opType == 'Conv2D':
strides = fops.get_attr("strides")
padding = fops.get_attr("padding")
convkernel_size = var.get_shape()[0:3]
KH = int(convkernel_size[0])
KW = int(convkernel_size[1])
C = int(convkernel_size[2])
flatten_size = int(KH * KW * C)
Oh = int(bpropFactor.get_shape()[1])
Ow = int(bpropFactor.get_shape()[2])
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels
# assume independence among input channels
# factor = B x 1 x 1 x (KH xKW x C)
# patches = B x Oh x Ow x (KH xKW x C)
if len(SVD_factors) == 0:
if KFAC_DEBUG:
print(('approx %s act factor with rank-1 SVD factors' % (var.name)))
# find closest rank-1 approx to the feature map
S, U, V = tf.batch_svd(tf.reshape(
fpropFactor, [-1, KH * KW, C]))
# get rank-1 approx slides
sqrtS1 = tf.expand_dims(tf.sqrt(S[:, 0, 0]), 1)
patches_k = U[:, :, 0] * sqrtS1 # B x KH*KW
full_factor_shape = fpropFactor.get_shape()
patches_k.set_shape(
[full_factor_shape[0], KH * KW])
patches_c = V[:, :, 0] * sqrtS1 # B x C
patches_c.set_shape([full_factor_shape[0], C])
SVD_factors[C] = patches_c
SVD_factors[KH * KW] = patches_k
fpropFactor = SVD_factors[stats_var_dim]
else:
# poor mem usage implementation
patches = tf.extract_image_patches(fpropFactor, ksizes=[1, convkernel_size[
0], convkernel_size[1], 1], strides=strides, rates=[1, 1, 1, 1], padding=padding)
if self._approxT2:
if KFAC_DEBUG:
print(('approxT2 act fisher for %s' % (var.name)))
# T^2 terms * 1/T^2, size: B x C
fpropFactor = tf.reduce_mean(patches, [1, 2])
else:
# size: (B x Oh x Ow) x C
fpropFactor = tf.reshape(
patches, [-1, flatten_size]) / Oh / Ow
fpropFactor_size = int(fpropFactor.get_shape()[-1])
if stats_var_dim == (fpropFactor_size + 1) and not self._blockdiag_bias:
if opType == 'Conv2D' and not self._approxT2:
# correct padding for numerical stability (we
# divided out OhxOw from activations for T1 approx)
fpropFactor = tf.concat([fpropFactor, tf.ones(
[tf.shape(fpropFactor)[0], 1]) / Oh / Ow], 1)
else:
# use homogeneous coordinates
fpropFactor = tf.concat(
[fpropFactor, tf.ones([tf.shape(fpropFactor)[0], 1])], 1)
# average over the number of data points in a batch
# divided by B
cov = tf.matmul(fpropFactor, fpropFactor,
transpose_a=True) / tf.cast(B, tf.float32)
updateOps.append(cov)
statsUpdates[stats_var] = cov
if opType != 'Conv2D':
# HACK: for convolution we recompute fprop stats for
# every layer including forking layers
statsUpdates_cache[stats_var] = cov
for stats_var in bpropStats_vars:
stats_var_dim = int(stats_var.get_shape()[0])
if stats_var not in statsUpdates_cache:
old_bpropFactor = bpropFactor
bpropFactor_shape = bpropFactor.get_shape()
B = tf.shape(bpropFactor)[0] # batch size
C = int(bpropFactor_shape[-1]) # num channels
if opType == 'Conv2D' or len(bpropFactor_shape) == 4:
if fpropFactor is not None:
if self._approxT2:
if KFAC_DEBUG:
print(('approxT2 grad fisher for %s' % (var.name)))
bpropFactor = tf.reduce_sum(
bpropFactor, [1, 2]) # T^2 terms * 1/T^2
else:
bpropFactor = tf.reshape(
bpropFactor, [-1, C]) * Oh * Ow # T * 1/T terms
else:
# just doing block diag approx. spatial independent
# structure does not apply here. summing over
# spatial locations
if KFAC_DEBUG:
print(('block diag approx fisher for %s' % (var.name)))
bpropFactor = tf.reduce_sum(bpropFactor, [1, 2])
# assume sampled loss is averaged. TO-DO:figure out better
# way to handle this
bpropFactor *= tf.to_float(B)
##
cov_b = tf.matmul(
bpropFactor, bpropFactor, transpose_a=True) / tf.to_float(tf.shape(bpropFactor)[0])
updateOps.append(cov_b)
statsUpdates[stats_var] = cov_b
statsUpdates_cache[stats_var] = cov_b
if KFAC_DEBUG:
aKey = list(statsUpdates.keys())[0]
statsUpdates[aKey] = tf.Print(statsUpdates[aKey],
[tf.convert_to_tensor('step:'),
self.global_step,
tf.convert_to_tensor(
'computing stats'),
])
self.statsUpdates = statsUpdates
return statsUpdates
def apply_stats(self, statsUpdates):
""" compute stats and update/apply the new stats to the running average
"""
def updateAccumStats():
if self._full_stats_init:
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)), tf.no_op)
else:
return tf.group(*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter))
def updateRunningAvgStats(statsUpdates, fac_iter=1):
# return tf.cond(tf.greater_equal(self.factor_step,
# tf.convert_to_tensor(fac_iter)), lambda:
# tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)
return tf.group(*self._apply_stats(statsUpdates))
if self._async_stats:
# asynchronous stats update
update_stats = self._apply_stats(statsUpdates)
queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[
item.get_shape() for item in update_stats])
enqueue_op = queue.enqueue(update_stats)
def dequeue_stats_op():
return queue.dequeue()
self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(
0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))
else:
# synchronous stats update
update_stats_op = tf.cond(tf.greater_equal(
self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)
self._update_stats_op = update_stats_op
return update_stats_op
def _apply_stats(self, statsUpdates, accumulate=False, accumulateCoeff=0.):
updateOps = []
# obtain the stats var list
for stats_var in statsUpdates:
stats_new = statsUpdates[stats_var]
if accumulate:
# simple superbatch averaging
update_op = tf.assign_add(
stats_var, accumulateCoeff * stats_new, use_locking=True)
else:
# exponential running averaging
update_op = tf.assign(
stats_var, stats_var * self._stats_decay, use_locking=True)
update_op = tf.assign_add(
update_op, (1. - self._stats_decay) * stats_new, use_locking=True)
updateOps.append(update_op)
with tf.control_dependencies(updateOps):
stats_step_op = tf.assign_add(self.stats_step, 1)
if KFAC_DEBUG:
stats_step_op = (tf.Print(stats_step_op,
[tf.convert_to_tensor('step:'),
self.global_step,
tf.convert_to_tensor('fac step:'),
self.factor_step,
tf.convert_to_tensor('sgd step:'),
self.sgd_step,
tf.convert_to_tensor('Accum:'),
tf.convert_to_tensor(accumulate),
tf.convert_to_tensor('Accum coeff:'),
tf.convert_to_tensor(accumulateCoeff),
tf.convert_to_tensor('stat step:'),
self.stats_step, updateOps[0], updateOps[1]]))
return [stats_step_op, ]
def getStatsEigen(self, stats=None):
if len(self.stats_eigen) == 0:
stats_eigen = {}
if stats is None:
stats = self.stats
tmpEigenCache = {}
with tf.device('/cpu:0'):
for var in stats:
for key in ['fprop_concat_stats', 'bprop_concat_stats']:
for stats_var in stats[var][key]:
if stats_var not in tmpEigenCache:
stats_dim = stats_var.get_shape()[1].value
e = tf.Variable(tf.ones(
[stats_dim]), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/e', trainable=False)
Q = tf.Variable(tf.diag(tf.ones(
[stats_dim])), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/Q', trainable=False)
stats_eigen[stats_var] = {'e': e, 'Q': Q}
tmpEigenCache[
stats_var] = stats_eigen[stats_var]
else:
stats_eigen[stats_var] = tmpEigenCache[
stats_var]
self.stats_eigen = stats_eigen
return self.stats_eigen
def computeStatsEigen(self):
""" compute the eigen decomp using copied var stats to avoid concurrent read/write from other queue """
# TO-DO: figure out why this op has delays (possibly moving
# eigenvectors around?)
with tf.device('/cpu:0'):
def removeNone(tensor_list):
local_list = []
for item in tensor_list:
if item is not None:
local_list.append(item)
return local_list
def copyStats(var_list):
print("copying stats to buffer tensors before eigen decomp")
redundant_stats = {}
copied_list = []
for item in var_list:
if item is not None:
if item not in redundant_stats:
if self._use_float64:
redundant_stats[item] = tf.cast(
tf.identity(item), tf.float64)
else:
redundant_stats[item] = tf.identity(item)
copied_list.append(redundant_stats[item])
else:
copied_list.append(None)
return copied_list
#stats = [copyStats(self.fStats), copyStats(self.bStats)]
#stats = [self.fStats, self.bStats]
stats_eigen = self.stats_eigen
computedEigen = {}
eigen_reverse_lookup = {}
updateOps = []
# sync copied stats
# with tf.control_dependencies(removeNone(stats[0]) +
# removeNone(stats[1])):
with tf.control_dependencies([]):
for stats_var in stats_eigen:
if stats_var not in computedEigen:
eigens = tf.self_adjoint_eig(stats_var)
e = eigens[0]
Q = eigens[1]
if self._use_float64:
e = tf.cast(e, tf.float32)
Q = tf.cast(Q, tf.float32)
updateOps.append(e)
updateOps.append(Q)
computedEigen[stats_var] = {'e': e, 'Q': Q}
eigen_reverse_lookup[e] = stats_eigen[stats_var]['e']
eigen_reverse_lookup[Q] = stats_eigen[stats_var]['Q']
self.eigen_reverse_lookup = eigen_reverse_lookup
self.eigen_update_list = updateOps
if KFAC_DEBUG:
self.eigen_update_list = [item for item in updateOps]
with tf.control_dependencies(updateOps):
updateOps.append(tf.Print(tf.constant(
0.), [tf.convert_to_tensor('computed factor eigen')]))
return updateOps
def applyStatsEigen(self, eigen_list):
updateOps = []
print(('updating %d eigenvalue/vectors' % len(eigen_list)))
for i, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
stats_eigen_var = self.eigen_reverse_lookup[mark]
updateOps.append(
tf.assign(stats_eigen_var, tensor, use_locking=True))
with tf.control_dependencies(updateOps):
factor_step_op = tf.assign_add(self.factor_step, 1)
updateOps.append(factor_step_op)
if KFAC_DEBUG:
updateOps.append(tf.Print(tf.constant(
0.), [tf.convert_to_tensor('updated kfac factors')]))
return updateOps
def getKfacPrecondUpdates(self, gradlist, varlist):
updatelist = []
vg = 0.
assert len(self.stats) > 0
assert len(self.stats_eigen) > 0
assert len(self.factors) > 0
counter = 0
grad_dict = {var: grad for grad, var in zip(gradlist, varlist)}
for grad, var in zip(gradlist, varlist):
GRAD_RESHAPE = False
GRAD_TRANSPOSE = False
fpropFactoredFishers = self.stats[var]['fprop_concat_stats']
bpropFactoredFishers = self.stats[var]['bprop_concat_stats']
if (len(fpropFactoredFishers) + len(bpropFactoredFishers)) > 0:
counter += 1
GRAD_SHAPE = grad.get_shape()
if len(grad.get_shape()) > 2:
# reshape conv kernel parameters
KW = int(grad.get_shape()[0])
KH = int(grad.get_shape()[1])
C = int(grad.get_shape()[2])
D = int(grad.get_shape()[3])
if len(fpropFactoredFishers) > 1 and self._channel_fac:
# reshape conv kernel parameters into tensor
grad = tf.reshape(grad, [KW * KH, C, D])
else:
# reshape conv kernel parameters into 2D grad
grad = tf.reshape(grad, [-1, D])
GRAD_RESHAPE = True
elif len(grad.get_shape()) == 1:
# reshape bias or 1D parameters
D = int(grad.get_shape()[0])
grad = tf.expand_dims(grad, 0)
GRAD_RESHAPE = True
else:
# 2D parameters
C = int(grad.get_shape()[0])
D = int(grad.get_shape()[1])
if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias:
# use homogeneous coordinates only works for 2D grad.
# TO-DO: figure out how to factorize bias grad
# stack bias grad
var_assnBias = self.stats[var]['assnBias']
grad = tf.concat(
[grad, tf.expand_dims(grad_dict[var_assnBias], 0)], 0)
# project gradient to eigen space and reshape the eigenvalues
# for broadcasting
eigVals = []
for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
e = detectMinVal(self.stats_eigen[stats][
'e'], var, name='act', debug=KFAC_DEBUG)
Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='act')
eigVals.append(e)
grad = gmatmul(Q, grad, transpose_a=True, reduce_dim=idx)
for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
e = detectMinVal(self.stats_eigen[stats][
'e'], var, name='grad', debug=KFAC_DEBUG)
Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='grad')
eigVals.append(e)
grad = gmatmul(grad, Q, transpose_b=False, reduce_dim=idx)
##
#####
# whiten using eigenvalues
weightDecayCoeff = 0.
if var in self._weight_decay_dict:
weightDecayCoeff = self._weight_decay_dict[var]
if KFAC_DEBUG:
print(('weight decay coeff for %s is %f' % (var.name, weightDecayCoeff)))
if self._factored_damping:
if KFAC_DEBUG:
print(('use factored damping for %s' % (var.name)))
coeffs = 1.
num_factors = len(eigVals)
# compute the ratio of two trace norm of the left and right
# KFac matrices, and their generalization
if len(eigVals) == 1:
damping = self._epsilon + weightDecayCoeff
else:
damping = tf.pow(
self._epsilon + weightDecayCoeff, 1. / num_factors)
eigVals_tnorm_avg = [tf.reduce_mean(
tf.abs(e)) for e in eigVals]
for e, e_tnorm in zip(eigVals, eigVals_tnorm_avg):
eig_tnorm_negList = [
item for item in eigVals_tnorm_avg if item != e_tnorm]
if len(eigVals) == 1:
adjustment = 1.
elif len(eigVals) == 2:
adjustment = tf.sqrt(
e_tnorm / eig_tnorm_negList[0])
else:
eig_tnorm_negList_prod = reduce(
lambda x, y: x * y, eig_tnorm_negList)
adjustment = tf.pow(
tf.pow(e_tnorm, num_factors - 1.) / eig_tnorm_negList_prod, 1. / num_factors)
coeffs *= (e + adjustment * damping)
else:
coeffs = 1.
damping = (self._epsilon + weightDecayCoeff)
for e in eigVals:
coeffs *= e
coeffs += damping
#grad = tf.Print(grad, [tf.convert_to_tensor('1'), tf.convert_to_tensor(var.name), grad.get_shape()])
grad /= coeffs
#grad = tf.Print(grad, [tf.convert_to_tensor('2'), tf.convert_to_tensor(var.name), grad.get_shape()])
#####
# project gradient back to euclidean space
for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
grad = gmatmul(Q, grad, transpose_a=False, reduce_dim=idx)
for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
grad = gmatmul(grad, Q, transpose_b=True, reduce_dim=idx)
##
#grad = tf.Print(grad, [tf.convert_to_tensor('3'), tf.convert_to_tensor(var.name), grad.get_shape()])
if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias:
# use homogeneous coordinates only works for 2D grad.
# TO-DO: figure out how to factorize bias grad
# un-stack bias grad
var_assnBias = self.stats[var]['assnBias']
C_plus_one = int(grad.get_shape()[0])
grad_assnBias = tf.reshape(tf.slice(grad,
begin=[
C_plus_one - 1, 0],
size=[1, -1]), var_assnBias.get_shape())
grad_assnWeights = tf.slice(grad,
begin=[0, 0],
size=[C_plus_one - 1, -1])
grad_dict[var_assnBias] = grad_assnBias
grad = grad_assnWeights
#grad = tf.Print(grad, [tf.convert_to_tensor('4'), tf.convert_to_tensor(var.name), grad.get_shape()])
if GRAD_RESHAPE:
grad = tf.reshape(grad, GRAD_SHAPE)
grad_dict[var] = grad
print(('projecting %d gradient matrices' % counter))
for g, var in zip(gradlist, varlist):
grad = grad_dict[var]
### clipping ###
if KFAC_DEBUG:
print(('apply clipping to %s' % (var.name)))
tf.Print(grad, [tf.sqrt(tf.reduce_sum(tf.pow(grad, 2)))], "Euclidean norm of new grad")
local_vg = tf.reduce_sum(grad * g * (self._lr * self._lr))
vg += local_vg
        # rescale everything
if KFAC_DEBUG:
print('apply vFv clipping')
scaling = tf.minimum(1., tf.sqrt(self._clip_kl / vg))
if KFAC_DEBUG:
scaling = tf.Print(scaling, [tf.convert_to_tensor(
'clip: '), scaling, tf.convert_to_tensor(' vFv: '), vg])
with tf.control_dependencies([tf.assign(self.vFv, vg)]):
updatelist = [grad_dict[var] for var in varlist]
for i, item in enumerate(updatelist):
updatelist[i] = scaling * item
return updatelist
def compute_gradients(self, loss, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
g = tf.gradients(loss, varlist)
return [(a, b) for a, b in zip(g, varlist)]
def apply_gradients_kfac(self, grads):
g, varlist = list(zip(*grads))
if len(self.stats_eigen) == 0:
self.getStatsEigen()
qr = None
# launch eigen-decomp on a queue thread
if self._async:
print('Use async eigen decomp')
# get a list of factor loading tensors
factorOps_dummy = self.computeStatsEigen()
# define a queue for the list of factor loading tensors
queue = tf.FIFOQueue(1, [item.dtype for item in factorOps_dummy], shapes=[
item.get_shape() for item in factorOps_dummy])
enqueue_op = tf.cond(tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update), tf.convert_to_tensor(
0)), tf.greater_equal(self.stats_step, self._stats_accum_iter)), lambda: queue.enqueue(self.computeStatsEigen()), tf.no_op)
def dequeue_op():
return queue.dequeue()
qr = tf.train.QueueRunner(queue, [enqueue_op])
updateOps = []
global_step_op = tf.assign_add(self.global_step, 1)
updateOps.append(global_step_op)
with tf.control_dependencies([global_step_op]):
# compute updates
            assert self._update_stats_op is not None
updateOps.append(self._update_stats_op)
dependency_list = []
if not self._async:
dependency_list.append(self._update_stats_op)
with tf.control_dependencies(dependency_list):
def no_op_wrapper():
return tf.group(*[tf.assign_add(self.cold_step, 1)])
if not self._async:
# synchronous eigen-decomp updates
updateFactorOps = tf.cond(tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update),
tf.convert_to_tensor(0)),
tf.greater_equal(self.stats_step, self._stats_accum_iter)), lambda: tf.group(*self.applyStatsEigen(self.computeStatsEigen())), no_op_wrapper)
else:
# asynchronous eigen-decomp updates using queue
updateFactorOps = tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter),
lambda: tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(0)),
tf.no_op,
lambda: tf.group(
*self.applyStatsEigen(dequeue_op())),
),
no_op_wrapper)
updateOps.append(updateFactorOps)
with tf.control_dependencies([updateFactorOps]):
def gradOp():
return list(g)
def getKfacGradOp():
return self.getKfacPrecondUpdates(g, varlist)
u = tf.cond(tf.greater(self.factor_step,
tf.convert_to_tensor(0)), getKfacGradOp, gradOp)
optim = tf.train.MomentumOptimizer(
self._lr * (1. - self._momentum), self._momentum)
#optim = tf.train.AdamOptimizer(self._lr, epsilon=0.01)
def optimOp():
def updateOptimOp():
if self._full_stats_init:
return tf.cond(tf.greater(self.factor_step, tf.convert_to_tensor(0)), lambda: optim.apply_gradients(list(zip(u, varlist))), tf.no_op)
else:
return optim.apply_gradients(list(zip(u, varlist)))
if self._full_stats_init:
return tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter), updateOptimOp, tf.no_op)
else:
return tf.cond(tf.greater_equal(self.sgd_step, self._cold_iter), updateOptimOp, tf.no_op)
updateOps.append(optimOp())
return tf.group(*updateOps), qr
def apply_gradients(self, grads):
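        """Dispatch between a cold-start SGD phase and the KFAC update: plain
        momentum SGD (with optional global-norm clipping) is used for the first
        `_cold_iter` steps, after which the op built by `apply_gradients_kfac`
        takes over."""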
coldOptim = tf.train.MomentumOptimizer(
self._cold_lr, self._momentum)
def coldSGDstart():
sgd_grads, sgd_var = zip(*grads)
            if self.max_grad_norm is not None:
sgd_grads, sgd_grad_norm = tf.clip_by_global_norm(sgd_grads,self.max_grad_norm)
sgd_grads = list(zip(sgd_grads,sgd_var))
sgd_step_op = tf.assign_add(self.sgd_step, 1)
coldOptim_op = coldOptim.apply_gradients(sgd_grads)
if KFAC_DEBUG:
with tf.control_dependencies([sgd_step_op, coldOptim_op]):
sgd_step_op = tf.Print(
sgd_step_op, [self.sgd_step, tf.convert_to_tensor('doing cold sgd step')])
return tf.group(*[sgd_step_op, coldOptim_op])
kfacOptim_op, qr = self.apply_gradients_kfac(grads)
def warmKFACstart():
return kfacOptim_op
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), warmKFACstart, coldSGDstart), qr
def minimize(self, loss, loss_sampled, var_list=None):
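        """Convenience wrapper: compute gradients of `loss` and build the
        statistics-update op for the sampled loss via `compute_and_apply_stats`
        (kept on the optimizer for `apply_gradients` to consume), then return
        the full update op."""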
grads = self.compute_gradients(loss, var_list=var_list)
update_stats_op = self.compute_and_apply_stats(
loss_sampled, var_list=var_list)
return self.apply_gradients(grads)
| 45,679 | 48.171152 | 366 | py |
P3O | P3O-main/baselines/acktr/utils.py | import tensorflow as tf
def dense(x, size, name, weight_init=None, bias_init=0, weight_loss_dict=None, reuse=None):
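    """Fully connected layer: returns x @ w + b under variable scope `name`.
    When `weight_loss_dict` is provided, an L2 weight-decay term for `w` is
    added to the '<scope>_losses' collection and the decay coefficients are
    recorded in the dict."""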
with tf.variable_scope(name, reuse=reuse):
assert (len(tf.get_variable_scope().name.split('/')) == 2)
w = tf.get_variable("w", [x.get_shape()[1], size], initializer=weight_init)
b = tf.get_variable("b", [size], initializer=tf.constant_initializer(bias_init))
weight_decay_fc = 3e-4
if weight_loss_dict is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(w), weight_decay_fc, name='weight_decay_loss')
if weight_loss_dict is not None:
weight_loss_dict[w] = weight_decay_fc
weight_loss_dict[b] = 0.0
tf.add_to_collection(tf.get_variable_scope().name.split('/')[0] + '_' + 'losses', weight_decay)
return tf.nn.bias_add(tf.matmul(x, w), b)
def kl_div(action_dist1, action_dist2, action_size):
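    """KL divergence between two diagonal Gaussian action distributions, each
    packed as [means, stds] along the last axis. Implements the closed form
    sum_i[ ((mu1-mu2)^2 + s1^2 - s2^2) / (2 s2^2) + log(s2/s1) ],
    which equals the usual log(s2/s1) + (s1^2 + (mu1-mu2)^2)/(2 s2^2) - 1/2."""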
mean1, std1 = action_dist1[:, :action_size], action_dist1[:, action_size:]
mean2, std2 = action_dist2[:, :action_size], action_dist2[:, action_size:]
numerator = tf.square(mean1 - mean2) + tf.square(std1) - tf.square(std2)
denominator = 2 * tf.square(std2) + 1e-8
return tf.reduce_sum(
numerator/denominator + tf.log(std2) - tf.log(std1),reduction_indices=-1)
| 1,322 | 44.62069 | 107 | py |
P3O | P3O-main/baselines/acktr/defaults.py | def mujoco():
return dict(
nsteps=2500,
value_network='copy'
)
| 87 | 13.666667 | 28 | py |
P3O | P3O-main/baselines/acktr/__init__.py | 0 | 0 | 0 | py | |
P3O | P3O-main/baselines/acktr/kfac_utils.py | import tensorflow as tf
def gmatmul(a, b, transpose_a=False, transpose_b=False, reduce_dim=None):
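    """Generalized matmul that contracts a 2D factor against axis `reduce_dim`
    of an n-D tensor (falling back to plain matmul for two 2D inputs). The n-D
    operand is transposed so the contracted axis is outermost (or innermost),
    flattened to 2D, multiplied, then reshaped and transposed back.

    Hypothetical shape example: for a of shape (5, 5) and b of shape
    (3, 5, 7), gmatmul(a, b, reduce_dim=1) contracts a against b's middle
    axis and returns a (3, 5, 7) tensor.
    """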
assert reduce_dim is not None
# weird batch matmul
if len(a.get_shape()) == 2 and len(b.get_shape()) > 2:
# reshape reduce_dim to the left most dim in b
b_shape = b.get_shape()
if reduce_dim != 0:
b_dims = list(range(len(b_shape)))
b_dims.remove(reduce_dim)
b_dims.insert(0, reduce_dim)
b = tf.transpose(b, b_dims)
b_t_shape = b.get_shape()
b = tf.reshape(b, [int(b_shape[reduce_dim]), -1])
result = tf.matmul(a, b, transpose_a=transpose_a,
transpose_b=transpose_b)
result = tf.reshape(result, b_t_shape)
if reduce_dim != 0:
b_dims = list(range(len(b_shape)))
b_dims.remove(0)
b_dims.insert(reduce_dim, 0)
result = tf.transpose(result, b_dims)
return result
elif len(a.get_shape()) > 2 and len(b.get_shape()) == 2:
# reshape reduce_dim to the right most dim in a
a_shape = a.get_shape()
outter_dim = len(a_shape) - 1
reduce_dim = len(a_shape) - reduce_dim - 1
if reduce_dim != outter_dim:
a_dims = list(range(len(a_shape)))
a_dims.remove(reduce_dim)
a_dims.insert(outter_dim, reduce_dim)
a = tf.transpose(a, a_dims)
a_t_shape = a.get_shape()
a = tf.reshape(a, [-1, int(a_shape[reduce_dim])])
result = tf.matmul(a, b, transpose_a=transpose_a,
transpose_b=transpose_b)
result = tf.reshape(result, a_t_shape)
if reduce_dim != outter_dim:
a_dims = list(range(len(a_shape)))
a_dims.remove(outter_dim)
a_dims.insert(reduce_dim, outter_dim)
result = tf.transpose(result, a_dims)
return result
elif len(a.get_shape()) == 2 and len(b.get_shape()) == 2:
return tf.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
    assert False, 'gmatmul only supports 2D x nD, nD x 2D, or 2D x 2D operands'
def clipoutNeg(vec, threshold=1e-6):
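    """Zero out entries of `vec` that are not above `threshold` (used to drop
    negative or near-zero eigenvalues)."""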
mask = tf.cast(vec > threshold, tf.float32)
return mask * vec
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
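    """Clip small/negative entries of `input_mat` to zero and, in debug mode,
    print a warning when the max/min eigenvalue ratio looks pathological."""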
eigen_min = tf.reduce_min(input_mat)
eigen_max = tf.reduce_max(input_mat)
eigen_ratio = eigen_max / eigen_min
input_mat_clipped = clipoutNeg(input_mat, threshold)
if debug:
input_mat_clipped = tf.cond(tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)), lambda: input_mat_clipped, lambda: tf.Print(
input_mat_clipped, [tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'), tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))
return input_mat_clipped
def factorReshape(Q, e, grad, facIndx=0, ftype='act'):
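    """Reshape eigenvalue vector `e` so it broadcasts against the factored
    axis of `grad`: axis `facIndx` counted from the front for activation
    ('act') factors, from the back for gradient ('grad') factors."""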
grad_shape = grad.get_shape()
if ftype == 'act':
assert e.get_shape()[0] == grad_shape[facIndx]
expanded_shape = [1, ] * len(grad_shape)
expanded_shape[facIndx] = -1
e = tf.reshape(e, expanded_shape)
if ftype == 'grad':
assert e.get_shape()[0] == grad_shape[len(grad_shape) - facIndx - 1]
expanded_shape = [1, ] * len(grad_shape)
expanded_shape[len(grad_shape) - facIndx - 1] = -1
e = tf.reshape(e, expanded_shape)
return Q, e
| 3,389 | 37.965517 | 168 | py |
P3O | P3O-main/baselines/bench/test_monitor.py | from .monitor import Monitor
import gym
import json
def test_monitor():
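    """Smoke test: wrap CartPole in Monitor, run 1000 steps, then check the
    CSV header metadata and the (r, l, t) column names."""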
import pandas
import os
import uuid
env = gym.make("CartPole-v1")
env.seed(0)
mon_file = "/tmp/baselines-test-%s.monitor.csv" % uuid.uuid4()
menv = Monitor(env, mon_file)
menv.reset()
for _ in range(1000):
_, _, done, _ = menv.step(0)
if done:
menv.reset()
f = open(mon_file, 'rt')
firstline = f.readline()
assert firstline.startswith('#')
metadata = json.loads(firstline[1:])
assert metadata['env_id'] == "CartPole-v1"
assert set(metadata.keys()) == {'env_id', 't_start'}, "Incorrect keys in monitor metadata"
last_logline = pandas.read_csv(f, index_col=None)
assert set(last_logline.keys()) == {'l', 't', 'r'}, "Incorrect keys in monitor logline"
f.close()
os.remove(mon_file)
| 861 | 25.9375 | 95 | py |
P3O | P3O-main/baselines/bench/benchmarks.py | import re
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
_atari7 = ['BeamRider', 'Breakout', 'Enduro', 'Pong', 'Qbert', 'Seaquest', 'SpaceInvaders']
_atariexpl7 = ['Freeway', 'Gravitar', 'MontezumaRevenge', 'Pitfall', 'PrivateEye', 'Solaris', 'Venture']
_BENCHMARKS = []
remove_version_re = re.compile(r'-v\d+$')
def register_benchmark(benchmark):
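    """Register a benchmark dict (unique 'name', optional 'tasks'); a missing
    per-task 'desc' is derived from the env id with its version suffix
    stripped."""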
for b in _BENCHMARKS:
if b['name'] == benchmark['name']:
raise ValueError('Benchmark with name %s already registered!' % b['name'])
# automatically add a description if it is not present
if 'tasks' in benchmark:
for t in benchmark['tasks']:
if 'desc' not in t:
t['desc'] = remove_version_re.sub('', t.get('env_id', t.get('id')))
_BENCHMARKS.append(benchmark)
def list_benchmarks():
return [b['name'] for b in _BENCHMARKS]
def get_benchmark(benchmark_name):
for b in _BENCHMARKS:
if b['name'] == benchmark_name:
return b
raise ValueError('%s not found! Known benchmarks: %s' % (benchmark_name, list_benchmarks()))
def get_task(benchmark, env_id):
"""Get a task by env_id. Return None if the benchmark doesn't have the env"""
return next(filter(lambda task: task['env_id'] == env_id, benchmark['tasks']), None)
def find_task_for_env_id_in_any_benchmark(env_id):
for bm in _BENCHMARKS:
for task in bm["tasks"]:
if task["env_id"] == env_id:
return bm, task
return None, None
_ATARI_SUFFIX = 'NoFrameskip-v4'
register_benchmark({
'name': 'Atari50M',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 50M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(50e6)} for _game in _atari7]
})
register_benchmark({
'name': 'Atari10M',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 6, 'num_timesteps': int(10e6)} for _game in _atari7]
})
register_benchmark({
'name': 'Atari1Hr',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 1 hour of walltime',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_seconds': 60 * 60} for _game in _atari7]
})
register_benchmark({
'name': 'AtariExploration10M',
'description': '7 Atari games emphasizing exploration, with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(10e6)} for _game in _atariexpl7]
})
# MuJoCo
_mujocosmall = [
'InvertedDoublePendulum-v2', 'InvertedPendulum-v2',
'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2',
'Reacher-v2', 'Swimmer-v2']
register_benchmark({
'name': 'Mujoco1M',
'description': 'Some small 2D MuJoCo tasks, run for 1M timesteps',
'tasks': [{'env_id': _envid, 'trials': 6, 'num_timesteps': int(1e6)} for _envid in _mujocosmall]
})
register_benchmark({
'name': 'MujocoWalkers',
'description': 'MuJoCo forward walkers, run for 8M, humanoid 100M',
'tasks': [
{'env_id': "Hopper-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "Walker2d-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "Humanoid-v1", 'trials': 4, 'num_timesteps': 100 * 1000000},
]
})
# Bullet
_bulletsmall = [
'InvertedDoublePendulum', 'InvertedPendulum', 'HalfCheetah', 'Reacher', 'Walker2D', 'Hopper', 'Ant'
]
_bulletsmall = [e + 'BulletEnv-v0' for e in _bulletsmall]
register_benchmark({
'name': 'Bullet1M',
'description': '6 mujoco-like tasks from bullet, 1M steps',
'tasks': [{'env_id': e, 'trials': 6, 'num_timesteps': int(1e6)} for e in _bulletsmall]
})
# Roboschool
register_benchmark({
'name': 'Roboschool8M',
'description': 'Small 2D tasks, up to 30 minutes to complete on 8 cores',
'tasks': [
{'env_id': "RoboschoolReacher-v1", 'trials': 4, 'num_timesteps': 2 * 1000000},
{'env_id': "RoboschoolAnt-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolHalfCheetah-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolHopper-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolWalker2d-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
]
})
register_benchmark({
'name': 'RoboschoolHarder',
'description': 'Test your might!!! Up to 12 hours on 32 cores',
'tasks': [
{'env_id': "RoboschoolHumanoid-v1", 'trials': 4, 'num_timesteps': 100 * 1000000},
{'env_id': "RoboschoolHumanoidFlagrun-v1", 'trials': 4, 'num_timesteps': 200 * 1000000},
{'env_id': "RoboschoolHumanoidFlagrunHarder-v1", 'trials': 4, 'num_timesteps': 400 * 1000000},
]
})
# Other
_atari50 = [ # actually 47
'Alien', 'Amidar', 'Assault', 'Asterix', 'Asteroids',
'Atlantis', 'BankHeist', 'BattleZone', 'BeamRider', 'Bowling',
'Breakout', 'Centipede', 'ChopperCommand', 'CrazyClimber',
'DemonAttack', 'DoubleDunk', 'Enduro', 'FishingDerby', 'Freeway',
'Frostbite', 'Gopher', 'Gravitar', 'IceHockey', 'Jamesbond',
'Kangaroo', 'Krull', 'KungFuMaster', 'MontezumaRevenge', 'MsPacman',
'NameThisGame', 'Pitfall', 'Pong', 'PrivateEye', 'Qbert',
'RoadRunner', 'Robotank', 'Seaquest', 'SpaceInvaders', 'StarGunner',
'Tennis', 'TimePilot', 'Tutankham', 'UpNDown', 'Venture',
'VideoPinball', 'WizardOfWor', 'Zaxxon',
]
register_benchmark({
'name': 'Atari50_10M',
'description': '47 Atari games from Mnih et al. (2013), with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(10e6)} for _game in _atari50]
})
# HER DDPG
_fetch_tasks = ['FetchReach-v1', 'FetchPush-v1', 'FetchSlide-v1']
register_benchmark({
'name': 'Fetch1M',
'description': 'Fetch* benchmarks for 1M timesteps',
'tasks': [{'trials': 6, 'env_id': env_id, 'num_timesteps': int(1e6)} for env_id in _fetch_tasks]
})
| 6,102 | 35.987879 | 129 | py |
P3O | P3O-main/baselines/bench/monitor.py | __all__ = ['Monitor', 'get_monitor_files', 'load_results']
from gym.core import Wrapper
import time
from glob import glob
import csv
import os.path as osp
import json
class Monitor(Wrapper):
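    """Gym wrapper that records per-episode reward, length and wall time,
    optionally streaming them to a `*.monitor.csv` file.

    Minimal usage sketch (assumes a locally registered env id):

        env = Monitor(gym.make("CartPole-v1"), "/tmp/run0")
        ob = env.reset()
        ob, rew, done, info = env.step(env.action_space.sample())
        # on episode end, info['episode'] holds {'r': ..., 'l': ..., 't': ...}
    """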
EXT = "monitor.csv"
f = None
def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
if filename:
self.results_writer = ResultsWriter(filename,
header={"t_start": time.time(), 'env_id' : env.spec and env.spec.id},
extra_keys=reset_keywords + info_keywords
)
else:
self.results_writer = None
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs):
self.reset_state()
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError('Expected you to pass kwarg %s into reset'%k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def reset_state(self):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.update(ob, rew, done, info)
return (ob, rew, done, info)
def update(self, ob, rew, done, info):
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
if self.results_writer:
self.results_writer.write_row(epinfo)
            assert isinstance(info, dict)
            info['episode'] = epinfo
self.total_steps += 1
def close(self):
super(Monitor, self).close()
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
class LoadMonitorResultsError(Exception):
pass
class ResultsWriter(object):
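    """CSV writer behind Monitor: emits a '#'-prefixed JSON header line
    followed by rows with fields ('r', 'l', 't') plus any `extra_keys`."""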
def __init__(self, filename, header='', extra_keys=()):
self.extra_keys = extra_keys
assert filename is not None
if not filename.endswith(Monitor.EXT):
if osp.isdir(filename):
filename = osp.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
if isinstance(header, dict):
header = '# {} \n'.format(json.dumps(header))
self.f.write(header)
self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+tuple(extra_keys))
self.logger.writeheader()
self.f.flush()
def write_row(self, epinfo):
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
def get_monitor_files(dir):
return glob(osp.join(dir, "*" + Monitor.EXT))
def load_results(dir):
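    """Load every monitor file in `dir` (CSV, plus the deprecated JSON format)
    into a single pandas DataFrame sorted by episode end time, with 't'
    rebased to the earliest run start."""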
import pandas
monitor_files = (
glob(osp.join(dir, "*monitor.json")) +
glob(osp.join(dir, "*monitor.csv"))) # get both csv and (old) json files
if not monitor_files:
raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
dfs = []
headers = []
for fname in monitor_files:
with open(fname, 'rt') as fh:
if fname.endswith('csv'):
firstline = fh.readline()
if not firstline:
continue
assert firstline[0] == '#'
header = json.loads(firstline[1:])
df = pandas.read_csv(fh, index_col=None)
headers.append(header)
elif fname.endswith('json'): # Deprecated json format
episodes = []
lines = fh.readlines()
header = json.loads(lines[0])
headers.append(header)
for line in lines[1:]:
episode = json.loads(line)
episodes.append(episode)
df = pandas.DataFrame(episodes)
else:
assert 0, 'unreachable'
df['t'] += header['t_start']
dfs.append(df)
df = pandas.concat(dfs)
df.sort_values('t', inplace=True)
df.reset_index(inplace=True)
df['t'] -= min(header['t_start'] for header in headers)
df.headers = headers # HACK to preserve backwards compatibility
return df
| 5,762 | 33.927273 | 174 | py |
P3O | P3O-main/baselines/bench/__init__.py | # flake8: noqa F403
from baselines.bench.benchmarks import *
from baselines.bench.monitor import *
| 99 | 24 | 40 | py |
P3O | P3O-main/plot/plot_halfcheetah.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
import os
rc_fonts = {
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':10,
'ytick.labelsize':10,
"font.family": "times",
"font.size": 10,
'axes.titlesize':10,
"legend.fontsize":10,
'figure.figsize': (9, 12/3.0*0.75*2),
# 'figure.figsize': (7, 7/2.0*0.75),
# "text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
# plt.style.use('seaborn')
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
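    """Apply `func` over a sliding window of `y`; returns the trimmed x values
    and the windowed statistic (e.g. a running mean over recent episodes)."""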
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
path = taskpath.dirname.split(os.sep)[-1].split('_')
return path[0]+'_'.join(path[2:])
def group_by_name(taskpath):
return taskpath.dirname.split(os.sep)[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
inches=7
):
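    """Grid of learning curves: results are split into subplots by `split_fn`
    and into curves by `group_fn`. With `average_group`, runs in a group are
    resampled onto a common x grid (symmetric EMA), then plotted as the mean
    with optional std / stderr shading (bands or vertical lines)."""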
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
for (isplit, sk) in enumerate(sk2r.keys()):
# for (isplit, sk) in enumerate(['Enduro', 'Breakout', 'BeamRider', 'Ant', 'HalfCheetah', 'Walker2d']):
# plt.gca().set_prop_cycle(markercycle)
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for idx, group in enumerate(sorted(groups)):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
# if sk in ['Enduro', 'Breakout', 'BeamRider']:
# high = 9e6
#
# else:
# high = min(x[-1] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {'ddpo':'P3O','vpgdualclip':'Dual-Clip PPO',
'acktr':'ACKTR','ppo2':'PPO','trpo':'TRPO','a2c':'A2C'}
tt_s = g2ls[0]
ad = g2ls[0]
axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],edgecolor='None', facecolor='None')
return f, axarr
def paper_image():
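    # NOTE: the paths below point at the authors' local result logs; adjust
    # them to wherever your own monitor files live.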
path = [r'C:\Users\chenxing\0323\HalfCheetah_result',
]
save_name = 'halfcheetah_p3o_vs_ppo'
results = plot_util.load_results(path, enable_monitor=True, enable_progress=False)
plot_results(results, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=Y_REWARD,row=1)
fig = plt.gcf()
fig.savefig('png'+os.sep+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,410 | 32.376984 | 132 | py |
P3O | P3O-main/plot/plot_parameter_select_on_halfcheetah.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
import os
rc_fonts = {
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':10,
'ytick.labelsize':10,
"font.family": "times",
"font.size": 10,
'axes.titlesize':10,
"legend.fontsize":10,
'figure.figsize': (6, 4),
# 'figure.figsize': (7, 7/2.0*0.75),
# "text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
# plt.style.use('seaborn')
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
gp = taskpath.dirname.split(os.sep)[-1].split('_')
return gp[0] + gp[-1]
def group_by_name(taskpath):
return taskpath.dirname.split(os.sep)[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
inches=7
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
for (isplit, sk) in enumerate(sk2r.keys()):
# for (isplit, sk) in enumerate(['Enduro', 'Breakout', 'BeamRider', 'Ant', 'HalfCheetah', 'Walker2d']):
# plt.gca().set_prop_cycle(markercycle)
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for idx, group in enumerate(sorted(groups)):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
# if sk in ['Enduro', 'Breakout', 'BeamRider']:
# high = 9e6
#
# else:
# high = min(x[-1] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {
"ddpo8-0.5":r"$\alpha=8$ $\beta=0.5$ ",
"ddpo2-1":r"$\alpha=2$ $\beta=1$ ",
"ddpo4-1":r"$\alpha=4$ $\beta=1$ ",
"ddpo1-2":r"$\alpha=1$ $\beta=2$ ",
"ddpo2-2":r"$\alpha=2$ $\beta=2$ ",
"ddpo0.5-4":r"$\alpha=0.5$ $\beta=4$ ",
"ddpo1-4":r"$\alpha=1$ $\beta=4$ ",
"ddpo2-4":r"$\alpha=2$ $\beta=4$ ",
"ddpo0.5-8":r"$\alpha=0.5$ $\beta=8$ ",
}
tt_s = g2ls[0]
ad = {}
for key in tt.keys():
if key in tt_s.keys():
ad[key] = tt_s[key]
axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],edgecolor='None', facecolor='None',loc=(1.05, 0.25))
# legend.get_frame().set_alpha(None)
# legend.get_frame().set_facecolor((0, 0, 0, 0))
# legend.get_frame().set_edgecolor((0, 0, 0, 0))
return f, axarr
def paper_image():
path = [r'C:\Users\chenxing\0323\HalfCheetah_mulit_parameter_lr-1e4',
]
save_name = 'halfcheetah-para-sel'
results = plot_util.load_results(path, enable_monitor=True, enable_progress=False)
plot_results(results, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=Y_REWARD,row=1)
fig = plt.gcf()
fig.savefig('png'+os.sep+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,950 | 32.777358 | 133 | py |
P3O | P3O-main/plot/plot_progress_kl_gradient_r.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
import os
rc_fonts = {
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':10,
'ytick.labelsize':10,
"font.family": "times",
"font.size": 10,
'axes.titlesize':10,
"legend.fontsize":10,
'figure.figsize': (8.5, 5),
'axes.unicode_minus':False,
# 'figure.figsize': (7, 7/2.0*0.75),
# "text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
# plt.style.use('seaborn')
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
path = taskpath.dirname.split(os.sep)[-1].split('_')
return path[0]
def group_by_name(taskpath):
return taskpath.dirname.split(os.sep)[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
# for (isplit, sk) in enumerate(sk2r.keys()):
for (isplit, sk) in enumerate(['Enduro', 'Breakout', 'BeamRider', 'Ant', 'HalfCheetah', 'Walker2d']):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = 'kl'
g2c[group] += 1
x, y = xy_fn(result,name='loss/approxkl')
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
for result in sresults:
group = 'gradient'
g2c[group] += 1
x, y = xy_fn(result,name='loss/gradient_r_scpi')
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for idx, group in enumerate(["kl",'gradient']):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
# if sk in ['Enduro', 'Breakout', 'BeamRider']:
# high = 9e6
#
# else:
# high = min(x[-1] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {'ddpo':'P3O','vpgdualclip':'Dual-Clip PPO',
'acktr':'ACKTR','ppo2':'PPO','trpo':'TRPO','a2c':'A2C'}
tt_s = g2ls[0]
ad = g2ls[0]
axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],edgecolor='None', facecolor='None')
return f, axarr
def paper_image():
path = [
# r'C:\Users\chenxing\0323\ppop3o',C:\Users\chenxing\0323\only_ppop3o
r'C:\Users\chenxing\0323\onlyp3o',
]
save_name = 'kl_gradient_r'
def xy_fn(r, name):
y = smooth(r.progress[name], radius=10)
x = r.progress['misc/total_timesteps']
return x,y
results = plot_util.load_results(path, enable_monitor=False, enable_progress=True)
plot_results(results, xy_fn=xy_fn, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel='policy kl gradient',row=2)
fig = plt.gcf()
fig.savefig('png'+os.sep+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 9,170 | 32.470803 | 132 | py |
P3O | P3O-main/plot/plot_halfcheetah_episode_lenth.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
import os
rc_fonts = {
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':10,
'ytick.labelsize':10,
"font.family": "times",
"font.size": 10,
'axes.titlesize':10,
"legend.fontsize":10,
'figure.figsize': (5, 3),# 3.5, 5, 7.2
# 'figure.figsize': (7, 7/2.0*0.75),
# "text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
# plt.style.use('seaborn')
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
path = taskpath.dirname.split(os.sep)[-1].split('_')
return "_".join(path[2:])
def group_by_name(taskpath):
return taskpath.dirname.split(os.sep)[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
inches=7
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
for (isplit, sk) in enumerate(sk2r.keys()):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for idx, group in enumerate(sorted(groups)):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
# if sk in ['Enduro', 'Breakout', 'BeamRider']:
# high = 9e6
#
# else:
# high = min(x[-1] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {'ddpo':'P3O','vpgdualclip':'Dual-Clip PPO',
'acktr':'ACKTR','ppo2':'PPO','trpo':'TRPO','a2c':'A2C'}
tt_s = g2ls[0]
ad = tt_s
# for key in tt.keys():
# if key in tt_s.keys():
# ad[key] = tt_s[key]
axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],edgecolor='None', facecolor='None',loc=(1.05, 0.25))
return f, axarr
def paper_image():
path = [r'C:\Users\chenxing\0323\HalfCheetah_episode_length',
]
save_name = 'HalfCheetah_episode_length'
results = plot_util.load_results(path, enable_monitor=True, enable_progress=False)
plot_results(results, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=Y_REWARD,row=1)
fig = plt.gcf()
fig.savefig('png'+os.sep+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,368 | 32.079051 | 133 | py |
P3O | P3O-main/plot/plot_halfcheetah-hyper-parameter.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
import os
rc_fonts = {
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':10,
'ytick.labelsize':10,
"font.family": "times",
"font.size": 10,
'axes.titlesize':10,
"legend.fontsize":10,
'figure.figsize': (8.5, 4),
# 'figure.figsize': (7, 7/2.0*0.75),
# "text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
# plt.style.use('seaborn')
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
gp = taskpath.dirname.split(os.sep)[-1].split('_')
return gp[0]
def group_by_name(taskpath):
return taskpath.dirname.split(os.sep)[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
inches=7
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
for (isplit, sk) in enumerate(sk2r.keys()):
# for (isplit, sk) in enumerate(['Enduro', 'Breakout', 'BeamRider', 'Ant', 'HalfCheetah', 'Walker2d']):
# plt.gca().set_prop_cycle(markercycle)
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for idx, group in enumerate(sorted(groups)):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
# if sk in ['Enduro', 'Breakout', 'BeamRider']:
# high = 9e6
#
# else:
# high = min(x[-1] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {
"ppo2":r"PPO",
"ddpo":r"P3O",
}
tt_s = g2ls[0]
ad = {}
for key in tt.keys():
if key in tt_s.keys():
ad[key] = tt_s[key]
axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],edgecolor='None', facecolor='None',loc=(1.05, 0.25))
# legend.get_frame().set_alpha(None)
# legend.get_frame().set_facecolor((0, 0, 0, 0))
# legend.get_frame().set_edgecolor((0, 0, 0, 0))
return f, axarr
def paper_image():
path = [r'C:\Users\chenxing\0323\halfcheetah-hyper-parameter',
]
save_name = 'halfcheetah-hyper-parameter'
results = plot_util.load_results(path, enable_monitor=True, enable_progress=False)
plot_results(results, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=Y_REWARD,row=2)
fig = plt.gcf()
fig.savefig('png'+os.sep+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,582 | 32.138996 | 133 | py |
P3O | P3O-main/plot/plot_activatefunction.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
from baselines.common import plot_util
plt.style.use('seaborn')
rc_fonts = {
'lines.markeredgewidth': 1,
"lines.markersize":3,
"lines.linewidth":1,
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':8,
'ytick.labelsize':8,
"font.family": "times",
'axes.titlesize':11,
"legend.fontsize":8,
'figure.figsize': (5, 4),
"text.usetex": True,
# 'text.latex.preview': True,
'text.latex.preamble':
r"""
\usepackage{times}
\usepackage{helvet}
\usepackage{courier}
""",
}
matplotlib.rcParams.update(rc_fonts)
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
return taskpath.dirname.split('/')[-1].split('_')[0]
def group_by_name(taskpath):
return taskpath.dirname.split('/')[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
inches=7
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
# for (isplit, sk) in enumerate(sk2r.keys()):
for (isplit, sk) in enumerate(['Breakout2', 'Ant2','Breakout', 'Ant']):
# plt.gca().set_prop_cycle(markercycle)
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for idx, group in enumerate(sorted(groups)):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
                    # axarr[idx_row][idx_col].xaxis.set_major_locator(plt.MultipleLocator(1e6))  # set the x-axis major tick spacing
# axarr[idx_row][idx_col].yaxis.set_major_locator(plt.MultipleLocator(1e3))
# axarr[idx_row][idx_col].ticklabel_format(style='sci',scilimits=(0,0),axis='both')
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk.replace('2',''))
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
    tt = {'vpg+ratio': 'RD', 'vpg+tvd': 'VD', 'vpg+kl': 'KL',
          'vpg+dualclip': 'DC', 'vpg+minclip': 'MC', 'vpg+onlyclip': 'OC', 'vpg+sigmoid': 'SM'
          }
tt_s = g2ls[0]
print(tt_s.keys())
ad = {}
for key in tt.keys():
if key in tt_s.keys():
ad[tt[key]] = tt_s[key]
axarr[0][0].legend(ad.values(), ad.keys(), loc ='upper left', edgecolor='None', facecolor='None')
# tt= {
# 'vpg+dualclip':'DC','vpg+minclip':'MC','vpg+onlyclip':'OC','vpg+sigmoid':'SM'
# }
tt_s = g2ls[2]
print(tt_s.keys())
ad = {}
for key in tt.keys():
if key in tt_s.keys():
ad[tt[key]] = tt_s[key]
axarr[1][0].legend(ad.values(), ad.keys(), loc ='upper left', edgecolor='None', facecolor='None')
return f, axarr
def paper_image():
path = ['/home/chenxing/DDPO/image/passivefunction','/home/chenxing/DDPO/image/activefunction',
]
save_name = 'both'
results = plot_util.load_results(path, enable_monitor=True, enable_progress=False)
plot_results(results, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=Y_REWARD,row=2,)
fig = plt.gcf()
fig.savefig('../png/'+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,934 | 32.844697 | 132 | py |
P3O | P3O-main/plot/plot_mulit_value_in_one_fig.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
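# symmetric_ema (baselines.common.plot_util) resamples a series onto a uniform
# grid and smooths it with the average of a forward and a backward EMA; it
# returns (xs, ys, counts).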
import os
rc_fonts = {
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':10,
'ytick.labelsize':10,
"font.family": "times",
"font.size": 10,
'axes.titlesize':10,# title
"legend.fontsize":10,
'figure.figsize': (8.5, 5),
# 'figure.figsize': (7, 7/2.0*0.75),
# "text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
# plt.style.use('seaborn')
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['green', 'blue', 'green', 'blue',
'green', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
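# Note: the leading colors repeat ('green', 'blue', ...), presumably so that
# related metric curves drawn in sequence share a hue (an assumption; the list
# is simply indexed modulo its length below).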
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
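# Example (sketch): window_func computes a trailing moving average over
# `window` points, dropping the first window-1 x values.
# >>> x = np.arange(5); y = np.array([1., 2., 3., 4., 5.])
# >>> window_func(x, y, 3, np.mean)
# (array([2, 3, 4]), array([2., 3., 4.]))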
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
path = taskpath.dirname.split(os.sep)[-1].split('_')
return path[0]
def group_by_name(taskpath):
return taskpath.dirname.split(os.sep)[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
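# default_xy_fn prefers the episode monitor (cumulative episode lengths vs.
# smoothed episode rewards) and falls back to progress.csv columns when no
# monitor was loaded; the fallback column names ('return-average',
# 'total-samples') are specific to the logger that produced the runs.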
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['.', '-', '-','--', '--', ]
g2ls = []
g2cs = []
# for (isplit, sk) in enumerate(sk2r.keys()):
for (isplit, sk) in enumerate(['Ant', 'HalfCheetah', 'Walker2d']):
# plt.gca().set_prop_cycle(markercycle)
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
group_ = 'mean abs(r-1) of ' + group
g2c[group_] += 1
x, y = xy_fn(result,'mean_rt')
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
gresults[group_].append((x,y))
group_ = 'max(abs(r-1)) when r<1 of ' + group
g2c[group_] += 1
x, y = xy_fn(result,'ntr_rt')
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
gresults[group_].append((x,y))
if group=="p3o":continue
group_ = 'max(abs(r-1)) when adv<0 of ' + group
g2c[group_] += 1
x, y = xy_fn(result,'nta_rt')
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
gresults[group_].append((x,y))
if average_group:
for idx, group in enumerate(sorted(gresults.keys())):
xys = gresults[group]
if not any(xys):
continue
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
l = ax.hlines(1,0,3e6, colors='red')
g2l['value=1'] = l
# plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {'ddpo':'P3O','vpgdualclip':'Dual-Clip PPO',
'acktr':'ACKTR','ppo2':'PPO','trpo':'TRPO','a2c':'A2C'}
ad = g2ls[0]
axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],edgecolor='None', facecolor='None',loc=(3.5, 0.5))
return f, axarr
def paper_image():
path = [
# r'C:\Users\chenxing\0323\ppop3o',C:\Users\chenxing\0323\only_ppop3o
r'C:\Users\chenxing\0323\extra_test',
]
save_name = 'max_ratio'
def xy_fn(r, alias):
y = smooth(r.progress['loss/'+alias], radius=10)
x = r.progress['misc/total_timesteps']
return x,y
results = plot_util.load_results(path, enable_monitor=False, enable_progress=True)
plot_results(results, xy_fn=xy_fn, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=r'$\max(|r - 1|)$',row=1)
fig = plt.gcf()
# plt.show()
fig.savefig('png'+os.sep+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,651 | 31.283582 | 132 | py |
P3O | P3O-main/plot/plot_passivefunction.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
plt.style.use('seaborn')
rc_fonts = {
'lines.markeredgewidth': 1,
"lines.markersize":3,
"lines.linewidth":1,
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':8,
'ytick.labelsize':8,
"font.family": "times",
'axes.titlesize':11,
"legend.fontsize":8,
'figure.figsize': (5, 2),
"text.usetex": True,
# 'text.latex.preview': True,
'text.latex.preamble':
r"""
\usepackage{times}
\usepackage{helvet}
\usepackage{courier}
""",
}
matplotlib.rcParams.update(rc_fonts)
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
return taskpath.dirname.split('/')[-1].split('_')[0]
def group_by_name(taskpath):
return taskpath.dirname.split('/')[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
inches=7
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
# for (isplit, sk) in enumerate(sk2r.keys()):
for (isplit, sk) in enumerate(['Breakout', 'Ant']):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for idx, group in enumerate(sorted(groups)):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
                # axarr[idx_row][idx_col].xaxis.set_major_locator(plt.MultipleLocator(1e6))  # set x-axis major ticks at multiples of 1e6
# axarr[idx_row][idx_col].yaxis.set_major_locator(plt.MultipleLocator(1e3))
# axarr[idx_row][idx_col].ticklabel_format(style='sci',scilimits=(0,0),axis='both')
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {
'vpg+dualclip':'DC','vpg+minclip':'MC','vpg+onlyclip':'OC','vpg+sigmoid':'SM'
}
tt_s = g2ls[0]
print(tt_s.keys())
ad = {}
for key in tt.keys():
if key in tt_s.keys():
ad[tt[key]] = tt_s[key]
axarr[0][0].legend(ad.values(), ad.keys(), loc ='upper left', edgecolor='None', facecolor='None')
return f, axarr
def paper_image():
path = ['/home/chenxing/DDPO/image/passivefunction']
save_name = 'passivefunction'
results = plot_util.load_results(path, enable_monitor=True, enable_progress=False)
plot_results(results, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=Y_REWARD,row=1)
fig = plt.gcf()
fig.savefig('../png/'+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,394 | 32.313492 | 132 | py |
P3O | P3O-main/plot/plot-marker.py | import matplotlib.pylab as plt
markers = ['.',',','o','v','^','<','>','1','2','3','4','8','s','p','P','*','h','H','+','x','X','D','d','|','_']
descriptions = ['point', 'pixel', 'circle', 'triangle_down', 'triangle_up','triangle_left',
'triangle_right', 'tri_down', 'tri_up', 'tri_left', 'tri_right', 'octagon',
'square', 'pentagon', 'plus (filled)','star', 'hexagon1', 'hexagon2', 'plus',
'x', 'x (filled)','diamond', 'thin_diamond', 'vline', 'hline']
x=[]
y=[]
for i in range(5):
for j in range(5):
x.append(i)
y.append(j)
plt.figure(figsize=(8, 8))
for i,j,m,l in zip(x,y,markers,descriptions):
plt.scatter(i,j,marker=m)
plt.text(i-0.15,j+0.15,s=m+' : '+l)
plt.axis([-0.1,4.8,-0.1,4.5])
plt.axis('off')
plt.tight_layout()
plt.show() | 819 | 27.275862 | 111 | py |
P3O | P3O-main/plot/plot_performence.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
import os
rc_fonts = {
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':10,
'ytick.labelsize':10,
"font.family": "times",
"font.size": 10,
'axes.titlesize':10,
"legend.fontsize":8,
'figure.figsize': (8.5, 5),
# 'figure.figsize': (7, 7/2.0*0.75),
# "text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
# plt.style.use('seaborn')
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
return taskpath.dirname.split(os.sep)[-1].split('_')[0]
def group_by_name(taskpath):
return taskpath.dirname.split(os.sep)[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['eprewmean'], radius=10)
x = r.progress['misc/total_timesteps']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
inches=7
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
# for (isplit, sk) in enumerate(sk2r.keys()):
for (isplit, sk) in enumerate(['Enduro', 'Breakout', 'BeamRider', 'Ant', 'HalfCheetah', 'Walker2d']):
# plt.gca().set_prop_cycle(markercycle)
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for idx, group in enumerate(sorted(groups)):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
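                        # Zero out NaN rewards (logged before the first episode
                        # completes, presumably) so symmetric_ema stays finite.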
y[np.isnan(y)] = 0
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {'ddpo':'P3O','vpgdualclip':'Dual-Clip PPO',
'acktr':'ACKTR','ppo2':'PPO','trpo':'TRPO','a2c':'A2C','clip':'CLIP'}
tt_s = g2ls[0]
ad = {}
for key in tt.keys():
if key in tt_s.keys():
ad[key] = tt_s[key]
axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],edgecolor='None', facecolor='None')
return f, axarr
def paper_image():
path = [
r'C:\Users\chenxing\0323\DDPO\performance',
r"C:\Users\chenxing\0323\extra_test"]
save_name = 'performence'
results = plot_util.load_results(path, enable_monitor=True, enable_progress=True)
plot_results(results, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=Y_REWARD,row=2)
fig = plt.gcf()
fig.savefig('png'+os.sep+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,322 | 32.292 | 132 | py |
P3O | P3O-main/plot/plot_para.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
plt.style.use('seaborn')
rc_fonts = {
'lines.markeredgewidth': 1,
"lines.markersize":3,
"lines.linewidth":1,
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':8,
'ytick.labelsize':8,
"font.family": "times",
# "font.size": 8,
'axes.titlesize':11,
"legend.fontsize":8,
'figure.figsize': (5, 2),
"text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
return taskpath.dirname.split('/')[-1].split('_')[0]
def group_by_name(taskpath):
return taskpath.dirname.split('/')[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
inches=7
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
for (isplit, sk) in enumerate(sk2r.keys()):
# for (isplit, sk) in enumerate(['Breakout', 'Ant']):
# plt.gca().set_prop_cycle(markercycle)
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for idx, group in enumerate(sorted(groups)):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
axarr[idx_row][idx_col].locator_params(axis='x', nbins=6)
axarr[idx_row][idx_col].locator_params(axis='y', nbins=6)
                # axarr[idx_row][idx_col].xaxis.set_major_locator(plt.MultipleLocator(1e6))  # set x-axis major ticks at multiples of 1e6
# axarr[idx_row][idx_col].yaxis.set_major_locator(plt.MultipleLocator(1e3))
# axarr[idx_row][idx_col].ticklabel_format(style='sci',scilimits=(0,0),axis='both')
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
ad = g2ls[0]
axarr[0][0].legend(ad.values(), [ g.replace('vpgkl','').replace('vpgsigmoid','') for g in ad.keys()],
loc ='upper left', edgecolor='None', facecolor='None')
ad = g2ls[1]
a = map(int, [ g.replace('vpgkl','').replace('vpgsigmoid','') for g in ad.keys()])
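    # Assumption: each remaining label is an integer n (e.g. 'vpgkl2' -> 2);
    # the second legend shows 4/n rounded to one decimal instead.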
def div4(a):
return round(4.0/a,1)
b = map(div4, a)
axarr[0][1].legend(ad.values(), b,
loc ='upper left', edgecolor='None', facecolor='None')
return f, axarr
def paper_image():
path = ['/home/chenxing/DDPO/image/para_analyis']
save_name = 'para_analyis'
results = plot_util.load_results(path, enable_monitor=True, enable_progress=False)
plot_results(results, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=Y_REWARD,row=1)
fig = plt.gcf()
fig.savefig('png/'+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,756 | 32.94186 | 132 | py |
P3O | P3O-main/plot/plot_halfcheetah_batchsize.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
import os
rc_fonts = {
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':10,
'ytick.labelsize':10,
"font.family": "times",
"font.size": 10,
'axes.titlesize':10,
"legend.fontsize":10,
'figure.figsize': (5, 3),# 3.5, 5, 7.2
# 'figure.figsize': (7, 7/2.0*0.75),
# "text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
# plt.style.use('seaborn')
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
path = taskpath.dirname.split(os.sep)[-1].split('_')
return "_".join(path[2:])
def group_by_name(taskpath):
return taskpath.dirname.split(os.sep)[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
inches=7
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
for (isplit, sk) in enumerate(sk2r.keys()):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for idx, group in enumerate(sorted(groups)):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
# if sk in ['Enduro', 'Breakout', 'BeamRider']:
# high = 9e6
#
# else:
# high = min(x[-1] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {'ddpo':'P3O','vpgdualclip':'Dual-Clip PPO',
'acktr':'ACKTR','ppo2':'PPO','trpo':'TRPO','a2c':'A2C'}
tt_s = g2ls[0]
ad = tt_s
# for key in tt.keys():
# if key in tt_s.keys():
# ad[key] = tt_s[key]
axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],edgecolor='None', facecolor='None',loc=(1.05, 0.25))
return f, axarr
def paper_image():
path = [r'C:\Users\chenxing\0323\HalfCheetah_batchsize',
]
save_name = 'halfcheetah_batchsize'
results = plot_util.load_results(path, enable_monitor=True, enable_progress=False)
plot_results(results, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=Y_REWARD,row=1)
fig = plt.gcf()
fig.savefig('png'+os.sep+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,356 | 32.294821 | 133 | py |
P3O | P3O-main/plot/plot_test_marker.py | import matplotlib.pylab as plt
import numpy as np
fmts=['-.', '-*', '-1', '-|', '-_', ]
x = np.linspace(0,100,20)
y = np.ones_like(x)
for f in fmts:
    plt.plot(x, y, f, label=f)  # attach a label so plt.legend() below has entries
y += 1
plt.legend()
plt.show() | 207 | 13.857143 | 37 | py |
P3O | P3O-main/plot/plot_progress_loss_difference.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
import os
rc_fonts = {
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':10,
'ytick.labelsize':10,
"font.family": "times",
"font.size": 10,
'axes.titlesize':10,
"legend.fontsize":10,
'figure.figsize': (8.5, 5),
'axes.unicode_minus':False,
# 'figure.figsize': (7, 7/2.0*0.75),
# "text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
# plt.style.use('seaborn')
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
path = taskpath.dirname.split(os.sep)[-1].split('_')
return path[0]
def group_by_name(taskpath):
return taskpath.dirname.split(os.sep)[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
# for (isplit, sk) in enumerate(sk2r.keys()):
for (isplit, sk) in enumerate(['Enduro', 'Breakout', 'BeamRider', 'Ant', 'HalfCheetah', 'Walker2d']):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result,name='loss/rAt')
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
# for result in sresults:
# group = 'ppo2'
# g2c[group] += 1
# x, y = xy_fn(result,name='loss/rAt')
# if x is None: x = np.arange(len(y))
# x, y = map(np.asarray, (x, y))
# if average_group:
# gresults[group].append((x,y))
# else:
# if resample:
# x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
# l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
# g2l[group] = l
if average_group:
for idx, group in enumerate(['ddpo','ppo2']):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
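                        # Note the sign flip: 'loss/rAt' logs the loss (the
                        # negated objective, presumably), so -y plots the CPI
                        # objective itself, matching the ylabel set below.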
ys.append(symmetric_ema(x, -y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {'ddpo':'P3O','vpgdualclip':'Dual-Clip PPO',
'acktr':'ACKTR','ppo2':'PPO','trpo':'TRPO','a2c':'A2C'}
tt_s = g2ls[0]
ad = g2ls[0]
axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],edgecolor='None', facecolor='None')
return f, axarr
def paper_image():
path = [
# r'C:\Users\chenxing\0323\ppop3o',C:\Users\chenxing\0323\only_ppop3o
r'C:\Users\chenxing\0323\extest',
]
save_name = 'loss_difference'
def xy_fn(r, name):
y = smooth(r.progress[name], radius=10)
x = r.progress['misc/total_timesteps']
return x,y
results = plot_util.load_results(path, enable_monitor=False, enable_progress=True)
plot_results(results, xy_fn=xy_fn, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel='CPI objective',row=2)
fig = plt.gcf()
fig.savefig('png'+os.sep+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,967 | 32.33829 | 132 | py |
P3O | P3O-main/plot/plot_progress_kl_divergence.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, namedtuple
from baselines.common.plot_util import smooth,symmetric_ema
import os
rc_fonts = {
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':10,
'ytick.labelsize':10,
"font.family": "times",
"font.size": 10,
'axes.titlesize':10,# title
"legend.fontsize":10,
'figure.figsize': (8.5, 5),
# 'figure.figsize': (7, 7/2.0*0.75),
# "text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
# plt.style.use('seaborn')
from baselines.common import plot_util
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'cyan', 'magenta', 'purple',
'orange', 'teal', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def group_by_seed(taskpath):
path = taskpath.dirname.split(os.sep)[-1].split('_')
return path[0]
def group_by_name(taskpath):
return taskpath.dirname.split(os.sep)[-2]
def default_xy_fn(r):
try:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
except:
y = smooth(r.progress['return-average'], radius=10)
x = r.progress['total-samples']
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
):
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
ll = len(sk2r)
nrows=row
ncols=ll//nrows
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
fmts=['-x', '-+', '-.', '-s','-*', '-^', ]
g2ls = []
g2cs = []
# for (isplit, sk) in enumerate(sk2r.keys()):
for (isplit, sk) in enumerate(['Enduro', 'Breakout', 'BeamRider', 'Ant', 'HalfCheetah', 'Walker2d']):
# plt.gca().set_prop_cycle(markercycle)
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for idx, group in enumerate(sorted(groups)):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(groups.index(group), idx)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
print(isplit, sk)
low = max(x[0] for x in origxs)
# if sk in ['Enduro', 'Breakout', 'BeamRider']:
# high = 9e6
#
# else:
# high = min(x[-1] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color,markevery=default_samples//need_point)
# set_size(4, 3,axarr[idx_row][idx_col])
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
plt.tight_layout()
ax.set_title('('+chr(isplit+97)+') '+sk)
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
# plt.xlabel('('+chr(id+97)+') '+xlabel)
plt.xlabel('timesteps')
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {'ddpo':'P3O','vpgdualclip':'Dual-Clip PPO',
'acktr':'ACKTR','ppo2':'PPO','trpo':'TRPO','a2c':'A2C'}
tt_s = g2ls[0]
ad = g2ls[0]
axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],edgecolor='None', facecolor='None')
return f, axarr
def paper_image():
path = [
# r'C:\Users\chenxing\0323\ppop3o',C:\Users\chenxing\0323\only_ppop3o
r'C:\Users\chenxing\0323\onlyppop3o',
]
save_name = 'max_ratio'
def xy_fn(r):
y = smooth(r.progress['loss/max_ratio'], radius=10)
x = r.progress['misc/total_timesteps']
return x,y
results = plot_util.load_results(path, enable_monitor=False, enable_progress=True)
plot_results(results, xy_fn=xy_fn, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=r'$\max(|r - 1|)$',row=2)
fig = plt.gcf()
plt.show()
# fig.savefig('png'+os.sep+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
# fig.savefig('png/'+save_name+'.pdf',dpi=300, backend='pdf')
if __name__ == '__main__':
paper_image()
| 8,627 | 32.184615 | 132 | py |
P3O | P3O-main/plot/plot_ablation.py | import numpy as np
from collections import defaultdict
from baselines.common.plot_util import smooth,symmetric_ema
import matplotlib.pyplot as plt
from baselines.common import plot_util
import os
import matplotlib
import matplotlib.font_manager
# plt.style.use('seaborn')
rc_fonts = {
#8.5
# 'lines.markeredgewidth': 1,
# "lines.markersize":3,
# "lines.linewidth":1,
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':10,
'ytick.labelsize':10,
"font.family": "times",
"font.size": 10,
'axes.titlesize':10,
"legend.fontsize":10,
'figure.figsize': (8.5, 4.5),
# "text.usetex": True,
# 'text.latex.preview': True,
# 'text.latex.preamble':
# r"""
# \usepackage{times}
# \usepackage{helvet}
# \usepackage{courier}
# """,
}
matplotlib.rcParams.update(rc_fonts)
fmts=['-^', '-v', '-.', '-s', '-*']
# fmts=['-+', '-.', '-s','-*', '-^']
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']
COLORS= ['#4c72b0','#55a868','#c44e52','#8172b2','#ccb974']
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def plot_curves(xy_list, xaxis, yaxis, title):
fig = plt.figure(figsize=(8,2))
maxx = max(xy[0][-1] for xy in xy_list)
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i % len(COLORS)]
plt.scatter(x, y, s=2)
x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes
plt.plot(x, y_mean, color=color)
plt.xlim(minx, maxx)
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel(yaxis)
plt.tight_layout()
fig.canvas.mpl_connect('resize_event', lambda event: plt.tight_layout())
plt.grid(True)
def group_by_seed(taskpath):
return taskpath.dirname.split(os.sep)[-1].split('_')[0]
def group_by_name(taskpath):
return taskpath.dirname.split(os.sep)[-2]
def default_xy_fn(r):
# r.progress['misc/total_timesteps'].values[-1]
try:
        y = np.nan_to_num(r.progress['eprewmean'], nan=0)
y = smooth(y, radius=10)
x = r.progress['misc/total_timesteps']
except:
x = np.cumsum(r.monitor.l)
y = smooth(r.monitor.r, radius=10)
return x,y
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=None,
group_fn=None,
average_group=False,
shaded_std=True,
shaded_err=True,
shaded_line=False,
legend_outside=False,
resample=0,
smooth_step=1.0,
xlabel=None,
ylabel=None,
row=1,
col=1
):
if split_fn is None: split_fn = lambda _ : ''
if group_fn is None: group_fn = lambda _ : ''
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
assert len(sk2r) > 0
assert isinstance(resample, int), "0: don't resample. <integer>: that many samples"
nrows=row
ncols=col
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False)
# f.set_size_inches(inches, inches*0.75/ncols)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
g2ls = []
g2cs = []
# for (isplit, sk) in enumerate(sk2r.keys()):
for (isplit, sk) in enumerate(['Enduro', 'Breakout', 'BeamRider', 'Ant', 'HalfCheetah', 'Walker2d']):
# for (isplit, sk) in enumerate(['Breakout', 'Ant']):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
idx_row = isplit // ncols
idx_col = isplit % ncols
ax = axarr[idx_row][idx_col]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
# print(sorted(groups))
sort_groups_ = sorted(groups)
if "ddpo" in sort_groups_:
id = sort_groups_.index('ddpo')
sort_groups_.pop(id)
sort_groups_.append('ddpo')
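            # Draw 'ddpo' (P3O) last so its curve is rendered on top of the
            # baselines.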
for idx, group in enumerate(sort_groups_):
xys = gresults[group]
if not any(xys):
continue
if group=='ddpo':
color = 'red'
fmt = '-'
else:
color = COLORS[idx % len(COLORS)]
fmt = fmts[idx % len(fmts)]
# print(group, color, fmt)
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
# print(isplit, sk)
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
print(high)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]), \
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
# TODO
need_point=5
axarr[idx_row][idx_col].locator_params(axis='x', nbins=10)
axarr[idx_row][idx_col].locator_params(axis='y', nbins=8)
                    interval = default_samples // need_point + idx * 10  # stagger marker spacing per group
                    l, = axarr[idx_row][idx_col].plot(usex, ymean, fmt, color=color, markevery=interval)
g2l[group] = l
if shaded_err:
if shaded_line:
ax.vlines(usex[::default_samples//20], ymean - ystderr, ymean + ystderr, color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
if shaded_line:
x = usex[::default_samples//need_point]
ymin = ymean - ystd
ymax = ymean + ystd
ax.vlines(x, ymin[::default_samples//need_point], ymax[::default_samples//need_point], color=color,alpha=.5)
else:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
# https://matplotlib.org/users/legend_guide.html
plt.tight_layout()
# ax.set_title('('+chr(isplit+97)+') '+sk, y=-0.4)
ax.set_title('('+chr(isplit+97)+') '+sk)
# ax.set_title(sk)
# add xlabels, but only to the bottom row
if xlabel is not None:
for ax in axarr.flatten():
plt.sca(ax)
plt.xlabel('timesteps')
# add ylabels, but only to left column
if ylabel is not None:
for ax in axarr[:,0]:
plt.sca(ax)
plt.ylabel(ylabel)
g2ls.append(g2l)
tt= {'ddpo':'P3O',
'vpgkl':'P3O-S','vpgsigmoid':'P3O-K','vpg':'P3O-SK'}
tt_s = g2ls[0]
print(tt_s)
ad = {}
for key in tt.keys():
if key in tt_s.keys():
ad[key] = tt_s[key]
legend= axarr[0][0].legend(ad.values(), [tt[g] if g in tt.keys() else g for g in ad],borderaxespad=0)
legend.get_frame().set_alpha(None)
legend.get_frame().set_facecolor((0, 0, 0, 0))
legend.get_frame().set_edgecolor((0, 0, 0, 0))
return f, axarr
if __name__ == '__main__':
path = [
r"C:\Users\chenxing\0323\DDPO\extra_test_ablation"
]
results = plot_util.load_results(path, enable_monitor=True, enable_progress=True)
plot_results(results, split_fn=group_by_name, group_fn=group_by_seed, average_group=True,
shaded_std=True,shaded_err=False, xlabel=X_TIMESTEPS,
ylabel=Y_REWARD,row=2, col=3)
# plt.show()
fig = plt.gcf()
save_name = 'ablation'
fig.savefig('png'+os.sep+save_name+'.pdf',bbox_inches='tight',dpi=300, backend='pdf')
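    # load_results expects a layout like <path>/<EnvName>/<algo>_<seed>/ with a
    # progress.csv and/or *.monitor.csv inside each run directory (inferred
    # from the group/split functions above).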
| 10,055 | 33.556701 | 132 | py |
P3O | P3O-main/plot/read_data.py | import numpy as np
from matplotlib import pyplot as plt
plt.style.use('seaborn')
rc_fonts = {
'lines.markeredgewidth': 1,
"lines.markersize":3,
"lines.linewidth":1,
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':8,
'ytick.labelsize':8,
"font.family": "times",
'axes.titlesize':11,
"legend.fontsize":8,
'figure.figsize': (4, 4*0.8),
"text.usetex": True,
# 'text.latex.preview': True,
'text.latex.preamble':
r"""
\usepackage{times}
\usepackage{helvet}
\usepackage{courier}
""",
}
plt.rcParams.update(rc_fonts)
def accumulate_explained_variance(env):
    """Sum the 'misc/explained_variance' column of progress.csv over four seeds."""
    total = None
    for i in range(1, 5):
        logp = '../extra_test/' + env + '/ddpo_' + str(i) + '/progress.csv'
        with open(logp, 'r') as f:
            logs = f.read().split('\n')
        keys = logs[0].split(',')
        data = {key: [] for key in keys}
        for log in logs[1:]:
            if len(log) < 2:
                continue
            values = log.split(',')
            for key, value in zip(keys, values):
                data[key].append(float(value))
        ev = np.array(data['misc/explained_variance'])
        total = ev if total is None else total + ev
    return total
wev = accumulate_explained_variance('Walker2d')
hev = accumulate_explained_variance('HalfCheetah')
aev = accumulate_explained_variance('Ant')
data = {
'Walker2d':wev/4,
"HalfCheetah":hev/4,
"Ant":aev/4,
}
# figsize=(5,5*0.8)
fig = plt.figure(figsize=(4.25,4.25*0.6))
i=0
COLORS= ['#4c72b0','#55a868','#c44e52','#8172b2','#ccb974']
fmts=['-^', '-v', '-.', '-s', '-*']
for key, value in data.items():
N=30
weights = np.exp(np.linspace(0,1,N))
weights = weights/np.sum(weights)
value = np.convolve(weights, value, mode='valid')
plt.locator_params(axis='x', nbins=10)
plt.locator_params(axis='y', nbins=8)
plt.plot(value,fmts[i],color=COLORS[i],label=key,markevery=60)
i+=1
plt.legend()
plt.ylabel('value')
plt.xlabel('iteration')
fig.suptitle("Explained Variance",y=1)
fig = plt.gcf()
# plt.show()
fig.savefig('../test/explained_variance.pdf',bbox_inches='tight',dpi=300, backend='pdf') | 3,260 | 23.704545 | 88 | py |
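# --- Illustrative sketch (not part of the repo): the smoothing in read_data.py
# above is a convolution with exponentially increasing weights, so the most
# recent of the N samples in each window dominates the average:
import numpy as np
N = 30
weights = np.exp(np.linspace(0, 1, N))
weights = weights / weights.sum()        # normalize to a weighted average
signal = np.random.randn(500).cumsum()
smoothed = np.convolve(weights, signal, mode='valid')
assert len(smoothed) == len(signal) - N + 1  # 'valid' trims the edges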
P3O | P3O-main/analysis/hebing.py | with open('plot_data', 'r') as f:
data = eval(f.read())
with open('plot_data_lr_ctn', 'r') as f:
data2 = eval(f.read())
data.update(data2)
with open('plot_data_lr','w') as f:
f.write(str(data)) | 207 | 22.111111 | 40 | py |
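# --- Illustrative sketch (not part of the repo): hebing.py round-trips dicts
# with str()/eval(). For files you don't fully trust, ast.literal_eval parses
# the same literal syntax without executing arbitrary code:
import ast
with open('plot_data_lr', 'r') as f:
    data = ast.literal_eval(f.read())  # safe for dict/list/str/number literals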
P3O | P3O-main/analysis/analysisi.py | plot_data = {}
path = '/home/chenxing/Downloads/ss/sense_ana'
for env in ["Enduro", 'BeamRider', "Breakout"]:
lr_data = {}
for i in range(1,11):
lr = str(i/100.0)
res = 0
file_cont=0
for j in range(4):
with open(path+'/'+env+'/'+str(lr)+'_'+str(j)+'/0.0.monitor.csv', 'r') as f:
data = f.read()
csv_data = data.split('\n')[-21:-1]
if len(csv_data)!=20:
print(path+'/'+env+'/'+str(lr)+'_'+str(j)+'/0.0.monitor.csv')
file_cont+=1
continue
rtt = 0
count = 0
for cdata in csv_data:
tmp = cdata.split(',')
if len(tmp)!=3:
count+=1
print(path+'/'+env+'/'+str(lr)+'_'+str(j)+'/0.0.monitor.csv')
print(cdata)
continue
rtt += float(tmp[0])
res += rtt/(20.0 -count)
lr_data[lr] = res/(4.0-file_cont)
plot_data[env]= lr_data
with open('plot_data','w') as f:
f.write(str(plot_data)) | 1,105 | 31.529412 | 88 | py |
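# --- Illustrative sketch (not part of the repo): the same "average the last 20
# episode returns" computation using the csv module instead of manual string
# splitting. baselines monitor files start with a one-line JSON header before
# the 'r,l,t' column names; the path argument below is a placeholder.
import csv
def mean_last_returns(path, n=20):
    with open(path) as f:
        next(f)                           # skip the '#{...}' JSON header line
        rows = list(csv.DictReader(f))
    returns = [float(row['r']) for row in rows[-n:]]
    return sum(returns) / len(returns)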
P3O | P3O-main/analysis/plot_analysis.py | with open('plot_data_lr', 'r') as f:
data = eval(f.read())
from matplotlib import pyplot as plt
import matplotlib
import numpy as np
plt.style.use('seaborn')
rc_fonts = {
'lines.markeredgewidth': 1,
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.labelsize':12,
'ytick.labelsize':12,
"font.family": "serif",
"font.size": 12,
'axes.titlesize':12,
"legend.fontsize":10,
# 'figure.figsize': (12, 12/3.0*0.75),
'figure.figsize': (7, 7/2.0*0.7),
"text.usetex": True,
# 'text.latex.preview': True,
'text.latex.preamble':
r"""
\usepackage{times}
\usepackage{helvet}
\usepackage{courier}
""",
}
matplotlib.rcParams.update(rc_fonts)
f, axarr = plt.subplots(1, 2, sharex=False, squeeze=False)
ll ={}
fmts=['-xr', '-+y', '-.b', '-s','-*', '-^', ]
for env, fmt in zip(data.keys(), fmts):
if env in ['HalfCheetah', 'Ant', 'Walker2d']:
ax = axarr[0][1]
else:
ax = axarr[0][0]
x = [float(k) for k in data[env].keys()]
y = np.array(list(data[env].values()))
y = y /(y.max()-y.min())
# yp = (y - y.min())/(y.max()-y.min())
# yp = y.std() + y.mean()
l = ax.plot(list(x), list(y),fmt, label=env)
ll[env] = l
legend = ax.legend()
legend.get_frame().set_alpha(None)
legend.get_frame().set_facecolor((0, 0, 0, 0))
legend.get_frame().set_edgecolor((0, 0, 0, 0))
# axarr[0][0].ticklabel_format(style='sci',scilimits=(0,0),axis='x')
axarr[0][0].set_ylabel('Normalized Reward')
# axarr[0][0].set_xlabel('(a)Learning Rate')
axarr[0][1].set_title('(b)Continuous Environment')
axarr[0][0].set_title('(a)Discrete Environment')
plt.tight_layout()
# legend = f.legend(ll, bbox_to_anchor=(0.5,-0.08), loc="lower center",bbox_transform=f.transFigure, ncol=6,borderaxespad=0)
# legend.get_frame().set_alpha(None)
# legend.get_frame().set_facecolor((0, 0, 0, 0))
# legend.get_frame().set_edgecolor((0, 0, 0, 0))
# plt.tight_layout()
save_name='lr_para'
f.savefig(save_name+'.pdf',bbox_inches='tight',dpi=300)
# plt.show() | 2,067 | 30.333333 | 124 | py |
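# --- Illustrative note (not part of the repo): plot_analysis.py divides by the
# range, y / (y.max() - y.min()), which rescales but keeps the offset; the
# commented-out variant above is full min-max normalization, which also shifts
# the values into [0, 1]:
import numpy as np
y = np.array([5., 7., 9.])
print(y / (y.max() - y.min()))              # [1.25 1.75 2.25] -- scaled only
print((y - y.min()) / (y.max() - y.min()))  # [0.  0.5 1. ]    -- scaled and shifted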
P3O | P3O-main/analysis/main_dst.py | import os
import sys
from subprocess import Popen, PIPE, STDOUT, DEVNULL
import time
def run():
curenv = os.environ.copy()
cmds = []
curenv['PYTHONPATH'] = "/home/chenxing/workspace/baselines"
curenv['CUDA_VISIBLE_DEVICES'] = "0,1"
# curenv['LD_LIBRARY_PATH'] = "$LD_LIBRARY_PATH:/home/chenxing/.mujoco/mujoco200/bin"
games = ["HalfCheetah"]
for seed in range(4):
for env in games:
for lr in range(1,11):
learningrate = lr/10.0
cmd=[
sys.executable,
'-u',
'-m',
'baselines.run',
'--alg=ddpo',
'--num_env=1',
'--kl_coef='+str(learningrate),
'--seed='+str(seed),
'--env='+env+'-v2',
'--num_timesteps=1e5',
'--log_path=/home/chenxing/sense_ana/'+env+'/'+str(learningrate)+'_'+str(seed)
]
cmds.append(cmd)
running = []
while cmds:
while len(running)<24:
if len(cmds):
cmd = cmds.pop(-1)
print(cmd, 'start')
running.append(Popen(cmd, env=curenv,stdin=PIPE, stdout=DEVNULL, stderr=STDOUT))
else:
break
        # rebuild the list instead of popping while iterating, which skips entries
        running = [p for p in running if p.poll() is None]
time.sleep(300)
if __name__ == "__main__":
run() | 1,507 | 31.085106 | 98 | py |
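# --- Illustrative sketch (not part of the repo): the launcher above keeps at
# most 24 jobs alive by polling Popen objects. concurrent.futures expresses the
# same throttling declaratively; the echo commands are placeholders.
import subprocess
from concurrent.futures import ThreadPoolExecutor
def launch(cmd):
    return subprocess.run(cmd, capture_output=True).returncode
cmds = [['echo', str(i)] for i in range(100)]
with ThreadPoolExecutor(max_workers=24) as pool:
    for returncode in pool.map(launch, cmds):
        assert returncode == 0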
DeepForcedAligner | DeepForcedAligner-main/scratch_pred.py | import argparse
import numpy as np
import torch
from dfa.audio import Audio
from dfa.duration_extraction import extract_durations_with_dijkstra, extract_durations_beam
from dfa.model import Aligner
from dfa.text import Tokenizer
from dfa.utils import read_metafile
from dfa.utils import read_config
from dfa.paths import Paths
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Preprocessing for DeepForcedAligner.')
parser.add_argument('--config', '-c', default='config.yaml', help='Points to the config file.')
args = parser.parse_args()
config = read_config(args.config)
paths = Paths.from_config(config['paths'])
checkpoint = torch.load('/Volumes/data/logs/dfa/latest_model.pt', map_location=torch.device('cpu'))
config = checkpoint['config']
symbols = checkpoint['symbols']
audio = Audio.from_config(config['audio'])
tokenizer = Tokenizer(symbols)
model = Aligner.from_checkpoint(checkpoint).eval()
print(f'model step {model.get_step()}')
main_dir = paths.dataset_dir
text_dict = read_metafile(paths.metadata_path)
file_id = list(text_dict.keys())[0]
text = text_dict[file_id]
target = np.array(tokenizer(text))
mel = np.load((paths.mel_dir / file_id).with_suffix('.npy'))
mel = torch.tensor(mel).float().unsqueeze(0)
pred = model(mel)
pred_max = pred[0].max(1)[1].detach().cpu().numpy().tolist()
pred_text = tokenizer.decode(pred_max)
pred = torch.softmax(pred, dim=-1)
pred = pred.detach()[0].numpy()
target_len = target.shape[0]
pred_len = pred.shape[0]
durations = extract_durations_with_dijkstra(target, pred)
durations_beam, sequences = extract_durations_beam(target, pred, 5)
expanded_string = ''.join([text[i] * dur for i, dur in enumerate(list(durations))])
print(text)
print(pred_text)
print(expanded_string)
print(tokenizer.decode(target[sequences[0][0]]))
print(tokenizer.decode(target[sequences[-1][0]]))
print(durations)
print(durations_beam[0])
print(durations_beam[-1])
| 2,103 | 34.661017 | 103 | py |
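# --- Illustrative sketch (not part of the repo): what the expanded_string line
# in scratch_pred.py computes. Each character is repeated by its duration in
# mel frames, so the expansion carries one character per frame:
text = 'abc'
durations = [2, 1, 3]
expanded = ''.join(text[i] * dur for i, dur in enumerate(durations))
assert expanded == 'aabccc' and len(expanded) == sum(durations)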
DeepForcedAligner | DeepForcedAligner-main/extract_durations.py | import argparse
from multiprocessing import cpu_count
from multiprocessing.pool import Pool
from pathlib import Path
from typing import Tuple
import numpy as np
import torch
import tqdm
from dfa.dataset import new_dataloader
from dfa.duration_extraction import extract_durations_with_dijkstra, extract_durations_beam
from dfa.model import Aligner
from dfa.paths import Paths
from dfa.text import Tokenizer
from dfa.utils import read_config, to_device, unpickle_binary
class Extractor:
def __init__(self, method):
self.method = method
def extract_durations_for_item(self, item_file: Tuple[dict, Path, Path]) -> Tuple[dict, np.array]:
item, token_file, pred_file = item_file
tokens_len, mel_len = item['tokens_len'], item['mel_len']
        tokens = np.load(str(token_file), allow_pickle=False).astype(np.int32)
tokens = tokens[:tokens_len]
pred = np.load(str(pred_file), allow_pickle=False)
pred = pred[:mel_len, :]
if self.method == 'beam':
durations, _ = extract_durations_beam(tokens, pred, 10)
durations = durations[0]
else:
durations = extract_durations_with_dijkstra(tokens, pred)
return item, durations
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Duration extraction for DeepForcedAligner.')
parser.add_argument('--config', '-c', default='config2.yaml', type=str, help='Points to the config file.')
parser.add_argument('--model', '-m', default=None, type=str, help='Points to the a model file to restore.')
parser.add_argument('--target', '-t', default='outputs', type=str, help='Target path')
parser.add_argument('--batch_size', '-b', default=8, type=int, help='Batch size for inference.')
parser.add_argument('--num_workers', '-w', metavar='N', type=int, default=cpu_count() - 1,
help='The number of worker threads to use for preprocessing')
args = parser.parse_args()
config = read_config(args.config)
paths = Paths.from_config(config['paths'])
model_path = args.model if args.model else paths.checkpoint_dir / 'latest_model.pt'
args.target = '_'.join([str(paths.data_dir), str(args.target)])
print(f'Target dir: {args.target}')
dur_target_dir, pred_target_dir = Path(args.target) / 'durations', Path(args.target) / 'predictions'
dur_target_dir.mkdir(parents=True, exist_ok=True)
pred_target_dir.mkdir(parents=True, exist_ok=True)
print(f'Loading model from {model_path}')
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
model = Aligner.from_checkpoint(checkpoint).eval().to(device)
print(f'Loaded model with step {model.get_step()} on device: {device}')
symbols = unpickle_binary(paths.data_dir / 'symbols.pkl')
assert symbols == checkpoint['symbols'], 'Symbols from dataset do not match symbols from model checkpoint!'
tokenizer = Tokenizer(symbols)
dataloader = new_dataloader(dataset_path=paths.data_dir / 'dataset.pkl', mel_dir=paths.mel_dir,
token_dir=paths.token_dir, batch_size=args.batch_size)
print(f'Performing STT model inference...')
for i, batch in tqdm.tqdm(enumerate(dataloader), total=len(dataloader)):
tokens, mel, tokens_len, mel_len = to_device(batch, device)
pred_batch = model(mel)
for b in range(tokens.size(0)):
this_mel_len = mel_len[b]
pred = pred_batch[b, :this_mel_len, :]
pred = torch.softmax(pred, dim=-1)
pred = pred.detach().cpu().numpy()
item_id = batch['item_id'][b]
np.save(pred_target_dir / f'{item_id}.npy', pred, allow_pickle=False)
print(f'Extracting durations...')
dataset = unpickle_binary(paths.data_dir / 'dataset.pkl')
item_files = []
for item in dataset:
file_name = item['item_id'] + '.npy'
token_file, pred_file = paths.token_dir / file_name, pred_target_dir / file_name
item_files.append((item, token_file, pred_file))
pool = Pool(processes=args.num_workers)
extr_fx = Extractor(method=config['durations']['method']).extract_durations_for_item
mapper = pool.imap_unordered(extr_fx, item_files)
for i, (item, durations) in tqdm.tqdm(enumerate(mapper), total=len(item_files)):
item_id = item['item_id']
np.save(dur_target_dir / f'{item_id}.npy', durations, allow_pickle=False) | 4,546 | 45.397959 | 111 | py |
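# --- Illustrative sketch (not part of the repo): why the inference loop above
# slices with mel_len before the softmax. Batched mels are zero-padded, so
# frames beyond mel_len are padding, not audio; shapes here are example values.
import torch
logits = torch.randn(8, 400, 60)         # (batch, padded frames, symbols)
mel_len = torch.randint(100, 400, (8,))  # true frame count per item
for b in range(logits.size(0)):
    pred = logits[b, :mel_len[b], :]     # drop the padded frames
    pred = torch.softmax(pred, dim=-1)   # per-frame symbol probabilities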
DeepForcedAligner | DeepForcedAligner-main/train.py | import argparse
import torch
from torch import optim
from dfa.model import Aligner
from dfa.paths import Paths
from dfa.utils import read_config, unpickle_binary
from trainer import Trainer
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Preprocessing for DeepForcedAligner.')
parser.add_argument('--config', '-c', default='config.yaml', help='Points to the config file.')
parser.add_argument('--checkpoint', '-cp', default=None, help='Points to the a model file to restore.')
args = parser.parse_args()
config = read_config(args.config)
paths = Paths.from_config(config['paths'])
symbols = unpickle_binary(paths.data_dir / 'symbols.pkl')
if args.checkpoint:
print(f'Restoring model from checkpoint: {args.checkpoint}')
checkpoint = torch.load(args.checkpoint, map_location=torch.device('cpu'))
model = Aligner.from_checkpoint(checkpoint)
assert checkpoint['symbols'] == symbols, 'Symbols from data do not match symbols from model!'
print(f'Restored model with step {model.get_step()}')
else:
model_path = paths.checkpoint_dir / 'latest_model.pt'
if model_path.exists():
print(f'Restoring model from checkpoint: {model_path}')
checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
model = Aligner.from_checkpoint(checkpoint)
assert checkpoint['symbols'] == symbols, 'Symbols from data do not match symbols from model!'
print(f'Restored model with step {model.get_step()}')
else:
print(f'Initializing new model from config {args.config}')
model = Aligner(n_mels=config['audio']['n_mels'],
num_symbols=len(symbols)+1,
**config['model'])
            optimizer = optim.Adam(model.parameters(), lr=1e-4)
            checkpoint = {'model': model.state_dict(), 'optim': optimizer.state_dict(),
                          'config': config, 'symbols': symbols}
trainer = Trainer(paths=paths)
trainer.train(checkpoint, train_params=config['training'])
| 2,128 | 45.282609 | 107 | py |
DeepForcedAligner | DeepForcedAligner-main/preprocess.py | import argparse
from multiprocessing import cpu_count
from multiprocessing.pool import Pool
from pathlib import Path
from typing import Dict, Union
import numpy as np
import tqdm
from dfa.audio import Audio
from dfa.paths import Paths
from dfa.text import Tokenizer
from dfa.utils import get_files, read_config, pickle_binary, read_metafile
class Preprocessor:
"""Performs mel extraction and tokenization and stores the resulting torch tensors."""
def __init__(self,
audio: Audio,
tokenizer: Tokenizer,
paths: Paths,
text_dict: Dict[str, str],
mel_dim_last=True) -> None:
self.audio = audio
self.paths = paths
self.tokenizer = tokenizer
self.text_dict = text_dict
self.mel_dim_last = mel_dim_last
def __call__(self, file_path: Path) -> Dict[str, Union[str, int]]:
item_id = file_path.stem
if self.paths.precomputed_mels:
mel = np.load(self.paths.precomputed_mels / f'{item_id}.npy')
if not self.mel_dim_last:
mel = mel.T
assert mel.shape[1] == self.audio.n_mels, \
f'Expected mel shape to be of (None, {self.audio.n_mels}), but was: {mel.shape}! ' \
f'Consider setting config/audio/mel_dim_last: {not self.mel_dim_last}'
else:
wav = self.audio.load_wav(file_path)
mel = self.audio.wav_to_mel(wav)
np.save(self.paths.mel_dir / f'{item_id}.npy', mel, allow_pickle=False)
text = self.text_dict[item_id]
tokens = np.array(self.tokenizer(text)).astype(np.int32)
np.save(self.paths.token_dir / f'{item_id}.npy', tokens, allow_pickle=False)
return {'item_id': item_id, 'tokens_len': tokens.shape[0], 'mel_len': mel.shape[0]}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Preprocessing for DeepForcedAligner.')
parser.add_argument('--config', '-c', help='Points to the config file.', default='config.yaml')
parser.add_argument('--num_workers', '-w', metavar='N', type=int, default=cpu_count() - 1,
help='The number of worker threads to use for preprocessing')
args = parser.parse_args()
config = read_config(args.config)
paths = Paths.from_config(config['paths'])
audio = Audio.from_config(config['audio'])
mel_dim_last = config['preprocessing']['mel_dim_last']
print(f'Config: {args.config}\n'
f'Target data directory: {paths.data_dir}')
text_dict, audio_files = read_metafile(paths.metadata_path, paths.dataset_dir, paths.actual_dur_path)
symbols = set()
for text in text_dict.values():
symbols.update(set(text))
symbols = sorted(list(symbols))
audio_files = [x for x in audio_files if x.stem in text_dict]
tokenizer = Tokenizer(symbols)
preprocessor = Preprocessor(audio=audio, tokenizer=tokenizer, paths=paths,
text_dict=text_dict, mel_dim_last=mel_dim_last)
pool = Pool(processes=args.num_workers)
mapper = pool.imap_unordered(preprocessor, audio_files)
dataset = []
for i, item in tqdm.tqdm(enumerate(mapper), total=len(audio_files)):
dataset.append(item)
pickle_binary(dataset, paths.data_dir / 'dataset.pkl')
pickle_binary(symbols, paths.data_dir / 'symbols.pkl')
print('Preprocessing done.')
| 3,446 | 39.081395 | 105 | py |
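# --- Illustrative sketch (not part of the repo): the preprocessing pattern used
# above -- a picklable callable mapped over inputs with Pool.imap_unordered,
# which yields results as workers finish rather than in submission order.
from multiprocessing import Pool
def work(x):
    return x * x
if __name__ == '__main__':
    with Pool(processes=4) as pool:
        for result in pool.imap_unordered(work, range(10)):
            print(result)  # arrival order is nondeterministic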
DeepForcedAligner | DeepForcedAligner-main/trainer.py | import numpy as np
import torch
import tqdm
from torch.nn import CTCLoss
from torch.optim import Adam
from torch.utils.tensorboard import SummaryWriter
from dfa.dataset import new_dataloader, get_longest_mel_id
from dfa.duration_extraction import extract_durations_with_dijkstra
from dfa.model import Aligner
from dfa.paths import Paths
from dfa.text import Tokenizer
from dfa.utils import to_device
class Trainer:
def __init__(self, paths: Paths) -> None:
self.paths = paths
self.writer = SummaryWriter(log_dir=paths.checkpoint_dir / 'tensorboard')
self.ctc_loss = CTCLoss()
# Used for generating plots
longest_id = get_longest_mel_id(dataset_path=self.paths.data_dir / 'dataset.pkl')
self.longest_mel = np.load(str(paths.mel_dir / f'{longest_id}.npy'), allow_pickle=False)
self.longest_tokens = np.load(str(paths.token_dir / f'{longest_id}.npy'), allow_pickle=False)
def train(self, checkpoint: dict, train_params: dict) -> None:
lr = train_params['learning_rate']
epochs = train_params['epochs']
batch_size = train_params['batch_size']
ckpt_steps = train_params['checkpoint_steps']
plot_steps = train_params['plot_steps']
config = checkpoint['config']
symbols = checkpoint['symbols']
tokenizer = Tokenizer(symbols)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = Aligner.from_checkpoint(checkpoint).to(device)
optim = Adam(model.parameters())
optim.load_state_dict(checkpoint['optim'])
for g in optim.param_groups:
g['lr'] = lr
dataloader = new_dataloader(dataset_path=self.paths.data_dir / 'dataset.pkl', mel_dir=self.paths.mel_dir,
token_dir=self.paths.token_dir, batch_size=batch_size)
start_epoch = model.get_step() // len(dataloader)
for epoch in range(start_epoch + 1, epochs + 1):
pbar = tqdm.tqdm(enumerate(dataloader, 1), total=len(dataloader))
for i, batch in pbar:
tokens, mel, tokens_len, mel_len = to_device(batch, device)
pred = model(mel)
pred = pred.transpose(0, 1).log_softmax(2)
loss = self.ctc_loss(pred, tokens, mel_len, tokens_len)
pbar.set_description(desc=f'Epoch: {epoch} | Step {model.get_step()} '
f'| Loss: {loss:#.4}', refresh=True)
if not torch.isnan(loss) and not torch.isinf(loss):
optim.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optim.step()
self.writer.add_scalar('CTC_Loss', loss.item(), global_step=model.get_step())
self.writer.add_scalar('Params/batch_size', batch_size, global_step=model.get_step())
self.writer.add_scalar('Params/learning_rate', lr, global_step=model.get_step())
if model.get_step() % ckpt_steps == 0:
torch.save({'model': model.state_dict(), 'optim': optim.state_dict(),
'config': config, 'symbols': symbols},
self.paths.checkpoint_dir / f'model_step_{model.get_step() // 1000}k.pt')
if model.get_step() % plot_steps == 0:
self.generate_plots(model, tokenizer)
latest_checkpoint = self.paths.checkpoint_dir / 'latest_model.pt'
torch.save({'model': model.state_dict(), 'optim': optim.state_dict(),
'config': config, 'symbols': symbols},
latest_checkpoint)
def generate_plots(self, model: Aligner, tokenizer: Tokenizer) -> None:
model.eval()
device = next(model.parameters()).device
longest_mel = torch.tensor(self.longest_mel).unsqueeze(0).float().to(device)
pred = model(longest_mel)[0].detach().cpu().softmax(dim=-1)
durations = extract_durations_with_dijkstra(self.longest_tokens, pred.numpy())
pred_max = pred.max(1)[1].numpy().tolist()
pred_text = tokenizer.decode(pred_max)
target_text = tokenizer.decode(self.longest_tokens)
target_duration_rep = ''.join(c * durations[i] for i, c in enumerate(target_text))
self.writer.add_text('Text/Prediction', ' ' + pred_text, global_step=model.get_step())
self.writer.add_text('Text/Target_Duration_Repeated',
' ' + target_duration_rep, global_step=model.get_step())
self.writer.add_text('Text/Target', ' ' + target_text, global_step=model.get_step())
model.train()
| 4,759 | 45.666667 | 113 | py |
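# --- Illustrative sketch (not part of the repo): the tensor shapes CTCLoss
# expects, matching the transpose/log_softmax in Trainer.train above. Class 0
# is the CTC blank, which is why the repo's Tokenizer reserves index 0 for the
# pad token; the dimensions below are example values.
import torch
from torch.nn import CTCLoss
T, N, C, S = 100, 4, 40, 30                # frames, batch, classes, target len
log_probs = torch.randn(N, T, C).transpose(0, 1).log_softmax(2)  # -> (T, N, C)
targets = torch.randint(1, C, (N, S))      # token ids; 0 is kept for the blank
input_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.full((N,), S, dtype=torch.long)
loss = CTCLoss()(log_probs, targets, input_lengths, target_lengths)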
DeepForcedAligner | DeepForcedAligner-main/dfa/utils.py | import pickle
import os
from pathlib import Path
from typing import Dict, List, Any, Tuple, Union
import torch
import yaml
def read_metafile(path: str, folder, dur_path) -> Tuple[Dict[str, str], List[Path]]:
text_dict = {}
txt_files = []
audio_files = []
print(path)
for filename in os.listdir(folder):
if filename.startswith(str(path)):
txt_files.extend(get_files(os.path.join(folder, filename), '.txt'))
audio_files.extend(get_files(os.path.join(folder, filename), '.wav'))
for textfile in txt_files:
with open(str(textfile), 'r') as f:
line = f.read()
text_dict[textfile.stem] = line
mapping = {
'Hindi_M':'dur_hi_m',
'Hindi_F':'dur_hi_f',
'Telugu_M':'dur_te_m',
'Telugu_F':'dur_te_f',
'Marathi_M':'dur_mr_m',
'Marathi_F':'dur_mr_f',
}
with open(os.path.join(dur_path, mapping[path.stem]), 'r') as f:
lines = f.read().split('\n')[:-1]
lines = set([Path(l.split('\t')[0]).stem for l in lines if float(l.split('\t')[-1]) > 2])
text_dict = {t:text_dict[t] for t in text_dict if t in lines}
return text_dict, audio_files
def read_config(path: str) -> Dict[str, Any]:
with open(path, 'r') as stream:
config = yaml.load(stream, Loader=yaml.FullLoader)
return config
def save_config(config: Dict[str, Any], path: str) -> None:
with open(path, 'w+', encoding='utf-8') as stream:
yaml.dump(config, stream, default_flow_style=False)
def get_files(path: str, extension='.wav') -> List[Path]:
return list(Path(path).expanduser().resolve().rglob(f'*{extension}'))
def pickle_binary(data: object, file: Union[str, Path]) -> None:
with open(str(file), 'wb') as f:
pickle.dump(data, f)
def unpickle_binary(file: Union[str, Path]) -> Any:
with open(str(file), 'rb') as f:
return pickle.load(f)
def to_device(batch: dict, device: torch.device) -> tuple:
tokens, mel, tokens_len, mel_len = batch['tokens'], batch['mel'], \
batch['tokens_len'], batch['mel_len']
tokens, mel, tokens_len, mel_len = tokens.to(device), mel.to(device), \
tokens_len.to(device), mel_len.to(device)
return tokens, mel, tokens_len, mel_len | 2,358 | 33.188406 | 93 | py |
DeepForcedAligner | DeepForcedAligner-main/dfa/duration_extraction.py | from typing import Tuple
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import dijkstra
def to_node_index(i, j, cols):
return cols * i + j
def from_node_index(node_index, cols):
return node_index // cols, node_index % cols
def to_adj_matrix(mat):
rows = mat.shape[0]
cols = mat.shape[1]
row_ind = []
col_ind = []
data = []
for i in range(rows):
for j in range(cols):
node = to_node_index(i, j, cols)
if j < cols - 1:
right_node = to_node_index(i, j + 1, cols)
weight_right = mat[i, j + 1]
row_ind.append(node)
col_ind.append(right_node)
data.append(weight_right)
            if i < rows - 1:
bottom_node = to_node_index(i + 1, j, cols)
weight_bottom = mat[i + 1, j]
row_ind.append(node)
col_ind.append(bottom_node)
data.append(weight_bottom)
if i < rows - 1 and j < cols - 1:
bottom_right_node = to_node_index(i + 1, j + 1, cols)
weight_bottom_right = mat[i + 1, j + 1]
row_ind.append(node)
col_ind.append(bottom_right_node)
data.append(weight_bottom_right)
adj_mat = coo_matrix((data, (row_ind, col_ind)), shape=(rows * cols, rows * cols))
return adj_mat.tocsr()
def extract_durations_with_dijkstra(tokens: np.array, pred: np.array) -> np.array:
"""
Extracts durations from the attention matrix by finding the shortest monotonic path from
top left to bottom right.
"""
pred_max = pred[:, tokens]
path_probs = 1. - pred_max
adj_matrix = to_adj_matrix(path_probs)
dist_matrix, predecessors = dijkstra(csgraph=adj_matrix, directed=True,
indices=0, return_predecessors=True)
path = []
pr_index = predecessors[-1]
while pr_index != 0:
path.append(pr_index)
pr_index = predecessors[pr_index]
path.reverse()
# append first and last node
path = [0] + path + [dist_matrix.size-1]
cols = path_probs.shape[1]
mel_text = {}
durations = np.zeros(tokens.shape[0], dtype=np.int32)
# collect indices (mel, text) along the path
for node_index in path:
i, j = from_node_index(node_index, cols)
mel_text[i] = j
for j in mel_text.values():
durations[j] += 1
return durations
def extract_durations_beam(tokens: np.array, pred: np.array, k: int) -> Tuple[list, list]:
data = pred[:, tokens]
sequences = [[[0], - np.log(data[0,0])]] # always start on first position
for row in data[1:]:
all_candidates = list()
# expand each current candidate
for i in range(len(sequences)):
seq, score = sequences[i]
for j in [seq[-1], seq[-1]+1]: # only allow 2 possible moves
if j < data.shape[-1]:
candidate = [seq + [j], score - np.log(row[j])]
else:
candidate = [seq + [j], np.inf]
all_candidates.append(candidate)
# order all candidates by score
ordered = sorted(all_candidates, key=lambda tup: tup[1])
# select k best
sequences = ordered[:k]
durations = []
for sequence in sequences:
durations.append(np.bincount(sequence[0]))
return durations, sequences | 3,448 | 30.642202 | 92 | py |
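# --- Illustrative sketch (not part of the repo, assumes the dfa package is on
# the path): duration extraction on a toy alignment matrix. Rows are mel
# frames, columns index the token sequence via pred[:, tokens], and the
# monotonic path makes the durations sum to the number of frames.
import numpy as np
from dfa.duration_extraction import extract_durations_with_dijkstra
tokens = np.array([0, 1, 2])              # token ids, one column each in pred
pred = np.array([[.90, .05, .05],         # 5 frames x 3 symbols, softmaxed
                 [.80, .10, .10],
                 [.10, .80, .10],
                 [.10, .10, .80],
                 [.05, .05, .90]])
durations = extract_durations_with_dijkstra(tokens, pred)
assert durations.sum() == pred.shape[0]   # one path step per mel frame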
DeepForcedAligner | DeepForcedAligner-main/dfa/audio.py | import librosa
import numpy as np
class Audio:
"""Performs audio processing such as generating mel specs and normalization."""
def __init__(self,
n_mels: int,
sample_rate: int,
hop_length: int,
win_length: int,
n_filters: int,
fmin: int,
fmax: int,
power):
self.n_mels = n_mels
self.sample_rate = sample_rate
self.hop_length = hop_length
self.win_length = win_length
self.n_filters = n_filters
self.fmin = fmin
self.fmax = fmax
self.power = power
def load_wav(self, path):
wav, _ = librosa.load(path, sr=self.sample_rate)
return wav
def wav_to_mel(self, y):
mel = librosa.feature.melspectrogram(
y=y,
sr=self.sample_rate,
n_fft=self.n_filters,
n_mels=self.n_mels,
hop_length=self.hop_length,
win_length=self.win_length,
fmin=self.fmin,
fmax=self.fmax,
power=self.power)
mel = mel.T
return self.normalize(mel)
@staticmethod
def normalize(mel):
S = np.clip(mel, a_min=1.0e-5, a_max=None)
return np.log(S)
@staticmethod
def denormalize(mel):
return np.exp(mel)
@classmethod
def from_config(cls, config):
return cls(
sample_rate=config['sample_rate'],
n_filters=config['n_filters'],
n_mels=config['n_mels'],
win_length=config['win_length'],
hop_length=config['hop_length'],
fmin=config['fmin'],
fmax=config['fmax'],
power=config['power'])
| 1,783 | 26.875 | 83 | py |
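# --- Illustrative sketch (not part of the repo, assumes the dfa package is on
# the path): typical use of the Audio class above. Every number here is an
# assumed example value, not the repo's config, and 'sample.wav' is a
# placeholder path.
from dfa.audio import Audio
audio = Audio(n_mels=80, sample_rate=22050, hop_length=256, win_length=1024,
              n_filters=1024, fmin=0, fmax=8000, power=1)
wav = audio.load_wav('sample.wav')
mel = audio.wav_to_mel(wav)   # shape (frames, n_mels), log-normalized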
DeepForcedAligner | DeepForcedAligner-main/dfa/model.py | import torch
import torch.nn as nn
class BatchNormConv(nn.Module):
def __init__(self, in_channels: int, out_channels: int, kernel_size: int):
super().__init__()
self.conv = nn.Conv1d(
in_channels, out_channels, kernel_size,
stride=1, padding=kernel_size // 2, bias=False)
self.bnorm = nn.BatchNorm1d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = x.transpose(1, 2)
x = self.conv(x)
x = self.relu(x)
x = self.bnorm(x)
x = x.transpose(1, 2)
return x
class Aligner(torch.nn.Module):
def __init__(self,
n_mels: int,
num_symbols: int,
lstm_dim: int,
conv_dim: int) -> None:
super().__init__()
self.register_buffer('step', torch.tensor(1, dtype=torch.int))
self.convs = nn.ModuleList([
BatchNormConv(n_mels, conv_dim, 5),
BatchNormConv(conv_dim, conv_dim, 5),
BatchNormConv(conv_dim, conv_dim, 5),
])
self.rnn = torch.nn.LSTM(conv_dim, lstm_dim, batch_first=True, bidirectional=True)
self.lin = torch.nn.Linear(2 * lstm_dim, num_symbols)
def forward(self, x):
        if self.training:
self.step += 1
for conv in self.convs:
x = conv(x)
x, _ = self.rnn(x)
x = self.lin(x)
return x
def get_step(self):
return self.step.data.item()
@classmethod
def from_checkpoint(cls, checkpoint: dict) -> 'Aligner':
config = checkpoint['config']
symbols = checkpoint['symbols']
model = Aligner(n_mels=config['audio']['n_mels'],
num_symbols=len(symbols) + 1,
**config['model'])
model.load_state_dict(checkpoint['model'])
return model | 1,868 | 29.639344 | 90 | py |
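# --- Illustrative sketch (not part of the repo, assumes the dfa package is on
# the path): the Aligner above maps a mel batch to per-frame symbol logits;
# the dimensions are assumed example values.
import torch
from dfa.model import Aligner
model = Aligner(n_mels=80, num_symbols=40, lstm_dim=256, conv_dim=256)
mel = torch.randn(2, 500, 80)   # (batch, frames, n_mels)
logits = model(mel)             # (batch, frames, num_symbols)
assert logits.shape == (2, 500, 40)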
DeepForcedAligner | DeepForcedAligner-main/dfa/dataset.py | from pathlib import Path
from random import Random
from typing import List
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.sampler import Sampler
from dfa.utils import unpickle_binary
class AlignerDataset(Dataset):
def __init__(self, item_ids: List[str], mel_dir: Path, token_dir: Path):
super().__init__()
self.item_ids = item_ids
self.mel_dir = mel_dir
self.token_dir = token_dir
def __getitem__(self, index):
item_id = self.item_ids[index]
mel = np.load(str(self.mel_dir / f'{item_id}.npy'), allow_pickle=False)
tokens = np.load(str(self.token_dir / f'{item_id}.npy'), allow_pickle=False)
mel = torch.tensor(mel).float()
tokens = torch.tensor(tokens).long()
return {'item_id': item_id, 'tokens': tokens, 'mel': mel,
'tokens_len': tokens.size(0), 'mel_len': mel.size(0)}
def __len__(self):
return len(self.item_ids)
# From https://github.com/fatchord/WaveRNN/blob/master/utils/dataset.py
class BinnedLengthSampler(Sampler):
def __init__(self, mel_lens: torch.tensor, batch_size: int, bin_size: int, seed=42):
_, self.idx = torch.sort(torch.tensor(mel_lens))
self.batch_size = batch_size
self.bin_size = bin_size
self.random = Random(seed)
assert self.bin_size % self.batch_size == 0
def __iter__(self):
idx = self.idx.numpy()
bins = []
for i in range(len(idx) // self.bin_size):
this_bin = idx[i * self.bin_size:(i + 1) * self.bin_size]
self.random.shuffle(this_bin)
bins += [this_bin]
self.random.shuffle(bins)
binned_idx = np.stack(bins).reshape(-1)
if len(binned_idx) < len(idx):
last_bin = idx[len(binned_idx):]
self.random.shuffle(last_bin)
binned_idx = np.concatenate([binned_idx, last_bin])
return iter(torch.tensor(binned_idx).long())
def __len__(self):
return len(self.idx)
def collate_dataset(batch: List[dict]) -> dict:
tokens = [b['tokens'] for b in batch]
tokens = pad_sequence(tokens, batch_first=True, padding_value=0)
mels = [b['mel'] for b in batch]
mels = pad_sequence(mels, batch_first=True, padding_value=0)
tokens_len = torch.tensor([b['tokens_len'] for b in batch]).long()
mel_len = torch.tensor([b['mel_len'] for b in batch]).long()
item_ids = [b['item_id'] for b in batch]
return {'tokens': tokens, 'mel': mels, 'tokens_len': tokens_len,
'mel_len': mel_len, 'item_id': item_ids}
def new_dataloader(dataset_path: Path, mel_dir: Path,
token_dir: Path, batch_size=32) -> DataLoader:
dataset = unpickle_binary(dataset_path)
item_ids = [d['item_id'] for d in dataset]
mel_lens = [d['mel_len'] for d in dataset]
aligner_dataset = AlignerDataset(item_ids=item_ids, mel_dir=mel_dir, token_dir=token_dir)
return DataLoader(aligner_dataset,
collate_fn=collate_dataset,
batch_size=batch_size,
sampler=BinnedLengthSampler(mel_lens=mel_lens, batch_size=batch_size,
bin_size=batch_size*3),
num_workers=0,
pin_memory=True)
def get_longest_mel_id(dataset_path: Path) -> str:
dataset = unpickle_binary(dataset_path)
dataset.sort(key=lambda item: (item['mel_len'], item['item_id']))
return dataset[-1]['item_id']
| 3,653 | 36.670103 | 93 | py |
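# --- Illustrative sketch (not part of the repo): why BinnedLengthSampler is
# used above. Batching sequences of similar length keeps pad_sequence from
# filling most of each batch with padding:
import torch
from torch.nn.utils.rnn import pad_sequence
seqs = [torch.ones(n) for n in (5, 400, 7, 390)]
print(pad_sequence(seqs, batch_first=True).shape)     # (4, 400): mostly padding
short, long_ = seqs[::2], seqs[1::2]                  # length-binned batches
print(pad_sequence(short, batch_first=True).shape)    # (2, 7)
print(pad_sequence(long_, batch_first=True).shape)    # (2, 400)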
DeepForcedAligner | DeepForcedAligner-main/dfa/text.py | from typing import List
class Tokenizer:
def __init__(self, symbols: List[str], pad_token='_') -> None:
self.symbols = symbols
self.pad_token = pad_token
self.idx_to_token = {i: s for i, s in enumerate(symbols, start=1)}
self.idx_to_token[0] = pad_token
self.token_to_idx = {s: i for i, s in self.idx_to_token.items()}
self.vocab_size = len(self.symbols) + 1
def __call__(self, sentence):
sequence = [self.token_to_idx[c] for c in sentence if c in self.token_to_idx]
return sequence
def decode(self, sequence):
return ''.join([self.idx_to_token[int(t)] for t in sequence if int(t) in self.idx_to_token])
| 693 | 33.7 | 100 | py |
DeepForcedAligner | DeepForcedAligner-main/dfa/__init__.py | 0 | 0 | 0 | py | |
DeepForcedAligner | DeepForcedAligner-main/dfa/paths.py | from pathlib import Path
class Paths:
def __init__(self, data_dir: str, checkpoint_dir: str, dataset_dir: str, precomputed_mels: str, metadata_path: str, actual_dur_path):
self.data_dir = Path(data_dir)
self.dataset_dir = dataset_dir
self.metadata_path = Path(metadata_path)
self.mel_dir = self.data_dir / 'mels'
self.token_dir = self.data_dir / 'tokens'
self.precomputed_mels = precomputed_mels
self.actual_dur_path = actual_dur_path
if self.precomputed_mels is not None:
self.precomputed_mels = Path(precomputed_mels)
self.checkpoint_dir = Path(checkpoint_dir)
self.create_dirs()
def create_dirs(self):
self.data_dir.mkdir(parents=True, exist_ok=True)
self.mel_dir.mkdir(parents=True, exist_ok=True)
self.token_dir.mkdir(parents=True, exist_ok=True)
self.checkpoint_dir.mkdir(parents=True, exist_ok=True)
@classmethod
def from_config(cls, config):
return cls(
data_dir=Path(config['metadata_path']).stem+'_data',
metadata_path=config['metadata_path'],
checkpoint_dir=Path(config['metadata_path']).stem+'_checkpoints',
dataset_dir=config['dataset_dir'],
precomputed_mels=None,
actual_dur_path=config['dur_path'],
)
| 1,361 | 37.914286 | 137 | py |
EDGE | EDGE-master/EDGE.py | #######################################################
# #
# Calculation of electron spectra, #
# gamma-ray spectra and electrons #
# flux at the Earth for different #
# initial parameters #
# #
#######################################################
# #
# Ruben Lopez-Coto, MPIK, rlopez@mpi-hd.mpg.de #
# Joachim Hahn, MPIK, joachim.hahn@mpi-hd.mpg.de #
# #
#######################################################
import os, sys
from math import exp
import math
import numpy as np
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
sys.path.append(os.path.abspath('/Users/rubenlopez/Code/GAMERA-master/lib'))
import gappa as gp
import argparse
import astropy.units as u
from gammapy.astro.population import make_base_catalog_galactic
from scipy.special import erfc
import matplotlib.ticker as mtick
from matplotlib.ticker import OldScalarFormatter, ScalarFormatter
global fp
fu = gp.Utils()
fr = gp.Radiation()
fp = gp.Particles()
fa = gp.Astro()
deg_to_rad = gp.pi / 180.
os.system("mkdir -p Figures")
os.system("mkdir -p Results")
global opts
p = argparse.ArgumentParser(description="Calculate the IC electron spectrum of sources")
p.add_argument("-n", "--name", dest="Name", type=str, default="Source",
help="Name of the source.")
p.add_argument("-f", "--file", dest="File", type=str, default="Data/GemingaProfile.dat",
help="File containing the angular profile of the source.")
p.add_argument("-al", "--alpha", dest="ALPHA", type=float, default=2.2,
help="Spectral index of the injection spectrum")
p.add_argument("-d", "--distance", dest="DIST", type=float, default=0.25,
help="Distance to the source [kpc]")
p.add_argument("-del", "--delta", dest="DELTA", type=float, default=0.33,
help="Diffusion index")
p.add_argument("-a", "--age", dest="AGE", type=float, default=3.42e5,
help="Characteristic age of the source [yr]")
p.add_argument("-emax", "--emax", dest="EMAX", type=float, default=500.,
help="EMAX of accelerated electrons [TeV]") # You give it in TeV but it is transformed to erg
p.add_argument("-emin", "--emin", dest="EMIN", type=float, default=0.001,
help="EMIN of accelerated electrons [TeV]") # You give it in TeV but it is transformed to erg
p.add_argument("-m", "--mu", dest="MU", type=float, default=0.5,
help="Fraction of energy that goes into electrons")
p.add_argument("-d0", "--d0", dest="D0", type=float, default=4.e27,
help="Diffusion coefficient [cm^-2]")
p.add_argument("-s", "--s", dest="SIZE", type=float, default=5.,
help="Size of the source given by the diffusion coefficient")
p.add_argument("-kn", "--kn", dest="KN", action='store_true', default=False,
help="Flag to activate or deactivate the KN option to calculate IC losses")
p.add_argument("-edens", "--edens", dest="TOT_E_DENS", type=float, default=1.06,
help="Total energy density. For Thomson losses. [eV/cm^3]")
p.add_argument("-bfield", "--bfield", dest="BCONT", type=float, default=3.,
help="Magnetic field [G]")
p.add_argument("-edot", "--edot", dest="EDOT", type=float, default=3.2e34,
help="Spin-down power [erg/s]")
p.add_argument("-brind", "--brind", dest="BRIND", type=float, default=3.,
help="Breaking index")
p.add_argument("-tau0", "--tau0", dest="T0", type=float, default=1.2e4,
help="Initial spin-down timescale [yr]")
p.add_argument("-p", "--p", dest="P", type=float, default=237.,
help="Pulsar Period [ms]")
p.add_argument("-p0", "--p0", dest="P0", type=float, default=40.5,
help="Initial pulsar period [ms]")
p.add_argument("-tsupr", "--tsupr", dest="TIMESUPR", type=float, default=0.,
help="Suppression time for the luminosity [yr]")
# Running-related inputs
p.add_argument("-birth_period", "--birth_period", dest="BIRTH_PERIOD", action='store_true', default=False,
help="Flag to calculate initial spin-down characteristic age from birth period")
p.add_argument("-all_pulsar", "--all_pulsar", dest="ALL_PULSAR", action='store_true', default=False,
help="Flag to calculate the contribution at the Earth of all pulsars")
p.add_argument("-only_flux_earth", "--only_flux_earth", dest="ONLY_FLUX_EARTH", action='store_true', default=False,
help="Only calculate the flux at the Earth and exit")
p.add_argument("-eps", "--eps", dest="FIG_EPS", action='store_true', default=False,
help="Save Figures in EPS format")
# Binning inputs
p.add_argument("-eb", "--ebins", dest="EBINS", type=float, default=100,
help="Energy bins of the E_R_Array")
p.add_argument("-rb", "--rbins", dest="RBINS", type=float, default=400,
help="Radial bins of the E_R_Array")
# Source-related inputs
p.add_argument("-norm", "--norm", dest="NORM", type=float, default=12.1e-15,
help="Normalization of the source's flux at a given pivot E")
p.add_argument("-norm_err", "--norm_err", dest="NORM_ERR", type=float, default=2.5e-15,
help="Error on the normalization of the source's flux at a given pivot E")
p.add_argument("-pivot", "--pivot", dest="PIVOT_E", type=float, default=20.,
help="Pivot energy for the normalization of the flux")
p.add_argument("-gamma", "--gamma", dest="GAMMA", type=float, default=2.40,
help="Spectral index of the gamma-ray spectrum")
p.add_argument("-gamma_err", "--gamma_err", dest="GAMMA_ERR", type=float, default=0.09,
help="Error on the spectral index of the gamma-ray spectrum")
# Input parameters
args = p.parse_args()
opts = args
AGE = opts.AGE # 3.e5 # yr Real Age of the pulsar
TC = opts.AGE # 3.e5 # yr Characteristic age of the pulsar
DIST = opts.DIST # 0.25 # kpc Distance
ALPHA = opts.ALPHA # 2.0 # Spectral index of the injection function
DELTA = opts.DELTA # 0.4 # Diffusion index
EMAX = opts.EMAX * gp.TeV_to_erg # 500 # erg
EMIN = opts.EMIN * gp.TeV_to_erg # 0.001 # erg
MU = opts.MU # 0.5 # Fraction of energy that goes into electrons
D0 = opts.D0 # 4.e27 # Diffusion coefficient
SIZE = opts.SIZE # 4.7 # deg
KN = opts.KN # False
TOT_E_DENS= opts.TOT_E_DENS # 1.06 # eV/cm^3
BCONT = opts.BCONT* 1.e-6 # 3.e-6 # muGauss
EDOT = opts.EDOT # 3.2e34 # erg/s
BRIND = opts.BRIND # 3
T0 = opts.T0 # 1.2e4 # yr
P = opts.P # 20. # ms
P0 = opts.P0 # 20. # ms
TIMESUPR = opts.TIMESUPR # 0. # yr
EBINS = opts.EBINS # 100
RBINS = opts.RBINS # 400
BIRTH_PERIOD = opts.BIRTH_PERIOD # False
ALL_PULSAR = opts.ALL_PULSAR # False
ONLY_FLUX_EARTH = opts.ONLY_FLUX_EARTH # False
FIG_EPS = opts.FIG_EPS # False
NORM = opts.NORM # 12.1e-15 # TeV^-1 cm^-2 s^-1
NORM_ERR = opts.NORM_ERR # 2.5e-15 # TeV^-1 cm^-2 s^-1
PIVOT_E = opts.PIVOT_E # 20 # TeV
GAMMA = opts.GAMMA # 2.40
GAMMA_ERR = opts.GAMMA_ERR # 0.09
electron_mass=0.5e-6 # TeV/c^2
c=3.e10 # cm/s
#Edot=3.2e34 # erg/s
#nu=4.218 # Hz Frequency
#nu_dot=1.952e-13 # Hz/s Frequency derivate
#nu_dot_dot_old=1.49e-25 # Hz/s^2 Frequency second derivate
#nu_0=nu+nu_dot*t+nu_dot_dot_old*pow(t,2)
#nu_0=nu+nu_dot*t # Hz Initial frequency
l0 = 5.e-20 # s^-1
E_star=3.e-3 * gp.TeV_to_erg # erg
I = 1e45 # g cm^2 Pulsar moment of inertia
AGEBURST = AGE # s
#AGECONT = 2*TC/(BRIND-1.0)-T0 # s
#AGECONT = AGEBURST - TIMEOFFSET # s
ETA = .1
el_charge=4.80320427e-10 # StatC
TMIN = 1. # s
DENS = 1e-4
TIR = 20. # K
TOPT = 5e3 # K
WIR = 0.3 # erg/cm^3
WOPT = 0.3 # erg/cm^3
#BCONT = 3.e-6 # G Magnetic field for continuous emission
BBURST = 3.e-6 # G Magnetic field for burst emission
ESN = 2.5e48 # erg
# Luminosity evolution of a pulsar (simply spin-down)
def CalculateLuminosity(bins,age):
T = np.logspace(math.log10(TMIN),math.log10(2.*age),bins) # Array with the time
if (BIRTH_PERIOD):
tau0 = 2*TC/(BRIND-1.)-age
Ps = P*1e-3
Pdot = Ps/(2*TC*gp.yr_to_sec)
print ("Pdot (ms)",Pdot)
edot = 4*math.pi**2*I*Pdot/(Ps**3)
lum0= edot/pow(1+age/tau0,-1.*(BRIND+1.)/(BRIND-1.))# erg/s
lum = MU*lum0*(1.+T/tau0)**(-1.*(BRIND+1.)/(BRIND-1.)) # Array with the luminosity for each of the times
else:
edot=EDOT
tau0 = T0
lum0= EDOT/pow(1+age/T0,-1.*(BRIND+1.)/(BRIND-1.))# erg/s
lum = MU*lum0*(1.+T/T0)**(-1.*(BRIND+1.)/(BRIND-1.)) # Array with the luminosity for each of the times
print ("Age ",age)
print ("Characteristic age ",TC)
print ("TAU0",tau0)
print ("Edot",edot)
print ("LUM0",lum0)
if TIMESUPR != 0.:
t_index = np.max(np.where(T < TIMESUPR)[0])
lumBurst = []
        lum_ones = np.ones(np.size(T[:t_index]))
        lumoffset = np.concatenate((lum_ones, lum[t_index:]))
lumCont = np.vstack((T, lumoffset)).T
else:
lumCont = np.vstack((T, lum)).T # We stack both arrays, having two columns, the first one for the time and the second for the corresponding luminosity
lumBurst = []
return np.log10(lumBurst),np.log10(lumCont),lum0,tau0,age,edot
# Find the real age of the pulsar (t in eq5 from Gansler&Slane 2006)
def FindAge():
if (BIRTH_PERIOD):
print ("Birth period - Period [ms] : ",P0, P)
age = TC*(2/(BRIND-1.))*(1-math.pow(P0/P,(BRIND-1.)))
else:
age = 2*TC/(BRIND-1.0)-T0
#age = TC
return age
# Diffusion coefficient at energy e (in erg)
def Diffusion(e):
return D0 * math.pow(1. + e/E_star, DELTA)
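# Worked example (illustration only): with the defaults above, D0 = 4e27 cm^2/s,
# DELTA = 0.33 and E_star = 3 GeV, an electron at 1 TeV diffuses with
#   D(1 TeV) = 4e27 * (1 + 1000/3)**0.33 ~ 4e27 * 6.8 ~ 2.7e28 cm^2/s,
# i.e. roughly a factor of 7 above D0.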
def CalculateEnergyTrajectory(fp):
e = EMAX
E = []
T = []
DiffIntegrandInt = []
diff_int = 0.
t = 0.
while e > EMIN: # loop from EMAX to EMIN
if (KN):
lr = fp.EnergyLossRate(e)
#print "Calculating losses using the KN formula"
else:
gamma=e / gp.m_e
lr = (TOT_E_DENS * gp.TeV_to_erg * 1.e-12) * (4./3 * gp.sigma_T * gp.c_speed) * pow(gamma,2) # Loss rate for 1 eV/cm^3 energy density
# TOT_E_DENS in eV/cm^3 -> we transform it to erg/cm^3
# Thomson energy losses: 4./3 * sigma_t * c
# (TOT_E_DENS * gp.TeV_to_erg * 1.e-12) * (4./3 * gp.sigma_T * gp.c_speed) = l0 * m_e[erg] = 4./3 * gp.sigma_T * gp.c_speed/m_e[eV] * m_e[erg]
#print "Calculating losses using the Thomson formula"
#print "(TOT_E_DENS * gp.TeV_to_erg * 1.e-12) * (4./3 * gp.sigma_T * gp.c_speed)", (TOT_E_DENS * gp.TeV_to_erg * 1.e-12) * (4./3 * gp.sigma_T * gp.c_speed)
#print "lr,e,gamma",lr,e,gamma
dt = 1.e-3 * e / lr; # time increase
e -= dt * lr; # we decrease the energy in steps of DeltaE=dt*lr
t += dt / gp.yr_to_sec # and increase the time in steps of dt
D = Diffusion(e)
diff_int = diff_int + dt * lr * D / lr # This is Delta E * f(E), we are integrating in E the expression lambda = int(D(E)/E_dot)
T.append(t)
E.append(e)
DiffIntegrandInt.append(diff_int)
    etraj = np.log10(np.array(list(zip(T,E)))) # Energy trajectory (array of [(time0,energy0),(time1,energy1),...]). We zip in (T,E)
    etrajinverse = np.log10(np.flipud(np.array(list(zip(E,T))))) # Energy trajectory inverted (last element, corresponding to the minimum energy, is now the first). We zip in (E,T)
    lamb = np.log10(np.flipud(np.array(list(zip(E,DiffIntegrandInt))))) # Lambda (minimum energy goes first)
return etraj,etrajinverse,lamb
def Create_E_R_ArrayOfElectrons(rbins,ebins):
r = np.logspace(-3.,math.log10(1.e3*DIST*20.),rbins) # Array of distances to the pulsar [pc]
e = np.logspace(math.log10(EMIN),math.log10(EMAX),ebins) # Array of energies [erg]
Age=FindAge()
twoDarray = []
twoDarrayPLOT = []
thr = 1e-30
#fig = plt.figure()
halfwidth = [] # The point at which the density of electrons has gone down to half its maximum value for a given energy
for ee in e: # For a given energy
print ("Energy: %.4f [TeV]" % (ee/gp.TeV_to_erg))
line = []
lineplot = []
vlamb,DT,E0,E = FillLambdaVector(ee,1000,Age) # We fill the mean free path vector for each of the energies
# vlamb is a vector of the form [(Energy,lambda_integral(Enow=ee)-lambda_integral(Energy))], where ee is varying from EMIN to EMAX and Energy from ee to EMAX
vzero = 0.
fill_halfwidth = True
for rr in r:
scont = Spectrum(ee,vlamb,rr,DT,E0,Age) # Value in 1/(erg*cm^3) of the differential energy spectrum
# srl = SpectrumRectilinear(ee,rr)
if len(LUMBURST):
sburst = SpectrumBurst(ee,rr)
else:
sburst = 0.
# print scont,srl,sburst
v = scont + sburst# + srl
#loop to find the halfwidth of the distribution
if vzero == 0.:
vzero = v
if vzero != 0. and fill_halfwidth == True and v < 0.5 * vzero:
fill_halfwidth = False
halfwidth.append([ee,rr])
vplot = ee*ee*v # In the plot we represent E^2 dN/dE [erg/cm^3]
if v < thr:
v = thr
vplot = thr
line.append(v) # 1-D Array of densities for each of the different radii=rr and for a given energy ee [1/(erg*cm^3)]
lineplot.append(vplot) # The same, multiplied by E^2, for plotting [erg/cm^3]
twoDarray.append(np.array(line)) # 2-D Array containing all the 1-D Arrays previously mentioned, for each of the energies ee [1/(erg*cm^3)]
twoDarrayPLOT.append(np.array(lineplot))
halfwidth = np.array(halfwidth)
twoDarray = np.array(twoDarray)
twoDarrayPLOT = np.array(twoDarrayPLOT)
#fig.savefig("Figures/lambdas_"+tag+".png")
return twoDarray,twoDarrayPLOT,halfwidth,e,r
# fill vector of LAMBDA vs lower energy bound of electrons
def FillLambdaVector(Enow,bins,Age):
DT = math.pow(10.,np.interp(math.log10(Enow),ETRAJCONTINVERSE[:,0],ETRAJCONTINVERSE[:,1])) # We interpolate between the first element (that is an array) of ETRAJCONTINVERSE (x=energy) and the second (f(x)=time) to obtain the interpolated time for a given energy
if DT < Age: # If the interpolated time is smaller than the age, we can consider the initial energy E0=EMAX, otherwise we would be on curve 3 of the notes and the maximum energy would not be EMAX but the one calculated in the next step
E0 = EMAX
else:
E0 = math.pow(10.,np.interp(math.log10(DT-Age),ETRAJCONT[:,0],ETRAJCONT[:,1])) # We interpolate between the first element (that is an array) of ETRAJCONT (x=time) and the second (f(x)=energy) to obtain the interpolated energy for a given time
E = np.logspace(math.log10(Enow),math.log10(E0),bins)
lamb = []
for e in E[1:]:
# LAMBCONT is the integral over the energy of dE D(E)/EDOT(E), from EMIN to EMAX
# We interpolate between the first time element of LAMBCONT (x=energy) and the second (f(x)=integral(dE D(E)/EDOT(E)))
v = math.pow(10.,np.interp(math.log10(Enow),LAMBCONT[:,0],LAMBCONT[:,1])) - math.pow(10.,np.interp(math.log10(e),LAMBCONT[:,0],LAMBCONT[:,1]))
# We are interested on int_E'^Enow{ dE D(E)/EDOT(E) }, therefore we need to break the integral:
# int_E'^Enow{ } = int_Emax^Enow{ } - int_Emax^E'{ } = lambda(Enow) - lambda(E')
# We subtract from the integral for Enow the integral for every energy e and fill a vector with this subtraction
lamb.append([e,v])
lamb = np.log10(np.array(lamb))
return lamb,DT,E0,lamb[:,0]
# main function to calculate the differential number (1/(erg*cm^3)) of electrons at
# energy e and radius R from the (point-) source in the *continuous* scenario.
def Spectrum(e,vlamb,R,DT,E0,Age):
tmin = acc_time(E0,BCONT) # minimum acceleration time needed to accelerate the particle to that energy
if DT <= tmin:
return 0.
R = R * gp.pc_to_cm
spec = []
if ALPHA == 2.:
norm = math.log(EMAX/EMIN)
else:
norm = 1. / (ALPHA-2.) * (math.pow(EMIN, -ALPHA + 2.) - math.pow(EMAX, -ALPHA + 2.)) # Normalization of the electron spectrum
    vq = np.array(list(zip(LUMCONT[:,0],np.log10(10.**LUMCONT[:,1] / norm)))) # Array with Time and luminosity/normalization
T = np.logspace(math.log10(max(1e-3,Age-DT)),math.log10(Age-tmin),2000) # Array with Time in logarithmic bins
T2 = T - (Age - tmin - DT)
e0 = 10.**np.interp(np.log10(T2),ETRAJCONT[:,0],ETRAJCONT[:,1])
lamb = 10.**np.interp(np.log10(e0),vlamb[:,0],vlamb[:,1])
Q = 10.**np.interp(np.log10(T),vq[:,0],vq[:,1])
#print "Q",Q
val = Q * e0 ** (-ALPHA) * e0*e0 * np.exp(-R*R/(4.*lamb)) /( e*e * (4.*gp.pi*lamb)**1.5 )
np.place(val, val!=val, [0.])
    val = fu.Integrate(list(zip(T*gp.yr_to_sec,val)),T[0]*gp.yr_to_sec,T[len(T)-1]*gp.yr_to_sec) # Differential number of electrons for an energy e and at radius R [1/(erg * cm^3)]
return val
# acceleration time for particles of energy e. Used to determine starting time
# of injection.
def acc_time(energy,b):
momentum = ( energy - gp.m_e) / c
gyrorad = momentum * c / (el_charge * b)
tacc = gyrorad / c / ETA
return tacc / gp.yr_to_sec
def InitialiseGappa(fp,fr,b,age):
fr.AddThermalTargetPhotons(2.7,0.26*gp.eV_to_erg) #CMB
fr.AddThermalTargetPhotons(TIR,WIR*gp.eV_to_erg) #IR
fr.AddThermalTargetPhotons(TOPT,WOPT*gp.eV_to_erg) #OPT
fr.CreateICLossLookup()
fr.SetBField(b)
fr.SetAmbientDensity(DENS)
fr.SetDistance(0.) # This will calculate the luminosity (which is what we want for the LOS integral)
fp.SetBField(b)
fp.SetICLossLookup(fr.GetICLossLookup())
fp.SetAmbientDensity(DENS)
fp.SetAge(age)
return fp
# Calculate electron column densities for every spectral energy bin along the
# l, b direction (although this model is radially symmetric,
# so only one angle is required...).
def LineOfSightIntegration(l,b,twoDarray,e,r,rbins):
# twoDarray contains the density of photons in bins of r and e [1/(erg*cm^3)]
    # l is the vertical angle of the line of sight
    # b is the second sky angle (the model is radially symmetric, so only one matters)
    # e is the energy [erg]
    # r is an array with distances from the pulsar [pc]
rvals = np.logspace(-6.,math.log10(DIST),rbins)
# make r-steps so that they are very fine at the source [kpc]
rvals = np.concatenate(((DIST - rvals)[::-1],rvals + DIST))
#rvals = np.linspace(0.,2.*DIST,rbins)
vals = []
los = line_of_sight(l,b,rvals,fa) # Array with xyz positions w.r.t. the Earth for all the elements with angle < l
integrand = []
for xyz in los:
x = xyz[0]
y = xyz[1]
z = xyz[2]
rr = math.sqrt( x * x + y * y + z * z) * 1000. # *1000 to convert it into pc
r_index = np.where(r > rr)[0][0] # It returns the index of the first element where the condition is fulfilled
integrandE = []
        for i in range(len(e)):
val = twoDarray.T[r_index][i] # we add for all the energies the twoDarray element with index r_index (Remember, twoDarray [1/(erg*cm^3)])
integrandE.append(val) # For every energy, we add a value to the integrandE array, with the density corresponding to the distance r[r_index]
integrand.append(integrandE)
# Array containing, for each xyz value
# in the line of sight from the Earth,
# the integrandE of the densities for
# the distance corresponding to r[r_index] for all the energies
integrand = np.array(integrand).T
for integr in integrand:
        vals.append(fu.Integrate(list(zip(rvals*gp.kpc_to_cm,integr)),rvals[0]*gp.kpc_to_cm,rvals[len(rvals)-1]*gp.kpc_to_cm))
# Integrate integr * rvals (rvals is in kpc)
# from rvals[0]
# to rvals[len(rvals)-1]
return vals # Units [1/(erg * cm^2)]
def LineOfSightVolumeIntegration(l,b,twoDarray,e,r,rbins):
# twoDarray contains the density of photons in bins of r and e [1/(erg*cm^3)]
    # l is the vertical angle of the line of sight
    # b is the second sky angle (the model is radially symmetric, so only one matters)
# e is the energy [erg]
# r is an array with distances from the pulsar [pc]
rvals = np.logspace(-6.,math.log10(DIST),rbins)
# make r-steps so that they are very fine at the source [kpc]
rvals = np.concatenate(((DIST - rvals)[::-1],rvals + DIST))
#rvals = np.linspace(0.,2.*DIST,rbins)
vals = []
los = line_of_sight(l,b,rvals,fa) # Array with xyz positions w.r.t. the Earth for all the elements with angle < l
integrand = []
for xyz in los:
x = xyz[0]
y = xyz[1]
z = xyz[2]
rr = math.sqrt( x * x + y * y + z * z) * 1000. # *1000 to convert it into pc
r_index = np.where(r > rr)[0][0] # It returns the index of the first element where the condition is fulfilled
integrandE = []
        for i in range(len(e)):
val = twoDarray.T[r_index][i] # we add for all the energies the twoDarray element with index r_index (Remember, twoDarray [1/(erg*cm^3)])
integrandE.append(val) # For every energy, we add a value to the integrandE array, with the density corresponding to the distance r[r_index]
integrand.append(integrandE)
# Array containing, for each xyz value
# in the line of sight from the Earth,
# the integrandE of the densities for
# the distance corresponding to r[r_index] for all the energies
integrand = np.array(integrand).T
for integr in integrand:
        vals.append(fu.Integrate(list(zip(rvals*gp.kpc_to_cm,integr*rvals*rvals*math.pow(gp.kpc_to_cm,2))),rvals[0]*gp.kpc_to_cm,rvals[len(rvals)-1]*gp.kpc_to_cm))
# Integrate (integr*rvals*rvals) * rvals (rvals is in kpc)
# from rvals[0]
# to rvals[len(rvals)-1]
return vals # Units [1/(erg)]
# creates an array of x,y,z values along a line of sight in the l,b direction
def line_of_sight(l,b,rvals,fa):
xyz_obs = [DIST, 0. ,0. ]
los = []
for r in rvals:
xyz = fa.GetCartesian(r,l,b,xyz_obs)
# It gives the xyz position of a point w.r.t. the Earth. It is a vector with 3 components [0]=x,[1]=y,[2]=z
los.append(xyz)
return np.array(los)
# Calculate the contribution of a homogeneous distribution of
# pulsars in the galaxy
def Homogeneus_distribution_pulsars(age,SN_rate):
n_sources = age * SN_rate
table = make_base_catalog_galactic(n_sources=n_sources,
rad_dis='L06',
vel_dis='F06B',
                           max_age=age,
spiralarms=True)
return table
# ********** HOMOGENEOUS PULSAR CONTRIBUTION ********
def Flux_Earth_all_pulsars(E):
max_age = 1e7 * u.yr
SN_rate = 2. / (100. * u.yr)
pulsar_distribution = Homogeneus_distribution_pulsars(max_age,SN_rate);
x_pc = np.array(pulsar_distribution[6][:]) # in kpc
y_pc = np.array(pulsar_distribution[7][:]) # in kpc
age = np.array(pulsar_distribution[0][:]) * gp.yr_to_sec # in s
x_Earth = x_pc - 8.3 # in kpc
y_Earth = y_pc - 0 # in kpc
d = np.sqrt(x_Earth * x_Earth + y_Earth * y_Earth) * gp.kpc_to_cm
# Steady flux
# Eq 21 Atoyan et al. 1995
Q0 = 5.e32 # 1/(erg * s)
f_st_int = []
for e in E:
D = Diffusion(e) # cm^2/s
t_gamma = []
for t in age:
if (t < gp.m_e/(l0 * e)):
t_gamma.append(t)
else:
t_gamma.append(gp.m_e/(l0 * e))
t_gamma = np.array(t_gamma)
f_st = Q0 * e**-2.4 / (4*gp.pi * D * d) * erfc(d/(2 * np.sqrt(D * t_gamma)))
#print "f_st",f_st
#print "t_gamma",t_gamma
#print "d",d
#print "D",D
#print "Q0 * e**-2.4",Q0 * e**-2.4
#print "(4*gp.pi * D * d)", (4*gp.pi * D * d)
#print "D * t_gamma", D * t_gamma
#print "np.sqrt(D * t_gamma)",np.sqrt(D * t_gamma)
#print "d/(2 * np.sqrt(D * t_gamma)",d/(2 * np.sqrt(D * t_gamma))
#print "erfc(d/(2 * np.sqrt(D * t_gamma)))",erfc(d/(2 * np.sqrt(D * t_gamma)))
# Condition to consider the contribution of pulsars at a distance > 1 kpc.
# Note: If we do not add this condition, the electron emission extends up to TeV energies
sum_all_pulsars = sum(f_st[i] for i in range(len(f_st)) if d[i] > 1 * gp.kpc_to_cm)
#sum_all_pulsars = sum(f_st)
f_st_int.append(sum_all_pulsars)
# Check how many pulsars there are in a region of 1 kpc from the Earth
N_pulsars_1kpc = 0
print ("Positions of pulsars at distance < 1 kpc")
print ("Distance x_Earth y_Earth")
for i in range(len(f_st)):
if d[i] < 1 * gp.kpc_to_cm:
print (d[i],x_Earth[i],y_Earth[i])
N_pulsars_1kpc += 1
print ("The number of pulsars within 1 kpc distance from the Earth is ", N_pulsars_1kpc)
return f_st_int
# ******************** MAIN FUNCTION ******************
if __name__=='__main__':
global LUM
global tag
#points = opts.File
#tag = sys.argv[2]
tag = opts.Name
#data = np.loadtxt(points)
AMS_data = np.loadtxt("Data/Data_points/AMS_data.dat",skiprows=2)
HESS_data = np.loadtxt("Data/Data_points/HESS_data.dat",skiprows=2)
Fermi_data = np.loadtxt("Data/Data_points/Fermi_data.dat",skiprows=2)
AMS_positron_fraction = np.loadtxt("Data/Data_points/AMS_positron_fraction.dat",skiprows=2)
PAMELA_positron_fraction = np.loadtxt("Data/Data_points/PAMELA_positron_fraction.dat",skiprows=2)
# Curves from other papers
Yuksel_delta04 = np.loadtxt("Data/Predictions_papers/Yuksel_Fig3_dotted_delta04.csv",delimiter=',')
Aharonian_Fig4 = np.loadtxt("Data/Predictions_papers/Aharonian_1995_Fig4_time_dependent_injection.csv",skiprows=1)
min_bin_deg = 0.
max_bin_deg = SIZE+0.2
nbins = SIZE*10+3
#nbins = 51
#deg = np.linspace(min_bin_deg,max_bin_deg,nbins)
deg1=np.linspace(0.01,0.09,9)
    deg2=np.linspace(0.1,SIZE,int(SIZE*10))
deg=np.concatenate((deg1,deg2))
#degs = [1.7, 5.5, 8.6]
degs = [2.6,SIZE] # IMPROVE ME!: SIZE should be an array with different sizes, just to compare
#bin_1dot7=int(1.7/((max_bin_deg-min_bin_deg)/nbins))
#bin_5dot5=int(5.5/((max_bin_deg-min_bin_deg)/nbins))
#bin_8dot6=int(8.6/((max_bin_deg-min_bin_deg)/nbins))
bin_Size = int(SIZE/((max_bin_deg-min_bin_deg)/nbins)) # Bin for the corresponding size given by diffusion
bin_Milagro = int(2.6/((max_bin_deg-min_bin_deg)/nbins)) # Bin for the corresponding size given by Milagro's point at FWHM=2.6
#****************** LUMINOSITY *************
Age = FindAge()
LUMBURST,LUMCONT,lum0,tau0,age,edot = CalculateLuminosity(10000,Age)
fig = plt.figure()
#print "LUMBURST,LUMCONT",LUMBURST,LUMCONT
if len(LUMBURST) != 0:
plt.plot(10.**LUMBURST[:,0],10.**LUMBURST[:,1],label=" ")
plt.loglog(10.**LUMCONT[:,0],10.**LUMCONT[:,1]/MU,label="Pulsar evolution luminosity")
#plt.xlim([0.,10.*TC])
plt.xlim([0.,2*AGE])
#plt.xlim([1.e5,AGE])
plt.ylim([edot/10.,lum0*100])
plt.ylabel(r'L$_e$ [erg/s]')
plt.xlabel("Age [kyr]")
plt.plot((1., 2*age), (edot, edot), label=r'Constant injection luminosity',color='red')
plt.plot((tau0, tau0), (edot/10., lum0*100), label=r'$\tau_0$',color='black',linestyle = "dashed")
plt.plot((age, age), (edot/10., lum0*100), label=r'Now',color='blue',linestyle = "dashed")
#print tau0,EDOT/10.,LUMCONT[0,1]
plt.title(r'L$_0$=%.1e erg/s; $\tau_0$=%.1e yr; n = %.1f' %(lum0,tau0,BRIND))
plt.grid(color="black",alpha=.5)
plt.legend(prop={'size':10},loc="upper left")
#plt.legend(title="log10(L0),t0,n =\n"+str(round(math.log10(LUM0),2))+","+str(round(TC,2))+","+str(round(BRIND,2)),loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Luminosity_"+tag+".eps")
else:
fig.savefig("Figures/Luminosity_"+tag+".png")
#************* ELECTRON DENSITY IN SPACE AND ENERGY ***********
global ETRAJCONT,ETRAJBURST,ETRAJCONTINVERSE,ETRAJBURSTINVERSE,LAMBCONT,LAMBBURST
fp = InitialiseGappa(fp,fr,BBURST,AGEBURST)
ETRAJBURST,ETRAJBURSTINVERSE,LAMBBURST = CalculateEnergyTrajectory(fp)
fp = InitialiseGappa(fp,fr,BCONT,Age)
ETRAJCONT,ETRAJCONTINVERSE,LAMBCONT = CalculateEnergyTrajectory(fp)
# This creates an array of electron densities in (E,R) space
twoDarray,twoDarrayPLOT,halfwidth,E,R = Create_E_R_ArrayOfElectrons(RBINS,EBINS)
# twoDarray: 2-D Array containing all the electron densities, for each of the radii and the energies [1/(erg*cm^3)]
# twoDarrayPLOT: The same * E^2, for plotting [erg/cm^3)]
# halfwidth: Array containing, for each energy, the distance at which the maximum density goes to half
# E: Array of the energies [erg]
# R: Array of the radii [pc]
#print halfwidth
# plot it!
fig,ax = plt.subplots(1, 1,figsize=(7,5))
logarray = np.log10(twoDarrayPLOT)
levels = np.linspace(np.amin(logarray),np.amax(logarray),100)
plt.contourf(np.log10(E/gp.TeV_to_erg),np.log10(R), logarray.T,levels, cmap=plt.get_cmap('viridis')) # Density of electrons
#plt.plot(np.log10(halfwidth[:,0]),np.log10(halfwidth[:,1]),color="black",linestyle = "dashed") # Line limiting half of the density of the electrons for a given energy
plt.grid(color="black",alpha=.5)
cbar = plt.colorbar()
cbar.set_label(r'log$_{10}$(E$^2$ $\frac{\mathrm{dN}}{\mathrm{dE}})$ [erg cm$^{-3}$]')
plt.ylabel("log$_{10}$ (R) [pc]")
plt.xlabel("log$_{10}$ (E) [TeV]")
if (FIG_EPS):
fig.savefig("Figures/Electrons_E_R_Array_"+tag+".eps")
else:
fig.savefig("Figures/Electrons_E_R_Array_"+tag+".png")
#***************** ELECTRON FLUX EARTH *****************
GeV_to_erg = 1.e-3 * gp.TeV_to_erg
ii = np.where(R >= 1000.*DIST)[0][0]
    # First index where R >= 1000*DIST (i.e. the source distance expressed in pc)
# Since the problem is spherically symmetric, the flux at Earth is equal to the flux at any point of the sphere with radius R=1000*DIST
#print "-->",R[ii]
fig = plt.figure()
fac = 1e4 * c / (4.*gp.pi) # c/4pi in cm/s, 1e4 transform the cm^-2 to m^-2 in the E^3 J(E) function
EGeV = E/gp.TeV_to_erg * 1.e3 # GeV
plt.loglog(E/gp.TeV_to_erg,EGeV**3.*GeV_to_erg*fac*twoDarray.T[ii],color="black", label ="Pulsar") # E^3 J(E)
    # GeV_to_erg converts one power of GeV in the numerator to erg, cancelling the erg^-1 carried by twoDarray [1/(erg*cm^3)]
#print "EGeV**3.*GeV_to_erg*fac*f_st_int",EGeV**3.*GeV_to_erg*fac*f_st_int
print ("Flux_Earth",EGeV**3.*GeV_to_erg*fac*twoDarray.T[ii])
    zipped=list(zip(E/gp.TeV_to_erg,EGeV**3.*GeV_to_erg*fac*twoDarray.T[ii]))
np.savetxt("Results/Flux_Earth"+tag+".txt", zipped)
# ********** HOMOGENEOUS PULSAR CONTRIBUTION ********
if (ALL_PULSAR):
f_st_int=Flux_Earth_all_pulsars(E)
        zipped_all_pulsars=list(zip(E/gp.TeV_to_erg,EGeV**3.*GeV_to_erg*fac*f_st_int))
np.savetxt("Results/Flux_Earth_all_pulsars"+tag+".txt", zipped_all_pulsars)
plt.loglog(E/gp.TeV_to_erg,EGeV**3.*GeV_to_erg*fac*f_st_int,color="red",label="All pulsars [d > 1 kpc]") # E^3 J(E)
# AMS Data all electron flux
y_AMS = AMS_data[:,3]*pow(AMS_data[:,0],3) # F x E^3
yerror_AMS = AMS_data[:,4]*pow(AMS_data[:,0],3)
AMS_points = plt.errorbar(AMS_data[:,0]*1.e-3,y_AMS,yerr=yerror_AMS,fmt='o',color = "black",label="AMS",markeredgecolor='k')
# HESS Data all electron flux
y_HESS = HESS_data[:,3]*pow(HESS_data[:,0],3)
yerror_HESS = HESS_data[:,4]*pow(HESS_data[:,0],3)
HESS_points = plt.errorbar(HESS_data[:,0]*1.e-3,y_HESS,yerr=yerror_HESS,fmt='^',color = "red",label="HESS",markeredgecolor='k')
# Fermi Data all electron flux
y_Fermi = Fermi_data[:,3]*pow(Fermi_data[:,0],3)
yerror_Fermi = Fermi_data[:,4]*pow(Fermi_data[:,0],3)
Fermi_points = plt.errorbar(Fermi_data[:,0]*1.e-3,y_Fermi,yerr=yerror_Fermi,fmt='s',color = "blue", label="Fermi",markeredgecolor='k')
# Values for galactic electrons and positrons
# From Moskalenko and Strong (1998), Figure 5, left panel
primary_el_data = np.loadtxt("Data/Moskalenko_and_Strong/Primary_electrons.txt",skiprows=1)
secondary_el_data = np.loadtxt("Data/Moskalenko_and_Strong/Secondary_electrons.txt",skiprows=1)
secondary_pos_data = np.loadtxt("Data/Moskalenko_and_Strong/Secondary_positrons.txt",skiprows=1)
x_primary_el = primary_el_data[:,0] * 1e-6 # TeV
y_primary_el = primary_el_data[:,1] * 1e-3 * 1e4 # GeV m^-2 s^-1 sr^-1
x_secondary_el = secondary_el_data[:,0] * 1e-6 # TeV
y_secondary_el = secondary_el_data[:,1] * 1e-3 * 1e4 # GeV m^-2 s^-1 sr^-1
x_secondary_pos = secondary_pos_data[:,0] * 1e-6 # TeV
y_secondary_pos = secondary_pos_data[:,1] * 1e-3 * 1e4 # GeV m^-2 s^-1 sr^-1
primary_el=np.interp(E/gp.TeV_to_erg,x_primary_el,y_primary_el,right=0) * EGeV # GeV^2 m^-2 s^-1 sr^-1
secondary_el=np.interp(E/gp.TeV_to_erg,x_secondary_el,y_secondary_el,right=0) * EGeV # GeV^2 m^-2 s^-1 sr^-1
secondary_pos=np.interp(E/gp.TeV_to_erg,x_secondary_pos,y_secondary_pos,right=0) * EGeV # GeV^2 m^-2 s^-1 sr^-1
#primary_el=np.interp(E/gp.TeV_to_erg,x_primary_el,y_primary_el) * EGeV # GeV^2 m^-2 s^-1 sr^-1
#secondary_el=np.interp(E/gp.TeV_to_erg,x_secondary_el,y_secondary_el) * EGeV # GeV^2 m^-2 s^-1 sr^-1
#secondary_pos=np.interp(E/gp.TeV_to_erg,x_secondary_pos,y_secondary_pos) * EGeV # GeV^2 m^-2 s^-1 sr^-1
plt.loglog(E/gp.TeV_to_erg,primary_el, color = "blue", label ="Primary e$^-$") # E^3 J(E)
plt.loglog(E/gp.TeV_to_erg,secondary_el, color = "magenta", label ="Secondary e$^-$") # E^3 J(E)
plt.loglog(E/gp.TeV_to_erg,secondary_pos, color = "green", label ="Secondary e$^+$") # E^3 J(E)
#print "Primary electrons", primary_el
#print "Secondary electrons", secondary_el
#print "Secondary positrons", secondary_pos
#zipped=zip(E/gp.TeV_to_erg,primary_el)
#np.savetxt("Results/Flux_Earth_primary_electrons.txt", zipped)
#zipped=zip(E/gp.TeV_to_erg,secondary_el)
#np.savetxt("Results/Flux_Earth_secondary_electrons.txt", zipped)
#zipped=zip(E/gp.TeV_to_erg,secondary_pos)
#np.savetxt("Results/Flux_Earth_secondary_positrons.txt", zipped)
# Yuksel Figure 3, delta=0.4
#plt.loglog(Yuksel_delta04[:,0]*1.e-3,Yuksel_delta04[:,1],color = '0.75',label ="Yuksel Fig 3 delta 0.4")
#plt.loglog(Aharonian_Fig4[:,0]*1.e-3,Aharonian_Fig4[:,1],color = "cyan",label ="Aharonian Fig 4")
plt.ylabel("E$^3$ J(E) [GeV$^2$/(m$^2$s sr)]", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
plt.grid(color="black",alpha=.5)
#plt.legend(numpoints=1,handles=[AMS_points,HESS_points,Fermi_points],prop={'size':10},loc="upper right")
plt.legend(numpoints=1,prop={'size':10},loc="upper right",ncol=3)
plt.xlim([1e-3,1e1])
plt.ylim([1e0,1e3])
if (FIG_EPS):
fig.savefig("Figures/Flux_Earth_"+tag+".eps")
else:
fig.savefig("Figures/Flux_Earth_"+tag+".png")
# ********** POSITRON FRACTION ***********
fig = plt.figure()
flux_earth_Source=EGeV**3.*GeV_to_erg*fac*twoDarray.T[ii]
# All pulsars
#flux_earth_all_pulsars=EGeV**3.*GeV_to_erg*fac*f_st_int
#fraction=(0.5 * flux_earth_Source+secondary_pos)/(flux_earth_Source + flux_earth_all_pulsars + primary_el + secondary_el + secondary_pos)
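    # The factor 0.5 below assumes the pulsar injects e+ and e- in equal
    # numbers, so half of the source flux at the Earth is positrons.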
fraction=(0.5 * flux_earth_Source+secondary_pos)/(flux_earth_Source + primary_el + secondary_el + secondary_pos)
plt.loglog(E/gp.TeV_to_erg,fraction,label = "Fraction total")
    zipped=list(zip(E/gp.TeV_to_erg,fraction))
print ("Fraction",fraction)
np.savetxt("Results/Fraction_Total_Positron_Earth_"+tag+".txt", zipped)
fraction_galactic_positrons=secondary_pos/(flux_earth_Source+primary_el + secondary_el + secondary_pos)
plt.loglog(E/gp.TeV_to_erg,fraction_galactic_positrons,label = "Galactic e$^+$ fraction")
#print "fraction galactic positrons",fraction_galactic_positrons
    zipped=list(zip(E/gp.TeV_to_erg,fraction_galactic_positrons))
np.savetxt("Results/Fraction_Galactic_Positron_Earth_"+tag+".txt", zipped)
fraction_Source_positrons=0.5 * flux_earth_Source/(flux_earth_Source+primary_el + secondary_el + secondary_pos)
plt.loglog(E/gp.TeV_to_erg,fraction_Source_positrons,label = "Source e$^+$ fraction")
#print "fraction Source positrons",fraction_Source_positrons
    zipped=list(zip(E/gp.TeV_to_erg,fraction_Source_positrons))
np.savetxt("Results/Fraction_Source_Positron_Earth_"+tag+".txt", zipped)
# AMS Data positron fraction
plt.errorbar(AMS_positron_fraction[:,0]*1.e-3,AMS_positron_fraction[:,3],yerr=AMS_positron_fraction[:,4],fmt='o',color = "black",label="AMS",markeredgecolor='k')
# PAMELA Data positron fraction
plt.errorbar(PAMELA_positron_fraction[:,0]*1.e-3,PAMELA_positron_fraction[:,3],yerr=PAMELA_positron_fraction[:,4],fmt='o',color = "red",label="PAMELA",markeredgecolor='k')
plt.ylabel("e$^+$/(e$^+$+e$^-$)", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
plt.grid(color="black",alpha=.5)
plt.xlim([1e-4,1e0])
plt.ylim([1e-2,1e0])
plt.legend(numpoints=1,prop={'size':10},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Fraction_Earth_"+tag+".eps")
else:
fig.savefig("Figures/Fraction_Earth_"+tag+".png")
# Break in case we do not want to calculate the gamma-ray spectrum
if(ONLY_FLUX_EARTH):
exit()
# ************ ELECTRON COLUMN DENSITIES ************
# This integrates the spectra along the angular distance
values = []
for d in deg:
values.append(LineOfSightIntegration(d,0.,twoDarray,E,R,1e4))
#print "los,values",d,values
values = np.array(values)
IntSpec = []
    for va in values.T: # One per energy bin (va is the angular profile at that energy)
intsp = np.array(fu.IntegratedProfile(zip(deg * gp.pi/180.,2.*gp.pi * va * deg * gp.pi/180. * (1/(4*gp.pi)))))[:,1] # Integration over the solid angle.
intsp = np.array(intsp)
#print "intsp",intsp
#intsp = np.array(fu.IntegratedProfile(zip(deg * gp.pi/180.,2.*gp.pi * va * deg * gp.pi/180.)))[:,1] # Integration over the solid angle. We use deg instead of sin(deg)
# The (1/(4*gp.pi)) is to take into account that we are integrating over the solid angle
IntSpec.append(intsp)
IntSpec = np.array(IntSpec)
# plot the angular-integrated electron spectra
IntSpec = np.array(IntSpec.T)
fig = plt.figure()
for s in IntSpec:
plt.loglog(E/gp.TeV_to_erg,E**2.*s) # E is in erg
plt.grid(color="black",alpha=.5)
plt.ylim([1e-3,1e5])
plt.ylabel("E$^2$ dN/dE [erg/cm$^2$]", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
if (FIG_EPS):
fig.savefig("Figures/Electron_Spectra_"+tag+".eps")
else:
fig.savefig("Figures/Electron_Spectra_"+tag+".png")
#************* ELECTRON SPECTRA SOURCE ***********
values_diff_spectrum = []
#for d in deg:
for d in deg:
print ("los %.2f" % d)
values_diff_spectrum.append(LineOfSightVolumeIntegration(d,0.,twoDarray,E,R,1e4))
values_diff_spectrum = np.array(values_diff_spectrum)
# This integrates the spectra along the angular distance
IntSpec_volume = []
    for va_volume in values_diff_spectrum.T: # One per energy bin (va_volume is the angular profile at that energy)
#intsp_volume = np.array(fu.IntegratedProfile(zip(deg * gp.pi/180.,2.*gp.pi * va_volume * deg * gp.pi/180. * (1/(4*gp.pi)))))[:,1] # Integration over the solid angle.
intsp_volume = np.array(fu.IntegratedProfile(zip(deg * gp.pi/180.,2.*gp.pi * va_volume * deg * gp.pi/180. )))[:,1] # Integration over the solid angle.
# The (1/(4*gp.pi)) is to take into account that we are integrating over the solid angle
IntSpec_volume.append(intsp_volume)
IntSpec_volume = np.array(IntSpec_volume)
IntSpec_volume = np.array(IntSpec_volume.T)
#IntSpec_volume_all = LineOfSightVolumeIntegration(90.,0.,twoDarray,E,R,1e4) # 2.*gp.pi comes from the solid angle integral of half a sphere
#IntSpec_volume_all = 2.* gp.pi * np.array(IntSpec_volume_all)
#print IntSpec_volume_all
# plot the angular-integrated electron spectra
fig = plt.figure()
#for volume_spectra in IntSpec_volume:
# plt.loglog(E/gp.TeV_to_erg,E**2.*volume_spectra) # E is in erg
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[bin_1dot7],label='1.7 deg') # E is in erg
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[bin_5dot5],label='5.5 deg') # E is in erg
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[bin_8dot6],label='8.6 deg') # E is in erg
for d in degs :
ind=np.where(deg >= d)[0][0]
print ("ind %i,d %.2f" % (ind,d))
plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[ind-1],label='%s deg' % d)
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[bin_Milagro],label='2.6 deg [Milagro]') # E is in erg
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[bin_Size],label='%s deg' % SIZE) # E is in erg
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume_all,label='All') # E is in erg
#plt.loglog(x_Mehr,y_Mehr,label="Mehr flux",color='black')
plt.grid(color="black",alpha=.5)
plt.ylim([1e40,1e47])
plt.ylabel("E$^2$ dN/dE [erg]", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
plt.legend(prop={'size':9},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Electron_Spectra_Volume_"+tag+".eps")
else:
fig.savefig("Figures/Electron_Spectra_Volume_"+tag+".png")
    zipped=list(zip(E/gp.TeV_to_erg,E**2.*IntSpec_volume[ind-1]))
np.savetxt("Results/Electron_Spectra_Volume_%s_%sdeg.txt" %(tag,d), zipped)
# ************* GAMMA SPECTRUM ***************
# calculate the corresponding gamma-ray spectra of the angular integrated
# column densities
sp = []
inds = []
for d in degs :
inds.append(np.where(deg >= d)[0][0])
#print inds
#print IntSpec
for i,d in zip(inds,degs):
print ("i,d",i,d)
print ("IntSpec[i-1]",IntSpec[i-1])
s = IntSpec[i-1]
fr.SetElectrons(zip(E,s))
fr.CalculateDifferentialPhotonSpectrum(E)
sp.append(np.array(fr.GetTotalSED()))
sp = np.array(sp)
fig = plt.figure()
for s,d in zip(sp,degs):
print ("sp",s)
plt.loglog(s[:,0],s[:,1],label=r'%s deg' % (d)) # s[:,0] contains the Energy [TeV] and s[:,1] directly the SED [erg cm^-2 s^-1]
        zipped=list(zip(s[:,0],s[:,1]))
np.savetxt("Results/Gamma_Spectra_%s_%sdeg.txt" %(tag,d), zipped)
# Include in the plot Source's spectral energy distribution
x_Source = np.arange(1., 100., 0.1)
    y_Source = NORM*pow(x_Source/PIVOT_E,-GAMMA) * pow(x_Source,2) * gp.TeV_to_erg # NORM is in TeV^-1 cm^-2 s^-1; multiplying by E^2 [TeV^2] gives TeV cm^-2 s^-1 and gp.TeV_to_erg converts it to erg cm^-2 s^-1
plt.loglog(x_Source,y_Source,label=tag,color='black')
# Butterfly
y_max_Source_down = (NORM+NORM_ERR)*pow(x_Source/PIVOT_E,-(GAMMA+GAMMA_ERR)) * pow(x_Source,2) * gp.TeV_to_erg
y_min_Source_down = (NORM-NORM_ERR)*pow(x_Source/PIVOT_E,-(GAMMA-GAMMA_ERR)) * pow(x_Source,2) * gp.TeV_to_erg
y_max_Source_up = (NORM+NORM_ERR)*pow(x_Source/PIVOT_E,-(GAMMA-GAMMA_ERR)) * pow(x_Source,2) * gp.TeV_to_erg
y_min_Source_up = (NORM-NORM_ERR)*pow(x_Source/PIVOT_E,-(GAMMA+GAMMA_ERR)) * pow(x_Source,2) * gp.TeV_to_erg
    plt.fill_between(x_Source,y_min_Source_down,y_max_Source_down,where=x_Source<20,color='grey', alpha=0.5)
    plt.fill_between(x_Source,y_min_Source_up,y_max_Source_up,where=x_Source>20,color='grey', alpha=0.5)
# Milagro point
x_Milagro = 20.
y_Milagro = 6.9e-15 * pow(x_Milagro,2) * gp.TeV_to_erg
y_err_Milagro = 1.6e-15 * pow(x_Milagro,2) * gp.TeV_to_erg
#plt.errorbar(x_Milagro,y_Milagro,yerr=y_err_Milagro,fmt='o',color = "red",label="Milagro")
plt.ylabel("E$^2$ dN/dE [erg s$^{-1}$cm$^{-2}$]", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
plt.ylim([1e-16,1e-8])
plt.grid(color="black",alpha=.5)
plt.legend(numpoints=1,prop={'size':9},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Gamma_Spectra_"+tag+".eps")
else:
fig.savefig("Figures/Gamma_Spectra_"+tag+".png")
#fig.savefig("Figures/Gamma_Spectra_"+tag+".eps")
# ************* GAMMA SPECTRUM VOLUME ***************
# calculate the corresponding gamma-ray spectra of the angular integrated
# column densities
sp_volume = []
inds = []
for d in degs :
inds.append(np.where(deg >= d)[0][0])
#print inds
#print IntSpec
for i,d in zip(inds,degs):
print ("i,d",i,d)
print ("IntSpec_volume[i-1]",IntSpec_volume[i-1])
s = IntSpec_volume[i-1]
fr.SetElectrons(zip(E,s))
#fr.SetElectrons(zip(Mehr_data[:,0]*1.e-9,Mehr_data[:,2]*1.e9/ gp.TeV_to_erg))
fr.SetDistance(DIST*1.e3)
fr.CalculateDifferentialPhotonSpectrum(E)
sp_volume.append(np.array(fr.GetTotalSED()))
sp_volume = np.array(sp_volume)
fig = plt.figure()
for s,d in zip(sp_volume,degs):
print ("sp_volume",s)
plt.loglog(s[:,0],s[:,1],label=r'%s deg' % (d)) # s[:,0] contains the Energy [TeV] and s[:,1] directly the SED [erg cm^-2 s^-1]
plt.loglog(x_Source,y_Source,label=tag,color='black')
plt.ylabel("E$^2$ dN/dE [erg s$^{-1}$cm$^{-2}$]", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
plt.ylim([1e-16,1e-8])
plt.grid(color="black",alpha=.5)
plt.legend(prop={'size':9},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Gamma_Spectra_Volume_"+tag+".eps")
else:
fig.savefig("Figures/Gamma_Spectra_Volume_"+tag+".png")
# *********** GAMMA-RAY ANGULAR PROFILES ***********
# this gives an array of line-of-sight integrated electron spectra vs. angular
# distance from the source
# corr = np.diff(deg) * deg_to_rad * deg[1:] * deg_to_rad * gp.pi # sr^2 Delta_theta * theta * pi (area of the circular section)
deg_sqr = deg**2.
corr = np.diff(deg_sqr) * gp.pi # deg^2 Delta_theta^2 * pi (area of the ring)
# integrand for the solid angle.
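    # e.g. the ring between 1.0 deg and 1.1 deg covers pi*(1.1**2 - 1.0**2) ~ 0.66 deg^2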
# calculate the corresponding gamma-ray spectra of the *not* angular integrated
# column densities (aka surface brightness)
fig = plt.figure()
sb = []
sb_20TeV = []
diff_sb_20TeV = []
intspec_profile_1TeV = []
intspec_profile_20TeV = []
diffspec_profile_20TeV = []
diffspec_profile = []
EE_1TeV = 1. * gp.TeV_to_erg # We compute the profile above 1 TeV
EE_20TeV = 20. * gp.TeV_to_erg # We compute the profile above 20 TeV
EE_20TeV_list=[20 * gp.TeV_to_erg]
ee = np.logspace(math.log10(EE_1TeV),math.log10(EMAX),40)
IntSpec_diff = np.diff(IntSpec,axis=0)
# IntSpec is a 2D array with the values of the electron spectrum [100] x values for each d.
# This is dN/dE [d_i+1] - dN/dE [d_i]
for s in IntSpec_diff:
#print "IntSpec,axis=0",s
fr.SetElectrons(zip(E,s))
fr.SetDistance(0.)
fr.CalculateDifferentialPhotonSpectrum(ee)
intspec_profile_1TeV = fu.Integrate(fr.GetTotalSpectrum(),EE_1TeV,EMAX) # The E range should be given in erg
intspec_profile_20TeV = fu.Integrate(fr.GetTotalSpectrum(),EE_20TeV,EMAX) # The E range should be given in erg
        sb.append(intspec_profile_1TeV) # Integrated surface brightness of photons
        sb_20TeV.append(intspec_profile_20TeV) # Integrated surface brightness of photons
# Differential spectrum
fr.CalculateDifferentialPhotonSpectrum(EE_20TeV_list)
diffspec_profile_20TeV = fr.GetTotalSpectrum()[0][1]
diff_sb_20TeV.append(diffspec_profile_20TeV)
sb = np.array(sb)
sb_20TeV = np.array(sb_20TeV)
diff_sb_20TeV=np.array(diff_sb_20TeV)
profile = []
#for p,c in zip(s[:,1],corr):
for p,c in zip(sb,corr[1:]):
print ("s,corr 1 TeV",p,c)
profile.append(p/c) # Wrong, should be multiplied?
print ("p/c 1 TeV",p/c)
profile_20TeV = []
for p,c in zip(sb_20TeV,corr[1:]):
print ("s,corr 20 TeV",p,c)
profile_20TeV.append(p/c) # Wrong, should be multiplied?
print ("p/c 20 TeV",p/c)
# Calculate the contribution for the first bin in theta
fr.SetElectrons(zip(E,IntSpec[0]))
fr.SetDistance(0.)
fr.CalculateDifferentialPhotonSpectrum(EE_20TeV_list)
Total = fr.GetTotalSpectrum()[0][1] # The total differential flux per bin in theta
print ("Flux from theta[0] to theta[1]=",Total)
profile_20TeV_diff = []
profile_20TeV_only_flux = []
for p,c,d in zip(diff_sb_20TeV,corr[1:],deg[2:]):
print ("s,corr 20 TeV diff",p,c)
profile_20TeV_diff.append(p/c) # Wrong, should be multiplied?
profile_20TeV_only_flux.append(p)
print ("p/c 20 TeV diff",p/c)
Total=Total+p
print ("Total differential %f [erg^-1 s^1 cm^-2] for deg %f" % (Total,d))
#print "int = ",fu.Integrate(zip(gp.pi*deg_sqr[1:],profile),deg_sqr[1],deg_sqr[len(deg_sqr)-1])
profile = np.array(profile)
profile_20TeV = np.array(profile_20TeV)
#plt.plot(deg_reduced[1:],profile,label = "E >"+str(EE_1TeV/gp.TeV_to_erg)+" TeV") # Profile above 1 TeV
plt.plot(deg[2:],profile_20TeV,label = "E >"+str(EE_20TeV/gp.TeV_to_erg)+" TeV") # Profile above 20 TeV
ax = fig.add_subplot(111)
plt.ylabel("Surface luminosity [1/(s cm$^{2}$ deg$^{2}$)]", fontsize=13)
#plt.xlabel("angular distance [deg$^2$]", fontsize=13)
plt.xlabel("Angular distance [deg]", fontsize=13)
#plt.ylim([-120.,500.])
#ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
#ax.yaxis.labelpad = -10
#plt.ticklabel_format(style='sci', axis='y')
ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
plt.grid(color="black",alpha=.5)
plt.legend(prop={'size':8},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Gamma_Profiles_"+tag+".eps")
else:
fig.savefig("Figures/Gamma_Profiles_"+tag+".png")
# Differential profile
profile_20TeV_diff = np.array(profile_20TeV_diff)
fig = plt.figure()
plt.plot(deg[2:],profile_20TeV_diff,label = "E = 20 TeV")
ax = fig.add_subplot(111)
plt.ylabel("Surface luminosity [1/(erg s cm$^{2}$ deg$^{2}$)]", fontsize=13)
plt.xlabel("Angular distance [deg]", fontsize=13)
ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
#plt.ticklabel_format(style='sci', axis='y')
#plt.ylim([-120.,500.])
#ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
#ax.yaxis.labelpad = -10
plt.grid(color="black",alpha=.5)
plt.legend(prop={'size':8},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Gamma_Profile_Differential_"+tag+".eps")
else:
fig.savefig("Figures/Gamma_Profile_Differential_"+tag+".png")
    zipped=list(zip(deg[2:],profile_20TeV_diff))
np.savetxt("Results/Gamma_Profile_Differential_flux_solid_angle"+tag+".txt", zipped)
# Differential profile (only flux)
profile_20TeV_only_flux = np.array(profile_20TeV_only_flux)
fig = plt.figure()
plt.plot(deg[2:],profile_20TeV_only_flux,'o', label = "E = 20 TeV")
ax = fig.add_subplot(111)
plt.ylabel("Surface luminosity [1/(erg s cm$^{2}$)]", fontsize=13)
plt.xlabel("Angular distance [deg]", fontsize=13)
ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
#plt.ticklabel_format(style='sci', axis='y')
#ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
#ax.yaxis.labelpad = -10
plt.grid(color="black",alpha=.5)
plt.legend(prop={'size':8},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Gamma_Profile_Differential_flux_"+tag+".eps")
else:
fig.savefig("Figures/Gamma_Profile_Differential_flux_"+tag+".png")
    zipped=list(zip(deg[2:],profile_20TeV_only_flux))
np.savetxt("Results/Gamma_Profile_Differential_flux"+tag+".txt", zipped)
| 58,240 | 51.375 | 270 | py |
EDGE | EDGE-master/tests/test_sample.py | import edge
# Run EDGE tests
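# Minimal smoke test (placeholder sketch): verify that the package imports.
def test_import():
    assert edge is not None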
| 30 | 6.75 | 16 | py |
EDGE | EDGE-master/Science_paper/EDGE_Science_paper.py | #######################################################
# #
# Calculation of electron spectra, #
# gamma-ray spectra and electrons #
# flux at the Earth for different #
# initial parameters #
# #
#######################################################
# #
# Ruben Lopez-Coto, MPIK, rlopez@mpi-hd.mpg.de #
# Joachim Hahn, MPIK, joachim.hahn@mpi-hd.mpg.de #
# #
#######################################################
import os, sys
from math import exp
import math
import numpy as np
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
sys.path.append(os.path.abspath('/Users/rubenlopez/Code/GAMERA-master/lib'))
import gappa as gp
import argparse
import astropy.units as u
from gammapy.astro.population import make_base_catalog_galactic
from scipy.special import erfc
import matplotlib.ticker as mtick
from matplotlib.ticker import OldScalarFormatter, ScalarFormatter
global fp
fu = gp.Utils()
fr = gp.Radiation()
fp = gp.Particles()
fa = gp.Astro()
deg_to_rad = gp.pi / 180.
os.system("mkdir -p Figures")
os.system("mkdir -p Results")
global opts
p = argparse.ArgumentParser(description="Calculate the IC electron spectrum of sources")
p.add_argument("-n", "--name", dest="Name", type=str, default="Source",
help="Name of the source.")
p.add_argument("-f", "--file", dest="File", type=str, default="Data/GemingaProfile.dat",
help="File containing the angular profile of the source.")
p.add_argument("-al", "--alpha", dest="ALPHA", type=float, default=2.2,
help="Spectral index of the injection spectrum")
p.add_argument("-d", "--distance", dest="DIST", type=float, default=0.25,
help="Distance to the source (kpc)")
p.add_argument("-del", "--delta", dest="DELTA", type=float, default=0.33,
help="Diffusion index")
p.add_argument("-a", "--age", dest="AGE", type=float, default=3.e5,
help="Age of the source (yr)")
p.add_argument("-emax", "--emax", dest="EMAX", type=float, default=500.,
help="EMAX of accelerated electrons") # You give it in TeV but it is transformed to erg
p.add_argument("-m", "--mu", dest="MU", type=float, default=0.5,
help="Percentage of energy that goes into electrons")
p.add_argument("-d0", "--d0", dest="D0", type=float, default=4.e27,
help="Diffusion coefficient")
p.add_argument("-s", "--s", dest="SIZE", type=float, default=5.,
help="Size of the source given by the diffusion coefficient")
p.add_argument("-kn", "--kn", dest="KN", action='store_true', default=False,
help="Flag to activate or deactivate the KN option to calculate IC losses")
p.add_argument("-edens", "--edens", dest="TOT_E_DENS", type=float, default=1.06,
help="Total energy density. For Thomson losses.")
p.add_argument("-bfield", "--bfield", dest="BCONT", type=float, default=3.e-6,
help="Magnetic field")
p.add_argument("-edot", "--edot", dest="EDOT", type=float, default=3.2e34,
help="Spin-down power")
p.add_argument("-brind", "--brind", dest="BRIND", type=float, default=3.,
help="Breaking index")
p.add_argument("-tau", "--tau", dest="TC", type=float, default=1.2e4,
help="Initial spin-down timescale")
# Running-related inputs
p.add_argument("-all_pulsar", "--all_pulsar", dest="ALL_PULSAR", action='store_true', default=False,
help="Flag to calculate the contribution at the Earth of all pulsars")
p.add_argument("-only_flux_earth", "--only_flux_earth", dest="ONLY_FLUX_EARTH", action='store_true', default=False,
help="Only calculate the flux at the Earth and exit")
p.add_argument("-eps", "--eps", dest="FIG_EPS", action='store_true', default=False,
help="Save Figures in EPS format")
# Geminga-related inputs
p.add_argument("-norm", "--norm", dest="NORM", type=float, default=12.1e-15,
help="Normalization of the source's flux at a given pivot E")
p.add_argument("-norm_err", "--norm_err", dest="NORM_ERR", type=float, default=2.5e-15,
help="Error on the normalization of the source's flux at a given pivot E")
p.add_argument("-pivot", "--pivot", dest="PIVOT_E", type=float, default=20.,
help="Pivot energy for the normalization of the flux")
p.add_argument("-gamma", "--gamma", dest="GAMMA", type=float, default=2.40,
help="Spectral index of the gamma-ray spectrum")
p.add_argument("-gamma_err", "--gamma_err", dest="GAMMA_ERR", type=float, default=0.09,
help="Error on the spectral index of the gamma-ray spectrum")
# Input parameters
args = p.parse_args()
opts = args
AGE = opts.AGE # 3.e5 # yr Age
DIST = opts.DIST # 0.25 # kpc Distance
ALPHA = opts.ALPHA # 2.0 # Spectral index of the injection function
DELTA = opts.DELTA # 0.4 # Diffusion index
EMAX = opts.EMAX * gp.TeV_to_erg # 500 # erg
MU = opts.MU # 0.5 # Percentage of energy that goes into electrons
D0 = opts.D0 # 4.e27 # Diffusion coefficient
SIZE = opts.SIZE*2 # 4.7 # deg (to take into account the total emission integrated up to the tails)
KN = opts.KN # False
TOT_E_DENS= opts.TOT_E_DENS # 1.06 # eV/cm^3
BCONT = opts.BCONT # 3.e-6 # Gauss
EDOT = opts.EDOT # 3.2e34 # erg/s
BRIND = opts.BRIND # 3
TC = opts.TC # 1.2e4 # yr
NORM = opts.NORM # 12.1e-15 # TeV^-1 cm^-2 s^-1
NORM_ERR = opts.NORM_ERR # 2.5e-15 # TeV^-1 cm^-2 s^-1
PIVOT_E = opts.PIVOT_E # 20 # TeV
GAMMA = opts.GAMMA # 2.40
GAMMA_ERR = opts.GAMMA_ERR # 0.09
ALL_PULSAR= opts.ALL_PULSAR # False
ONLY_FLUX_EARTH= opts.ONLY_FLUX_EARTH # False
FIG_EPS = opts.FIG_EPS # False
t=AGE*gp.yr_to_sec # s
electron_mass=0.5e-6 # TeV/c^2
c=3.e10 # cm/s
#Edot=3.2e34 # erg/s
nu=4.218 # Hz Frequency
nu_dot=1.952e-13 # Hz/s First frequency derivative
nu_dot_dot_old=1.49e-25 # Hz/s^2 Second frequency derivative
nu_0=nu+nu_dot*t+nu_dot_dot_old*pow(t,2)
#nu_0=nu+nu_dot*t # Hz Initial frequency
l0 = 5.e-20 # s^-1
LUM0=EDOT/pow(1+AGE/TC,-1.*(BRIND+1.)/(BRIND-1.))# erg/s
E_star=3.e-3 * gp.TeV_to_erg # erg
TIMEOFFSET = 0. # s
AGEBURST = AGE # s
AGECONT = AGEBURST - TIMEOFFSET # s
ETA = .1
el_charge=4.80320427e-10 # StatC
TMIN = 1. # s
EMIN = 1.e-3 * gp.TeV_to_erg # erg
DENS = 1e-4
TIR = 20. # K
TOPT = 5e3 # K
WIR = 0.3 # erg/cm^3
WOPT = 0.3 # erg/cm^3
#BCONT = 3.e-6 # G Magnetic field for continuous emission
BBURST = 3.e-6 # G Magnetic field for burst emission
ESN = 2.5e48 # erg
# Luminosity evolution of a pulsar (simply spin-down)
def CalculateLuminosity(bins):
T = np.logspace(math.log10(TMIN),math.log10(2.*AGE),bins) # Array with the time
lum = MU*LUM0*(1.+T/TC)**(-1.*(BRIND+1.)/(BRIND-1.)) # Array with the luminosity for each of the times
print ("LUM0",LUM0)
if TIMEOFFSET != 0.:
t_index = np.max(np.where(T < TIMEOFFSET)[0])
lumBurst = np.vstack((T[:t_index], lum[:t_index])).T
lumCont = np.vstack((T[t_index+1:]-TIMEOFFSET, lum[t_index+1:])).T
else:
lumCont = np.vstack((T, lum)).T # We stack both arrays, having two columns, the first one for the time and the second for the corresponding luminosity
lumBurst = []
return np.log10(lumBurst),np.log10(lumCont)
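# Note: for the default braking index n = 3 the exponent -(n+1)/(n-1) equals -2,
# i.e. the classic magnetic-dipole spin-down L(t) = L0 / (1 + t/TC)^2.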
# Diffusion coefficient at energy e (in erg)
def Diffusion(e):
return D0 * math.pow(1. + e/E_star, DELTA)
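# Worked example (sketch, not called anywhere): with the defaults
# D0 = 4e27 cm^2/s, DELTA = 0.33 and E_star = 3 GeV, D(1 TeV) is roughly
# 3e28 cm^2/s, so the diffusion length over the default age of 3e5 yr,
# sqrt(4*D*t), comes out at a few hundred pc:
def _diffusion_length_example(e_TeV=1.):
    e = e_TeV * gp.TeV_to_erg
    D = Diffusion(e) # cm^2/s
    t = AGE * gp.yr_to_sec # s
    return math.sqrt(4.*D*t) / gp.pc_to_cm # diffusion length [pc]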
def CalculateEnergyTrajectory(fp):
e = EMAX
E = []
T = []
LossRates = []
LossRatesInverse = []
DiffIntegrand = []
DiffIntegrandInt = []
diff_int = 0.
t = 0.
while e > EMIN: # loop from EMAX to EMIN
if (KN):
lr = fp.EnergyLossRate(e)
#print "Calculating losses using the KN formula"
else:
gamma=e / gp.m_e
lr = (TOT_E_DENS * gp.TeV_to_erg * 1.e-12) * (4./3 * gp.sigma_T * gp.c_speed) * pow(gamma,2) # Loss rate for 1 eV/cm^3 energy density
# TOT_E_DENS in eV/cm^3 -> we transform it to erg/cm^3
# Thomson energy losses: 4./3 * sigma_t * c
# (TOT_E_DENS * gp.TeV_to_erg * 1.e-12) * (4./3 * gp.sigma_T * gp.c_speed) = l0 * m_e[erg] = 4./3 * gp.sigma_T * gp.c_speed/m_e[eV] * m_e[erg]
#print "Calculating losses using the Thomson formula"
#print "(TOT_E_DENS * gp.TeV_to_erg * 1.e-12) * (4./3 * gp.sigma_T * gp.c_speed)", (TOT_E_DENS * gp.TeV_to_erg * 1.e-12) * (4./3 * gp.sigma_T * gp.c_speed)
#print "lr,e,gamma",lr,e,gamma
dt = 1.e-3 * e / lr; # time increase
e -= dt * lr; # we decrease the energy in steps of DeltaE=dt*lr
t += dt / gp.yr_to_sec # and increase the time in steps of dt
D = Diffusion(e)
diff_int = diff_int + dt * lr * D / lr # This is Delta E * f(E), we are integrating in E the expression lambda = int(D(E)/E_dot)
T.append(t)
E.append(e)
DiffIntegrandInt.append(diff_int)
    etraj = np.log10(np.array(list(zip(T,E)))) # Energy trajectory (array of [(time0,energy0),(time1,energy1),...]). We zip in (T,E)
    etrajinverse = np.log10(np.flipud(np.array(list(zip(E,T))))) # Energy trajectory inverted (last element, corresponding to the minimum energy, is now the first). We zip in (E,T)
    lamb = np.log10(np.flipud(np.array(list(zip(E,DiffIntegrandInt))))) # Lambda (minimum energy goes first)
return etraj,etrajinverse,lamb
def Create_E_R_ArrayOfElectrons(rbins,ebins):
r = np.logspace(-3.,math.log10(1.e3*DIST*20.),rbins) # Array of distances to the pulsar [pc]
e = np.logspace(math.log10(EMIN),math.log10(EMAX),ebins) # Array of energies [erg]
twoDarray = []
twoDarrayPLOT = []
thr = 1e-30
#fig = plt.figure()
halfwidth = [] # The point at which the density of electrons has gone down to half its maximum value for a given energy
for ee in e: # For a given energy
print ("Energy [TeV]",ee/gp.TeV_to_erg)
#ee = ee*gp.TeV_to_erg
line = []
lineplot = []
linebplot = []
vlamb,DT,E0,E = FillLambdaVector(ee,1000) # We fill the mean free path vector for each of the energies
# vlamb is a vector of the form [(Energy,lambda_integral(Enow=ee)-lambda_integral(Energy))], where ee is varying from EMIN to EMAX and Energy from ee to EMAX
vzero = 0.
fill_halfwidth = True
for rr in r:
scont = Spectrum(ee,vlamb,rr,DT,E0) # Value in 1/(erg*cm^3) of the differential energy spectrum
# srl = SpectrumRectilinear(ee,rr)
if len(LUMBURST):
sburst = SpectrumBurst(ee,rr)
else:
sburst = 0.
# print scont,srl,sburst
v = scont + sburst# + srl
#loop to find the halfwidth of the distribution
if vzero == 0.:
vzero = v
if vzero != 0. and fill_halfwidth == True and v < 0.5 * vzero:
fill_halfwidth = False
halfwidth.append([ee,rr])
vplot = ee*ee*v # In the plot we represent E^2 dN/dE [erg/cm^3]
if v < thr:
v = thr
vplot = thr
line.append(v) # 1-D Array of densities for each of the different radii=rr and for a given energy ee [1/(erg*cm^3)]
lineplot.append(vplot) # The same, multiplied by E^2, for plotting [erg/cm^3]
twoDarray.append(np.array(line)) # 2-D Array containing all the 1-D Arrays previously mentioned, for each of the energies ee [1/(erg*cm^3)]
twoDarrayPLOT.append(np.array(lineplot))
halfwidth = np.array(halfwidth)
twoDarray = np.array(twoDarray)
twoDarrayPLOT = np.array(twoDarrayPLOT)
#fig.savefig("Figures/lambdas_"+tag+".png")
return twoDarray,twoDarrayPLOT,halfwidth,e,r
# fill vector of LAMBDA vs lower energy bound of electrons
def FillLambdaVector(Enow,bins):
DT = math.pow(10.,np.interp(math.log10(Enow),ETRAJCONTINVERSE[:,0],ETRAJCONTINVERSE[:,1])) # We interpolate between the first element (that is an array) of ETRAJCONTINVERSE (x=energy) and the second (f(x)=time) to obtain the interpolated time for a given energy
if DT < AGECONT: # If the interpolated time is smaller than the age, we can consider the initial energy E0=EMAX, otherwise we would be on curve 3 of the notes and the maximum energy would not be EMAX but the one calculated in the next step
E0 = EMAX
else:
E0 = math.pow(10.,np.interp(math.log10(DT-AGECONT),ETRAJCONT[:,0],ETRAJCONT[:,1])) # We interpolate between the first element (that is an array) of ETRAJCONT (x=time) and the second (f(x)=energy) to obtain the interpolated energy for a given time
E = np.logspace(math.log10(Enow),math.log10(E0),bins)
lamb = []
for e in E[1:]:
# LAMBCONT is the integral over the energy of dE D(E)/EDOT(E), from EMIN to EMAX
# We interpolate between the first time element of LAMBCONT (x=energy) and the second (f(x)=integral(dE D(E)/EDOT(E)))
v = math.pow(10.,np.interp(math.log10(Enow),LAMBCONT[:,0],LAMBCONT[:,1])) - math.pow(10.,np.interp(math.log10(e),LAMBCONT[:,0],LAMBCONT[:,1]))
# We are interested on int_E'^Enow{ dE D(E)/EDOT(E) }, therefore we need to break the integral:
# int_E'^Enow{ } = int_Emax^Enow{ } - int_Emax^E'{ } = lambda(Enow) - lambda(E')
# We subtract from the integral for Enow the integral for every energy e and fill a vector with this subtraction
lamb.append([e,v])
lamb = np.log10(np.array(lamb))
return lamb,DT,E0,lamb[:,0]
# main function to calculate the differential number (1/(erg*cm^3)) of electrons at
# energy e and radius R from the (point-) source in the *continuous* scenario.
def Spectrum(e,vlamb,R,DT,E0):
tmin = acc_time(E0,BCONT) # minimum acceleration time needed to accelerate the particle to that energy
if DT <= tmin:
return 0.
R = R * gp.pc_to_cm
spec = []
if ALPHA == 2.:
norm = math.log(EMAX/EMIN)
else:
norm = 1. / (ALPHA-2.) * (math.pow(EMIN, -ALPHA + 2.) - math.pow(EMAX, -ALPHA + 2.)) # Normalization of the electron spectrum
    vq = np.array(list(zip(LUMCONT[:,0],np.log10(10.**LUMCONT[:,1] / norm)))) # Array with Time and luminosity/normalization
T = np.logspace(math.log10(max(1e-3,AGECONT-DT)),math.log10(AGECONT-tmin),2000) # Array with Time in logarithmic bins
T2 = T - (AGECONT - tmin - DT)
e0 = 10.**np.interp(np.log10(T2),ETRAJCONT[:,0],ETRAJCONT[:,1])
lamb = 10.**np.interp(np.log10(e0),vlamb[:,0],vlamb[:,1])
Q = 10.**np.interp(np.log10(T),vq[:,0],vq[:,1])
#print "Q",Q
val = Q * e0 ** (-ALPHA) * e0*e0 * np.exp(-R*R/(4.*lamb)) /( e*e * (4.*gp.pi*lamb)**1.5 )
np.place(val, val!=val, [0.])
val = fu.Integrate(zip(T*gp.yr_to_sec,val),T[0]*gp.yr_to_sec,T[len(T)-1]*gp.yr_to_sec) # Differential number of electrons for an energy e and at radius R [1/(erg * cm^3)]
return val
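# Note: the spatial kernel exp(-R^2/(4*lamb)) / (4*pi*lamb)**1.5 used above is
# the normalised Green's function of 3-D diffusion, i.e. its volume integral
# int_0^inf 4*pi*R^2 G(R) dR equals 1, with lamb = int dE' D(E')/Edot(E')
# playing the role of (diffusion length)^2 / 4.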
# acceleration time for particles of energy e. Used to determine starting time
# of injection.
def acc_time(energy,b):
momentum = ( energy - gp.m_e) / c
gyrorad = momentum * c / (el_charge * b)
tacc = gyrorad / c / ETA
return tacc / gp.yr_to_sec
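# For scale: at the default EMAX = 500 TeV in B = 3 uG with ETA = 0.1, the
# gyroradius is ~0.2 pc and acc_time returns roughly 6 yr, i.e. the assumed
# acceleration time is negligible compared to the source age.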
def InitialiseGappa(fp,fr,b,age):
fr.AddThermalTargetPhotons(2.7,0.26*gp.eV_to_erg) #CMB
fr.AddThermalTargetPhotons(TIR,WIR*gp.eV_to_erg) #IR
fr.AddThermalTargetPhotons(TOPT,WOPT*gp.eV_to_erg) #OPT
fr.CreateICLossLookup()
fr.SetBField(b)
fr.SetAmbientDensity(DENS)
fr.SetDistance(0.) # This will calculate the luminosity (which is what we want for the LOS integral)
fp.SetBField(b)
fp.SetICLossLookup(fr.GetICLossLookup())
fp.SetAmbientDensity(DENS)
fp.SetAge(age)
return fp
# Calculate electron column densities for every spectral energy bin along the
# l, b direction (although this model is radially symmetric,
# so only one angle is required...).
def LineOfSightIntegration(l,b,twoDarray,e,r,rbins):
    # twoDarray contains the density of electrons in bins of r and e [1/(erg*cm^3)]
    # l is the angular distance from the source along the line of sight [deg]
    # b is the second (orthogonal) line-of-sight angle [deg]
    # e is the energy [erg]
    # r is an array with distances from the pulsar [pc]
rvals = np.logspace(-6.,math.log10(DIST),rbins)
# make r-steps so that they are very fine at the source [kpc]
rvals = np.concatenate(((DIST - rvals)[::-1],rvals + DIST))
#rvals = np.linspace(0.,2.*DIST,rbins)
vals = []
los = line_of_sight(l,b,rvals,fa) # Array with xyz positions w.r.t. the Earth for all the elements with angle < l
integrand = []
for xyz in los:
x = xyz[0]
y = xyz[1]
z = xyz[2]
rr = math.sqrt( x * x + y * y + z * z) * 1000. # *1000 to convert it into pc
r_index = np.where(r > rr)[0][0] # It returns the index of the first element where the condition is fulfilled
integrandE = []
        for i in range(len(e)):
val = twoDarray.T[r_index][i] # we add for all the energies the twoDarray element with index r_index (Remember, twoDarray [1/(erg*cm^3)])
integrandE.append(val) # For every energy, we add a value to the integrandE array, with the density corresponding to the distance r[r_index]
integrand.append(integrandE)
# Array containing, for each xyz value
# in the line of sight from the Earth,
# the integrandE of the densities for
# the distance corresponding to r[r_index] for all the energies
integrand = np.array(integrand).T
for integr in integrand:
vals.append(fu.Integrate(zip(rvals*gp.kpc_to_cm,integr),rvals[0]*gp.kpc_to_cm,rvals[len(rvals)-1]*gp.kpc_to_cm))
        # Integrate integr over rvals
        # from rvals[0]
        # to rvals[len(rvals)-1] (rvals converted from kpc to cm)
return vals # Units [1/(erg * cm^2)]
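# Note the contrast with LineOfSightVolumeIntegration below: the function above
# returns a column density, int n dl [1/(erg*cm^2)], while the volume version
# adds an r^2 weight and returns a total spectrum, int n r^2 dl [1/erg].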
def LineOfSightVolumeIntegration(l,b,twoDarray,e,r,rbins):
    # twoDarray contains the density of electrons in bins of r and e [1/(erg*cm^3)]
    # l is the angular distance from the source along the line of sight [deg]
    # b is the second (orthogonal) line-of-sight angle [deg]
    # e is the energy [erg]
    # r is an array with distances from the pulsar [pc]
rvals = np.logspace(-6.,math.log10(DIST),rbins)
# make r-steps so that they are very fine at the source [kpc]
rvals = np.concatenate(((DIST - rvals)[::-1],rvals + DIST))
#rvals = np.linspace(0.,2.*DIST,rbins)
vals = []
los = line_of_sight(l,b,rvals,fa) # Array with xyz positions w.r.t. the Earth for all the elements with angle < l
integrand = []
for xyz in los:
x = xyz[0]
y = xyz[1]
z = xyz[2]
rr = math.sqrt( x * x + y * y + z * z) * 1000. # *1000 to convert it into pc
r_index = np.where(r > rr)[0][0] # It returns the index of the first element where the condition is fulfilled
integrandE = []
        for i in range(len(e)):
val = twoDarray.T[r_index][i] # we add for all the energies the twoDarray element with index r_index (Remember, twoDarray [1/(erg*cm^3)])
integrandE.append(val) # For every energy, we add a value to the integrandE array, with the density corresponding to the distance r[r_index]
integrand.append(integrandE)
# Array containing, for each xyz value
# in the line of sight from the Earth,
# the integrandE of the densities for
# the distance corresponding to r[r_index] for all the energies
integrand = np.array(integrand).T
for integr in integrand:
vals.append(fu.Integrate(zip(rvals*gp.kpc_to_cm,integr*rvals*rvals*math.pow(gp.kpc_to_cm,2)),rvals[0]*gp.kpc_to_cm,rvals[len(rvals)-1]*gp.kpc_to_cm))
        # Integrate integr*rvals*rvals over rvals
        # from rvals[0]
        # to rvals[len(rvals)-1] (rvals converted from kpc to cm)
return vals # Units [1/(erg)]
# creates an array of x,y,z values along a line of sight in the l,b direction
def line_of_sight(l,b,rvals,fa):
xyz_obs = [DIST, 0. ,0. ]
los = []
for r in rvals:
xyz = fa.GetCartesian(r,l,b,xyz_obs)
# It gives the xyz position of a point w.r.t. the Earth. It is a vector with 3 components [0]=x,[1]=y,[2]=z
los.append(xyz)
return np.array(los)
# Calculate the contribution of a homogeneous distribution of
# pulsars in the galaxy
def Homogeneus_distribution_pulsars(age,SN_rate):
    n_sources = int((age * SN_rate).decompose().value) # strip astropy units; the catalogue generator expects an integer count
table = make_base_catalog_galactic(n_sources=n_sources,
rad_dis='L06',
vel_dis='F06B',
                                       max_age=age,
spiralarms=True)
return table
# ********** HOMOGENEOUS PULSAR CONTRIBUTION ********
def Flux_Earth_all_pulsars(E):
max_age = 1e7 * u.yr
SN_rate = 2. / (100. * u.yr)
pulsar_distribution = Homogeneus_distribution_pulsars(max_age,SN_rate);
x_pc = np.array(pulsar_distribution[3][:]) # in kpc
y_pc = np.array(pulsar_distribution[4][:]) # in kpc
age = np.array(pulsar_distribution[9][:]) * gp.yr_to_sec # in s
x_Earth = x_pc - 8.3 # in kpc
y_Earth = y_pc - 0 # in kpc
d = np.sqrt(x_Earth * x_Earth + y_Earth * y_Earth) * gp.kpc_to_cm
# Steady flux
# Eq 21 Atoyan et al. 1995
Q0 = 5.e32 # 1/(erg * s)
f_st_int = []
for e in E:
D = Diffusion(e) # cm^2/s
t_gamma = []
for t in age:
if (t < gp.m_e/(l0 * e)):
t_gamma.append(t)
else:
t_gamma.append(gp.m_e/(l0 * e))
t_gamma = np.array(t_gamma)
f_st = Q0 * e**-2.4 / (4*gp.pi * D * d) * erfc(d/(2 * np.sqrt(D * t_gamma)))
#print "f_st",f_st
#print "t_gamma",t_gamma
#print "d",d
#print "D",D
#print "Q0 * e**-2.4",Q0 * e**-2.4
#print "(4*gp.pi * D * d)", (4*gp.pi * D * d)
#print "D * t_gamma", D * t_gamma
#print "np.sqrt(D * t_gamma)",np.sqrt(D * t_gamma)
#print "d/(2 * np.sqrt(D * t_gamma)",d/(2 * np.sqrt(D * t_gamma))
#print "erfc(d/(2 * np.sqrt(D * t_gamma)))",erfc(d/(2 * np.sqrt(D * t_gamma)))
# Condition to consider the contribution of pulsars at a distance > 1 kpc.
# Note: If we do not add this condition, the electron emission extends up to TeV energies
sum_all_pulsars = sum(f_st[i] for i in range(len(f_st)) if d[i] > 1 * gp.kpc_to_cm)
#sum_all_pulsars = sum(f_st)
f_st_int.append(sum_all_pulsars)
# Check how many pulsars there are in a region of 1 kpc from the Earth
N_pulsars_1kpc = 0
print "Positions of pulsars at distance < 1 kpc"
print "Distance x_Earth y_Earth"
for i in range(len(f_st)):
if d[i] < 1 * gp.kpc_to_cm:
print d[i],x_Earth[i],y_Earth[i]
N_pulsars_1kpc += 1
print "The number of pulsars within 1 kpc distance from the Earth is ", N_pulsars_1kpc
return f_st_int
# ******************** MAIN FUNCTION ******************
if __name__=='__main__':
global LUM
global tag
#points = opts.File
#tag = sys.argv[2]
tag = opts.Name
#data = np.loadtxt(points)
AMS_data = np.loadtxt("../Data/Data_points/AMS_data.dat",skiprows=2)
HESS_data = np.loadtxt("../Data/Data_points/HESS_data.dat",skiprows=2)
Fermi_data = np.loadtxt("../Data/Data_points/Fermi_data.dat",skiprows=2)
AMS_positron_fraction = np.loadtxt("../Data/Data_points/AMS_positron_fraction.dat",skiprows=2)
PAMELA_positron_fraction = np.loadtxt("../Data/Data_points/PAMELA_positron_fraction.dat",skiprows=2)
# Curves from other papers
Yuksel_delta04 = np.loadtxt("../Data/Predictions_papers/Yuksel_Fig3_dotted_delta04.csv",delimiter=',')
Aharonian_Fig4 = np.loadtxt("../Data/Predictions_papers/Aharonian_1995_Fig4_time_dependent_injection.csv",skiprows=1)
min_bin_deg = 0.
max_bin_deg = SIZE+0.2
nbins = SIZE*10+3
#nbins = 51
#deg = np.linspace(min_bin_deg,max_bin_deg,nbins)
deg1=np.linspace(0.01,0.09,9)
    deg2=np.linspace(0.1,SIZE,int(SIZE*10))
deg=np.concatenate((deg1,deg2))
#degs = [1.7, 5.5, 8.6]
degs = [2.6,SIZE] # IMPROVE ME!: SIZE should be an array with different sizes, just to compare
#bin_1dot7=int(1.7/((max_bin_deg-min_bin_deg)/nbins))
#bin_5dot5=int(5.5/((max_bin_deg-min_bin_deg)/nbins))
#bin_8dot6=int(8.6/((max_bin_deg-min_bin_deg)/nbins))
bin_Size = int(SIZE/((max_bin_deg-min_bin_deg)/nbins)) # Bin for the corresponding size given by diffusion
bin_Milagro = int(2.6/((max_bin_deg-min_bin_deg)/nbins)) # Bin for the corresponding size given by Milagro's point at FWHM=2.6
#****************** LUMINOSITY *************
LUMBURST,LUMCONT = CalculateLuminosity(10000)
fig = plt.figure()
#print "LUMBURST,LUMCONT",LUMBURST,LUMCONT
if len(LUMBURST) != 0:
plt.plot(10.**LUMBURST[:,0],10.**LUMBURST[:,1],label=" ")
plt.loglog(10.**LUMCONT[:,0],10.**LUMCONT[:,1]/MU,label="Pulsar evolution luminosity")
#plt.xlim([0.,10.*TC])
plt.xlim([0.,2*AGE])
#plt.xlim([1.e5,AGE])
plt.ylim([EDOT/10.,LUM0*100])
plt.ylabel(r'L$_e$ [erg/s]')
plt.xlabel("Age [kyr]")
plt.plot((1., 2*AGE), (EDOT, EDOT), label=r'Constant injection luminosity',color='red')
plt.plot((TC, TC), (EDOT/10., LUM0*100), label=r'$\tau_c$',color='black',linestyle = "dashed")
plt.plot((AGE, AGE), (EDOT/10., LUM0*100), label=r'Now',color='blue',linestyle = "dashed")
#print TC,EDOT/10.,LUMCONT[0,1]
plt.title(r'L$_0$=%.1e erg/s; $\tau_c$=%.1e yr; n = %.1f' %(LUM0,TC,BRIND))
plt.grid(color="black",alpha=.5)
plt.legend(prop={'size':10},loc="upper left")
#plt.legend(title="log10(L0),t0,n =\n"+str(round(math.log10(LUM0),2))+","+str(round(TC,2))+","+str(round(BRIND,2)),loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Luminosity_"+tag+".eps")
else:
fig.savefig("Figures/Luminosity_"+tag+".png")
#************* ELECTRON DENSITY IN SPACE AND ENERGY ***********
global ETRAJCONT,ETRAJBURST,ETRAJCONTINVERSE,ETRAJBURSTINVERSE,LAMBCONT,LAMBBURST
fp = InitialiseGappa(fp,fr,BBURST,AGEBURST)
ETRAJBURST,ETRAJBURSTINVERSE,LAMBBURST = CalculateEnergyTrajectory(fp)
fp = InitialiseGappa(fp,fr,BCONT,AGECONT)
ETRAJCONT,ETRAJCONTINVERSE,LAMBCONT = CalculateEnergyTrajectory(fp)
# This creates an array of electron densities in (E,R) space
twoDarray,twoDarrayPLOT,halfwidth,E,R = Create_E_R_ArrayOfElectrons(400,100)
# twoDarray: 2-D Array containing all the electron densities, for each of the radii and the energies [1/(erg*cm^3)]
# twoDarrayPLOT: The same * E^2, for plotting [erg/cm^3)]
# halfwidth: Array containing, for each energy, the distance at which the maximum density goes to half
# E: Array of the energies [erg]
# R: Array of the radii [pc]
#print halfwidth
# plot it!
fig,ax = plt.subplots(1, 1,figsize=(7,5))
logarray = np.log10(twoDarrayPLOT)
levels = np.linspace(np.amin(logarray),np.amax(logarray),100)
plt.contourf(np.log10(E/gp.TeV_to_erg),np.log10(R), logarray.T,levels, cmap=plt.get_cmap('viridis')) # Density of electrons
#plt.plot(np.log10(halfwidth[:,0]),np.log10(halfwidth[:,1]),color="black",linestyle = "dashed") # Line limiting half of the density of the electrons for a given energy
plt.grid(color="black",alpha=.5)
cbar = plt.colorbar()
cbar.set_label(r'log$_{10}$(E$^2$ $\frac{\mathrm{dN}}{\mathrm{dE}})$ [erg cm$^{-3}$]')
plt.ylabel("log$_{10}$ (R) [pc]")
plt.xlabel("log$_{10}$ (E) [TeV]")
if (FIG_EPS):
fig.savefig("Figures/Electrons_E_R_Array_"+tag+".eps")
else:
fig.savefig("Figures/Electrons_E_R_Array_"+tag+".png")
#***************** ELECTRON FLUX EARTH *****************
GeV_to_erg = 1.e-3 * gp.TeV_to_erg
ii = np.where(R >= 1000.*DIST)[0][0]
    # First index where R >= 1000*DIST (i.e. the source distance expressed in pc)
# Since the problem is spherically symmetric, the flux at Earth is equal to the flux at any point of the sphere with radius R=1000*DIST
#print "-->",R[ii]
fig = plt.figure()
fac = 1e4 * c / (4.*gp.pi) # c/4pi in cm/s, 1e4 transform the cm^-2 to m^-2 in the E^3 J(E) function
EGeV = E/gp.TeV_to_erg * 1.e3 # GeV
plt.loglog(E/gp.TeV_to_erg,EGeV**3.*GeV_to_erg*fac*twoDarray.T[ii],color="black", label=tag) # E^3 J(E)
    # GeV_to_erg converts one power of GeV in the numerator to erg, cancelling the erg^-1 carried by twoDarray [1/(erg*cm^3)]
#print "EGeV**3.*GeV_to_erg*fac*f_st_int",EGeV**3.*GeV_to_erg*fac*f_st_int
print ("Flux_Earth",EGeV**3.*GeV_to_erg*fac*twoDarray.T[ii])
    zipped=list(zip(E/gp.TeV_to_erg,EGeV**3.*GeV_to_erg*fac*twoDarray.T[ii]))
np.savetxt("Results/Flux_Earth"+tag+".txt", zipped)
# ********** HOMOGENEOUS PULSAR CONTRIBUTION ********
if (ALL_PULSAR):
f_st_int=Flux_Earth_all_pulsars(E)
        zipped_all_pulsars=list(zip(E/gp.TeV_to_erg,EGeV**3.*GeV_to_erg*fac*f_st_int))
np.savetxt("Results/Flux_Earth_all_pulsars"+tag+".txt", zipped_all_pulsars)
plt.loglog(E/gp.TeV_to_erg,EGeV**3.*GeV_to_erg*fac*f_st_int,color="red",label="All pulsars [d > 1 kpc]") # E^3 J(E)
# AMS Data all electron flux
y_AMS = AMS_data[:,3]*pow(AMS_data[:,0],3) # F x E^3
yerror_AMS = AMS_data[:,4]*pow(AMS_data[:,0],3)
AMS_points = plt.errorbar(AMS_data[:,0]*1.e-3,y_AMS,yerr=yerror_AMS,fmt='o',color = "black",label="AMS",markeredgecolor='k')
# HESS Data all electron flux
y_HESS = HESS_data[:,3]*pow(HESS_data[:,0],3)
yerror_HESS = HESS_data[:,4]*pow(HESS_data[:,0],3)
HESS_points = plt.errorbar(HESS_data[:,0]*1.e-3,y_HESS,yerr=yerror_HESS,fmt='^',color = "red",label="HESS",markeredgecolor='k')
# Fermi Data all electron flux
y_Fermi = Fermi_data[:,3]*pow(Fermi_data[:,0],3)
yerror_Fermi = Fermi_data[:,4]*pow(Fermi_data[:,0],3)
Fermi_points = plt.errorbar(Fermi_data[:,0]*1.e-3,y_Fermi,yerr=yerror_Fermi,fmt='s',color = "blue", label="Fermi",markeredgecolor='k')
# Values for galactic electrons and positrons
# From Moskalenko and Strong (1998), Figure 5, left panel
primary_el_data = np.loadtxt("../Data/Moskalenko_and_Strong/Primary_electrons.txt",skiprows=1)
secondary_el_data = np.loadtxt("../Data/Moskalenko_and_Strong/Secondary_electrons.txt",skiprows=1)
secondary_pos_data = np.loadtxt("../Data/Moskalenko_and_Strong/Secondary_positrons.txt",skiprows=1)
x_primary_el = primary_el_data[:,0] * 1e-6 # TeV
y_primary_el = primary_el_data[:,1] * 1e-3 * 1e4 # GeV m^-2 s^-1 sr^-1
x_secondary_el = secondary_el_data[:,0] * 1e-6 # TeV
y_secondary_el = secondary_el_data[:,1] * 1e-3 * 1e4 # GeV m^-2 s^-1 sr^-1
x_secondary_pos = secondary_pos_data[:,0] * 1e-6 # TeV
y_secondary_pos = secondary_pos_data[:,1] * 1e-3 * 1e4 # GeV m^-2 s^-1 sr^-1
primary_el=np.interp(E/gp.TeV_to_erg,x_primary_el,y_primary_el,right=0) * EGeV # GeV^2 m^-2 s^-1 sr^-1
secondary_el=np.interp(E/gp.TeV_to_erg,x_secondary_el,y_secondary_el,right=0) * EGeV # GeV^2 m^-2 s^-1 sr^-1
secondary_pos=np.interp(E/gp.TeV_to_erg,x_secondary_pos,y_secondary_pos,right=0) * EGeV # GeV^2 m^-2 s^-1 sr^-1
#primary_el=np.interp(E/gp.TeV_to_erg,x_primary_el,y_primary_el) * EGeV # GeV^2 m^-2 s^-1 sr^-1
#secondary_el=np.interp(E/gp.TeV_to_erg,x_secondary_el,y_secondary_el) * EGeV # GeV^2 m^-2 s^-1 sr^-1
#secondary_pos=np.interp(E/gp.TeV_to_erg,x_secondary_pos,y_secondary_pos) * EGeV # GeV^2 m^-2 s^-1 sr^-1
plt.loglog(E/gp.TeV_to_erg,primary_el, color = "blue", label ="Primary e$^-$") # E^3 J(E)
plt.loglog(E/gp.TeV_to_erg,secondary_el, color = "magenta", label ="Secondary e$^-$") # E^3 J(E)
plt.loglog(E/gp.TeV_to_erg,secondary_pos, color = "green", label ="Secondary e$^+$") # E^3 J(E)
#print "Primary electrons", primary_el
#print "Secondary electrons", secondary_el
#print "Secondary positrons", secondary_pos
#zipped=zip(E/gp.TeV_to_erg,primary_el)
#np.savetxt("Results/Flux_Earth_primary_electrons.txt", zipped)
#zipped=zip(E/gp.TeV_to_erg,secondary_el)
#np.savetxt("Results/Flux_Earth_secondary_electrons.txt", zipped)
#zipped=zip(E/gp.TeV_to_erg,secondary_pos)
#np.savetxt("Results/Flux_Earth_secondary_positrons.txt", zipped)
# Yuksel Figure 3, delta=0.4
#plt.loglog(Yuksel_delta04[:,0]*1.e-3,Yuksel_delta04[:,1],color = '0.75',label ="Yuksel Fig 3 delta 0.4")
#plt.loglog(Aharonian_Fig4[:,0]*1.e-3,Aharonian_Fig4[:,1],color = "cyan",label ="Aharonian Fig 4")
plt.ylabel("E$^3$ J(E) [GeV$^2$/(m$^2$s sr)]", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
plt.grid(color="black",alpha=.5)
#plt.legend(numpoints=1,handles=[AMS_points,HESS_points,Fermi_points],prop={'size':10},loc="upper right")
plt.legend(numpoints=1,prop={'size':10},loc="upper right",ncol=3)
plt.xlim([1e-3,1e1])
plt.ylim([1e0,1e3])
if (FIG_EPS):
fig.savefig("Figures/Flux_Earth_"+tag+".eps")
else:
fig.savefig("Figures/Flux_Earth_"+tag+".png")
# ********** POSITRON FRACTION ***********
fig = plt.figure()
flux_earth_Source=EGeV**3.*GeV_to_erg*fac*twoDarray.T[ii]
# All pulsars
#flux_earth_all_pulsars=EGeV**3.*GeV_to_erg*fac*f_st_int
#fraction=(0.5 * flux_earth_Source+secondary_pos)/(flux_earth_Source + flux_earth_all_pulsars + primary_el + secondary_el + secondary_pos)
fraction=(0.5 * flux_earth_Source+secondary_pos)/(flux_earth_Source + primary_el + secondary_el + secondary_pos)
plt.loglog(E/gp.TeV_to_erg,fraction,label = "Fraction total")
print ("Fraction",fraction)
    zipped=list(zip(E/gp.TeV_to_erg,fraction))
np.savetxt("Results/Fraction_Total_Positron_Earth_"+tag+".txt", zipped)
fraction_galactic_positrons=secondary_pos/(flux_earth_Source+primary_el + secondary_el + secondary_pos)
plt.loglog(E/gp.TeV_to_erg,fraction_galactic_positrons,label = "Galactic e$^+$ fraction")
#print "fraction galactic positrons",fraction_galactic_positrons
    zipped=list(zip(E/gp.TeV_to_erg,fraction_galactic_positrons))
np.savetxt("Results/Fraction_Galactic_Positron_Earth_"+tag+".txt", zipped)
fraction_Source_positrons=0.5 * flux_earth_Source/(flux_earth_Source+primary_el + secondary_el + secondary_pos)
plt.loglog(E/gp.TeV_to_erg,fraction_Source_positrons,label = "Source e$^+$ fraction")
#print "fraction Source positrons",fraction_Source_positrons
    zipped=list(zip(E/gp.TeV_to_erg,fraction_Source_positrons))
np.savetxt("Results/Fraction_Source_Positron_Earth_"+tag+".txt", zipped)
# AMS Data positron fraction
plt.errorbar(AMS_positron_fraction[:,0]*1.e-3,AMS_positron_fraction[:,3],yerr=AMS_positron_fraction[:,4],fmt='o',color = "black",label="AMS",markeredgecolor='k')
# PAMELA Data positron fraction
plt.errorbar(PAMELA_positron_fraction[:,0]*1.e-3,PAMELA_positron_fraction[:,3],yerr=PAMELA_positron_fraction[:,4],fmt='o',color = "red",label="PAMELA",markeredgecolor='k')
plt.ylabel("e$^+$/(e$^+$+e$^-$)", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
plt.grid(color="black",alpha=.5)
plt.xlim([1e-4,1e0])
plt.ylim([1e-2,1e0])
plt.legend(numpoints=1,prop={'size':10},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Fraction_Earth_"+tag+".eps")
else:
fig.savefig("Figures/Fraction_Earth_"+tag+".png")
# Break in case we do not want to calculate the gamma-ray spectrum
if(ONLY_FLUX_EARTH):
exit()
# ************ ELECTRON COLUMN DENSITIES ************
# This integrates the spectra along the angular distance
values = []
for d in deg:
values.append(LineOfSightIntegration(d,0.,twoDarray,E,R,1e4))
#print "los,values",d,values
values = np.array(values)
IntSpec = []
    for va in values.T: # One per energy bin (va is the angular profile at that energy)
intsp = np.array(fu.IntegratedProfile(zip(deg * gp.pi/180.,2.*gp.pi * va * deg * gp.pi/180. * (1/(4*gp.pi)))))[:,1] # Integration over the solid angle.
intsp = np.array(intsp)
#print "intsp",intsp
#intsp = np.array(fu.IntegratedProfile(zip(deg * gp.pi/180.,2.*gp.pi * va * deg * gp.pi/180.)))[:,1] # Integration over the solid angle. We use deg instead of sin(deg)
# The (1/(4*gp.pi)) is to take into account that we are integrating over the solid angle
IntSpec.append(intsp)
IntSpec = np.array(IntSpec)
# plot the angular-integrated electron spectra
IntSpec = np.array(IntSpec.T)
fig = plt.figure()
for s in IntSpec:
plt.loglog(E/gp.TeV_to_erg,E**2.*s) # E is in erg
plt.grid(color="black",alpha=.5)
plt.ylim([1e-3,1e5])
plt.ylabel("E$^2$ dN/dE [erg/cm$^2$]", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
if (FIG_EPS):
fig.savefig("Figures/Electron_Spectra_"+tag+".eps")
else:
fig.savefig("Figures/Electron_Spectra_"+tag+".png")
#************* ELECTRON SPECTRA SOURCE ***********
values_diff_spectrum = []
for d in deg:
print ("los %.2f" % d)
values_diff_spectrum.append(LineOfSightVolumeIntegration(d,0.,twoDarray,E,R,1e4))
values_diff_spectrum = np.array(values_diff_spectrum)
# This integrates the spectra along the angular distance
IntSpec_volume = []
for va_volume in values_diff_spectrum.T: # One per angle definition
#intsp_volume = np.array(fu.IntegratedProfile(zip(deg * gp.pi/180.,2.*gp.pi * va_volume * deg * gp.pi/180. * (1/(4*gp.pi)))))[:,1] # Integration over the solid angle.
    intsp_volume = np.array(fu.IntegratedProfile(list(zip(deg * gp.pi/180.,2.*gp.pi * va_volume * deg * gp.pi/180. ))))[:,1] # Integration over the solid angle.
# The (1/(4*gp.pi)) is to take into account that we are integrating over the solid angle
IntSpec_volume.append(intsp_volume)
IntSpec_volume = np.array(IntSpec_volume)
IntSpec_volume = np.array(IntSpec_volume.T)
#IntSpec_volume_all = LineOfSightVolumeIntegration(90.,0.,twoDarray,E,R,1e4) # 2.*gp.pi comes from the solid angle integral of half a sphere
#IntSpec_volume_all = 2.* gp.pi * np.array(IntSpec_volume_all)
#print IntSpec_volume_all
# plot the angular-integrated electron spectra
fig = plt.figure()
#for volume_spectra in IntSpec_volume:
# plt.loglog(E/gp.TeV_to_erg,E**2.*volume_spectra) # E is in erg
#Mehr_data = np.loadtxt("Data/GemingaIC.txt",skiprows=2)
#y_Mehr = Mehr_data[:,2]*1.e9*pow(Mehr_data[:,0]*1.e-9,2)* gp.TeV_to_erg
#x_Mehr = Mehr_data[:,0]*1.e-9
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[bin_1dot7],label='1.7 deg') # E is in erg
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[bin_5dot5],label='5.5 deg') # E is in erg
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[bin_8dot6],label='8.6 deg') # E is in erg
for d in degs :
ind=np.where(deg >= d)[0][0]
print ("ind %i,d %.2f" % (ind,d))
plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[ind-1],label='%s deg' % d)
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[bin_Milagro],label='2.6 deg [Milagro]') # E is in erg
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume[bin_Size],label='%s deg' % SIZE) # E is in erg
#plt.loglog(E/gp.TeV_to_erg,E**2.*IntSpec_volume_all,label='All') # E is in erg
#plt.loglog(x_Mehr,y_Mehr,label="Mehr flux",color='black')
plt.grid(color="black",alpha=.5)
plt.ylim([1e40,1e47])
plt.ylabel("E$^2$ dN/dE [erg]", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
plt.legend(prop={'size':9},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Electron_Spectra_Volume_"+tag+".eps")
else:
fig.savefig("Figures/Electron_Spectra_Volume_"+tag+".png")
zipped=list(zip(E/gp.TeV_to_erg,E**2.*IntSpec_volume[ind-1]))
np.savetxt("Results/Electron_Spectra_Volume_%s_%sdeg.txt" %(tag,d), zipped)
# ************* GAMMA SPECTRUM ***************
# calculate the corresponding gamma-ray spectra of the angular integrated
# column densities
sp = []
inds = []
for d in degs :
inds.append(np.where(deg >= d)[0][0])
#print inds
#print IntSpec
for i,d in zip(inds,degs):
print ("i,d",i,d)
print ("IntSpec[i-1]",IntSpec[i-1])
s = IntSpec[i-1]
    fr.SetElectrons(list(zip(E,s)))
fr.CalculateDifferentialPhotonSpectrum(E)
sp.append(np.array(fr.GetTotalSED()))
sp = np.array(sp)
fig = plt.figure()
for s,d in zip(sp,degs):
print ("sp",s)
plt.loglog(s[:,0],s[:,1],label=r'%s deg' % (d)) # s[:,0] contains the Energy [TeV] and s[:,1] directly the SED [erg cm^-2 s^-1]
    zipped=list(zip(s[:,0],s[:,1]))
np.savetxt("Results/Gamma_Spectra_%s_%sdeg.txt" %(tag,d), zipped)
# Include in the plot Geminga's spectral energy distribution
x_Geminga = np.arange(1., 100., 0.1)
y_Geminga = NORM*pow(x_Geminga/PIVOT_E,-GAMMA) * pow(x_Geminga,2) * gp.TeV_to_erg # Norm is given in TeV^-1 cm^-2 s^-1, but when multiplied by E^2 it is converted to TeV
plt.loglog(x_Geminga,y_Geminga,label=tag,color='black')
# Butterfly
y_max_Geminga_down = (NORM+NORM_ERR)*pow(x_Geminga/PIVOT_E,-(GAMMA+GAMMA_ERR)) * pow(x_Geminga,2) * gp.TeV_to_erg
y_min_Geminga_down = (NORM-NORM_ERR)*pow(x_Geminga/PIVOT_E,-(GAMMA-GAMMA_ERR)) * pow(x_Geminga,2) * gp.TeV_to_erg
y_max_Geminga_up = (NORM+NORM_ERR)*pow(x_Geminga/PIVOT_E,-(GAMMA-GAMMA_ERR)) * pow(x_Geminga,2) * gp.TeV_to_erg
y_min_Geminga_up = (NORM-NORM_ERR)*pow(x_Geminga/PIVOT_E,-(GAMMA+GAMMA_ERR)) * pow(x_Geminga,2) * gp.TeV_to_erg
plt.fill_between(x_Geminga,y_min_Geminga_down,y_max_Geminga_down,where=x_Geminga<20,color='grey', alpha=0.5)
plt.fill_between(x_Geminga,y_min_Geminga_up,y_max_Geminga_up,where=x_Geminga>20,color='grey', alpha=0.5)
# Milagro point
x_Milagro = 20.
y_Milagro = 6.9e-15 * pow(x_Milagro,2) * gp.TeV_to_erg
y_err_Milagro = 1.6e-15 * pow(x_Milagro,2) * gp.TeV_to_erg
plt.errorbar(x_Milagro,y_Milagro,yerr=y_err_Milagro,fmt='o',color = "red",label="Milagro")
plt.ylabel("E$^2$ dN/dE [erg s$^{-1}$cm$^{-2}$]", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
plt.ylim([1e-16,1e-8])
plt.grid(color="black",alpha=.5)
plt.legend(numpoints=1,prop={'size':9},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Gamma_Spectra_"+tag+".eps")
else:
fig.savefig("Figures/Gamma_Spectra_"+tag+".png")
#fig.savefig("Figures/Gamma_Spectra_"+tag+".eps")
# ************* GAMMA SPECTRUM VOLUME ***************
# calculate the corresponding gamma-ray spectra of the angular integrated
# column densities
sp_volume = []
inds = []
for d in degs :
inds.append(np.where(deg >= d)[0][0])
#print inds
#print IntSpec
for i,d in zip(inds,degs):
print ("i,d",i,d)
print ("IntSpec_volume[i-1]",IntSpec_volume[i-1])
s = IntSpec_volume[i-1]
    fr.SetElectrons(list(zip(E,s)))
#fr.SetElectrons(zip(Mehr_data[:,0]*1.e-9,Mehr_data[:,2]*1.e9/ gp.TeV_to_erg))
fr.SetDistance(DIST*1.e3)
fr.CalculateDifferentialPhotonSpectrum(E)
sp_volume.append(np.array(fr.GetTotalSED()))
sp_volume = np.array(sp_volume)
fig = plt.figure()
for s,d in zip(sp_volume,degs):
print ("sp_volume",s)
plt.loglog(s[:,0],s[:,1],label=r'%s deg' % (d)) # s[:,0] contains the Energy [TeV] and s[:,1] directly the SED [erg cm^-2 s^-1]
plt.loglog(x_Geminga,y_Geminga,label=tag,color='black')
plt.ylabel("E$^2$ dN/dE [erg s$^{-1}$cm$^{-2}$]", fontsize=13)
plt.xlabel("E [TeV]", fontsize=13)
plt.ylim([1e-16,1e-8])
plt.grid(color="black",alpha=.5)
plt.legend(prop={'size':9},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Gamma_Spectra_Volume_"+tag+".eps")
else:
fig.savefig("Figures/Gamma_Spectra_Volume_"+tag+".png")
# *********** GAMMA-RAY ANGULAR PROFILES ***********
# this gives an array of line-of-sight integrated electron spectra vs. angular
# distance from the source
# corr = np.diff(deg) * deg_to_rad * deg[1:] * deg_to_rad * gp.pi # sr^2 Delta_theta * theta * pi (area of the circular section)
deg_sqr = deg**2.
corr = np.diff(deg_sqr) * gp.pi # deg^2 Delta_theta^2 * pi (area of the ring)
# integrand for the solid angle.
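# Worked check: the annulus between theta_i and theta_i+1 has area
# pi*(theta_i+1**2 - theta_i**2) deg^2, which is exactly np.diff(deg**2)*pi,
# so dividing a ring-integrated flux by corr gives flux per deg^2.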
# calculate the corresponding gamma-ray spectra of the *not* angular integrated
# column densities (aka surface brightness)
fig = plt.figure()
sb = []
sb_20TeV = []
diff_sb_20TeV = []
intspec_profile_1TeV = []
intspec_profile_20TeV = []
diffspec_profile_20TeV = []
diffspec_profile = []
EE_1TeV = 1. * gp.TeV_to_erg # We compute the profile above 1 TeV
EE_20TeV = 20. * gp.TeV_to_erg # We compute the profile above 20 TeV
EE_20TeV_list=[20 * gp.TeV_to_erg]
ee = np.logspace(math.log10(EE_1TeV),math.log10(EMAX),40)
IntSpec_diff = np.diff(IntSpec,axis=0)
# IntSpec is a 2D array with the values of the electron spectrum [100] x values for each d.
# This is dN/dE [d_i+1] - dN/dE [d_i]
for s in IntSpec_diff:
#print "IntSpec,axis=0",s
    fr.SetElectrons(list(zip(E,s)))
fr.SetDistance(0.)
fr.CalculateDifferentialPhotonSpectrum(ee)
intspec_profile_1TeV = fu.Integrate(fr.GetTotalSpectrum(),EE_1TeV,EMAX) # The E range should be given in erg
intspec_profile_20TeV = fu.Integrate(fr.GetTotalSpectrum(),EE_20TeV,EMAX) # The E range should be given in erg
    sb.append(intspec_profile_1TeV) # Integrated surface brightness of photons
    sb_20TeV.append(intspec_profile_20TeV) # Integrated surface brightness of photons
# Differential spectrum
fr.CalculateDifferentialPhotonSpectrum(EE_20TeV_list)
diffspec_profile_20TeV = fr.GetTotalSpectrum()[0][1]
diff_sb_20TeV.append(diffspec_profile_20TeV)
sb = np.array(sb)
sb_20TeV = np.array(sb_20TeV)
diff_sb_20TeV=np.array(diff_sb_20TeV)
profile = []
#for p,c in zip(s[:,1],corr):
for p,c in zip(sb,corr[1:]):
print ("s,corr 1 TeV",p,c)
    profile.append(p/c) # divide by the ring area: flux per unit solid angle (surface brightness)
print ("p/c 1 TeV",p/c)
profile_20TeV = []
for p,c in zip(sb_20TeV,corr[1:]):
print ("s,corr 20 TeV",p,c)
    profile_20TeV.append(p/c) # divide by the ring area: flux per unit solid angle
print ("p/c 20 TeV",p/c)
# Calculate the contribution for the first bin in theta
fr.SetElectrons(list(zip(E,IntSpec[0])))
fr.SetDistance(0.)
fr.CalculateDifferentialPhotonSpectrum(EE_20TeV_list)
Total = fr.GetTotalSpectrum()[0][1] # The total differential flux per bin in theta
print ("Flux from theta[0] to theta[1]=",Total)
profile_20TeV_diff = []
profile_20TeV_only_flux = []
for p,c,d in zip(diff_sb_20TeV,corr[1:],deg[2:]):
print ("s,corr 20 TeV diff",p,c)
    profile_20TeV_diff.append(p/c) # divide by the ring area: flux per unit solid angle
profile_20TeV_only_flux.append(p)
print ("p/c 20 TeV diff",p/c)
Total=Total+p
print ("Total differential",Total, "[erg^-1 s^1 cm^-2] for deg",d)
#print "int = ",fu.Integrate(zip(gp.pi*deg_sqr[1:],profile),deg_sqr[1],deg_sqr[len(deg_sqr)-1])
profile = np.array(profile)
profile_20TeV = np.array(profile_20TeV)
#plt.plot(deg_reduced[1:],profile,label = "E >"+str(EE_1TeV/gp.TeV_to_erg)+" TeV") # Profile above 1 TeV
plt.plot(deg[2:],profile_20TeV,label = "E >"+str(EE_20TeV/gp.TeV_to_erg)+" TeV") # Profile above 20 TeV
ax = fig.add_subplot(111)
plt.ylabel("Surface luminosity [1/(s cm$^{2}$ deg$^{2}$)]", fontsize=13)
#plt.xlabel("angular distance [deg$^2$]", fontsize=13)
plt.xlabel("Angular distance [deg]", fontsize=13)
#plt.ylim([-120.,500.])
#ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
#ax.yaxis.labelpad = -10
#plt.ticklabel_format(style='sci', axis='y')
ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
plt.grid(color="black",alpha=.5)
plt.legend(prop={'size':8},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Gamma_Profiles_"+tag+".eps")
else:
fig.savefig("Figures/Gamma_Profiles_"+tag+".png")
# Differential profile
profile_20TeV_diff = np.array(profile_20TeV_diff)
fig = plt.figure()
plt.plot(deg[2:],profile_20TeV_diff,label = "E = 20 TeV")
ax = fig.add_subplot(111)
plt.ylabel("Surface luminosity [1/(erg s cm$^{2}$ deg$^{2}$)]", fontsize=13)
plt.xlabel("Angular distance [deg]", fontsize=13)
ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
#plt.ticklabel_format(style='sci', axis='y')
#plt.ylim([-120.,500.])
#ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
#ax.yaxis.labelpad = -10
plt.grid(color="black",alpha=.5)
plt.legend(prop={'size':8},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Gamma_Profile_Differential_"+tag+".eps")
else:
fig.savefig("Figures/Gamma_Profile_Differential_"+tag+".png")
zipped=list(zip(deg[2:],profile_20TeV_diff))
np.savetxt("Results/Gamma_Profile_Differential_flux_solid_angle"+tag+".txt", zipped)
# Differential profile (only flux)
profile_20TeV_only_flux = np.array(profile_20TeV_only_flux)
fig = plt.figure()
plt.plot(deg[2:],profile_20TeV_only_flux,'o', label = "E = 20 TeV")
ax = fig.add_subplot(111)
plt.ylabel("Surface luminosity [1/(erg s cm$^{2}$)]", fontsize=13)
plt.xlabel("Angular distance [deg]", fontsize=13)
ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
#plt.ticklabel_format(style='sci', axis='y')
#ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
#ax.yaxis.labelpad = -10
plt.grid(color="black",alpha=.5)
plt.legend(prop={'size':8},loc="upper right")
if (FIG_EPS):
fig.savefig("Figures/Gamma_Profile_Differential_flux_"+tag+".eps")
else:
fig.savefig("Figures/Gamma_Profile_Differential_flux_"+tag+".png")
zipped=list(zip(deg[2:],profile_20TeV_only_flux)) # save the flux-only profile plotted above
np.savetxt("Results/Gamma_Profile_Differential_flux"+tag+".txt", zipped)
| 55,889 | 51.726415 | 270 | py |
DoSA | DoSA-main/generate_annotations.py | import os
print("Warning:Installing tesseract on machine")
os.system('apt-get install tesseract-ocr -y')
print("tesseract should be installed")
import time
from transformers import LayoutLMv3Processor, LayoutLMv3ForTokenClassification, LayoutLMv3FeatureExtractor
from datasets import load_dataset
from PIL import Image, ImageDraw, ImageFont
import argparse
dataset = load_dataset("nielsr/funsd", split="test")
# define id2label, label2color
labels = dataset.features['ner_tags'].feature.names
id2label = {idx: label for idx, label in enumerate(labels)}
label2color = {'question':'blue', 'answer':'green', 'header':'orange', 'other':'violet'}
l2l = {'question':'key', 'answer':'value', 'header':'title'}
f_labels = {'question':'key', 'answer':'value', 'header':'title', 'other':'others'}
processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
model = LayoutLMv3ForTokenClassification.from_pretrained("nielsr/layoutlmv3-finetuned-funsd",ignore_mismatched_sizes = True)
feature_extractor = LayoutLMv3FeatureExtractor()
def iob_to_label(label):
label = label[2:]
if not label:
return 'other'
return label
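# Example: iob_to_label('B-QUESTION') -> 'QUESTION'; the bare 'O' tag maps to 'other'.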
def unnormalize_box(bbox, width, height):
return [
width * (bbox[0] / 1000),
height * (bbox[1] / 1000),
width * (bbox[2] / 1000),
height * (bbox[3] / 1000),
]
import json
def normalize_box(box, width, height):
return [
int(1000 * (box[0] / width)),
int(1000 * (box[1] / height)),
int(1000 * (box[2] / width)),
int(1000 * (box[3] / height)),
]
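# Illustrative round trip for the two box helpers above (LayoutLM-style models
# expect coordinates on a 0-1000 grid; the values here are made up):
# normalize_box([100, 50, 300, 150], width=1000, height=500) -> [100, 100, 300, 300]
# unnormalize_box([100, 100, 300, 300], 1000, 500) -> [100.0, 50.0, 300.0, 150.0]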
def parsing(true_predictions,token_boxes,bbdict):
cluster_master=[]
cluster=[]
tbox_list = []
picked=[]
    prev=''
    prevx = prevy = 0 # initialise so the first gap check cannot raise a NameError
for prediction,tbox in zip(true_predictions,token_boxes):
# print(prediction,tbox)
x,y=tbox[:2]
if tbox not in picked:
picked.append(tbox)
else:
continue
try:
word=bbdict[str(tbox)]
gap=False
if x-prevx > 150 or y-prevy > 20 :
gap=True
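            # heuristic: token boxes appear to be in LayoutLM's 0-1000 grid, so
            # a jump of >150 horizontally or >20 vertically starts a new field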
if prediction in ['B-QUESTION','I-QUESTION']:
if prev=='value':
cluster_master.append((cluster,'value',x,y,tbox_list))
cluster=[]
tbox_list = []
elif gap:
cluster_master.append((cluster,prev,x,y,tbox_list))
cluster=[]
tbox_list = []
cluster.append(word)
tbox_list.append(tbox)
prev='key'
else:
if prev=='key' :
cluster_master.append((cluster,'key',x,y,tbox_list))
cluster=[]
tbox_list = []
elif gap:
cluster_master.append((cluster,prev,x,y,tbox_list))
cluster=[]
tbox_list = []
cluster.append(word)
tbox_list.append(tbox)
prev='value'
except:
pass
prevx=x
prevy=y
cluster_master.append((cluster,prev,x,y,tbox_list))
# return cluster_master
    key_value=dict()
    prevtext = prevtyp = '' # avoid an undefined name if the first cluster is a value
for item in cluster_master:
# print('item',item)
typ=item[1]
text=' '.join(item[0])
x,y=item[2:4]
if typ=='value' and prevtyp=='key':
key_value[prevtext]=text
prevtext=text
prevtyp=typ
prevx=x
prevy=y
return key_value,cluster_master
def con_coordinates(lst):
try:
x1 = lst[0][0]
y1 = lst[0][1]
x2 = lst[0][2]
y2 = lst[0][3]
for i in range(1,len(lst)):
if lst[i][0] < x1:
x1 = lst[i][0]
if lst[i][1] < y1:
y1 = lst[i][1]
if lst[i][2] > x2:
x2 = lst[i][2]
if lst[i][3] > y2:
y2 = lst[i][3]
return [x1,y1,x2,y2]
except Exception as e:
return []
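# Example: con_coordinates([[10, 20, 50, 60], [5, 30, 40, 80]]) -> [5, 20, 50, 80],
# i.e. the tightest box enclosing every input box; an empty list yields [].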
def main(img):
image = img.convert("RGB")
width, height = image.size
features = feature_extractor(image, return_tensors="pt")
words,boxes=features['words'][0],features['boxes'][0]
bbdict=dict()
for word,box in zip(words,boxes):
bbdict[str(box)]=word
start=time.time()
encoding = processor(image, words, boxes=boxes, return_tensors="pt")
outputs = model(**encoding)
    print('Inference finished; exporting results to results/final_annotated')
# get predictions
predictions = outputs.logits.argmax(-1).squeeze().tolist()
token_boxes = encoding.bbox.squeeze().tolist()
true_predictions = [id2label[pred] for idx, pred in enumerate(predictions)]
true_boxes = [unnormalize_box(box, width, height) for idx, box in enumerate(token_boxes)]
# draw predictions over the image
draw = ImageDraw.Draw(image)
font = ImageFont.truetype("tools/arial/arial.ttf", 10, encoding="unic")
for prediction, box in zip(true_predictions, true_boxes):
predicted_label = iob_to_label(prediction).lower()
if predicted_label != 'other' and predicted_label !='header' :
draw.rectangle(box, outline=label2color[predicted_label])
draw.text((box[0]+10, box[1]-10), f_labels[predicted_label], fill=label2color[predicted_label])
else:
continue
output,cluster_master=parsing(true_predictions,token_boxes,bbdict)
## saving output key value dict as json file
    json_name = str(img.filename.split('/')[-1].split('.')[0])+'_key_value'+'.json'
with open(os.path.join('results/json_output/',json_name), 'w') as fp:
json.dump(output, fp)
## saving funsd annnotated image
    filename = str(img.filename.split('/')[-1].split('.')[0])+'_annotated'+'.jpeg'
image.save(os.path.join('results/funsd_output',filename))
key_boxes = []
for item in cluster_master:
key_boxes.append((item[0], item[1], item[2], item[3], con_coordinates(item[4])))
    key_box=dict()
    prevtext = prevtyp = '' # avoid an undefined name if the first item is a value
for item in key_boxes:
typ=item[1]
text=' '.join(item[0])
x,y=item[2:4]
if typ=='value' and prevtyp=='key':
key_box[prevtext]={'value':text,'box':item[4]}
prevtext=text
prevtyp=typ
prevx=x
prevy=y
image_2 = img.convert("RGB")
draw = ImageDraw.Draw(image_2)
width, height = image_2.size
for item in key_box.items():
box = unnormalize_box(item[1]['box'], width, height)
draw.rectangle(box, outline='green')
        draw.text((box[0] + 10, box[1] - 10), item[0], fill='green', font=font)
    filename = str(img.filename.split('/')[-1].split('.')[0]) + '_annotated' + '.jpg'
## saving post processed annnotated image
image_2.save((os.path.join('results/final_annotated', filename)))
return image_2,output
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", type=str, required=True, help='Input Image')
args = parser.parse_args()
im = Image.open(args.i)
main(im)
| 6,655 | 28.065502 | 124 | py |
trx | trx-main/video_reader.py | import torch
from torchvision import datasets, transforms
from PIL import Image
import os
import zipfile
import io
import numpy as np
import random
import re
import pickle
from glob import glob
from videotransforms.video_transforms import Compose, Resize, RandomCrop, RandomRotation, ColorJitter, RandomHorizontalFlip, CenterCrop, TenCrop
from videotransforms.volume_transforms import ClipToTensor
"""Contains video frame paths and ground truth labels for a single split (e.g. train videos). """
class Split():
def __init__(self):
self.gt_a_list = []
self.videos = []
def add_vid(self, paths, gt_a):
self.videos.append(paths)
self.gt_a_list.append(gt_a)
def get_rand_vid(self, label, idx=-1):
match_idxs = []
for i in range(len(self.gt_a_list)):
if label == self.gt_a_list[i]:
match_idxs.append(i)
if idx != -1:
return self.videos[match_idxs[idx]], match_idxs[idx]
random_idx = np.random.choice(match_idxs)
return self.videos[random_idx], random_idx
def get_num_videos_for_class(self, label):
return len([gt for gt in self.gt_a_list if gt == label])
def get_unique_classes(self):
return list(set(self.gt_a_list))
def get_max_video_len(self):
max_len = 0
for v in self.videos:
l = len(v)
if l > max_len:
max_len = l
return max_len
def __len__(self):
return len(self.gt_a_list)
"""Dataset for few-shot videos, which returns few-shot tasks. """
class VideoDataset(torch.utils.data.Dataset):
def __init__(self, args):
self.args = args
self.get_item_counter = 0
self.data_dir = args.path
self.seq_len = args.seq_len
self.train = True
self.tensor_transform = transforms.ToTensor()
self.img_size = args.img_size
self.annotation_path = args.traintestlist
self.way=args.way
self.shot=args.shot
self.query_per_class=args.query_per_class
self.train_split = Split()
self.test_split = Split()
self.setup_transforms()
self._select_fold()
self.read_dir()
"""Setup crop sizes/flips for augmentation during training and centre crop for testing"""
def setup_transforms(self):
video_transform_list = []
video_test_list = []
if self.img_size == 84:
video_transform_list.append(Resize(96))
video_test_list.append(Resize(96))
elif self.img_size == 224:
video_transform_list.append(Resize(256))
video_test_list.append(Resize(256))
else:
print("img size transforms not setup")
exit(1)
video_transform_list.append(RandomHorizontalFlip())
video_transform_list.append(RandomCrop(self.img_size))
video_test_list.append(CenterCrop(self.img_size))
self.transform = {}
self.transform["train"] = Compose(video_transform_list)
self.transform["test"] = Compose(video_test_list)
"""Loads all videos into RAM from an uncompressed zip. Necessary as the filesystem has a large block size, which is unsuitable for lots of images. """
"""Contains some legacy code for loading images directly, but this has not been used/tested for a while so might not work with the current codebase. """
def read_dir(self):
# load zipfile into memory
if self.data_dir.endswith('.zip'):
self.zip = True
zip_fn = os.path.join(self.data_dir)
self.mem = open(zip_fn, 'rb').read()
self.zfile = zipfile.ZipFile(io.BytesIO(self.mem))
else:
self.zip = False
# go through zip and populate splits with frame locations and action groundtruths
if self.zip:
dir_list = list(set([x for x in self.zfile.namelist() if '.jpg' not in x]))
class_folders = list(set([x.split(os.sep)[-3] for x in dir_list if len(x.split(os.sep)) > 2]))
class_folders.sort()
self.class_folders = class_folders
video_folders = list(set([x.split(os.sep)[-2] for x in dir_list if len(x.split(os.sep)) > 3]))
video_folders.sort()
self.video_folders = video_folders
class_folders_indexes = {v: k for k, v in enumerate(self.class_folders)}
video_folders_indexes = {v: k for k, v in enumerate(self.video_folders)}
img_list = [x for x in self.zfile.namelist() if '.jpg' in x]
img_list.sort()
c = self.get_train_or_test_db(video_folders[0])
last_video_folder = None
last_video_class = -1
insert_frames = []
for img_path in img_list:
class_folder, video_folder, jpg = img_path.split(os.sep)[-3:]
if video_folder != last_video_folder:
if len(insert_frames) >= self.seq_len:
c = self.get_train_or_test_db(last_video_folder.lower())
if c != None:
c.add_vid(insert_frames, last_video_class)
else:
pass
insert_frames = []
class_id = class_folders_indexes[class_folder]
vid_id = video_folders_indexes[video_folder]
insert_frames.append(img_path)
last_video_folder = video_folder
last_video_class = class_id
c = self.get_train_or_test_db(last_video_folder)
if c != None and len(insert_frames) >= self.seq_len:
c.add_vid(insert_frames, last_video_class)
else:
class_folders = os.listdir(self.data_dir)
class_folders.sort()
self.class_folders = class_folders
for class_folder in class_folders:
video_folders = os.listdir(os.path.join(self.data_dir, class_folder))
video_folders.sort()
if self.args.debug_loader:
video_folders = video_folders[0:1]
for video_folder in video_folders:
c = self.get_train_or_test_db(video_folder)
if c == None:
continue
imgs = os.listdir(os.path.join(self.data_dir, class_folder, video_folder))
if len(imgs) < self.seq_len:
continue
imgs.sort()
paths = [os.path.join(self.data_dir, class_folder, video_folder, img) for img in imgs]
paths.sort()
class_id = class_folders.index(class_folder)
c.add_vid(paths, class_id)
print("loaded {}".format(self.data_dir))
print("train: {}, test: {}".format(len(self.train_split), len(self.test_split)))
""" return the current split being used """
def get_train_or_test_db(self, split=None):
if split is None:
get_train_split = self.train
else:
if split in self.train_test_lists["train"]:
get_train_split = True
elif split in self.train_test_lists["test"]:
get_train_split = False
else:
return None
if get_train_split:
return self.train_split
else:
return self.test_split
""" load the paths of all videos in the train and test splits. """
def _select_fold(self):
lists = {}
for name in ["train", "test"]:
fname = "{}list{:02d}.txt".format(name, self.args.split)
f = os.path.join(self.annotation_path, fname)
selected_files = []
with open(f, "r") as fid:
data = fid.readlines()
data = [x.replace(' ', '_').lower() for x in data]
data = [x.strip().split(" ")[0] for x in data]
data = [os.path.splitext(os.path.split(x)[1])[0] for x in data]
if "kinetics" in self.args.path:
data = [x[0:11] for x in data]
selected_files.extend(data)
lists[name] = selected_files
self.train_test_lists = lists
""" Set len to large number as we use lots of random tasks. Stopping point controlled in run.py. """
    def __len__(self):
        return 1000000
""" Get the classes used for the current split """
def get_split_class_list(self):
c = self.get_train_or_test_db()
classes = list(set(c.gt_a_list))
classes.sort()
return classes
"""Loads a single image from a specified path """
def read_single_image(self, path):
if self.zip:
with self.zfile.open(path, 'r') as f:
with Image.open(f) as i:
i.load()
return i
else:
with Image.open(path) as i:
i.load()
return i
"""Gets a single video sequence. Handles sampling if there are more frames than specified. """
def get_seq(self, label, idx=-1):
c = self.get_train_or_test_db()
paths, vid_id = c.get_rand_vid(label, idx)
n_frames = len(paths)
if n_frames == self.args.seq_len:
idxs = [int(f) for f in range(n_frames)]
else:
if self.train:
excess_frames = n_frames - self.seq_len
excess_pad = int(min(5, excess_frames / 2))
if excess_pad < 1:
start = 0
end = n_frames - 1
else:
start = random.randint(0, excess_pad)
end = random.randint(n_frames-1 -excess_pad, n_frames-1)
else:
start = 1
end = n_frames - 2
if end - start < self.seq_len:
end = n_frames - 1
start = 0
else:
pass
idx_f = np.linspace(start, end, num=self.seq_len)
idxs = [int(f) for f in idx_f]
if self.seq_len == 1:
idxs = [random.randint(start, end-1)]
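        # Example: a 30-frame test video with seq_len=8 keeps start=1, end=28, and
        # linspace(1, 28, 8) yields frame indices [1, 4, 8, 12, 16, 20, 24, 28].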
imgs = [self.read_single_image(paths[i]) for i in idxs]
if (self.transform is not None):
if self.train:
transform = self.transform["train"]
else:
transform = self.transform["test"]
imgs = [self.tensor_transform(v) for v in transform(imgs)]
imgs = torch.stack(imgs)
return imgs, vid_id
"""returns dict of support and target images and labels"""
def __getitem__(self, index):
#select classes to use for this task
c = self.get_train_or_test_db()
classes = c.get_unique_classes()
batch_classes = random.sample(classes, self.way)
if self.train:
n_queries = self.args.query_per_class
else:
n_queries = self.args.query_per_class_test
support_set = []
support_labels = []
target_set = []
target_labels = []
real_support_labels = []
real_target_labels = []
for bl, bc in enumerate(batch_classes):
#select shots from the chosen classes
n_total = c.get_num_videos_for_class(bc)
idxs = random.sample([i for i in range(n_total)], self.args.shot + n_queries)
for idx in idxs[0:self.args.shot]:
vid, vid_id = self.get_seq(bc, idx)
support_set.append(vid)
support_labels.append(bl)
for idx in idxs[self.args.shot:]:
vid, vid_id = self.get_seq(bc, idx)
target_set.append(vid)
target_labels.append(bl)
real_target_labels.append(bc)
s = list(zip(support_set, support_labels))
random.shuffle(s)
support_set, support_labels = zip(*s)
t = list(zip(target_set, target_labels, real_target_labels))
random.shuffle(t)
target_set, target_labels, real_target_labels = zip(*t)
support_set = torch.cat(support_set)
target_set = torch.cat(target_set)
support_labels = torch.FloatTensor(support_labels)
target_labels = torch.FloatTensor(target_labels)
real_target_labels = torch.FloatTensor(real_target_labels)
batch_classes = torch.FloatTensor(batch_classes)
return {"support_set":support_set, "support_labels":support_labels, "target_set":target_set, "target_labels":target_labels, "real_target_labels":real_target_labels, "batch_class_list": batch_classes}
| 12,944 | 36.850877 | 207 | py |
trx | trx-main/utils.py | import torch
import torch.nn.functional as F
import os
import math
from enum import Enum
import sys
class TestAccuracies:
"""
Determines if an evaluation on the validation set is better than the best so far.
In particular, this handles the case for meta-dataset where we validate on multiple datasets and we deem
the evaluation to be better if more than half of the validation accuracies on the individual validation datsets
are better than the previous best.
"""
def __init__(self, validation_datasets):
self.datasets = validation_datasets
self.dataset_count = len(self.datasets)
# self.current_best_accuracy_dict = {}
# for dataset in self.datasets:
# self.current_best_accuracy_dict[dataset] = {"accuracy": 0.0, "confidence": 0.0}
# def is_better(self, accuracies_dict):
# is_better = False
# is_better_count = 0
# for i, dataset in enumerate(self.datasets):
# if accuracies_dict[dataset]["accuracy"] > self.current_best_accuracy_dict[dataset]["accuracy"]:
# is_better_count += 1
#
# if is_better_count >= int(math.ceil(self.dataset_count / 2.0)):
# is_better = True
#
# return is_better
# def replace(self, accuracies_dict):
# self.current_best_accuracy_dict = accuracies_dict
def print(self, logfile, accuracy_dict):
print_and_log(logfile, "") # add a blank line
print_and_log(logfile, "Test Accuracies:")
for dataset in self.datasets:
print_and_log(logfile, "{0:}: {1:.1f}+/-{2:.1f}".format(dataset, accuracy_dict[dataset]["accuracy"],
accuracy_dict[dataset]["confidence"]))
print_and_log(logfile, "") # add a blank line
# def get_current_best_accuracy_dict(self):
# return self.current_best_accuracy_dict
def verify_checkpoint_dir(checkpoint_dir, resume, test_mode):
if resume: # verify that the checkpoint directory and file exists
if not os.path.exists(checkpoint_dir):
print("Can't resume for checkpoint. Checkpoint directory ({}) does not exist.".format(checkpoint_dir), flush=True)
sys.exit()
checkpoint_file = os.path.join(checkpoint_dir, 'checkpoint.pt')
if not os.path.isfile(checkpoint_file):
print("Can't resume for checkpoint. Checkpoint file ({}) does not exist.".format(checkpoint_file), flush=True)
sys.exit()
#elif test_mode:
# if not os.path.exists(checkpoint_dir):
# print("Can't test. Checkpoint directory ({}) does not exist.".format(checkpoint_dir), flush=True)
# sys.exit()
else:
if os.path.exists(checkpoint_dir):
print("Checkpoint directory ({}) already exits.".format(checkpoint_dir), flush=True)
print("If starting a new training run, specify a directory that does not already exist.", flush=True)
print("If you want to resume a training run, specify the -r option on the command line.", flush=True)
sys.exit()
def print_and_log(log_file, message):
"""
Helper function to print to the screen and the cnaps_layer_log.txt file.
"""
print(message, flush=True)
log_file.write(message + '\n')
def get_log_files(checkpoint_dir, resume, test_mode):
"""
Function that takes a path to a checkpoint directory and returns a reference to a logfile and paths to the
fully trained model and the model with the best validation score.
"""
verify_checkpoint_dir(checkpoint_dir, resume, test_mode)
#if not test_mode and not resume:
if not resume:
os.makedirs(checkpoint_dir)
checkpoint_path_validation = os.path.join(checkpoint_dir, 'best_validation.pt')
checkpoint_path_final = os.path.join(checkpoint_dir, 'fully_trained.pt')
logfile_path = os.path.join(checkpoint_dir, 'log.txt')
if os.path.isfile(logfile_path):
logfile = open(logfile_path, "a", buffering=1)
else:
logfile = open(logfile_path, "w", buffering=1)
return checkpoint_dir, logfile, checkpoint_path_validation, checkpoint_path_final
def stack_first_dim(x):
"""
Method to combine the first two dimension of an array
"""
x_shape = x.size()
new_shape = [x_shape[0] * x_shape[1]]
if len(x_shape) > 2:
new_shape += x_shape[2:]
return x.view(new_shape)
def split_first_dim_linear(x, first_two_dims):
"""
Undo the stacking operation
"""
x_shape = x.size()
new_shape = first_two_dims
if len(x_shape) > 1:
new_shape += [x_shape[-1]]
return x.view(new_shape)
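# Shape round trip for the two helpers above, e.g. for x of shape (5, 8, 512):
# stack_first_dim(x) -> (40, 512); split_first_dim_linear(y, [5, 8]) restores
# (5, 8, last_dim) from a (40, last_dim) tensor.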
def sample_normal(mean, var, num_samples):
"""
Generate samples from a reparameterized normal distribution
:param mean: tensor - mean parameter of the distribution
:param var: tensor - variance of the distribution
:param num_samples: np scalar - number of samples to generate
:return: tensor - samples from distribution of size numSamples x dim(mean)
"""
sample_shape = [num_samples] + len(mean.size())*[1]
normal_distribution = torch.distributions.Normal(mean.repeat(sample_shape), var.repeat(sample_shape))
return normal_distribution.rsample()
def loss(test_logits_sample, test_labels, device):
"""
Compute the classification loss.
"""
size = test_logits_sample.size()
sample_count = size[0] # scalar for the loop counter
num_samples = torch.tensor([sample_count], dtype=torch.float, device=device, requires_grad=False)
log_py = torch.empty(size=(size[0], size[1]), dtype=torch.float, device=device)
for sample in range(sample_count):
log_py[sample] = -F.cross_entropy(test_logits_sample[sample], test_labels, reduction='none')
score = torch.logsumexp(log_py, dim=0) - torch.log(num_samples)
return -torch.sum(score, dim=0)
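# The loop builds log p(y|x) for each logit sample; logsumexp - log(num_samples)
# is a Monte-Carlo log-mean over samples, so with a single sample this reduces
# to the standard cross-entropy summed over the query set.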
def aggregate_accuracy(test_logits_sample, test_labels):
"""
Compute classification accuracy.
"""
averaged_predictions = torch.logsumexp(test_logits_sample, dim=0)
return torch.mean(torch.eq(test_labels, torch.argmax(averaged_predictions, dim=-1)).float())
def task_confusion(test_logits, test_labels, real_test_labels, batch_class_list):
preds = torch.argmax(torch.logsumexp(test_logits, dim=0), dim=-1)
real_preds = batch_class_list[preds]
return real_preds
def linear_classifier(x, param_dict):
"""
Classifier.
"""
return F.linear(x, param_dict['weight_mean'], param_dict['bias_mean'])
| 6,529 | 37.639053 | 126 | py |
trx | trx-main/model.py | import torch
import torch.nn as nn
from collections import OrderedDict
from utils import split_first_dim_linear
import math
from itertools import combinations
from torch.autograd import Variable
import torchvision.models as models
NUM_SAMPLES=1
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000, pe_scale_factor=0.1):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
self.pe_scale_factor = pe_scale_factor
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
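        # exp(-log(10000) * 2i / d_model) == 10000^(-2i/d_model): the geometric
        # frequency schedule from "Attention Is All You Need"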
pe[:, 0::2] = torch.sin(position * div_term) * self.pe_scale_factor
pe[:, 1::2] = torch.cos(position * div_term) * self.pe_scale_factor
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)
return self.dropout(x)
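# Minimal usage sketch for PositionalEncoding (illustrative shapes only):
# pe = PositionalEncoding(d_model=2048, dropout=0.1, max_len=12)
# feats = torch.zeros(25, 8, 2048) # (videos, seq_len, feature_dim)
# feats = pe(feats) # same shape, with frame order encoded additively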
class TemporalCrossTransformer(nn.Module):
def __init__(self, args, temporal_set_size=3):
super(TemporalCrossTransformer, self).__init__()
self.args = args
self.temporal_set_size = temporal_set_size
max_len = int(self.args.seq_len * 1.5)
self.pe = PositionalEncoding(self.args.trans_linear_in_dim, self.args.trans_dropout, max_len=max_len)
self.k_linear = nn.Linear(self.args.trans_linear_in_dim * temporal_set_size, self.args.trans_linear_out_dim)#.cuda()
self.v_linear = nn.Linear(self.args.trans_linear_in_dim * temporal_set_size, self.args.trans_linear_out_dim)#.cuda()
self.norm_k = nn.LayerNorm(self.args.trans_linear_out_dim)
self.norm_v = nn.LayerNorm(self.args.trans_linear_out_dim)
self.class_softmax = torch.nn.Softmax(dim=1)
# generate all tuples
frame_idxs = [i for i in range(self.args.seq_len)]
frame_combinations = combinations(frame_idxs, temporal_set_size)
self.tuples = [torch.tensor(comb).cuda() for comb in frame_combinations]
self.tuples_len = len(self.tuples)
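        # e.g. seq_len=8 gives C(8,2)=28 pair tuples or C(8,3)=56 triple tuples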
def forward(self, support_set, support_labels, queries):
n_queries = queries.shape[0]
n_support = support_set.shape[0]
# static pe
support_set = self.pe(support_set)
queries = self.pe(queries)
# construct new queries and support set made of tuples of images after pe
s = [torch.index_select(support_set, -2, p).reshape(n_support, -1) for p in self.tuples]
q = [torch.index_select(queries, -2, p).reshape(n_queries, -1) for p in self.tuples]
support_set = torch.stack(s, dim=-2)
queries = torch.stack(q, dim=-2)
# apply linear maps
support_set_ks = self.k_linear(support_set)
queries_ks = self.k_linear(queries)
support_set_vs = self.v_linear(support_set)
queries_vs = self.v_linear(queries)
# apply norms where necessary
mh_support_set_ks = self.norm_k(support_set_ks)
mh_queries_ks = self.norm_k(queries_ks)
mh_support_set_vs = support_set_vs
mh_queries_vs = queries_vs
unique_labels = torch.unique(support_labels)
# init tensor to hold distances between every support tuple and every target tuple
all_distances_tensor = torch.zeros(n_queries, self.args.way).cuda()
for label_idx, c in enumerate(unique_labels):
# select keys and values for just this class
class_k = torch.index_select(mh_support_set_ks, 0, self._extract_class_indices(support_labels, c))
class_v = torch.index_select(mh_support_set_vs, 0, self._extract_class_indices(support_labels, c))
k_bs = class_k.shape[0]
class_scores = torch.matmul(mh_queries_ks.unsqueeze(1), class_k.transpose(-2,-1)) / math.sqrt(self.args.trans_linear_out_dim)
# reshape etc. to apply a softmax for each query tuple
class_scores = class_scores.permute(0,2,1,3)
class_scores = class_scores.reshape(n_queries, self.tuples_len, -1)
class_scores = [self.class_softmax(class_scores[i]) for i in range(n_queries)]
class_scores = torch.cat(class_scores)
class_scores = class_scores.reshape(n_queries, self.tuples_len, -1, self.tuples_len)
class_scores = class_scores.permute(0,2,1,3)
# get query specific class prototype
query_prototype = torch.matmul(class_scores, class_v)
query_prototype = torch.sum(query_prototype, dim=1)
# calculate distances from queries to query-specific class prototypes
diff = mh_queries_vs - query_prototype
norm_sq = torch.norm(diff, dim=[-2,-1])**2
distance = torch.div(norm_sq, self.tuples_len)
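            # (norm over dims [-2,-1])**2 is the squared Frobenius distance to the
            # query-specific prototype; dividing by tuples_len averages over tuples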
# multiply by -1 to get logits
distance = distance * -1
c_idx = c.long()
all_distances_tensor[:,c_idx] = distance
return_dict = {'logits': all_distances_tensor}
return return_dict
@staticmethod
def _extract_class_indices(labels, which_class):
"""
Helper method to extract the indices of elements which have the specified label.
:param labels: (torch.tensor) Labels of the context set.
:param which_class: Label for which indices are extracted.
:return: (torch.tensor) Indices in the form of a mask that indicate the locations of the specified label.
"""
class_mask = torch.eq(labels, which_class) # binary mask of labels equal to which_class
class_mask_indices = torch.nonzero(class_mask) # indices of labels equal to which class
return torch.reshape(class_mask_indices, (-1,)) # reshape to be a 1D vector
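    # Example: _extract_class_indices(torch.tensor([0., 1., 0., 2.]), 0.)
    # returns tensor([0, 2]), the positions of the two class-0 support videos.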
class CNN_TRX(nn.Module):
"""
Standard Resnet connected to a Temporal Cross Transformer.
"""
def __init__(self, args):
super(CNN_TRX, self).__init__()
self.train()
self.args = args
if self.args.method == "resnet18":
resnet = models.resnet18(pretrained=True)
elif self.args.method == "resnet34":
resnet = models.resnet34(pretrained=True)
elif self.args.method == "resnet50":
resnet = models.resnet50(pretrained=True)
last_layer_idx = -1
self.resnet = nn.Sequential(*list(resnet.children())[:last_layer_idx])
self.transformers = nn.ModuleList([TemporalCrossTransformer(args, s) for s in args.temp_set])
def forward(self, context_images, context_labels, target_images):
context_features = self.resnet(context_images).squeeze()
target_features = self.resnet(target_images).squeeze()
dim = int(context_features.shape[1])
context_features = context_features.reshape(-1, self.args.seq_len, dim)
target_features = target_features.reshape(-1, self.args.seq_len, dim)
all_logits = [t(context_features, context_labels, target_features)['logits'] for t in self.transformers]
all_logits = torch.stack(all_logits, dim=-1)
sample_logits = all_logits
sample_logits = torch.mean(sample_logits, dim=[-1])
return_dict = {'logits': split_first_dim_linear(sample_logits, [NUM_SAMPLES, target_features.shape[0]])}
return return_dict
def distribute_model(self):
"""
Distributes the CNNs over multiple GPUs.
:return: Nothing
"""
if self.args.num_gpus > 1:
self.resnet.cuda(0)
self.resnet = torch.nn.DataParallel(self.resnet, device_ids=[i for i in range(0, self.args.num_gpus)])
self.transformers.cuda(0)
if __name__ == "__main__":
class ArgsObject(object):
def __init__(self):
self.trans_linear_in_dim = 512
self.trans_linear_out_dim = 128
self.way = 5
self.shot = 1
self.query_per_class = 5
self.trans_dropout = 0.1
self.seq_len = 8
self.img_size = 84
self.method = "resnet18"
self.num_gpus = 1
self.temp_set = [2,3]
args = ArgsObject()
torch.manual_seed(0)
device = 'cuda:0'
model = CNN_TRX(args).to(device)
support_imgs = torch.rand(args.way * args.shot * args.seq_len,3, args.img_size, args.img_size).to(device)
target_imgs = torch.rand(args.way * args.query_per_class * args.seq_len ,3, args.img_size, args.img_size).to(device)
support_labels = torch.tensor([0,1,2,3,4]).to(device)
print("Support images input shape: {}".format(support_imgs.shape))
print("Target images input shape: {}".format(target_imgs.shape))
print("Support labels input shape: {}".format(support_imgs.shape))
out = model(support_imgs, support_labels, target_imgs)
print("TRX returns the distances from each query to each class prototype. Use these as logits. Shape: {}".format(out['logits'].shape))
| 9,264 | 38.935345 | 140 | py |
trx | trx-main/run.py | import torch
import numpy as np
import argparse
import os
import pickle
from utils import print_and_log, get_log_files, TestAccuracies, loss, aggregate_accuracy, verify_checkpoint_dir, task_confusion
from model import CNN_TRX
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Quiet TensorFlow warnings
import tensorflow as tf
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.tensorboard import SummaryWriter
import torchvision
import video_reader
import random
def main():
learner = Learner()
learner.run()
class Learner:
def __init__(self):
self.args = self.parse_command_line()
self.checkpoint_dir, self.logfile, self.checkpoint_path_validation, self.checkpoint_path_final \
= get_log_files(self.args.checkpoint_dir, self.args.resume_from_checkpoint, False)
print_and_log(self.logfile, "Options: %s\n" % self.args)
print_and_log(self.logfile, "Checkpoint Directory: %s\n" % self.checkpoint_dir)
self.writer = SummaryWriter()
gpu_device = 'cuda'
self.device = torch.device(gpu_device if torch.cuda.is_available() else 'cpu')
self.model = self.init_model()
self.train_set, self.validation_set, self.test_set = self.init_data()
self.vd = video_reader.VideoDataset(self.args)
self.video_loader = torch.utils.data.DataLoader(self.vd, batch_size=1, num_workers=self.args.num_workers)
self.loss = loss
self.accuracy_fn = aggregate_accuracy
if self.args.opt == "adam":
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
elif self.args.opt == "sgd":
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.args.learning_rate)
self.test_accuracies = TestAccuracies(self.test_set)
self.scheduler = MultiStepLR(self.optimizer, milestones=self.args.sch, gamma=0.1)
self.start_iteration = 0
if self.args.resume_from_checkpoint:
self.load_checkpoint()
self.optimizer.zero_grad()
def init_model(self):
model = CNN_TRX(self.args)
model = model.to(self.device)
if self.args.num_gpus > 1:
model.distribute_model()
return model
def init_data(self):
train_set = [self.args.dataset]
validation_set = [self.args.dataset]
test_set = [self.args.dataset]
return train_set, validation_set, test_set
"""
Command line parser
"""
def parse_command_line(self):
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=["ssv2", "kinetics", "hmdb", "ucf"], default="ssv2", help="Dataset to use.")
parser.add_argument("--learning_rate", "-lr", type=float, default=0.001, help="Learning rate.")
parser.add_argument("--tasks_per_batch", type=int, default=16, help="Number of tasks between parameter optimizations.")
parser.add_argument("--checkpoint_dir", "-c", default=None, help="Directory to save checkpoint to.")
parser.add_argument("--test_model_path", "-m", default=None, help="Path to model to load and test.")
parser.add_argument("--training_iterations", "-i", type=int, default=100020, help="Number of meta-training iterations.")
parser.add_argument("--resume_from_checkpoint", "-r", dest="resume_from_checkpoint", default=False, action="store_true", help="Restart from latest checkpoint.")
parser.add_argument("--way", type=int, default=5, help="Way of each task.")
parser.add_argument("--shot", type=int, default=5, help="Shots per class.")
parser.add_argument("--query_per_class", type=int, default=5, help="Target samples (i.e. queries) per class used for training.")
parser.add_argument("--query_per_class_test", type=int, default=1, help="Target samples (i.e. queries) per class used for testing.")
parser.add_argument('--test_iters', nargs='+', type=int, help='iterations to test at. Default is for ssv2 otam split.', default=[75000])
parser.add_argument("--num_test_tasks", type=int, default=10000, help="number of random tasks to test on.")
parser.add_argument("--print_freq", type=int, default=1000, help="print and log every n iterations.")
parser.add_argument("--seq_len", type=int, default=8, help="Frames per video.")
parser.add_argument("--num_workers", type=int, default=10, help="Num dataloader workers.")
parser.add_argument("--method", choices=["resnet18", "resnet34", "resnet50"], default="resnet50", help="method")
parser.add_argument("--trans_linear_out_dim", type=int, default=1152, help="Transformer linear_out_dim")
parser.add_argument("--opt", choices=["adam", "sgd"], default="sgd", help="Optimizer")
parser.add_argument("--trans_dropout", type=int, default=0.1, help="Transformer dropout")
parser.add_argument("--save_freq", type=int, default=5000, help="Number of iterations between checkpoint saves.")
parser.add_argument("--img_size", type=int, default=224, help="Input image size to the CNN after cropping.")
parser.add_argument('--temp_set', nargs='+', type=int, help='cardinalities e.g. 2,3 is pairs and triples', default=[2,3])
parser.add_argument("--scratch", choices=["bc", "bp"], default="bp", help="directory containing dataset, splits, and checkpoint saves.")
parser.add_argument("--num_gpus", type=int, default=1, help="Number of GPUs to split the ResNet over")
parser.add_argument("--debug_loader", default=False, action="store_true", help="Load 1 vid per class for debugging")
parser.add_argument("--split", type=int, default=7, help="Dataset split.")
parser.add_argument('--sch', nargs='+', type=int, help='iters to drop learning rate', default=[1000000])
args = parser.parse_args()
if args.scratch == "bc":
args.scratch = "/mnt/storage/home/tp8961/scratch"
elif args.scratch == "bp":
args.num_gpus = 4
            # this is low because of RAM constraints for the data loader
args.num_workers = 3
args.scratch = "/work/tp8961"
if args.checkpoint_dir == None:
print("need to specify a checkpoint dir")
exit(1)
if (args.method == "resnet50") or (args.method == "resnet34"):
args.img_size = 224
if args.method == "resnet50":
args.trans_linear_in_dim = 2048
else:
args.trans_linear_in_dim = 512
if args.dataset == "ssv2":
args.traintestlist = os.path.join(args.scratch, "video_datasets/splits/somethingsomethingv2TrainTestlist")
args.path = os.path.join(args.scratch, "video_datasets/data/somethingsomethingv2_256x256q5_7l8.zip")
elif args.dataset == "kinetics":
args.traintestlist = os.path.join(args.scratch, "video_datasets/splits/kineticsTrainTestlist")
args.path = os.path.join(args.scratch, "video_datasets/data/kinetics_256q5_1.zip")
elif args.dataset == "ucf":
args.traintestlist = os.path.join(args.scratch, "video_datasets/splits/ucfTrainTestlist")
args.path = os.path.join(args.scratch, "video_datasets/data/UCF-101_320.zip")
elif args.dataset == "hmdb":
args.traintestlist = os.path.join(args.scratch, "video_datasets/splits/hmdb51TrainTestlist")
args.path = os.path.join(args.scratch, "video_datasets/data/hmdb51_256q5.zip")
return args
def run(self):
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
with tf.compat.v1.Session(config=config) as session:
train_accuracies = []
losses = []
total_iterations = self.args.training_iterations
iteration = self.start_iteration
for task_dict in self.video_loader:
if iteration >= total_iterations:
break
iteration += 1
torch.set_grad_enabled(True)
task_loss, task_accuracy = self.train_task(task_dict)
train_accuracies.append(task_accuracy)
losses.append(task_loss)
# optimize
if ((iteration + 1) % self.args.tasks_per_batch == 0) or (iteration == (total_iterations - 1)):
self.optimizer.step()
self.optimizer.zero_grad()
self.scheduler.step()
if (iteration + 1) % self.args.print_freq == 0:
# print training stats
print_and_log(self.logfile,'Task [{}/{}], Train Loss: {:.7f}, Train Accuracy: {:.7f}'
.format(iteration + 1, total_iterations, torch.Tensor(losses).mean().item(),
torch.Tensor(train_accuracies).mean().item()))
train_accuracies = []
losses = []
if ((iteration + 1) % self.args.save_freq == 0) and (iteration + 1) != total_iterations:
self.save_checkpoint(iteration + 1)
if ((iteration + 1) in self.args.test_iters) and (iteration + 1) != total_iterations:
accuracy_dict = self.test(session)
print(accuracy_dict)
self.test_accuracies.print(self.logfile, accuracy_dict)
# save the final model
torch.save(self.model.state_dict(), self.checkpoint_path_final)
self.logfile.close()
def train_task(self, task_dict):
context_images, target_images, context_labels, target_labels, real_target_labels, batch_class_list = self.prepare_task(task_dict)
model_dict = self.model(context_images, context_labels, target_images)
target_logits = model_dict['logits']
task_loss = self.loss(target_logits, target_labels, self.device) / self.args.tasks_per_batch
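        # dividing by tasks_per_batch accumulates gradients over several tasks
        # before each optimizer.step(), emulating a larger meta-batch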
task_accuracy = self.accuracy_fn(target_logits, target_labels)
task_loss.backward(retain_graph=False)
return task_loss, task_accuracy
def test(self, session):
self.model.eval()
with torch.no_grad():
self.video_loader.dataset.train = False
accuracy_dict ={}
accuracies = []
iteration = 0
item = self.args.dataset
for task_dict in self.video_loader:
if iteration >= self.args.num_test_tasks:
break
iteration += 1
context_images, target_images, context_labels, target_labels, real_target_labels, batch_class_list = self.prepare_task(task_dict)
model_dict = self.model(context_images, context_labels, target_images)
target_logits = model_dict['logits']
accuracy = self.accuracy_fn(target_logits, target_labels)
accuracies.append(accuracy.item())
del target_logits
accuracy = np.array(accuracies).mean() * 100.0
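            # 196.0 = 1.96 * 100: a 95% normal-approximation half-width in
            # percentage points, matching the accuracy scale above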
confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies))
accuracy_dict[item] = {"accuracy": accuracy, "confidence": confidence}
self.video_loader.dataset.train = True
self.model.train()
return accuracy_dict
def prepare_task(self, task_dict, images_to_device = True):
context_images, context_labels = task_dict['support_set'][0], task_dict['support_labels'][0]
target_images, target_labels = task_dict['target_set'][0], task_dict['target_labels'][0]
real_target_labels = task_dict['real_target_labels'][0]
batch_class_list = task_dict['batch_class_list'][0]
if images_to_device:
context_images = context_images.to(self.device)
target_images = target_images.to(self.device)
context_labels = context_labels.to(self.device)
target_labels = target_labels.type(torch.LongTensor).to(self.device)
return context_images, target_images, context_labels, target_labels, real_target_labels, batch_class_list
def shuffle(self, images, labels):
"""
Return shuffled data.
"""
permutation = np.random.permutation(images.shape[0])
return images[permutation], labels[permutation]
def save_checkpoint(self, iteration):
d = {'iteration': iteration,
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict()}
torch.save(d, os.path.join(self.checkpoint_dir, 'checkpoint{}.pt'.format(iteration)))
torch.save(d, os.path.join(self.checkpoint_dir, 'checkpoint.pt'))
def load_checkpoint(self):
checkpoint = torch.load(os.path.join(self.checkpoint_dir, 'checkpoint.pt'))
self.start_iteration = checkpoint['iteration']
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.scheduler.load_state_dict(checkpoint['scheduler'])
if __name__ == "__main__":
main()
| 13,447 | 47.901818 | 168 | py |
trx | trx-main/videotransforms/stack_transforms.py | import numpy as np
import PIL
import torch
from videotransforms.utils import images as imageutils
class ToStackedTensor(object):
"""Converts a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
or PIL Images to a torch.FloatTensor of shape (m*C x H x W)
in the range [0, 1.0]
"""
def __init__(self, channel_nb=3):
self.channel_nb = channel_nb
def __call__(self, clip):
"""
Args:
clip (list of numpy.ndarray or PIL.Image.Image): clip
(list of images) to be converted to tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
            assert ch == self.channel_nb, 'got {} channels instead of {}'.format(
                ch, self.channel_nb)
elif isinstance(clip[0], PIL.Image.Image):
w, h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb * len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, PIL.Image.Image):
img = np.array(img, copy=False)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
img = imageutils.convert_img(img)
np_clip[img_idx * self.channel_nb:(
img_idx + 1) * self.channel_nb, :, :] = img
tensor_clip = torch.from_numpy(np_clip)
return tensor_clip.float().div(255)
| 1,699 | 33 | 80 | py |
trx | trx-main/videotransforms/volume_transforms.py | import numpy as np
from PIL import Image
import torch
from videotransforms.utils import images as imageutils
class ClipToTensor(object):
"""Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0]
"""
def __init__(self, channel_nb=3, div_255=True, numpy=False):
self.channel_nb = channel_nb
self.div_255 = div_255
self.numpy = numpy
def __call__(self, clip):
"""
Args: clip (list of numpy.ndarray): clip (list of images)
to be converted to tensor.
"""
# Retrieve shape
if isinstance(clip[0], np.ndarray):
h, w, ch = clip[0].shape
            assert ch == self.channel_nb, 'Got {0} instead of {1} channels'.format(
                ch, self.channel_nb)
elif isinstance(clip[0], Image.Image):
w, h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])
# Convert
for img_idx, img in enumerate(clip):
if isinstance(img, np.ndarray):
pass
elif isinstance(img, Image.Image):
img = np.array(img, copy=False)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image\
but got list of {0}'.format(type(clip[0])))
img = imageutils.convert_img(img)
np_clip[:, img_idx, :, :] = img
if self.numpy:
if self.div_255:
np_clip = np_clip / 255
return np_clip
else:
tensor_clip = torch.from_numpy(np_clip)
if not isinstance(tensor_clip, torch.FloatTensor):
tensor_clip = tensor_clip.float()
if self.div_255:
tensor_clip = tensor_clip.div(255)
return tensor_clip
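# Usage sketch (illustrative): 8 RGB PIL frames of size 224x224 become a float
# tensor of shape (3, 8, 224, 224) scaled to [0, 1]:
# clip_tensor = ClipToTensor()([Image.new('RGB', (224, 224))] * 8)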
class ToTensor(object):
"""Converts numpy array to tensor
"""
def __call__(self, array):
tensor = torch.from_numpy(array)
return tensor
| 2,152 | 30.202899 | 81 | py |
trx | trx-main/videotransforms/functional.py | import numbers
#import cv2
import numpy as np
import PIL
#from skimage.transform import resize
import torchvision
def crop_clip(clip, min_h, min_w, h, w):
if isinstance(clip[0], np.ndarray):
cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
cropped = [
img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return cropped
def resize_clip(clip, size, interpolation='bilinear'):
if isinstance(clip[0], np.ndarray):
# if isinstance(size, numbers.Number):
# im_h, im_w, im_c = clip[0].shape
# # Min spatial dim already matches minimal size
# if (im_w <= im_h and im_w == size) or (im_h <= im_w
# and im_h == size):
# return clip
# new_h, new_w = get_resize_sizes(im_h, im_w, size)
# size = (new_w, new_h)
# else:
# size = size[1], size[0]
# if interpolation == 'bilinear':
# np_inter = cv2.INTER_LINEAR
# else:
# np_inter = cv2.INTER_NEAREST
# scaled = [
# cv2.resize(img, size, interpolation=np_inter) for img in clip
# ]
raise NotImplementedError
elif isinstance(clip[0], PIL.Image.Image):
if isinstance(size, numbers.Number):
im_w, im_h = clip[0].size
# Min spatial dim already matches minimal size
if (im_w <= im_h and im_w == size) or (im_h <= im_w
and im_h == size):
return clip
new_h, new_w = get_resize_sizes(im_h, im_w, size)
size = (new_w, new_h)
else:
size = size[1], size[0]
        if interpolation == 'bilinear':
            pil_inter = PIL.Image.BILINEAR
        else:
            pil_inter = PIL.Image.NEAREST
scaled = [img.resize(size, pil_inter) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return scaled
def get_resize_sizes(im_h, im_w, size):
if im_w < im_h:
ow = size
oh = int(size * im_h / im_w)
else:
oh = size
ow = int(size * im_w / im_h)
return oh, ow
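
# --- Usage sketch (illustrative addition, not part of the original file) ---
# With an int size the smaller spatial side is matched and the aspect
# ratio preserved; with a (h, w) tuple the exact size is used:
if __name__ == '__main__':
    frames = [PIL.Image.new('RGB', (320, 240)) for _ in range(4)]
    scaled = resize_clip(frames, 120)        # -> 160 x 120 frames
    exact = resize_clip(frames, (100, 200))  # -> 200 x 100 frames
    print(scaled[0].size, exact[0].size)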
| 2,493 | 32.702703 | 76 | py |
trx | trx-main/videotransforms/video_transforms.py | import numbers
import random
#import cv2
from matplotlib import pyplot as plt
import numpy as np
import PIL
import scipy
import torch
import torchvision
from . import functional as F
class Compose(object):
"""Composes several transforms
Args:
transforms (list of ``Transform`` objects): list of transforms
to compose
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, clip):
for t in self.transforms:
clip = t(clip)
return clip
class RandomHorizontalFlip(object):
"""Horizontally flip the list of given images randomly
with a probability 0.5
"""
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Randomly flipped clip
"""
if random.random() < 0.5:
if isinstance(clip[0], np.ndarray):
return [np.fliplr(img) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [
img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip
]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
' but got list of {0}'.format(type(clip[0])))
return clip
class RandomResize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
The larger the original image is, the more times it takes to
interpolate
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
size (tuple): (widht, height)
"""
def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
self.ratio = ratio
self.interpolation = interpolation
def __call__(self, clip):
scaling_factor = random.uniform(self.ratio[0], self.ratio[1])
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
new_w = int(im_w * scaling_factor)
new_h = int(im_h * scaling_factor)
new_size = (new_w, new_h)
resized = F.resize_clip(
clip, new_size, interpolation=self.interpolation)
return resized
class Resize(object):
"""Resizes a list of (H x W x C) numpy.ndarray to the final size
The larger the original image is, the more times it takes to
interpolate
Args:
interpolation (str): Can be one of 'nearest', 'bilinear'
defaults to nearest
size (tuple): (widht, height)
"""
def __init__(self, size, interpolation='nearest'):
self.size = size
self.interpolation = interpolation
def __call__(self, clip):
resized = F.resize_clip(
clip, self.size, interpolation=self.interpolation)
return resized
class RandomCrop(object):
"""Extract random crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
                'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = random.randint(0, im_w - w)
y1 = random.randint(0, im_h - h)
cropped = F.crop_clip(clip, y1, x1, h, w)
return cropped
class RandomRotation(object):
"""Rotate entire clip randomly by a random angle within
given bounds
Args:
degrees (sequence or int): Range of degrees to select from
If degrees is a number instead of sequence like (min, max),
the range of degrees, will be (-degrees, +degrees).
"""
def __init__(self, degrees):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError('If degrees is a single number,'
'must be positive')
degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError('If degrees is a sequence,'
'it must be of len 2.')
self.degrees = degrees
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
angle = random.uniform(self.degrees[0], self.degrees[1])
        if isinstance(clip[0], np.ndarray):
            # scipy.misc.imrotate has been removed from SciPy; rotate via PIL instead
            rotated = [np.array(PIL.Image.fromarray(img).rotate(angle))
                       for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
rotated = [img.rotate(angle) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return rotated
class CenterCrop(object):
"""Extract center crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
                'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
x1 = int(round((im_w - w) / 2.))
y1 = int(round((im_h - h) / 2.))
cropped = F.crop_clip(clip, y1, x1, h, w)
return cropped
class TenCrop(object):
"""Extract center crop at the same location for a list of images
Args:
size (sequence or int): Desired output size for the
crop in format (h, w)
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
size = (size, size)
self.size = size
def __call__(self, clip):
"""
Args:
img (PIL.Image or numpy.ndarray): List of images to be cropped
in format (h, w, c) in numpy.ndarray
Returns:
PIL.Image or numpy.ndarray: Cropped list of images
"""
h, w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if w > im_w or h > im_h:
error_msg = (
                'Initial image size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial image is ({im_w}, {im_h})'.format(
im_w=im_w, im_h=im_h, w=w, h=h))
raise ValueError(error_msg)
if isinstance(clip[0], np.ndarray):
flip_clip = [np.fliplr(img) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
flip_clip = [img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
' but got list of {0}'.format(type(clip[0])))
x1 = int(round((im_w - w) / 2.))
y1 = int(round((im_h - h) / 2.))
all_x = [x1]
all_y = [y1]
all_x.append(0)
all_y.append(0)
all_x.append(im_w - w)
all_y.append(0)
all_x.append(0)
all_y.append(im_h - h)
all_x.append(im_w - w)
all_y.append(im_h - h)
#cropped = F.crop_clip(clip, y1, x1, h, w)
cropped = [F.crop_clip(clip, y, x, h, w) for x, y in zip(all_x, all_y)]
flip_cropped = [F.crop_clip(flip_clip, y, x, h, w) for x, y in zip(all_x, all_y)]
cropped.extend(flip_cropped)
return cropped
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation and hue of the clip
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
def get_params(self, brightness, contrast, saturation, hue):
if brightness > 0:
brightness_factor = random.uniform(
max(0, 1 - brightness), 1 + brightness)
else:
brightness_factor = None
if contrast > 0:
contrast_factor = random.uniform(
max(0, 1 - contrast), 1 + contrast)
else:
contrast_factor = None
if saturation > 0:
saturation_factor = random.uniform(
max(0, 1 - saturation), 1 + saturation)
else:
saturation_factor = None
if hue > 0:
hue_factor = random.uniform(-hue, hue)
else:
hue_factor = None
return brightness_factor, contrast_factor, saturation_factor, hue_factor
def __call__(self, clip):
"""
Args:
clip (list): list of PIL.Image
Returns:
list PIL.Image : list of transformed PIL.Image
"""
if isinstance(clip[0], np.ndarray):
raise TypeError(
'Color jitter not yet implemented for numpy arrays')
elif isinstance(clip[0], PIL.Image.Image):
brightness, contrast, saturation, hue = self.get_params(
self.brightness, self.contrast, self.saturation, self.hue)
# Create img transform function sequence
img_transforms = []
if brightness is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
if saturation is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
if hue is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
if contrast is not None:
img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
random.shuffle(img_transforms)
# Apply to all images
            jittered_clip = []
            for img in clip:
                jittered_img = img
                for func in img_transforms:
                    jittered_img = func(jittered_img)  # chain the jitter ops
                jittered_clip.append(jittered_img)
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
return jittered_clip
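
# --- Usage sketch (illustrative addition, not part of the original file) ---
# Transforms compose like torchvision's, but act on a whole list of frames,
# so every frame in a clip receives the same random parameters:
if __name__ == '__main__':
    pipeline = Compose([
        Resize(128),
        RandomCrop(112),
        RandomHorizontalFlip(),
    ])
    frames = [PIL.Image.new('RGB', (320, 240)) for _ in range(4)]
    out = pipeline(frames)
    print(len(out), out[0].size)  # 4 (112, 112)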
| 13,108 | 31.44802 | 119 | py |
trx | trx-main/videotransforms/__init__.py | 0 | 0 | 0 | py | |
trx | trx-main/videotransforms/tensor_transforms.py | import random
from videotransforms.utils import functional as F
class Normalize(object):
"""Normalize a tensor image with mean and standard deviation
Given mean: m and std: s
will normalize each channel as channel = (channel - mean) / std
Args:
        mean (float): mean value
        std (float): std value
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor of stacked images or image
of size (C, H, W) to be normalized
Returns:
            Tensor: Normalized stack of images or single image
"""
return F.normalize(tensor, self.mean, self.std)
class SpatialRandomCrop(object):
"""Crops a random spatial crop in a spatio-temporal
numpy or tensor input [Channel, Time, Height, Width]
"""
def __init__(self, size):
"""
Args:
size (tuple): in format (height, width)
"""
self.size = size
def __call__(self, tensor):
h, w = self.size
_, _, tensor_h, tensor_w = tensor.shape
if w > tensor_w or h > tensor_h:
error_msg = (
                'Initial tensor spatial size should be larger than '
'cropped size but got cropped sizes : ({w}, {h}) while '
'initial tensor is ({t_w}, {t_h})'.format(
t_w=tensor_w, t_h=tensor_h, w=w, h=h))
raise ValueError(error_msg)
x1 = random.randint(0, tensor_w - w)
y1 = random.randint(0, tensor_h - h)
        cropped = tensor[:, :, y1:y1 + h, x1:x1 + w]
return cropped
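
# --- Usage sketch (illustrative addition, not part of the original file) ---
if __name__ == '__main__':
    import torch
    clip = torch.zeros(3, 16, 128, 128)      # (C, T, H, W)
    crop = SpatialRandomCrop((112, 96))(clip)
    print(crop.shape)                        # torch.Size([3, 16, 112, 96])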
| 1,671 | 26.866667 | 72 | py |
trx | trx-main/videotransforms/utils/images.py | import numpy as np
def convert_img(img):
"""Converts (H, W, C) numpy.ndarray to (C, W, H) format
"""
if len(img.shape) == 3:
img = img.transpose(2, 0, 1)
if len(img.shape) == 2:
img = np.expand_dims(img, 0)
return img
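
# --- Usage sketch (illustrative addition, not part of the original file) ---
if __name__ == '__main__':
    rgb = np.zeros((240, 320, 3))
    gray = np.zeros((240, 320))
    print(convert_img(rgb).shape, convert_img(gray).shape)  # (3, 240, 320) (1, 240, 320)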
| 256 | 20.416667 | 59 | py |
trx | trx-main/videotransforms/utils/functional.py | def normalize(tensor, mean, std):
"""
Args:
tensor (Tensor): Tensor to normalize
Returns:
Tensor: Normalized tensor
"""
tensor.sub_(mean).div_(std)
return tensor
| 203 | 17.545455 | 44 | py |
Kitsune-py | Kitsune-py-master/example.py | from Kitsune import Kitsune
import numpy as np
import time
##############################################################################
# Kitsune a lightweight online network intrusion detection system based on an ensemble of autoencoders (kitNET).
# For more information and citation, please see our NDSS'18 paper: Kitsune: An Ensemble of Autoencoders for Online Network Intrusion Detection
# This script demonstrates Kitsune's ability to incrementally learn, and detect anomalies in a recorded pcap of the Mirai malware.
# The demo involves an m-by-n dataset with n=115 dimensions (features), and m=100,000 observations.
# Each observation is a snapshot of the network's state in terms of incremental damped statistics (see the NDSS paper for more details)
#The runtimes presented in the paper are based on the C++ implementation (roughly 100x faster than the python implementation)
################### Last Tested with Anaconda 3.6.3 #######################
# Load Mirai pcap (a recording of the Mirai botnet malware being activated)
# The first 70,000 observations are clean...
print("Unzipping Sample Capture...")
import zipfile
with zipfile.ZipFile("mirai.zip","r") as zip_ref:
zip_ref.extractall()
# File location
path = "mirai.pcap" #the pcap, pcapng, or tsv file to process.
packet_limit = np.Inf #the number of packets to process
# KitNET params:
maxAE = 10 #maximum size for any autoencoder in the ensemble layer
FMgrace = 5000 #the number of instances taken to learn the feature mapping (the ensemble's architecture)
ADgrace = 50000 #the number of instances used to train the anomaly detector (ensemble itself)
# Build Kitsune
K = Kitsune(path,packet_limit,maxAE,FMgrace,ADgrace)
print("Running Kitsune:")
RMSEs = []
i = 0
start = time.time()
# Here we process (train/execute) each individual packet.
# In this way, each observation is discarded after performing process() method.
while True:
i+=1
if i % 1000 == 0:
print(i)
rmse = K.proc_next_packet()
if rmse == -1:
break
RMSEs.append(rmse)
stop = time.time()
print("Complete. Time elapsed: "+ str(stop - start))
# Here we demonstrate how one can fit the RMSE scores to a log-normal distribution (useful for finding/setting a cutoff threshold \phi)
from scipy.stats import norm
benignSample = np.log(RMSEs[FMgrace+ADgrace+1:100000])
logProbs = norm.logsf(np.log(RMSEs), np.mean(benignSample), np.std(benignSample))
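# Illustrative addition (not part of the original demo): with the fitted
# log-normal model a cutoff threshold phi can be phrased as a tail
# probability, e.g. flagging packets whose RMSE is less likely than 1e-6
# under the benign score distribution:
anomaly_idx = np.where(logProbs < np.log(1e-6))[0]
print("Packets flagged at the 1e-6 tail threshold: " + str(len(anomaly_idx)))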
# plot the RMSE anomaly scores
print("Plotting results")
from matplotlib import pyplot as plt
from matplotlib import cm
plt.figure(figsize=(10,5))
fig = plt.scatter(range(FMgrace+ADgrace+1,len(RMSEs)),RMSEs[FMgrace+ADgrace+1:],s=0.1,c=logProbs[FMgrace+ADgrace+1:],cmap='RdYlGn')
plt.yscale("log")
plt.title("Anomaly Scores from Kitsune's Execution Phase")
plt.ylabel("RMSE (log scaled)")
plt.xlabel("Time elapsed [min]")
figbar=plt.colorbar()
figbar.ax.set_ylabel('Log Probability\n ', rotation=270)
plt.show()
| 2,948 | 39.958333 | 142 | py |
Kitsune-py | Kitsune-py-master/setup.py | from distutils.core import setup
from Cython.Build import cythonize
setup(
ext_modules = cythonize(["*.pyx"])
) | 116 | 18.5 | 38 | py |
Kitsune-py | Kitsune-py-master/FeatureExtractor.py | #Check if cython code has been compiled
import os
import subprocess
import sys
use_extrapolation=False #experimental correlation code
if use_extrapolation:
print("Importing AfterImage Cython Library")
if not os.path.isfile("AfterImage.c"): #has not yet been compiled, so try to do so...
cmd = "python setup.py build_ext --inplace"
subprocess.call(cmd,shell=True)
#Import dependencies
import netStat as ns
import csv
import numpy as np
print("Importing Scapy Library")
from scapy.all import *
import os.path
import platform
import subprocess
#Extracts Kitsune features from given pcap file one packet at a time using "get_next_vector()"
# If wireshark is installed (tshark) it is used to parse (it's faster), otherwise, scapy is used (much slower).
# If wireshark is used then a tsv file (parsed version of the pcap) will be made -which you can use as your input next time
class FE:
def __init__(self,file_path,limit=np.inf):
self.path = file_path
self.limit = limit
self.parse_type = None #unknown
self.curPacketIndx = 0
self.tsvin = None #used for parsing TSV file
self.scapyin = None #used for parsing pcap with scapy
### Prep pcap ##
self.__prep__()
### Prep Feature extractor (AfterImage) ###
maxHost = 100000000000
maxSess = 100000000000
self.nstat = ns.netStat(np.nan, maxHost, maxSess)
def _get_tshark_path(self):
if platform.system() == 'Windows':
            return r'C:\Program Files\Wireshark\tshark.exe'
else:
system_path = os.environ['PATH']
for path in system_path.split(os.pathsep):
filename = os.path.join(path, 'tshark')
if os.path.isfile(filename):
return filename
return ''
def __prep__(self):
### Find file: ###
if not os.path.isfile(self.path): # file does not exist
print("File: " + self.path + " does not exist")
raise Exception()
### check file type ###
type = self.path.split('.')[-1]
self._tshark = self._get_tshark_path()
##If file is TSV (pre-parsed by wireshark script)
if type == "tsv":
self.parse_type = "tsv"
##If file is pcap
elif type == "pcap" or type == 'pcapng':
# Try parsing via tshark dll of wireshark (faster)
if os.path.isfile(self._tshark):
self.pcap2tsv_with_tshark() # creates local tsv file
self.path += ".tsv"
self.parse_type = "tsv"
else: # Otherwise, parse with scapy (slower)
print("tshark not found. Trying scapy...")
self.parse_type = "scapy"
else:
print("File: " + self.path + " is not a tsv or pcap file")
raise Exception()
### open readers ##
if self.parse_type == "tsv":
maxInt = sys.maxsize
decrement = True
while decrement:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
decrement = False
try:
csv.field_size_limit(maxInt)
except OverflowError:
maxInt = int(maxInt / 10)
decrement = True
print("counting lines in file...")
            with open(self.path) as f:
                num_lines = sum(1 for _ in f)
print("There are " + str(num_lines) + " Packets.")
self.limit = min(self.limit, num_lines-1)
self.tsvinf = open(self.path, 'rt', encoding="utf8")
self.tsvin = csv.reader(self.tsvinf, delimiter='\t')
row = self.tsvin.__next__() #move iterator past header
else: # scapy
print("Reading PCAP file via Scapy...")
self.scapyin = rdpcap(self.path)
self.limit = len(self.scapyin)
print("Loaded " + str(len(self.scapyin)) + " Packets.")
def get_next_vector(self):
if self.curPacketIndx == self.limit:
if self.parse_type == 'tsv':
self.tsvinf.close()
return []
### Parse next packet ###
if self.parse_type == "tsv":
row = self.tsvin.__next__()
IPtype = np.nan
timestamp = row[0]
framelen = row[1]
srcIP = ''
dstIP = ''
if row[4] != '': # IPv4
srcIP = row[4]
dstIP = row[5]
IPtype = 0
elif row[17] != '': # ipv6
srcIP = row[17]
dstIP = row[18]
IPtype = 1
            srcproto = row[6] + row[8]  # UDP or TCP port: the concatenation of the two port strings results in an OR "[tcp|udp]"
dstproto = row[7] + row[9] # UDP or TCP port
srcMAC = row[2]
dstMAC = row[3]
if srcproto == '': # it's a L2/L1 level protocol
if row[12] != '': # is ARP
srcproto = 'arp'
dstproto = 'arp'
srcIP = row[14] # src IP (ARP)
dstIP = row[16] # dst IP (ARP)
IPtype = 0
elif row[10] != '': # is ICMP
srcproto = 'icmp'
dstproto = 'icmp'
IPtype = 0
elif srcIP + srcproto + dstIP + dstproto == '': # some other protocol
srcIP = row[2] # src MAC
dstIP = row[3] # dst MAC
elif self.parse_type == "scapy":
packet = self.scapyin[self.curPacketIndx]
IPtype = np.nan
timestamp = packet.time
framelen = len(packet)
if packet.haslayer(IP): # IPv4
srcIP = packet[IP].src
dstIP = packet[IP].dst
IPtype = 0
elif packet.haslayer(IPv6): # ipv6
srcIP = packet[IPv6].src
dstIP = packet[IPv6].dst
IPtype = 1
else:
srcIP = ''
dstIP = ''
if packet.haslayer(TCP):
srcproto = str(packet[TCP].sport)
dstproto = str(packet[TCP].dport)
elif packet.haslayer(UDP):
srcproto = str(packet[UDP].sport)
dstproto = str(packet[UDP].dport)
else:
srcproto = ''
dstproto = ''
srcMAC = packet.src
dstMAC = packet.dst
if srcproto == '': # it's a L2/L1 level protocol
if packet.haslayer(ARP): # is ARP
srcproto = 'arp'
dstproto = 'arp'
srcIP = packet[ARP].psrc # src IP (ARP)
dstIP = packet[ARP].pdst # dst IP (ARP)
IPtype = 0
elif packet.haslayer(ICMP): # is ICMP
srcproto = 'icmp'
dstproto = 'icmp'
IPtype = 0
elif srcIP + srcproto + dstIP + dstproto == '': # some other protocol
srcIP = packet.src # src MAC
dstIP = packet.dst # dst MAC
else:
return []
self.curPacketIndx = self.curPacketIndx + 1
### Extract Features
try:
return self.nstat.updateGetStats(IPtype, srcMAC, dstMAC, srcIP, srcproto, dstIP, dstproto,
int(framelen),
float(timestamp))
except Exception as e:
print(e)
return []
def pcap2tsv_with_tshark(self):
print('Parsing with tshark...')
fields = "-e frame.time_epoch -e frame.len -e eth.src -e eth.dst -e ip.src -e ip.dst -e tcp.srcport -e tcp.dstport -e udp.srcport -e udp.dstport -e icmp.type -e icmp.code -e arp.opcode -e arp.src.hw_mac -e arp.src.proto_ipv4 -e arp.dst.hw_mac -e arp.dst.proto_ipv4 -e ipv6.src -e ipv6.dst"
cmd = '"' + self._tshark + '" -r '+ self.path +' -T fields '+ fields +' -E header=y -E occurrence=f > '+self.path+".tsv"
subprocess.call(cmd,shell=True)
print("tshark parsing complete. File saved as: "+self.path +".tsv")
def get_num_features(self):
return len(self.nstat.getNetStatHeaders())
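
# --- Usage sketch (illustrative; 'capture.pcap' is a placeholder path) ---
if __name__ == '__main__' and os.path.isfile('capture.pcap'):
    fe = FE('capture.pcap')
    print(fe.get_num_features(), 'features per packet')
    while True:
        vec = fe.get_next_vector()  # one damped-statistics vector per packet
        if len(vec) == 0:
            break                   # error, or no packets left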
| 8,441 | 37.547945 | 297 | py |
Kitsune-py | Kitsune-py-master/netStat.py | import numpy as np
## Prep AfterImage cython package
import os
import subprocess
import pyximport
pyximport.install()
import AfterImage as af
#import AfterImage_NDSS as af
#
# MIT License
#
# Copyright (c) 2018 Yisroel mirsky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class netStat:
#Datastructure for efficent network stat queries
    # HostLimit: no more than this many Host identifiers will be tracked
    # HostSimplexLimit: no more than this many outgoing channels from each host will be tracked (purged periodically)
# Lambdas: a list of 'window sizes' (decay factors) to track for each stream. nan resolved to default [5,3,1,.1,.01]
def __init__(self, Lambdas = np.nan, HostLimit=255,HostSimplexLimit=1000):
#Lambdas
        if np.isscalar(Lambdas) and np.isnan(Lambdas): #guard: np.isnan would fail on a list of Lambdas
self.Lambdas = [5,3,1,.1,.01]
else:
self.Lambdas = Lambdas
#HT Limits
self.HostLimit = HostLimit
self.SessionLimit = HostSimplexLimit*self.HostLimit*self.HostLimit #*2 since each dual creates 2 entries in memory
self.MAC_HostLimit = self.HostLimit*10
#HTs
self.HT_jit = af.incStatDB(limit=self.HostLimit*self.HostLimit)#H-H Jitter Stats
self.HT_MI = af.incStatDB(limit=self.MAC_HostLimit)#MAC-IP relationships
self.HT_H = af.incStatDB(limit=self.HostLimit) #Source Host BW Stats
self.HT_Hp = af.incStatDB(limit=self.SessionLimit)#Source Host BW Stats
def findDirection(self,IPtype,srcIP,dstIP,eth_src,eth_dst): #cpp: this is all given to you in the direction string of the instance (NO NEED FOR THIS FUNCTION)
if IPtype==0: #is IPv4
lstP = srcIP.rfind('.')
src_subnet = srcIP[0:lstP:]
lstP = dstIP.rfind('.')
dst_subnet = dstIP[0:lstP:]
elif IPtype==1: #is IPv6
src_subnet = srcIP[0:round(len(srcIP)/2):]
dst_subnet = dstIP[0:round(len(dstIP)/2):]
else: #no Network layer, use MACs
src_subnet = eth_src
dst_subnet = eth_dst
return src_subnet, dst_subnet
def updateGetStats(self, IPtype, srcMAC,dstMAC, srcIP, srcProtocol, dstIP, dstProtocol, datagramSize, timestamp):
# Host BW: Stats on the srcIP's general Sender Statistics
# Hstat = np.zeros((3*len(self.Lambdas,)))
# for i in range(len(self.Lambdas)):
# Hstat[(i*3):((i+1)*3)] = self.HT_H.update_get_1D_Stats(srcIP, timestamp, datagramSize, self.Lambdas[i])
#MAC.IP: Stats on src MAC-IP relationships
MIstat = np.zeros((3*len(self.Lambdas,)))
for i in range(len(self.Lambdas)):
MIstat[(i*3):((i+1)*3)] = self.HT_MI.update_get_1D_Stats(srcMAC+srcIP, timestamp, datagramSize, self.Lambdas[i])
# Host-Host BW: Stats on the dual traffic behavior between srcIP and dstIP
HHstat = np.zeros((7*len(self.Lambdas,)))
for i in range(len(self.Lambdas)):
HHstat[(i*7):((i+1)*7)] = self.HT_H.update_get_1D2D_Stats(srcIP, dstIP,timestamp,datagramSize,self.Lambdas[i])
# Host-Host Jitter:
HHstat_jit = np.zeros((3*len(self.Lambdas,)))
for i in range(len(self.Lambdas)):
HHstat_jit[(i*3):((i+1)*3)] = self.HT_jit.update_get_1D_Stats(srcIP+dstIP, timestamp, 0, self.Lambdas[i],isTypeDiff=True)
        # Socket BW: Stats on the dual traffic behavior between the source and destination sockets (IP+port, or MACs for ARP)
HpHpstat = np.zeros((7*len(self.Lambdas,)))
if srcProtocol == 'arp':
for i in range(len(self.Lambdas)):
HpHpstat[(i*7):((i+1)*7)] = self.HT_Hp.update_get_1D2D_Stats(srcMAC, dstMAC, timestamp, datagramSize, self.Lambdas[i])
else: # some other protocol (e.g. TCP/UDP)
for i in range(len(self.Lambdas)):
HpHpstat[(i*7):((i+1)*7)] = self.HT_Hp.update_get_1D2D_Stats(srcIP + srcProtocol, dstIP + dstProtocol, timestamp, datagramSize, self.Lambdas[i])
return np.concatenate((MIstat, HHstat, HHstat_jit, HpHpstat)) # concatenation of stats into one stat vector
def getNetStatHeaders(self):
MIstat_headers = []
Hstat_headers = []
HHstat_headers = []
HHjitstat_headers = []
HpHpstat_headers = []
for i in range(len(self.Lambdas)):
MIstat_headers += ["MI_dir_"+h for h in self.HT_MI.getHeaders_1D(Lambda=self.Lambdas[i],ID=None)]
HHstat_headers += ["HH_"+h for h in self.HT_H.getHeaders_1D2D(Lambda=self.Lambdas[i],IDs=None,ver=2)]
HHjitstat_headers += ["HH_jit_"+h for h in self.HT_jit.getHeaders_1D(Lambda=self.Lambdas[i],ID=None)]
HpHpstat_headers += ["HpHp_" + h for h in self.HT_Hp.getHeaders_1D2D(Lambda=self.Lambdas[i], IDs=None, ver=2)]
return MIstat_headers + Hstat_headers + HHstat_headers + HHjitstat_headers + HpHpstat_headers
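
# --- Usage sketch (illustrative addition, not part of the original file) ---
if __name__ == '__main__':
    nstat = netStat(np.nan, HostLimit=255, HostSimplexLimit=1000)
    # one 74-byte TCP packet from 192.168.0.2:443 to 192.168.0.3:55000
    vec = nstat.updateGetStats(0, 'aa:bb:cc:dd:ee:01', 'aa:bb:cc:dd:ee:02',
                               '192.168.0.2', '443', '192.168.0.3', '55000',
                               74, 0.0)
    print(len(vec), len(nstat.getNetStatHeaders()))  # 100 100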
| 5,834 | 48.449153 | 162 | py |
Kitsune-py | Kitsune-py-master/AfterImage.py | import math
import numpy as np
class incStat:
def __init__(self, Lambda, ID, init_time=0, isTypeDiff=False): # timestamp is creation time
self.ID = ID
self.CF1 = 0 # linear sum
self.CF2 = 0 # sum of squares
self.w = 1e-20 # weight
self.isTypeDiff = isTypeDiff
self.Lambda = Lambda # Decay Factor
self.lastTimestamp = init_time
self.cur_mean = np.nan
self.cur_var = np.nan
self.cur_std = np.nan
self.covs = [] # a list of incStat_covs (references) with relate to this incStat
def insert(self, v, t=0): # v is a scalar, t is v's arrival the timestamp
if self.isTypeDiff:
dif = t - self.lastTimestamp
if dif > 0:
v = dif
else:
v = 0
self.processDecay(t)
# update with v
self.CF1 += v
self.CF2 += math.pow(v, 2)
self.w += 1
self.cur_mean = np.nan # force recalculation if called
self.cur_var = np.nan
self.cur_std = np.nan
# update covs (if any)
for cov in self.covs:
cov.update_cov(self.ID, v, t)
def processDecay(self, timestamp):
factor=1
# check for decay
timeDiff = timestamp - self.lastTimestamp
if timeDiff > 0:
factor = math.pow(2, (-self.Lambda * timeDiff))
self.CF1 = self.CF1 * factor
self.CF2 = self.CF2 * factor
self.w = self.w * factor
self.lastTimestamp = timestamp
return factor
def weight(self):
return self.w
def mean(self):
if math.isnan(self.cur_mean): # calculate it only once when necessary
self.cur_mean = self.CF1 / self.w
return self.cur_mean
def var(self):
if math.isnan(self.cur_var): # calculate it only once when necessary
self.cur_var = abs(self.CF2 / self.w - math.pow(self.mean(), 2))
return self.cur_var
def std(self):
if math.isnan(self.cur_std): # calculate it only once when necessary
self.cur_std = math.sqrt(self.var())
return self.cur_std
def cov(self,ID2):
for cov in self.covs:
if cov.incStats[0].ID == ID2 or cov.incStats[1].ID == ID2:
return cov.cov()
return [np.nan]
def pcc(self,ID2):
for cov in self.covs:
if cov.incStats[0].ID == ID2 or cov.incStats[1].ID == ID2:
return cov.pcc()
return [np.nan]
def cov_pcc(self,ID2):
for cov in self.covs:
if cov.incStats[0].ID == ID2 or cov.incStats[1].ID == ID2:
return cov.get_stats1()
return [np.nan]*2
def radius(self, other_incStats): # the radius of a set of incStats
A = self.var()**2
for incS in other_incStats:
A += incS.var()**2
return math.sqrt(A)
def magnitude(self, other_incStats): # the magnitude of a set of incStats
A = math.pow(self.mean(), 2)
for incS in other_incStats:
A += math.pow(incS.mean(), 2)
return math.sqrt(A)
#calculates and pulls all stats on this stream
def allstats_1D(self):
self.cur_mean = self.CF1 / self.w
self.cur_var = abs(self.CF2 / self.w - math.pow(self.cur_mean, 2))
return [self.w, self.cur_mean, self.cur_var]
#calculates and pulls all stats on this stream, and stats shared with the indicated stream
def allstats_2D(self, ID2):
stats1D = self.allstats_1D()
# Find cov component
stats2D = [np.nan] * 4
for cov in self.covs:
if cov.incStats[0].ID == ID2 or cov.incStats[1].ID == ID2:
stats2D = cov.get_stats2()
break
return stats1D + stats2D
def getHeaders_1D(self, suffix=True):
if self.ID is None:
s0=""
else:
s0 = "_0"
if suffix:
s0 = "_"+self.ID
headers = ["weight"+s0, "mean"+s0, "std"+s0]
return headers
def getHeaders_2D(self, ID2, suffix=True):
hdrs1D = self.getHeaders_1D(suffix)
if self.ID is None:
s0=""
s1=""
else:
s0 = "_0"
s1 = "_1"
if suffix:
s0 = "_"+self.ID
s1 = "_" + ID2
hdrs2D = ["radius_" + s0 + "_" + s1, "magnitude_" + s0 + "_" + s1, "covariance_" + s0 + "_" + s1,
"pcc_" + s0 + "_" + s1]
return hdrs1D+hdrs2D
#like incStat, but maintains stats between two streams
class incStat_cov:
def __init__(self, incS1, incS2, init_time = 0):
        # store references to the streams' incStats
self.incStats = [incS1,incS2]
self.lastRes = [0,0]
# init extrapolators
#self.EXs = [extrapolator(),extrapolator()]
# init sum product residuals
        self.CF3 = 0 # sum of residual products (A-uA)(B-uB)
self.w3 = 1e-20
self.lastTimestamp_cf3 = init_time
#other_incS_decay is the decay factor of the other incstat
# ID: the stream ID which produced (v,t)
    def update_cov(self, ID, v, t): # it is assumed that incStat "ID" has ALREADY been updated with (t,v) [this is performed automatically in method incStat.insert()]
# find incStat
if ID == self.incStats[0].ID:
inc = 0
elif ID == self.incStats[1].ID:
inc = 1
else:
print("update_cov ID error")
return ## error
# Decay other incStat
self.incStats[not(inc)].processDecay(t)
        # Decay residuals
self.processDecay(t,inc)
# Update extrapolator for current stream
#self.EXs[inc].insert(t,v)
# Extrapolate other stream
#v_other = self.EXs[not(inc)].predict(t)
        # Compute and update residual
        res = (v - self.incStats[inc].mean())
        resid = res * self.lastRes[not(inc)]
self.CF3 += resid
self.w3 += 1
self.lastRes[inc] = res
def processDecay(self,t,micro_inc_indx):
factor = 1
# check for decay cf3
timeDiffs_cf3 = t - self.lastTimestamp_cf3
if timeDiffs_cf3 > 0:
factor = math.pow(2, (-(self.incStats[micro_inc_indx].Lambda) * timeDiffs_cf3))
self.CF3 *= factor
self.w3 *= factor
self.lastTimestamp_cf3 = t
self.lastRes[micro_inc_indx] *= factor
return factor
#todo: add W3 for cf3
#covariance approximation
def cov(self):
return self.CF3 / self.w3
# Pearson corl. coef
def pcc(self):
ss = self.incStats[0].std() * self.incStats[1].std()
if ss != 0:
return self.cov() / ss
else:
return 0
# calculates and pulls all correlative stats
def get_stats1(self):
return [self.cov(), self.pcc()]
# calculates and pulls all correlative stats AND 2D stats from both streams (incStat)
def get_stats2(self):
return [self.incStats[0].radius([self.incStats[1]]),self.incStats[0].magnitude([self.incStats[1]]),self.cov(), self.pcc()]
# calculates and pulls all correlative stats AND 2D stats AND the regular stats from both streams (incStat)
def get_stats3(self):
return [self.incStats[0].w,self.incStats[0].mean(),self.incStats[0].std(),self.incStats[1].w,self.incStats[1].mean(),self.incStats[1].std(),self.cov(), self.pcc()]
# calculates and pulls all correlative stats AND the regular stats from both incStats AND 2D stats
def get_stats4(self):
return [self.incStats[0].w,self.incStats[0].mean(),self.incStats[0].std(),self.incStats[1].w,self.incStats[1].mean(),self.incStats[1].std(), self.incStats[0].radius([self.incStats[1]]),self.incStats[0].magnitude([self.incStats[1]]),self.cov(), self.pcc()]
def getHeaders(self,ver,suffix=True): #ver = {1,2,3,4}
headers = []
s0 = "0"
s1 = "1"
if suffix:
s0 = self.incStats[0].ID
s1 = self.incStats[1].ID
if ver == 1:
headers = ["covariance_"+s0+"_"+s1, "pcc_"+s0+"_"+s1]
if ver == 2:
headers = ["radius_"+s0+"_"+s1, "magnitude_"+s0+"_"+s1, "covariance_"+s0+"_"+s1, "pcc_"+s0+"_"+s1]
if ver == 3:
headers = ["weight_"+s0, "mean_"+s0, "std_"+s0,"weight_"+s1, "mean_"+s1, "std_"+s1, "covariance_"+s0+"_"+s1, "pcc_"+s0+"_"+s1]
if ver == 4:
headers = ["weight_" + s0, "mean_" + s0, "std_" + s0, "covariance_" + s0 + "_" + s1, "pcc_" + s0 + "_" + s1]
if ver == 5:
headers = ["weight_"+s0, "mean_"+s0, "std_"+s0,"weight_"+s1, "mean_"+s1, "std_"+s1, "radius_"+s0+"_"+s1, "magnitude_"+s0+"_"+s1, "covariance_"+s0+"_"+s1, "pcc_"+s0+"_"+s1]
return headers
class incStatDB:
# default_lambda: use this as the lambda for all streams. If not specified, then you must supply a Lambda with every query.
def __init__(self,limit=np.Inf,default_lambda=np.nan):
self.HT = dict()
self.limit = limit
self.df_lambda = default_lambda
def get_lambda(self,Lambda):
if not np.isnan(self.df_lambda):
Lambda = self.df_lambda
return Lambda
# Registers a new stream. init_time: init lastTimestamp of the incStat
def register(self,ID,Lambda=1,init_time=0,isTypeDiff=False):
#Default Lambda?
Lambda = self.get_lambda(Lambda)
#Retrieve incStat
key = ID+"_"+str(Lambda)
incS = self.HT.get(key)
if incS is None: #does not already exist
if len(self.HT) + 1 > self.limit:
raise LookupError(
'Adding Entry:\n' + key + '\nwould exceed incStatHT 1D limit of ' + str(
self.limit) + '.\nObservation Rejected.')
incS = incStat(Lambda, ID, init_time, isTypeDiff)
self.HT[key] = incS #add new entry
return incS
# Registers covariance tracking for two streams, registers missing streams
def register_cov(self,ID1,ID2,Lambda=1,init_time=0,isTypeDiff=False):
#Default Lambda?
Lambda = self.get_lambda(Lambda)
# Lookup both streams
incS1 = self.register(ID1,Lambda,init_time,isTypeDiff)
incS2 = self.register(ID2,Lambda,init_time,isTypeDiff)
        #check for pre-existing link
for cov in incS1.covs:
if cov.incStats[0].ID == ID2 or cov.incStats[1].ID == ID2:
                return cov #there is a pre-existing link
# Link incStats
inc_cov = incStat_cov(incS1,incS2,init_time)
incS1.covs.append(inc_cov)
incS2.covs.append(inc_cov)
return inc_cov
# updates/registers stream
def update(self,ID,t,v,Lambda=1,isTypeDiff=False):
incS = self.register(ID,Lambda,t,isTypeDiff)
incS.insert(v,t)
return incS
# Pulls current stats from the given ID
def get_1D_Stats(self,ID,Lambda=1): #weight, mean, std
#Default Lambda?
Lambda = self.get_lambda(Lambda)
#Get incStat
incS = self.HT.get(ID+"_"+str(Lambda))
if incS is None: # does not already exist
            return [np.nan]*3
else:
return incS.allstats_1D()
# Pulls current correlational stats from the given IDs
def get_2D_Stats(self, ID1, ID2, Lambda=1): #cov, pcc
# Default Lambda?
Lambda = self.get_lambda(Lambda)
# Get incStat
incS1 = self.HT.get(ID1 + "_" + str(Lambda))
if incS1 is None: # does not exist
            return [np.nan]*2
# find relevant cov entry
return incS1.cov_pcc(ID2)
# Pulls all correlational stats registered with the given ID
# returns tuple [0]: stats-covs&pccs, [2]: IDs
def get_all_2D_Stats(self, ID, Lambda=1): # cov, pcc
# Default Lambda?
Lambda = self.get_lambda(Lambda)
# Get incStat
incS1 = self.HT.get(ID + "_" + str(Lambda))
if incS1 is None: # does not exist
return ([],[])
# find relevant cov entry
stats = []
IDs = []
for cov in incS1.covs:
stats.append(cov.get_stats1())
IDs.append([cov.incStats[0].ID,cov.incStats[1].ID])
return stats,IDs
# Pulls current multidimensional stats from the given IDs
def get_nD_Stats(self,IDs,Lambda=1): #radius, magnitude (IDs is a list)
# Default Lambda?
Lambda = self.get_lambda(Lambda)
# Get incStats
incStats = []
for ID in IDs:
incS = self.HT.get(ID + "_" + str(Lambda))
if incS is not None: #exists
incStats.append(incS)
# Compute stats
rad = 0 #radius
mag = 0 #magnitude
for incS in incStats:
rad += incS.var()
mag += incS.mean()**2
return [np.sqrt(rad),np.sqrt(mag)]
# Updates and then pulls current 1D stats from the given ID. Automatically registers previously unknown stream IDs
def update_get_1D_Stats(self, ID,t,v,Lambda=1,isTypeDiff=False): # weight, mean, std
incS = self.update(ID,t,v,Lambda,isTypeDiff)
return incS.allstats_1D()
# Updates and then pulls current correlative stats between the given IDs. Automatically registers previously unknown stream IDs, and cov tracking
#Note: AfterImage does not currently support Diff Type streams for correlational statistics.
def update_get_2D_Stats(self, ID1,ID2,t1,v1,Lambda=1,level=1): #level= 1:cov,pcc 2:radius,magnitude,cov,pcc
#retrieve/add cov tracker
inc_cov = self.register_cov(ID1, ID2, Lambda, t1)
# Update cov tracker
inc_cov.update_cov(ID1,v1,t1)
if level == 1:
return inc_cov.get_stats1()
else:
return inc_cov.get_stats2()
# Updates and then pulls current 1D and 2D stats from the given IDs. Automatically registers previously unknown stream IDs
def update_get_1D2D_Stats(self, ID1,ID2,t1,v1,Lambda=1): # weight, mean, std
return self.update_get_1D_Stats(ID1,t1,v1,Lambda) + self.update_get_2D_Stats(ID1,ID2,t1,v1,Lambda,level=2)
def getHeaders_1D(self,Lambda=1,ID=None):
# Default Lambda?
Lambda = self.get_lambda(Lambda)
hdrs = incStat(Lambda,ID).getHeaders_1D(suffix=False)
return [str(Lambda)+"_"+s for s in hdrs]
def getHeaders_2D(self,Lambda=1,IDs=None, ver=1): #IDs is a 2-element list or tuple
# Default Lambda?
Lambda = self.get_lambda(Lambda)
if IDs is None:
IDs = [0,1]
        hdrs = incStat_cov(incStat(Lambda,IDs[0]),incStat(Lambda,IDs[1]),Lambda).getHeaders(ver,suffix=False)
return [str(Lambda)+"_"+s for s in hdrs]
def getHeaders_1D2D(self,Lambda=1,IDs=None, ver=1):
# Default Lambda?
Lambda = self.get_lambda(Lambda)
if IDs is None:
IDs = [0,1]
hdrs1D = self.getHeaders_1D(Lambda,IDs[0])
hdrs2D = self.getHeaders_2D(Lambda,IDs, ver)
return hdrs1D + hdrs2D
def getHeaders_nD(self,Lambda=1,IDs=[]): #IDs is a n-element list or tuple
# Default Lambda?
ID = ":"
for s in IDs:
ID += "_"+s
Lambda = self.get_lambda(Lambda)
hdrs = ["radius"+ID, "magnitude"+ID]
return [str(Lambda)+"_"+s for s in hdrs]
#cleans out records that have a weight less than the cutoff.
    #returns the number of removed records.
    def cleanOutOldRecords(self,cutoffWeight,curTime):
        n = 0
        # decay every record to the current time, then purge those whose
        # weight has fallen to or below the cutoff
        for key in list(self.HT.keys()):
            incS = self.HT[key]
            incS.processDecay(curTime)
            if incS.w <= cutoffWeight:
                del self.HT[key]
                n += 1
        return n
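
# --- Usage sketch (illustrative addition, not part of the original file) ---
# Every insert first decays the aggregates by 2^(-Lambda * dt), so older
# observations fade out exponentially ("damped" statistics):
if __name__ == '__main__':
    db = incStatDB()
    db.update('flowA', t=0.0, v=10.0, Lambda=1)
    w, mean, var = db.update_get_1D_Stats('flowA', t=1.0, v=20.0, Lambda=1)
    print(w, mean, var)  # w ~= 1.5: the first sample's weight halved after 1s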
| 16,000 | 35.119639 | 263 | py |
Kitsune-py | Kitsune-py-master/Kitsune.py | from FeatureExtractor import *
from KitNET.KitNET import KitNET
# MIT License
#
# Copyright (c) 2018 Yisroel mirsky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Kitsune:
def __init__(self,file_path,limit,max_autoencoder_size=10,FM_grace_period=None,AD_grace_period=10000,learning_rate=0.1,hidden_ratio=0.75,):
#init packet feature extractor (AfterImage)
self.FE = FE(file_path,limit)
#init Kitnet
self.AnomDetector = KitNET(self.FE.get_num_features(),max_autoencoder_size,FM_grace_period,AD_grace_period,learning_rate,hidden_ratio)
def proc_next_packet(self):
# create feature vector
x = self.FE.get_next_vector()
if len(x) == 0:
return -1 #Error or no packets left
# process KitNET
return self.AnomDetector.process(x) # will train during the grace periods, then execute on all the rest.
| 1,905 | 43.325581 | 143 | py |
Kitsune-py | Kitsune-py-master/KitNET/utils.py |
import numpy
from scipy.stats import norm
numpy.seterr(all='ignore')
def pdf(x,mu,sigma): #normal distribution pdf
x = (x-mu)/sigma
return numpy.exp(-x**2/2)/(numpy.sqrt(2*numpy.pi)*sigma)
def invLogCDF(x,mu,sigma): #normal distribution cdf
x = (x - mu) / sigma
    return norm.logcdf(-x) #note: we negate x after normalization, so by symmetry this yields the log of 1-cdf
def sigmoid(x):
return 1. / (1 + numpy.exp(-x))
def dsigmoid(x):
return x * (1. - x)
def tanh(x):
return numpy.tanh(x)
def dtanh(x):
return 1. - x * x
def softmax(x):
e = numpy.exp(x - numpy.max(x)) # prevent overflow
if e.ndim == 1:
return e / numpy.sum(e, axis=0)
else:
return e / numpy.array([numpy.sum(e, axis=1)]).T # ndim = 2
def ReLU(x):
return x * (x > 0)
def dReLU(x):
return 1. * (x > 0)
class rollmean:
def __init__(self,k):
self.winsize = k
self.window = numpy.zeros(self.winsize)
self.pointer = 0
def apply(self,newval):
self.window[self.pointer]=newval
self.pointer = (self.pointer+1) % self.winsize
return numpy.mean(self.window)
# probability density for the Gaussian dist
# def gaussian(x, mean=0.0, scale=1.0):
# s = 2 * numpy.power(scale, 2)
# e = numpy.exp( - numpy.power((x - mean), 2) / s )
# return e / numpy.square(numpy.pi * s)
| 1,363 | 22.118644 | 94 | py |
Kitsune-py | Kitsune-py-master/KitNET/dA.py | # Copyright (c) 2017 Yusuke Sugomori
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Portions of this code have been adapted from Yusuke Sugomori's code on GitHub: https://github.com/yusugomori/DeepLearning
import sys
import numpy
from KitNET.utils import *
import json
class dA_params:
def __init__(self,n_visible = 5, n_hidden = 3, lr=0.001, corruption_level=0.0, gracePeriod = 10000, hiddenRatio=None):
self.n_visible = n_visible# num of units in visible (input) layer
self.n_hidden = n_hidden# num of units in hidden layer
self.lr = lr
self.corruption_level = corruption_level
self.gracePeriod = gracePeriod
self.hiddenRatio = hiddenRatio
class dA:
def __init__(self, params):
self.params = params
if self.params.hiddenRatio is not None:
self.params.n_hidden = int(numpy.ceil(self.params.n_visible*self.params.hiddenRatio))
        # for 0-1 normalization
self.norm_max = numpy.ones((self.params.n_visible,)) * -numpy.Inf
self.norm_min = numpy.ones((self.params.n_visible,)) * numpy.Inf
self.n = 0
self.rng = numpy.random.RandomState(1234)
a = 1. / self.params.n_visible
self.W = numpy.array(self.rng.uniform( # initialize W uniformly
low=-a,
high=a,
size=(self.params.n_visible, self.params.n_hidden)))
self.hbias = numpy.zeros(self.params.n_hidden) # initialize h bias 0
self.vbias = numpy.zeros(self.params.n_visible) # initialize v bias 0
self.W_prime = self.W.T
def get_corrupted_input(self, input, corruption_level):
assert corruption_level < 1
return self.rng.binomial(size=input.shape,
n=1,
p=1 - corruption_level) * input
# Encode
def get_hidden_values(self, input):
return sigmoid(numpy.dot(input, self.W) + self.hbias)
# Decode
def get_reconstructed_input(self, hidden):
return sigmoid(numpy.dot(hidden, self.W_prime) + self.vbias)
def train(self, x):
self.n = self.n + 1
# update norms
self.norm_max[x > self.norm_max] = x[x > self.norm_max]
self.norm_min[x < self.norm_min] = x[x < self.norm_min]
# 0-1 normalize
x = (x - self.norm_min) / (self.norm_max - self.norm_min + 0.0000000000000001)
if self.params.corruption_level > 0.0:
tilde_x = self.get_corrupted_input(x, self.params.corruption_level)
else:
tilde_x = x
y = self.get_hidden_values(tilde_x)
z = self.get_reconstructed_input(y)
L_h2 = x - z
L_h1 = numpy.dot(L_h2, self.W) * y * (1 - y)
L_vbias = L_h2
L_hbias = L_h1
L_W = numpy.outer(tilde_x.T, L_h1) + numpy.outer(L_h2.T, y)
self.W += self.params.lr * L_W
self.hbias += self.params.lr * L_hbias
self.vbias += self.params.lr * L_vbias
return numpy.sqrt(numpy.mean(L_h2**2)) #the RMSE reconstruction error during training
def reconstruct(self, x):
y = self.get_hidden_values(x)
z = self.get_reconstructed_input(y)
return z
    def execute(self, x): #returns RMSE of the reconstruction of x
if self.n < self.params.gracePeriod:
return 0.0
else:
# 0-1 normalize
x = (x - self.norm_min) / (self.norm_max - self.norm_min + 0.0000000000000001)
z = self.reconstruct(x)
            rmse = numpy.sqrt(((x - z) ** 2).mean()) #RMSE
return rmse
def inGrace(self):
return self.n < self.params.gracePeriod
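
# --- Usage sketch (illustrative addition, not part of the original file) ---
if __name__ == '__main__':
    params = dA_params(n_visible=8, n_hidden=0, lr=0.1,
                       gracePeriod=100, hiddenRatio=0.5)
    ae = dA(params)  # hiddenRatio=0.5 -> 4 hidden units
    rng = numpy.random.RandomState(0)
    for _ in range(200):
        ae.train(rng.rand(8))        # learn what "normal" vectors look like
    print(ae.execute(rng.rand(8)))   # RMSE of the reconstruction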
| 4,681 | 35.866142 | 123 | py |
Kitsune-py | Kitsune-py-master/KitNET/__init__.py | __all__ = ["corClust", "dA", "KitNET","utils"] | 46 | 46 | 46 | py |
Kitsune-py | Kitsune-py-master/KitNET/corClust.py | import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster, to_tree
# A helper class for KitNET which performs a correlation-based incremental clustering of the dimensions in X
# n: the number of dimensions in the dataset
# For more information and citation, please see our NDSS'18 paper: Kitsune: An Ensemble of Autoencoders for Online Network Intrusion Detection
class corClust:
def __init__(self,n):
#parameter:
self.n = n
        #variables
        self.c = np.zeros(n) #linear sum of features
        self.c_r = np.zeros(n) #linear sum of feature residuals
        self.c_rs = np.zeros(n) #linear sum of squared feature residuals
        self.C = np.zeros((n,n)) #partial correlation matrix
self.N = 0 #number of updates performed
# x: a numpy vector of length n
def update(self,x):
self.N += 1
self.c += x
c_rt = x - self.c/self.N
self.c_r += c_rt
self.c_rs += c_rt**2
self.C += np.outer(c_rt,c_rt)
# creates the current correlation distance matrix between the features
def corrDist(self):
c_rs_sqrt = np.sqrt(self.c_rs)
C_rs_sqrt = np.outer(c_rs_sqrt,c_rs_sqrt)
        C_rs_sqrt[C_rs_sqrt==0] = 1e-100 #this protects against divide by zero errors (occurs when a feature is a constant)
D = 1-self.C/C_rs_sqrt #the correlation distance matrix
D[D<0] = 0 #small negatives may appear due to the incremental fashion in which we update the mean. Therefore, we 'fix' them
return D
# clusters the features together, having no more than maxClust features per cluster
def cluster(self,maxClust):
D = self.corrDist()
Z = linkage(D[np.triu_indices(self.n, 1)]) # create a linkage matrix based on the distance matrix
if maxClust < 1:
maxClust = 1
if maxClust > self.n:
maxClust = self.n
map = self.__breakClust__(to_tree(Z),maxClust)
return map
# a recursive helper function which breaks down the dendrogram branches until all clusters have no more than maxClust elements
def __breakClust__(self,dendro,maxClust):
if dendro.count <= maxClust: #base case: we found a minimal cluster, so mark it
            return [dendro.pre_order()] #return the original ids of the features in this cluster
return self.__breakClust__(dendro.get_left(),maxClust) + self.__breakClust__(dendro.get_right(),maxClust)
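
# --- Usage sketch (illustrative addition, not part of the original file) ---
if __name__ == '__main__':
    cc = corClust(4)
    rng = np.random.RandomState(0)
    for _ in range(1000):
        a, b = rng.rand(), rng.rand()
        cc.update(np.array([a, 2 * a, b, b + 1]))  # two correlated pairs
    print(cc.cluster(maxClust=2))  # expected grouping: [[0, 1], [2, 3]] (order may vary)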
# Copyright (c) 2017 Yisroel Mirsky
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 3,549 | 46.972973 | 142 | py |
Kitsune-py | Kitsune-py-master/KitNET/KitNET.py | import numpy as np
import KitNET.dA as AE
import KitNET.corClust as CC
# This class represents a KitNET machine learner.
# KitNET is a lightweight online anomaly detection algorithm based on an ensemble of autoencoders.
# For more information and citation, please see our NDSS'18 paper: Kitsune: An Ensemble of Autoencoders for Online Network Intrusion Detection
# For licensing information, see the end of this document
class KitNET:
#n: the number of features in your input dataset (i.e., x \in R^n)
#m: the maximum size of any autoencoder in the ensemble layer
#AD_grace_period: the number of instances the network will learn from before producing anomaly scores
    #FM_grace_period: the number of instances which will be taken to learn the feature mapping. If 'None', then FM_grace_period=AD_grace_period
#learning_rate: the default stochastic gradient descent learning rate for all autoencoders in the KitNET instance.
#hidden_ratio: the default ratio of hidden to visible neurons. E.g., 0.75 will cause roughly a 25% compression in the hidden layer.
#feature_map: One may optionally provide a feature map instead of learning one. The map must be a list,
    # where the i-th entry contains a list of the feature indices to be assigned to the i-th autoencoder in the ensemble.
# For example, [[2,5,3],[4,0,1],[6,7]]
def __init__(self,n,max_autoencoder_size=10,FM_grace_period=None,AD_grace_period=10000,learning_rate=0.1,hidden_ratio=0.75, feature_map = None):
# Parameters:
self.AD_grace_period = AD_grace_period
if FM_grace_period is None:
self.FM_grace_period = AD_grace_period
else:
self.FM_grace_period = FM_grace_period
if max_autoencoder_size <= 0:
self.m = 1
else:
self.m = max_autoencoder_size
self.lr = learning_rate
self.hr = hidden_ratio
self.n = n
# Variables
self.n_trained = 0 # the number of training instances so far
self.n_executed = 0 # the number of executed instances so far
self.v = feature_map
if self.v is None:
print("Feature-Mapper: train-mode, Anomaly-Detector: off-mode")
else:
self.__createAD__()
print("Feature-Mapper: execute-mode, Anomaly-Detector: train-mode")
        self.FM = CC.corClust(self.n) #incremental feature clustering for the feature mapping process
self.ensembleLayer = []
self.outputLayer = None
    #If FM_grace_period+AD_grace_period has passed, then this function executes KitNET on x. Otherwise, this function learns from x.
#x: a numpy array of length n
#Note: KitNET automatically performs 0-1 normalization on all attributes.
def process(self,x):
if self.n_trained > self.FM_grace_period + self.AD_grace_period: #If both the FM and AD are in execute-mode
return self.execute(x)
else:
self.train(x)
return 0.0
#force train KitNET on x
#returns the anomaly score of x during training (do not use for alerting)
def train(self,x):
if self.n_trained <= self.FM_grace_period and self.v is None: #If the FM is in train-mode, and the user has not supplied a feature mapping
            #update the incremental correlation matrix
self.FM.update(x)
if self.n_trained == self.FM_grace_period: #If the feature mapping should be instantiated
self.v = self.FM.cluster(self.m)
self.__createAD__()
print("The Feature-Mapper found a mapping: "+str(self.n)+" features to "+str(len(self.v))+" autoencoders.")
print("Feature-Mapper: execute-mode, Anomaly-Detector: train-mode")
else: #train
## Ensemble Layer
S_l1 = np.zeros(len(self.ensembleLayer))
for a in range(len(self.ensembleLayer)):
# make sub instance for autoencoder 'a'
xi = x[self.v[a]]
S_l1[a] = self.ensembleLayer[a].train(xi)
## OutputLayer
self.outputLayer.train(S_l1)
if self.n_trained == self.AD_grace_period+self.FM_grace_period:
print("Feature-Mapper: execute-mode, Anomaly-Detector: execute-mode")
self.n_trained += 1
#force execute KitNET on x
def execute(self,x):
if self.v is None:
raise RuntimeError('KitNET Cannot execute x, because a feature mapping has not yet been learned or provided. Try running process(x) instead.')
else:
self.n_executed += 1
## Ensemble Layer
S_l1 = np.zeros(len(self.ensembleLayer))
for a in range(len(self.ensembleLayer)):
# make sub inst
xi = x[self.v[a]]
S_l1[a] = self.ensembleLayer[a].execute(xi)
## OutputLayer
return self.outputLayer.execute(S_l1)
def __createAD__(self):
# construct ensemble layer
for map in self.v:
params = AE.dA_params(n_visible=len(map), n_hidden=0, lr=self.lr, corruption_level=0, gracePeriod=0, hiddenRatio=self.hr)
self.ensembleLayer.append(AE.dA(params))
# construct output layer
params = AE.dA_params(len(self.v), n_hidden=0, lr=self.lr, corruption_level=0, gracePeriod=0, hiddenRatio=self.hr)
self.outputLayer = AE.dA(params)
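
# --- Usage sketch (illustrative addition, not part of the original file) ---
if __name__ == '__main__':
    det = KitNET(n=20, max_autoencoder_size=5,
                 FM_grace_period=200, AD_grace_period=500)
    rng = np.random.RandomState(0)
    for _ in range(701):
        det.process(rng.rand(20))     # FM + AD grace periods: returns 0.0
    print(det.process(rng.rand(20)))  # execute-mode: anomaly score (RMSE)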
# Copyright (c) 2017 Yisroel Mirsky
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 6,544 | 50.132813 | 154 | py |
DDoS | DDoS-master/analyse_dataset.py | import argparse
import logging
import math
import os
import random
import statistics
import sys
import numpy as np
import pandas as pd
import torch
import torch.autograd.profiler as profiler
import torch.nn.functional as F
from torch.cuda.amp import autocast
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import wandb
from models import *
from models.ReconResNet import ResNet
from models.ShuffleUNet.net import ShuffleUNet
from models.ThisNewNet import ThisNewNet
from utils.data import *
from utils.datasets_dyn import SRDataset
from utils.utilities import ResSaver
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
modelIDs = {
0: "UNET",
1: "SRCNN",
2: "SRCNNv2",
3: "SRCNNv3",
4: "UNETvSeg",
5: "UNETvSegDS",
6: "DenseNet",
7: "UNETSRCNN",
8: "SRCNNUNET",
9: "ReconResNet",
10: "ShuffleUNet",
11: "UNETMSS",
}
lossIDs = {
0: "pLoss",
1: "MAE",
2: "MultiSSIM",
3: "SSIM3D"
}
def parseARGS():
ap = argparse.ArgumentParser()
ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).")
ap.add_argument("--seed", default=2020, type=int, help="Seed")
ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/3DDynTest/MickAbdomen3DDyn/DynProtocol3/Filtered/', help="Path to Dataset Folder.")
ap.add_argument("-op", "--outpath", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2Dyn/newSet/', help="Path for Output.")
ap.add_argument("-ot", "--outtype", default=r'StatTPinit_MickAbd3DDyn3conST_woZpad_full_Best', help="Type of Recon currently being performed.")
ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.")
ap.add_argument("-s", "--scalefact", default='(1,1,1)', help="Scaling Factor. For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].")
ap.add_argument("-uf", "--usfolder", default='usTestDynConST', help="Undersampled Folder.")
ap.add_argument("-hf", "--hrfolder", default='hrTestDynConST', help="HighRes (Fully-sampled) Folder.") #hrTestDynPadded for ktGRASP
ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.")
ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.")
ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.")
ap.add_argument("-m", "--modelname", default="ZeroPadded", help="Model to Load for testing.")
# ap.add_argument("-bst", "--beststring", default="best", help="Model to Load for testing.")
# ap.add_argument("-mb", "--modelbest", type=int, default=1, help="Model to Load for testing.")
ap.add_argument("-c", "--cuda", type=bool, default=True, help="Use CUDA.")
# ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.")
ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.")
# ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.")
ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. Set it to None if not desired.")
ap.add_argument("-pst", "--patchstride", default='(3,3,3)', help="Stride of patches, to be used during validation")
# ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.")
ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).")
# ap.add_argument("-inc", "--inchannel", type=int, default=1, help="Number of Channels in the Data.")
# ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.")
ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. Supply seperated by coma or as a tuple, no spaces in between. Will only be used if Patch Size is None.")
ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.")
ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Pre-norm before saving the images and calculating the metrics.")
ap.add_argument("-dus", "--detectus", type=int, default=0, help="Whether to replace the us using model name")
#param to reproduce model
# ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs))
# ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.")
# ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]")
# ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.")
# ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.")
# ap.add_argument("-mslvl", "--msslevel", type=int, default=2, help="(Only for Model ID 11) Depth of the Model.")
# ap.add_argument("-msltn", "--msslatent", type=int, default=1, help="(Only for Model ID 11) Use the latent as one of the MSS level.")
# ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.")
# ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=0, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. If False, interp will be applied after conv.")
# ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.")
# ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs))
# ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.")
# ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.")
# ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired")
# ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).")
# ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.")
# ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.")
# ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2")
# ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. Default 1")
# #WnB related params
# ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not")
# ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project")
# ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity")
# ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group")
# ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID")
# ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None")
# ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients")
return ap.parse_args()
args = parseARGS()
# os.environ["TMPDIR"] = "/scratch/schatter/tmp"
# os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.set_num_threads(1)
random.seed(args.seed)
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
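# The calls above seed every RNG in play (Python, NumPy, Torch CPU and CUDA)
# and disable cuDNN autotuning, trading a little speed for run-to-run
# reproducibility of the analysis.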
if __name__ == "__main__" :
args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(',')))
args.homepath = os.path.expanduser("~/Documents")
if args.patchsize:
args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(',')))
if args.patchstride:
args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(',')))
if args.inshape:
args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(',')))
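    # The repeated replace/split expressions above could be factored into a
    # small helper; a sketch (hypothetical, not used elsewhere in this script):
    #   def parse_tuple(s):
    #       """'(256,256,30)' or '256,256,30' -> (256, 256, 30)"""
    #       return tuple(map(int, s.strip("()").split(",")))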
# args.chkpoint = os.path.join(args.outpath, args.outfolder, args.modelname, args.modelname)
# if args.modelbest:
# print('best model testing')
# args.chkpoint += "_" + args.beststring + ".pth.tar"
# else:
# args.chkpoint += ".pth.tar"
# if args.patchstride:
# args.modelname += "_infstr" + "c".join(list(map(str, args.patchstride)))
# args.modelname = args.modelname.replace(args.usfolder+"_", "")
# print("Testing: "+args.modelname)
# if args.modelid == 2:
# SRCNN3D = SRCNN3Dv2
# elif args.modelid == 3:
# SRCNN3D = SRCNN3Dv3
# if args.medianloss:
# loss_reducer = statistics.median
# else:
# loss_reducer = statistics.mean
dir_path = args.dataset + args.usfolder+ '/' + args.us + '/'
label_dir_path = args.dataset + args.hrfolder + '/'
# log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', args.modelname)
save_path = os.path.join(args.outpath, args.outfolder, args.modelname, args.outtype)
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
# tb_writer = SummaryWriter(log_dir = log_path)
os.makedirs(save_path, exist_ok=True)
# logname = os.path.join(args.homepath, 'testlog_'+args.modelname+'.txt')
# logging.basicConfig(filename=logname,
# filemode='a',
# format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
# datefmt='%H:%M:%S',
# level=logging.DEBUG)
# # transforms = [tio.transforms.RescaleIntensity((0, 1))]
# transforms = []
testDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element
stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed
patch_size_us=None, return_coords=True, pad_patch=False, pre_interpolate=args.preint, norm_data=args.prenorm, pre_load=True, noncumulative=True) #TODO implement patch_size_us if required - patch_size//scaling_factor
test_loader = torch.utils.data.DataLoader(testDS, batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True)
# model_scale_factor=tuple(np.roll(args.scalefact,shift=1))
# if args.modelid == 0:
# model = UNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob))
# elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3):
# sys.exit("SRCNN3D is not ready for different numbers of input and output channel")
# model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures)
# elif (args.modelid == 4) or (args.modelid == 5):
# model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures)
# elif args.modelid == 6:
# model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob)
# elif (args.modelid == 7) or (args.modelid == 8):
# model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob),
# scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False,
# loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp)
# elif args.modelid == 9:
# model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args
# do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True)
# elif args.modelid == 10:
# model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel)
# elif args.modelid == 11:
# model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)),
# batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob),
# mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, mss_interpb4=args.mssinterpb4)
# else:
# sys.exit("Invalid Model ID")
# if args.modelid == 5:
# IsDeepSup = True
# else:
# IsDeepSup = False
# if args.profile:
# dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape)
# with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof:
# model(dummy)
# prof.export_chrome_trace(os.path.join(save_path, 'model_trace'))
# model.to(device)
# chk = torch.load(args.chkpoint, map_location=device)
# model.load_state_dict(chk['state_dict'])
# trained_epoch = chk['epoch']
# model.eval()
saver = ResSaver(os.path.join(save_path, "Results"), save_inp=True, do_norm=args.prenorm)
markers = {}
inputs = {}
results = {}
targets = {}
    if not getattr(args, "wnbactive", False):  # the WandB CLI flags are commented out above, so guard the attribute access
        os.environ["WANDB_MODE"] = "dryrun"
with torch.no_grad():
runningSSIM = []
test_ssim = []
test_metrics = []
for b, (lr_imgs, hr_imgs, start_coords, files, shapes, pad) in enumerate(tqdm(test_loader)):
            lr_imgs = lr_imgs[:,1,...].unsqueeze(1).contiguous().to(device, non_blocking=True) # keep only the current-TP channel: (N, 1, D, H, W)
            hr_imgs = hr_imgs.contiguous()#.to(device) # fully-sampled targets: (N, 1, D, H, W)
pad = pad.numpy()
lr_imgs = F.interpolate(lr_imgs, size=hr_imgs.shape[2:], mode='trilinear')
tmp_in = lr_imgs.cpu().detach()#.numpy()
tmp_tar = hr_imgs#.numpy()
for i in range(hr_imgs.shape[0]):
if bool(args.patchsize) and args.patchsize[0] != -1: #TODO: implement non-iso patch-size, now only using the first element
if files[i] not in results:
markers[files[i]] = np.zeros(shapes[i][0].numpy())
inputs[files[i]] = np.zeros(shapes[i][0].numpy())
results[files[i]] = np.zeros(shapes[i][0].numpy())
targets[files[i]] = np.zeros(shapes[i][0].numpy())
(startIndex_depth, startIndex_length, startIndex_width) = start_coords[i][0].numpy() #because of moveaxis, l,w,d has become d,l,w
if pad[i].any():
tin = F.pad(tmp_in[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy()
ttar = F.pad(tmp_tar[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy()
else:
tin = tmp_in[i].squeeze().numpy()
ttar = tmp_tar[i].squeeze().numpy()
tin = tin[1,...] #TODO make it configurable. Currently its prevTPPatch, patch
markers[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += 1
inputs[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(tin, 0, -1)
targets[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(ttar, 0, -1)
else:
inputs[files[i]] = np.moveaxis(tmp_in[i,0,...].squeeze().numpy(), 0, -1) #TODO make it configurable. Currently its prevTPPatch, patch
targets[files[i]] = np.moveaxis(tmp_tar[i,0,...].squeeze().numpy(), 0, -1)
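        # Overlapping-patch reassembly: every voxel accumulates the sum of all
        # patches covering it while `markers` counts the visits, so the division
        # below yields the per-voxel mean (a voxel covered by 4 patches ends up
        # with the average of its 4 values). Note that no model is run in this
        # analysis script, so `results` remains zero-filled here.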
if bool(args.patchsize) and args.patchsize[0] != -1:
for f in inputs.keys():
inputs[f] = np.divide(inputs[f], markers[f])
results[f] = np.divide(results[f], markers[f])
targets[f] = np.divide(targets[f], markers[f])
for i, filename in enumerate(results.keys()):
out = results[filename]
inp = inputs[filename]
gt = targets[filename]
metrics = saver.CalcNSave(out, inp, gt, filename, already_numpy=True)
if metrics is not None:
metrics['file'] = filename
test_metrics.append(metrics)
ssim = round(metrics['SSIMOut'],4)
test_ssim.append(ssim)
runningSSIM.append(ssim)
if len(test_metrics) > 0:
df = pd.DataFrame.from_dict(test_metrics)
df.to_csv(os.path.join(save_path, 'Results.csv'), index=False)
| 18,510 | 58.330128 | 239 | py |
DDoS | DDoS-master/train_DDoS_baseline_nondyn.py | import argparse
import logging
import math
import os
import random
import statistics
import sys
import numpy as np
import torch
import torch.autograd.profiler as profiler
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchio as tio
from torch.cuda.amp import GradScaler, autocast
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import wandb
from models import *
from models.ReconResNet import ResNet
from models.ShuffleUNet.net import ShuffleUNet
from models.ThisNewNet import ThisNewNet
from utils.data import *
from utils.datasets import SRDataset
from utils.pLoss.perceptual_loss import PerceptualLoss
from utils.utilities import getSSIM, tensorboard_images
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
modelIDs = {
0: "UNET",
1: "SRCNN",
2: "SRCNNv2",
3: "SRCNNv3",
4: "UNETvSeg",
5: "UNETvSegDS",
6: "DenseNet",
7: "UNETSRCNN",
8: "SRCNNUNET",
9: "ReconResNet",
10: "ShuffleUNet",
11: "UNETMSS",
}
lossIDs = {
0: "pLoss",
1: "MAE",
2: "MultiSSIM",
3: "SSIM3D"
}
def parseARGS():
ap = argparse.ArgumentParser()
ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).")
ap.add_argument("--seed", default=2020, type=int, help="Seed")
# ap.add_argument("-ds", "--dataset", default=r'/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/CHAOSwoT2Dyn/newSet/', help="Path to Dataset Folder.")
ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2/', help="Path to Dataset Folder.")
ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.")
ap.add_argument("-s", "--scalefact", default='(1,1,1)', help="Scaling Factor. For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].")
ap.add_argument("-uf", "--usfolder", default='usTrain', help="Undersampled Folder.")
ap.add_argument("-hf", "--hrfolder", default='hrTrain', help="HighRes (Fully-sampled) Folder.")
ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.")
ap.add_argument("-ms", "--modelsuffix", default='fullBaselineNonDyn', help="Any Suffix To Add with the Model Name.")
ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.")
ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.")
ap.add_argument("-cp", "--chkpoint", default=None, help="Checkpoint (of the current training) to Load.")
ap.add_argument("-cpft", "--chkpointft", default=None, help="(To be used for Fine-Tuning) Checkpoint to Load for Fine-Tuning.")
ap.add_argument("-c", "--cuda", type=bool, default=True, help="Use CUDA.")
ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.")
ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.")
ap.add_argument("-v", "--val", type=bool, default=True, help="Do Validation.")
ap.add_argument("-vp", "--valdsper", type=float, default=0.3, help="Percentage of the DS to be used for Validation.")
ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.")
ap.add_argument("-ep", "--epochs", type=int, default=100, help="Total Number of Epochs. To use Number of Iterations, set it to None")
ap.add_argument("-it", "--iterations", type=int, default=1e6, help="Total Number of Iterations. To be used if number of Epochs is None")
ap.add_argument("-lr", "--lr", type=float, default=1e-4, help="Total Number of Epochs.")
ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. Set it to None if not desired.")
ap.add_argument("-pst", "--patchstride", default='(12,12,6)', help="Stride of patches, to be used during validation")
ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.")
ap.add_argument("-sf", "--savefreq", type=int, default=1, help="saving Frequency.")
ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).")
ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs))
ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.")
ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]")
ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.")
ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.")
ap.add_argument("-inc", "--inchannel", type=int, default=1, help="Number of Channels in the Data.")
ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.")
ap.add_argument("-mslvl", "--msslevel", type=int, default=1, help="(Only for Model ID 11) Depth of the Model.")
ap.add_argument("-msltn", "--msslatent", type=int, default=0, help="(Only for Model ID 11) Use the latent as one of the MSS level.")
ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.")
ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=1, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. If False, interp will be applied after conv.")
ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. Supply seperated by coma or as a tuple, no spaces in between. Will only be used if Patch Size is None.")
ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.")
ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs))
ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.")
ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.")
ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired")
ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).")
ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.")
ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.")
ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.")
ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Rescale intensities beteen 0 and 1")
ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2")
ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. Default 1")
#WnB related params
ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not")
ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project")
ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity")
ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group")
ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID")
ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None")
ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients")
return ap.parse_args()
args = parseARGS()
# os.environ["TMPDIR"] = "/scratch/schatter/tmp"
# os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.set_num_threads(1)
random.seed(args.seed)
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if __name__ == "__main__" :
args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(',')))
args.homepath = os.path.expanduser("~/Documents")
if args.patchsize:
args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(',')))
if args.patchstride:
args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(',')))
if args.inshape:
args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(',')))
args.modelname = args.usfolder + "_" + modelIDs[args.modelid] + args.modelsuffix
if args.modelid == 0 or args.modelid == 6 or args.modelid == 11:
args.modelname += "do" + str(args.dropprob) + "dp" + str(args.mdepth)
if args.modelid == 0 or args.modelid == 9 or args.modelid == 11:
args.modelname += args.upmode
if args.batchnorm:
args.modelname += "BN"
if args.modelid == 11:
args.modelname += "MSS"+str(args.msslevel)
args.modelname += "Latent" if args.msslatent else "NoLatent"
args.modelname += args.mssup
args.modelname += "InterpB4" if args.mssinterpb4 else "NoInterpB4"
trainID = args.modelname + '_' + args.us + '_' + lossIDs[args.lossid]
if args.lossid == 0:
trainID += args.plosstyp + 'lvl' + str(args.plosslvl)
if args.finetune:
trainID += "_FT_lrdec" + str(args.lrdecrate)
if args.fteprt:
trainID += "_eprt" + str(args.fteprt)
else:
trainID += "_itrt" + str(args.ftitrt)
print("Training: "+trainID)
if args.modelid == 2:
SRCNN3D = SRCNN3Dv2
elif args.modelid == 3:
SRCNN3D = SRCNN3Dv3
if args.medianloss:
loss_reducer = statistics.median
else:
loss_reducer = statistics.mean
dir_path = args.dataset + args.usfolder+ '/' + args.us + '/'
label_dir_path = args.dataset + args.hrfolder + '/'
log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', trainID)
save_path = os.path.join(args.dataset, args.outfolder, trainID)
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
tb_writer = SummaryWriter(log_dir = log_path)
os.makedirs(save_path, exist_ok=True)
logname = os.path.join(args.homepath, 'log_'+trainID+'.txt')
logging.basicConfig(filename=logname,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
transforms = []
if not args.patchsize:
transforms.append(tio.transforms.CropOrPad(target_shape=args.inshape))
trainDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element
stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed
patch_size_us=None, pre_interpolate=args.preint, norm_data=args.prenorm, pre_load=True, pad_patch=False) #TODO implement patch_size_us if required - patch_size//scaling_factor
model_scale_factor=tuple(np.roll(args.scalefact,shift=1))
if args.val:
train_size = int((1-args.valdsper) * len(trainDS))
val_size = len(trainDS) - train_size
trainDS, valDS = torch.utils.data.random_split(trainDS, [train_size, val_size])
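        # random_split draws from torch's default generator, so the global
        # torch.manual_seed above makes this train/val split reproducible; a
        # dedicated torch.Generator could also be passed via `generator=`.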
else:
valDS = None
if bool(args.patchsize):
args.inshape = args.patchsize
train_loader = DataLoader(dataset=trainDS, batch_size=args.batchsize,shuffle=True, num_workers=args.nworkers, pin_memory=True)
val_loader = None if not args.val else DataLoader(dataset=valDS,batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True)
if args.modelid == 0:
model = UNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob))
elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3):
sys.exit("SRCNN3D is not ready for different numbers of input and output channel")
model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures)
elif (args.modelid == 4) or (args.modelid == 5):
model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures)
elif args.modelid == 6:
model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob)
elif (args.modelid == 7) or (args.modelid == 8):
model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob),
scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False,
loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp)
elif args.modelid == 9:
model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args
do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True)
elif args.modelid == 10:
model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel)
elif args.modelid == 11:
model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)),
batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob),
mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, mss_interpb4=args.mssinterpb4)
else:
sys.exit("Invalid Model ID")
if args.modelid == 5:
IsDeepSup = True
else:
IsDeepSup = False
if args.profile:
dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape)
with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof:
model(dummy)
prof.export_chrome_trace(os.path.join(save_path, 'model_trace'))
args.lr = args.lr/args.lrdecrate
optimizer = optim.Adam(params=filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
model.to(device)
if args.lossid == 0:
if args.outchannel != 1:
sys.exit("Perceptual Loss used here only works for 1 channel images")
loss_func = PerceptualLoss(device=device, loss_model="unet3Dds", resize=None, loss_type=args.plosstyp, n_level=args.plosslvl)
elif args.lossid == 1:
loss_func = nn.L1Loss(reduction='mean')
elif args.lossid == 2:
loss_func = MultiSSIM(data_range=1, n_channels=args.outchannel, reduction='mean').to(device)
elif args.lossid == 3:
loss_func = SSIM(data_range=1, channel=args.outchannel, spatial_dims=3).to(device)
else:
sys.exit("Invalid Loss ID")
if (args.lossid == 0 and args.plosstyp == "L1") or (args.lossid == 1):
IsNegLoss = False
else:
IsNegLoss = True
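    # SSIM-style objectives are similarity scores to be maximised, so IsNegLoss
    # flags them: the loss is negated before backprop and the sign is flipped
    # back for logging, keeping higher-is-better semantics in the logs.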
if (args.modelid == 7) or (args.modelid == 8):
model.loss_func = loss_func
scaler = GradScaler(enabled=args.amp)
if args.chkpoint:
chk = torch.load(args.chkpoint, map_location=device)
elif args.finetune:
if args.chkpointft:
chk = torch.load(args.chkpointft, map_location=device)
else:
sys.exit("Finetune can't be performed if chkpointft not supplied")
else:
chk = None
start_epoch = 0
best_loss = float('-inf') if IsNegLoss else float('inf')
if chk is not None:
model.load_state_dict(chk['state_dict'])
optimizer.load_state_dict(chk['optimizer'])
scaler.load_state_dict(chk['AMPScaler'])
best_loss = chk['best_loss']
start_epoch = chk['epoch'] + 1
iterations = chk['iterations']
        main_train_epoch = (chk['main_train_epoch'] + 1) if 'main_train_epoch' in chk else start_epoch #only used for finetune
        if args.finetune:
            if args.fteprt:
                args.epochs = int(main_train_epoch*(1+args.fteprt))
            else:
                args.iterations = int(iterations*args.ftitrt)
                n_ft_ep = int(args.iterations // len(train_loader))
                args.epochs = main_train_epoch + n_ft_ep
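                # worked example: 100 main-train epochs x 1000 iters/epoch and
                # ftitrt=0.10 -> 10000 fine-tune iters -> 10 epochs -> epochs=110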
if args.epochs is None:
args.epochs = int(args.iterations // len(train_loader) + 1)
if start_epoch >= args.epochs:
        logging.error('Training should at least be for one epoch. Adjusting to perform 1 epoch training')
args.epochs = start_epoch+1
if not args.wnbactive:
os.environ["WANDB_MODE"] = "dryrun"
with wandb.init(project=args.wnbproject, entity=args.wnbentity, group=args.wnbgroup, config=args, name=args.wnbprefix+trainID, id=args.wnbprefix+trainID, resume=True) as WnBRun:
wandb.watch(model, log=args.wnbmodellog, log_freq=args.wnbmodelfreq)
logging.info('Training Epochs: from {0} to {1}'.format(start_epoch, args.epochs-1))
for epoch in range(start_epoch, args.epochs):
#Train
model.train()
runningLoss = []
train_loss = []
print('Epoch '+ str(epoch)+ ': Train')
for i, (images, gt) in enumerate(tqdm(train_loader)):
images = images[:, None, ...].to(device)
gt = gt[:, None, ...].to(device)
with autocast(enabled=args.amp):
if type(model) is SRCNN3D:
output1, output2 = model(images)
loss1 = loss_func(output1, gt)
loss2 = loss_func(output2, gt)
loss = loss2 + loss1
elif type(model) is UNetVSeg:
if IsDeepSup:
sys.exit("Not Implimented yet")
else:
out, _, _ = model(images)
loss = loss_func(out, gt)
elif type(model) is ThisNewNet:
out, loss = model(images, gt=gt)
elif type(model) is UNetMSS:
out, mssout = model(images)
loss = loss_func(out, gt)
for mss in range(len(mssout)):
loss += model.mss_coeff[mss] * loss_func(mssout[mss], gt)
else:
out = model(images)
loss = loss_func(out, gt)
if IsNegLoss:
loss = -loss
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
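                # AMP bookkeeping: the loss is scaled before backward() so small
                # fp16 gradients do not underflow; step() unscales the gradients
                # before updating, and update() retunes the scale factor.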
loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4)
train_loss.append(loss)
runningLoss.append(loss)
logging.info('[%d/%d][%d/%d] Train Loss: %.4f' % ((epoch+1), args.epochs, i, len(train_loader), loss))
del gt, out, loss
torch.cuda.empty_cache()
if i % args.logfreq == 0:
niter = epoch*len(train_loader)+i
tb_writer.add_scalar('Train/Loss', loss_reducer(runningLoss), niter)
wandb.log({"Epoch":epoch, "TrainLoss":loss_reducer(runningLoss)})#, step=niter)
# tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'train')
runningLoss = []
if args.finetune or (epoch % args.savefreq == 0):
checkpoint = {
'epoch': epoch,
'iterations': (epoch+1)*len(train_loader),
'best_loss': best_loss,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'AMPScaler': scaler.state_dict()
}
torch.save(checkpoint, os.path.join(save_path, trainID+".pth.tar"))
if args.modelid != 9 and args.modelid != 6:
torch.onnx.export(model, images, trainID+".onnx", input_names=["LRCurrTP"], output_names=["SuperResolvedCurrTP"])
wandb.save(trainID+".onnx")
del images
tb_writer.add_scalar('Train/EpochLoss', loss_reducer(train_loss), epoch)
wandb.log({"TrainEpochLoss":loss_reducer(train_loss)})#, step=epoch)
torch.cuda.empty_cache()
#Validate
if val_loader:
model.eval()
with torch.no_grad():
runningLoss = []
val_loss = []
runningAcc = []
val_acc = []
print('Epoch '+ str(epoch)+ ': Val')
for i, (images, gt) in enumerate(tqdm(val_loader)):
images = images[:, None, ...].to(device)
gt = gt[:, None, ...].to(device)
with autocast(enabled=args.amp):
if type(model) is SRCNN3D:
output1, output2 = model(images)
loss1 = loss_func(output1, gt)
loss2 = loss_func(output2, gt)
loss = loss2 + loss1
elif type(model) is UNetVSeg:
if IsDeepSup:
sys.exit("Not Implimented yet")
else:
out, _, _ = model(images)
loss = loss_func(out, gt)
elif type(model) is ThisNewNet:
out, loss = model(images, gt=gt)
else:
out = model(images)
loss = loss_func(out, gt)
ssim = getSSIM(gt.detach().cpu().numpy(), out.detach().cpu().numpy(), data_range=1)
loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4)
val_loss.append(loss)
runningLoss.append(loss)
val_acc.append(ssim)
runningAcc.append(ssim)
logging.info('[%d/%d][%d/%d] Val Loss: %.4f' % ((epoch+1), args.epochs, i, len(val_loader), loss))
del gt, out, loss
torch.cuda.empty_cache()
#For tensorboard
if i % args.logfreq == 0:
niter = epoch*len(val_loader)+i
tb_writer.add_scalar('Val/Loss', loss_reducer(runningLoss), niter)
wandb.log({"Epoch":epoch, "ValLoss":loss_reducer(runningLoss)})#, step=niter)
tb_writer.add_scalar('Val/SSIM', loss_reducer(runningAcc), niter)
wandb.log({"Epoch":epoch, "ValSSIM":loss_reducer(runningAcc)})#, step=niter)
# tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'val')
runningLoss = []
runningAcc = []
if (loss_reducer(val_loss) < best_loss and not IsNegLoss) or (loss_reducer(val_loss) > best_loss and IsNegLoss):
best_loss = loss_reducer(val_loss)
WnBRun.summary["best_loss"] = best_loss
checkpoint = {
'epoch': epoch,
'best_loss': best_loss,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'AMPScaler': scaler.state_dict()
}
torch.save(checkpoint, os.path.join(save_path, trainID+"_best.pth.tar"))
if args.modelid != 9 and args.modelid != 6:
torch.onnx.export(model, images, trainID+"_best.onnx", input_names=["LRCurrTP"], output_names=["SuperResolvedCurrTP"])
wandb.save(trainID+"_best.onnx")
del images
tb_writer.add_scalar('Val/EpochLoss', loss_reducer(val_loss), epoch)
wandb.log({"ValEpochLoss":loss_reducer(val_loss)})#, step=epoch)
tb_writer.add_scalar('Val/EpochSSIM', loss_reducer(val_acc), epoch)
wandb.log({"ValEpochSSIM":loss_reducer(val_acc)})#, step=epoch)
torch.cuda.empty_cache()
| 26,386 | 53.972917 | 230 | py |
DDoS | DDoS-master/apply_DDoS_baseline.py | import argparse
import logging
import math
import os
import random
import statistics
import sys
import numpy as np
import pandas as pd
import torch
import torch.autograd.profiler as profiler
import torch.nn.functional as F
from torch.cuda.amp import autocast
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import wandb
from models import *
from models.ReconResNet import ResNet
from models.ShuffleUNet.net import ShuffleUNet
from models.ThisNewNet import ThisNewNet
from utils.data import *
from utils.datasets_dyn import SRDataset
from utils.utilities import ResSaver
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
modelIDs = {
0: "UNET",
1: "SRCNN",
2: "SRCNNv2",
3: "SRCNNv3",
4: "UNETvSeg",
5: "UNETvSegDS",
6: "DenseNet",
7: "UNETSRCNN",
8: "SRCNNUNET",
9: "ReconResNet",
10: "ShuffleUNet",
11: "UNETMSS",
}
lossIDs = {
0: "pLoss",
1: "MAE",
2: "MultiSSIM",
3: "SSIM3D"
}
def parseARGS():
ap = argparse.ArgumentParser()
ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).")
ap.add_argument("--seed", default=2020, type=int, help="Seed")
ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/3DDynTest/MickAbdomen3DDyn/DynProtocol3/Filtered/', help="Path to Dataset Folder.")
ap.add_argument("-op", "--outpath", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2Dyn/newSet/', help="Path for Output.")
ap.add_argument("-ot", "--outtype", default=r'StatTPinit_MickAbd3DDyn3conST_woZpad_full_Best', help="Type of Recon currently being performed.")
ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.")
ap.add_argument("-s", "--scalefact", default='(1,1,1)', help="Scaling Factor. For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].")
ap.add_argument("-uf", "--usfolder", default='usTestDynConST', help="Undersampled Folder.")
ap.add_argument("-hf", "--hrfolder", default='hrTestDynConST', help="HighRes (Fully-sampled) Folder.") #hrTestDynPadded for ktGRASP
ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.")
ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.")
ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.")
ap.add_argument("-m", "--modelname", default="usTrain_UNETfullBaselinedo0.0dp3upsample_Center4MaskWoPad_pLossL1lvl3", help="Model to Load for testing.")
ap.add_argument("-bst", "--beststring", default="best", help="Model to Load for testing.")
ap.add_argument("-mb", "--modelbest", type=int, default=1, help="Model to Load for testing.")
ap.add_argument("-c", "--cuda", type=bool, default=True, help="Use CUDA.")
ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.")
ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.")
ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.")
ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. Set it to None if not desired.")
ap.add_argument("-pst", "--patchstride", default='(3,3,3)', help="Stride of patches, to be used during validation")
ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.")
ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).")
ap.add_argument("-inc", "--inchannel", type=int, default=1, help="Number of Channels in the Data.")
ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.")
ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. Supply seperated by coma or as a tuple, no spaces in between. Will only be used if Patch Size is None.")
ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.")
ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Pre-norm before saving the images and calculating the metrics.")
ap.add_argument("-dus", "--detectus", type=int, default=0, help="Whether to replace the us using model name")
#param to reproduce model
ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs))
ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.")
ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]")
ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.")
ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.")
ap.add_argument("-mslvl", "--msslevel", type=int, default=2, help="(Only for Model ID 11) Depth of the Model.")
ap.add_argument("-msltn", "--msslatent", type=int, default=1, help="(Only for Model ID 11) Use the latent as one of the MSS level.")
ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.")
ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=0, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. If False, interp will be applied after conv.")
ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.")
ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs))
ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.")
ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.")
ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired")
ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).")
ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.")
ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.")
ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2")
ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. Default 1")
#WnB related params
ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not")
ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project")
ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity")
ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group")
ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID")
ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None")
ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients")
return ap.parse_args()
args = parseARGS()
# os.environ["TMPDIR"] = "/scratch/schatter/tmp"
# os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.set_num_threads(1)
random.seed(args.seed)
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if __name__ == "__main__" :
args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(',')))
args.homepath = os.path.expanduser("~/Documents")
if args.patchsize:
args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(',')))
if args.patchstride:
args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(',')))
if args.inshape:
args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(',')))
args.chkpoint = os.path.join(args.outpath, args.outfolder, args.modelname, args.modelname)
if args.modelbest:
print('best model testing')
args.chkpoint += "_" + args.beststring + ".pth.tar"
else:
args.chkpoint += ".pth.tar"
if args.patchstride:
args.modelname += "_infstr" + "c".join(list(map(str, args.patchstride)))
args.modelname = args.modelname.replace(args.usfolder+"_", "")
print("Testing: "+args.modelname)
if args.modelid == 2:
SRCNN3D = SRCNN3Dv2
elif args.modelid == 3:
SRCNN3D = SRCNN3Dv3
if args.medianloss:
loss_reducer = statistics.median
else:
loss_reducer = statistics.mean
dir_path = args.dataset + args.usfolder+ '/' + args.us + '/'
label_dir_path = args.dataset + args.hrfolder + '/'
log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', args.modelname)
save_path = os.path.join(args.outpath, args.outfolder, args.modelname, args.outtype)
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
tb_writer = SummaryWriter(log_dir = log_path)
os.makedirs(save_path, exist_ok=True)
logname = os.path.join(args.homepath, 'testlog_'+args.modelname+'.txt')
logging.basicConfig(filename=logname,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
# transforms = [tio.transforms.RescaleIntensity((0, 1))]
transforms = []
testDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element
stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed
patch_size_us=None, return_coords=True, pad_patch=False, pre_interpolate=args.preint, norm_data=args.prenorm, pre_load=True, noncumulative=True) #TODO implement patch_size_us if required - patch_size//scaling_factor
test_loader = torch.utils.data.DataLoader(testDS, batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True)
model_scale_factor=tuple(np.roll(args.scalefact,shift=1))
if args.modelid == 0:
model = UNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob))
elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3):
sys.exit("SRCNN3D is not ready for different numbers of input and output channel")
model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures)
elif (args.modelid == 4) or (args.modelid == 5):
model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures)
elif args.modelid == 6:
model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob)
elif (args.modelid == 7) or (args.modelid == 8):
model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob),
scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False,
loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp)
elif args.modelid == 9:
model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args
do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True)
elif args.modelid == 10:
model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel)
elif args.modelid == 11:
model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)),
batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob),
mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, mss_interpb4=args.mssinterpb4)
else:
sys.exit("Invalid Model ID")
if args.modelid == 5:
IsDeepSup = True
else:
IsDeepSup = False
if args.profile:
dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape)
with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof:
model(dummy)
prof.export_chrome_trace(os.path.join(save_path, 'model_trace'))
model.to(device)
chk = torch.load(args.chkpoint, map_location=device)
model.load_state_dict(chk['state_dict'])
trained_epoch = chk['epoch']
model.eval()
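    # map_location=device restores checkpoints saved on a different device,
    # and model.eval() switches dropout and batch-norm to inference behaviour.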
saver = ResSaver(os.path.join(save_path, "Results"), save_inp=True, do_norm=args.prenorm)
markers = {}
inputs = {}
results = {}
targets = {}
if not args.wnbactive:
os.environ["WANDB_MODE"] = "dryrun"
with torch.no_grad():
runningSSIM = []
test_ssim = []
test_metrics = []
print('Epoch '+ str(trained_epoch)+ ': Test')
with wandb.init(project=args.wnbproject, entity=args.wnbentity, group=args.wnbgroup, config=args, name=args.wnbprefix+args.modelname, id=args.wnbprefix+args.modelname, resume=True) as WnBRun:
wandb.watch(model, log=args.wnbmodellog, log_freq=args.wnbmodelfreq)
for b, (lr_imgs, hr_imgs, start_coords, files, shapes, pad) in enumerate(tqdm(test_loader)):
                lr_imgs = lr_imgs[:,1,...].unsqueeze(1).contiguous().to(device, non_blocking=True) # keep only the current-TP channel: (N, 1, D, H, W)
                hr_imgs = hr_imgs.contiguous()#.to(device) # fully-sampled targets: (N, 1, D, H, W)
pad = pad.numpy()
with autocast(enabled=args.amp):
if type(model) in (SRCNN3D, SRCNN3Dv2, SRCNN3Dv3):
_, sr_imgs = model(lr_imgs)
elif type(model) is UNetVSeg:
sr_imgs, _, _ = model(lr_imgs)
else:
sr_imgs = model(lr_imgs)
sr_imgs = sr_imgs.type(lr_imgs.dtype)
sr_imgs = F.interpolate(sr_imgs, size=hr_imgs.shape[2:], mode='trilinear')
lr_imgs = F.interpolate(lr_imgs, size=hr_imgs.shape[2:], mode='trilinear')
tmp_in = lr_imgs.cpu().detach()#.numpy()
tmp_res = sr_imgs.cpu().detach()#.numpy()
tmp_tar = hr_imgs#.numpy()
for i in range(hr_imgs.shape[0]):
if bool(args.patchsize) and args.patchsize[0] != -1: #TODO: implement non-iso patch-size, now only using the first element
if files[i] not in results:
markers[files[i]] = np.zeros(shapes[i][0].numpy())
inputs[files[i]] = np.zeros(shapes[i][0].numpy())
results[files[i]] = np.zeros(shapes[i][0].numpy())
targets[files[i]] = np.zeros(shapes[i][0].numpy())
(startIndex_depth, startIndex_length, startIndex_width) = start_coords[i][0].numpy() #because of moveaxis, l,w,d has become d,l,w
if pad[i].any():
tin = F.pad(tmp_in[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy()
tres = F.pad(tmp_res[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy()
ttar = F.pad(tmp_tar[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy()
else:
tin = tmp_in[i].squeeze().numpy()
tres = tmp_res[i].squeeze().numpy()
ttar = tmp_tar[i].squeeze().numpy()
tin = tin[1,...] #TODO make it configurable. Currently its prevTPPatch, patch
markers[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += 1
inputs[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(tin, 0, -1)
results[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(tres, 0, -1)
targets[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(ttar, 0, -1)
else:
inputs[files[i]] = np.moveaxis(tmp_in[i,0,...].squeeze().numpy(), 0, -1) #TODO make it configurable. Currently its prevTPPatch, patch
results[files[i]] = np.moveaxis(tmp_res[i,0,...].squeeze().numpy(), 0, -1)
targets[files[i]] = np.moveaxis(tmp_tar[i,0,...].squeeze().numpy(), 0, -1)
if bool(args.patchsize) and args.patchsize[0] != -1:
for f in inputs.keys():
inputs[f] = np.divide(inputs[f], markers[f])
results[f] = np.divide(results[f], markers[f])
targets[f] = np.divide(targets[f], markers[f])
for i, filename in enumerate(results.keys()):
out = results[filename]
inp = inputs[filename]
gt = targets[filename]
metrics = saver.CalcNSave(out, inp, gt, filename, already_numpy=True)
if metrics is not None:
metrics['file'] = filename
test_metrics.append(metrics)
ssim = round(metrics['SSIMOut'],4)
test_ssim.append(ssim)
runningSSIM.append(ssim)
logging.info('[%d/%d] Test SSIM: %.4f' % (i, len(testDS), ssim))
#For tensorboard
tb_writer.add_scalar('Test/SSIM', loss_reducer(runningSSIM), i)
wandb.log({"TestEpoch":trained_epoch, "TestSSIM":loss_reducer(runningSSIM)})#, step=niter)
runningSSIM = []
if len(test_metrics) > 0:
print("Avg SSIM: "+str(loss_reducer(test_ssim)))
WnBRun.summary["AvgTestSSIM"] = loss_reducer(test_ssim)
df = pd.DataFrame.from_dict(test_metrics)
df.to_csv(os.path.join(save_path, 'Results.csv'), index=False)
| 20,417 | 59.587537 | 239 | py |
DDoS | DDoS-master/apply_DDoS.py | import argparse
import logging
import math
import os
import random
import statistics
import sys
import numpy as np
import pandas as pd
import torch
import torch.autograd.profiler as profiler
import torch.nn.functional as F
from torch.cuda.amp import autocast
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import wandb
from models import *
from models.ReconResNet import ResNet
from models.ShuffleUNet.net import ShuffleUNet
from models.ThisNewNet import ThisNewNet
from utils.data import *
from utils.datasets_dyn import SRDataset
from utils.utilities import ResSaver, process_DDoS_SRPrev
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
modelIDs = {
0: "UNET",
1: "SRCNN",
2: "SRCNNv2",
3: "SRCNNv3",
4: "UNETvSeg",
5: "UNETvSegDS",
6: "DenseNet",
7: "UNETSRCNN",
8: "SRCNNUNET",
9: "ReconResNet",
10: "ShuffleUNet",
11: "UNETMSS",
}
lossIDs = {
0: "pLoss",
1: "MAE",
2: "MultiSSIM",
3: "SSIM3D"
}
def parseARGS():
ap = argparse.ArgumentParser()
ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).")
ap.add_argument("--seed", default=2020, type=int, help="Seed")
ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/3DDynTest/MickAbdomen3DDyn/DynProtocol3/Filtered/', help="Path to Dataset Folder.")
ap.add_argument("-op", "--outpath", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2Dyn/newSet/', help="Path for Output.")
ap.add_argument("-ot", "--outtype", default=r'StatTPinitCumulative_MickAbd3DDyn3conST_woZpad_full_Best', help="Type of Recon currently being performed.")
ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.")
ap.add_argument("-s", "--scalefact", default='(1,1,1)', help="Scaling Factor. For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].")
ap.add_argument("-uf", "--usfolder", default='usTestDynConST', help="Undersampled Folder.")
ap.add_argument("-hf", "--hrfolder", default='hrTestDynConST', help="HighRes (Fully-sampled) Folder.") #hrTestDynPadded for ktGRASP
ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.")
ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.")
ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.")
ap.add_argument("-m", "--modelname", default="usTrain_UNETfulldo0.0dp3upsample_Center4MaskWoPad_pLossL1lvl3", help="Model to Load for testing.")
ap.add_argument("-bst", "--beststring", default="best", help="Model to Load for testing.")
ap.add_argument("-mb", "--modelbest", type=int, default=1, help="Model to Load for testing.")
ap.add_argument("-c", "--cuda", type=bool, default=True, help="Use CUDA.")
ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.")
ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.")
ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.")
ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. Set it to None if not desired.")
ap.add_argument("-pst", "--patchstride", default='(3,3,3)', help="Stride of patches, to be used during validation")
ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.")
ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).")
ap.add_argument("-inc", "--inchannel", type=int, default=2, help="Number of Channels in the Data.")
ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.")
ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. Supply seperated by coma or as a tuple, no spaces in between. Will only be used if Patch Size is None.")
ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.")
ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Pre-norm before saving the images and calculating the metrics.")
ap.add_argument("-dus", "--detectus", type=int, default=0, help="Whether to replace the us using model name")
#param to reproduce model
ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs))
ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.")
ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]")
ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.")
ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.")
ap.add_argument("-mslvl", "--msslevel", type=int, default=2, help="(Only for Model ID 11) Depth of the Model.")
ap.add_argument("-msltn", "--msslatent", type=int, default=1, help="(Only for Model ID 11) Use the latent as one of the MSS level.")
ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.")
ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=0, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. If False, interp will be applied after conv.")
ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.")
ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs))
ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.")
ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.")
ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired")
ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).")
ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.")
ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.")
ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2")
ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. Default 1")
#WnB related params
ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not")
ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project")
ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity")
ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group")
ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID")
ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None")
ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients")
return ap.parse_args()
args = parseARGS()
# os.environ["TMPDIR"] = "/scratch/schatter/tmp"
# os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.set_num_threads(1)
random.seed(args.seed)
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
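# Full determinism: Python, NumPy and Torch are all seeded above, and cuDNN is
# forced to deterministic kernels (benchmark mode would otherwise pick
# algorithms non-deterministically per run).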
if __name__ == "__main__" :
args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(',')))
args.homepath = os.path.expanduser("~/Documents")
if args.patchsize:
args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(',')))
if args.patchstride:
args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(',')))
if args.inshape:
args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(',')))
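    # e.g. --inshape "(256,256,30)" is parsed into the tuple (256, 256, 30); the same
    # string-to-tuple parsing is applied to --scalefact and the patch arguments above.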
args.chkpoint = os.path.join(args.outpath, args.outfolder, args.modelname, args.modelname)
if args.modelbest:
print('best model testing')
args.chkpoint += "_" + args.beststring + ".pth.tar"
else:
args.chkpoint += ".pth.tar"
if args.patchstride:
args.modelname += "_infstr" + "c".join(list(map(str, args.patchstride)))
args.modelname = args.modelname.replace(args.usfolder+"_", "")
print("Testing: "+args.modelname)
if args.modelid == 2:
SRCNN3D = SRCNN3Dv2
elif args.modelid == 3:
SRCNN3D = SRCNN3Dv3
if args.medianloss:
loss_reducer = statistics.median
else:
loss_reducer = statistics.mean
dir_path = args.dataset + args.usfolder+ '/' + args.us + '/'
label_dir_path = args.dataset + args.hrfolder + '/'
log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', args.modelname)
save_path = os.path.join(args.outpath, args.outfolder, args.modelname, args.outtype)
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
tb_writer = SummaryWriter(log_dir = log_path)
os.makedirs(save_path, exist_ok=True)
logname = os.path.join(args.homepath, 'testlog_'+args.modelname+'.txt')
logging.basicConfig(filename=logname,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
# transforms = [tio.transforms.RescaleIntensity((0, 1))]
transforms = []
testDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element
stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed
patch_size_us=None, return_coords=True, pad_patch=False, pre_interpolate=args.preint, norm_data=args.prenorm, pre_load=True, noncumulative=False) #TODO implement patch_size_us if required - patch_size//scaling_factor
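    # The test set is split into one Subset per time point (tpID) below, so the time
    # points can be super-resolved in temporal order and each TP can reuse the
    # previous TP's result as its extra input channel.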
tpIDs = sorted(testDS.data.tpID.unique())
DSs = []
for tp in tpIDs:
indices = testDS.data.index[testDS.data.tpID == tp].tolist()
dsOB = torch.utils.data.Subset(testDS, indices)
DSs.append(dsOB)
# test_loader = torch.utils.data.DataLoader(testDS, batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True)
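    # The scale factors are rolled by one position, presumably to match the
    # depth-first axis order used internally (cf. the moveaxis note below, where
    # l,w,d becomes d,l,w).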
model_scale_factor=tuple(np.roll(args.scalefact,shift=1))
if args.modelid == 0:
model = UNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob))
elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3):
sys.exit("SRCNN3D is not ready for different numbers of input and output channel")
model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures)
elif (args.modelid == 4) or (args.modelid == 5):
model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures)
elif args.modelid == 6:
model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob)
elif (args.modelid == 7) or (args.modelid == 8):
model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob),
scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False,
loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp)
elif args.modelid == 9:
model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args
do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True)
elif args.modelid == 10:
model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel)
elif args.modelid == 11:
model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)),
batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob),
mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, mss_interpb4=args.mssinterpb4)
else:
sys.exit("Invalid Model ID")
if args.modelid == 5:
IsDeepSup = True
else:
IsDeepSup = False
if args.profile:
dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape)
with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof:
model(dummy)
prof.export_chrome_trace(os.path.join(save_path, 'model_trace'))
model.to(device)
chk = torch.load(args.chkpoint, map_location=device)
model.load_state_dict(chk['state_dict'])
trained_epoch = chk['epoch']
model.eval()
saver = ResSaver(os.path.join(save_path, "Results"), save_inp=True, do_norm=args.prenorm)
markers = {}
inputs = {}
results = {}
targets = {}
if not args.wnbactive:
os.environ["WANDB_MODE"] = "dryrun"
with torch.no_grad():
runningSSIM = []
test_ssim = []
test_metrics = []
print('Epoch '+ str(trained_epoch)+ ': Test')
with wandb.init(project=args.wnbproject, entity=args.wnbentity, group=args.wnbgroup, config=args, name=args.wnbprefix+args.modelname, id=args.wnbprefix+args.modelname, resume=True) as WnBRun:
wandb.watch(model, log=args.wnbmodellog, log_freq=args.wnbmodelfreq)
SRPrev = None
for tp, tpDS in enumerate(DSs):
print(f"Testing TP{tp+1}")
test_loader = torch.utils.data.DataLoader(tpDS, batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True)
for b, (lr_imgs, hr_imgs, start_coords, files, shapes, pad) in enumerate(tqdm(test_loader)):
if SRPrev is not None: #Means its not the initial TP and already super-resolved some timepoints.
lr_imgs[:,0] = SRPrev
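                    # Dual-channel DDoS input: channel 0 carries the previous time point
                    # (replaced by its super-resolved version once available), channel 1 the
                    # current low-res patch. Note that SRPrev is never reassigned in this
                    # loop; the imported process_DDoS_SRPrev helper is presumably meant to
                    # refresh it after each time point.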
                    lr_imgs = lr_imgs.contiguous().to(device, non_blocking=True)  # (N, C, ...) low-res input volumes
                    hr_imgs = hr_imgs.contiguous()#.to(device) # (N, C, ...) high-res targets, kept on the CPU
pad = pad.numpy()
with autocast(enabled=args.amp):
if type(model) in (SRCNN3D, SRCNN3Dv2, SRCNN3Dv3):
_, sr_imgs = model(lr_imgs)
elif type(model) is UNetVSeg:
sr_imgs, _, _ = model(lr_imgs)
else:
sr_imgs = model(lr_imgs)
sr_imgs = sr_imgs.type(lr_imgs.dtype)
sr_imgs = F.interpolate(sr_imgs, size=hr_imgs.shape[2:], mode='trilinear')
lr_imgs = F.interpolate(lr_imgs, size=hr_imgs.shape[2:], mode='trilinear')
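                    # Both the network output and the low-res input are brought onto the
                    # ground-truth grid via trilinear interpolation, so that patches can be
                    # stitched and metrics computed on a common shape.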
tmp_in = lr_imgs.cpu().detach()#.numpy()
tmp_res = sr_imgs.cpu().detach()#.numpy()
tmp_tar = hr_imgs#.numpy()
for i in range(hr_imgs.shape[0]):
if bool(args.patchsize) and args.patchsize[0] != -1: #TODO: implement non-iso patch-size, now only using the first element
if files[i] not in results:
markers[files[i]] = np.zeros(shapes[i][0].numpy())
inputs[files[i]] = np.zeros(shapes[i][0].numpy())
results[files[i]] = np.zeros(shapes[i][0].numpy())
targets[files[i]] = np.zeros(shapes[i][0].numpy())
(startIndex_depth, startIndex_length, startIndex_width) = start_coords[i][0].numpy() #because of moveaxis, l,w,d has become d,l,w
if pad[i].any():
tin = F.pad(tmp_in[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy()
tres = F.pad(tmp_res[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy()
ttar = F.pad(tmp_tar[i].unsqueeze(0), tuple(-pad[i])).squeeze().numpy()
else:
tin = tmp_in[i].squeeze().numpy()
tres = tmp_res[i].squeeze().numpy()
ttar = tmp_tar[i].squeeze().numpy()
                            tin = tin[1,...] #TODO make it configurable. Currently it's (prevTPPatch, patch)
markers[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += 1
inputs[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(tin, 0, -1)
results[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(tres, 0, -1)
targets[files[i]][startIndex_length:startIndex_length+args.patchsize[0], startIndex_width:startIndex_width+args.patchsize[1], startIndex_depth:startIndex_depth+args.patchsize[2]] += np.moveaxis(ttar, 0, -1)
else:
                            inputs[files[i]] = np.moveaxis(tmp_in[i,1,...].squeeze().numpy(), 0, -1) #TODO make it configurable. Currently it's (prevTPPatch, patch)
results[files[i]] = np.moveaxis(tmp_res[i,0,...].squeeze().numpy(), 0, -1)
targets[files[i]] = np.moveaxis(tmp_tar[i,0,...].squeeze().numpy(), 0, -1)
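        # Overlap-add reconstruction: overlapping patches were summed into the volumes
        # above and `markers` counted how many patches covered each voxel, so the
        # division below turns the sums into per-voxel averages.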
if bool(args.patchsize) and args.patchsize[0] != -1:
for f in inputs.keys():
inputs[f] = np.divide(inputs[f], markers[f])
results[f] = np.divide(results[f], markers[f])
targets[f] = np.divide(targets[f], markers[f])
for i, filename in enumerate(results.keys()):
out = results[filename]
inp = inputs[filename]
gt = targets[filename]
metrics = saver.CalcNSave(out, inp, gt, filename, already_numpy=True)
if metrics is not None:
metrics['file'] = filename
test_metrics.append(metrics)
ssim = round(metrics['SSIMOut'],4)
test_ssim.append(ssim)
runningSSIM.append(ssim)
logging.info('[%d/%d] Test SSIM: %.4f' % (i, len(testDS), ssim))
#For tensorboard
tb_writer.add_scalar('Test/SSIM', loss_reducer(runningSSIM), i)
wandb.log({"TestEpoch":trained_epoch, "TestSSIM":loss_reducer(runningSSIM)})#, step=niter)
runningSSIM = []
if len(test_metrics) > 0:
print("Avg SSIM: "+str(loss_reducer(test_ssim)))
WnBRun.summary["AvgTestSSIM"] = loss_reducer(test_ssim)
df = pd.DataFrame.from_dict(test_metrics)
df.to_csv(os.path.join(save_path, 'Results.csv'), index=False)
| 21,258 | 59.566952 | 240 | py |
DDoS | DDoS-master/train_DDoS_baseline.py | import argparse
import logging
import math
import os
import random
import statistics
import sys
import numpy as np
import torch
import torch.autograd.profiler as profiler
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchio as tio
from torch.cuda.amp import GradScaler, autocast
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import wandb
from models import *
from models.ReconResNet import ResNet
from models.ShuffleUNet.net import ShuffleUNet
from models.ThisNewNet import ThisNewNet
from utils.data import *
from utils.datasets_dyn import SRDataset
from utils.pLoss.perceptual_loss import PerceptualLoss
from utils.utilities import getSSIM, tensorboard_images
__author__ = "Soumick Chatterjee"
__copyright__ = "Copyright 2022, Faculty of Computer Science, Otto von Guericke University Magdeburg, Germany"
__credits__ = ["Soumick Chatterjee", "Chompunuch Sarasaen"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Soumick Chatterjee"
__email__ = "soumick.chatterjee@ovgu.de"
__status__ = "Production"
modelIDs = {
0: "UNET",
1: "SRCNN",
2: "SRCNNv2",
3: "SRCNNv3",
4: "UNETvSeg",
5: "UNETvSegDS",
6: "DenseNet",
7: "UNETSRCNN",
8: "SRCNNUNET",
9: "ReconResNet",
10: "ShuffleUNet",
11: "UNETMSS",
}
lossIDs = {
0: "pLoss",
1: "MAE",
2: "MultiSSIM",
3: "SSIM3D"
}
def parseARGS():
ap = argparse.ArgumentParser()
ap.add_argument("-g", "--gpu", default="0", help="GPU ID(s).")
ap.add_argument("--seed", default=2020, type=int, help="Seed")
# ap.add_argument("-ds", "--dataset", default=r'/mnt/MEMoRIAL/MEMoRIAL_SharedStorage_M1.2+4+7/Chompunuch/PhD/Data/CHAOSwoT2Dyn/newSet/', help="Path to Dataset Folder.")
ap.add_argument("-ds", "--dataset", default=r'/home/schatter/Soumick/Data/Chimp/CHAOSwoT2Dyn/newSet/', help="Path to Dataset Folder.")
ap.add_argument("-us", "--us", default='Center4MaskWoPad', help="Undersample.")
ap.add_argument("-s", "--scalefact", default='(1,1,1)', help="Scaling Factor. For Zero padded data, set the dim to 1. [As a 3 valued tuple, factor for each dim. Supply seperated by coma or as a tuple, no spaces in between.].")
ap.add_argument("-uf", "--usfolder", default='usTrain', help="Undersampled Folder.")
ap.add_argument("-hf", "--hrfolder", default='hrTrain', help="HighRes (Fully-sampled) Folder.")
ap.add_argument("-o", "--outfolder", default='dynDualChn', help="Output Folder.")
ap.add_argument("-ms", "--modelsuffix", default='fullBaseline', help="Any Suffix To Add with the Model Name.")
ap.add_argument("-bs", "--batchsize", type=int, default=1, help="Batch Size.")
ap.add_argument("-nw", "--nworkers", type=int, default=0, help="Number of Workers.")
ap.add_argument("-cp", "--chkpoint", default=None, help="Checkpoint (of the current training) to Load.")
ap.add_argument("-cpft", "--chkpointft", default=None, help="(To be used for Fine-Tuning) Checkpoint to Load for Fine-Tuning.")
ap.add_argument("-c", "--cuda", type=bool, default=True, help="Use CUDA.")
ap.add_argument("-mg", "--mulgpu", type=bool, default=False, help="Use Multiple GPU.")
ap.add_argument("-amp", "--amp", type=bool, default=True, help="Use AMP.")
ap.add_argument("-v", "--val", type=bool, default=True, help="Do Validation.")
ap.add_argument("-vp", "--valdsper", type=float, default=0.3, help="Percentage of the DS to be used for Validation.")
ap.add_argument("-p", "--profile", type=bool, default=False, help="Do Model Profiling.")
ap.add_argument("-ep", "--epochs", type=int, default=100, help="Total Number of Epochs. To use Number of Iterations, set it to None")
ap.add_argument("-it", "--iterations", type=int, default=1e6, help="Total Number of Iterations. To be used if number of Epochs is None")
ap.add_argument("-lr", "--lr", type=float, default=1e-4, help="Total Number of Epochs.")
ap.add_argument("-ps", "--patchsize", default=None, help="Patch Size. Supply seperated by coma or as a tuple, no spaces in between. Set it to None if not desired.")
ap.add_argument("-pst", "--patchstride", default='(12,12,6)', help="Stride of patches, to be used during validation")
ap.add_argument("-l", "--logfreq", type=int, default=10, help="log Frequency.")
ap.add_argument("-sf", "--savefreq", type=int, default=1, help="saving Frequency.")
ap.add_argument("-ml", "--medianloss", type=int, default=True, help="Use Median to get loss value (Final Reduction).")
ap.add_argument("-mid", "--modelid", type=int, default=0, help="Model ID."+str(modelIDs))
ap.add_argument("-mbn", "--batchnorm", type=bool, default=False, help="(Only for Model ID 0, 11) Do BatchNorm.")
ap.add_argument("-mum", "--upmode", default='upsample', help="(Only for Model ID 0, 11) UpMode for model ID 0 and 11: [upconv, upsample], for model ID 9: [convtrans, <interp algo>]")
ap.add_argument("-mdp", "--mdepth", type=int, default=3, help="(Only for Model ID 0, 6, 11) Depth of the Model.")
ap.add_argument("-d", "--dropprob", type=float, default=0.0, help="(Only for Model ID 0, 6, 11) Dropout Probability.")
ap.add_argument("-inc", "--inchannel", type=int, default=1, help="Number of Channels in the Data.")
ap.add_argument("-otc", "--outchannel", type=int, default=1, help="Number of Channels in the Data.")
ap.add_argument("-mslvl", "--msslevel", type=int, default=1, help="(Only for Model ID 11) Depth of the Model.")
ap.add_argument("-msltn", "--msslatent", type=int, default=0, help="(Only for Model ID 11) Use the latent as one of the MSS level.")
ap.add_argument("-msup", "--mssup", default="trilinear", help="(Only for Model ID 11) Interpolation to use on the MSS levels.")
ap.add_argument("-msinb4", "--mssinterpb4", type=int, default=1, help="(Only for Model ID 11) Apply Interpolation before applying conv for the MSS levels. If False, interp will be applied after conv.")
ap.add_argument("-is", "--inshape", default='(256,256,30)', help="Input Shape. Supply seperated by coma or as a tuple, no spaces in between. Will only be used if Patch Size is None.")
ap.add_argument("-f", "--nfeatures", type=int, default=64, help="(Not for DenseNet) N Starting Features of the Network.")
ap.add_argument("-lid", "--lossid", type=int, default=0, help="Loss ID."+str(lossIDs))
ap.add_argument("-plt", "--plosstyp", default="L1", help="(Only for Loss ID 0) Perceptual Loss Type.")
ap.add_argument("-pll", "--plosslvl", type=int, default=3, help="(Only for Loss ID 0) Perceptual Loss Level.")
ap.add_argument("-lrd", "--lrdecrate", type=int, default=1, help="(To be used for Fine-Tuning) Factor by which lr will be divided to find the actual lr. Set it to 1 if not desired")
ap.add_argument("-ft", "--finetune", type=int, default=0, help="Is it a Fine-tuning traing or not (main-train).")
ap.add_argument("-ftep", "--fteprt", type=float, default=0.00, help="(To be used for Fine-Tuning) Fine-Tune Epoch Rate.")
ap.add_argument("-ftit", "--ftitrt", type=float, default=0.10, help="(To be used for Fine-Tuning, if fteprt is None) Fine-Tune Iteration Rate.")
ap.add_argument("-int", "--preint", default="trilinear", help="Pre-interpolate before sending it to the Network. Set it to None if not needed.")
ap.add_argument("-nrm", "--prenorm", default=True, type=bool, help="Rescale intensities beteen 0 and 1")
ap.add_argument("-tls", "--tnnlslc", type=int, default=2, help="Solo per ThisNewNet. loss_slice_count. Default 2")
ap.add_argument("-tli", "--tnnlinp", type=int, default=1, help="Solo per ThisNewNet. loss_inplane. Default 1")
#WnB related params
ap.add_argument("-wnb", "--wnbactive", type=bool, default=True, help="WandB: Whether to use or not")
ap.add_argument("-wnbp", "--wnbproject", default='SuperResMRI', help="WandB: Name of the project")
ap.add_argument("-wnbe", "--wnbentity", default='mickchimp', help="WandB: Name of the entity")
ap.add_argument("-wnbg", "--wnbgroup", default='dynDualChnFullVol', help="WandB: Name of the group")
ap.add_argument("-wnbpf", "--wnbprefix", default='', help="WandB: Prefix for TrainID")
ap.add_argument("-wnbml", "--wnbmodellog", default='all', help="WandB: While watching the model, what to save: gradients, parameters, all, None")
ap.add_argument("-wnbmf", "--wnbmodelfreq", type=int, default=100, help="WandB: The number of steps between logging gradients")
return ap.parse_args()
args = parseARGS()
# os.environ["TMPDIR"] = "/scratch/schatter/tmp"
# os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.set_num_threads(1)
random.seed(args.seed)
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if __name__ == "__main__" :
args.scalefact = tuple(map(int, args.scalefact.replace('(','').replace(')','').split(',')))
args.homepath = os.path.expanduser("~/Documents")
if args.patchsize:
args.patchsize = tuple(map(int, args.patchsize.replace('(','').replace(')','').split(',')))
if args.patchstride:
args.patchstride = tuple(map(int, args.patchstride.replace('(','').replace(')','').split(',')))
if args.inshape:
args.inshape = tuple(map(int, args.inshape.replace('(','').replace(')','').split(',')))
args.modelname = args.usfolder + "_" + modelIDs[args.modelid] + args.modelsuffix
if args.modelid == 0 or args.modelid == 6 or args.modelid == 11:
args.modelname += "do" + str(args.dropprob) + "dp" + str(args.mdepth)
if args.modelid == 0 or args.modelid == 9 or args.modelid == 11:
args.modelname += args.upmode
if args.batchnorm:
args.modelname += "BN"
if args.modelid == 11:
args.modelname += "MSS"+str(args.msslevel)
args.modelname += "Latent" if args.msslatent else "NoLatent"
args.modelname += args.mssup
args.modelname += "InterpB4" if args.mssinterpb4 else "NoInterpB4"
trainID = args.modelname + '_' + args.us + '_' + lossIDs[args.lossid]
if args.lossid == 0:
trainID += args.plosstyp + 'lvl' + str(args.plosslvl)
if args.finetune:
trainID += "_FT_lrdec" + str(args.lrdecrate)
if args.fteprt:
trainID += "_eprt" + str(args.fteprt)
else:
trainID += "_itrt" + str(args.ftitrt)
print("Training: "+trainID)
if args.modelid == 2:
SRCNN3D = SRCNN3Dv2
elif args.modelid == 3:
SRCNN3D = SRCNN3Dv3
if args.medianloss:
loss_reducer = statistics.median
else:
loss_reducer = statistics.mean
dir_path = args.dataset + args.usfolder+ '/' + args.us + '/'
label_dir_path = args.dataset + args.hrfolder + '/'
log_path = os.path.join(args.dataset, args.outfolder, 'TBLogs', trainID)
save_path = os.path.join(args.dataset, args.outfolder, trainID)
device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu")
tb_writer = SummaryWriter(log_dir = log_path)
os.makedirs(save_path, exist_ok=True)
logname = os.path.join(args.homepath, 'log_'+trainID+'.txt')
logging.basicConfig(filename=logname,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
transforms = []
if not args.patchsize:
transforms.append(tio.transforms.CropOrPad(target_shape=args.inshape))
trainDS = SRDataset(logger=logging, patch_size=args.patchsize[0] if args.patchsize else -1, dir_path=dir_path, label_dir_path=label_dir_path, #TODO: implement non-iso patch-size, now only using the first element
stride_depth=args.patchstride[2], stride_length=args.patchstride[0], stride_width=args.patchstride[1], Size=None, fly_under_percent=None, #TODO: implement fly_under_percent, if needed
patch_size_us=None, pre_interpolate=args.preint, norm_data=args.prenorm, pre_load=False, pad_patch=False) #TODO implement patch_size_us if required - patch_size//scaling_factor
model_scale_factor=tuple(np.roll(args.scalefact,shift=1))
if args.val:
train_size = int((1-args.valdsper) * len(trainDS))
val_size = len(trainDS) - train_size
trainDS, valDS = torch.utils.data.random_split(trainDS, [train_size, val_size])
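        # random_split draws a random train/val partition, but the global seeding at
        # module level makes the split reproducible across runs.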
else:
valDS = None
if bool(args.patchsize):
args.inshape = args.patchsize
train_loader = DataLoader(dataset=trainDS, batch_size=args.batchsize,shuffle=True, num_workers=args.nworkers, pin_memory=True)
val_loader = None if not args.val else DataLoader(dataset=valDS,batch_size=args.batchsize,shuffle=False, num_workers=args.nworkers, pin_memory=True)
if args.modelid == 0:
model = UNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)), batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob))
elif (args.modelid == 1) or (args.modelid == 2) or (args.modelid == 3):
sys.exit("SRCNN3D is not ready for different numbers of input and output channel")
model = SRCNN3D(n_channels=args.nchannel, scale_factor=model_scale_factor, num_features=args.nfeatures)
elif (args.modelid == 4) or (args.modelid == 5):
model = UNetVSeg(in_ch=args.inchannel, out_ch=args.outchannel, n1=args.nfeatures)
elif args.modelid == 6:
model = DenseNet(model_depth=args.mdepth, n_input_channels=args.inchannel, num_classes=args.outchannel, drop_rate=args.dropprob)
elif (args.modelid == 7) or (args.modelid == 8):
model = ThisNewNet(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob),
scale_factor=model_scale_factor, num_features=args.nfeatures, sliceup_first=True if args.modelid==8 else False,
loss_slice_count=args.tnnlslc, loss_inplane=args.tnnlinp)
elif args.modelid == 9:
model=ResNet(in_channels=args.inchannel, out_channels=args.outchannel, res_blocks=4, starting_nfeatures=args.nfeatures, updown_blocks=2, is_relu_leaky=True, #TODO: put all params as args
do_batchnorm=args.batchnorm, res_drop_prob=0.2, is_replicatepad=0, out_act="sigmoid", forwardV=0, upinterp_algo='convtrans' if args.upmode == "upconv" else "trilinear", post_interp_convtrans=True, is3D=True)
elif args.modelid == 10:
model=ShuffleUNet(in_ch=args.inchannel, num_features=args.nfeatures, out_ch=args.outchannel)
elif args.modelid == 11:
model = UNetMSS(in_channels=args.inchannel, n_classes=args.outchannel, depth=args.mdepth, wf=round(math.log(args.nfeatures,2)),
batch_norm=args.batchnorm, up_mode=args.upmode, dropout=bool(args.dropprob),
mss_level=args.msslevel, mss_fromlatent=args.msslatent, mss_up=args.mssup, mss_interpb4=args.mssinterpb4)
else:
sys.exit("Invalid Model ID")
if args.modelid == 5:
IsDeepSup = True
else:
IsDeepSup = False
if args.profile:
dummy = torch.randn(args.batchsize, args.inchannel, *args.inshape)
with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof:
model(dummy)
prof.export_chrome_trace(os.path.join(save_path, 'model_trace'))
args.lr = args.lr/args.lrdecrate
optimizer = optim.Adam(params=filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
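    # For fine-tuning runs the base learning rate is divided by lrdecrate
    # (1 leaves it unchanged); only parameters with requires_grad are optimised.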
model.to(device)
if args.lossid == 0:
if args.outchannel != 1:
sys.exit("Perceptual Loss used here only works for 1 channel images")
loss_func = PerceptualLoss(device=device, loss_model="unet3Dds", resize=None, loss_type=args.plosstyp, n_level=args.plosslvl)
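        # Perceptual loss: distances (plosstyp, e.g. L1) between feature maps of a
        # pretrained 3D network ("unet3Dds"), accumulated up to plosslvl levels.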
elif args.lossid == 1:
loss_func = nn.L1Loss(reduction='mean')
elif args.lossid == 2:
loss_func = MultiSSIM(data_range=1, n_channels=args.outchannel, reduction='mean').to(device)
elif args.lossid == 3:
loss_func = SSIM(data_range=1, channel=args.outchannel, spatial_dims=3).to(device)
else:
sys.exit("Invalid Loss ID")
if (args.lossid == 0 and args.plosstyp == "L1") or (args.lossid == 1):
IsNegLoss = False
else:
IsNegLoss = True
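    # Similarity-based losses (the SSIM variants) are scores to be maximised, so
    # they are negated before backprop; L1-style losses are minimised directly.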
if (args.modelid == 7) or (args.modelid == 8):
model.loss_func = loss_func
scaler = GradScaler(enabled=args.amp)
if args.chkpoint:
chk = torch.load(args.chkpoint, map_location=device)
elif args.finetune:
if args.chkpointft:
chk = torch.load(args.chkpointft, map_location=device)
else:
sys.exit("Finetune can't be performed if chkpointft not supplied")
else:
chk = None
start_epoch = 0
best_loss = float('-inf') if IsNegLoss else float('inf')
if chk is not None:
model.load_state_dict(chk['state_dict'])
optimizer.load_state_dict(chk['optimizer'])
scaler.load_state_dict(chk['AMPScaler'])
best_loss = chk['best_loss']
start_epoch = chk['epoch'] + 1
iterations = chk['iterations']
        main_train_epoch = (chk['main_train_epoch'] + 1) if 'main_train_epoch' in chk else start_epoch # only used for fine-tuning
if args.finetune:
if args.fteprt:
                args.epochs = int(main_train_epoch*(1+args.fteprt))
else:
args.iterations = int(iterations*args.ftitrt)
n_ft_ep = int(args.iterations // len(train_loader))
                args.epochs = main_train_epoch + n_ft_ep
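            # Fine-tuning budget: training is extended either by fteprt (a fraction of
            # the main-training epochs) or by ftitrt (a fraction of the main-training
            # iterations, converted to whole epochs above).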
if args.epochs is None:
args.epochs = int(args.iterations // len(train_loader) + 1)
if start_epoch >= args.epochs:
        logging.error('Training should run for at least one epoch. Adjusting to perform a 1-epoch training')
args.epochs = start_epoch+1
if not args.wnbactive:
os.environ["WANDB_MODE"] = "dryrun"
with wandb.init(project=args.wnbproject, entity=args.wnbentity, group=args.wnbgroup, config=args, name=args.wnbprefix+trainID, id=args.wnbprefix+trainID, resume=True) as WnBRun:
wandb.watch(model, log=args.wnbmodellog, log_freq=args.wnbmodelfreq)
logging.info('Training Epochs: from {0} to {1}'.format(start_epoch, args.epochs-1))
for epoch in range(start_epoch, args.epochs):
#Train
model.train()
runningLoss = []
train_loss = []
print('Epoch '+ str(epoch)+ ': Train')
for i, (images, gt) in enumerate(tqdm(train_loader)):
images = images[:,1,...].unsqueeze(1).to(device)
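                # Baseline: channel 0 (the previous time point) is discarded and only
                # the current low-res volume is fed to the network, in contrast to the
                # dual-channel DDoS setup.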
gt = gt.to(device)
with autocast(enabled=args.amp):
if type(model) is SRCNN3D:
output1, output2 = model(images)
loss1 = loss_func(output1, gt)
loss2 = loss_func(output2, gt)
loss = loss2 + loss1
elif type(model) is UNetVSeg:
if IsDeepSup:
sys.exit("Not Implimented yet")
else:
out, _, _ = model(images)
loss = loss_func(out, gt)
elif type(model) is ThisNewNet:
out, loss = model(images, gt=gt)
elif type(model) is UNetMSS:
out, mssout = model(images)
loss = loss_func(out, gt)
for mss in range(len(mssout)):
loss += model.mss_coeff[mss] * loss_func(mssout[mss], gt)
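                        # Multi-scale supervision: the total loss is the main-output loss
                        # plus a weighted (mss_coeff) loss on each intermediate decoder
                        # output, all compared against the same ground truth.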
else:
out = model(images)
loss = loss_func(out, gt)
if IsNegLoss:
loss = -loss
optimizer.zero_grad()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
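                    # AMP bookkeeping: the scaler multiplies the loss before backward() to
                    # avoid fp16 gradient underflow, unscales it inside step(), and adapts
                    # the scale factor in update().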
loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4)
train_loss.append(loss)
runningLoss.append(loss)
logging.info('[%d/%d][%d/%d] Train Loss: %.4f' % ((epoch+1), args.epochs, i, len(train_loader), loss))
del gt, out, loss
torch.cuda.empty_cache()
if i % args.logfreq == 0:
niter = epoch*len(train_loader)+i
tb_writer.add_scalar('Train/Loss', loss_reducer(runningLoss), niter)
wandb.log({"Epoch":epoch, "TrainLoss":loss_reducer(runningLoss)})#, step=niter)
# tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'train')
runningLoss = []
if args.finetune or (epoch % args.savefreq == 0):
checkpoint = {
'epoch': epoch,
'iterations': (epoch+1)*len(train_loader),
'best_loss': best_loss,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'AMPScaler': scaler.state_dict()
}
torch.save(checkpoint, os.path.join(save_path, trainID+".pth.tar"))
if args.modelid != 9 and args.modelid != 6:
torch.onnx.export(model, images, trainID+".onnx", input_names=["LRCurrTP"], output_names=["SuperResolvedCurrTP"])
wandb.save(trainID+".onnx")
del images
tb_writer.add_scalar('Train/EpochLoss', loss_reducer(train_loss), epoch)
wandb.log({"TrainEpochLoss":loss_reducer(train_loss)})#, step=epoch)
torch.cuda.empty_cache()
#Validate
if val_loader:
model.eval()
with torch.no_grad():
runningLoss = []
val_loss = []
runningAcc = []
val_acc = []
print('Epoch '+ str(epoch)+ ': Val')
for i, (images, gt) in enumerate(tqdm(val_loader)):
images = images[:,1,...].unsqueeze(1).to(device)
gt = gt.to(device)
with autocast(enabled=args.amp):
if type(model) is SRCNN3D:
output1, output2 = model(images)
loss1 = loss_func(output1, gt)
loss2 = loss_func(output2, gt)
loss = loss2 + loss1
elif type(model) is UNetVSeg:
if IsDeepSup:
sys.exit("Not Implimented yet")
else:
out, _, _ = model(images)
loss = loss_func(out, gt)
elif type(model) is ThisNewNet:
out, loss = model(images, gt=gt)
else:
out = model(images)
loss = loss_func(out, gt)
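                                # Note: UNetMSS falls into this generic branch at validation
                                # time; if the model also returns its MSS outputs in eval
                                # mode, the tuple would need to be unpacked here first.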
ssim = getSSIM(gt.detach().cpu().numpy(), out.detach().cpu().numpy(), data_range=1)
loss = round((-loss).data.item(),4) if IsNegLoss else round(loss.data.item(),4)
val_loss.append(loss)
runningLoss.append(loss)
val_acc.append(ssim)
runningAcc.append(ssim)
logging.info('[%d/%d][%d/%d] Val Loss: %.4f' % ((epoch+1), args.epochs, i, len(val_loader), loss))
del gt, out, loss
torch.cuda.empty_cache()
#For tensorboard
if i % args.logfreq == 0:
niter = epoch*len(val_loader)+i
tb_writer.add_scalar('Val/Loss', loss_reducer(runningLoss), niter)
wandb.log({"Epoch":epoch, "ValLoss":loss_reducer(runningLoss)})#, step=niter)
tb_writer.add_scalar('Val/SSIM', loss_reducer(runningAcc), niter)
wandb.log({"Epoch":epoch, "ValSSIM":loss_reducer(runningAcc)})#, step=niter)
# tensorboard_images(tb_writer, inp, out.detach(), gt, epoch, 'val')
runningLoss = []
runningAcc = []
if (loss_reducer(val_loss) < best_loss and not IsNegLoss) or (loss_reducer(val_loss) > best_loss and IsNegLoss):
best_loss = loss_reducer(val_loss)
WnBRun.summary["best_loss"] = best_loss
checkpoint = {
'epoch': epoch,
'best_loss': best_loss,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'AMPScaler': scaler.state_dict()
}
torch.save(checkpoint, os.path.join(save_path, trainID+"_best.pth.tar"))
if args.modelid != 9 and args.modelid != 6:
torch.onnx.export(model, images, trainID+"_best.onnx", input_names=["LRCurrTP"], output_names=["SuperResolvedCurrTP"])
wandb.save(trainID+"_best.onnx")
del images
tb_writer.add_scalar('Val/EpochLoss', loss_reducer(val_loss), epoch)
wandb.log({"ValEpochLoss":loss_reducer(val_loss)})#, step=epoch)
tb_writer.add_scalar('Val/EpochSSIM', loss_reducer(val_acc), epoch)
wandb.log({"ValEpochSSIM":loss_reducer(val_acc)})#, step=epoch)
torch.cuda.empty_cache()
| 26,396 | 53.99375 | 230 | py |