repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
eco-dqn | eco-dqn-master/src/agents/solver.py | from abc import ABC, abstractmethod
import numpy as np
import torch
class SpinSolver(ABC):
    """Abstract base class for agents that solve SpinSystem Ising problems."""

    def __init__(self, env, record_cut=False, record_rewards=False, record_qs=False, verbose=False):
        """Base initialisation of a SpinSolver.

        Args:
            env (SpinSystem): The environment the agent interacts with.
            record_cut (bool, optional): Whether to record the cut value at each step.
            record_rewards (bool, optional): Whether to record the reward at each step.
            record_qs (bool, optional): Whether to record predicted Q-values at each step.
            verbose (bool, optional): The logging verbosity.

        Attributes:
            env (SpinSystem): The environment the agent interacts with.
            verbose (bool): The logging verbosity.
            total_reward (float): The cumulative total reward received.
        """
        self.env = env
        self.verbose = verbose
        self.record_cut = record_cut
        self.record_rewards = record_rewards
        self.record_qs = record_qs
        self.total_reward = 0

    def reset(self):
        """Zero the cumulative reward and reset the underlying environment."""
        self.total_reward = 0
        self.env.reset()

    def solve(self, *args):
        """Repeatedly call `step` until the episode terminates.

        Args:
            *args: Forwarded to the `step` method of the concrete solver.

        Returns:
            (float): The cumulative total reward received.
        """
        while True:
            reward, done = self.step(*args)
            self.total_reward += reward
            if done:
                break
        return self.total_reward

    @abstractmethod
    def step(self, *args):
        """Take the next action (flip the next spin).

        Each subclass must implement this; it returns a (reward, done) pair.

        Raises:
            NotImplementedError: If the subclass does not override it.
        """
        raise NotImplementedError()
class Greedy(SpinSolver):
    """Solver that always flips the spin with the largest immediate reward."""

    def __init__(self, *args, **kwargs):
        """Initialise a greedy solver.

        Args:
            *args: Passed through to the SpinSolver constructor.
        """
        super().__init__(*args, **kwargs)

    def step(self):
        """Take the action that maximises the immediate reward.

        If no action yields a non-negative immediate reward, no action is
        taken and the episode is declared done.

        Returns:
            reward (float): The reward received.
            done (bool): Whether the environment is in a terminal state after
                the action is taken.
        """
        rewards_avaialable = self.env.get_immeditate_rewards_avaialable()

        if self.env.reversible_spins:
            chosen = rewards_avaialable.argmax()
        else:
            # Mask out spins that have already been flipped so they cannot be chosen.
            masked_rewards_avaialable = rewards_avaialable.copy()
            np.putmask(masked_rewards_avaialable,
                       self.env.get_observation()[0, :] != self.env.get_allowed_action_states(),
                       -100)
            chosen = masked_rewards_avaialable.argmax()

        if rewards_avaialable[chosen] < 0:
            # No spin flip improves the objective: stop without acting.
            return 0, True

        _, reward, done, _ = self.env.step(chosen)
        return reward, done
class Random(SpinSolver):
    """Solver that flips uniformly random spins."""

    def step(self):
        """Take one action sampled uniformly from the action space.

        Returns:
            reward (float): The reward received.
            done (bool): Whether the environment is in a terminal state after
                the action is taken.
        """
        random_action = self.env.action_space.sample()
        _, reward, done, _ = self.env.step(random_action)
        return reward, done
class Network(SpinSolver):
    """Solver that picks actions with a (pre-trained) Q-network, epsilon-greedily."""

    # Probability of taking a random (allowed) action instead of the greedy one.
    epsilon = 0.

    def __init__(self, network, *args, **kwargs):
        """Initialise a network-only solver.

        Args:
            network: The Q-network used to score actions.
            *args: Passed through to the SpinSolver constructor.

        Attributes:
            current_observation: The last observation of the environment,
                used to choose the next action.
            history: Per-step records (action plus optional cut/reward/Q data).
        """
        super().__init__(*args, **kwargs)

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.network = network.to(self.device)
        self.network.eval()

        obs = self.env.get_observation()
        self.current_observation = torch.FloatTensor(obs).to(self.device)
        self.history = []

    def reset(self, spins=None, clear_history=True):
        """Reset the environment (optionally to given spins) and the reward/history."""
        obs = self.env.reset() if spins is None else self.env.reset(spins)
        self.current_observation = torch.FloatTensor(obs).to(self.device)
        self.total_reward = 0
        if clear_history:
            self.history = []

    @torch.no_grad()
    def step(self):
        """Choose one action epsilon-greedily from the network's Q-values.

        Returns:
            reward (float): The reward received.
            done (bool): Whether the environment reached a terminal state.
        """
        # Q-values predicted by the network for the current observation.
        qs = self.network(self.current_observation)

        explore = not (np.random.uniform(0, 1) >= self.epsilon)
        if self.env.reversible_spins:
            if explore:
                # Random action.
                action = np.random.randint(0, self.env.action_space.n)
            else:
                # Action that maximises the Q-function.
                action = qs.argmax().item()
        else:
            # Only spins still in an allowed state may be flipped.
            allowed = (self.current_observation[0, :] == self.env.get_allowed_action_states()).nonzero()
            if explore:
                # Random allowed action.
                action = allowed[np.random.randint(0, len(allowed))].item()
            else:
                # Allowed action that maximises the Q-function.
                action = allowed[qs[allowed].argmax().item()].item()

        if action is not None:
            next_obs, reward, done, _ = self.env.step(action)
            self.current_observation = torch.FloatTensor(next_obs).to(self.device)
        else:
            reward = 0
            done = True

        # Build the per-step record (extras only when any recording flag is set).
        record = [action]
        if self.record_cut or self.record_rewards:
            if self.record_cut:
                record += [self.env.calculate_cut()]
            if self.record_rewards:
                record += [reward]
            if self.record_qs:
                record += [qs]
                record += [self.env.get_immeditate_rewards_avaialable()]
        self.history.append(record)

        return reward, done
| 6,973 | 31.138249 | 100 | py |
eco-dqn | eco-dqn-master/src/agents/dqn/utils.py | import math
import pickle
import random
import threading
from collections import namedtuple
from enum import Enum
import numpy as np
import torch
# A single (state, action, reward, next-state, done) experience tuple as
# stored in the replay buffers.
Transition = namedtuple(
    'Transition', ['state', 'action', 'reward', 'state_next', 'done']
)


class TestMetric(Enum):
    """Metric used to score the agent during periodic evaluation."""

    CUMULATIVE_REWARD = 1
    BEST_ENERGY = 2
    ENERGY_ERROR = 3
    MAX_CUT = 4
    FINAL_CUT = 5
def set_global_seed(seed, env):
    """Seed torch, the environment, numpy and python's RNG for reproducibility."""
    for seeder in (torch.manual_seed, env.set_seed, np.random.seed, random.seed):
        seeder(seed)
class ReplayBuffer:
    """Uniform experience replay with asynchronous (threaded) batch preparation.

    After each call to `sample`, the next batch is pre-assembled on a
    background thread so it is (usually) ready by the time it is requested.
    """

    def __init__(self, capacity):
        self._capacity = capacity
        self._memory = {}
        self._position = 0

        # Background thread that pre-builds the next batch (None until the
        # first call to sample/launch_sample).
        self.next_batch_process = None
        self.next_batch_size = None
        self.next_batch_device = None
        self.next_batch = None

    def add(self, *args):
        """
        Saves a transition, overwriting the oldest entry once at capacity.
        """
        if self.next_batch_process is not None:
            # Don't add to the buffer while a sampling thread may be reading it.
            self.next_batch_process.join()
        self._memory[self._position] = Transition(*args)
        self._position = (self._position + 1) % self._capacity

    def _prepare_sample(self, batch_size, device=None):
        # Runs on the background thread: draw a uniform random batch and
        # stack the field tensors, moving them to the requested device.
        self.next_batch_size = batch_size
        self.next_batch_device = device
        batch = random.sample(list(self._memory.values()), batch_size)
        self.next_batch = [torch.stack(tensors).to(device) for tensors in zip(*batch)]
        self.next_batch_ready = True

    def launch_sample(self, *args):
        self.next_batch_process = threading.Thread(target=self._prepare_sample, args=args)
        self.next_batch_process.start()

    def sample(self, batch_size, device=None):
        """
        Samples a batch of Transitions, with the tensors already stacked
        and transfered to the specified device.

        Returns a list of tensors in the order specified in Transition.
        """
        if self.next_batch_process is None:
            # First call: nothing pre-fetched yet, so build a batch now.
            self.launch_sample(batch_size, device)
        # Always wait for the in-flight batch before reading it (the original
        # first-call path fell through without joining, racing the worker).
        self.next_batch_process.join()

        if self.next_batch_size == batch_size and self.next_batch_device == device:
            next_batch = self.next_batch
            # Kick off preparation of the following batch before returning.
            self.launch_sample(batch_size, device)
            return next_batch
        else:
            # The pre-fetched batch has the wrong size/device: rebuild it.
            # BUG FIX: the original recursed here without returning the
            # result, so a size/device change made sample() return None.
            self.launch_sample(batch_size, device)
            return self.sample(batch_size, device)

    def __len__(self):
        return len(self._memory)
class PrioritisedReplayBuffer:
    """Rank-based prioritised experience replay.

    Transitions live in a binary max-heap keyed on their last TD-error, so a
    transition's heap rank approximates its priority.  Sampling partitions the
    ranks into `batch_size` buckets of (approximately) equal probability mass,
    draws one transition per bucket, and returns importance-sampling weights
    (annealed via beta) to correct the induced bias.
    """

    def __init__(self, capacity=10000, alpha=0.7, beta0=0.5):
        # The capacity of the replay buffer.
        self._capacity = capacity
        # A binary (max-)heap of the buffer contents, sorted by the td_error <--> priority.
        self.priority_heap = {} # heap_position : [buffer_position, td_error, transition]
        # Maps a buffer position (when the transition was added) to the position of the
        # transition in the priority_heap.
        self.buffer2heap = {} # buffer_position : heap_position
        # The current position in the replay buffer. Starts at 1 for ease of binary-heap calcs.
        self._buffer_position = 1
        # Flag for when the replay buffer reaches max capacity.
        self.full = False
        # alpha shapes how strongly rank skews the sampling distribution (0 = uniform).
        self.alpha = alpha
        # beta is the importance-sampling exponent, annealed from beta0 towards 1.
        self.beta = beta0
        self.beta_step = 0
        # Cached rank-partitions and per-rank probabilities used by sample().
        self.partitions = []
        self.probabilities = []
        self.__partitions_fixed = False

    def __get_max_td_err(self):
        # NOTE(review): heap positions start at 1, so key 0 never exists and this
        # always falls into the except-branch and returns 1; priority_heap[1]
        # (the heap root / max priority) was presumably intended — confirm.
        try:
            return self.priority_heap[0][1]
        except KeyError:
            # Nothing exists in the priority heap yet!
            return 1

    def add(self, *args):
        """
        Add the transition described by *args : (state, action, reward, state_next, done), to the
        memory.
        """
        # By default a new transition has equal highest priority in the heap.
        trans = [self._buffer_position, self.__get_max_td_err(), Transition(*args)]
        try:
            # Find the heap position of the transition to be replaced
            heap_pos = self.buffer2heap[self._buffer_position]
            self.full = True # We found a transition in this buffer slot --> the memory is at capacity.
        except KeyError:
            # No transition in the buffer slot, therefore we will be adding one fresh.
            heap_pos = self._buffer_position
        # Update the heap, associated data stuctures and re-sort.
        self.__update_heap(heap_pos, trans)
        self.up_heap(heap_pos)
        if self.full:
            self.down_heap(heap_pos)
        # Iterate to the next buffer position.
        self._buffer_position = (self._buffer_position % self._capacity) + 1

    def __update_heap(self, heap_pos, val):
        """
        heapList[heap_pos] <-- val = [buffer_position, td_error, transition]
        """
        self.priority_heap[heap_pos] = val
        self.buffer2heap[val[0]] = heap_pos

    def up_heap(self, i):
        """
        Iteratively swap heap items with their parents until they are in the correct order.
        """
        if i >= 2:
            i_parent = i // 2
            if self.priority_heap[i_parent][1] < self.priority_heap[i][1]:
                tmp = self.priority_heap[i]
                self.__update_heap(i, self.priority_heap[i_parent])
                self.__update_heap(i_parent, tmp)
                self.up_heap(i_parent)

    def down_heap(self, i):
        """
        Iteratively swap heap items with their children until they are in the correct order.
        """
        i_largest = i
        left = 2 * i
        right = 2 * i + 1
        size = self._capacity if self.full else len(self)
        # NOTE(review): valid heap positions run 1..size, so `left < size`
        # excludes a child at position exactly `size` — confirm whether
        # `left <= size` / `right <= size` was intended.
        if left < size and self.priority_heap[left][1] > self.priority_heap[i_largest][1]:
            i_largest = left
        if right < size and self.priority_heap[right][1] > self.priority_heap[i_largest][1]:
            i_largest = right
        if i_largest != i:
            tmp = self.priority_heap[i]
            self.__update_heap(i, self.priority_heap[i_largest])
            self.__update_heap(i_largest, tmp)
            self.down_heap(i_largest)

    def rebalance(self):
        """
        rebalance priority_heap
        """
        sort_array = sorted(self.priority_heap.values(), key=lambda x: x[1], reverse=True)
        # reconstruct priority_queue
        self.priority_heap.clear()
        self.buffer2heap.clear()
        count = 1
        while count <= self._capacity:
            self.__update_heap(count, sort_array[count - 1])
            count += 1
        # sort the heap
        # NOTE(review): this range stops before index 1, so down_heap(1) is
        # never called here — TODO confirm (range(..., 0, -1) would include the root).
        for i in range(self._capacity // 2, 1, -1):
            self.down_heap(i)

    def update_partitions(self, num_partitions):
        """Split the ranks 1..N into `num_partitions` contiguous buckets of
        (approximately) equal probability mass and return (partitions, probabilities)."""
        # P(t_i) = p_i^alpha / Sum_k(p_k^alpha), where the priority p_i = 1 / rank_i.
        priorities = [math.pow(rank, -self.alpha) for rank in range(1, len(self.priority_heap) + 1)]
        priorities_sum = sum(priorities)
        probabilities = dict(
            [(rank0index + 1, priority / priorities_sum) for rank0index, priority in enumerate(priorities)])
        partitions = [1]
        partition_num = 1
        cum_probabilty = 0
        next_boundary = partition_num / num_partitions
        rank = 1
        # Walk the ranks, cutting a partition each time the cumulative
        # probability crosses the next equal-mass boundary.
        while partition_num < num_partitions:
            cum_probabilty += probabilities[rank]
            rank += 1
            if cum_probabilty >= next_boundary:
                partitions.append(rank)
                partition_num += 1
                next_boundary = partition_num / num_partitions
        partitions.append(len(self.priority_heap))
        # Convert the boundary list into (low, high) pairs per partition.
        partitions = [(a, b) for a, b in zip(partitions, partitions[1:])]
        return partitions, probabilities

    def update_priorities(self, buffer_positions, td_error):
        """Set new TD-errors for the given buffer positions and re-sift the heap."""
        for buf_id, td_err in zip(buffer_positions, td_error):
            heap_id = self.buffer2heap[buf_id]
            [id, _, trans] = self.priority_heap[heap_id]
            self.priority_heap[heap_id] = [id, td_err, trans]
            # The priority may have moved either way: sift both directions.
            self.down_heap(heap_id)
            self.up_heap(heap_id)

    def sample(self, batch_size, device=None):
        """Draw one transition per rank-partition; return (batch, IS-weights, buffer positions)."""
        if batch_size != len(self.partitions) or not self.__partitions_fixed:
            self.partitions, self.probabilities = self.update_partitions(batch_size)
            if self.full:
                # Once the buffer is full, the partitions no longer need to be updated
                # (as they depend only on the number of stored transitions and alpha).
                self.__partitions_fixed = True
        # Anneal the importance-sampling exponent towards 1.
        self.beta = min(self.beta + self.beta_step, 1)
        batch_ranks = [np.random.randint(low, high) for low, high in self.partitions]
        batch_buffer_positions, batch_td_errors, batch_transitions = zip(*[self.priority_heap[rank] for rank in batch_ranks])
        batch = [torch.stack(tensors).to(device) for tensors in zip(*batch_transitions)]
        N = self._capacity if self.full else len(self)
        # Note this is a column vector to match the dimensions of weights and td_target in dqn.train_step(...)
        sample_probs = torch.FloatTensor([[self.probabilities[rank]] for rank in batch_ranks])
        weights = (N * sample_probs).pow(-self.beta)
        # Normalise so the largest weight is 1 (standard PER stabilisation).
        weights /= weights.max()
        return batch, weights.to(device), batch_buffer_positions

    def configure_beta_anneal_time(self, beta_max_at_samples):
        """Set the per-sample beta increment so beta reaches 1 after the given number of samples."""
        self.beta_step = (1 - self.beta) / beta_max_at_samples

    def __len__(self):
        return len(self.priority_heap)
class Logger:
    """Accumulates scalar time-series in memory, spilling to pickle files when large."""

    def __init__(self):
        self._memory = {}
        self._saves = 0
        self._maxsize = 1000000
        self._dumps = 0

    def add_scalar(self, name, data, timestep):
        """
        Record (value, timestep) under `name`; torch tensors are unwrapped to floats.
        """
        value = data.item() if isinstance(data, torch.Tensor) else data
        self._memory.setdefault(name, []).append([value, timestep])

        self._saves += 1
        if self._saves == self._maxsize - 1:
            # Spill the current log to disk and start a fresh in-memory log.
            dump_path = 'log_data_' + str((self._dumps + 1) * self._maxsize) + '.pkl'
            with open(dump_path, 'wb') as output:
                pickle.dump(self._memory, output, pickle.HIGHEST_PROTOCOL)
            self._dumps += 1
            self._saves = 0
            self._memory = {}

    def save(self):
        """Write everything currently held in memory to 'log_data.pkl'."""
        with open('log_data.pkl', 'wb') as output:
            pickle.dump(self._memory, output, pickle.HIGHEST_PROTOCOL)
| 10,733 | 33.850649 | 125 | py |
eco-dqn | eco-dqn-master/src/agents/dqn/dqn.py | """
Implements a DQN learning agent.
"""
import os
import pickle
import random
import time
from copy import deepcopy
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from src.agents.dqn.utils import ReplayBuffer, Logger, TestMetric, set_global_seed
from src.envs.utils import ExtraAction
class DQN:
"""
# Required parameters.
envs : List of environments to use.
network : Choice of neural network.
# Initial network parameters.
init_network_params : Pre-trained network to load upon initialisation.
init_weight_std : Standard deviation of initial network weights.
# DQN parameters
double_dqn : Whether to use double DQN (DDQN).
update_target_frequency : How often to update the DDQN target network.
gamma : Discount factor.
clip_Q_targets : Whether negative Q targets are clipped (generally True/False for irreversible/reversible agents).
# Replay buffer.
replay_start_size : The capacity of the replay buffer at which training can begin.
replay_buffer_size : Maximum buffer capacity.
minibatch_size : Minibatch size.
update_frequency : Number of environment steps taken between parameter update steps.
# Learning rate
update_learning_rate : Whether to dynamically update the learning rate (if False, initial_learning_rate is always used).
initial_learning_rate : Initial learning rate.
peak_learning_rate : The maximum learning rate.
peak_learning_rate_step : The timestep (from the start, not from when training starts) at which the peak_learning_rate is found.
final_learning_rate : The final learning rate.
final_learning_rate_step : The timestep of the final learning rate.
# Optional regularization.
max_grad_norm : The norm grad to clip gradients to (None means no clipping).
weight_decay : The weight decay term for regularisation.
# Exploration
update_exploration : Whether to update the exploration rate (False would tend to be used with NoisyNet layers).
initial_exploration_rate : Inital exploration rate.
final_exploration_rate : Final exploration rate.
final_exploration_step : Timestep at which the final exploration rate is reached.
# Loss function
adam_epsilon : epsilon for ADAM optimisation.
loss="mse" : Loss function to use.
# Saving the agent
save_network_frequency : Frequency with which the network parameters are saved.
network_save_path : Folder into which the network parameters are saved.
# Testing the agent
evaluate : Whether to test the agent during training.
test_envs : List of test environments. None means the training environments (envs) are used.
test_episodes : Number of episodes at each test point.
test_frequency : Frequency of tests.
test_save_path : Folder into which the test scores are saved.
test_metric : The metric used to quantify performance.
# Other
logging : Whether to log.
seed : The global seed to set. None means randomly selected.
"""
def __init__(
self,
envs,
network,
# Initial network parameters.
init_network_params = None,
init_weight_std = None,
# DQN parameters
double_dqn = True,
update_target_frequency=10000,
gamma=0.99,
clip_Q_targets=False,
# Replay buffer.
replay_start_size=50000,
replay_buffer_size=1000000,
minibatch_size=32,
update_frequency=1,
# Learning rate
update_learning_rate = True,
initial_learning_rate = 0,
peak_learning_rate = 1e-3,
peak_learning_rate_step = 10000,
final_learning_rate = 5e-5,
final_learning_rate_step = 200000,
# Optional regularization.
max_grad_norm=None,
weight_decay=0,
# Exploration
update_exploration=True,
initial_exploration_rate=1,
final_exploration_rate=0.1,
final_exploration_step=1000000,
# Loss function
adam_epsilon=1e-8,
loss="mse",
# Saving the agent
save_network_frequency=10000,
network_save_path='network',
# Testing the agent
evaluate=True,
test_envs=None,
test_episodes=20,
test_frequency=10000,
test_save_path='test_scores',
test_metric=TestMetric.ENERGY_ERROR,
# Other
logging=True,
seed=None
):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.double_dqn = double_dqn
self.replay_start_size = replay_start_size
self.replay_buffer_size = replay_buffer_size
self.gamma = gamma
self.clip_Q_targets = clip_Q_targets
self.update_target_frequency = update_target_frequency
self.minibatch_size = minibatch_size
self.update_learning_rate = update_learning_rate
self.initial_learning_rate = initial_learning_rate
self.peak_learning_rate = peak_learning_rate
self.peak_learning_rate_step = peak_learning_rate_step
self.final_learning_rate = final_learning_rate
self.final_learning_rate_step = final_learning_rate_step
self.max_grad_norm = max_grad_norm
self.weight_decay=weight_decay
self.update_frequency = update_frequency
self.update_exploration = update_exploration,
self.initial_exploration_rate = initial_exploration_rate
self.epsilon = self.initial_exploration_rate
self.final_exploration_rate = final_exploration_rate
self.final_exploration_step = final_exploration_step
self.adam_epsilon = adam_epsilon
self.logging = logging
if callable(loss):
self.loss = loss
else:
try:
self.loss = {'huber': F.smooth_l1_loss, 'mse': F.mse_loss}[loss]
except KeyError:
raise ValueError("loss must be 'huber', 'mse' or a callable")
if type(envs)!=list:
envs = [envs]
self.envs = envs
self.env, self.acting_in_reversible_spin_env = self.get_random_env()
self.replay_buffers = {}
for n_spins in set([env.action_space.n for env in self.envs]):
self.replay_buffers[n_spins] = ReplayBuffer(self.replay_buffer_size)
self.replay_buffer = self.get_replay_buffer_for_env(self.env)
self.seed = random.randint(0, 1e6) if seed is None else seed
for env in self.envs:
set_global_seed(self.seed, env)
self.network = network().to(self.device)
self.init_network_params = init_network_params
self.init_weight_std = init_weight_std
if self.init_network_params != None:
print("Pre-loading network parameters from {}.\n".format(init_network_params))
self.load(init_network_params)
else:
if self.init_weight_std != None:
def init_weights(m):
if type(m) == torch.nn.Linear:
print("Setting weights for", m)
m.weight.normal_(0, init_weight_std)
with torch.no_grad():
self.network.apply(init_weights)
self.target_network = network().to(self.device)
self.target_network.load_state_dict(self.network.state_dict())
for param in self.target_network.parameters():
param.requires_grad = False
self.optimizer = optim.Adam(self.network.parameters(), lr=self.initial_learning_rate, eps=self.adam_epsilon,
weight_decay=self.weight_decay)
self.evaluate = evaluate
if test_envs in [None,[None]]:
# By default, test on the same environment(s) as are trained on.
self.test_envs = self.envs
else:
if type(test_envs) != list:
test_envs = [test_envs]
self.test_envs = test_envs
self.test_episodes = int(test_episodes)
self.test_frequency = test_frequency
self.test_save_path = test_save_path
self.test_metric = test_metric
self.losses_save_path = os.path.join(os.path.split(self.test_save_path)[0], "losses.pkl")
if not self.acting_in_reversible_spin_env:
for env in self.envs:
assert env.extra_action==ExtraAction.NONE, "For deterministic MDP, no extra action is allowed."
for env in self.test_envs:
assert env.extra_action==ExtraAction.NONE, "For deterministic MDP, no extra action is allowed."
self.allowed_action_state = self.env.get_allowed_action_states()
self.save_network_frequency = save_network_frequency
self.network_save_path = network_save_path
def get_random_env(self, envs=None):
if envs is None:
env = random.sample(self.envs, k=1)[0]
else:
env = random.sample(envs, k=1)[0]
return env, env.reversible_spins
def get_replay_buffer_for_env(self, env):
return self.replay_buffers[env.action_space.n]
def get_random_replay_buffer(self):
return random.sample(self.replay_buffers.items(), k=1)[0][1]
    def learn(self, timesteps, verbose=False):
        """Run the main training loop for `timesteps` environment steps.

        Transitions are collected into the replay buffer of the current
        environment; once every buffer holds at least `replay_start_size`
        transitions, the network is trained every `update_frequency` steps and
        the target network is refreshed every `update_target_frequency` steps.
        The agent is periodically evaluated and checkpointed, and the test
        scores and losses are pickled to disk at the end.
        """
        if self.logging:
            logger = Logger()

        # Initialise the state
        state = torch.as_tensor(self.env.reset())
        score = 0
        losses_eps = []
        t1 = time.time()

        test_scores = []
        losses = []

        is_training_ready = False

        for timestep in range(timesteps):

            if not is_training_ready:
                # Training starts only once EVERY per-size buffer is warm.
                if all([len(rb)>=self.replay_start_size for rb in self.replay_buffers.values()]):
                    print('\nAll buffers have {} transitions stored - training is starting!\n'.format(
                        self.replay_start_size))
                    is_training_ready=True

            # Choose action (epsilon-greedy; purely random until training is ready)
            action = self.act(state.to(self.device).float(), is_training_ready=is_training_ready)

            # Update epsilon
            # NOTE(review): check that update_exploration is stored as a plain
            # bool (a trailing comma in __init__ would make this always truthy).
            if self.update_exploration:
                self.update_epsilon(timestep)

            # Update learning rate
            if self.update_learning_rate:
                self.update_lr(timestep)

            # Perform action in environment
            state_next, reward, done, _ = self.env.step(action)

            score += reward

            # Store transition in replay buffer (everything as tensors)
            action = torch.as_tensor([action], dtype=torch.long)
            reward = torch.as_tensor([reward], dtype=torch.float)
            state_next = torch.as_tensor(state_next)
            done = torch.as_tensor([done], dtype=torch.float)

            self.replay_buffer.add(state, action, reward, state_next, done)

            if done:
                # Episode over: log it, then switch to a random training env.
                if verbose:
                    loss_str = "{:.2e}".format(np.mean(losses_eps)) if is_training_ready else "N/A"
                    print("timestep : {}, episode time: {}, score : {}, mean loss: {}, time : {} s".format(
                        (timestep+1),
                        self.env.current_step,
                        np.round(score,3),
                        loss_str,
                        round(time.time() - t1, 3)))

                if self.logging:
                    logger.add_scalar('Episode_score', score, timestep)
                self.env, self.acting_in_reversible_spin_env = self.get_random_env()
                self.replay_buffer = self.get_replay_buffer_for_env(self.env)
                state = torch.as_tensor(self.env.reset())
                score = 0
                losses_eps = []
                t1 = time.time()
            else:
                state = state_next

            if is_training_ready:

                # Update the main network
                if timestep % self.update_frequency == 0:

                    # Sample a batch of transitions
                    transitions = self.get_random_replay_buffer().sample(self.minibatch_size, self.device)

                    # Train on selected batch
                    loss = self.train_step(transitions)
                    losses.append([timestep,loss])
                    losses_eps.append(loss)

                    if self.logging:
                        logger.add_scalar('Loss', loss, timestep)

                # Periodically update target network
                if timestep % self.update_target_frequency == 0:
                    self.target_network.load_state_dict(self.network.state_dict())

            if (timestep+1) % self.test_frequency == 0 and self.evaluate and is_training_ready:
                test_score = self.evaluate_agent()
                print('\nTest score: {}\n'.format(np.round(test_score,3)))

                # Higher is better for cut/reward metrics; lower for energy metrics.
                if self.test_metric in [TestMetric.FINAL_CUT,TestMetric.MAX_CUT,TestMetric.CUMULATIVE_REWARD]:
                    best_network = all([test_score > score for t,score in test_scores])
                elif self.test_metric in [TestMetric.ENERGY_ERROR, TestMetric.BEST_ENERGY]:
                    best_network = all([test_score < score for t, score in test_scores])
                else:
                    raise NotImplementedError("{} is not a recognised TestMetric".format(self.test_metric))

                if best_network:
                    # Checkpoint the new best-performing network.
                    path = self.network_save_path
                    path_main, path_ext = os.path.splitext(path)
                    path_main += "_best"
                    if path_ext == '':
                        path_ext += '.pth'
                    self.save(path_main + path_ext)

                test_scores.append([timestep+1,test_score])

            if (timestep + 1) % self.save_network_frequency == 0 and is_training_ready:
                # Periodic (timestep-stamped) checkpoint.
                path = self.network_save_path
                path_main, path_ext = os.path.splitext(path)
                path_main += str(timestep+1)
                if path_ext == '':
                    path_ext += '.pth'
                self.save(path_main+path_ext)

        if self.logging:
            logger.save()

        # Persist the evaluation scores and training losses.
        path = self.test_save_path
        if os.path.splitext(path)[-1] == '':
            path += '.pkl'

        with open(path, 'wb+') as output:
            pickle.dump(np.array(test_scores), output, pickle.HIGHEST_PROTOCOL)
            if verbose:
                print('test_scores saved to {}'.format(path))

        with open(self.losses_save_path, 'wb+') as output:
            pickle.dump(np.array(losses), output, pickle.HIGHEST_PROTOCOL)
            if verbose:
                print('losses saved to {}'.format(self.losses_save_path))
@torch.no_grad()
def __only_bad_actions_allowed(self, state, network):
x = (state[0, :] == self.allowed_action_state).nonzero()
q_next = network(state.to(self.device).float())[x].max()
return True if q_next < 0 else False
    def train_step(self, transitions):
        """Perform one gradient step on a batch of transitions.

        Computes (double-)DQN targets -- restricted to still-allowed actions in
        the irreversible-spin setting -- and minimises self.loss between the
        predicted Q-values and the TD targets.

        Args:
            transitions: (states, actions, rewards, states_next, dones) tensors.

        Returns:
            float: the scalar loss value for this batch.
        """
        states, actions, rewards, states_next, dones = transitions

        if self.acting_in_reversible_spin_env:
            # Calculate target Q
            with torch.no_grad():
                if self.double_dqn:
                    # Double DQN: online net selects, target net evaluates.
                    greedy_actions = self.network(states_next.float()).argmax(1, True)
                    q_value_target = self.target_network(states_next.float()).gather(1, greedy_actions)
                else:
                    q_value_target = self.target_network(states_next.float()).max(1, True)[0]

        else:
            target_preds = self.target_network(states_next.float())
            # Feature-row 0 of each state holds the spin states; a spin whose
            # state differs from allowed_action_state may no longer be flipped.
            disallowed_actions_mask = (states_next[:, 0, :] != self.allowed_action_state)
            # Calculate target Q, selecting ONLY ALLOWED ACTIONS greedily.
            with torch.no_grad():
                if self.double_dqn:
                    network_preds = self.network(states_next.float())
                    # Set the Q-value of disallowed actions to a large negative number (-10000) so they are not selected.
                    network_preds_allowed = network_preds.masked_fill(disallowed_actions_mask,-10000)
                    greedy_actions = network_preds_allowed.argmax(1, True)
                    q_value_target = target_preds.gather(1, greedy_actions)
                else:
                    q_value_target = target_preds.masked_fill(disallowed_actions_mask,-10000).max(1, True)[0]

        if self.clip_Q_targets:
            # Clamp negative targets to zero (used for irreversible agents).
            q_value_target[q_value_target < 0] = 0

        # Calculate TD target (bootstrapping is cut off at terminal states).
        td_target = rewards + (1 - dones) * self.gamma * q_value_target

        # Calculate Q value of the actions actually taken.
        q_value = self.network(states.float()).gather(1, actions)

        # Calculate loss
        loss = self.loss(q_value, td_target, reduction='mean')

        # Update weights
        self.optimizer.zero_grad()
        loss.backward()

        if self.max_grad_norm is not None: #Optional gradient clipping
            torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.max_grad_norm)

        self.optimizer.step()

        return loss.item()
def act(self, state, is_training_ready=True):
if is_training_ready and random.uniform(0, 1) >= self.epsilon:
# Action that maximises Q function
action = self.predict(state)
else:
if self.acting_in_reversible_spin_env:
# Random random spin.
action = np.random.randint(0, self.env.action_space.n)
else:
# Flip random spin from that hasn't yet been flipped.
x = (state[0, :] == self.allowed_action_state).nonzero()
action = x[np.random.randint(0, len(x))].item()
return action
def update_epsilon(self, timestep):
eps = self.initial_exploration_rate - (self.initial_exploration_rate - self.final_exploration_rate) * (
timestep / self.final_exploration_step
)
self.epsilon = max(eps, self.final_exploration_rate)
def update_lr(self, timestep):
if timestep <= self.peak_learning_rate_step:
lr = self.initial_learning_rate - (self.initial_learning_rate - self.peak_learning_rate) * (
timestep / self.peak_learning_rate_step
)
elif timestep <= self.final_learning_rate_step:
lr = self.peak_learning_rate - (self.peak_learning_rate - self.final_learning_rate) * (
(timestep - self.peak_learning_rate_step) / (self.final_learning_rate_step - self.peak_learning_rate_step)
)
else:
lr = None
if lr is not None:
for g in self.optimizer.param_groups:
g['lr'] = lr
@torch.no_grad()
def predict(self, states, acting_in_reversible_spin_env=None):
if acting_in_reversible_spin_env is None:
acting_in_reversible_spin_env = self.acting_in_reversible_spin_env
qs = self.network(states)
if acting_in_reversible_spin_env:
if qs.dim() == 1:
actions = qs.argmax().item()
else:
actions = qs.argmax(1, True).squeeze(1).cpu().numpy()
return actions
else:
if qs.dim() == 1:
x = (states[0, :] == self.allowed_action_state).nonzero()
actions = x[qs[x].argmax().item()].item()
else:
disallowed_actions_mask = (states[:, :, 0] != self.allowed_action_state)
qs_allowed = qs.masked_fill(disallowed_actions_mask, -10000)
actions = qs_allowed.argmax(1, True).squeeze(1).cpu().numpy()
return actions
    @torch.no_grad()
    def evaluate_agent(self, batch_size=None):
        """
        Evaluates agent's current performance. Run multiple evaluations at once
        so the network predictions can be done in batches.

        Runs `self.test_episodes` greedy episodes on random test environments,
        scoring each according to `self.test_metric`, and returns the mean score.
        """
        if batch_size is None:
            batch_size = self.minibatch_size

        i_test = 0   # number of episodes started so far
        i_comp = 0   # number of episodes completed so far
        test_scores = []
        batch_scores = [0]*batch_size

        # Slots of concurrently-running test episodes (None = free slot).
        test_envs = np.array([None]*batch_size)
        obs_batch = []

        while i_comp < self.test_episodes:
            # Fill any free slots with fresh test episodes.
            for i, env in enumerate(test_envs):
                if env is None and i_test < self.test_episodes:
                    test_env, testing_in_reversible_spin_env = self.get_random_env(self.test_envs)
                    obs = test_env.reset()
                    test_env = deepcopy(test_env)

                    test_envs[i] = test_env
                    obs_batch.append(obs)

                    i_test += 1

            # Greedy actions for all live episodes in one batched forward pass.
            actions = self.predict(torch.FloatTensor(np.array(obs_batch)).to(self.device),
                                   testing_in_reversible_spin_env)

            obs_batch = []

            i = 0
            for env, action in zip(test_envs, actions):

                if env is not None:
                    obs, rew, done, info = env.step(action)

                    if self.test_metric == TestMetric.CUMULATIVE_REWARD:
                        batch_scores[i] += rew

                    if done:
                        # Episode finished: record its score per the chosen metric.
                        if self.test_metric == TestMetric.BEST_ENERGY:
                            batch_scores[i] = env.best_energy
                        elif self.test_metric == TestMetric.ENERGY_ERROR:
                            batch_scores[i] = abs(env.best_energy - env.calculate_best()[0])
                        elif self.test_metric == TestMetric.MAX_CUT:
                            batch_scores[i] = env.get_best_cut()
                        elif self.test_metric == TestMetric.FINAL_CUT:
                            batch_scores[i] = env.calculate_cut()

                        test_scores.append(batch_scores[i])

                        if self.test_metric == TestMetric.CUMULATIVE_REWARD:
                            batch_scores[i] = 0

                        i_comp += 1
                        test_envs[i] = None   # free the slot for the next episode
                    else:
                        obs_batch.append(obs)

                i += 1

        if self.test_metric == TestMetric.ENERGY_ERROR:
            print("\n{}/{} graphs solved optimally".format(np.count_nonzero(np.array(test_scores)==0),self.test_episodes), end="")

        return np.mean(test_scores)
def save(self, path='network.pth'):
if os.path.splitext(path)[-1]=='':
path + '.pth'
torch.save(self.network.state_dict(), path)
def load(self,path):
self.network.load_state_dict(torch.load(path,map_location=self.device)) | 22,538 | 37.396934 | 132 | py |
eco-dqn | eco-dqn-master/src/agents/dqn/__init__.py | 0 | 0 | 0 | py | |
eco-dqn | eco-dqn-master/experiments/utils.py | import os
import pickle
import networkx as nx
import time
import numpy as np
import scipy as sp
import pandas as pd
import torch
from collections import namedtuple
from copy import deepcopy
import src.envs.core as ising_env
from src.envs.utils import (SingleGraphGenerator, SpinBasis)
from src.agents.solver import Network, Greedy
####################################################
# TESTING ON GRAPHS
####################################################
def test_network(network, env_args, graphs_test,device=None, step_factor=1, batched=True,
                 n_attempts=50, return_raw=False, return_history=False, max_batch_size=None):
    """Evaluate a network on a set of test graphs.

    Dispatches to the batched implementation (default) or the sequential one.
    Note the sequential path takes no `device` or `max_batch_size` argument;
    a warning is printed if `max_batch_size` is supplied with batched=False.
    """
    if not batched:
        # Sequential fallback: max_batch_size has no meaning here.
        if max_batch_size is not None:
            print("Warning: max_batch_size argument will be ignored for when batched=False.")
        return __test_network_sequential(network, env_args, graphs_test, step_factor,
                                         n_attempts, return_raw, return_history)
    return __test_network_batched(network, env_args, graphs_test, device, step_factor,
                                  n_attempts, return_raw, return_history, max_batch_size)
def __test_network_batched(network, env_args, graphs_test, device=None, step_factor=1,
                           n_attempts=50, return_raw=False, return_history=False, max_batch_size=None):
    """Evaluate `network` on each test graph using batches of parallel episodes.

    For every graph, `n_attempts` episodes (forced to 1 when the MDP is
    irreversible) are rolled out in lock-step batches of at most
    `max_batch_size` environments, alongside greedy baselines computed with
    all-(+1) and random spin initialisations.

    Returns:
        pandas.DataFrame of per-graph results; when return_raw and/or
        return_history are set, a list [results(, results_raw)(, history)]
        is returned instead.
    """
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): torch.device(device) builds a device object but discards
    # it - presumably intended to set a default device; confirm.
    torch.device(device)
    # HELPER FUNCTION FOR NETWORK TESTING
    acting_in_reversible_spin_env = env_args['reversible_spins']
    if env_args['reversible_spins']:
        # If MDP is reversible, both actions are allowed.
        if env_args['spin_basis'] == SpinBasis.BINARY:
            allowed_action_state = (0, 1)
        elif env_args['spin_basis'] == SpinBasis.SIGNED:
            allowed_action_state = (1, -1)
    else:
        # If MDP is irreversible, only return the state of spins that haven't been flipped.
        if env_args['spin_basis'] == SpinBasis.BINARY:
            allowed_action_state = 0
        if env_args['spin_basis'] == SpinBasis.SIGNED:
            allowed_action_state = 1
    def predict(states):
        # Greedily pick the argmax-Q action for each state in the batch
        # (qs.dim()==1 means a single unbatched state).
        qs = network(states)
        if acting_in_reversible_spin_env:
            if qs.dim() == 1:
                actions = [qs.argmax().item()]
            else:
                actions = qs.argmax(1, True).squeeze(1).cpu().numpy()
            return actions
        else:
            if qs.dim() == 1:
                x = (states.squeeze()[:,0] == allowed_action_state).nonzero()
                actions = [x[qs[x].argmax().item()].item()]
            else:
                # Mask already-flipped spins with a large negative value so
                # argmax never selects a disallowed action.
                disallowed_actions_mask = (states[:, :, 0] != allowed_action_state)
                qs_allowed = qs.masked_fill(disallowed_actions_mask, -1000)
                actions = qs_allowed.argmax(1, True).squeeze(1).cpu().numpy()
            return actions
    # NETWORK TESTING
    results = []
    results_raw = []
    if return_history:
        history = []
    # Irreversible MDPs are deterministic from the fixed start, so one attempt suffices.
    n_attempts = n_attempts if env_args["reversible_spins"] else 1
    for j, test_graph in enumerate(graphs_test):
        i_comp = 0
        i_batch = 0
        t_total = 0
        n_spins = test_graph.shape[0]
        n_steps = int(n_spins * step_factor)
        test_env = ising_env.make("SpinSystem",
                                  SingleGraphGenerator(test_graph),
                                  n_steps,
                                  **env_args)
        print("Running greedy solver with +1 initialisation of spins...", end="...")
        # Calculate the greedy cut with all spins initialised to +1
        greedy_env = deepcopy(test_env)
        greedy_env.reset(spins=np.array([1] * test_graph.shape[0]))
        greedy_agent = Greedy(greedy_env)
        greedy_agent.solve()
        greedy_single_cut = greedy_env.get_best_cut()
        greedy_single_spins = greedy_env.best_spins
        print("done.")
        if return_history:
            actions_history = []
            rewards_history = []
            scores_history = []
        best_cuts = []
        init_spins = []
        best_spins = []
        greedy_cuts = []
        greedy_spins = []
        # Run the attempts in batches until n_attempts episodes have completed.
        while i_comp < n_attempts:
            if max_batch_size is None:
                batch_size = n_attempts
            else:
                batch_size = min(n_attempts - i_comp, max_batch_size)
            i_comp_batch = 0
            if return_history:
                actions_history_batch = [[None]*batch_size]
                rewards_history_batch = [[None] * batch_size]
                scores_history_batch = []
            test_envs = [None] * batch_size
            best_cuts_batch = [-1e3] * batch_size
            init_spins_batch = [[] for _ in range(batch_size)]
            best_spins_batch = [[] for _ in range(batch_size)]
            greedy_envs = [None] * batch_size
            greedy_cuts_batch = []
            greedy_spins_batch = []
            obs_batch = [None] * batch_size
            print("Preparing batch of {} environments for graph {}.".format(batch_size,j), end="...")
            for i in range(batch_size):
                env = deepcopy(test_env)
                obs_batch[i] = env.reset()
                test_envs[i] = env
                # Keep a copy of the freshly-reset env for the greedy baseline.
                greedy_envs[i] = deepcopy(env)
                init_spins_batch[i] = env.best_spins
            if return_history:
                scores_history_batch.append([env.calculate_score() for env in test_envs])
            print("done.")
            # Calculate the max cut acting w.r.t. the network
            t_start = time.time()
            # pool = mp.Pool(processes=16)
            k = 0
            while i_comp_batch < batch_size:
                t1 = time.time()
                # Note: Do not convert list of np.arrays to FloatTensor, it is very slow!
                # see: https://github.com/pytorch/pytorch/issues/13918
                # Hence, here we convert a list of np arrays to a np array.
                obs_batch = torch.FloatTensor(np.array(obs_batch)).to(device)
                actions = predict(obs_batch)
                obs_batch = []
                if return_history:
                    scores = []
                    rewards = []
                i = 0
                # Step every still-active environment; finished slots are set to None.
                for env, action in zip(test_envs,actions):
                    if env is not None:
                        obs, rew, done, info = env.step(action)
                        if return_history:
                            scores.append(env.calculate_score())
                            rewards.append(rew)
                        if not done:
                            obs_batch.append(obs)
                        else:
                            best_cuts_batch[i] = env.get_best_cut()
                            best_spins_batch[i] = env.best_spins
                            i_comp_batch += 1
                            i_comp += 1
                            test_envs[i] = None
                    i+=1
                k+=1
                if return_history:
                    actions_history_batch.append(actions)
                    scores_history_batch.append(scores)
                    rewards_history_batch.append(rewards)
                # print("\t",
                #       "Par. steps :", k,
                #       "Env steps : {}/{}".format(k/batch_size,n_steps),
                #       'Time: {0:.3g}s'.format(time.time()-t1))
            t_total += (time.time() - t_start)
            i_batch+=1
            print("Finished agent testing batch {}.".format(i_batch))
            # Greedy baseline from the same random initialisations as the agent.
            if env_args["reversible_spins"]:
                print("Running greedy solver with {} random initialisations of spins for batch {}...".format(batch_size, i_batch), end="...")
                for env in greedy_envs:
                    Greedy(env).solve()
                    cut = env.get_best_cut()
                    greedy_cuts_batch.append(cut)
                    greedy_spins_batch.append(env.best_spins)
                print("done.")
            if return_history:
                actions_history += actions_history_batch
                rewards_history += rewards_history_batch
                scores_history += scores_history_batch
            best_cuts += best_cuts_batch
            init_spins += init_spins_batch
            best_spins += best_spins_batch
            if env_args["reversible_spins"]:
                greedy_cuts += greedy_cuts_batch
                greedy_spins += greedy_spins_batch
            # print("\tGraph {}, par. steps: {}, comp: {}/{}".format(j, k, i_comp, batch_size),
            #       end="\r" if n_spins<100 else "")
        # Aggregate the attempts for this graph.
        i_best = np.argmax(best_cuts)
        best_cut = best_cuts[i_best]
        sol = best_spins[i_best]
        mean_cut = np.mean(best_cuts)
        if env_args["reversible_spins"]:
            idx_best_greedy = np.argmax(greedy_cuts)
            greedy_random_cut = greedy_cuts[idx_best_greedy]
            greedy_random_spins = greedy_spins[idx_best_greedy]
            greedy_random_mean_cut = np.mean(greedy_cuts)
        else:
            # Irreversible case: only the +1-initialised greedy baseline exists.
            greedy_random_cut = greedy_single_cut
            greedy_random_spins = greedy_single_spins
            greedy_random_mean_cut = greedy_single_cut
        print('Graph {}, best(mean) cut: {}({}), greedy cut (rand init / +1 init) : {} / {}. ({} attempts in {}s)\t\t\t'.format(
            j, best_cut, mean_cut, greedy_random_cut, greedy_single_cut, n_attempts, np.round(t_total,2)))
        results.append([best_cut, sol,
                        mean_cut,
                        greedy_single_cut, greedy_single_spins,
                        greedy_random_cut, greedy_random_spins,
                        greedy_random_mean_cut,
                        t_total/(n_attempts)])
        results_raw.append([init_spins,
                            best_cuts, best_spins,
                            greedy_cuts, greedy_spins])
        if return_history:
            history.append([np.array(actions_history).T.tolist(),
                            np.array(scores_history).T.tolist(),
                            np.array(rewards_history).T.tolist()])
    results = pd.DataFrame(data=results, columns=["cut", "sol",
                                                  "mean cut",
                                                  "greedy (+1 init) cut", "greedy (+1 init) sol",
                                                  "greedy (rand init) cut", "greedy (rand init) sol",
                                                  "greedy (rand init) mean cut",
                                                  "time"])
    results_raw = pd.DataFrame(data=results_raw, columns=["init spins",
                                                          "cuts", "sols",
                                                          "greedy cuts", "greedy sols"])
    if return_history:
        history = pd.DataFrame(data=history, columns=["actions", "scores", "rewards"])
    if return_raw==False and return_history==False:
        return results
    else:
        ret = [results]
        if return_raw:
            ret.append(results_raw)
        if return_history:
            ret.append(history)
        return ret
def __test_network_sequential(network, env_args, graphs_test, step_factor=1,
                              n_attempts=50, return_raw=False, return_history=False):
    """Evaluate `network` on each test graph one episode at a time.

    Slower, unbatched counterpart of __test_network_batched: for each graph
    it runs `n_attempts` episodes (1 if the MDP is irreversible) plus greedy
    baselines, and returns a per-graph pandas DataFrame. The return_raw and
    return_history options are not implemented here.
    """
    if return_raw or return_history:
        raise NotImplementedError("I've not got to this yet! Used the batched test script (it's faster anyway).")
    results = []
    # Irreversible MDPs are deterministic from the fixed start, so one attempt suffices.
    n_attempts = n_attempts if env_args["reversible_spins"] else 1
    for i, test_graph in enumerate(graphs_test):
        n_steps = int(test_graph.shape[0] * step_factor)
        best_cut = -1e3
        best_spins = []
        greedy_random_cut = -1e3
        greedy_random_spins = []
        greedy_single_cut = -1e3
        greedy_single_spins = []
        times = []
        test_env = ising_env.make("SpinSystem",
                                  SingleGraphGenerator(test_graph),
                                  n_steps,
                                  **env_args)
        net_agent = Network(network, test_env,
                            record_cut=False, record_rewards=False, record_qs=False)
        # Greedy baseline with all spins initialised to +1.
        greedy_env = deepcopy(test_env)
        greedy_env.reset(spins=np.array([1] * test_graph.shape[0]))
        greedy_agent = Greedy(greedy_env)
        greedy_agent.solve()
        greedy_single_cut = greedy_env.get_best_cut()
        greedy_single_spins = greedy_env.best_spins
        for k in range(n_attempts):
            # Fresh random initialisation for both the agent and the greedy baseline.
            net_agent.reset(clear_history=True)
            greedy_env = deepcopy(test_env)
            greedy_agent = Greedy(greedy_env)
            tstart = time.time()
            net_agent.solve()
            times.append(time.time() - tstart)
            cut = test_env.get_best_cut()
            if cut > best_cut:
                best_cut = cut
                best_spins = test_env.best_spins
            greedy_agent.solve()
            greedy_cut = greedy_env.get_best_cut()
            if greedy_cut > greedy_random_cut:
                greedy_random_cut = greedy_cut
                greedy_random_spins = greedy_env.best_spins
            # print('\nGraph {}, attempt : {}/{}, best cut : {}, greedy cut (rand init / +1 init) : {} / {}\t\t\t'.format(
            #     i + 1, k, n_attemps, best_cut, greedy_random_cut, greedy_single_cut),
            #     end="\r")
            print('\nGraph {}, attempt : {}/{}, best cut : {}, greedy cut (rand init / +1 init) : {} / {}\t\t\t'.format(
                i + 1, k, n_attempts, best_cut, greedy_random_cut, greedy_single_cut),
                end=".")
        results.append([best_cut, best_spins,
                        greedy_single_cut, greedy_single_spins,
                        greedy_random_cut, greedy_random_spins,
                        np.mean(times)])
    return pd.DataFrame(data=results, columns=["cut", "sol",
                                               "greedy (+1 init) cut", "greedy (+1 init) sol",
                                               "greedy (rand init) cut", "greedy (rand init) sol",
                                               "time"])
####################################################
# LOADING GRAPHS
####################################################
# Container for a benchmark graph instance plus its best-known value/solution.
Graph = namedtuple('Graph', 'name n_vertices n_edges matrix bk_val bk_sol')
def load_graph(graph_dir, graph_name):
    """Load a benchmark graph and its best-known (bk) value and solution.

    Expects three files below *graph_dir*:
      instances/<name>.mc : "n_vertices n_edges" header, then "i j w" edge
                            lines with 1-indexed vertices.
      bkvl/<name>.bkvl    : best-known value (a single float).
      bksol/<name>.bksol  : best-known solution as a string of 0/1 digits.

    Returns:
        Graph: namedtuple holding the symmetric adjacency matrix and the
        best-known data. A random extra 0/1 entry is appended to bk_sol as
        a 'no-action' placeholder spin.
    """
    inst_loc = os.path.join(graph_dir, 'instances', graph_name+'.mc')
    val_loc = os.path.join(graph_dir, 'bkvl', graph_name+'.bkvl')
    sol_loc = os.path.join(graph_dir, 'bksol', graph_name+'.bksol')
    # Bug fix: the defaults were previously bound to names ('vertices',
    # 'edges') that were never read, so an empty/malformed instance file
    # raised NameError at the return statement instead of a sane default.
    n_vertices, n_edges, matrix = 0, 0, None
    bk_val, bk_sol = None, None
    with open(inst_loc) as f:
        for line in f:
            arr = list(map(int, line.strip().split(' ')))
            if len(arr) == 2:  # contains the number of vertices and edges
                n_vertices, n_edges = arr
                matrix = np.zeros((n_vertices,n_vertices))
            else:
                assert isinstance(matrix, np.ndarray), 'First line in file should define graph dimensions.'
                i, j, w = arr[0]-1, arr[1]-1, arr[2]
                matrix[ [i,j], [j,i] ] = w  # set both (i,j) and (j,i): symmetric weight
    with open(val_loc) as f:
        bk_val = float( f.readline() )
    with open(sol_loc) as f:
        bk_sol_str = f.readline().strip()
        bk_sol = np.array([int(x) for x in list(bk_sol_str)] + [ np.random.choice([0,1]) ]) # final spin is 'no-action'
    return Graph(graph_name, n_vertices, n_edges, matrix, bk_val, bk_sol)
def load_graph_set(graph_save_loc):
    """Load a pickled list of graphs, normalising each to a dense numpy array.

    Args:
        graph_save_loc (str): Path to a pickle file holding a list of graphs
            stored as networkx graphs, scipy sparse CSR matrices or numpy arrays.

    Returns:
        list of np.ndarray: One dense adjacency matrix per graph.
    """
    # Bug fix: open the pickle inside a context manager so the file handle is
    # closed promptly (the original `pickle.load(open(...))` leaked it).
    with open(graph_save_loc, 'rb') as f:
        graphs_test = pickle.load(f)
    def graph_to_array(g):
        # Exact type checks are deliberate: subclasses (e.g. nx.DiGraph)
        # would otherwise be converted with possibly different semantics.
        if type(g) == nx.Graph:
            g = nx.to_numpy_array(g)
        elif type(g) == sp.sparse.csr_matrix:
            g = g.toarray()
        return g
    graphs_test = [graph_to_array(g) for g in graphs_test]
    print('{} target graphs loaded from {}'.format(len(graphs_test), graph_save_loc))
    return graphs_test
####################################################
# FILE UTILS
####################################################
def mk_dir(export_dir, quite=False):
    """Create *export_dir* (and any missing parents) if it does not exist.

    Args:
        export_dir (str): Directory path to create.
        quite (bool): If True, suppress status printing. (Misspelled name
            kept for backward compatibility with existing callers.)
    """
    if not os.path.exists(export_dir):
        try:
            os.makedirs(export_dir)
            if not quite:
                print('created dir: ', export_dir)
        except FileExistsError:
            # Guard against race condition: another process created the
            # directory between the exists() check and makedirs().
            # (Bug fix: the original compared `exc.errno != exc.errno.EEXIST`,
            # which always raised AttributeError on an int errno and was then
            # silently swallowed by a broad `except Exception: pass`, hiding
            # real failures such as permission errors.)
            pass
    else:
        if not quite:
            print('dir already exists: ', export_dir)
eco-dqn | eco-dqn-master/experiments/BA_60spin/train_and_test_eco.py | """
Trains and tests ECO-DQN on 60 spin BA graphs.
"""
import experiments.BA_60spin.train.train_eco as train
import experiments.BA_60spin.test.test_eco as test
# Train ECO-DQN on 60-spin BA graphs, then evaluate the saved agent on
# validation sets of increasing size. max_batch_size is reduced for the
# larger graphs, presumably to limit memory use - confirm.
save_loc="BA_60spin/eco"
train.run(save_loc)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_60spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_500spin_m4_100graphs.pkl", batched=True, max_batch_size=5)
| 678 | 44.266667 | 118 | py |
eco-dqn | eco-dqn-master/experiments/BA_60spin/train_and_test_s2v.py | """
Trains and tests S2V-DQN on 60 spin BA graphs.
"""
import experiments.BA_60spin.test.test_s2v as test
import experiments.BA_60spin.train.train_s2v as train
# Train S2V-DQN on 60-spin BA graphs, then evaluate the saved agent on
# validation sets of increasing size. max_batch_size is reduced for the
# larger graphs, presumably to limit memory use - confirm.
save_loc="BA_60spin/s2v"
train.run(save_loc)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_60spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_500spin_m4_100graphs.pkl", batched=True, max_batch_size=5) | 677 | 47.428571 | 118 | py |
eco-dqn | eco-dqn-master/experiments/BA_60spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_60spin/eco",
        graph_save_loc="_graphs/validation/BA_60spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Test a pre-trained ECO-DQN MPNN agent on a set of validation graphs.

    Loads 'network_best.pth' from <save_loc>/network, evaluates it on the
    graphs pickled at graph_save_loc, and pickles the results (plus raw and
    history data) into <save_loc>/data.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    # NOTE(review): test_save_path is assigned but never used below - confirm.
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)
    ####################################################
    # NETWORK SETUP
    ####################################################
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }
    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################
    # Environment settings mirror those used at training time (train_eco).
    gamma = 0.95
    step_factor = 2
    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 60,
                'reversible_spins': True}
    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)
    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # A throwaway env built from the first graph, used only to size the
    # network's observation input.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0]*step_factor,
                              **env_args)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch.device(device)
    print("Set torch default device to {}.".format(device))
    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path,map_location=device))
    # Inference only: freeze parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)
    results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
    results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
    history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
    run() | 4,506 | 35.942623 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_60spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_60spin/s2v",
        graph_save_loc="_graphs/validation/BA_60spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Test a pre-trained S2V-DQN MPNN agent on a set of validation graphs.

    Loads 'network_best.pth' from <save_loc>/network, evaluates it on the
    graphs pickled at graph_save_loc, and pickles the results (plus raw and
    history data) into <save_loc>/data.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    # NOTE(review): test_save_path is assigned but never used below - confirm.
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)
    ####################################################
    # NETWORK SETUP
    ####################################################
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }
    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################
    # Environment settings mirror those used at training time (train_s2v):
    # irreversible spins, spin-state-only observations, dense reward.
    gamma=1
    step_factor = 1
    env_args = {'observables':[Observable.SPIN_STATE],
                'reward_signal':RewardSignal.DENSE,
                'extra_action':ExtraAction.NONE,
                'optimisation_target':OptimisationTarget.CUT,
                'spin_basis':SpinBasis.BINARY,
                'norm_rewards':True,
                'memory_length':None,
                'horizon_length':None,
                'stag_punishment':None,
                'basin_reward':None,
                'reversible_spins':False}
    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)
    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # A throwaway env built from the first graph, used only to size the
    # network's observation input.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0]*step_factor,
                              **env_args)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch.device(device)
    print("Set torch default device to {}.".format(device))
    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path,map_location=device))
    # Inference only: freeze parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)
    results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
    results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
    history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
    run() | 4,485 | 35.770492 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_60spin/train/train_s2v.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomBarabasiAlbertGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="BA_60spin/s2v"):
    """Train an S2V-DQN MPNN agent on random 60-spin BA graphs.

    Saves the network checkpoints, test scores and losses under
    <save_loc>/network, and writes learning-curve / loss plots there.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################
    # S2V-style setup: irreversible spins, spin-state-only observations,
    # dense reward signal.
    gamma=1
    step_fact = 1
    env_args = {'observables':[Observable.SPIN_STATE],
                'reward_signal':RewardSignal.DENSE,
                'extra_action':ExtraAction.NONE,
                'optimisation_target':OptimisationTarget.CUT,
                'spin_basis':SpinBasis.BINARY,
                'norm_rewards':True,
                'memory_length':None,
                'horizon_length':None,
                'stag_punishment':None,
                'basin_reward':None,
                'reversible_spins':False}
    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################
    n_spins_train = 60
    train_graph_generator = RandomBarabasiAlbertGraphGenerator(n_spins=n_spins_train,m_insertion_edges=4,edge_type=EdgeType.DISCRETE)
    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/BA_60spin_m4_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)
    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################
    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train*step_fact),
                                 **env_args)]
    # NOTE(review): this draws a graph from the training generator just to
    # read its size for the test episode length - confirm intended.
    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test*step_fact),
                                **env_args)]
    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################
    data_folder = os.path.join(save_loc,'data')
    network_folder = os.path.join(save_loc, 'network')
    mk_dir(data_folder)
    mk_dir(network_folder)
    # print(data_folder)
    network_save_path = os.path.join(network_folder,'network.pth')
    test_save_path = os.path.join(network_folder,'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')
    ####################################################
    # SET UP AGENT
    ####################################################
    nb_steps = 5000000
    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)
    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=True,
                replay_start_size=500,
                replay_buffer_size=5000,  # 20000
                gamma=gamma,  # 1
                update_target_frequency=1000,  # 500
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,  # 1
                minibatch_size=64,  # 128
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,  # 0.05
                final_exploration_step=300000,  # 40000
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=200000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=20000,  # 10000
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None
                )
    print("\n Created DQN agent with network:\n\n", agent.network)
    #############
    # TRAIN AGENT
    #############
    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)
    agent.save()
    ############
    # PLOT - learning curve
    ############
    data = pickle.load(open(test_save_path, 'rb'))
    data = np.array(data)
    fig_fname = os.path.join(network_folder, "training_curve")
    plt.plot(data[:, 0], data[:, 1])
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    # Relabel the y-axis to match the metric actually recorded.
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")
    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()
    ############
    # PLOT - losses
    ############
    data = pickle.load(open(loss_save_path, 'rb'))
    data = np.array(data)
    fig_fname = os.path.join(network_folder, "loss")
    # Smooth the loss curve with a width-N moving average before plotting.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')
    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)
    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
if __name__ == "__main__":
    run() | 6,850 | 30.283105 | 133 | py |
eco-dqn | eco-dqn-master/experiments/BA_60spin/train/train_eco.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomBarabasiAlbertGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="BA_60spin/eco"):
    """Train an ECO-DQN MPNN agent on random 60-spin BA graphs.

    Saves the network checkpoints, test scores and losses under
    <save_loc>/network, and writes learning-curve / loss plots there.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################
    # ECO-style setup: reversible spins, full observable set, BLS reward
    # signal plus a basin reward scaled to the number of spins (1/60).
    gamma=0.95
    step_fact = 2
    env_args = {'observables':DEFAULT_OBSERVABLES,
                'reward_signal':RewardSignal.BLS,
                'extra_action':ExtraAction.NONE,
                'optimisation_target':OptimisationTarget.CUT,
                'spin_basis':SpinBasis.BINARY,
                'norm_rewards':True,
                'memory_length':None,
                'horizon_length':None,
                'stag_punishment':None,
                'basin_reward':1./60,
                'reversible_spins':True}
    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################
    n_spins_train = 60
    train_graph_generator = RandomBarabasiAlbertGraphGenerator(n_spins=n_spins_train,m_insertion_edges=4,edge_type=EdgeType.DISCRETE)
    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/BA_60spin_m4_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)
    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################
    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train*step_fact),
                                 **env_args)]
    # NOTE(review): this draws a graph from the training generator just to
    # read its size for the test episode length - confirm intended.
    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test*step_fact),
                                **env_args)]
    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################
    data_folder = os.path.join(save_loc,'data')
    network_folder = os.path.join(save_loc, 'network')
    mk_dir(data_folder)
    mk_dir(network_folder)
    # print(data_folder)
    network_save_path = os.path.join(network_folder,'network.pth')
    test_save_path = os.path.join(network_folder,'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')
    ####################################################
    # SET UP AGENT
    ####################################################
    nb_steps = 5000000
    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)
    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=False,
                replay_start_size=500,
                replay_buffer_size=5000,  # 20000
                gamma=gamma,  # 1
                update_target_frequency=1000,  # 500
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,  # 1
                minibatch_size=64,  # 128
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,  # 0.05
                final_exploration_step=300000,  # 40000
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=200000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=20000,  # 10000
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None
                )
    print("\n Created DQN agent with network:\n\n", agent.network)
    #############
    # TRAIN AGENT
    #############
    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)
    agent.save()
    ############
    # PLOT - learning curve
    ############
    data = pickle.load(open(test_save_path,'rb'))
    data = np.array(data)
    fig_fname = os.path.join(network_folder,"training_curve")
    plt.plot(data[:,0],data[:,1])
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    # Relabel the y-axis to match the metric actually recorded.
    if agent.test_metric==TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric==TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric==TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric==TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric==TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")
    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()
    ############
    # PLOT - losses
    ############
    data = pickle.load(open(loss_save_path,'rb'))
    data = np.array(data)
    fig_fname = os.path.join(network_folder,"loss")
    # Smooth the loss curve with a width-N moving average before plotting.
    N=50
    data_x = np.convolve(data[:,0], np.ones((N,))/N, mode='valid')
    data_y = np.convolve(data[:,1], np.ones((N,))/N, mode='valid')
    plt.plot(data_x,data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)
    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
if __name__ == "__main__":
    run() | 6,820 | 30.288991 | 133 | py |
eco-dqn | eco-dqn-master/experiments/ER_20spin/train_and_test_eco.py | """
Trains and tests ECO-DQN on 20 spin ER graphs.
"""
# Train an ECO-DQN agent on 20-spin ER graphs, then evaluate how the trained
# agent generalises to validation sets of progressively larger graphs.
import experiments.ER_20spin.test.test_eco as test
import experiments.ER_20spin.train.train_eco as train
# Root folder shared by training outputs and all test results.
save_loc="ER_20spin/eco"
train.run(save_loc)
# Evaluate on graphs of increasing size; the batch size is capped for the
# larger graphs (presumably to limit memory use -- TODO confirm).
test.run(save_loc, graph_save_loc="_graphs/validation/ER_20spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_40spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_60spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_100spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_200spin_p15_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_500spin_p15_100graphs.pkl", batched=True, max_batch_size=5)
| 923 | 45.2 | 119 | py |
eco-dqn | eco-dqn-master/experiments/ER_20spin/train_and_test_s2v.py | """
Trains and tests S2V-DQN on 20 spin ER graphs.
"""
# Train an S2V-DQN agent on 20-spin ER graphs, then evaluate how the trained
# agent generalises to validation sets of progressively larger graphs.
import experiments.ER_20spin.test.test_s2v as test
import experiments.ER_20spin.train.train_s2v as train
# Root folder shared by training outputs and all test results.
save_loc="ER_20spin/s2v"
train.run(save_loc)
# Evaluate on graphs of increasing size; the batch size is capped for the
# larger graphs (presumably to limit memory use -- TODO confirm).
test.run(save_loc, graph_save_loc="_graphs/validation/ER_20spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_40spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_60spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_100spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_200spin_p15_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_500spin_p15_100graphs.pkl", batched=True, max_batch_size=5) | 919 | 56.5 | 119 | py |
eco-dqn | eco-dqn-master/experiments/ER_20spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_20spin/eco",
graph_save_loc="_graphs/validation/ER_20spin_p15_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma = 0.95
step_factor = 2
env_args = {'observables': DEFAULT_OBSERVABLES,
'reward_signal': RewardSignal.BLS,
'extra_action': ExtraAction.NONE,
'optimisation_target': OptimisationTarget.CUT,
'spin_basis': SpinBasis.BINARY,
'norm_rewards': True,
'memory_length': None,
'horizon_length': None,
'stag_punishment': None,
'basin_reward': 1. / 20,
'reversible_spins': True}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0]*step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path,map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,507 | 35.95082 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_20spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_20spin/s2v",
graph_save_loc="_graphs/validation/ER_20spin_p15_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma=1
step_factor = 1
env_args = {'observables':[Observable.SPIN_STATE],
'reward_signal':RewardSignal.DENSE,
'extra_action':ExtraAction.NONE,
'optimisation_target':OptimisationTarget.CUT,
'spin_basis':SpinBasis.BINARY,
'norm_rewards':True,
'memory_length':None,
'horizon_length':None,
'stag_punishment':None,
'basin_reward':None,
'reversible_spins':False}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0]*step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path,map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,486 | 35.778689 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_20spin/train/train_s2v.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomErdosRenyiGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="ER_20spin/s2v"):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma=1
step_fact = 1
env_args = {'observables':[Observable.SPIN_STATE],
'reward_signal':RewardSignal.DENSE,
'extra_action':ExtraAction.NONE,
'optimisation_target':OptimisationTarget.CUT,
'spin_basis':SpinBasis.BINARY,
'norm_rewards':True,
'memory_length':None,
'horizon_length':None,
'stag_punishment':None,
'basin_reward':None,
'reversible_spins':False}
####################################################
# SET UP TRAINING AND TEST GRAPHS
####################################################
n_spins_train = 20
train_graph_generator = RandomErdosRenyiGraphGenerator(n_spins=n_spins_train,p_connection=0.15,edge_type=EdgeType.DISCRETE)
####
# Pre-generated test graphs
####
graph_save_loc = "_graphs/testing/ER_20spin_p15_50graphs.pkl"
graphs_test = load_graph_set(graph_save_loc)
n_tests = len(graphs_test)
test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)
####################################################
# SET UP TRAINING AND TEST ENVIRONMENTS
####################################################
train_envs = [ising_env.make("SpinSystem",
train_graph_generator,
int(n_spins_train*step_fact),
**env_args)]
n_spins_test = train_graph_generator.get().shape[0]
test_envs = [ising_env.make("SpinSystem",
test_graph_generator,
int(n_spins_test*step_fact),
**env_args)]
####################################################
# SET UP FOLDERS FOR SAVING DATA
####################################################
data_folder = os.path.join(save_loc,'data')
network_folder = os.path.join(save_loc, 'network')
mk_dir(data_folder)
mk_dir(network_folder)
# print(data_folder)
network_save_path = os.path.join(network_folder,'network.pth')
test_save_path = os.path.join(network_folder,'test_scores.pkl')
loss_save_path = os.path.join(network_folder, 'losses.pkl')
####################################################
# SET UP AGENT
####################################################
nb_steps = 2500000
network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
n_layers=3,
n_features=64,
n_hid_readout=[],
tied_weights=False)
agent = DQN(train_envs,
network_fn,
init_network_params=None,
init_weight_std=0.01,
double_dqn=True,
clip_Q_targets=True,
replay_start_size=500,
replay_buffer_size=5000, # 20000
gamma=gamma, # 1
update_target_frequency=1000, # 500
update_learning_rate=False,
initial_learning_rate=1e-4,
peak_learning_rate=1e-4,
peak_learning_rate_step=20000,
final_learning_rate=1e-4,
final_learning_rate_step=200000,
update_frequency=32, # 1
minibatch_size=64, # 128
max_grad_norm=None,
weight_decay=0,
update_exploration=True,
initial_exploration_rate=1,
final_exploration_rate=0.05, # 0.05
final_exploration_step=150000, # 40000
adam_epsilon=1e-8,
logging=False,
loss="mse",
save_network_frequency=100000,
network_save_path=network_save_path,
evaluate=True,
test_envs=test_envs,
test_episodes=n_tests,
test_frequency=10000, # 10000
test_save_path=test_save_path,
test_metric=TestMetric.MAX_CUT,
seed=None
)
print("\n Created DQN agent with network:\n\n", agent.network)
#############
# TRAIN AGENT
#############
start = time.time()
agent.learn(timesteps=nb_steps, verbose=True)
print(time.time() - start)
agent.save()
############
# PLOT - learning curve
############
data = pickle.load(open(test_save_path,'rb'))
data = np.array(data)
fig_fname = os.path.join(network_folder,"training_curve")
plt.plot(data[:,0],data[:,1])
plt.xlabel("Training run")
plt.ylabel("Mean reward")
if agent.test_metric==TestMetric.ENERGY_ERROR:
plt.ylabel("Energy Error")
elif agent.test_metric==TestMetric.BEST_ENERGY:
plt.ylabel("Best Energy")
elif agent.test_metric==TestMetric.CUMULATIVE_REWARD:
plt.ylabel("Cumulative Reward")
elif agent.test_metric==TestMetric.MAX_CUT:
plt.ylabel("Max Cut")
elif agent.test_metric==TestMetric.FINAL_CUT:
plt.ylabel("Final Cut")
plt.savefig(fig_fname + ".png", bbox_inches='tight')
plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
############
# PLOT - losses
############
data = pickle.load(open(loss_save_path,'rb'))
data = np.array(data)
fig_fname = os.path.join(network_folder,"loss")
N=50
data_x = np.convolve(data[:,0], np.ones((N,))/N, mode='valid')
data_y = np.convolve(data[:,1], np.ones((N,))/N, mode='valid')
plt.plot(data_x,data_y)
plt.xlabel("Timestep")
plt.ylabel("Loss")
plt.yscale("log")
plt.grid(True)
plt.savefig(fig_fname + ".png", bbox_inches='tight')
plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
if __name__ == "__main__":
run() | 6,794 | 30.313364 | 127 | py |
eco-dqn | eco-dqn-master/experiments/ER_20spin/train/train_eco.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomErdosRenyiGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="ER_20spin/eco"):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma=0.95
step_fact = 2
env_args = {'observables':DEFAULT_OBSERVABLES,
'reward_signal':RewardSignal.BLS,
'extra_action':ExtraAction.NONE,
'optimisation_target':OptimisationTarget.CUT,
'spin_basis':SpinBasis.BINARY,
'norm_rewards':True,
'memory_length':None,
'horizon_length':None,
'stag_punishment':None,
'basin_reward':1./20,
'reversible_spins':True}
####################################################
# SET UP TRAINING AND TEST GRAPHS
####################################################
n_spins_train = 20
train_graph_generator = RandomErdosRenyiGraphGenerator(n_spins=n_spins_train,p_connection=0.15,edge_type=EdgeType.DISCRETE)
####
# Pre-generated test graphs
####
graph_save_loc = "_graphs/testing/ER_20spin_p15_50graphs.pkl"
graphs_test = load_graph_set(graph_save_loc)
n_tests = len(graphs_test)
test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)
####################################################
# SET UP TRAINING AND TEST ENVIRONMENTS
####################################################
train_envs = [ising_env.make("SpinSystem",
train_graph_generator,
int(n_spins_train*step_fact),
**env_args)]
n_spins_test = train_graph_generator.get().shape[0]
test_envs = [ising_env.make("SpinSystem",
test_graph_generator,
int(n_spins_test*step_fact),
**env_args)]
####################################################
# SET UP FOLDERS FOR SAVING DATA
####################################################
data_folder = os.path.join(save_loc,'data')
network_folder = os.path.join(save_loc, 'network')
mk_dir(data_folder)
mk_dir(network_folder)
# print(data_folder)
network_save_path = os.path.join(network_folder,'network.pth')
test_save_path = os.path.join(network_folder,'test_scores.pkl')
loss_save_path = os.path.join(network_folder, 'losses.pkl')
####################################################
# SET UP AGENT
####################################################
nb_steps = 2500000
network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
n_layers=3,
n_features=64,
n_hid_readout=[],
tied_weights=False)
agent = DQN(train_envs,
network_fn,
init_network_params=None,
init_weight_std=0.01,
double_dqn=True,
clip_Q_targets=False,
replay_start_size=500,
replay_buffer_size=5000, # 20000
gamma=gamma, # 1
update_target_frequency=1000, # 500
update_learning_rate=False,
initial_learning_rate=1e-4,
peak_learning_rate=1e-4,
peak_learning_rate_step=20000,
final_learning_rate=1e-4,
final_learning_rate_step=200000,
update_frequency=32, # 1
minibatch_size=64, # 128
max_grad_norm=None,
weight_decay=0,
update_exploration=True,
initial_exploration_rate=1,
final_exploration_rate=0.05, # 0.05
final_exploration_step=150000, # 40000
adam_epsilon=1e-8,
logging=False,
loss="mse",
save_network_frequency=100000,
network_save_path=network_save_path,
evaluate=True,
test_envs=test_envs,
test_episodes=n_tests,
test_frequency=10000, # 10000
test_save_path=test_save_path,
test_metric=TestMetric.MAX_CUT,
seed=None
)
print("\n Created DQN agent with network:\n\n", agent.network)
#############
# TRAIN AGENT
#############
start = time.time()
agent.learn(timesteps=nb_steps, verbose=True)
print(time.time() - start)
agent.save()
############
# PLOT - learning curve
############
data = pickle.load(open(test_save_path,'rb'))
data = np.array(data)
fig_fname = os.path.join(network_folder,"training_curve")
plt.plot(data[:,0],data[:,1])
plt.xlabel("Timestep")
plt.ylabel("Mean reward")
if agent.test_metric==TestMetric.ENERGY_ERROR:
plt.ylabel("Energy Error")
elif agent.test_metric==TestMetric.BEST_ENERGY:
plt.ylabel("Best Energy")
elif agent.test_metric==TestMetric.CUMULATIVE_REWARD:
plt.ylabel("Cumulative Reward")
elif agent.test_metric==TestMetric.MAX_CUT:
plt.ylabel("Max Cut")
elif agent.test_metric==TestMetric.FINAL_CUT:
plt.ylabel("Final Cut")
plt.savefig(fig_fname + ".png", bbox_inches='tight')
plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
plt.clf()
############
# PLOT - losses
############
data = pickle.load(open(loss_save_path,'rb'))
data = np.array(data)
fig_fname = os.path.join(network_folder,"loss")
N=50
data_x = np.convolve(data[:,0], np.ones((N,))/N, mode='valid')
data_y = np.convolve(data[:,1], np.ones((N,))/N, mode='valid')
plt.plot(data_x,data_y)
plt.xlabel("Timestep")
plt.ylabel("Loss")
plt.yscale("log")
plt.grid(True)
plt.savefig(fig_fname + ".png", bbox_inches='tight')
plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
if __name__ == "__main__":
run() | 6,811 | 30.247706 | 127 | py |
eco-dqn | eco-dqn-master/experiments/pretrained_agent/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set, mk_dir
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="pretrained_agent/eco",
network_save_loc="experiments_new/pretrained_agent/networks/eco/network_best_ER_200spin.pth",
graph_save_loc="_graphs/validation/ER_200spin_p15_100graphs.pkl",
batched=True,
max_batch_size=None,
step_factor=None,
n_attemps=50):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# FOLDER LOCATIONS
####################################################
print("save location :", save_loc)
print("network params :", network_save_loc)
mk_dir(save_loc)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
if step_factor is None:
step_factor = 2
env_args = {'observables': DEFAULT_OBSERVABLES,
'reward_signal': RewardSignal.BLS,
'extra_action': ExtraAction.NONE,
'optimisation_target': OptimisationTarget.CUT,
'spin_basis': SpinBasis.BINARY,
'norm_rewards': True,
'memory_length': None,
'horizon_length': None,
'stag_punishment': None,
'basin_reward': None,
'reversible_spins': True}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0] * step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_loc, map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True, n_attempts=n_attemps,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(save_loc, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,430 | 36.550847 | 108 | py |
eco-dqn | eco-dqn-master/experiments/pretrained_agent/test_s2v.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tests an agent.
"""
import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set, mk_dir
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="pretrained_agent/s2v",
network_save_loc="experiments_new/pretrained_agent/networks/s2v/network_best_ER_200spin.pth",
graph_save_loc="_graphs/benchmarks/ising_125spin_graphs.pkl",
batched=True,
max_batch_size=5):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# FOLDER LOCATIONS
####################################################
print("save location :", save_loc)
print("network params :", network_save_loc)
mk_dir(save_loc)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
step_factor = 1
env_args = {'observables':[Observable.SPIN_STATE],
'reward_signal':RewardSignal.DENSE,
'extra_action':ExtraAction.NONE,
'optimisation_target':OptimisationTarget.CUT,
'spin_basis':SpinBasis.BINARY,
'norm_rewards':True,
'memory_length':None,
'horizon_length':None,
'stag_punishment':None,
'basin_reward':None,
'reversible_spins':False}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0] * step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_loc, map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True, n_attempts=50,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(save_loc, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,395 | 35.330579 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_40spin/train_and_test_eco.py | """
Trains and tests ECO-DQN on 40 spin BA graphs.
"""
# Train an ECO-DQN agent on 40-spin BA graphs, then evaluate how the trained
# agent generalises to validation sets of progressively larger graphs.
import experiments.BA_40spin.test.test_eco as test
import experiments.BA_40spin.train.train_eco as train
# Root folder shared by training outputs and all test results.
save_loc="BA_40spin/eco"
train.run(save_loc)
# Evaluate on graphs of increasing size; the batch size is capped for the
# larger graphs (presumably to limit memory use -- TODO confirm).
test.run(save_loc, graph_save_loc="_graphs/validation/BA_40spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_60spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_500spin_m4_100graphs.pkl", batched=True, max_batch_size=5)
| 796 | 48.8125 | 118 | py |
eco-dqn | eco-dqn-master/experiments/BA_40spin/train_and_test_s2v.py | """
Trains and tests S2V-DQN on 20 spin BA graphs.
"""
# Train an S2V-DQN agent on 40-spin BA graphs, then evaluate how the trained
# agent generalises to validation sets of progressively larger graphs.
import experiments.BA_40spin.test.test_s2v as test
import experiments.BA_40spin.train.train_s2v as train
# Root folder shared by training outputs and all test results.
save_loc="BA_40spin/s2v"
train.run(save_loc)
# Evaluate on graphs of increasing size; the batch size is capped for the
# larger graphs (presumably to limit memory use -- TODO confirm).
test.run(save_loc, graph_save_loc="_graphs/validation/BA_40spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_60spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_500spin_m4_100graphs.pkl", batched=True, max_batch_size=5) | 795 | 52.066667 | 118 | py |
eco-dqn | eco-dqn-master/experiments/BA_40spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_40spin/eco",
graph_save_loc="_graphs/validation/BA_40spin_m4_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma = 0.95
step_factor = 2
env_args = {'observables': DEFAULT_OBSERVABLES,
'reward_signal': RewardSignal.BLS,
'extra_action': ExtraAction.NONE,
'optimisation_target': OptimisationTarget.CUT,
'spin_basis': SpinBasis.BINARY,
'norm_rewards': True,
'memory_length': None,
'horizon_length': None,
'stag_punishment': None,
'basin_reward': 1. / 40,
'reversible_spins': True}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0]*step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path,map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,506 | 35.942623 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_40spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_40spin/s2v",
        graph_save_loc="_graphs/validation/BA_40spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Test a pre-trained S2V-DQN agent on a set of validation graphs.

    Loads the best network checkpoint from ``<save_loc>/network``, evaluates
    it on every graph in ``graph_save_loc`` and pickles the summary results,
    the raw per-episode results and the full episode histories into
    ``<save_loc>/data``.

    Args:
        save_loc: Folder containing the trained agent's 'data' and 'network'
            sub-folders.
        graph_save_loc: Pickle file of validation graphs.
        batched: If True, episodes are evaluated in batches.
        max_batch_size: Maximum number of episodes per batch (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    # Always evaluate the checkpoint that scored best during training.
    network_save_path = os.path.join(network_folder, 'network_best.pth')

    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # S2V-DQN flips each spin at most once, so one step per spin suffices.
    step_factor = 1

    # S2V configuration: spin-state-only observations, dense rewards,
    # irreversible spins.
    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # A throwaway environment is created only to read the observation shape.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): torch.device(device) constructs a device object but does
    # not set a process-wide default; kept as-is for behavioural parity.
    torch.device(device)
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))

    # Inference only: freeze all parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive every output file name from the validation-graph file name.
    fname_base = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = fname_base + ".pkl"
    results_raw_fname = fname_base + "_raw.pkl"
    history_fname = fname_base + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,485 | 35.770492 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_40spin/train/train_s2v.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomBarabasiAlbertGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="BA_40spin/s2v"):
    """Train an S2V-DQN agent on random 40-spin Barabasi-Albert graphs.

    Builds the MaxCut spin-system environments, trains a DQN with an MPNN
    network, saves network checkpoints and periodic test scores under
    ``<save_loc>/network``, and finally plots the learning curve and the
    (smoothed) training loss.

    Args:
        save_loc: Folder in which the 'data' and 'network' sub-folders are
            created.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    gamma = 1
    step_fact = 1

    # S2V-DQN configuration: spin-state-only observations, dense rewards,
    # irreversible spins.
    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 40

    # Fresh random BA graph (m=4, discrete edges) for every training episode.
    train_graph_generator = RandomBarabasiAlbertGraphGenerator(
        n_spins=n_spins_train, m_insertion_edges=4, edge_type=EdgeType.DISCRETE)

    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/BA_40spin_m4_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)

    # Ordered so every evaluation sweep sees the same graphs in the same order.
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 2500000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=True,
                replay_start_size=500,
                replay_buffer_size=5000,
                gamma=gamma,
                update_target_frequency=1000,
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=150000,
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=100000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=10000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None
                )

    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############
    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)

    agent.save()

    ############
    # PLOT - learning curve
    ############
    # Context manager ensures the pickle file handle is closed promptly.
    with open(test_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "training_curve")

    plt.plot(data[:, 0], data[:, 1])
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    # Relabel the y-axis according to the metric actually recorded.
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()

    ############
    # PLOT - losses
    ############
    with open(loss_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth the (timestep, loss) series with a moving average of width N.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
if __name__ == "__main__":
run() | 6,849 | 30.422018 | 133 | py |
eco-dqn | eco-dqn-master/experiments/BA_40spin/train/train_eco.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomBarabasiAlbertGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="BA_40spin/eco"):
    """Train an ECO-DQN agent on random 40-spin Barabasi-Albert graphs.

    Builds the MaxCut spin-system environments, trains a DQN with an MPNN
    network, saves network checkpoints and periodic test scores under
    ``<save_loc>/network``, and finally plots the learning curve and the
    (smoothed) training loss.

    Args:
        save_loc: Folder in which the 'data' and 'network' sub-folders are
            created.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    gamma = 0.95
    # ECO-DQN spins are reversible, so episodes last two steps per spin.
    step_fact = 2

    # ECO-DQN configuration: full observable set, BLS reward signal,
    # basin reward scaled by graph size, reversible spins.
    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 40,
                'reversible_spins': True}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 40

    # Fresh random BA graph (m=4, discrete edges) for every training episode.
    train_graph_generator = RandomBarabasiAlbertGraphGenerator(
        n_spins=n_spins_train, m_insertion_edges=4, edge_type=EdgeType.DISCRETE)

    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/BA_40spin_m4_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)

    # Ordered so every evaluation sweep sees the same graphs in the same order.
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 2500000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=False,
                replay_start_size=500,
                replay_buffer_size=5000,
                gamma=gamma,
                update_target_frequency=1000,
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=150000,
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=100000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=10000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None
                )

    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############
    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)

    agent.save()

    ############
    # PLOT - learning curve
    ############
    # Context manager ensures the pickle file handle is closed promptly.
    with open(test_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "training_curve")

    plt.plot(data[:, 0], data[:, 1])
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    # Relabel the y-axis according to the metric actually recorded.
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()

    ############
    # PLOT - losses
    ############
    with open(loss_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth the (timestep, loss) series with a moving average of width N.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
if __name__ == "__main__":
run() | 6,820 | 30.288991 | 133 | py |
eco-dqn | eco-dqn-master/experiments/BA_100spin/train_and_test_eco.py | """
Trains and tests ECO-DQN on 100 spin BA graphs.
"""
import experiments.BA_100spin.test.test_eco as test
import experiments.BA_100spin.train.train_eco as train

# Folder where train.run() writes the network/data and test.run() reads them.
save_loc="BA_100spin/eco"

# Train ECO-DQN on 100-spin BA graphs.
train.run(save_loc)

# Evaluate the trained agent on validation BA graphs of increasing size.
# Batch sizes shrink for larger graphs — presumably to bound memory use;
# confirm against test_network's batching behaviour.
test.run(save_loc, graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_500spin_m4_100graphs.pkl", batched=True, max_batch_size=5)
| 566 | 34.4375 | 118 | py |
eco-dqn | eco-dqn-master/experiments/BA_100spin/train_and_test_s2v.py | """
Trains and tests S2V-DQN on 100 spin BA graphs.
"""
import experiments.BA_100spin.test.test_s2v as test
import experiments.BA_100spin.train.train_s2v as train

# Folder where train.run() writes the network/data and test.run() reads them.
save_loc="BA_100spin/s2v"

# Train S2V-DQN on 100-spin BA graphs.
train.run(save_loc)

# Evaluate the trained agent on validation BA graphs of increasing size.
# Batch sizes shrink for larger graphs — presumably to bound memory use;
# confirm against test_network's batching behaviour.
test.run(save_loc, graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_500spin_m4_100graphs.pkl", batched=True, max_batch_size=5) | 562 | 42.307692 | 118 | py |
eco-dqn | eco-dqn-master/experiments/BA_100spin/test/test_eco.py | """
Tests an agent.
"""
import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_100spin/eco",
        graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Test a pre-trained ECO-DQN agent on a set of validation graphs.

    Loads the best network checkpoint from ``<save_loc>/network``, evaluates
    it on every graph in ``graph_save_loc`` and pickles the summary results,
    the raw per-episode results and the full episode histories into
    ``<save_loc>/data``.

    Args:
        save_loc: Folder containing the trained agent's 'data' and 'network'
            sub-folders.
        graph_save_loc: Pickle file of validation graphs.
        batched: If True, episodes are evaluated in batches.
        max_batch_size: Maximum number of episodes per batch (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    # Always evaluate the checkpoint that scored best during training.
    network_save_path = os.path.join(network_folder, 'network_best.pth')

    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # ECO-DQN spins are reversible, so allow two steps per spin.
    step_factor = 2

    # ECO-DQN configuration: full observable set, BLS reward signal,
    # basin reward scaled by graph size, reversible spins.
    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 100,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # A throwaway environment is created only to read the observation shape.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): torch.device(device) constructs a device object but does
    # not set a process-wide default; kept as-is for behavioural parity.
    torch.device(device)
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))

    # Inference only: freeze all parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive every output file name from the validation-graph file name.
    fname_base = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = fname_base + ".pkl"
    results_raw_fname = fname_base + "_raw.pkl"
    history_fname = fname_base + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,535 | 34.716535 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_100spin/test/test_s2v.py | """
Tests an agent.
"""
import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_100spin/s2v",
        graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Test a pre-trained S2V-DQN agent on a set of validation graphs.

    Loads the best network checkpoint from ``<save_loc>/network``, evaluates
    it on every graph in ``graph_save_loc`` and pickles the summary results,
    the raw per-episode results and the full episode histories into
    ``<save_loc>/data``.

    Args:
        save_loc: Folder containing the trained agent's 'data' and 'network'
            sub-folders.
        graph_save_loc: Pickle file of validation graphs.
        batched: If True, episodes are evaluated in batches.
        max_batch_size: Maximum number of episodes per batch (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    # Always evaluate the checkpoint that scored best during training.
    network_save_path = os.path.join(network_folder, 'network_best.pth')

    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # S2V-DQN flips each spin at most once, so one step per spin suffices.
    step_factor = 1

    # S2V configuration: spin-state-only observations, dense rewards,
    # irreversible spins.
    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # A throwaway environment is created only to read the observation shape.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): torch.device(device) constructs a device object but does
    # not set a process-wide default; kept as-is for behavioural parity.
    torch.device(device)
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))

    # Inference only: freeze all parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive every output file name from the validation-graph file name.
    fname_base = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = fname_base + ".pkl"
    results_raw_fname = fname_base + "_raw.pkl"
    history_fname = fname_base + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,512 | 34.81746 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_100spin/train/train_s2v.py | """
Trains an agent using a message passing neural network
"""
import os
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.envs.utils import (SetGraphGenerator,
RandomBarabasiAlbertGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable, DEFAULT_OBSERVABLES)
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.networks.mpnn import MPNN
import numpy as np
import pickle
import matplotlib.pyplot as plt
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="BA_100spin/s2v"):
    """Train an S2V-DQN agent on random 100-spin Barabasi-Albert graphs.

    Builds the MaxCut spin-system environments, trains a DQN with an MPNN
    network, saves network checkpoints and periodic test scores under
    ``<save_loc>/network``, and finally plots the learning curve and the
    (smoothed) training loss.

    Args:
        save_loc: Folder in which the 'data' and 'network' sub-folders are
            created.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    gamma = 1
    step_fact = 1

    # S2V-DQN configuration: spin-state-only observations, dense rewards,
    # irreversible spins.
    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 100

    # Fresh random BA graph (m=4, discrete edges) for every training episode.
    train_graph_generator = RandomBarabasiAlbertGraphGenerator(
        n_spins=n_spins_train, m_insertion_edges=4, edge_type=EdgeType.DISCRETE)

    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/BA_100spin_m4_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)

    # Ordered so every evaluation sweep sees the same graphs in the same order.
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 8000000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=True,
                replay_start_size=1500,
                replay_buffer_size=10000,
                gamma=gamma,
                update_target_frequency=2500,
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=800000,
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=400000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=50000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None
                )

    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############
    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)

    agent.save()

    ############
    # PLOT - learning curve
    ############
    # Context manager ensures the pickle file handle is closed promptly.
    with open(test_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "training_curve")

    plt.plot(data[:, 0], data[:, 1])
    # Labelled "Timestep" to match the sibling training scripts: data[:, 0]
    # holds the training timestep at each evaluation.
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    # Relabel the y-axis according to the metric actually recorded.
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()

    ############
    # PLOT - losses
    ############
    with open(loss_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth the (timestep, loss) series with a moving average of width N.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
if __name__ == "__main__":
run() | 6,913 | 29.324561 | 133 | py |
eco-dqn | eco-dqn-master/experiments/BA_100spin/train/train_eco.py | """
Trains an agent using a message passing neural network
"""
import os
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.envs.utils import (SetGraphGenerator,
RandomBarabasiAlbertGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable, DEFAULT_OBSERVABLES)
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.networks.mpnn import MPNN
import numpy as np
import pickle
import matplotlib.pyplot as plt
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="BA_100spin/eco"):
    """Train an ECO-DQN agent on random 100-spin Barabasi-Albert graphs.

    Builds the MaxCut spin-system environments, trains a DQN with an MPNN
    network, saves network checkpoints and periodic test scores under
    ``<save_loc>/network``, and finally plots the learning curve and the
    (smoothed) training loss.

    Args:
        save_loc: Folder in which the 'data' and 'network' sub-folders are
            created.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    gamma = 0.95
    # ECO-DQN spins are reversible, so episodes last two steps per spin.
    step_fact = 2

    # ECO-DQN configuration: full observable set, BLS reward signal,
    # basin reward scaled by graph size, reversible spins.
    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 100,
                'reversible_spins': True}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 100

    # Fresh random BA graph (m=4, discrete edges) for every training episode.
    train_graph_generator = RandomBarabasiAlbertGraphGenerator(
        n_spins=n_spins_train, m_insertion_edges=4, edge_type=EdgeType.DISCRETE)

    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/BA_100spin_m4_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)

    # Ordered so every evaluation sweep sees the same graphs in the same order.
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 8000000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=False,
                replay_start_size=1500,
                replay_buffer_size=10000,
                gamma=gamma,
                update_target_frequency=2500,
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=800000,
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=400000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=50000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None
                )

    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############
    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)

    agent.save()

    ############
    # PLOT - learning curve
    ############
    # Context manager ensures the pickle file handle is closed promptly.
    with open(test_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "training_curve")

    plt.plot(data[:, 0], data[:, 1])
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    # Relabel the y-axis according to the metric actually recorded.
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()

    ############
    # PLOT - losses
    ############
    with open(loss_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth the (timestep, loss) series with a moving average of width N.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
if __name__ == "__main__":
run() | 6,908 | 29.302632 | 133 | py |
eco-dqn | eco-dqn-master/experiments/ER_40spin/train_and_test_eco.py | """
Trains and tests ECO-DQN on 40 spin ER graphs.
"""
import experiments.ER_40spin.test.test_eco as test
import experiments.ER_40spin.train.train_eco as train
# Folder for the trained agent and the evaluation results.
save_loc = "ER_40spin/eco"

# Train the agent from scratch.
train.run(save_loc)

# Evaluate on progressively larger ER validation sets, capping the number of
# parallel environments on the bigger systems to keep memory usage in check.
validation_runs = [
    ("_graphs/validation/ER_40spin_p15_100graphs.pkl", None),
    ("_graphs/validation/ER_60spin_p15_100graphs.pkl", None),
    ("_graphs/validation/ER_100spin_p15_100graphs.pkl", None),
    ("_graphs/validation/ER_200spin_p15_100graphs.pkl", 25),
    ("_graphs/validation/ER_500spin_p15_100graphs.pkl", 5),
]
for graph_loc, batch_cap in validation_runs:
    test.run(save_loc, graph_save_loc=graph_loc, batched=True, max_batch_size=batch_cap)
| 802 | 46.235294 | 119 | py |
eco-dqn | eco-dqn-master/experiments/ER_40spin/train_and_test_s2v.py | """
Trains and tests S2V-DQN on 40 spin ER graphs.
"""
import experiments.ER_40spin.test.test_s2v as test
import experiments.ER_40spin.train.train_s2v as train
# Folder for the trained agent and the evaluation results.
save_loc = "ER_40spin/s2v"

# Train the agent from scratch.
train.run(save_loc)

# Evaluate on progressively larger ER validation sets; the 200-spin set is
# batched in groups of 25 to bound memory usage.
for graph_loc, batch_cap in [
        ("_graphs/validation/ER_40spin_p15_100graphs.pkl", None),
        ("_graphs/validation/ER_60spin_p15_100graphs.pkl", None),
        ("_graphs/validation/ER_100spin_p15_100graphs.pkl", None),
        ("_graphs/validation/ER_200spin_p15_100graphs.pkl", 25),
]:
    test.run(save_loc, graph_save_loc=graph_loc, batched=True, max_batch_size=batch_cap)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_500spin_p15_100graphs.pkl", batched=True, max_batch_size=5) | 800 | 52.4 | 119 | py |
eco-dqn | eco-dqn-master/experiments/ER_40spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import datetime
def run(save_loc="ER_40spin/eco",
        graph_save_loc="_graphs/validation/ER_40spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Test a trained ECO-DQN agent on a set of held-out validation graphs.

    Loads the best network checkpoint from "<save_loc>/network", runs it on
    every graph in 'graph_save_loc', and pickles the summary results, the raw
    per-episode results and the full episode histories to "<save_loc>/data".

    Args:
        save_loc (str): Folder containing the trained network ('network/')
            and receiving the result files ('data/').
        graph_save_loc (str): Pickle of validation graphs (adjacency matrices).
        batched (bool): If True, solve multiple graphs in parallel.
        max_batch_size (int or None): Cap on the number of parallel
            environments per batch (None = all graphs at once).
    """

    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    # Evaluate the checkpoint that scored best during training.
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # Episodes last 2x the number of spins, matching the training setup.
    step_factor = 2

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 40,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway environment, used only to infer the observation size below.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch.device(device)
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Name the output files after the validation graph set.
    graph_stem = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_stem + ".pkl"
    results_raw_fname = "results_" + graph_stem + "_raw.pkl"
    history_fname = "results_" + graph_stem + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,610 | 35.595238 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_40spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import datetime
def run(save_loc="ER_40spin/s2v",
        graph_save_loc="_graphs/validation/ER_40spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Test a trained S2V-DQN agent on a set of held-out validation graphs.

    Loads the best network checkpoint from "<save_loc>/network", runs it on
    every graph in 'graph_save_loc', and pickles the summary results, the raw
    per-episode results and the full episode histories to "<save_loc>/data".

    Args:
        save_loc (str): Folder containing the trained network ('network/')
            and receiving the result files ('data/').
        graph_save_loc (str): Pickle of validation graphs (adjacency matrices).
        batched (bool): If True, solve multiple graphs in parallel.
        max_batch_size (int or None): Cap on the number of parallel
            environments per batch (None = all graphs at once).
    """

    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    # Evaluate the checkpoint that scored best during training.
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # S2V-DQN: one (irreversible) flip per spin, so episodes last n_spins steps.
    step_factor = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway environment, used only to infer the observation size below.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch.device(device)
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Name the output files after the validation graph set.
    graph_stem = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_stem + ".pkl"
    results_raw_fname = "results_" + graph_stem + "_raw.pkl"
    history_fname = "results_" + graph_stem + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,588 | 35.420635 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_40spin/train/train_s2v.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomErdosRenyiGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="ER_40spin/s2v"):
    """Train an S2V-DQN agent on random 40-spin ER graphs.

    Trains an MPNN-based DQN with the S2V configuration (spin-state
    observations only, dense rewards, irreversible spins), periodically
    evaluating on a fixed set of pre-generated test graphs.  The network,
    test scores, losses and summary plots are written under 'save_loc'.

    Args:
        save_loc (str): Root folder for the 'data' and 'network' subfolders.
    """

    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # S2V-DQN: undiscounted returns and one (irreversible) flip per spin.
    gamma = 1
    step_fact = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 40

    train_graph_generator = RandomErdosRenyiGraphGenerator(n_spins=n_spins_train, p_connection=0.15, edge_type=EdgeType.DISCRETE)

    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/ER_40spin_p15_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)

    # Serve the test graphs in a fixed order so scores are comparable.
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 2500000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,

                init_network_params=None,
                init_weight_std=0.01,

                double_dqn=True,
                clip_Q_targets=True,

                replay_start_size=500,
                replay_buffer_size=5000,
                gamma=gamma,
                update_target_frequency=1000,

                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,

                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,

                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=150000,

                adam_epsilon=1e-8,
                logging=False,
                loss="mse",

                save_network_frequency=100000,
                network_save_path=network_save_path,

                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=10000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,

                seed=None)

    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############

    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)

    agent.save()

    ############
    # PLOT - learning curve
    ############

    # Fix: close the file after reading (was pickle.load(open(...))).
    with open(test_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "training_curve")

    plt.plot(data[:, 0], data[:, 1])
    # The x-axis holds the timestep at which each evaluation was run.
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    # Fix: clear the figure so the loss plot does not overlay this curve.
    plt.clf()

    ############
    # PLOT - losses
    ############

    with open(loss_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth the raw losses with a moving average over N updates.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")

    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()
if __name__ == "__main__":
run() | 6,809 | 30.09589 | 127 | py |
eco-dqn | eco-dqn-master/experiments/ER_40spin/train/train_eco.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomErdosRenyiGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="ER_40spin/eco"):
    """Train an ECO-DQN agent on random 40-spin ER graphs.

    Trains an MPNN-based DQN with the ECO configuration (full observables,
    BLS rewards, reversible spins with a basin reward), periodically
    evaluating on a fixed set of pre-generated test graphs.  The network,
    test scores, losses and summary plots are written under 'save_loc'.

    Args:
        save_loc (str): Root folder for the 'data' and 'network' subfolders.
    """

    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # ECO-DQN: discounted returns and episodes of 2x the number of spins.
    gamma = 0.95
    step_fact = 2

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 40,
                'reversible_spins': True}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 40

    train_graph_generator = RandomErdosRenyiGraphGenerator(n_spins=n_spins_train, p_connection=0.15, edge_type=EdgeType.DISCRETE)

    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/ER_40spin_p15_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)

    # Serve the test graphs in a fixed order so scores are comparable.
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 2500000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,

                init_network_params=None,
                init_weight_std=0.01,

                double_dqn=True,
                clip_Q_targets=False,

                replay_start_size=500,
                replay_buffer_size=5000,
                gamma=gamma,
                update_target_frequency=1000,

                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,

                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,

                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=150000,

                adam_epsilon=1e-8,
                logging=False,
                loss="mse",

                save_network_frequency=100000,
                network_save_path=network_save_path,

                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=10000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,

                seed=None)

    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############

    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)

    agent.save()

    ############
    # PLOT - learning curve
    ############

    # Fix: close the file after reading (was pickle.load(open(...))).
    with open(test_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "training_curve")

    plt.plot(data[:, 0], data[:, 1])
    # The x-axis holds the timestep at which each evaluation was run.
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    # Fix: clear the figure so the loss plot does not overlay this curve.
    plt.clf()

    ############
    # PLOT - losses
    ############

    with open(loss_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth the raw losses with a moving average over N updates.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")

    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()
if __name__ == "__main__":
run() | 6,815 | 30.266055 | 127 | py |
eco-dqn | eco-dqn-master/experiments/BA_200spin/train_and_test_eco.py | """
Trains and tests ECO-DQN on 200 spin BA graphs.
"""
import experiments.BA_200spin.test.test_eco as test
import experiments.BA_200spin.train.train_eco as train
# Folder for the trained network and evaluation results.
save_loc = "BA_200spin/eco"

# Train from scratch, then evaluate on the held-out BA validation sets,
# capping the number of parallel environments to bound memory usage.
train.run(save_loc)

for graph_loc, batch_cap in (
        ("_graphs/validation/BA_200spin_m4_100graphs.pkl", 25),
        ("_graphs/validation/BA_500spin_m4_100graphs.pkl", 5),
):
    test.run(save_loc, graph_save_loc=graph_loc, batched=True, max_batch_size=batch_cap)
| 447 | 28.866667 | 116 | py |
eco-dqn | eco-dqn-master/experiments/BA_200spin/train_and_test_s2v.py | """
Trains and tests S2V-DQN on 20 spin BA graphs.
"""
import experiments.BA_200spin.test.test_s2v as test
import experiments.BA_200spin.train.train_s2v as train
# Folder for the trained network and evaluation results.
save_loc = "BA_200spin/s2v"

# Train from scratch, then evaluate on the 200-spin BA validation set.
train.run(save_loc)
test.run(save_loc,
         graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl",
         batched=True,
         max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_500spin_m4_100graphs.pkl", batched=True, max_batch_size=5) | 443 | 36 | 116 | py |
eco-dqn | eco-dqn-master/experiments/BA_200spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_200spin/eco",
        graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Test a trained ECO-DQN agent on a set of held-out BA validation graphs.

    Loads the best network checkpoint from "<save_loc>/network", runs it on
    every graph in 'graph_save_loc', and pickles the summary results, the raw
    per-episode results and the full episode histories to "<save_loc>/data".

    Args:
        save_loc (str): Folder containing the trained network ('network/')
            and receiving the result files ('data/').
        graph_save_loc (str): Pickle of validation graphs (adjacency matrices).
        batched (bool): If True, solve multiple graphs in parallel.
        max_batch_size (int or None): Cap on the number of parallel
            environments per batch (None = all graphs at once).
    """

    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    # Evaluate the checkpoint that scored best during training.
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # Episodes last 2x the number of spins, matching the training setup.
    step_factor = 2

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 200,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway environment, used only to infer the observation size below.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch.device(device)
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Name the output files after the validation graph set.
    graph_stem = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_stem + ".pkl"
    results_raw_fname = "results_" + graph_stem + "_raw.pkl"
    history_fname = "results_" + graph_stem + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,509 | 35.967213 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_200spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_200spin/s2v",
        graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Test a trained S2V-DQN agent on a set of held-out BA validation graphs.

    Loads the best network checkpoint from "<save_loc>/network", runs it on
    every graph in 'graph_save_loc', and pickles the summary results, the raw
    per-episode results and the full episode histories to "<save_loc>/data".

    Args:
        save_loc (str): Folder containing the trained network ('network/')
            and receiving the result files ('data/').
        graph_save_loc (str): Pickle of validation graphs (adjacency matrices).
        batched (bool): If True, solve multiple graphs in parallel.
        max_batch_size (int or None): Cap on the number of parallel
            environments per batch (None = all graphs at once).
    """

    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    # Evaluate the checkpoint that scored best during training.
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # S2V-DQN: one (irreversible) flip per spin, so episodes last n_spins steps.
    step_factor = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway environment, used only to infer the observation size below.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch.device(device)
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Name the output files after the validation graph set.
    graph_stem = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_stem + ".pkl"
    results_raw_fname = "results_" + graph_stem + "_raw.pkl"
    history_fname = "results_" + graph_stem + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,486 | 36.082645 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_200spin/train/train_s2v.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomBarabasiAlbertGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="BA_200spin/s2v"):
    """Train an S2V-DQN agent on random 200-spin BA graphs.

    Trains an MPNN-based DQN with the S2V configuration (spin-state
    observations only, dense rewards, irreversible spins), periodically
    evaluating on a fixed set of pre-generated test graphs.  The network,
    test scores, losses and summary plots are written under 'save_loc'.

    Args:
        save_loc (str): Root folder for the 'data' and 'network' subfolders.
    """

    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # S2V-DQN: undiscounted returns and one (irreversible) flip per spin.
    gamma = 1
    step_fact = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 200

    train_graph_generator = RandomBarabasiAlbertGraphGenerator(n_spins=n_spins_train, m_insertion_edges=4, edge_type=EdgeType.DISCRETE)

    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/BA_200spin_m4_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)

    # Serve the test graphs in a fixed order so scores are comparable.
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 10000000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,

                init_network_params=None,
                init_weight_std=0.01,

                double_dqn=True,
                clip_Q_targets=True,

                replay_start_size=3000,
                replay_buffer_size=15000,
                gamma=gamma,
                update_target_frequency=4000,

                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,

                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,

                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=800000,

                adam_epsilon=1e-8,
                logging=False,
                loss="mse",

                save_network_frequency=400000,
                network_save_path=network_save_path,

                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=50000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,

                seed=None)

    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############

    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)

    agent.save()

    ############
    # PLOT - learning curve
    ############

    # Fix: close the file after reading (was pickle.load(open(...))).
    with open(test_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "training_curve")

    plt.plot(data[:, 0], data[:, 1])
    # The x-axis holds the timestep at which each evaluation was run.
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    # Clear the figure so the loss plot does not overlay this curve.
    plt.clf()

    ############
    # PLOT - losses
    ############

    with open(loss_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth the raw losses with a moving average over N updates.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")

    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    # Leave a clean figure behind (matches the sibling training scripts).
    plt.clf()
if __name__ == "__main__":
run() | 6,824 | 30.164384 | 133 | py |
eco-dqn | eco-dqn-master/experiments/BA_200spin/train/train_eco.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomBarabasiAlbertGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="BA_200spin/eco"):
    """Train an ECO-DQN agent on random 200-spin Barabasi-Albert graphs.

    Trains a DQN with an MPNN Q-network for 10M timesteps, periodically
    evaluating on a fixed set of pre-generated test graphs.  The network,
    test scores and losses are saved under ``save_loc``, together with
    training-curve and loss plots.

    Args:
        save_loc: Root folder for the 'data' and 'network' sub-folders.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    gamma = 0.95
    step_fact = 2  # episode length = step_fact * n_spins

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                # NOTE(review): sibling ER scripts use 1/n_spins (=1/200 here);
                # confirm 1/40 is the intended basin reward for BA graphs.
                'basin_reward': 1. / 40,
                'reversible_spins': True}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 200

    train_graph_generator = RandomBarabasiAlbertGraphGenerator(
        n_spins=n_spins_train, m_insertion_edges=4, edge_type=EdgeType.DISCRETE)

    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/BA_200spin_m4_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)

    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    # All graphs have the same size; read it off a sample from the generator.
    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 10000000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=False,
                replay_start_size=3000,
                replay_buffer_size=15000,
                gamma=gamma,
                update_target_frequency=4000,
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=800000,
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=80000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=10000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None
                )

    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############
    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)

    agent.save()

    ############
    # PLOT - learning curve
    ############
    # Fix: open the pickle via a context manager so the handle is closed.
    with open(test_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "training_curve")

    plt.plot(data[:, 0], data[:, 1])
    plt.xlabel("Timestep")
    # The y-label follows the metric the agent was evaluated with.
    ylabels = {TestMetric.ENERGY_ERROR: "Energy Error",
               TestMetric.BEST_ENERGY: "Best Energy",
               TestMetric.CUMULATIVE_REWARD: "Cumulative Reward",
               TestMetric.MAX_CUT: "Max Cut",
               TestMetric.FINAL_CUT: "Final Cut"}
    plt.ylabel(ylabels.get(agent.test_metric, "Mean reward"))

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()

    ############
    # PLOT - losses
    ############
    with open(loss_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth (timestep, loss) pairs with a 50-point moving average.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")

    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')


if __name__ == "__main__":
    run()
eco-dqn | eco-dqn-master/experiments/ER_200spin/train_and_test_eco.py | """
Trains and tests ECO-DQN on 200 spin ER graphs.
"""
import experiments.ER_200spin.test.test_eco as test
import experiments.ER_200spin.train.train_eco as train
save_loc = "ER_200spin/eco"

# Train once, then evaluate the saved network on progressively larger graphs.
train.run(save_loc)
for graph_loc, batch_size in [
        ("_graphs/validation/ER_200spin_p15_100graphs.pkl", 25),
        ("_graphs/validation/ER_500spin_p15_100graphs.pkl", 5)]:
    test.run(save_loc, graph_save_loc=graph_loc, batched=True, max_batch_size=batch_size)
| 449 | 29 | 117 | py |
eco-dqn | eco-dqn-master/experiments/ER_200spin/train_and_test_s2v.py | """
Trains and tests S2V-DQN on 200 spin ER graphs.
"""
import experiments.ER_200spin.test.test_s2v as test
import experiments.ER_200spin.train.train_s2v as train
save_loc = "ER_200spin/s2v"

# Train once, then evaluate the saved network on progressively larger graphs.
train.run(save_loc)
for graph_loc, batch_size in [
        ("_graphs/validation/ER_200spin_p15_100graphs.pkl", 25),
        ("_graphs/validation/ER_500spin_p15_100graphs.pkl", 5)]:
    test.run(save_loc, graph_save_loc=graph_loc, batched=True, max_batch_size=batch_size)
eco-dqn | eco-dqn-master/experiments/ER_200spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_200spin/eco",
        graph_save_loc="_graphs/validation/ER_200spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a trained ECO-DQN network on a set of validation graphs.

    Loads the best network saved under ``save_loc`` and tests it on every
    graph in ``graph_save_loc``, pickling summary, raw and per-step history
    results into ``save_loc``/data.

    Args:
        save_loc: Folder containing the 'network' and 'data' sub-folders.
        graph_save_loc: Pickle file of validation graphs to test on.
        batched: Whether to run test episodes in parallel batches.
        max_batch_size: Cap on graphs per batch (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    step_factor = 2  # episode length = step_factor * n_spins

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 200,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway environment, used only to read the observation shape.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): torch.device(device) alone does not set a default device;
    # the network is moved to `device` explicitly below.
    torch.device(device)
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze the parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all output names from the validation-set filename stem.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_name + ".pkl"
    results_raw_fname = "results_" + graph_name + "_raw.pkl"
    history_fname = "results_" + graph_name + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))


if __name__ == "__main__":
    run()
eco-dqn | eco-dqn-master/experiments/ER_200spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_200spin/s2v",
        graph_save_loc="_graphs/validation/ER_200spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a trained S2V-DQN network on a set of validation graphs.

    Loads the best network saved under ``save_loc`` and tests it on every
    graph in ``graph_save_loc``, pickling summary, raw and per-step history
    results into ``save_loc``/data.

    Args:
        save_loc: Folder containing the 'network' and 'data' sub-folders.
        graph_save_loc: Pickle file of validation graphs to test on.
        batched: Whether to run test episodes in parallel batches.
        max_batch_size: Cap on graphs per batch (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # S2V episodes flip each spin at most once: one step per spin.
    step_factor = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway environment, used only to read the observation shape.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): torch.device(device) alone does not set a default device;
    # the network is moved to `device` explicitly below.
    torch.device(device)
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze the parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all output names from the validation-set filename stem.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_name + ".pkl"
    results_raw_fname = "results_" + graph_name + "_raw.pkl"
    history_fname = "results_" + graph_name + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))


if __name__ == "__main__":
    run()
eco-dqn | eco-dqn-master/experiments/ER_200spin/train/train_s2v.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomErdosRenyiGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="ER_200spin/s2v"):
    """Train an S2V-DQN agent on random 200-spin Erdos-Renyi graphs.

    Trains a DQN with an MPNN Q-network for 10M timesteps, periodically
    evaluating on a fixed set of pre-generated test graphs.  The network,
    test scores and losses are saved under ``save_loc``, together with
    training-curve and loss plots.

    Args:
        save_loc: Root folder for the 'data' and 'network' sub-folders.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    gamma = 1
    step_fact = 1  # S2V: one irreversible flip per spin

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 200

    train_graph_generator = RandomErdosRenyiGraphGenerator(
        n_spins=n_spins_train, p_connection=0.15, edge_type=EdgeType.DISCRETE)

    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/ER_200spin_p15_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)

    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    # All graphs have the same size; read it off a sample from the generator.
    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 10000000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=True,
                replay_start_size=3000,
                replay_buffer_size=15000,
                gamma=gamma,
                update_target_frequency=4000,
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=800000,
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=400000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=50000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None
                )

    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############
    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)

    agent.save()

    ############
    # PLOT - learning curve
    ############
    # Fix: open the pickle via a context manager so the handle is closed.
    with open(test_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "training_curve")

    plt.plot(data[:, 0], data[:, 1])
    # Fix: the first column is the agent timestep (as in the loss plot below),
    # not a "training run" index — label it consistently.
    plt.xlabel("Timestep")
    # The y-label follows the metric the agent was evaluated with.
    ylabels = {TestMetric.ENERGY_ERROR: "Energy Error",
               TestMetric.BEST_ENERGY: "Best Energy",
               TestMetric.CUMULATIVE_REWARD: "Cumulative Reward",
               TestMetric.MAX_CUT: "Max Cut",
               TestMetric.FINAL_CUT: "Final Cut"}
    plt.ylabel(ylabels.get(agent.test_metric, "Mean reward"))

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()

    ############
    # PLOT - losses
    ############
    with open(loss_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth (timestep, loss) pairs with a 50-point moving average.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")

    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')


if __name__ == "__main__":
    run()
eco-dqn | eco-dqn-master/experiments/ER_200spin/train/train_eco.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomErdosRenyiGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="ER_200spin/eco"):
    """Train an ECO-DQN agent on random 200-spin Erdos-Renyi graphs.

    Trains a DQN with an MPNN Q-network for 10M timesteps, periodically
    evaluating on a fixed set of pre-generated test graphs.  The network,
    test scores and losses are saved under ``save_loc``, together with
    training-curve and loss plots.

    Args:
        save_loc: Root folder for the 'data' and 'network' sub-folders.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    gamma = 0.95
    step_fact = 2  # episode length = step_fact * n_spins

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 200,  # 1 / n_spins
                'reversible_spins': True}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 200

    train_graph_generator = RandomErdosRenyiGraphGenerator(
        n_spins=n_spins_train, p_connection=0.15, edge_type=EdgeType.DISCRETE)

    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/ER_200spin_p15_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)

    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    # All graphs have the same size; read it off a sample from the generator.
    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 10000000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=False,
                replay_start_size=3000,
                replay_buffer_size=15000,
                gamma=gamma,
                update_target_frequency=4000,
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=800000,
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=400000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=50000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None
                )

    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############
    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)

    agent.save()

    ############
    # PLOT - learning curve
    ############
    # Fix: open the pickle via a context manager so the handle is closed.
    with open(test_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "training_curve")

    plt.plot(data[:, 0], data[:, 1])
    plt.xlabel("Timestep")
    # The y-label follows the metric the agent was evaluated with.
    ylabels = {TestMetric.ENERGY_ERROR: "Energy Error",
               TestMetric.BEST_ENERGY: "Best Energy",
               TestMetric.CUMULATIVE_REWARD: "Cumulative Reward",
               TestMetric.MAX_CUT: "Max Cut",
               TestMetric.FINAL_CUT: "Final Cut"}
    plt.ylabel(ylabels.get(agent.test_metric, "Mean reward"))

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()

    ############
    # PLOT - losses
    ############
    with open(loss_save_path, 'rb') as f:
        data = np.array(pickle.load(f))

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth (timestep, loss) pairs with a 50-point moving average.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")

    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')


if __name__ == "__main__":
    run()
eco-dqn | eco-dqn-master/experiments/ER_100spin/train_and_test_eco.py | """
Trains and tests ECO-DQN on 100 spin ER graphs.
"""
import experiments.ER_100spin.test.test_eco as test
import experiments.ER_100spin.train.train_eco as train
save_loc = "ER_100spin/mpnn"

# Train once, then evaluate the saved network on progressively larger graphs.
train.run(save_loc)
for graph_loc, batch_size in [
        ("_graphs/validation/ER_100spin_p15_100graphs.pkl", None),
        ("_graphs/validation/ER_200spin_p15_100graphs.pkl", 25),
        ("_graphs/validation/ER_500spin_p15_100graphs.pkl", 5)]:
    test.run(save_loc, graph_save_loc=graph_loc, batched=True, max_batch_size=batch_size)
| 570 | 34.6875 | 119 | py |
eco-dqn | eco-dqn-master/experiments/ER_100spin/train_and_test_s2v.py | """
Trains and tests S2V-DQN on 100 spin ER graphs.
"""
import experiments.ER_100spin.test.test_s2v as test
import experiments.ER_100spin.train.train_s2v as train
save_loc = "ER_100spin/s2v"

# Train once, then evaluate the saved network on progressively larger graphs.
train.run(save_loc)
for graph_loc, batch_size in [
        ("_graphs/validation/ER_100spin_p15_100graphs.pkl", None),
        ("_graphs/validation/ER_200spin_p15_100graphs.pkl", 25),
        ("_graphs/validation/ER_500spin_p15_100graphs.pkl", 5)]:
    test.run(save_loc, graph_save_loc=graph_loc, batched=True, max_batch_size=batch_size)
eco-dqn | eco-dqn-master/experiments/ER_100spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_100spin/eco",
        graph_save_loc="_graphs/validation/ER_100spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a trained ECO-DQN network on a set of validation graphs.

    Loads the best network saved under ``save_loc`` and tests it on every
    graph in ``graph_save_loc``, pickling summary, raw and per-step history
    results into ``save_loc``/data.

    Args:
        save_loc: Folder containing the 'network' and 'data' sub-folders.
        graph_save_loc: Pickle file of validation graphs to test on.
        batched: Whether to run test episodes in parallel batches.
        max_batch_size: Cap on graphs per batch (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    step_factor = 2  # episode length = step_factor * n_spins

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 100,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway environment, used only to read the observation shape.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): torch.device(device) alone does not set a default device;
    # the network is moved to `device` explicitly below.
    torch.device(device)
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze the parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all output names from the validation-set filename stem.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_name + ".pkl"
    results_raw_fname = "results_" + graph_name + "_raw.pkl"
    history_fname = "results_" + graph_name + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))


if __name__ == "__main__":
    run()
eco-dqn | eco-dqn-master/experiments/ER_100spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_100spin/s2v",
        graph_save_loc="_graphs/validation/ER_100spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a trained S2V-DQN network on a set of validation graphs.

    Loads the best network saved under ``save_loc`` and tests it on every
    graph in ``graph_save_loc``, pickling summary, raw and per-step history
    results into ``save_loc``/data.

    Args:
        save_loc: Folder containing the 'network' and 'data' sub-folders.
        graph_save_loc: Pickle file of validation graphs to test on.
        batched: Whether to run test episodes in parallel batches.
        max_batch_size: Cap on graphs per batch (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # S2V episodes flip each spin at most once: one step per spin.
    step_factor = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway environment, used only to read the observation shape.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # NOTE(review): torch.device(device) alone does not set a default device;
    # the network is moved to `device` explicitly below.
    torch.device(device)
    print("Set torch default device to {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)

    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze the parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()

    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all output names from the validation-set filename stem.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + graph_name + ".pkl"
    results_raw_fname = "results_" + graph_name + "_raw.pkl"
    history_fname = "results_" + graph_name + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))


if __name__ == "__main__":
    run()
eco-dqn | eco-dqn-master/experiments/ER_100spin/train/train_s2v.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomErdosRenyiGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="ER_100spin/s2v"):
    """Train an S2V-DQN agent on 100-spin Erdos-Renyi graphs and plot results.

    Uses the S2V-style configuration (spin-state observable only, dense
    reward, irreversible spins).  The trained network, periodic test scores,
    training losses and summary figures are written beneath ``save_loc``.

    Args:
        save_loc (str): Root directory under which 'data' and 'network'
            sub-folders are created for checkpoints, scores and figures.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    gamma = 1
    step_fact = 1

    # S2V-DQN: only the raw spin state is observed, rewards are dense, and
    # each spin may be flipped at most once (reversible_spins=False).
    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 100
    train_graph_generator = RandomErdosRenyiGraphGenerator(n_spins=n_spins_train,
                                                           p_connection=0.15,
                                                           edge_type=EdgeType.DISCRETE)

    # Pre-generated test graphs: a fixed set, always evaluated in order.
    graph_save_loc = "_graphs/testing/ER_100spin_p15_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    # NOTE(review): this draws (and discards) one graph from the training
    # generator purely to read its size, which also advances the generator's
    # random state -- kept as-is to preserve behaviour.
    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 8000000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=True,
                replay_start_size=1500,
                replay_buffer_size=10000,
                gamma=gamma,
                update_target_frequency=2500,
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=800000,
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=400000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=50000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None)
    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############

    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)
    agent.save()

    ############
    # PLOT - learning curve
    ############

    # test_scores.pkl rows are (timestep, test score).
    with open(test_save_path, 'rb') as f:  # 'with' closes the handle (was leaked)
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "training_curve")
    plt.plot(data[:, 0], data[:, 1])
    plt.xlabel("Timestep")  # fixed: x-axis is the timestep, not "Training run"
    plt.ylabel("Mean reward")
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()

    ############
    # PLOT - losses
    ############

    with open(loss_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth the (timestep, loss) series with a width-N moving average.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()  # fixed: reset pyplot state so later scripts start with a clean figure
if __name__ == "__main__":
run() | 6,814 | 30.118721 | 127 | py |
eco-dqn | eco-dqn-master/experiments/ER_100spin/train/train_eco.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomErdosRenyiGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="ER_100spin/eco"):
    """Train an ECO-DQN agent on 100-spin Erdos-Renyi graphs and plot results.

    Uses the ECO-DQN configuration (full observable set, BLS reward signal,
    reversible spins with a basin reward).  The trained network, periodic
    test scores, training losses and summary figures are written beneath
    ``save_loc``.

    Args:
        save_loc (str): Root directory under which 'data' and 'network'
            sub-folders are created for checkpoints, scores and figures.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    gamma = 0.95
    step_fact = 2

    # ECO-DQN: all default observables, BLS rewards, reversible spins with a
    # small reward (1/n_spins) for revisiting the best-known basin.
    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 100,
                'reversible_spins': True}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 100
    train_graph_generator = RandomErdosRenyiGraphGenerator(n_spins=n_spins_train,
                                                           p_connection=0.15,
                                                           edge_type=EdgeType.DISCRETE)

    # Pre-generated test graphs: a fixed set, always evaluated in order.
    graph_save_loc = "_graphs/testing/ER_100spin_p15_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    # NOTE(review): this draws (and discards) one graph from the training
    # generator purely to read its size, which also advances the generator's
    # random state -- kept as-is to preserve behaviour.
    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 8000000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=False,
                replay_start_size=1500,
                replay_buffer_size=10000,
                gamma=gamma,
                update_target_frequency=2500,
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=800000,
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=400000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=50000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None)
    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############

    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)
    agent.save()

    ############
    # PLOT - learning curve
    ############

    # test_scores.pkl rows are (timestep, test score).
    with open(test_save_path, 'rb') as f:  # 'with' closes the handle (was leaked)
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "training_curve")
    plt.plot(data[:, 0], data[:, 1])
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()

    ############
    # PLOT - losses
    ############

    with open(loss_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth the (timestep, loss) series with a width-N moving average.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()  # fixed: reset pyplot state so later scripts start with a clean figure
if __name__ == "__main__":
run() | 6,818 | 30.136986 | 127 | py |
eco-dqn | eco-dqn-master/experiments/ER_60spin/train_and_test_eco.py | """
Trains and tests ECO-DQN on 60 spin ER graphs.
"""
# Train ECO-DQN on 60-spin ER graphs, then evaluate the resulting network on
# progressively larger validation graphs to probe generalisation.
import experiments.ER_60spin.train.train_eco as train
import experiments.ER_60spin.test.test_eco as test
# All checkpoints, scores and results are written beneath this directory.
save_loc="ER_60spin/eco"
train.run(save_loc)
# Larger graphs are evaluated in smaller batches (max_batch_size) to bound
# memory usage; None means no cap.
test.run(save_loc, graph_save_loc="_graphs/validation/ER_60spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_100spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_200spin_p15_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_500spin_p15_100graphs.pkl", batched=True, max_batch_size=5)
| 684 | 39.294118 | 119 | py |
eco-dqn | eco-dqn-master/experiments/ER_60spin/train_and_test_s2v.py | """
Trains and tests S2V-DQN on 60 spin ER graphs.
"""
# Train S2V-DQN on 60-spin ER graphs, then evaluate the resulting network on
# progressively larger validation graphs to probe generalisation.
import experiments.ER_60spin.test.test_s2v as test
import experiments.ER_60spin.train.train_s2v as train
# All checkpoints, scores and results are written beneath this directory.
save_loc="ER_60spin/s2v"
train.run(save_loc)
# Larger graphs are evaluated in smaller batches (max_batch_size) to bound
# memory usage; None means no cap.
test.run(save_loc, graph_save_loc="_graphs/validation/ER_60spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_100spin_p15_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_200spin_p15_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/ER_500spin_p15_100graphs.pkl", batched=True, max_batch_size=5) | 681 | 47.714286 | 119 | py |
eco-dqn | eco-dqn-master/experiments/ER_60spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_60spin/eco",
        graph_save_loc="_graphs/validation/ER_60spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a trained ECO-DQN network on a set of validation graphs.

    Loads the best checkpoint saved during training, rebuilds the MPNN with
    the same architecture and environment settings used for ECO-DQN
    training, evaluates it on every graph in ``graph_save_loc`` and pickles
    the summary, raw and per-step results into the experiment's data folder.

    Args:
        save_loc (str): Experiment root containing 'data' and 'network'.
        graph_save_loc (str): Pickle file of validation graphs to evaluate.
        batched (bool): Whether to evaluate graphs in parallel batches.
        max_batch_size (int or None): Cap on the batch size (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # Must mirror the environment configuration used to train ECO-DQN
    # (see train_eco.py for this experiment).
    step_factor = 2
    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 60,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway environment on the first graph, used only to read the
    # observation dimensionality the network must accept.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    # fixed: the original called torch.device(device) as a no-op and printed
    # a misleading "Set torch default device" message; tensors are moved
    # explicitly with .to(device) below.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Running on device: {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Output filenames are derived from the validation-graph filename.
    basename = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + basename + ".pkl"
    results_raw_fname = "results_" + basename + "_raw.pkl"
    history_fname = "results_" + basename + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,507 | 35.95082 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_60spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_60spin/s2v",
        graph_save_loc="_graphs/validation/ER_60spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a trained S2V-DQN network on a set of validation graphs.

    Loads the best checkpoint saved during training, rebuilds the MPNN with
    the same architecture and environment settings used for S2V-DQN
    training, evaluates it on every graph in ``graph_save_loc`` and pickles
    the summary, raw and per-step results into the experiment's data folder.

    Args:
        save_loc (str): Experiment root containing 'data' and 'network'.
        graph_save_loc (str): Pickle file of validation graphs to evaluate.
        batched (bool): Whether to evaluate graphs in parallel batches.
        max_batch_size (int or None): Cap on the batch size (None = no cap).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')

    print("data folder :", data_folder)
    print("network folder :", network_folder)

    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################

    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    # Must mirror the environment configuration used to train S2V-DQN
    # (see train_s2v.py for this experiment).
    step_factor = 1
    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################

    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################

    # Throwaway environment on the first graph, used only to read the
    # observation dimensionality the network must accept.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    # fixed: the original called torch.device(device) as a no-op and printed
    # a misleading "Set torch default device" message; tensors are moved
    # explicitly with .to(device) below.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Running on device: {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters and switch to eval mode.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################

    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Output filenames are derived from the validation-graph filename.
    basename = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_" + basename + ".pkl"
    results_raw_fname = "results_" + basename + "_raw.pkl"
    history_fname = "results_" + basename + "_history.pkl"

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,485 | 36.07438 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_60spin/train/train_s2v.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomErdosRenyiGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="ER_60spin/s2v"):
    """Train an S2V-DQN agent on 60-spin Erdos-Renyi graphs and plot results.

    Uses the S2V-style configuration (spin-state observable only, dense
    reward, irreversible spins).  The trained network, periodic test scores,
    training losses and summary figures are written beneath ``save_loc``.

    Args:
        save_loc (str): Root directory under which 'data' and 'network'
            sub-folders are created for checkpoints, scores and figures.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    gamma = 1
    step_fact = 1

    # S2V-DQN: only the raw spin state is observed, rewards are dense, and
    # each spin may be flipped at most once (reversible_spins=False).
    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 60
    train_graph_generator = RandomErdosRenyiGraphGenerator(n_spins=n_spins_train,
                                                           p_connection=0.15,
                                                           edge_type=EdgeType.DISCRETE)

    # Pre-generated test graphs: a fixed set, always evaluated in order.
    graph_save_loc = "_graphs/testing/ER_60spin_p15_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    # NOTE(review): this draws (and discards) one graph from the training
    # generator purely to read its size, which also advances the generator's
    # random state -- kept as-is to preserve behaviour.
    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 5000000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=True,
                replay_start_size=500,
                replay_buffer_size=5000,
                gamma=gamma,
                update_target_frequency=1000,
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=300000,
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=200000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=20000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None)
    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############

    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)
    agent.save()

    ############
    # PLOT - learning curve
    ############

    # test_scores.pkl rows are (timestep, test score).
    with open(test_save_path, 'rb') as f:  # 'with' closes the handle (was leaked)
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "training_curve")
    plt.plot(data[:, 0], data[:, 1])
    plt.xlabel("Timestep")  # fixed: x-axis is the timestep, not "Training run"
    plt.ylabel("Mean reward")
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()

    ############
    # PLOT - losses
    ############

    with open(loss_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth the (timestep, loss) series with a width-N moving average.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()  # fixed: reset pyplot state so later scripts start with a clean figure
if __name__ == "__main__":
run() | 6,809 | 30.09589 | 127 | py |
eco-dqn | eco-dqn-master/experiments/ER_60spin/train/train_eco.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomErdosRenyiGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="ER_60spin/eco"):
    """Train an ECO-DQN agent on 60-spin Erdos-Renyi graphs and plot results.

    Uses the ECO-DQN configuration (full observable set, BLS reward signal,
    reversible spins with a basin reward).  The trained network, periodic
    test scores, training losses and summary figures are written beneath
    ``save_loc``.

    Args:
        save_loc (str): Root directory under which 'data' and 'network'
            sub-folders are created for checkpoints, scores and figures.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################

    gamma = 0.95
    step_fact = 2

    # ECO-DQN: all default observables, BLS rewards, reversible spins with a
    # small reward (1/n_spins) for revisiting the best-known basin.
    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 60,
                'reversible_spins': True}

    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################

    n_spins_train = 60
    train_graph_generator = RandomErdosRenyiGraphGenerator(n_spins=n_spins_train,
                                                           p_connection=0.15,
                                                           edge_type=EdgeType.DISCRETE)

    # Pre-generated test graphs: a fixed set, always evaluated in order.
    graph_save_loc = "_graphs/testing/ER_60spin_p15_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)

    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################

    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train * step_fact),
                                 **env_args)]

    # NOTE(review): this draws (and discards) one graph from the training
    # generator purely to read its size, which also advances the generator's
    # random state -- kept as-is to preserve behaviour.
    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test * step_fact),
                                **env_args)]

    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################

    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    mk_dir(data_folder)
    mk_dir(network_folder)

    network_save_path = os.path.join(network_folder, 'network.pth')
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')

    ####################################################
    # SET UP AGENT
    ####################################################

    nb_steps = 5000000

    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)

    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=False,
                replay_start_size=500,
                replay_buffer_size=5000,
                gamma=gamma,
                update_target_frequency=1000,
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,
                minibatch_size=64,
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,
                final_exploration_step=300000,
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=200000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=20000,
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None)
    print("\n Created DQN agent with network:\n\n", agent.network)

    #############
    # TRAIN AGENT
    #############

    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)
    agent.save()

    ############
    # PLOT - learning curve
    ############

    # test_scores.pkl rows are (timestep, test score).
    with open(test_save_path, 'rb') as f:  # 'with' closes the handle (was leaked)
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "training_curve")
    plt.plot(data[:, 0], data[:, 1])
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()

    ############
    # PLOT - losses
    ############

    with open(loss_save_path, 'rb') as f:
        data = pickle.load(f)
    data = np.array(data)

    fig_fname = os.path.join(network_folder, "loss")

    # Smooth the (timestep, loss) series with a width-N moving average.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')

    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)

    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()  # reset pyplot state so later scripts start with a clean figure
if __name__ == "__main__":
run() | 6,827 | 29.895928 | 127 | py |
eco-dqn | eco-dqn-master/experiments/BA_20spin/train_and_test_eco.py | """
Trains and tests ECO-DQN on 20 spin BA graphs.
"""
# Train ECO-DQN on 20-spin BA graphs, then evaluate the resulting network on
# progressively larger validation graphs to probe generalisation.
import experiments.BA_20spin.test.test_eco as test
import experiments.BA_20spin.train.train_eco as train
# All checkpoints, scores and results are written beneath this directory.
save_loc="BA_20spin/eco"
train.run(save_loc)
# Larger graphs are evaluated in smaller batches (max_batch_size) to bound
# memory usage; None means no cap.
test.run(save_loc, graph_save_loc="_graphs/validation/BA_20spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_40spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_60spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_500spin_m4_100graphs.pkl", batched=True, max_batch_size=5)
| 914 | 52.823529 | 118 | py |
eco-dqn | eco-dqn-master/experiments/BA_20spin/train_and_test_s2v.py | """
Trains and tests S2V-DQN on 20 spin BA graphs.
"""
# Train S2V-DQN on 20-spin BA graphs, then evaluate the resulting network on
# progressively larger validation graphs to probe generalisation.
import experiments.BA_20spin.test.test_s2v as test
import experiments.BA_20spin.train.train_s2v as train
# All checkpoints, scores and results are written beneath this directory.
save_loc="BA_20spin/s2v"
train.run(save_loc)
# Larger graphs are evaluated in smaller batches (max_batch_size) to bound
# memory usage; None means no cap.
test.run(save_loc, graph_save_loc="_graphs/validation/BA_20spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_40spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_60spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl", batched=True, max_batch_size=None)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl", batched=True, max_batch_size=25)
test.run(save_loc, graph_save_loc="_graphs/validation/BA_500spin_m4_100graphs.pkl", batched=True, max_batch_size=5) | 913 | 56.125 | 118 | py |
eco-dqn | eco-dqn-master/experiments/BA_20spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_20spin/eco",
        graph_save_loc="_graphs/validation/BA_20spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained ECO-DQN MPNN on a set of validation graphs.

    Args:
        save_loc: Experiment folder containing 'data' and 'network' subfolders;
            the best network checkpoint is loaded from it.
        graph_save_loc: Pickle of validation graphs to test on.
        batched: Passed through to ``test_network``.
        max_batch_size: Cap on the evaluation batch size (None = no cap).

    Side effects: writes results, raw results and per-step history pickles
    into ``save_loc``/data.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)
    ####################################################
    # NETWORK SETUP
    ####################################################
    # Architecture must match the checkpoint being loaded.
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }
    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################
    # NOTE(review): gamma is unused in this function — kept to mirror the
    # training configuration.
    gamma = 0.95
    step_factor = 2
    # ECO-DQN settings: reversible spins, BLS reward, basin reward 1/20.
    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 20,
                'reversible_spins': True}
    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)
    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # A throwaway env built on the first graph, only used to read the
    # observation-space shape for constructing the network.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0]*step_factor,
                              **env_args)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch.device(device)
    print("Set torch default device to {}.".format(device))
    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path,map_location=device))
    # Inference only: freeze all parameters.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)
    # Derive output filenames from the validation-set filename.
    results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
    results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
    history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,506 | 35.942623 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_20spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_20spin/s2v",
        graph_save_loc="_graphs/validation/BA_20spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained S2V-DQN MPNN on a set of validation graphs.

    Args:
        save_loc: Experiment folder containing 'data' and 'network' subfolders;
            the best network checkpoint is loaded from it.
        graph_save_loc: Pickle of validation graphs to test on.
        batched: Passed through to ``test_network``.
        max_batch_size: Cap on the evaluation batch size (None = no cap).

    Side effects: writes results, raw results and per-step history pickles
    into ``save_loc``/data.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    test_save_path = os.path.join(network_folder, 'test_scores.pkl')
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)
    ####################################################
    # NETWORK SETUP
    ####################################################
    # Architecture must match the checkpoint being loaded.
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }
    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################
    # NOTE(review): gamma is unused in this function — kept to mirror the
    # training configuration.
    gamma=1
    step_factor = 1
    # S2V-DQN settings: irreversible spins, dense reward, spin-state-only obs.
    env_args = {'observables':[Observable.SPIN_STATE],
                'reward_signal':RewardSignal.DENSE,
                'extra_action':ExtraAction.NONE,
                'optimisation_target':OptimisationTarget.CUT,
                'spin_basis':SpinBasis.BINARY,
                'norm_rewards':True,
                'memory_length':None,
                'horizon_length':None,
                'stag_punishment':None,
                'basin_reward':None,
                'reversible_spins':False}
    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)
    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # A throwaway env built on the first graph, only used to read the
    # observation-space shape for constructing the network.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0]*step_factor,
                              **env_args)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch.device(device)
    print("Set torch default device to {}.".format(device))
    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path,map_location=device))
    # Inference only: freeze all parameters.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)
    # Derive output filenames from the validation-set filename.
    results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
    results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
    history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,485 | 35.770492 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_20spin/train/train_s2v.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomBarabasiAlbertGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="BA_20spin/s2v"):
    """Train an S2V-DQN agent on random 20-spin Barabasi-Albert graphs.

    Trains for 2.5M steps, periodically evaluating on 50 pre-generated test
    graphs, then saves the network and plots the learning/loss curves.

    Args:
        save_loc: Root folder under which 'data' and 'network' subfolders are
            created for checkpoints, scores, losses and plots.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################
    gamma=1
    step_fact = 1
    # S2V-DQN settings: irreversible spins, dense reward, spin-state-only obs.
    env_args = {'observables':[Observable.SPIN_STATE],
                'reward_signal':RewardSignal.DENSE,
                'extra_action':ExtraAction.NONE,
                'optimisation_target':OptimisationTarget.CUT,
                'spin_basis':SpinBasis.BINARY,
                'norm_rewards':True,
                'memory_length':None,
                'horizon_length':None,
                'stag_punishment':None,
                'basin_reward':None,
                'reversible_spins':False}
    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################
    n_spins_train = 20
    train_graph_generator = RandomBarabasiAlbertGraphGenerator(n_spins=n_spins_train,m_insertion_edges=4,edge_type=EdgeType.DISCRETE)
    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/BA_20spin_m4_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)
    # ordered=True so evaluation cycles the test graphs deterministically.
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)
    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################
    # Episode length = n_spins * step_fact.
    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train*step_fact),
                                 **env_args)]
    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test*step_fact),
                                **env_args)]
    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################
    data_folder = os.path.join(save_loc,'data')
    network_folder = os.path.join(save_loc, 'network')
    mk_dir(data_folder)
    mk_dir(network_folder)
    # print(data_folder)
    network_save_path = os.path.join(network_folder,'network.pth')
    test_save_path = os.path.join(network_folder,'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')
    ####################################################
    # SET UP AGENT
    ####################################################
    nb_steps = 2500000
    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)
    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=True,
                replay_start_size=500,
                replay_buffer_size=5000,  # 20000
                gamma=gamma,  # 1
                update_target_frequency=1000,  # 500
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,  # 1
                minibatch_size=64,  # 128
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,  # 0.05
                final_exploration_step=150000,  # 40000
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=100000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=10000,  # 10000
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None
                )
    print("\n Created DQN agent with network:\n\n", agent.network)
    #############
    # TRAIN AGENT
    #############
    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)
    agent.save()
    ############
    # PLOT - learning curve
    ############
    data = pickle.load(open(test_save_path, 'rb'))
    data = np.array(data)
    fig_fname = os.path.join(network_folder, "training_curve")
    plt.plot(data[:, 0], data[:, 1])
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    # Pick a y-label matching the metric the agent was evaluated with.
    if agent.test_metric == TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric == TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric == TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric == TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric == TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")
    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()
    ############
    # PLOT - losses
    ############
    data = pickle.load(open(loss_save_path, 'rb'))
    data = np.array(data)
    fig_fname = os.path.join(network_folder, "loss")
    # Smooth the loss curve with a length-50 moving average.
    N = 50
    data_x = np.convolve(data[:, 0], np.ones((N,)) / N, mode='valid')
    data_y = np.convolve(data[:, 1], np.ones((N,)) / N, mode='valid')
    plt.plot(data_x, data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)
    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
if __name__ == "__main__":
run() | 6,850 | 30.283105 | 133 | py |
eco-dqn | eco-dqn-master/experiments/BA_20spin/train/train_eco.py | import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
import src.envs.core as ising_env
from experiments.utils import load_graph_set, mk_dir
from src.agents.dqn.dqn import DQN
from src.agents.dqn.utils import TestMetric
from src.envs.utils import (SetGraphGenerator,
RandomBarabasiAlbertGraphGenerator,
EdgeType, RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import time
def run(save_loc="BA_20spin/eco"):
    """Train an ECO-DQN agent on random 20-spin Barabasi-Albert graphs.

    Trains for 2.5M steps, periodically evaluating on 50 pre-generated test
    graphs, then saves the network and plots the learning/loss curves.

    Args:
        save_loc: Root folder under which 'data' and 'network' subfolders are
            created for checkpoints, scores, losses and plots.
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
    ####################################################
    # SET UP ENVIRONMENTAL AND VARIABLES
    ####################################################
    gamma=0.95
    step_fact = 2
    # ECO-DQN settings: reversible spins, BLS reward, basin reward 1/20.
    env_args = {'observables':DEFAULT_OBSERVABLES,
                'reward_signal':RewardSignal.BLS,
                'extra_action':ExtraAction.NONE,
                'optimisation_target':OptimisationTarget.CUT,
                'spin_basis':SpinBasis.BINARY,
                'norm_rewards':True,
                'memory_length':None,
                'horizon_length':None,
                'stag_punishment':None,
                'basin_reward':1./20,
                'reversible_spins':True}
    ####################################################
    # SET UP TRAINING AND TEST GRAPHS
    ####################################################
    n_spins_train = 20
    train_graph_generator = RandomBarabasiAlbertGraphGenerator(n_spins=n_spins_train,m_insertion_edges=4,edge_type=EdgeType.DISCRETE)
    ####
    # Pre-generated test graphs
    ####
    graph_save_loc = "_graphs/testing/BA_20spin_m4_50graphs.pkl"
    graphs_test = load_graph_set(graph_save_loc)
    n_tests = len(graphs_test)
    # ordered=True so evaluation cycles the test graphs deterministically.
    test_graph_generator = SetGraphGenerator(graphs_test, ordered=True)
    ####################################################
    # SET UP TRAINING AND TEST ENVIRONMENTS
    ####################################################
    # Episode length = n_spins * step_fact.
    train_envs = [ising_env.make("SpinSystem",
                                 train_graph_generator,
                                 int(n_spins_train*step_fact),
                                 **env_args)]
    n_spins_test = train_graph_generator.get().shape[0]
    test_envs = [ising_env.make("SpinSystem",
                                test_graph_generator,
                                int(n_spins_test*step_fact),
                                **env_args)]
    ####################################################
    # SET UP FOLDERS FOR SAVING DATA
    ####################################################
    data_folder = os.path.join(save_loc,'data')
    network_folder = os.path.join(save_loc, 'network')
    mk_dir(data_folder)
    mk_dir(network_folder)
    # print(data_folder)
    network_save_path = os.path.join(network_folder,'network.pth')
    test_save_path = os.path.join(network_folder,'test_scores.pkl')
    loss_save_path = os.path.join(network_folder, 'losses.pkl')
    ####################################################
    # SET UP AGENT
    ####################################################
    nb_steps = 2500000
    network_fn = lambda: MPNN(n_obs_in=train_envs[0].observation_space.shape[1],
                              n_layers=3,
                              n_features=64,
                              n_hid_readout=[],
                              tied_weights=False)
    agent = DQN(train_envs,
                network_fn,
                init_network_params=None,
                init_weight_std=0.01,
                double_dqn=True,
                clip_Q_targets=False,
                replay_start_size=500,
                replay_buffer_size=5000,  # 20000
                gamma=gamma,  # 1
                update_target_frequency=1000,  # 500
                update_learning_rate=False,
                initial_learning_rate=1e-4,
                peak_learning_rate=1e-4,
                peak_learning_rate_step=20000,
                final_learning_rate=1e-4,
                final_learning_rate_step=200000,
                update_frequency=32,  # 1
                minibatch_size=64,  # 128
                max_grad_norm=None,
                weight_decay=0,
                update_exploration=True,
                initial_exploration_rate=1,
                final_exploration_rate=0.05,  # 0.05
                final_exploration_step=150000,  # 40000
                adam_epsilon=1e-8,
                logging=False,
                loss="mse",
                save_network_frequency=100000,
                network_save_path=network_save_path,
                evaluate=True,
                test_envs=test_envs,
                test_episodes=n_tests,
                test_frequency=10000,  # 10000
                test_save_path=test_save_path,
                test_metric=TestMetric.MAX_CUT,
                seed=None
                )
    print("\n Created DQN agent with network:\n\n", agent.network)
    #############
    # TRAIN AGENT
    #############
    start = time.time()
    agent.learn(timesteps=nb_steps, verbose=True)
    print(time.time() - start)
    agent.save()
    ############
    # PLOT - learning curve
    ############
    data = pickle.load(open(test_save_path,'rb'))
    data = np.array(data)
    fig_fname = os.path.join(network_folder,"training_curve")
    plt.plot(data[:,0],data[:,1])
    plt.xlabel("Timestep")
    plt.ylabel("Mean reward")
    # Pick a y-label matching the metric the agent was evaluated with.
    if agent.test_metric==TestMetric.ENERGY_ERROR:
        plt.ylabel("Energy Error")
    elif agent.test_metric==TestMetric.BEST_ENERGY:
        plt.ylabel("Best Energy")
    elif agent.test_metric==TestMetric.CUMULATIVE_REWARD:
        plt.ylabel("Cumulative Reward")
    elif agent.test_metric==TestMetric.MAX_CUT:
        plt.ylabel("Max Cut")
    elif agent.test_metric==TestMetric.FINAL_CUT:
        plt.ylabel("Final Cut")
    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
    plt.clf()
    ############
    # PLOT - losses
    ############
    data = pickle.load(open(loss_save_path,'rb'))
    data = np.array(data)
    fig_fname = os.path.join(network_folder,"loss")
    # Smooth the loss curve with a length-50 moving average.
    N=50
    data_x = np.convolve(data[:,0], np.ones((N,))/N, mode='valid')
    data_y = np.convolve(data[:,1], np.ones((N,))/N, mode='valid')
    plt.plot(data_x,data_y)
    plt.xlabel("Timestep")
    plt.ylabel("Loss")
    plt.yscale("log")
    plt.grid(True)
    plt.savefig(fig_fname + ".png", bbox_inches='tight')
    plt.savefig(fig_fname + ".pdf", bbox_inches='tight')
if __name__ == "__main__":
run() | 6,820 | 30.288991 | 133 | py |
USLN | USLN-master/test.py | from PIL import Image
import os
import numpy as np
import torch
from model import USLN
from SegDataset import read_file_list
from tqdm import trange
# Inference script: load a trained USLN checkpoint and enhance every image in
# the test split, saving predictions under datasets/pred with the original
# filenames.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = USLN()
model.load_state_dict(torch.load(r'logs/UFO.pth'))
model.eval()
model = model.to(device)
# path_list_images_test holds the bare filenames, used to name the outputs.
test, path_list_images_test= read_file_list( type='test')
for id in trange(len(test)):
    image = Image.open(test[id]).convert('RGB')
    # HWC uint8 -> CHW float in [0, 1].
    input = np.transpose(np.array(image, np.float64),(2,0,1))
    input=input/255
    input = torch.from_numpy(input).type(torch.FloatTensor)
    input = input.to(device)
    input= input.unsqueeze(0)
    output = model(input)
    output_np=output.cpu().detach().numpy().copy()
    output_np=output_np.squeeze()
    # CHW [0,1] -> HWC uint8 for saving.
    predictimag=np.transpose(output_np, [1, 2, 0])*255
    a=Image.fromarray(predictimag.astype('uint8'))
    a.save(os.path.join(r"datasets/pred", path_list_images_test[id]))
| 961 | 21.904762 | 69 | py |
USLN | USLN-master/loss.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
import torch.nn as nn
from torchvision import models
class VGG_loss(nn.Module):
    """Perceptual L1 loss computed on truncated backbone feature maps.

    ``model`` is expected to expose its convolutional stack as its first
    child (as torchvision VGG networks do); the last three layers of that
    stack are dropped and the remainder serves as the feature extractor.
    """

    def __init__(self, model):
        super(VGG_loss, self).__init__()
        # Everything but the final three layers of the backbone's first child.
        backbone_layers = list(model.children())[0][:-3]
        self.features = nn.Sequential(*backbone_layers)
        self.l1loss = nn.L1Loss()

    def forward(self, x, y):
        """Return the mean absolute difference between feature maps of x and y."""
        return self.l1loss(self.features(x), self.features(y))
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length ``window_size``, normalised to sum to 1."""
    centre = window_size // 2
    weights = [exp(-(i - centre) ** 2 / float(2 * sigma ** 2)) for i in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, window_size, window_size) Gaussian window for grouped conv2d."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    # Outer product of the 1-D kernel with itself gives the 2-D window.
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return Variable(kernel_2d.expand(channel, 1, window_size, window_size).contiguous())
def _ssim(img1, img2, window, window_size, channel, size_average=True):
    """Core SSIM computation between two image batches using the given window.

    Returns a scalar mean when ``size_average`` is True, otherwise a
    per-image mean over channel and spatial dimensions.
    """
    pad = window_size // 2

    def blur(t):
        # Per-channel (grouped) filtering with the Gaussian window.
        return F.conv2d(t, window, padding=pad, groups=channel)

    mean1 = blur(img1)
    mean2 = blur(img2)
    mean1_sq = mean1.pow(2)
    mean2_sq = mean2.pow(2)
    mean12 = mean1 * mean2
    # Local (co)variances via E[x*y] - E[x]E[y].
    var1 = blur(img1 * img1) - mean1_sq
    var2 = blur(img2 * img2) - mean2_sq
    covar = blur(img1 * img2) - mean12
    # Stabilising constants from the SSIM paper (K1=0.01, K2=0.03, L=1).
    C1 = 0.01 ** 2
    C2 = 0.03 ** 2
    ssim_map = ((2 * mean12 + C1) * (2 * covar + C2)) / ((mean1_sq + mean2_sq + C1) * (var1 + var2 + C2))
    if size_average:
        return ssim_map.mean()
    # Collapse channel and spatial dims one at a time, leaving the batch dim.
    return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """SSIM-based loss module: forward(img1, img2) returns 1 - SSIM(img1, img2).

    The Gaussian window is cached on the module and rebuilt whenever the
    channel count or tensor type of the input changes.
    """

    def __init__(self, window_size=11, size_average=True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        (_, channel, _, _) = img1.size()
        if channel == self.channel and self.window.data.type() == img1.data.type():
            window = self.window
        else:
            # Input layout changed: rebuild the window on the matching
            # device/dtype and cache it for subsequent calls.
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel
        return 1 - _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size=11, size_average=True):
    """Functional SSIM between two 4-D image batches (higher = more similar)."""
    channel = img1.size()[1]
    # Build a window matching the input's channel count, device and dtype.
    window = create_window(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    return _ssim(img1, img2, window, window_size, channel, size_average)
class Combinedloss(torch.nn.Module):
    """Training loss for USLN: 0.25 * (1 - SSIM) + L1 + VGG perceptual loss.

    Downloads/instantiates a pretrained VGG19-BN at construction time for the
    perceptual term.
    """
    def __init__(self):
        super(Combinedloss, self).__init__()
        self.ssim=SSIM()
        self.l1loss = torch.nn.L1Loss()
        # NOTE(review): l2loss is constructed but never used in forward().
        self.l2loss = torch.nn.MSELoss()
        vgg = models.vgg19_bn(pretrained=True)
        self.vggloss = VGG_loss(vgg)
    def forward(self, out, label):
        # SSIM() already returns 1 - ssim, i.e. a dissimilarity.
        ssim_loss = self.ssim(out, label)
        l1_loss = self.l1loss(out, label)
        vgg_loss = self.vggloss(out, label)
        total_loss = 0.25*ssim_loss + l1_loss +vgg_loss
        # Alternative without the perceptual term:
        # total_loss = 0.25 * ssim_loss + l1_loss
        return total_loss | 3,620 | 33.485714 | 114 | py |
USLN | USLN-master/color_change.py | import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# torch.cuda.set_device(0)
# device='cpu'
def rgb2hsi(img):
    """Convert an RGB batch (N,3,H,W) with values in [0,1] to HSI (channels in [0,1])."""
    img = torch.clamp(img, 0, 1)
    red = img[:, 0, :, :]
    green = img[:, 1, :, :]
    blue = img[:, 2, :, :]
    chan_sum = red + green + blue
    intensity = chan_sum / 3
    saturation = 1 - 3 * img.min(1)[0] / (chan_sum + 1e-5)
    # Hue from the standard geometric formula, normalised to [0, 1].
    numer = (2 * red - blue - green) / 2
    denom = ((red - green) ** 2 + (red - blue) * (green - blue) + 1e-5) ** 0.5
    angle = torch.arccos(numer / denom) / 2 / torch.pi
    # Mirror the angle when blue dominates red.
    hue = (blue <= red) * angle + (blue > red) * (1 - angle)
    return torch.cat((hue.unsqueeze(1), saturation.unsqueeze(1), intensity.unsqueeze(1)), dim=1)
def hsi2rgb(img):
    """Convert an HSI batch (N,3,H,W) back to RGB; inputs are clamped to [0,1]."""
    img = torch.clamp(img, 0, 1)
    hue = img[:, 0, :, :]
    sat = img[:, 1, :, :]
    inten = img[:, 2, :, :]
    red = torch.zeros_like(hue)
    green = torch.zeros_like(hue)
    blue = torch.zeros_like(hue)
    # Partition hue into three 120-degree sectors.
    sector0 = hue < 1 / 3
    sector2 = hue >= 2 / 3
    sector1 = (1 - sector0.int() - sector2.int()) == 1
    # Hue angle measured from the start of its sector, in radians.
    theta = torch.zeros_like(hue)
    theta[sector0] = 2 * torch.pi * hue[sector0]
    theta[sector1] = 2 * torch.pi * (hue[sector1] - 1 / 3)
    theta[sector2] = 2 * torch.pi * (hue[sector2] - 2 / 3)
    p = inten * (1 - sat)
    q = inten * (1 + sat * torch.cos(theta) / (torch.cos(torch.pi / 3 - theta) + 1e-5))
    # In each sector one channel takes q, one takes p, and the third balances
    # the total intensity (r + g + b = 3 * I).
    red[sector0] = q[sector0]
    blue[sector0] = p[sector0]
    green[sector0] = 3 * inten[sector0] - red[sector0] - blue[sector0]
    green[sector1] = q[sector1]
    red[sector1] = p[sector1]
    blue[sector1] = 3 * inten[sector1] - red[sector1] - green[sector1]
    blue[sector2] = q[sector2]
    green[sector2] = p[sector2]
    red[sector2] = 3 * inten[sector2] - green[sector2] - blue[sector2]
    return torch.cat((red.unsqueeze(1), green.unsqueeze(1), blue.unsqueeze(1)), dim=1)
def rgb2hsv(img):
    """Convert an RGB batch (N,3,H,W), values in [0,1], to HSV with all channels in [0,1]."""
    img = torch.clamp(img, 0, 1)
    maxc = img.max(1)[0]
    minc = img.min(1)[0]
    spread = maxc - minc + 1e-5
    hue = torch.Tensor(img.shape[0], img.shape[2], img.shape[3]).to(img.device)
    # Assignment order matters: later masks win where channels tie for the max.
    hue[img[:, 2] == maxc] = 4.0 + ((img[:, 0] - img[:, 1]) / spread)[img[:, 2] == maxc]
    hue[img[:, 1] == maxc] = 2.0 + ((img[:, 2] - img[:, 0]) / spread)[img[:, 1] == maxc]
    hue[img[:, 0] == maxc] = (0.0 + ((img[:, 1] - img[:, 2]) / spread)[img[:, 0] == maxc]) % 6
    hue[minc == maxc] = 0.0
    hue = hue / 6
    saturation = (maxc - minc) / (maxc + 1e-5)
    saturation[maxc == 0] = 0
    value = maxc
    return torch.cat([hue.unsqueeze(1), saturation.unsqueeze(1), value.unsqueeze(1)], dim=1)
def hsv2rgb(hsv):
    """Convert an HSV batch (N,3,H,W) to RGB; h wraps mod 1, s and v are clamped to [0,1]."""
    h, s, v = hsv[:, 0, :, :], hsv[:, 1, :, :], hsv[:, 2, :, :]
    # Handle out-of-range inputs.
    h = h % 1
    s = torch.clamp(s, 0, 1)
    v = torch.clamp(v, 0, 1)
    sector = torch.floor(h * 6)
    frac = h * 6 - sector
    p = v * (1 - s)
    q = v * (1 - (frac * s))
    t = v * (1 - ((1 - frac) * s))
    r = torch.zeros_like(h)
    g = torch.zeros_like(h)
    b = torch.zeros_like(h)
    # (r, g, b) sources for each of the six hue sectors.
    sector_channels = [(v, t, p), (q, v, p), (p, v, t), (p, q, v), (t, p, v), (v, p, q)]
    for k, (rsrc, gsrc, bsrc) in enumerate(sector_channels):
        mask = sector == k
        r[mask] = rsrc[mask]
        g[mask] = gsrc[mask]
        b[mask] = bsrc[mask]
    return torch.cat([r.unsqueeze(1), g.unsqueeze(1), b.unsqueeze(1)], dim=1)
MAT_RGB2XYZ = torch.Tensor([[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227]]).to(device)
MAT_XYZ2RGB = torch.Tensor([[ 3.2405, -1.5372, -0.4985],
[-0.9693, 1.8760, 0.0416],
[ 0.0556, -0.2040, 1.0573]]).to(device)
XYZ_REF_WHITE = torch.Tensor([0.95047, 1.0, 1.08883]).to(device)
def rgb2lab(rgb):
    """RGB -> CIELAB (channels normalised to [0,1]); input is clamped to [0,1] first."""
    clamped = torch.clamp(rgb, 0, 1)
    return xyz_to_lab(rgb_to_xyz(clamped))
def lab2rgb(lab):
    """Normalised CIELAB -> RGB; input is clamped to [0,1] first."""
    clamped = torch.clamp(lab, 0, 1)
    return xyz_to_rgb(lab_to_xyz(clamped))
def rgb_to_xyz(rgb):
    """sRGB (values in [0,1]) -> CIE XYZ via inverse gamma + linear transform."""
    # Inverse sRGB gamma: power curve above the 0.04045 threshold, linear below.
    gamma_mask = rgb > 0.04045
    linear = torch.zeros_like(rgb)
    linear[gamma_mask] = ((rgb[gamma_mask] + 0.055) / 1.055) ** 2.4
    linear[~gamma_mask] = rgb[~gamma_mask] / 12.92
    # Apply the RGB->XYZ matrix with channels moved to the last axis.
    xyz = torch.matmul(linear.permute(0, 2, 3, 1), MAT_RGB2XYZ.T)
    return xyz.permute(0, 3, 1, 2)
def xyz_to_lab(xyz):
    """CIE XYZ -> CIELAB, with L/a/b rescaled into [0,1]."""
    # Normalise by the reference white point, channels last.
    scaled = xyz.permute(0, 2, 3, 1) / XYZ_REF_WHITE
    # Nonlinear f() of the CIELAB definition: cube root above the knee,
    # linear approximation below it.
    knee = scaled > 0.008856
    scaled[knee] = torch.pow(scaled[knee], 1.0 / 3.0)
    scaled[~knee] = 7.787 * scaled[~knee] + 16.0 / 116.0
    fx, fy, fz = scaled[..., 0], scaled[..., 1], scaled[..., 2]
    lab = torch.zeros_like(scaled)
    lab[..., 0] = (116.0 * fy) - 16.0  # L in [0, 100]
    lab[..., 1] = 500.0 * (fx - fy)    # a
    lab[..., 2] = 200.0 * (fy - fz)    # b
    # Rescale each channel into [0, 1] using fixed empirical ranges.
    lab[..., 0] = lab[..., 0] / 100
    lab[..., 1] = (lab[..., 1] + 86.183030) / 184.416084
    lab[..., 2] = (lab[..., 2] + 107.857300) / 202.335422
    return lab.permute(0, 3, 1, 2)
def lab_to_xyz(lab):
    """Normalised CIELAB -> CIE XYZ.

    Inverts the fixed [0,1] scaling applied by ``xyz_to_lab``, then the
    CIELAB nonlinearity, and finally re-applies the reference white point.

    Bug fix: the original called ``torch.clamp(xyz, min=0.0)`` without using
    its return value — ``torch.clamp`` is out-of-place, so negative
    intermediates were never actually clamped. The result is now assigned.
    """
    lab = lab.permute(0, 2, 3, 1)
    l, a, b = lab[..., 0], lab[..., 1], lab[..., 2]
    # Undo the fixed [0,1] normalisation used by xyz_to_lab.
    l = l * 100
    a = a * 184.416084 - 86.183030
    b = b * 202.335422 - 107.857300
    xyz = torch.zeros_like(lab)
    xyz[..., 1] = (l + 16.0) / 116.0
    xyz[..., 0] = a / 500.0 + xyz[..., 1]
    xyz[..., 2] = xyz[..., 1] - b / 200.0
    # Clamp negative intermediates before inverting the nonlinearity
    # (fixed: the clamp result is assigned back).
    xyz = torch.clamp(xyz, min=0.0)
    # Inverse of the CIELAB nonlinearity: cube above the knee, linear below.
    mask = xyz > 0.2068966
    xyz[mask] = torch.pow(xyz[mask], 3.0)
    xyz[~mask] = (xyz[~mask] - 16.0 / 116.0) / 7.787
    # De-normalise by the reference white point.
    xyz = xyz * XYZ_REF_WHITE
    return xyz.permute(0, 3, 1, 2)
def xyz_to_rgb(xyz):
    """CIE XYZ -> sRGB in [0,1]: linear matrix transform, then gamma encoding."""
    # Apply the XYZ->RGB matrix with channels moved to the last axis.
    rgb = torch.matmul(xyz.permute(0, 2, 3, 1), MAT_XYZ2RGB.T)
    # Forward sRGB gamma: power curve above the 0.0031308 threshold, linear below.
    gamma_mask = rgb > 0.0031308
    rgb[gamma_mask] = 1.055 * torch.pow(rgb[gamma_mask], 1.0 / 2.4) - 0.055
    rgb[~gamma_mask] = rgb[~gamma_mask] * 12.92
    # Clip out-of-gamut values into [0, 1].
    rgb = torch.clip(rgb, 0, 1)
    return rgb.permute(0, 3, 1, 2)
if __name__ == '__main__':
    # Smoke test: round-trip a single RGB pixel through XYZ/Lab and check
    # that gradients flow back through every conversion.
    with torch.autograd.set_detect_anomaly(True):
        rgb = torch.Tensor([[[0.8, 0.5, 0.5]]])
        rgb.requires_grad_()
        # (H, W, C) -> (1, C, H, W)
        rgb = torch.unsqueeze(rgb.permute(2,0,1),0)
        xzy=rgb_to_xyz(rgb)
        lab=xyz_to_lab(xzy)
        xzy1=lab_to_xyz(lab)
        rgb1=xyz_to_rgb(xzy1)
        print(rgb1)
        rgb1 = rgb1.sum()
        rgb1.backward()
| 7,245 | 26.656489 | 116 | py |
USLN | USLN-master/model.py | import torch.nn as nn
from ptflops import get_model_complexity_info
from color_change import *
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class WB(nn.Module):
    """Learnable white-balance block operating on (N,3,H,W) inputs.

    Two parallel branches normalise a learned projection of the image by its
    global average and global max statistics respectively, each with a tanh
    residual, and the branch outputs are summed.
    """

    def __init__(self):
        super().__init__()
        # Attribute creation order is preserved so checkpoints keep loading.
        self.conv = nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.maxpool = nn.AdaptiveMaxPool2d(output_size=(1, 1))
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv2 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv5 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.tanh = nn.Tanh()

    def forward(self, x):
        # Average-statistic branch: 1x1 projection divided by the global mean.
        avg_stat = self.avgpool(x)
        avg_branch = self.conv(x) / (avg_stat + 1e-5) + self.tanh(self.conv4(x))
        # Max-statistic branch: same idea using the global max.
        max_stat = self.maxpool(x)
        max_branch = self.conv1(x) / (max_stat + 1e-5) + self.tanh(self.conv5(x))
        return self.conv2(avg_branch) + self.conv3(max_branch)
class RGBhs(nn.Module):
    """Global histogram-stretching block in RGB space.

    Rescales the input to span [0,1] using its global min/max (computed with
    adaptive max-pooling), then applies a learned 1x1 convolution.
    """

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.maxpool = nn.AdaptiveMaxPool2d(output_size=(1, 1))

    def forward(self, x):
        # Global per-channel extrema; the minimum is computed as -max(-x).
        lo = -self.maxpool(-x)
        hi = self.maxpool(x)
        stretched = (x - lo) / (hi - lo + 1e-5)
        return self.conv(stretched)
class Labhs(nn.Module):
    """Histogram stretching performed in CIELAB space.

    Converts to Lab, rescales to the global [0,1] range, applies a learned
    1x1 convolution, and converts back to RGB.
    """

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.maxpool = nn.AdaptiveMaxPool2d(output_size=(1, 1))

    def forward(self, x):
        lab = rgb2lab(x)
        lo = -self.maxpool(-lab)
        hi = self.maxpool(lab)
        stretched = (lab - lo) / (hi - lo + 1e-5)
        return lab2rgb(self.conv(stretched))
class HSIhs(nn.Module):
    """Histogram stretching in HSI space, applied to S and I only (hue preserved)."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(2, 2, kernel_size=1, stride=1)
        self.maxpool = nn.AdaptiveMaxPool2d(output_size=(1, 1))

    def forward(self, x):
        hsi = rgb2hsi(x)
        hue, si = torch.split(hsi, [1, 2], dim=1)
        # Stretch saturation/intensity to the global [0,1] range.
        lo = -self.maxpool(-si)
        hi = self.maxpool(si)
        si = (si - lo) / (hi - lo + 1e-5)
        # Residual learned adjustment on the stretched channels.
        si = si + self.conv(si)
        return hsi2rgb(torch.cat((hue, si), dim=1))
class USLN(nn.Module):
    """Underwater image enhancement network.

    Pipeline: learnable white balance (WB) followed by three parallel
    histogram-stretching branches (RGB, HSI, Lab), each with a learned tanh
    residual; branch outputs are fused by 3x3 convolutions and clipped to
    [0, 1] via two stacked ReLUs.
    """

    def __init__(self):
        super().__init__()
        # Attribute creation order is preserved so checkpoints keep loading.
        self.conv1 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv5 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv6 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.tanh = nn.Tanh()
        self.step1 = WB()
        self.step2 = RGBhs()
        self.step3 = HSIhs()
        self.step4 = Labhs()
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        balanced = self.step1(x)
        # Each stretching branch carries a tanh residual of the balanced image.
        rgb_branch = self.step2(balanced) + self.tanh(self.conv1(balanced))
        hsi_branch = self.step3(balanced) + self.tanh(self.conv2(balanced))
        lab_branch = self.step4(balanced) + self.tanh(self.conv3(balanced))
        fused = self.conv4(rgb_branch) + self.conv5(hsi_branch) + self.conv6(lab_branch)
        # Clamp to [0,1]: the inner ReLU floors at 0, the outer expression caps at 1.
        return 1 - self.relu(1 - self.relu(fused))
if __name__ == '__main__':
    # Report model complexity (FLOPs / parameter count) for a 3x256x256 input.
    net=USLN().to(device)
    flops,params=get_model_complexity_info(net,(3,256,256))
    print(flops,params)
| 3,686 | 31.342105 | 130 | py |
USLN | USLN-master/SegDataset.py | import os
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
def read_file_list(type='train'):
    """Collect dataset file paths for the requested split.

    Args:
        type: One of 'train', 'val' or 'test'.

    Returns:
        For 'train'/'val': a pair (image paths, label paths).
        For 'test': (image paths, bare file names) -- the bare names are kept
        so results can be written out under the original file names.

    Raises:
        ValueError: If `type` is not one of the known splits.

    Note: only the directories belonging to the requested split are listed
    (the original listed all five directories up front, which crashed when an
    unused directory was missing, and silently returned None for an unknown
    split).
    """
    if type == 'train':
        image_dir, label_dir = r"datasets/images_train", r"datasets/labels_train"
    elif type == 'val':
        image_dir, label_dir = r"datasets/images_val", r"datasets/labels_val"
    elif type == 'test':
        # The test split has no labels; return the bare file names instead.
        names = os.listdir(r"datasets/images_test")
        return [os.path.join(r"datasets/images_test", n) for n in names], names
    else:
        raise ValueError("unknown dataset split: %r" % (type,))
    images = [os.path.join(image_dir, n) for n in os.listdir(image_dir)]
    labels = [os.path.join(label_dir, n) for n in os.listdir(label_dir)]
    return images, labels
def preprocess_input(image):
    """Scale pixel values from [0, 255] to [0, 1].

    Returns a new array instead of mutating the argument: the original
    `image /= 255.0` modified the caller's array in place as a side effect
    and raised a TypeError on integer dtypes.
    """
    return image / 255.0
class SegDataset(torch.utils.data.Dataset):
    """Paired image/label dataset for one split ('train' / 'val' / 'test')."""

    def __init__(self, type):
        self.images, self.labels = read_file_list(type=type)
        print(f'Read {len(self.images)} valid examples')

    def rand(self, a=0, b=1):
        # Uniform random sample in [a, b).
        return np.random.rand() * (b - a) + a

    @staticmethod
    def _to_tensor(path):
        # Load an RGB image, scale to [0, 1], reorder HWC -> CHW, cast float32.
        img = Image.open(path).convert('RGB')
        arr = np.transpose(preprocess_input(np.array(img, np.float64)), [2, 0, 1])
        return torch.from_numpy(arr).type(torch.FloatTensor)

    def __getitem__(self, idx):
        return self._to_tensor(self.images[idx]), self._to_tensor(self.labels[idx])

    def __len__(self):
        return len(self.images)
| 2,160 | 28.60274 | 94 | py |
USLN | USLN-master/train.py | import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import sys
from model import USLN
from SegDataset import SegDataset
from loss import Combinedloss
########################################################
# Runtime configuration: no worker processes on Windows, 8 elsewhere.
num_workers = 0 if sys.platform.startswith('win32') else 8
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
#############################################################
torch.cuda.set_device(1) # pin training to GPU index 1
if __name__ == "__main__":
    # Hyper-parameters for the training run.
    Init_Epoch = 0
    Final_Epoch = 100
    batch_size = 10
    lr = 1e-2
    model = USLN()
    save_model_epoch = 1
    model = model.to(device)
    data_train = SegDataset('train')
    data_test = SegDataset('val')
    myloss = Combinedloss().to(device)
    # NOTE(review): `if True:` is leftover scaffolding -- the body always runs.
    if True:
        batch_size = batch_size
        start_epoch = Init_Epoch
        end_epoch = Final_Epoch
        optimizer = optim.Adam(model.train().parameters(), lr=lr, weight_decay = 5e-4)
        # Exponential decay: multiply the learning rate by 0.94 every epoch.
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size = 1, gamma = 0.94)
        for epo in range(start_epoch, end_epoch):
            train_loss = 0
            model.train() # enable batch-norm / dropout training behaviour
            train_iter = torch.utils.data.DataLoader(data_train, batch_size, shuffle=True,
                                                     drop_last=True, num_workers=num_workers,pin_memory=True)
            test_iter = torch.utils.data.DataLoader(data_test, batch_size, drop_last=True,
                                                    num_workers=num_workers,pin_memory=True)
            # ---- training loop over mini-batches ----
            for index, (bag, bag_msk) in enumerate(train_iter):
                bag = bag.to(device)
                bag_msk = bag_msk.to(device)
                optimizer.zero_grad()
                output = model(bag)
                loss = myloss(output, bag_msk)
                loss.backward()
                iter_loss = loss.item()
                train_loss += iter_loss
                optimizer.step()
                # Log every 15th batch.
                if np.mod(index, 15) == 0:
                    print('epoch {}, {}/{},train loss is {}'.format(epo, index, len(train_iter), iter_loss))
            # ---- validation ----
            test_loss = 0
            model.eval()
            with torch.no_grad():
                for index, (bag, bag_msk) in enumerate(test_iter):
                    bag = bag.to(device)
                    bag_msk = bag_msk.to(device)
                    # NOTE(review): zero_grad is a no-op here -- no backward()
                    # is called inside this no_grad block.
                    optimizer.zero_grad()
                    output = model(bag)
                    loss = myloss(output, bag_msk)
                    # loss = criterion(output, torch.argmax(bag_msk, axis=1))
                    iter_loss = loss.item()
                    test_loss += iter_loss
            print('<---------------------------------------------------->')
            print('epoch: %f' % epo)
            print('epoch train loss = %f, epoch test loss = %f'
                  % (train_loss / len(train_iter), test_loss / len(test_iter)))
            lr_scheduler.step()
            # Checkpoint every `save_model_epoch` epochs (the original comment
            # said "every 5 epochs", but save_model_epoch is 1 above).
            if np.mod(epo, save_model_epoch) == 0:
                # Save the state dict only, not the full module.
                torch.save(model.state_dict(), 'logs/ep%03d-loss%.3f-val_loss%.3f.pth' % (
                    (epo + 1), (100*train_loss / len(train_iter)), (100*test_loss / len(test_iter)))
                )
                print('saveing checkpoints/model_{}.pth'.format(epo))
| 3,435 | 31.415094 | 109 | py |
LFSC | LFSC-master/tests/run_test.py | #!/usr/bin/env python
import re
import os.path
import sys
import subprocess
import resource
class TestConfiguration(object):
    ''' Holds the lfscc binary, the test file and its dependency graph. '''
    def __init__(self):
        ''' Build the configuration from sys.argv.

            Prints a usage message and exits with code 2 when the two
            required arguments are missing or are not existing files.
        '''
        args_valid = len(sys.argv) >= 3 and \
            os.path.isfile(sys.argv[1]) and \
            os.path.isfile(sys.argv[2])
        if not args_valid:
            print(sys.argv)
            print('''
                Usage: {} <lfscc> <plf>
                Return:
                    Returns the exit code of LFSCC.
                    Echos LFSCC's stdout and stderr if the exit code is non-zero
                Dependencies:
                    The PLF file may contain lines like
                    ; Deps: [<file to include before this one> ...]
                    Dependencies are recursively resolved''')
            sys.exit(2)
        self.lfscc = sys.argv[1]
        self.path = sys.argv[2]
        self.dep_graph = DepGraph(self.path)
        self.file = TestFile(self.path)
class DepGraph(object):
    ''' Dependency graph over LFSC input files with a topological order. '''

    def __init__(self, root_path):
        ''' Build the graph rooted at `root_path` and compute a root-last
            topological sort. Exits with code 1 on a dependency cycle. '''
        self._r = root_path
        self._visited = set()        # paths entered by the DFS
        self._ordered_set = set()    # paths already placed in the order
        self._ordered_paths = []     # topological order, root last
        self._visit(root_path)

    def _visit(self, path):
        ''' DFS helper: place all descendants of `path` before `path`. '''
        deps = TestFile(path).dep_paths
        self._visited.add(path)
        for dep in deps:
            if dep in self._ordered_set:
                continue
            if dep in self._visited:
                # Visited but not yet ordered: `dep` is still on the DFS
                # stack, so this edge closes a cycle.
                print("{} and {} are in a dependency cycle".format(path, dep))
                sys.exit(1)
            self._visit(dep)
        self._ordered_paths.append(path)
        self._ordered_set.add(path)

    def getPathsInOrder(self):
        return self._ordered_paths
class TestFile(object):
    ''' A single LFSC input file plus the configuration embedded in its
        comments (e.g. its declared dependencies). '''

    def __init__(self, path):
        ''' Read the file at `path` and resolve its immediate dependencies. '''
        self.path = path
        self._get_config_map()
        # "deps" holds a whitespace-separated list of sibling file names.
        self.deps = self.config_map.get('deps', '').split()
        self.dir = os.path.dirname(self.path)
        self.dep_paths = [os.path.join(self.dir, dep) for dep in self.deps]

    def _get_comment_lines(self):
        ''' Return an iterator over the file's comment lines (leading ;). '''
        comment_re = re.compile(r'^\s*;.*$')
        with open(self.path, 'r') as test_file:
            lines = test_file.readlines()
        return (line for line in lines if comment_re.match(line) is not None)

    def _get_config_map(self):
        ''' Populate self.config_map from comment lines of the form
            `; Var Name Spaces Okay: space separated values`; keys are
            lower-cased with internal spaces removed. '''
        config_re = re.compile(r'^.*;\s*(\w+(?:\s+\w+)*)\s*:(.*)$')
        config = {}
        for line in self._get_comment_lines():
            hit = config_re.match(line)
            if hit is not None:
                config[hit.group(1).replace(' ','').lower()] = hit.group(2)
        self.config_map = config
def main():
    """Run lfscc on the test file plus its dependencies and check the result.

    Returns lfscc's exit code, or 1 when an expected error line (declared via
    `; Error Line: N` in the test file) is missing or does not match.
    Note: uses the Unix-only `resource` module.
    """
    # Units: bytes
    soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
    # Raise the soft stack limit to 32 MiB (capped at the hard limit).
    # NOTE(review): if hard == resource.RLIM_INFINITY (-1), min() picks -1,
    # i.e. this requests an unlimited soft stack -- confirm that is intended.
    resource.setrlimit(resource.RLIMIT_STACK, (min(2**25, hard), hard))
    configuration = TestConfiguration()
    # Dependencies first, the test file itself last.
    cmd = [configuration.lfscc] + configuration.dep_graph.getPathsInOrder()
    print('Command: ', cmd)
    result = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    (stdout, _) = result.communicate()
    print(configuration.file.config_map)
    if 'errorline' in configuration.file.config_map:
        # The test is expected to FAIL at a specific line.
        lineno = int(configuration.file.config_map['errorline'].strip())
        if 0 == result.returncode:
            print("Should have errored but did not")
            return 1
        else:
            # lfscc reports errors as "... at <line>:<col>".
            act_lineno_g = re.search(r' at (\d+):', stdout.decode())
            if act_lineno_g is None:
                print("Cannot find error line #")
                return 1
            act_lineno = int(act_lineno_g.group(1))
            if lineno != act_lineno:
                print("Should have errored on line {} but errored on line {}".format(lineno, act_lineno))
                return 1
            return 0
    else:
        # The test is expected to PASS; echo output only on failure.
        if 0 != result.returncode:
            print("Exited with code {}".format(result.returncode))
            if stdout:
                print(stdout.decode())
        return result.returncode
if __name__ == '__main__':
    sys.exit(main())
| 4,831 | 33.76259 | 105 | py |
nonlinear_frag | nonlinear_frag-main/kmul/scripts/plots_loglog_kmul.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from mpmath import *
import sympy as sym
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
#options for plots (global matplotlib style shared by the figures below)
plt.rcParams["font.size"]= 16
plt.rcParams['lines.linewidth'] = 3
plt.rcParams["legend.columnspacing"] = 0.5
marker_style = dict( marker='o',markersize=12, markerfacecolor='white', linestyle='',markeredgewidth=2)
savefig_options=dict(bbox_inches='tight')
def solkmulDL(x, tau):
    """Analytic solution of the multiplicative-kernel fragmentation case for
    the initial condition g(x, 0) = x exp(-x):
        g(x, tau) = x (1 + tau)^2 exp(-x (1 + tau)).
    """
    growth = 1.0 + tau
    return x * growth ** 2 * np.exp(-x * growth)
# Locations of the simulation outputs and of the generated figures.
path_data = '../data'
path_plot = '../plots'
nbins=20
# For each polynomial order k = 0..3 load: the bin edges (massgrid), the
# per-bin sample points (xmeanlog), the mass density at those points at the
# initial and final output times, and the output times themselves.
#data k0
################################
k0=0
massgridk0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/massgrid.txt')
xmeanlogk0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/xmeanlog.txt')
gt0_xmeanlog_k0 = np.genfromtxt(path_data+'/kmax='+str(k0)+'/gt0_xmeanlog.txt')
gtend_xmeanlog_k0 = np.genfromtxt(path_data+'/kmax='+str(k0)+'/gtend_xmeanlog.txt')
timek0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/time.txt')
#data k1
################################
k1=1
massgridk1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/massgrid.txt')
xmeanlogk1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/xmeanlog.txt')
gt0_xmeanlog_k1 = np.genfromtxt(path_data+'/kmax='+str(k1)+'/gt0_xmeanlog.txt')
gtend_xmeanlog_k1 = np.genfromtxt(path_data+'/kmax='+str(k1)+'/gtend_xmeanlog.txt')
timek1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/time.txt')
#data k2
################################
k2=2
massgridk2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/massgrid.txt')
xmeanlogk2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/xmeanlog.txt')
gt0_xmeanlog_k2 = np.genfromtxt(path_data+'/kmax='+str(k2)+'/gt0_xmeanlog.txt')
gtend_xmeanlog_k2 = np.genfromtxt(path_data+'/kmax='+str(k2)+'/gtend_xmeanlog.txt')
timek2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/time.txt')
#data k3
################################
k3=3
massgridk3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/massgrid.txt')
xmeanlogk3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/xmeanlog.txt')
gt0_xmeanlog_k3 = np.genfromtxt(path_data+'/kmax='+str(k3)+'/gt0_xmeanlog.txt')
gtend_xmeanlog_k3 = np.genfromtxt(path_data+'/kmax='+str(k3)+'/gtend_xmeanlog.txt')
timek3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/time.txt')
# Plot range. np.float was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float is the documented replacement.
xmin = float(massgridk1[0])
xmax = float(massgridk1[-1])
yminloglog = 10**(-17)
ymaxloglog = 10**(3)
x=np.logspace(np.log10(xmin),np.log10(xmax),num=1000,dtype=np.float64)
#figure in log-log scale: analytic solution vs numerical solutions for k=0..3
fig,ax = plt.subplots()
ax.set_ylim(yminloglog,10**15)
ax.set_xlim(xmin,xmax)
ax.set_xscale('log')
ax.set_yscale('log')
ax.loglog(x,solkmulDL(x,timek0[-1]),'--',c='C0',label='Analytic')
ax.plot(xmeanlogk0,gtend_xmeanlog_k0,markeredgecolor='black',label=r'$k=0$',**marker_style)
ax.plot(xmeanlogk1,gtend_xmeanlog_k1,markeredgecolor='C3',label=r'$k=1$',**marker_style)
ax.plot(xmeanlogk2,gtend_xmeanlog_k2,markeredgecolor='C1',label=r'$k=2$',**marker_style)
ax.plot(xmeanlogk3,gtend_xmeanlog_k3,markeredgecolor='C2',label=r'$k=3$',**marker_style)
#to show the position of the peak of the curve at t=0
ax.axvline(1.,c='C4',linestyle='dashdot')
ax.text(1.5, 10**(-7),r'$\tau=0$',rotation='vertical')
#zoom-in around the peak in lin-log scale
axins2 = zoomed_inset_axes(ax, 3, loc=1)
axins2.semilogx(x,solkmulDL(x,timek0[-1]),'--',c='C0',label='Analytic')
axins2.semilogx(xmeanlogk0,gtend_xmeanlog_k0,markeredgecolor='black',label=r'$k=0$',**marker_style)
axins2.semilogx(xmeanlogk1,gtend_xmeanlog_k1,markeredgecolor='C3',label=r'$k=1$',**marker_style)
axins2.semilogx(xmeanlogk2,gtend_xmeanlog_k2,markeredgecolor='C1',label=r'$k=2$',**marker_style)
axins2.semilogx(xmeanlogk3,gtend_xmeanlog_k3,markeredgecolor='C2',label=r'$k=3$',**marker_style)
#to select automatically a rectangle around the peak based on the position of the peak
index_maxvalue = np.where(gtend_xmeanlog_k1==np.max(gtend_xmeanlog_k1))[0][0]
xlim_r = xmeanlogk0[index_maxvalue]*10
xlim_l = xmeanlogk0[index_maxvalue]/10.
axins2.set_xlim(xlim_l, xlim_r)
ylim_up = gtend_xmeanlog_k1[index_maxvalue]*1.2
ylim_down = gtend_xmeanlog_k1[index_maxvalue]/1000.
axins2.set_ylim(ylim_down, ylim_up)
axins2.yaxis.tick_left()
axins2.tick_params(labelsize=10)
plt.setp(axins2.get_yticklabels(), visible=True)
ax.set_xlabel(r'mass $x$')
ax.set_ylabel(r'mass density $g(x,\tau)$')
ax.set_title(r'$\tau=%d$' %(timek0[-1]))
ax.legend(loc='lower left',ncol=1,fontsize=14)
# Draw the connector lines between the inset and the zoomed region.
mark_inset(ax, axins2, loc1=2, loc2=4, fc="none", ec="0.5")
# plt.savefig(path_plot+'/kmul_loglog.png',dpi=192,**savefig_options)
plt.show()
| 4,611 | 32.179856 | 103 | py |
nonlinear_frag | nonlinear_frag-main/kmul/scripts/plot_M1_kmul.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from matplotlib import pyplot as plt
#options for plots
plt.rcParams["font.size"]= 16
plt.rcParams['lines.linewidth'] = 3
savefig_options=dict(bbox_inches='tight')
marker_style = dict( marker='o',markersize=8, markerfacecolor='white', linestyle='',markeredgewidth=1.2)
nbins=20
path_data = '../data'
path_plot = '../plots'
# For each order k, abserrM1 columns are: [:,0] time, [:,1] error on the
# first moment M1 (as used in the plot calls below).
# data k0
################################
k0=0
abserrM1_k0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/abserrM1.txt')
################################
#data k1
################################
k1=1
abserrM1_k1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/abserrM1.txt')
################################
#data k2
################################
k2=2
abserrM1_k2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/abserrM1.txt')
###############################
#data k3
################################
k3=3
abserrM1_k3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/abserrM1.txt')
###############################
xmin = abserrM1_k0[0,0]
xmax = abserrM1_k0[-1,0]
yminloglog = 10**(-8)
ymaxloglog = 10**(-3)
# Error on the first moment (total mass) versus time, log-log, for k=0..3.
plt.figure(1)
plt.loglog(abserrM1_k0[:,0],abserrM1_k0[:,1],linestyle='dotted',color='black',label=r'$k=0$')
plt.loglog(abserrM1_k1[:,0],abserrM1_k1[:,1],linestyle='dashdot',color='C3',label=r'$k=1$')
plt.loglog(abserrM1_k2[:,0],abserrM1_k2[:,1],linestyle='dashed',color='C1',label=r'$k=2$')
plt.loglog(abserrM1_k3[:,0],abserrM1_k3[:,1],linestyle='solid',color='C2',label=r'$k=3$')
plt.legend(loc='upper center',ncol=2)
plt.xlabel(r'time $\tau$')
plt.ylabel(r'numerical error $e_{M_1,N}$');
plt.xlim(xmin,xmax)
plt.ylim(yminloglog,ymaxloglog)
plt.tight_layout()
# plt.savefig(path_plot+'/abserrM1_kmul.png',dpi=192,**savefig_options)
plt.show()
| 1,778 | 24.782609 | 104 | py |
nonlinear_frag | nonlinear_frag-main/kmul/scripts/plot_errors_kmul.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from matplotlib import pyplot as plt
#options for plots
plt.rcParams["font.size"]= 16
plt.rcParams['lines.linewidth'] = 3
savefig_options=dict(bbox_inches='tight')
marker_style = dict( marker='o',markersize=8, markerfacecolor='white', linestyle='',markeredgewidth=1.2)
nbins=20
path_data = '../data'
path_plot = '../plots'
# For each order k, the errL1* files hold [:,0] time and [:,1] the L1-norm
# error (continuous and discrete variants), as used in the plots below.
# data k0
################################
k0=0
errL1cont_k0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/errL1cont.txt')
errL1dis_k0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/errL1dis.txt')
################################
#data k1
################################
k1=1
errL1cont_k1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/errL1cont.txt')
errL1dis_k1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/errL1dis.txt')
################################
#data k2
################################
k2=2
errL1cont_k2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/errL1cont.txt')
errL1dis_k2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/errL1dis.txt')
###############################
#data k3
################################
k3=3
errL1cont_k3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/errL1cont.txt')
errL1dis_k3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/errL1dis.txt')
###############################
xmin = np.min(errL1cont_k0[:,0])
xmax = np.max(errL1cont_k0[:,0])
yminloglog = 10**(-5)
ymaxloglog = 10**(1)
# Figure 1: continuous L1-norm error versus time.
plt.figure(1)
plt.loglog(errL1cont_k0[:,0],errL1cont_k0[:,1],linestyle='dotted',color='black',label=r'$k=0$')
plt.loglog(errL1cont_k1[:,0],errL1cont_k1[:,1],linestyle='dashdot',color='C3',label=r'$k=1$')
plt.loglog(errL1cont_k2[:,0],errL1cont_k2[:,1],linestyle='dashed',color='C1',label=r'$k=2$')
plt.loglog(errL1cont_k3[:,0],errL1cont_k3[:,1],linestyle='solid',color='C2',label=r'$k=3$')
plt.legend(loc='lower center',ncol=2)
plt.xlabel(r'time $\tau$')
plt.ylabel(r'numerical error $e_{c,N}$');
plt.xlim(xmin,xmax)
plt.ylim(yminloglog,ymaxloglog)
plt.tight_layout()
# plt.savefig(path_plot+'/errL1_cont_kmul.png',dpi=192,**savefig_options)
# Figure 2: discrete L1-norm error versus time.
plt.figure(2)
plt.loglog(errL1dis_k0[:,0],errL1dis_k0[:,1],linestyle='dotted',color='black',label=r'$k=0$')
plt.loglog(errL1dis_k1[:,0],errL1dis_k1[:,1],linestyle='dashdot',color='C3',label=r'$k=1$')
plt.loglog(errL1dis_k2[:,0],errL1dis_k2[:,1],linestyle='dashed',color='C1',label=r'$k=2$')
plt.loglog(errL1dis_k3[:,0],errL1dis_k3[:,1],linestyle='solid',color='C2',label=r'$k=3$')
plt.legend(loc='lower center',ncol=2)
plt.xlabel(r'time $\tau$')
plt.ylabel(r'numerical error $e_{d,N}$');
plt.xlim(xmin,xmax)
plt.ylim(yminloglog,ymaxloglog)
plt.tight_layout()
# plt.savefig(path_plot+'/errL1_dis_kmul.png',dpi=192,**savefig_options)
plt.show() | 2,756 | 30.689655 | 104 | py |
nonlinear_frag | nonlinear_frag-main/kmul/scripts/plot_EOC_kmul.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from matplotlib import pyplot as plt
#options for plots
plt.rcParams["font.size"]= 16
plt.rcParams['lines.linewidth'] = 3
savefig_options=dict(bbox_inches='tight')
marker_style = dict( marker='o',markersize=8, markerfacecolor='white', linestyle='',markeredgewidth=1.2)
path_data = '../data'
path_plot = '../plots'
# For each order k, the EOC files hold [:,0] bins per decade and [:,1] the
# L1 error, used below to display the experimental order of convergence.
# data k0
################################
k0=0
EOCL1cont_k0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/EOCL1cont_k0.txt')
EOCL1dis_k0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/EOCL1dis_k0.txt')
################################
#data k1
################################
k1=1
EOCL1cont_k1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/EOCL1cont_k1.txt')
EOCL1dis_k1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/EOCL1dis_k1.txt')
################################
#data k2
################################
k2=2
EOCL1cont_k2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/EOCL1cont_k2.txt')
EOCL1dis_k2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/EOCL1dis_k2.txt')
###############################
#data k3
################################
k3=3
EOCL1cont_k3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/EOCL1cont_k3.txt')
EOCL1dis_k3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/EOCL1dis_k3.txt')
###############################
# Figure 1: convergence of the continuous L1 error, with reference slopes
# N^-1 .. N^-4 drawn as dotted guide lines.
plt.figure(1)
plt.loglog(EOCL1cont_k0[:,0],EOCL1cont_k0[:,1],'o',c='black',label=r'$k=0$')
plt.loglog(EOCL1cont_k1[:,0],EOCL1cont_k1[:,1],'o',c='C3',label=r'$k=1$')
plt.loglog(EOCL1cont_k2[:,0],EOCL1cont_k2[:,1],'o',c='C1',label=r'$k=2$')
plt.loglog(EOCL1cont_k3[:,0],EOCL1cont_k3[:,1],'o',c='C2',label=r'$k=3$')
plt.loglog(EOCL1cont_k0[1:,0],1*EOCL1cont_k0[1:,0]**(-1),':',c='black')
plt.text(6,0.2,r'$\propto N_{\mathrm{bins}/\mathrm{dec}}^{-1}$',color='black')
plt.loglog(EOCL1cont_k1[1:,0],0.7*EOCL1cont_k1[1:,0]**(-2),':',c='C3')
plt.text(6,0.02,r'$\propto N_{\mathrm{bins}/\mathrm{dec}}^{-2}$',color='C3')
plt.loglog(EOCL1cont_k2[1:,0],0.5*EOCL1cont_k2[1:,0]**(-3),':',c='C1')
plt.text(6,0.0025,r'$\propto N_{\mathrm{bins}/\mathrm{dec}}^{-3}$',color='C1')
plt.loglog(EOCL1cont_k3[1:,0],0.1*EOCL1cont_k3[1:,0]**(-4),':',c='C2')
plt.text(6,1*10**(-4),r'$\propto N_{\mathrm{bins}/\mathrm{dec}}^{-4}$',color='C2')
plt.xlabel(r'$N_{\mathrm{bins}/\mathrm{decade}}$')
plt.ylabel(r'$e_{\mathrm{c},N}$')
plt.xlim(xmax=17)
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig(path_plot+'/EOCL1cont_kmul.png',dpi=192,**savefig_options)
# Figure 2: convergence of the discrete L1 error, same layout.
plt.figure(2)
plt.loglog(EOCL1dis_k0[:,0],EOCL1dis_k0[:,1],'o',c='black',label=r'$k=0$')
plt.loglog(EOCL1dis_k1[:,0],EOCL1dis_k1[:,1],'o',c='C3',label=r'$k=1$')
plt.loglog(EOCL1dis_k2[:,0],EOCL1dis_k2[:,1],'o',c='C1',label=r'$k=2$')
plt.loglog(EOCL1dis_k3[:,0],EOCL1dis_k3[:,1],'o',c='C2',label=r'$k=3$')
plt.loglog(EOCL1dis_k0[1:,0],1.2*EOCL1dis_k0[1:,0]**(-2),':',c='black')
plt.text(6,0.04,r'$\propto N_{\mathrm{bins}/\mathrm{dec}}^{-2}$',color='black')
plt.loglog(EOCL1dis_k1[1:,0],0.4*EOCL1dis_k1[1:,0]**(-2),':',c='C3')
plt.text(6,0.0015,r'$\propto N_{\mathrm{bins}/\mathrm{dec}}^{-2}$',color='C3')
plt.loglog(EOCL1dis_k2[1:,0],0.5*EOCL1dis_k2[1:,0]**(-4),':',c='C1')
plt.text(6,4*10**(-4),r'$\propto N_{\mathrm{bins}/\mathrm{dec}}^{-4}$',color='C1')
plt.loglog(EOCL1dis_k3[1:,0],0.1*EOCL1dis_k3[1:,0]**(-4),':',c='C2')
plt.text(3,2*10**(-5),r'$\propto N_{\mathrm{bins}/\mathrm{dec}}^{-4}$',color='C2')
plt.xlabel(r'$N_{\mathrm{bins}/\mathrm{decade}}$')
plt.ylabel(r'$e_{\mathrm{d},N}$')
plt.xlim(xmax=17)
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig(path_plot+'/EOCL1dis_kmul.png',dpi=192,**savefig_options)
plt.show() | 3,660 | 35.61 | 104 | py |
nonlinear_frag | nonlinear_frag-main/kmul/scripts/plots_linlog_kmul.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sympy as sym
import numpy as np
from scipy.special import legendre,iv
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
#options for plots (global matplotlib style shared by the figures below)
plt.rcParams["font.size"]= 16
plt.rcParams['lines.linewidth'] = 3
savefig_options=dict(bbox_inches='tight')
def LegendreP(i, x):
    """Evaluate the Legendre polynomial P_i at x from its power-series
    coefficients (scipy's `legendre` returns a poly1d; indexing it by j
    gives the coefficient of x**j)."""
    poly = legendre(i)
    total = 0
    for power in range(i + 1):
        total = total + poly[power] * x ** power
    return total
def gtilde(massgrid, massbins, gij, theta, k, j, x):
    """Reconstruct the (limited) polynomial approximation of g on bin j.

    x is first mapped to the local coordinate xij of bin j (relative to
    massbins[j], scaled by the bin width). For k == 0 the reconstruction is
    the piecewise-constant value gij[j]; otherwise the degree-k Legendre
    expansion is evaluated and blended toward the cell mean gij[j, 0]
    through the limiter theta[j].
    """
    xij = 2 / (massgrid[j + 1] - massgrid[j]) * (x - massbins[j])
    if k == 0:
        return gij[j]
    expansion = 0
    for order in range(k + 1):
        expansion = expansion + gij[j, order] * LegendreP(order, xij)
    # Limiting: theta[j] = 0 falls back to the cell mean, 1 keeps the
    # full expansion.
    return theta[j] * (expansion - gij[j, 0]) + gij[j, 0]
def gk0(gij, j, x):
    """Piecewise-constant (k = 0) reconstruction on bin j: the cell value
    times P_0 evaluated at x (P_0 is identically 1)."""
    return gij[j] * LegendreP(0, x)
def I(massgrid, j):
    """Return 100 logarithmically spaced sample points spanning mass bin j,
    i.e. the interval [massgrid[j], massgrid[j+1]]."""
    lo, hi = np.log10(massgrid[j]), np.log10(massgrid[j + 1])
    return np.logspace(lo, hi, num=100)
def solkmulDL(x, tau):
    """Analytic solution of the multiplicative-kernel case for the initial
    condition g(x, 0) = x exp(-x):
        g(x, tau) = x (1 + tau)^2 exp(-x (1 + tau)).
    """
    scale = 1.0 + tau
    return x * scale ** 2 * np.exp(-x * scale)
marker_style = dict( marker='o',markersize=8, markerfacecolor='white', linestyle='',markeredgewidth=1.2)
path_data = '../data'
path_plot = '../plots'
nbins=20
# For each order k = 0..3 load: bin edges (massgrid), bin reference points
# (massbins), the polynomial coefficients gij at the first and last output
# times, the limiter values theta (k >= 1), and the output times. The
# coefficient arrays are reshaped to (nbins, k+1).
#data k0
################################
k0=0
massgridk0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/massgrid.txt')
massbinsk0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/massbins.txt')
gij_t0_k0 = np.genfromtxt(path_data+'/kmax='+str(k0)+'/gij_t0.txt')
gij_tend_k0 = np.genfromtxt(path_data+'/kmax='+str(k0)+'/gij_tend.txt')
timek0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/time.txt')
################################
#data k1
################################
k1=1
massgridk1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/massgrid.txt')
massbinsk1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/massbins.txt')
gij_t0_k1 = np.genfromtxt(path_data+'/kmax='+str(k1)+'/gij_t0.txt')
gij_tend_k1 = np.genfromtxt(path_data+'/kmax='+str(k1)+'/gij_tend.txt')
theta_k1_t0 = np.loadtxt(path_data+'/kmax='+str(k1)+'/theta_t0.txt')
theta_k1_tend = np.loadtxt(path_data+'/kmax='+str(k1)+'/theta_tend.txt')
timek1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/time.txt')
gij_t0_k1 = np.reshape(gij_t0_k1,(nbins,k1+1))
gij_tend_k1 = np.reshape(gij_tend_k1,(nbins,k1+1))
################################
#data k2
################################
k2=2
massgridk2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/massgrid.txt')
massbinsk2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/massbins.txt')
gij_t0_k2 = np.genfromtxt(path_data+'/kmax='+str(k2)+'/gij_t0.txt')
gij_tend_k2 = np.genfromtxt(path_data+'/kmax='+str(k2)+'/gij_tend.txt')
theta_k2_t0 = np.loadtxt(path_data+'/kmax='+str(k2)+'/theta_t0.txt')
theta_k2_tend = np.loadtxt(path_data+'/kmax='+str(k2)+'/theta_tend.txt')
timek2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/time.txt')
gij_t0_k2 = np.reshape(gij_t0_k2,(nbins,k2+1))
gij_tend_k2 = np.reshape(gij_tend_k2,(nbins,k2+1))
###############################
#data k3
################################
k3=3
massgridk3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/massgrid.txt')
massbinsk3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/massbins.txt')
gij_t0_k3 = np.genfromtxt(path_data+'/kmax='+str(k3)+'/gij_t0.txt')
gij_tend_k3 = np.genfromtxt(path_data+'/kmax='+str(k3)+'/gij_tend.txt')
theta_k3_t0 = np.loadtxt(path_data+'/kmax='+str(k3)+'/theta_t0.txt')
theta_k3_tend = np.loadtxt(path_data+'/kmax='+str(k3)+'/theta_tend.txt')
timek3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/time.txt')
gij_t0_k3 = np.reshape(gij_t0_k3,(nbins,k3+1))
gij_tend_k3 = np.reshape(gij_tend_k3,(nbins,k3+1))
# Axis ranges for the lin-log and log-log views.
xmin = massgridk0[0]
xmax = massgridk0[-1]
ymint0linlog=-0.01
ymaxt0linlog=0.4
ymintendlinlog=-10**(-5)
ymaxtendlinlog=10** (-5)
yminloglog = 10**(-16)
ymaxloglog = 1
x=np.logspace(np.log10(xmin),np.log10(xmax),num=1000)
#grid plot in lin-log scale: 4 rows (k=0..3) x 2 columns (tau=t0, tau=tend)
fig, axes = plt.subplots(4,2,figsize=(10,12),sharex='col', gridspec_kw={'hspace': 0, 'wspace': 0.05})
#add grey lines to highlight bins
for (m,n), subplot in np.ndenumerate(axes):
    axes[m,n].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    axes[m,n].set_xlim(xmin,xmax)
    axes[m,n].autoscale(enable=True, axis="y", tight=False)
    axes[m,n].axvline(massgridk0[0],c='grey',alpha=0.3)
    axes[m,n].axhline(0,c='grey',alpha=0.3,linestyle='--')
    # Right-hand column carries its tick labels on the right.
    axes[m,1].yaxis.tick_right()
    for j in range(nbins):
        axes[m,n].axvline(massgridk0[j+1],ymin=-0.01,c='grey',alpha=0.3)
#add dashed line for peak of the curve at tau=0 + arrow to highlight the movement of the curve to small masses (fragmentation)
arrow0 = mpatches.FancyArrowPatch((1.1, 150), (0.005, 150),mutation_scale=20,color="C4")
arrow1 = mpatches.FancyArrowPatch((1.1, 150), (0.005, 150),mutation_scale=20,color="C4")
arrow2 = mpatches.FancyArrowPatch((1.1, 150), (0.005, 150),mutation_scale=20,color="C4")
arrow3 = mpatches.FancyArrowPatch((1.1, 150), (0.005, 150),mutation_scale=20,color="C4")
axes[0,1].axvline(1.,c='C4',linestyle='dashdot')
axes[0,1].text(1.5, 140,r'$\tau=0$',rotation='vertical')
axes[0,1].add_patch(arrow0)
axes[1,1].axvline(1.,c='C4',linestyle='dashdot')
axes[1,1].text(1.5, 140,r'$\tau=0$',rotation='vertical')
axes[1,1].add_patch(arrow1)
axes[2,1].axvline(1.,c='C4',linestyle='dashdot')
axes[2,1].text(1.5, 140,r'$\tau=0$',rotation='vertical')
axes[2,1].add_patch(arrow2)
axes[3,1].axvline(1.,c='C4',linestyle='dashdot')
axes[3,1].text(1.5, 140,r'$\tau=0$',rotation='vertical')
axes[3,1].add_patch(arrow3)
#add analytic solution in each plot
axes[0,0].semilogx(x,solkmulDL(x,timek0[0]),'--',c='C0',label='Analytic')
axes[0,1].semilogx(x,solkmulDL(x,timek0[-1]),'--',c='C0',label='Analytic')
axes[1,0].semilogx(x,solkmulDL(x,timek1[0]),'--',c='C0',label='Analytic')
axes[1,1].semilogx(x,solkmulDL(x,timek1[-1]),'--',c='C0',label='Analytic')
axes[2,0].semilogx(x,solkmulDL(x,timek2[0]),'--',c='C0',label='Analytic')
axes[2,1].semilogx(x,solkmulDL(x,timek2[-1]),'--',c='C0',label='Analytic')
axes[3,0].semilogx(x,solkmulDL(x,timek3[0]),'--',c='C0',label='Analytic')
axes[3,1].semilogx(x,solkmulDL(x,timek3[-1]),'--',c='C0',label='Analytic')
#add numerical solution: per-bin polynomial reconstruction, one curve per bin
# (only the first bin gets a legend entry; '_' labels are hidden).
# NOTE(review): the k=1 t0 and the k=1 evaluation below sample at
# I(massgridk0, j) while plotting against grids k1 -- this is only correct
# if all four mass grids coincide; confirm.
for j in range(nbins):
    axes[0,0].plot(I(massgridk0,j),gk0(gij_t0_k0,j,I(massgridk0,j)),c='black',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[0,1].plot(I(massgridk0,j),gk0(gij_tend_k0,j,I(massgridk0,j)),c='black',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[1,0].plot(I(massgridk1,j),gtilde(massgridk1,massbinsk1,gij_t0_k1,theta_k1_t0,k1,j,I(massgridk0,j)),c='C3',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[1,1].plot(I(massgridk1,j),gtilde(massgridk1,massbinsk1,gij_tend_k1,theta_k1_tend,k1,j,I(massgridk1,j)),c='C3',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[2,0].plot(I(massgridk2,j),gtilde(massgridk2,massbinsk2,gij_t0_k2,theta_k2_t0,k2,j,I(massgridk2,j)),c='C1',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[2,1].plot(I(massgridk2,j),gtilde(massgridk2,massbinsk2,gij_tend_k2,theta_k2_tend,k2,j,I(massgridk2,j)),c='C1',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[3,0].plot(I(massgridk3,j),gtilde(massgridk3,massbinsk3,gij_t0_k3,theta_k3_t0,k3,j,I(massgridk3,j)),c='C2',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[3,1].plot(I(massgridk3,j),gtilde(massgridk3,massbinsk3,gij_tend_k3,theta_k3_tend,k3,j,I(massgridk3,j)),c='C2',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
# Empty plots add a text-only "k=..." entry to each row's legend.
axes[0,0].plot([], [], ' ', label=r'$k=0$')
axes[1,0].plot([], [], ' ', label=r'$k=1$')
axes[2,0].plot([], [], ' ', label=r'$k=2$')
axes[3,0].plot([], [], ' ', label=r'$k=3$')
axes[0,0].legend()
axes[1,0].legend()
axes[2,0].legend()
axes[3,0].legend()
axes[0,0].set_title(r'$\tau=%d$' %(timek0[0]))
axes[0,1].set_title(r'$\tau=%d$' %(timek0[-1]))
# Hide the scientific-notation offset text on all but the top row.
axes[1,0].yaxis.get_offset_text().set_visible(False)
axes[1,1].yaxis.get_offset_text().set_visible(False)
axes[2,0].yaxis.get_offset_text().set_visible(False)
axes[2,1].yaxis.get_offset_text().set_visible(False)
axes[3,0].yaxis.get_offset_text().set_visible(False)
axes[3,1].yaxis.get_offset_text().set_visible(False)
for j in range(4):
    axes[j,0].set_ylabel(r'mass density $g$')
axes[3,0].set_xlabel(r'mass $x$')
axes[3,1].set_xlabel(r'mass $x$')
# plt.savefig(path_plot+'/kmul_linlog.png',dpi=192,**savefig_options)
plt.show()
| 8,340 | 33.899582 | 160 | py |
nonlinear_frag | nonlinear_frag-main/kconst/scripts/plots_loglog_kconst.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import scipy.special as special
import scipy.integrate as integrate
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
#options for plots (global matplotlib style shared by the figures below)
plt.rcParams["font.size"]= 16
plt.rcParams['lines.linewidth'] = 3
plt.rcParams["legend.columnspacing"] = 0.5
marker_style = dict( marker='o',markersize=12, markerfacecolor='white', linestyle='',markeredgewidth=2)
savefig_options=dict(bbox_inches='tight')
def solkconstDL(x, t):
    """Analytic solution of the constant-kernel fragmentation case for the
    initial condition g(x, 0) = x exp(-x): the number density f(x, t)
    involves a modified Bessel function I_1 under an integral over masses
    y > x; the mass density returned is x * f.
    """
    def integrand(y):
        s = np.log(y / x)
        return special.iv(1, 2. * np.sqrt(2. * t * s)) * np.exp(-y) / (y * np.sqrt(s))
    tail = integrate.quad(integrand, x, np.inf)[0]
    f = np.exp(-x) * np.exp(-t) + np.sqrt(2. * t) * np.exp(-t) * tail
    return x * f
nbins=20
path_data = '../data'
path_plot = '../plots'
# For each polynomial order k = 0..3 load: bin edges (massgrid), sample
# points (xmeanlog), densities at the first and last output times, and the
# output times.
#data k0
################################
k0=0
massgridk0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/massgrid.txt')
xmeanlogk0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/xmeanlog.txt')
gt0_xmeanlog_k0 = np.genfromtxt(path_data+'/kmax='+str(k0)+'/gt0_xmeanlog.txt')
gtend_xmeanlog_k0 = np.genfromtxt(path_data+'/kmax='+str(k0)+'/gtend_xmeanlog.txt')
timek0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/time.txt')
#data k1
################################
k1=1
massgridk1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/massgrid.txt')
xmeanlogk1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/xmeanlog.txt')
gt0_xmeanlog_k1 = np.genfromtxt(path_data+'/kmax='+str(k1)+'/gt0_xmeanlog.txt')
gtend_xmeanlog_k1 = np.genfromtxt(path_data+'/kmax='+str(k1)+'/gtend_xmeanlog.txt')
timek1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/time.txt')
#data k2
################################
k2=2
massgridk2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/massgrid.txt')
xmeanlogk2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/xmeanlog.txt')
gt0_xmeanlog_k2 = np.genfromtxt(path_data+'/kmax='+str(k2)+'/gt0_xmeanlog.txt')
gtend_xmeanlog_k2 = np.genfromtxt(path_data+'/kmax='+str(k2)+'/gtend_xmeanlog.txt')
timek2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/time.txt')
#data k3
################################
k3=3
massgridk3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/massgrid.txt')
xmeanlogk3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/xmeanlog.txt')
gt0_xmeanlog_k3 = np.genfromtxt(path_data+'/kmax='+str(k3)+'/gt0_xmeanlog.txt')
gtend_xmeanlog_k3 = np.genfromtxt(path_data+'/kmax='+str(k3)+'/gtend_xmeanlog.txt')
timek3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/time.txt')
xmin = massgridk1[0]
xmax = massgridk1[-1]
yminloglog = 10**(-17)
ymaxloglog = 10**(3)
x=np.logspace(np.log10(xmin),np.log10(xmax),num=1000,dtype=np.float64)
#load data for exact solution at tau=tend
# NOTE: this evaluates 1000 numerical quadratures and is slow; the two
# commented lines below allow caching the result to / from disk.
solkconstDLtend = [solkconstDL(x[i],timek0[-1]) for i in range(len(x))]
# np.savetxt(path_data+"/data_solkconstDL_tend.txt",solkconstDLtend)
# solkconstDLtend = np.loadtxt(path_data+"/data_solkconstDL_tend.txt")
#figure in log-log scale
fig,ax = plt.subplots()
ax.set_ylim(yminloglog,10**15)
ax.set_xlim(xmin,xmax)
ax.set_xscale('log')
ax.set_yscale('log')
ax.loglog(x,solkconstDLtend,'--',c='C0',label='Analytic')
ax.plot(xmeanlogk0,gtend_xmeanlog_k0,markeredgecolor='black',label=r'$k=0$',**marker_style)
ax.plot(xmeanlogk1,gtend_xmeanlog_k1,markeredgecolor='C3',label=r'$k=1$',**marker_style)
ax.plot(xmeanlogk2,gtend_xmeanlog_k2,markeredgecolor='C1',label=r'$k=2$',**marker_style)
ax.plot(xmeanlogk3,gtend_xmeanlog_k3,markeredgecolor='C2',label=r'$k=3$',**marker_style)
#zoomin around the peak in lin-log scale
axins2 = zoomed_inset_axes(ax, 3, loc=1)
axins2.semilogx(x,solkconstDLtend,'--',c='C0',label='Analytic')
axins2.semilogx(xmeanlogk0,gtend_xmeanlog_k0,markeredgecolor='black',label=r'$k=0$',**marker_style)
axins2.semilogx(xmeanlogk1,gtend_xmeanlog_k1,markeredgecolor='C3',label=r'$k=1$',**marker_style)
axins2.semilogx(xmeanlogk2,gtend_xmeanlog_k2,markeredgecolor='C1',label=r'$k=2$',**marker_style)
axins2.semilogx(xmeanlogk3,gtend_xmeanlog_k3,markeredgecolor='C2',label=r'$k=3$',**marker_style)
index_maxvalue = np.where(gtend_xmeanlog_k1==np.max(gtend_xmeanlog_k1))[0][0]
xlim_r = xmeanlogk0[index_maxvalue]*50
xlim_l = xmeanlogk0[index_maxvalue]/10.
axins2.set_xlim(xlim_l, xlim_r)
ylim_up = gtend_xmeanlog_k1[index_maxvalue]*1.2
ylim_down = gtend_xmeanlog_k1[index_maxvalue]/1000.
axins2.set_ylim(ylim_down, ylim_up)
axins2.yaxis.tick_left()
axins2.tick_params(labelsize=10)
plt.setp(axins2.get_yticklabels(), visible=True)
ax.set_xlabel(r'mass $x$')
ax.set_ylabel(r"mass density $g(x,\tau')$")
ax.set_title(r"$\tau'=%.1f$" %(timek0[-1]))
ax.legend(loc='lower left',ncol=2)
mark_inset(ax, axins2, loc1=2, loc2=4, fc="none", ec="0.5")
# plt.savefig(path_plot+'/kconst_loglog.png',dpi=192,**savefig_options)
plt.show()
| 4,745 | 31.958333 | 176 | py |
nonlinear_frag | nonlinear_frag-main/kconst/scripts/plot_EOC_kconst.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from matplotlib import pyplot as plt
# ---------------------------------------------------------------------------
# Figures: experimental order of convergence (continuous and discrete L1
# norm) for polynomial orders k = 0..3, each with a reference slope line.
#
# Refactor: the four copy-pasted per-k sections are replaced by small
# lookup tables and loops (plot order and all styles/positions unchanged).
# ---------------------------------------------------------------------------
#options for plots
plt.rcParams["font.size"] = 16
plt.rcParams['lines.linewidth'] = 3
savefig_options = dict(bbox_inches='tight')
marker_style = dict(marker='o', markersize=8, markerfacecolor='white',
                    linestyle='', markeredgewidth=1.2)
path_data = '../data'
path_plot = '../plots'
KS = (0, 1, 2, 3)
# Marker/line colour per polynomial order k.
COLORS = {0: 'black', 1: 'C3', 2: 'C1', 3: 'C2'}
# Convergence data files, one pair per order k (filename embeds k).
EOCL1cont = {k: np.loadtxt(path_data + '/kmax=%d/EOCL1cont_k%d.txt' % (k, k))
             for k in KS}
EOCL1dis = {k: np.loadtxt(path_data + '/kmax=%d/EOCL1dis_k%d.txt' % (k, k))
            for k in KS}
#figure for order of convergence continuous L1 norm
# Per k: (slope exponent, prefactor, y position of the slope annotation).
CONT_SLOPES = {0: (1, 1, 0.2), 1: (2, 0.7, 0.02),
               2: (3, 0.5, 0.0025), 3: (4, 0.1, 1*10**(-4))}
plt.figure(1)
for k in KS:
    plt.loglog(EOCL1cont[k][:, 0], EOCL1cont[k][:, 1], 'o',
               c=COLORS[k], label=r'$k=%d$' % k)
for k in KS:
    p, a, ytext = CONT_SLOPES[k]
    plt.loglog(EOCL1cont[k][1:, 0], a * EOCL1cont[k][1:, 0]**(-p), ':', c=COLORS[k])
    plt.text(6, ytext, r'$\propto N_{\mathrm{bins}/\mathrm{dec}}^{-%d}$' % p,
             color=COLORS[k])
plt.xlabel(r'$N_{\mathrm{bins}/\mathrm{decade}}$')
plt.ylabel(r'$e_{\mathrm{c},N}$')
plt.xlim(xmax=17)
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig(path_plot+'/EOCL1cont_kconst.png',dpi=192,**savefig_options)
#figure for order of convergence discrete L1 norm
# Per k: (slope exponent, prefactor, annotation position (x, y)).
DIS_SLOPES = {0: (2, 1.2, (6, 0.04)), 1: (2, 0.4, (6, 0.0015)),
              2: (4, 0.5, (6, 4*10**(-4))), 3: (4, 0.1, (3, 2*10**(-5)))}
plt.figure(2)
for k in KS:
    plt.loglog(EOCL1dis[k][:, 0], EOCL1dis[k][:, 1], 'o',
               c=COLORS[k], label=r'$k=%d$' % k)
for k in KS:
    p, a, (xtext, ytext) = DIS_SLOPES[k]
    plt.loglog(EOCL1dis[k][1:, 0], a * EOCL1dis[k][1:, 0]**(-p), ':', c=COLORS[k])
    plt.text(xtext, ytext, r'$\propto N_{\mathrm{bins}/\mathrm{dec}}^{-%d}$' % p,
             color=COLORS[k])
plt.xlabel(r'$N_{\mathrm{bins}/\mathrm{decade}}$')
plt.ylabel(r'$e_{\mathrm{d},N}$')
plt.xlim(xmax=17)
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig(path_plot+'/EOCL1dis_kconst.png',dpi=192,**savefig_options)
plt.show() | 3,714 | 35.782178 | 104 | py |
nonlinear_frag | nonlinear_frag-main/kconst/scripts/plot_M1_kconst.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from matplotlib import pyplot as plt
# ---------------------------------------------------------------------------
# Figure: absolute error of the first moment M1 (total mass) over time for
# polynomial orders k = 0..3, in log-log scale.
#
# Refactor: the four copy-pasted per-k sections are replaced by a dict
# comprehension and a loop (styles/labels unchanged).
# ---------------------------------------------------------------------------
#options for plots
plt.rcParams["font.size"] = 16
plt.rcParams['lines.linewidth'] = 3
savefig_options = dict(bbox_inches='tight')
marker_style = dict(marker='o', markersize=8, markerfacecolor='white',
                    linestyle='', markeredgewidth=1.2)
nbins = 20
path_data = '../data'
path_plot = '../plots'
KS = (0, 1, 2, 3)
# (line style, colour) used for each polynomial order k.
LINE_STYLES = {0: ('dotted', 'black'), 1: ('dashdot', 'C3'),
               2: ('dashed', 'C1'), 3: ('solid', 'C2')}
# Absolute M1-error time series, one file per order k.
abserrM1 = {k: np.loadtxt(path_data + '/kmax=' + str(k) + '/abserrM1.txt')
            for k in KS}
xmin = abserrM1[0][0, 0]
xmax = abserrM1[0][-1, 0]
yminloglog = 10**(-8)
ymaxloglog = 10**(-3)
#figure total mass versus time
plt.figure(1)
for k in KS:
    ls, color = LINE_STYLES[k]
    plt.loglog(abserrM1[k][:, 0], abserrM1[k][:, 1],
               linestyle=ls, color=color, label=r'$k=%d$' % k)
plt.legend(loc='upper center', ncol=2)
plt.xlabel(r"time $\tau'$")
plt.ylabel(r'numerical error $e_{M_1,N}$')
plt.xlim(xmin, xmax)
plt.ylim(yminloglog, ymaxloglog)
plt.tight_layout()
# plt.savefig(path_plot+'/abserrM1_kconst.png',dpi=192,**savefig_options)
plt.show()
| 1,781 | 24.826087 | 104 | py |
nonlinear_frag | nonlinear_frag-main/kconst/scripts/plot_errors_kconst.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from matplotlib import pyplot as plt
# ---------------------------------------------------------------------------
# Figures: continuous and discrete L1-norm errors over time for polynomial
# orders k = 0..3.
#
# Refactor: the four copy-pasted per-k sections are replaced by dict
# comprehensions and a shared plotting helper (styles/labels unchanged).
# The final plt.show() intentionally remains on the following line.
# ---------------------------------------------------------------------------
#options for plots
plt.rcParams["font.size"] = 16
plt.rcParams['lines.linewidth'] = 3
savefig_options = dict(bbox_inches='tight')
marker_style = dict(marker='o', markersize=8, markerfacecolor='white',
                    linestyle='', markeredgewidth=1.2)
nbins = 20
path_data = '../data'
path_plot = '../plots'
KS = (0, 1, 2, 3)
# (line style, colour) used for each polynomial order k.
LINE_STYLES = {0: ('dotted', 'black'), 1: ('dashdot', 'C3'),
               2: ('dashed', 'C1'), 3: ('solid', 'C2')}
errL1cont = {k: np.loadtxt(path_data + '/kmax=' + str(k) + '/errL1cont.txt')
             for k in KS}
errL1dis = {k: np.loadtxt(path_data + '/kmax=' + str(k) + '/errL1dis.txt')
            for k in KS}
xmin = np.min(errL1dis[0][:, 0])
xmax = np.max(errL1dis[0][:, 0])
yminloglog = 10**(-5)
ymaxloglog = 10**(1)
def _plot_error(fignum, err, ylabel):
    """Draw one log-log error-vs-time figure containing all orders k."""
    plt.figure(fignum)
    for k in KS:
        ls, color = LINE_STYLES[k]
        plt.loglog(err[k][:, 0], err[k][:, 1],
                   linestyle=ls, color=color, label=r'$k=%d$' % k)
    plt.legend(loc='lower center', ncol=2)
    plt.xlabel(r'time $\tau$')
    plt.ylabel(ylabel)
    plt.xlim(xmin, xmax)
    plt.ylim(yminloglog, ymaxloglog)
    plt.tight_layout()
#figure continuous erreur L1 norm
_plot_error(1, errL1cont, r'numerical error $e_{c,N}$')
# plt.savefig(path_plot+'/errL1_cont_kconst.png',dpi=192,**savefig_options)
#figure discrete erreur L1 norm
_plot_error(2, errL1dis, r'numerical error $e_{d,N}$')
# plt.savefig(path_plot+'/errL1_dis_kconst.png',dpi=192,**savefig_options)
plt.show() | 2,755 | 31.809524 | 104 | py |
nonlinear_frag | nonlinear_frag-main/kconst/scripts/plots_linlog_kconst.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sympy as sym
import numpy as np
from mpmath import *
from matplotlib import pyplot as plt
import scipy.special as special
import scipy.integrate as integrate
import scipy.optimize as optimize
import matplotlib.patches as mpatches
#options for plots
plt.rcParams["font.size"]= 16
plt.rcParams['lines.linewidth'] = 3
# Keyword arguments for plt.savefig (tight bounding box around the figure).
savefig_options=dict(bbox_inches='tight')
#functions to reconstruct the polynomials approximation
#legendre polynomials
def LegendreP(i,x):
    """Evaluate the Legendre polynomial P_i at x from the monomial
    coefficients returned by scipy.special.legendre."""
    poly = special.legendre(i)
    # poly[j] is the coefficient of x**j; summing monomials explicitly lets
    # array-valued x broadcast through x**j exactly as before.
    return sum(poly[j] * x**j for j in range(i + 1))
#gtilde
def gtilde(massgrid,massbins,gij,theta,k,j,x):
    """Evaluate the order-k polynomial reconstruction of the mass density
    on bin j at position x.

    massgrid/massbins hold the bin edges and centres, gij the Legendre
    coefficients, theta a per-bin blending factor (presumably a slope
    limiter -- confirm against the solver), k the polynomial order.
    """
    # Map x onto the reference coordinate of bin j.
    xij = 2/(massgrid[j+1]-massgrid[j])*(x-massbins[j])
    if k == 0:
        # Piecewise-constant case: the single coefficient is the value.
        return gij[j]
    expansion = 0
    for order in range(k+1):
        expansion = expansion + gij[j,order]*LegendreP(order,xij)
    mean = gij[j,0]
    # Blend the full expansion with the cell mean via theta.
    return theta[j]*(expansion-mean)+mean
def gk0(gij,j,x):
    """Evaluate the k=0 (piecewise-constant) reconstruction on bin j at x.

    P_0 is identically 1, but keeping the LegendreP call makes the scalar
    coefficient broadcast to the shape of an array-valued x (via x**0).
    """
    return gij[j] * LegendreP(0, x)
def I(massgrid,j):
    """Return 100 logarithmically spaced sample points spanning bin j
    (both edges included)."""
    lo = np.log10(massgrid[j])
    hi = np.log10(massgrid[j+1])
    return np.logspace(lo, hi, num=100)
def solkconstDLt0(x):
    """Initial condition of the kconst test case: g(x,0) = x*exp(-x)."""
    return np.exp(-x) * x
def solkconstDL(x,t):
    """Analytic reference solution (mass-density form) of the kconst test
    case for the initial condition g(x,0) = x*exp(-x): the decayed initial
    contribution at x plus the integrated contribution of all masses
    y > x (quadrature up to infinity)."""
    survival = np.exp(-x)*np.exp(-t)
    def integrand(y):
        bessel_arg = 2*np.sqrt(2*t*np.log(y/x))
        return special.iv(1, bessel_arg)*np.exp(-y)/(y*np.sqrt(np.log(y/x)))
    influx = np.sqrt(2*t)*np.exp(-t)*integrate.quad(integrand, x, np.inf)[0]
    return x*(survival + influx)
marker_style = dict( marker='o',markersize=8, markerfacecolor='white', linestyle='',markeredgewidth=1.2)
nbins=20
path_data = '../data'
path_plot = '../plots'
#data k0
################################
k0=0
massgridk0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/massgrid.txt')
massbinsk0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/massbins.txt')
gij_t0_k0 = np.genfromtxt(path_data+'/kmax='+str(k0)+'/gij_t0.txt')
gij_tend_k0 = np.genfromtxt(path_data+'/kmax='+str(k0)+'/gij_tend.txt')
timek0 = np.loadtxt(path_data+'/kmax='+str(k0)+'/time.txt')
gij_k0 = np.genfromtxt(path_data+'/kmax='+str(k0)+'/gij.txt')
################################
#data k1
################################
k1=1
massgridk1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/massgrid.txt')
massbinsk1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/massbins.txt')
gij_t0_k1 = np.genfromtxt(path_data+'/kmax='+str(k1)+'/gij_t0.txt')
gij_tend_k1 = np.genfromtxt(path_data+'/kmax='+str(k1)+'/gij_tend.txt')
theta_k1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/theta.txt')
gij_k1 = np.genfromtxt(path_data+'/kmax='+str(k1)+'/gij.txt')
timek1 = np.loadtxt(path_data+'/kmax='+str(k1)+'/time.txt')
# Coefficient arrays are stored flat; reshape to (bin, Legendre order).
gij_t0_k1 = np.reshape(gij_t0_k1,(nbins,k1+1))
gij_tend_k1 = np.reshape(gij_tend_k1,(nbins,k1+1))
gij_k1 = np.reshape(gij_k1,(len(timek1),nbins,k1+1))
################################
#data k2
################################
k2=2
massgridk2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/massgrid.txt')
massbinsk2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/massbins.txt')
gij_t0_k2 = np.genfromtxt(path_data+'/kmax='+str(k2)+'/gij_t0.txt')
gij_tend_k2 = np.genfromtxt(path_data+'/kmax='+str(k2)+'/gij_tend.txt')
theta_k2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/theta.txt')
timek2 = np.loadtxt(path_data+'/kmax='+str(k2)+'/time.txt')
gij_t0_k2 = np.reshape(gij_t0_k2,(nbins,k2+1))
gij_tend_k2 = np.reshape(gij_tend_k2,(nbins,k2+1))
###############################
#data k3
################################
k3=3
massgridk3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/massgrid.txt')
massbinsk3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/massbins.txt')
gij_t0_k3 = np.genfromtxt(path_data+'/kmax='+str(k3)+'/gij_t0.txt')
gij_tend_k3 = np.genfromtxt(path_data+'/kmax='+str(k3)+'/gij_tend.txt')
theta_k3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/theta.txt')
timek3 = np.loadtxt(path_data+'/kmax='+str(k3)+'/time.txt')
gij_t0_k3 = np.reshape(gij_t0_k3,(nbins,k3+1))
gij_tend_k3 = np.reshape(gij_tend_k3,(nbins,k3+1))
###############################
# Axis limits (the *linlog ones are currently unused below).
xmin = massgridk0[0]
xmax = massgridk0[-1]
ymint0linlog=-0.01
ymaxt0linlog=0.4
ymintendlinlog=-10**(-5)
ymaxtendlinlog=10** (-5)
yminloglog = 10**(-16)
ymaxloglog = 1
x=np.logspace(np.log10(xmin),np.log10(xmax),num=1000)
#grid plot in lin-log scale
# 4 rows (orders k=0..3) x 2 columns (tau'=0 left, tau'=tend right).
fig, axes = plt.subplots(4,2,figsize=(10,12),sharex='col', gridspec_kw={'hspace': 0, 'wspace': 0.05})
#add grey lines to highlight bins
for (m,n), subplot in np.ndenumerate(axes):
    axes[m,n].ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    axes[m,n].set_xlim(xmin,xmax)
    axes[m,n].autoscale(enable=True, axis="y", tight=False)
    axes[m,n].axvline(massgridk0[0],c='grey',alpha=0.3)
    axes[m,n].axhline(0,c='grey',alpha=0.3,linestyle='--')
    axes[m,1].yaxis.tick_right()
    for j in range(nbins):
        axes[m,n].axvline(massgridk0[j+1],ymin=-0.01,c='grey',alpha=0.3)
#add dashed line for peak of the curve at tau=0 + arrow to highlight the movement of the curve to small masses (fragmentation)
arrow0 = mpatches.FancyArrowPatch((1.1, 1.5), (0.05, 1.5),mutation_scale=20,color="C4")
arrow1 = mpatches.FancyArrowPatch((1.1, 1.5), (0.05, 1.5),mutation_scale=20,color="C4")
arrow2 = mpatches.FancyArrowPatch((1.1, 1.5), (0.05, 1.5),mutation_scale=20,color="C4")
arrow3 = mpatches.FancyArrowPatch((1.1, 1.5), (0.05, 1.5),mutation_scale=20,color="C4")
axes[0,1].axvline(1.,c='C4',linestyle='dashdot')
axes[0,1].text(1.5, 1,r"$\tau'=0$",rotation='vertical')
axes[0,1].add_patch(arrow0)
axes[1,1].axvline(1.,c='C4',linestyle='dashdot')
axes[1,1].text(1.5, 1,r"$\tau'=0$",rotation='vertical')
axes[1,1].add_patch(arrow1)
axes[2,1].axvline(1.,c='C4',linestyle='dashdot')
axes[2,1].text(1.5, 1,r"$\tau'=0$",rotation='vertical')
axes[2,1].add_patch(arrow2)
axes[3,1].axvline(1.,c='C4',linestyle='dashdot')
axes[3,1].text(1.5, 1,r"$\tau'=0$",rotation='vertical')
axes[3,1].add_patch(arrow3)
# Analytic reference at tau = tend (one quadrature per sample point; slow).
solkconstDLtend = [solkconstDL(x[i],timek0[-1]) for i in range(len(x))]
# np.savetxt(path_data+"data_solkconstDL_tend.txt",solkconstDLtend)
# solkconstDLtend = np.loadtxt("./data_solkconstDL_tend.txt")
#add analytic solution in each plot
axes[0,0].semilogx(x,solkconstDLt0(x),'--',c='C0',label='Analytic')
axes[0,1].semilogx(x,solkconstDLtend,'--',c='C0',label='Analytic')
axes[1,0].semilogx(x,solkconstDLt0(x),'--',c='C0',label='Analytic')
axes[1,1].semilogx(x,solkconstDLtend,'--',c='C0',label='Analytic')
axes[2,0].semilogx(x,solkconstDLt0(x),'--',c='C0',label='Analytic')
axes[2,1].semilogx(x,solkconstDLtend,'--',c='C0',label='Analytic')
axes[3,0].semilogx(x,solkconstDLt0(x),'--',c='C0',label='Analytic')
axes[3,1].semilogx(x,solkconstDLtend,'--',c='C0',label='Analytic')
#add numerical solutions
# NOTE(review): the tau'=0 panel of the k=1 row evaluates gtilde at
# I(massgridk0,j) while plotting against I(massgridk1,j) -- presumably
# both grids are identical; confirm, otherwise the x-values are wrong.
for j in range(nbins):
    axes[0,0].plot(I(massgridk0,j),gk0(gij_t0_k0,j,I(massgridk0,j)),c='black',label=(r"$p_j(x,\tau')$" if j==0 else '_'))
    axes[0,1].plot(I(massgridk0,j),gk0(gij_tend_k0,j,I(massgridk0,j)),c='black',label=(r"$p_j(x,\tau')$" if j==0 else '_'))
    axes[1,0].plot(I(massgridk1,j),gtilde(massgridk1,massbinsk1,gij_t0_k1,theta_k1[0,:],k1,j,I(massgridk0,j)),c='C3',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[1,1].plot(I(massgridk1,j),gtilde(massgridk1,massbinsk1,gij_tend_k1,theta_k1[-1,:],k1,j,I(massgridk1,j)),c='C3',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[2,0].plot(I(massgridk2,j),gtilde(massgridk2,massbinsk2,gij_t0_k2,theta_k2[0,:],k2,j,I(massgridk2,j)),c='C1',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[2,1].plot(I(massgridk2,j),gtilde(massgridk2,massbinsk2,gij_tend_k2,theta_k2[-1,:],k2,j,I(massgridk2,j)),c='C1',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[3,0].plot(I(massgridk3,j),gtilde(massgridk3,massbinsk3,gij_t0_k3,theta_k3[0,:],k3,j,I(massgridk3,j)),c='C2',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
    axes[3,1].plot(I(massgridk3,j),gtilde(massgridk3,massbinsk3,gij_tend_k3,theta_k3[-1,:],k3,j,I(massgridk3,j)),c='C2',label=(r'$p_j(x,\tau)$' if j==0 else '_'))
# Invisible dummy artists so the per-row legend shows the order k.
axes[0,0].plot([], [], ' ', label=r'$k=0$')
axes[1,0].plot([], [], ' ', label=r'$k=1$')
axes[2,0].plot([], [], ' ', label=r'$k=2$')
axes[3,0].plot([], [], ' ', label=r'$k=3$')
axes[0,0].legend()
axes[1,0].legend()
axes[2,0].legend()
axes[3,0].legend()
axes[0,0].set_title(r"$\tau'=%d$" %(timek0[0]))
axes[0,1].set_title(r"$\tau'=%.1f$" %(timek0[-1]))
# Hide the scientific-notation offset labels on the lower rows.
axes[1,0].yaxis.get_offset_text().set_visible(False)
axes[1,1].yaxis.get_offset_text().set_visible(False)
axes[2,0].yaxis.get_offset_text().set_visible(False)
axes[2,1].yaxis.get_offset_text().set_visible(False)
axes[3,0].yaxis.get_offset_text().set_visible(False)
axes[3,1].yaxis.get_offset_text().set_visible(False)
for j in range(4):
    axes[j,0].set_ylabel(r'mass density $g$')
axes[3,0].set_xlabel(r'mass $x$')
axes[3,1].set_xlabel(r'mass $x$')
# plt.savefig(path_plot+'/kconst_linlog.png',dpi=192,**savefig_options)
plt.show()
| 8,684 | 33.058824 | 173 | py |
ccdetection | ccdetection-master/main.py | '''
Created on Sep 28, 2015
@author: Tommi Unruh
'''
import sys
from args_parser import ModeArgsParser
import os
import itertools
from LazyMP import LazyMP
from LazyMP import ProcessIdGenerator
from neo4j_helper import Neo4jHelper
from configurator import Configurator
from query_file import QueryFile
from results.shared_data import SharedData
import multiprocessing
from multiprocessing import Value
# Command-line mode names accepted as argv[1].
ARGS_HELP = "help"
ARGS_SEARCH = "search"
ARGS_CONFIG = "config"
ARGS_CONSOLE = "console"
ARGS_PRINT_STATS = "print_stats"
ARGS_PRINT_RESULTS = "print_results"
ARGS_CONTINUOUS_SEARCH = "continuous_search"
ARGS_COMBINE_STATISTICS = "combine_stats"
# Relative config path; rewritten to an absolute path in main().
CONFIG_PATH = "config"
# Default statistics file used by continuous_search / print_stats / print_results.
DEFAULT_STATS_PATH = "stats"
def main(argv):
    """Program entry point: parse the mode (argv[1]) and its options
    (argv[2:]) and dispatch to the corresponding handler."""
    # Setup command line arguments.
    parser = ModeArgsParser()
    setupArgs(parser)
    try:
        flow = parser.parseArgs(argv[1], argv[2:])
    except:
        # Unknown mode or malformed options: show usage and bail out.
        parser.printHelp(argv[0])
        sys.exit()
    # Make config point to the absolute path.
    full_path = os.path.abspath(argv[0])
    last_slash_index = full_path.rfind("/")
    base_dir = full_path[0:last_slash_index]
    global CONFIG_PATH
    CONFIG_PATH = base_dir + "/" + CONFIG_PATH
    # Set debugging.
    try:
        getArg(flow, "d", "debug")
        Configurator.setDebugging(True)
    except ArgException as err:
        # # Debugging was not specified.
        pass
    if flow[parser.KEY_MODE] != ARGS_CONFIG:
        # Load config.
        Configurator.load(CONFIG_PATH)
    if flow[parser.KEY_MODE] == ARGS_HELP:
        parser.printHelp(argv[0])
        sys.exit()
    elif flow[parser.KEY_MODE] == ARGS_COMBINE_STATISTICS:
        combineStatsFiles(flow)
    elif flow[parser.KEY_MODE] == ARGS_CONSOLE:
        # Optional "-id <n>" selects the Neo4j database instance (default 1).
        id = 1
        try:
            id = getArg(flow, "id")
        except:
            pass
        heap_size = Configurator.getHeapVal()
        Neo4jHelper().setHeapsize(heap_size[0], heap_size[1])
        Neo4jHelper().startConsole(os.path.abspath(flow["in"]), str(id))
    elif flow[parser.KEY_MODE] == ARGS_SEARCH:
        startSearchMode(flow)
    elif flow[parser.KEY_MODE] == ARGS_CONTINUOUS_SEARCH:
        startSearchMode(flow, continuous=True)
    elif flow[parser.KEY_MODE] == ARGS_CONFIG:
        Configurator().setupConfig(
                            CONFIG_PATH, base_dir, getArg(flow, "p", "path")
                            )
    elif flow[parser.KEY_MODE] == ARGS_PRINT_STATS:
        printStats(flow)
    elif flow[parser.KEY_MODE] == ARGS_PRINT_RESULTS:
        printResults(flow)
    else:
        parser.printHelp(argv[0])
        sys.exit()
def printStats(flow):
    """Print the statistics collected by previous continuous_search runs.

    Uses the file given with -s/--statsfile, falling back to
    DEFAULT_STATS_PATH when the option is absent.
    """
    try:
        stats_path = getArg(flow, 's', 'statsfile')
    except:
        stats_path = DEFAULT_STATS_PATH
    if os.path.isfile(stats_path):
        print "Statistics file: '%s'" % (stats_path)
        # SharedData's __str__ renders the statistics summary.
        print SharedData(stats_path, multiprocessing.Lock())
    else:
        print "Given path is not a valid file: '%s'" % (stats_path)
def printResults(flow):
    """Print the code clones recorded by previous continuous_search runs.

    Uses the file given with -s/--statsfile, falling back to
    DEFAULT_STATS_PATH when the option is absent.
    """
    try:
        stats_path = getArg(flow, 's', 'statsfile')
    except:
        stats_path = DEFAULT_STATS_PATH
    if os.path.isfile(stats_path):
        clones = SharedData(stats_path, multiprocessing.Lock()).getClones()
        print "Found %d code clones saved in file '%s':" % (
                                                len(clones), stats_path
                                                )
        for i, clone in enumerate(clones):
            print str(i) + ".", clone
    else:
        print "Given path is not a valid file: '%s'" % (stats_path)
def combineStatsFiles(flow):
    """Merge several statistics files (-s/--stats, comma-separated) into
    one file written to the path given with -out."""
    _files = getArg(flow, "s", "stats")
    _out = getArg(flow, "out")
    _files = _files.split(",")
    try:
        # Remove empty elements.
        # NOTE(review): list.remove("") only drops the first empty entry;
        # several consecutive commas would leave further empties behind.
        _files.remove("")
    except ValueError as err:
        # No element was removed.
        pass
    # Remove unnecessary whitespace characters.
    _files = [_file.strip() for _file in _files]
    if len(_files) > 1:
        # Validate all paths up front before merging anything.
        for _file in _files:
            if not os.path.isfile(_file):
                raise Exception("'%s' is not a file." % (_file))
        lock = multiprocessing.Lock()
        # Fold every remaining file into the first one, then save.
        first_data = SharedData(_files[0], lock)
        for _file in _files[1:]:
            data = SharedData(_file, lock)
            first_data.combineWith(data)
        first_data.saveToFile(_out)
        print "Stats files combined into '%s'" % (_out)
    else:
        print "Specify more than one statistics file. Separate them with commas."
        print "I.e. -s \"\""
def startSearchMode(flow, continuous=False):
flow["in"] = os.path.abspath(flow["in"])
if not os.path.exists(flow["in"]):
print "Given path (-in) does not exist."
sys.exit()
level = 0
multithreads = 0
neo4j_helper = Neo4jHelper()
heap_size = Configurator.getHeapVal()
neo4j_helper.setHeapsize(heap_size[0], heap_size[1])
if continuous:
# Continuous mode was specified, so read the config file
try:
stats_path = getArg(flow, "s", "statsfile")
except:
stats_path = DEFAULT_STATS_PATH
lock = multiprocessing.Lock()
shared_data = SharedData(stats_path, lock, in_path=flow["in"])
in_path = shared_data.getInPath()
if in_path != flow["in"]:
print (
"The given path with \"-in\" is not the path, "
"which was used before."
)
_ = raw_input("Ctrl-C or Ctrl-D to abort. "
"Press any key to continue")
shared_data.setInPath(flow["in"])
Neo4jHelper.setStatisticsObj(shared_data)
try:
multithreads = int(getArg(flow, "m", "multithreading"))
except:
pass
code_path = getArg(flow, "q", "queries")
code = []
# Read given query.
if os.path.isfile(code_path):
with open(code_path, "r") as fh:
code.append(
QueryFile(code_path, fh.read())
)
elif os.path.isdir(code_path):
# Given path is a directory - get all files recursively inside the
# directory.
for path, _, files in os.walk(code_path):
for name in files:
file_path = os.path.join(path, name)
with open(file_path, "r") as fh:
code.append(
QueryFile(file_path, fh.read())
)
if not code:
# Did not find any file recursively inside 'code_path'.
print "Query-path (-q/--queries) does not contain any files."
sys.exit()
else:
# Path does not exist
print "Query-Path (-q/--queries) does not exist."
sys.exit()
try:
level = int(getArg(flow, "l", "level"))
except ArgException:
# Parameter "-l/--level" was not specified.
pass
if level == 0:
# Analyse specified file/files in specified directory with given
# gremlin query/queries.
Neo4jHelper.analyseData((
code, flow["in"], 1
))
else:
# Analyse folders 'level' levels under specified path.
try:
# Get the root directory of every project in a generator.
path_generator = getRootDirectories(flow["in"], level)
if continuous:
# Check if given in-path is the same as the one in the
# given stats file.
if in_path == flow["in"]:
# Skip generator elements if they were already
# analysed before.
projects_analysed_count = shared_data.getProjectsCount()
skipGeneratorElements(path_generator, projects_analysed_count)
except Exception as err:
print "An exception occured: %s" % err
sys.exit()
projects_analysed = 0
if multithreads > 1:
# Multithreading was specified.
process_number_generator = ProcessIdGenerator()
# Start a lazy pool of processes.
pool = LazyMP().poolImapUnordered(
analyseDataHelper, itertools.izip(
itertools.repeat(code), path_generator,
process_number_generator.getGenerator([1,2,3,4]),
),
multithreads,
process_number_generator
)
# And let them work.
try:
while True:
# Let multiprocessing pool process all arguments.
pool.next()
projects_analysed += 1
except Exception as err:
# Done
print err
pass
else:
# No multithreading.
for path in path_generator:
neo4j_helper.analyseData((
code, path, 1
))
projects_analysed += 1
if projects_analysed == 0:
print "No project analysed for path: '%s'" %(
flow["in"]
)
def analyseDataHelper(args):
    # Module-level wrapper around Neo4jHelper.analyseData; used as the
    # target callable of the multiprocessing pool in startSearchMode.
    return Neo4jHelper.analyseData(args)
def skipGeneratorElements(_gen, count):
    """Advance generator `_gen` by `count` elements, printing each skipped
    path; raise if the generator is exhausted early."""
    try:
        for _ in xrange(count):
            print "Skipping", _gen.next()
    except StopIteration:
        raise Exception("Could not skip given paths as expected, stopping.")
def getRootDirectories(path, level):
    """
    Return a generator over the project root directories below 'path'.
    'level' gives the depth of the roots relative to 'path': level == 1
    means every directory directly inside 'path' is a project root,
    level == 2 means every directory two levels down, and so on.
    Raises an Exception for a non-directory 'path' or a level < 1.
    """
    # Wrong arguments handling #1.
    if not os.path.isdir(path):
        raise Exception("Specified path '%s' is not a directory." % path)
    # Wrong arguments handling #2.
    if level < 1:
        raise Exception(
            "Program argument '-l/--level %s has no effect. "
            "Do not use it or specify a value bigger than 0." % str(level)
            )
    collected = []
    return traversePath(collected, path, 1, level)
def traversePath(destination_list, path, current_level, target_level):
    """
    Recursively walk 'path' and yield the path of every directory sitting
    exactly at 'target_level' levels below the starting point (the first
    listing counts as level 1).
    'destination_list' is kept for interface compatibility; it is only
    passed through to the recursive calls and never filled.
    """
    for entry in os.listdir(path):
        # Build the entry's path relative to the current directory.
        entry = "%s/%s" % (path, entry)
        if not os.path.isdir(entry):
            # Plain files can never be project roots -- ignore them.
            continue
        if current_level == target_level:
            yield entry
        else:
            # Not deep enough yet: descend one more level.
            for found in traversePath(destination_list, entry,
                                      current_level + 1, target_level):
                yield found
def setupArgs(parser):
    """
    Setup command line arguments combinations.

    Registers, per mode, the required and optional option lists (short
    name, long name) plus a help text on the given ModeArgsParser.
    """
    # Help: help
    explanation = "Print this help."
    parser.addArgumentsCombination(ARGS_HELP, explanation=explanation)
    # Search code clones: search -q file -in dir/file
    explanation = (
                "Search clones of the code snippet in file specified with "
                "-q/--queries in directory or file specified with -in. In the "
                "case of a dictionary full of project dictionaries, "
                "you can analyse each project on its own by "
                "specifying -l/--level 1. Level==1 means, that "
                "the top level directory of each project is one directory "
                "deeper than the path specified. "
                "-d/--debug enables debugging output."
                )
    parser.addArgumentsCombination(
                        ARGS_SEARCH,
                        [
                        ["q=", "queries"],
                        ["in=", None]
                        ],
                        [
                        ["l=", "level"],
                        ["m=", "multithreading"],
                        ["d", "debug"]
                        ],
                        explanation=explanation
                        )
    # Search code clones: continuous_search -q file -in paths_file (-s file)
    explanation = (
                "Extension of the search mode: Additionally to searching "
                "code clones, a file is used to record statistical data "
                "across multiple executions of the continuous_search. "
                "The default path for the stats file is './stats', but it "
                "can be modified using the -s/--statsfile parameter."
                )
    parser.addArgumentsCombination(
                        ARGS_CONTINUOUS_SEARCH,
                        [
                        ["q=", "queries"],
                        ["in=", None]
                        ],
                        [
                        ["l=", "level"],
                        ["m=", "multithreading"],
                        ["d", "debug"],
                        ["s=", "statsfile"]
                        ],
                        explanation=explanation
                        )
    # Print statistical data: print_stats (-s file)
    explanation = (
                "Print out the statistical data, that has been collected "
                "by using the continuous_search mode before."
                )
    parser.addArgumentsCombination(
                        ARGS_PRINT_STATS,
                        [],
                        [["s=", "statsfile"]],
                        explanation=explanation
                        )
    # Print results (found code clones): print_results (-s file)
    explanation = (
                "Print out the results (=found code clones) from previous "
                "'continuous_search'-mode runs."
                )
    parser.addArgumentsCombination(
                        ARGS_PRINT_RESULTS,
                        [],
                        [["s=", "statsfile"]],
                        explanation=explanation
                        )
    # Configurate paths for ccdetection: config -p/--path dir/file
    explanation = (
                "Setup the config file using the specified path in '-p/--path'."
                " The config file contains all the necessary paths for "
                "ccdetection to work correctly."
                )
    parser.addArgumentsCombination(
                        ARGS_CONFIG,
                        [["p=", "path"]],
                        explanation=explanation
                        )
    # Open neo4j console: console -in dir/file
    explanation = (
                "Import the php file/project AST into the Neo4j database and "
                "start the Neo4j console. "
                "Use \"-id integer\" to use another Neo4j database instance."
                )
    parser.addArgumentsCombination(
                        ARGS_CONSOLE,
                        [["in=", None]],
                        [["id=", None]],
                        explanation=explanation
                        )
    # Combine stats files: console -s/--stats "file1, file2, ..." -out file
    explanation = (
                "Combine multiple statistics files specified with -s/--stats"
                ", separated by commas, into one and save it "
                "to the path specified with -out."
                )
    parser.addArgumentsCombination(
                        ARGS_COMBINE_STATISTICS,
                        [
                        ["s=", "stats"],
                        ["out=", None]
                        ],
                        explanation=explanation
                        )
def getArg(_list, key1, key2=None):
    """Look up a command-line option in the parsed argument dict.

    Tries `key1` first (usually the short option name) and falls back to
    `key2` (the long option name) when given.

    Raises:
        ArgException: if neither key is present in `_list`.

    Fix: the previous bare `except:` clauses swallowed every exception
    (including KeyboardInterrupt); only the expected KeyError is caught now.
    """
    try:
        return _list[key1]
    except KeyError:
        # Short name missing -- fall back to the long name, if any.
        if key2:
            try:
                return _list[key2]
            except KeyError:
                raise ArgException()
        raise ArgException()
class ArgException(Exception):
    """Raised when a required command-line parameter was not specified.

    Fix: derives from Exception instead of BaseException -- per the
    Python documentation, user-defined exceptions should derive from
    Exception so that generic `except Exception` handlers catch them.
    """
    def __init__(self, msg=None):
        # Keep the `message` attribute for existing __str__/callers.
        if msg:
            self.message = msg
        else:
            self.message = (
                    "Parameter was not specified."
                    )
    def __str__(self):
        return self.message
if __name__ == '__main__':
    # Script entry point: dispatch on the command-line mode argument.
    main(sys.argv)
ccdetection | ccdetection-master/manual_search.py | '''
Created on Oct 27, 2015
@author: Tommi Unruh
'''
from joern.all import JoernSteps
import time
from configurator import Configurator
from results.code_clone_data import CodeCloneData
class ManualCCSearch(object):
'''
classdocs
'''
UNTRUSTED_DATA = """attacker_sources = [
"_GET", "_POST", "_COOKIE",
"_REQUEST", "_ENV", "HTTP_ENV_VARS"
]\n"""
SQL_QUERY_FUNCS = """sql_query_funcs = [
"mysql_query", "pg_query", "sqlite_query"
]\n"""
# Gremlin operations
ORDER_LN = ".order{it.a.lineno <=> it.b.lineno}" # Order by linenumber
    def __init__(self, port):
        """
        Connect joern to the Neo4j REST endpoint of a local server.

        Args:
            port: HTTP port of the Neo4j server (int or numeric string;
                converted with int()).
        """
        self.j = JoernSteps()
        self.j.setGraphDbURL('http://localhost:%d/db/data/' % (int(port)))
        # The stock phpjoern steps dir is intentionally left unregistered;
        # only the project's custom gremlin steps are added below.
#         self.j.addStepsDir(
#             Configurator.getPath(Configurator.KEY_PYTHON_JOERN) +
#             "/joern/phpjoernsteps"
#             )
        self.j.addStepsDir(
            Configurator.getPath(Configurator.KEY_BASE_DIR) +
            "/custom_gremlin_steps"
        )
        self.j.connectToDatabase()
        # NOTE(review): QUERIES_DIR is never assigned (the lines below are
        # commented out), but sqlNewIndirect() still reads
        # self.QUERIES_DIR and would raise AttributeError -- confirm.
#         self.QUERIES_DIR = Configurator.getPath(Configurator.BASE_DIR) + \
#             "/gremlin_queries"
def searchCCOne(self):
"""
Search for the first vulnerable tutorial (SQL injection from stackoverflow):
$user_alcohol_permitted_selection = $_POST['alcohol_check']; //Value sent using jquery .load()
$user_social_club_name_input = $_POST['name']; //Value sent using jquery .load()
$query="SELECT * FROM social_clubs
WHERE name = $user_social_club_name_input";
if ($user_alcohol_permitted_selection != "???")
{
$query.= "AND WHERE alcohol_permitted = $user_alcohol_permitted_selection";
}
"""
# construct gremlin query step by step:
# 1. Find variable name X of "variable = $_POST[..]"
# 2. Go to next statement list.
# (3. Find variable name Y of "variable = $_POST[..]"
# (4. Go to next statement list.
# 5. Find variable name Z and string str1 of "variable = string"
# 6. Check if str1 contains regexp "WHERE any_word=$Y".
# (7. Go to next statement list.)
# (8. Check for if-statement with variable $X.)
# 9. Check if variable $Z is extended using string with regexp
# "and where any_word=$X"
# (10. Check for mysql_query($Z))
# all nodes
# query = "g.V(NODE_TYPE, TYPE_STMT_LIST).out"
#
# # AST_ASSIGN nodes' right side
# query += ".rval"
query = "g.V"
return query
def sqlNewIndirect(self):
query = self.UNTRUSTED_DATA + self.SQL_QUERY_FUNCS
query += open(self.QUERIES_DIR + "sql_new_indirect.query", 'r').read()
return query
    def runQuery(self, query):
        # Identity pass-through: runTimedQuery() accepts a query-producing
        # callable; this lets callers hand in a ready-made query string.
        return query
def runTimedQuery(self, myFunction, query=None):
start = time.time()
res = None
try:
if query:
res = self.j.runGremlinQuery(myFunction(query))
else:
res = self.j.runGremlinQuery(myFunction())
except Exception as err:
print "Caught exception:", type(err), err
elapsed = time.time() - start
# print "Query done in %f seconds." % (elapsed)
result = []
try:
for node in res:
print node
data = CodeCloneData()
data.stripDataFromOutput(node)
data.setQueryTime(elapsed)
result.append(data)
except TypeError:
# res is not iterable, because it is one/no node.
# print res
if res:
data = CodeCloneData()
data.stripDataFromOutput(node)
data.setQueryTime(elapsed)
result.append(data)
print res
return (result, elapsed) | 4,188 | 31.472868 | 102 | py |
ccdetection | ccdetection-master/neo4j_helper.py | '''
Created on Nov 11, 2015
@author: Tommi Unruh
'''
from manual_search import ManualCCSearch
import pexpect
import subprocess
import os
import signal
from configurator import Configurator
import sys
class Neo4jHelper(object):
"""
Handling of everything concerning neo4j.
"""
HEAP_SIZE = [100, "G"] # G = Gigabyte, M = Megabyte
    def __init__(self):
        '''
        Constructor. No per-instance state is set up: all functionality
        of this class is provided via static methods and class attributes.
        '''
@staticmethod
def setHeapsize(size, unit):
# Allowed units are "G" (gigabyte) or "M" (megabyte).
if unit == "G" or unit == "M" and isinstance(size, int):
Neo4jHelper.HEAP_SIZE[0:2] = size, unit
else:
raise Exception("Trying to set faulty heap size: %s%s." % (
str(size), unit
))
    @staticmethod
    def setStatisticsObj(obj):
        # Install the shared statistics collector; analyseData() only
        # records results when this class attribute exists.
        Neo4jHelper.stats = obj
    @staticmethod
    def analyseData(code_and_path_and_process_number):
        """
        Create the PHP AST of the files in 'path', import them into the
        neo4j graph database and start the neo4j console.
        Finally, run the analysing query against the graph database.

        Args:
            code_and_path_and_process_number: 3-tuple of
                (query file objects, project path, worker process number).

        Returns:
            The worker's process number, so the pool can reuse the slot.

        Recoverable errors (taken port, missing graph directory, JVM heap
        failure) adjust the environment and retry by recursing into this
        function.
        """
        query_objects, path, process_number = code_and_path_and_process_number
        # Each worker talks to its own neo4j instance on a distinct port.
        port = 7473 + process_number
        try:
            process = Neo4jHelper.prepareData(path, process_number)
            cc_tester = ManualCCSearch(port)
            first_query = True
            clones = []
            queries = []
            for query_file_obj in query_objects:
                result, elapsed_time = cc_tester.runTimedQuery(
                    cc_tester.runQuery,
                    query=query_file_obj.getCode()
                )
                if hasattr(Neo4jHelper, "stats"):
                    # Statistics are enabled, so add results to the statistics.
                    if result:
                        # Code clones found.
                        for clone in result:
                            clone.setQueryFile(query_file_obj.getFilename())
                            code_clone = {
                                "clone": clone,
                                "first": first_query
                            }
                            clones.append(code_clone)
                    else:
                        # No code clone found.
                        query = {
                            "query_time": elapsed_time,
                            "first": first_query
                        }
                        queries.append(query)
                first_query = False
            if hasattr(Neo4jHelper, "stats"):
                try:
                    Neo4jHelper.stats.saveData(queries, clones)
                # NOTE(review): failures while persisting statistics are
                # silently ignored here -- confirm this is intended.
                except:
                    pass
            # Kill neo4j database server.
            process.sendcontrol('c')
            process.close()
        except BindException:
            print (
                "Port %d is taken. Trying to kill a neo4j graph database "
                "listening on that port and start an updated one." % port
            )
            Neo4jHelper.killProcess(process_number)
            return Neo4jHelper.analyseData(code_and_path_and_process_number)
        except PathException:
            exception = "Could not create directory in ccdetection/graphs/."
            print exception
            return Neo4jHelper.analyseData(code_and_path_and_process_number)
        except HeapException:
            print "There is insufficient memory for allocating the heap size."
            print "Created a hs_err_pid* file with more information."
            # Shrink the heap one step: nG -> (n-1)G, 1G -> 512M, nM -> n/2 M.
            if Neo4jHelper.HEAP_SIZE[0] > 1 and Neo4jHelper.HEAP_SIZE[1] == "G":
                new_size = Neo4jHelper.HEAP_SIZE[0] - 1
                unit = "G"
            elif (
                Neo4jHelper.HEAP_SIZE[0] == 1 and
                Neo4jHelper.HEAP_SIZE[1] == "G"
            ):
                # NOTE(review): chained assignment -- HEAP_SIZE[0] is
                # overwritten with 512 right here, before setHeapsize()
                # below runs. Confirm that is intended.
                new_size = Neo4jHelper.HEAP_SIZE[0] = 512
                unit = "M"
            if Neo4jHelper.HEAP_SIZE[1] == "M":
                new_size = Neo4jHelper.HEAP_SIZE[0] / 2
                unit = "M"
            # NOTE(review): if HEAP_SIZE matched none of the branches
            # above, 'new_size'/'unit' are unbound and the print raises.
            print "Trying again with %d%s memory for the heap." % (
                new_size, unit
            )
            print (
                "Change the heap_size parameter in the config file for a "
                "permanent solution."
            )
            Neo4jHelper.setHeapsize(new_size, unit)
            return Neo4jHelper.analyseData(code_and_path_and_process_number)
        except Exception as err:
            print err
            raise Exception("Critical error, exiting.")
        return process_number
    @staticmethod
    def prepareData(path, process_number=1):
        """
        Spawn a neo4j server for 'path' via the SPAWN_SCRIPT and wait
        until it is either up or has failed in a recognisable way.

        Args:
            path: PHP file/project to import.
            process_number: Which neo4j instance (and its graph dir) to use.

        Returns:
            The pexpect child on success (expect index 0,
            "Remote interface ready").

        Raises:
            BindException: port already taken (index 1) or unexpected
                EOF (index 3).
            PathException: graph directory could not be created (index 2).
            Exception: data/ directory not writable (index 4).
            HeapException: JVM heap could not be allocated (index 5).
        """
        print "Analysing path: %s" % path
        process = pexpect.spawn(
            Configurator.getPath(Configurator.KEY_SPAWN_SCRIPT),
            [
                Configurator.getPath(Configurator.KEY_BASE_DIR) + "/config",
                path, str(process_number),
                "%d%s" % (Neo4jHelper.HEAP_SIZE[0],
                          Neo4jHelper.HEAP_SIZE[1])
            ],
            None
        )
        if Configurator.isDebuggingEnabled():
            # Mirror the spawned script's output for debugging.
            process.logfile = sys.stdout
        expectation = process.expect([
            # "graph.db still exists",
            "Remote interface ready",
            "java.net.BindException",
            "java.io.IOException: Unable to create directory path",
            pexpect.EOF,
            "ERROR: No write access to data/ directory",
            (
                "There is insufficient memory for the Java Runtime "
                "Environment to continue."
            )
        ])
        if expectation == 1:
            # BindException (port already taken?)
            raise BindException()
        elif expectation == 2:
            # Unable to create directory path
            raise PathException()
        elif expectation == 3:
            # EOF -- the script died without a recognised message; treat
            # like a port conflict so the caller retries.
            # print process.before
            raise BindException()
        elif expectation == 4:
            raise Exception(
                "ERROR: No write access to neo4j data directory. "
                "Check for sufficient write permissions in all neo4j "
                "instances' data directory."
            )
        elif expectation == 5:
            # Not enough space to allocate the specified amount of heap space.
            raise HeapException()
        return process
def startConsole(self, path, port=1):
"""
Import the php file/project AST from 'path' into the neo4j
database and start the neo4j console, using the 'SPAWN_SCRIPT' file.
"""
process = subprocess.call(
[
Configurator.getPath(Configurator.KEY_SPAWN_SCRIPT),
Configurator.getPath(Configurator.KEY_BASE_DIR) + "/config",
path, str(port),
"%d%s" % (Neo4jHelper.HEAP_SIZE[0],
Neo4jHelper.HEAP_SIZE[1])
],
preexec_fn=os.setsid
)
def signalHandler(signalnum, handler):
os.killpg(process.pid, signal.SIGINT)
signal.signal(signal.SIGINT, signalHandler)
signal.signal(signal.SIGTERM, signalHandler)
@staticmethod
def killProcess(process_number):
kill_regex = "\"java -cp .*neo4j-0%d.*\"" % process_number
process = pexpect.spawn("pkill -f %s" % (kill_regex))
# Wait until child finishes.
process.expect(pexpect.EOF)
class BindException(BaseException):
    """Signals that a neo4j instance's port is already in use."""
    DEFAULT_MESSAGE = (
        "java.net.BindException: Address already in use. "
        "-> pkill neo4j."
    )
    def __init__(self, msg=None):
        # Keep the java-style default so log output stays recognisable.
        self.message = msg if msg else self.DEFAULT_MESSAGE
    def __str__(self):
        return self.message
class PathException(Exception):
    """Raised when the per-instance graph directory cannot be created."""
class HeapException(Exception):
    """Raised when the JVM cannot allocate the requested heap size."""
| 8,947 | 34.507937 | 81 | py |
ccdetection | ccdetection-master/LazyMP.py | '''
Created on Nov 10, 2015
Taken from
http://www.grantjenks.com/wiki/random/python_multiprocessing_lazy_iterating_map
(2013/12/03 11:14 by grant)
and extended by me.
@author: Tommi Unruh
'''
import multiprocessing as mp
import Queue
class LazyMP(object):
    """
    Lazy multiprocessing pool: consumes an iterable of tasks on demand
    instead of materialising it, and yields results unordered as the
    worker processes finish them.

    Python 2 only (uses xrange and the iterator .next() protocol).
    """
    def __init__(self):
        '''
        Constructor. The pool holds no state; everything lives inside
        poolImapUnordered().
        '''
    def work(self, recvq, sendq):
        # Worker loop. NOTE: the caller passes args=(sendq, recvq), so
        # 'recvq' here is the master's send queue and vice versa -- the
        # names are from the worker's perspective.
        # A None sentinel on the queue ends the loop (and the process).
        for _func, args in iter(recvq.get, None):
            result = _func(args)
            sendq.put(result)
    def poolImapUnordered(self,
                          _func, _iterable,
                          procs=mp.cpu_count(),
                          custom_ids_object=None
                          ):
        """
        Lazily apply _func to the items of _iterable using 'procs'
        worker processes, yielding results in completion order.

        Args:
            _func: Callable executed in the workers; receives one item.
            custom_ids_object: Optional ProcessIdGenerator that is fed
                every result (used to recycle worker slot ids).

        NOTE: 'procs=mp.cpu_count()' is evaluated once at import time,
        as with any default argument.
        """
        sendq = mp.Queue(procs)
        recvq = mp.Queue()
        # Start processes.
        # Processes start working when they receive a task from the
        # receive queue.
        for _ in xrange(procs):
            mp.Process(target=self.work, args=(sendq, recvq)).start()
        # Iterate iterable and populate queues.
        send_len = 0
        recv_len = 0
        try:
            working_processes = 0
            while True:
                if working_processes < procs:
                    # There is a process not working at the moment.
                    # Give it a task.
                    working_processes += 1
                    # Send new task to queue.
                    sendq.put((_func, _iterable.next()), True, 0.1)
                    send_len += 1
                else:
                    # Queue of new tasks is full,
                    # so work until a process finishes.
                    while True:
                        try:
                            # Wait for result of a process.
                            # In this case, the result is a custom id number,
                            # as returned by the function analyseData in
                            # main.py
                            result = recvq.get(False)
                            working_processes -= 1
                            # If an object was passed for custom process IDs,
                            # we fill it up with the returned id from 'result'.
                            if custom_ids_object:
                                custom_ids_object.addToGenerator(result)
                            recv_len += 1
                            yield result
                        except Queue.Empty:
                            break
        except StopIteration:
            # _iterable was consumed completely.
            pass
        # Collect remaining results.
        while recv_len < send_len:
            result = recvq.get()
            if custom_ids_object:
                custom_ids_object.addToGenerator(result)
            recv_len += 1
            yield result
        # Terminate worker processes (one None sentinel per worker).
        for _ in xrange(procs):
            sendq.put(None)
class ProcessIdGenerator(object):
    """
    Process number management.
    Allows a lazy multithreading pool to pass the correct process ID
    for a waiting process.
    """
    def __init__(self):
        pass
    def getGenerator(self, ids):
        """
        Yield free process ids indefinitely, blocking while none are
        available.

        Args:
            ids: Initial iterable of available (integer-convertible) ids.
        """
        self.waiting_ids = mp.Queue(len(ids))
        for _id in ids:
            self.waiting_ids.put(int(_id))
        while True:
            # mp.Queue.get() blocks until addToGenerator() returns an id.
            try:
                _next = int(self.waiting_ids.get())
                yield _next
            # NOTE(review): this bare 'except' also swallows
            # GeneratorExit raised through the yield -- confirm.
            except:
                pass
    def addToGenerator(self, _id):
        # Hand a finished worker's id back so getGenerator() can yield it.
        self.waiting_ids.put(int(_id))
| 3,779 | 29.483871 | 79 | py |
ccdetection | ccdetection-master/configurator.py | '''
Created on Nov 14, 2015
@author: Tommi Unruh
'''
import os
import stat
class Configurator(object):
"""
Writes and loads data from a config file.
"""
DEFAULT_HEAP_SIZE = "6G"
KEY_PHP7 = "php7"
KEY_NEO4J = "neo4j"
KEY_HEAPSIZE = "heap_size"
KEY_GRAPHDBS = "graphdbs"
KEY_BASE_DIR = "basedir"
KEY_PHP_JOERN = "phpjoern"
KEY_PHP_PARSER = "php_parser"
KEY_SPAWN_SCRIPT = "spawn_script"
KEY_PYTHON_JOERN = "python_joern"
KEY_BATCH_IMPORT = "batch_import"
KEY_PHP_PARSE_RESULTS = "php_parser_results"
PATH_PHP7 = "php7"
PATH_NEO4j = "neo4j"
PATH_GRAPHDBS = "graphs"
PATH_PHP_JOERN = "phpjoern"
PATH_PHP_PARSER = "AST_parser"
PATH_SPAWN_SCRIPT = "spawn_neodb.sh"
PATH_PYTHON_JOERN = "python-joern"
PATH_BATCH_IMPORT = "batch-import"
PATH_PHP_PARSE_RESULTS = "parse_results"
debugging = False
    def __init__(self):
        # All configuration state is kept in class attributes; instances
        # are only needed for the setup helpers (setupConfig etc.).
        pass
@staticmethod
def readLine(_line):
"""
Return (key, value) of a read line.
None for empty lines and ValueError on format errors.
"""
if _line.strip() == "":
return None
key, value = _line.split("=", 1)
key = key.strip()
value = value.strip()
return (key, value)
    @staticmethod
    def load(config_path):
        # Convenience wrapper: parse config_path into Configurator.config.
        Configurator.readFullConfigFile(config_path)
@staticmethod
def readFullConfigFile(config_path):
Configurator.config = {}
with open(config_path, 'r') as fh:
cnt = 0
for _line in fh:
cnt += 1
# Parse line
try:
res = Configurator.readLine(_line)
if res:
key, value = res
Configurator.config[key] = value
except:
# Format error
raise ConfigException(
"Format error in config file on line %d." % (cnt)
)
@staticmethod
def getHeapVal():
heap_size_str = Configurator.config[Configurator.KEY_HEAPSIZE]
heap_size = [int(heap_size_str[:-1]), heap_size_str[-1]]
return heap_size
    def setupConfig(self, config_path, base_dir, path, start_port=7473):
        """
        Create the ccdetection config file plus the directories and the
        per-instance neo4j server configs it refers to.

        Args:
            config_path: Where to write the config file.
            base_dir: ccdetection's own base directory.
            path: Directory holding the tool installations (php7, neo4j,
                phpjoern, ...); a trailing '/' is stripped.
            start_port: First HTTP port; instance i gets start_port + i
                (HTTPS: start_port + 100 + i).
        """
        config_dict = {}
        if path[-1] == "/":
            path = path[:-1]
        config_dict[self.KEY_PHP7] = path + "/" + self.PATH_PHP7
        config_dict[self.KEY_NEO4J] = path + "/" + self.PATH_NEO4j
        config_dict[self.KEY_BASE_DIR] = base_dir
        config_dict[self.KEY_GRAPHDBS] = base_dir + "/" + self.PATH_GRAPHDBS
        config_dict[self.KEY_PHP_JOERN] = path + "/" + self.PATH_PHP_JOERN
        config_dict[self.KEY_PYTHON_JOERN] = path + "/" + self.PATH_PYTHON_JOERN
        config_dict[self.KEY_BATCH_IMPORT] = path + "/" + self.PATH_BATCH_IMPORT
        config_dict[self.KEY_SPAWN_SCRIPT] = base_dir + "/" + \
                                            self.PATH_SPAWN_SCRIPT
        config_dict[self.KEY_PHP_PARSER] = base_dir + "/" + self.PATH_PHP_PARSER
        config_dict[self.KEY_PHP_PARSE_RESULTS] = (
            config_dict[self.KEY_PHP_PARSER] + "/" +
            self.PATH_PHP_PARSE_RESULTS
        )
        config_dict[self.KEY_HEAPSIZE] = self.DEFAULT_HEAP_SIZE
        self.writeConfigFile(
            config_path,
            config_dict
        )
        if not os.path.exists(config_dict[self.KEY_GRAPHDBS]):
            # Ignore the race condition here, it does not matter.
            # Create the directory for the several graph databases.
            os.makedirs(config_dict[self.KEY_GRAPHDBS])
        if not os.path.exists(config_dict[self.KEY_PHP_PARSE_RESULTS]):
            # Ignore the race condition here, it does not matter.
            # Create the directory for the PHP AST parsing results.
            os.makedirs(config_dict[self.KEY_PHP_PARSE_RESULTS])
        # Write the server config file for every neoj4 instance.
        # They differ in the ports (http and https) they use.
        neo_4j_path = config_dict[self.KEY_NEO4J] + \
                        "/neo4j-0%d/conf/neo4j-server.properties"
        # Check number of neo4j instances
        # NOTE(review): the loop variable below shadows the 'path'
        # parameter; harmless here since the parameter is no longer used.
        neo4j_count = 0
        for path in os.listdir(config_dict[self.KEY_NEO4J]):
            if os.path.isdir(os.path.join(config_dict[self.KEY_NEO4J], path)):
                if path[0:6] == "neo4j-":
                    neo4j_count += 1
        for i in range(1, neo4j_count + 1):
            self.writeNeo4jConfig(
                neo_4j_path % i, start_port + i, start_port + 100 + i
            )
        # Make scripts executable
        filepath = config_dict[self.KEY_SPAWN_SCRIPT]
        st = os.stat(filepath)
        os.chmod(filepath, st.st_mode | stat.S_IEXEC)
        filepath = config_dict[self.KEY_PHP_PARSER] + "/parser"
        st = os.stat(filepath)
        os.chmod(filepath, st.st_mode | stat.S_IEXEC)
def writeConfigFile(self, filepath, _dict):
config_format = "%s = %s"
with open(filepath, 'w') as fh:
for key in _dict:
fh.write(config_format % (key, _dict[key]) + "\n")
    @staticmethod
    def isDebuggingEnabled():
        # True when verbose debug output (e.g. pexpect logging) is on.
        return Configurator.debugging
    @staticmethod
    def setDebugging(_bool):
        # Toggle global debug output for the whole tool.
        Configurator.debugging = _bool
@staticmethod
def getPath(_key):
val = Configurator.config[_key]
if val:
return val
else:
raise ValueError("'%s' is not specified in the config file.")
    def writeNeo4jConfig(self, path, port, port_ssl):
        """
        Write a neo4j-server.properties file to 'path', filling the given
        HTTP and HTTPS ports into the stock configuration template below.
        """
        default_config = """################################################################
# Neo4j configuration
#
################################################################
#***************************************************************
# Server configuration
#***************************************************************
# location of the database directory
org.neo4j.server.database.location=data/graph.db
# Let the webserver only listen on the specified IP. Default is localhost (only
# accept local connections). Uncomment to allow any connection. Please see the
# security section in the neo4j manual before modifying this.
#org.neo4j.server.webserver.address=0.0.0.0
#
# HTTP Connector
#
# http port (for all data, administrative, and UI access)
org.neo4j.server.webserver.port=%d
#
# HTTPS Connector
#
# Turn https-support on/off
org.neo4j.server.webserver.https.enabled=true
# https port (for all data, administrative, and UI access)
org.neo4j.server.webserver.https.port=%d
# Certificate location (auto generated if the file does not exist)
org.neo4j.server.webserver.https.cert.location=conf/ssl/snakeoil.cert
# Private key location (auto generated if the file does not exist)
org.neo4j.server.webserver.https.key.location=conf/ssl/snakeoil.key
# Internally generated keystore (don't try to put your own
# keystore there, it will get deleted when the server starts)
org.neo4j.server.webserver.https.keystore.location=data/keystore
#*****************************************************************
# Administration client configuration
#*****************************************************************
# location of the servers round-robin database directory. Possible values:
# - absolute path like /var/rrd
# - path relative to the server working directory like data/rrd
# - commented out, will default to the database data directory.
org.neo4j.server.webadmin.rrdb.location=data/rrd
# REST endpoint for the data API
# Note the / in the end is mandatory
org.neo4j.server.webadmin.data.uri=/db/data/
# REST endpoint of the administration API (used by Webadmin)
org.neo4j.server.webadmin.management.uri=/db/manage/
# Low-level graph engine tuning file
org.neo4j.server.db.tuning.properties=conf/neo4j.properties
# The console services to be enabled
org.neo4j.server.manage.console_engines=shell
# Comma separated list of JAX-RS packages containing JAX-RS resources, one
# package name for each mountpoint. The listed package names will be loaded
# under the mountpoints specified. Uncomment this line to mount the
# org.neo4j.examples.server.unmanaged.HelloWorldResource.java from
# neo4j-server-examples under /examples/unmanaged, resulting in a final URL of
# http://localhost:7474/examples/unmanaged/helloworld/{nodeId}
#org.neo4j.server.thirdparty_jaxrs_classes=org.neo4j.examples.server.unmanaged=/examples/unmanaged
#*****************************************************************
# HTTP logging configuration
#*****************************************************************
# HTTP logging is disabled. HTTP logging can be enabled by setting this
# property to 'true'.
org.neo4j.server.http.log.enabled=false
# Logging policy file that governs how HTTP log output is presented and
# archived. Note: changing the rollover and retention policy is sensible, but
# changing the output format is less so, since it is configured to use the
# ubiquitous common log format
org.neo4j.server.http.log.config=conf/neo4j-http-logging.xml"""
        with open(path, 'w') as fh:
            fh.write(default_config % (port, port_ssl))
class ConfigException(BaseException):
def __init__(self, msg=None):
if msg:
self.message = msg
else:
self.message = (
"Format error in config file."
)
def __str__(self):
return self.message | 9,907 | 34.010601 | 98 | py |
ccdetection | ccdetection-master/query_file.py | '''
Created on Dec 7, 2015
@author: Tommi Unruh
'''
class QueryFile(object):
    """
    Value object pairing a gremlin query with its source filename.
    """
    def __init__(self, filename, code):
        # Plain storage; no validation is performed.
        self.filename = filename
        self.code = code
    def getCode(self):
        """Return the query source code."""
        return self.code
    def getFilename(self):
        """Return the originating filename."""
        return self.filename
| 388 | 13.961538 | 39 | py |
ccdetection | ccdetection-master/args_parser.py | '''
Created on Jul 19, 2015
@author: Tommi Unruh
'''
import re
import copy
class ModeArgsParser(object):
'''
classdocs
'''
KEY_MODE = "mode"
KEY_ORDER = "order"
KEY_EXPLANATION = "key_explanation"
KEY_ARGS_OPTIONAL = "optional_args"
KEY_ARGS_NECESSARY = "necessary_args"
KEY_ARGS_OPTIONAL_WVAL = "optional_args_w_value"
KEY_ARGS_NECESSARY_WVAL = "necessary_args_w_value"
def __init__(self):
'''
Constructor
'''
self.combinations = {}
def addArgumentsCombination(self, mode, necessary_args=None,
optional_args=None, order=None,
explanation=None):
"""
Prepare a dictionary of necessary and optional values,
with and without values respectively.
"""
self.combinations[mode] = {
self.KEY_ORDER: [],
self.KEY_EXPLANATION: None,
self.KEY_ARGS_OPTIONAL: [],
self.KEY_ARGS_NECESSARY: [],
self.KEY_ARGS_OPTIONAL_WVAL: [],
self.KEY_ARGS_NECESSARY_WVAL: [],
}
# Parse necessary arguments.
if necessary_args:
# Parse short versions first
for s_arg, l_arg in necessary_args:
# If a key ends in "=", we expect it to
# be a key-value pair.
if s_arg:
if s_arg[-1] == "=":
(self.combinations[mode]
[self.KEY_ARGS_NECESSARY_WVAL].append(
[s_arg[:-1], l_arg]
))
else:
# Key does not end in "=".
(self.combinations[mode]
[self.KEY_ARGS_NECESSARY].append(
[s_arg, l_arg]
))
elif not l_arg:
# s_arg and l_arg are both None, which is not correct.
raise NoneTypeCombinationException()
# Parse optional arguments.
if optional_args:
# Parse short versions first
for s_arg, l_arg in optional_args:
# If a key ends in "=", we expect it to
# be a key-value pair.
if s_arg:
if s_arg[-1] == "=":
(self.combinations[mode]
[self.KEY_ARGS_OPTIONAL_WVAL].append(
[s_arg[:-1], l_arg]
))
else:
# Key does not end in "=".
(self.combinations[mode]
[self.KEY_ARGS_OPTIONAL].append(
[s_arg, l_arg]
))
elif not l_arg:
# s_arg and l_arg are both None, which is not correct.
raise NoneTypeCombinationException()
# Setup order of arguments.
# This is important for returning the results.
# Arguments on the command line can be mixed up!
if order:
self.combinations[mode][self.KEY_ORDER] = order
else:
# No order specified, so build the default one:
# Necessary arguments first, as specified. Then optional ones.
if necessary_args:
for s_arg, l_arg in necessary_args:
if s_arg[-1] == "=":
self.combinations[mode][self.KEY_ORDER].append(
s_arg[:-1]
)
else:
self.combinations[mode][self.KEY_ORDER].append(
s_arg
)
if optional_args:
for s_arg, l_arg in optional_args:
if s_arg[-1] == "=":
self.combinations[mode][self.KEY_ORDER].append(
s_arg[:-1]
)
else:
self.combinations[mode][self.KEY_ORDER].append(
s_arg
)
if explanation:
self.combinations[mode][self.KEY_EXPLANATION] = explanation
# Create a duplicate of combinations as a helper variable.
# It is necessary to construct the usage() message.
self.combinations_helper = copy.deepcopy(self.combinations)
def parseMode(self, arg):
"""
Check if mode ('arg') is implemented.
"""
mode = None
arg = arg.strip()
if arg[0] == "-":
raise WrongFormatException(arg)
else:
# Check if this mode is available.
for key in self.combinations:
if key == arg:
mode = arg
break
if mode:
return mode
else:
raise WrongModeException(arg)
def parseArgs(self, mode, args):
# Expects args[0] to be a mode value,
# i.e. it should not have a minus sign in front of it.
mode = self.parseMode(mode)
return self.getOpts(mode, args)
    def getOpts(self, mode, args):
        """
        Parse the option list 'args' against the combination registered
        for 'mode' and return a dict of key -> value (flags map to None)
        plus the entry "mode" -> mode.

        Raises:
            WrongFormatException: for a token that is no -x/--xy option.
            MissingParameterException: when necessary args are missing.
            (argPermitted may additionally raise its own exceptions.)
        """
        # Remark: re_short_option will also match long options.
        # Therefore, look for long options first, then for short options.
        re_long_option = re.compile("--([a-zA-Z]+)")
        re_short_option = re.compile("-([a-zA-Z]+)")
        result = {}
        skip = False
        parsed_vals = {}
        for i, _ in enumerate(args):
            if not skip:
                key = None
                full_key = None
                # Check for long option.
                long_hit = re_long_option.match(args[i])
                if long_hit:
                    key = long_hit.group(1)
                    full_key = long_hit.group(0)
                else:
                    # No long option found, check for short option.
                    short_hit = re_short_option.match(args[i])
                    if short_hit:
                        key = short_hit.group(1)
                        full_key = short_hit.group(0)
                if not key:
                    # No short, no long option found.
                    raise WrongFormatException(args[i])
                val = self.parseNextKeyValue(args, i)
                if val:
                    # The next token was consumed as this key's value.
                    skip = True
                # Check if key-val pair is correct for this command.
                is_permitted = self.argPermitted(full_key, val, mode)
                if is_permitted:
                    result[key] = val
            else:
                skip = False
        # Are necessary arguments still missing?
        if self.isMissingArgs(self.combinations[mode]):
            raise MissingParameterException(self.combinations[mode])
        # Add mode to result
        parsed_vals[self.KEY_MODE] = mode
        # NOTE(review): the KEY_ORDER-based ordering below is disabled;
        # results are returned in plain dict iteration order instead.
#         for elem in self.combinations[mode][self.KEY_ORDER]:
#             if elem in result:
        for key in result:
            parsed_vals[key] = result[key]
        return parsed_vals
def parseNextKeyValue(self, args, i):
"""
Check next argument for a given value for this key.
"""
val = None
if len(args) > i + 1:
parsed_val = args[i+1]
if len(parsed_val) > 1 and parsed_val[0:2] != "--" and parsed_val[0] != "-":
val = parsed_val
elif len(parsed_val) == 1 and parsed_val != "-":
val = parsed_val
return val
def isMissingArgs(self, combination):
if (
combination[self.KEY_ARGS_NECESSARY] or
combination[self.KEY_ARGS_NECESSARY_WVAL]
):
return True
def argPermitted(self, key, val, mode):
"""
Check if a given key-val pair is correctly specified.
If so, remove it from the combination dictionary, so that
it will be ignored for further parsing.
"""
KEY_SHORT = 0
KEY_LONG = 1
combination = self.combinations[mode]
found_permitted_arg = False
orig_key = key
key_type = -1
# clear key from leading minuses. (e.g. --abc or -abc = abc)
if key[0] == "-":
key = key[1:]
key_type = KEY_SHORT
if key[0] == "-":
key = key[1:]
key_type = KEY_LONG
# Check if value is permitted in keys which do not need a value.
for i, combinations_key in enumerate(
combination[self.KEY_ARGS_NECESSARY]
):
if (
key_type == KEY_SHORT and combinations_key[KEY_SHORT] == key or
key_type == KEY_LONG and combinations_key[KEY_LONG] == key
):
# Key found.
# Was a value given?
if val:
raise UnneccessaryValueException(orig_key)
else:
combination[self.KEY_ARGS_NECESSARY].pop(i)
found_permitted_arg = True
if not found_permitted_arg:
# Check if value is permitted in keys which do need a value.
for i, combinations_key in enumerate(
combination[self.KEY_ARGS_NECESSARY_WVAL]
):
if (
key_type == KEY_SHORT and combinations_key[KEY_SHORT] == key or
key_type == KEY_LONG and combinations_key[KEY_LONG] == key
):
# Key found.
# Was a value given?
if val:
combination[self.KEY_ARGS_NECESSARY_WVAL].pop(i)
found_permitted_arg = True
else:
raise MissingValueException(orig_key)
if not found_permitted_arg:
# Check if value is permitted in optional keys
# which do not need a value.
for i, combinations_key in enumerate(
combination[self.KEY_ARGS_OPTIONAL]
):
if (
key_type == KEY_SHORT and combinations_key[KEY_SHORT] == key or
key_type == KEY_LONG and combinations_key[KEY_LONG] == key
):
# Key found.
# Was a value given?
if val:
raise UnneccessaryValueException(orig_key)
else:
combination[self.KEY_ARGS_OPTIONAL].pop(i)
found_permitted_arg = True
if not found_permitted_arg:
# Check if value is permitted in optional keys
# which do need a value.
for i, combinations_key in enumerate(
combination[self.KEY_ARGS_OPTIONAL_WVAL]
):
if (
key_type == KEY_SHORT and combinations_key[KEY_SHORT] == key or
key_type == KEY_LONG and combinations_key[KEY_LONG] == key
):
# Key found.
# Was a value given?
if val:
combination[self.KEY_ARGS_OPTIONAL_WVAL].pop(i)
found_permitted_arg = True
else:
raise MissingValueException(orig_key)
if not found_permitted_arg:
raise WrongParameterException(mode, orig_key)
return found_permitted_arg
    def printHelp(self, arg0):
        """
        Print the usage message: invocation line, registered modes, each
        mode's necessary/[optional] arguments and its explanation text.
        Reads from combinations_helper, the pristine copy of the
        registered combinations.

        Args:
            arg0: The script name (typically sys.argv[0]).
        """
        # Construct usage string
        usage = (
            "Usage: python " + str(arg0) + " MODE necessary_arg0, necessary_arg1"
            ", .. optional_arg0, optional_arg1, ...\n"
        )
        # Print all modes.
        modes = "\nMODES: "
        for key in self.combinations_helper:
            modes += str(key) + ", "
        modes = modes[:-2] + "\n"
        args = "\nMODE ARGS [OPTIONAL_ARGS]:\n"
        # Construct mode-argument combination-strings.
        for mode in self.combinations_helper:
            counter = 0
            arg = "\t" + mode + "\t\t"
            for key in self.combinations_helper[mode][self.KEY_ARGS_NECESSARY_WVAL]:
                arg += "-" + str(key[0])
                if key[1]:
                    arg += "/--" + str(key[1])
                arg += " arg" + str(counter) + " "
                counter += 1
            for key in self.combinations_helper[mode][self.KEY_ARGS_NECESSARY]:
                arg += "-" + str(key[0])
                if key[1]:
                    arg += "/--" + str(key[1]) + " "
            # Optional arguments are wrapped in [ ... ].
            if (
                self.combinations_helper[mode][self.KEY_ARGS_OPTIONAL_WVAL] or
                self.combinations_helper[mode][self.KEY_ARGS_OPTIONAL]
            ):
                arg += "["
            for key in self.combinations_helper[mode][self.KEY_ARGS_OPTIONAL_WVAL]:
                arg += "-" + str(key[0])
                if key[1]:
                    arg += "/--" + str(key[1])
                arg += " arg" + str(counter) + ", "
                counter += 1
            for key in self.combinations_helper[mode][self.KEY_ARGS_OPTIONAL]:
                arg += "-" + str(key[0])
                if key[1]:
                    arg += "/--" + str(key[1]) + ", "
            if (
                self.combinations_helper[mode][self.KEY_ARGS_OPTIONAL_WVAL] or
                self.combinations_helper[mode][self.KEY_ARGS_OPTIONAL]
            ):
                arg = arg[:-2] + "]"
            args += arg + "\n"
        # Also print explanations for each mode.
        explanations = "\nDESCRIPTION:\n"
        tabulator = "\t"
        for key in self.combinations_helper:
            if self.combinations_helper[key][self.KEY_EXPLANATION]:
                explanation = "Mode: " + str(key) + "\n" + tabulator
                explanation += self.combinations_helper[key][self.KEY_EXPLANATION]
                explanations += explanation + "\n\n"
        # Trailing comma: Python-2 print without the extra newline.
        print (usage + modes + args + explanations),
class WrongModeException(BaseException):
    """Raised when the command line names a mode that is not registered."""
    def __init__(self, val=None):
        self.val = val
    def __str__(self):
        if not self.val:
            return "Given mode is not implemented."
        return "Mode '%s' is not implemented." % self.val
class WrongFormatException(BaseException):
    """Raised for a command-line token that is not a valid option."""
    def __init__(self, val=None):
        self.val = val
    def __str__(self):
        if not self.val:
            return "An argument is malformed."
        return "Argument '%s' is malformed." % self.val
class NoneTypeCombinationException(BaseException):
    """Raised when an argument pair is [None, None] (no name at all)."""
    def __str__(self):
        return "Combination cannot contain combination [None, None]."
class MissingValueException(BaseException):
    """Raised when a key that requires a value was given none."""
    def __init__(self, val=None):
        self.val = val
    def __str__(self):
        if not self.val:
            return "You did not specify a necessary value."
        return "You did not specify a value for key '%s'." % self.val
class MissingParameterException(BaseException):
    """Raised when necessary arguments are still missing after parsing."""
    def __init__(self, combinations=None):
        # The (partially consumed) combination dict of the parsed mode.
        self.combinations = combinations
    def __str__(self):
        KEY_ARGS_NECESSARY = "necessary_args"
        KEY_ARGS_NECESSARY_WVAL = "necessary_args_w_value"
        if not self.combinations:
            return "Missing parameters. Aborting..."
        pairs = (self.combinations[KEY_ARGS_NECESSARY] +
                 self.combinations[KEY_ARGS_NECESSARY_WVAL])
        parts = []
        for short_name, long_name in pairs:
            if long_name is not None:
                parts.append("-%s/--%s" % (short_name, long_name))
            else:
                parts.append("-%s" % short_name)
        return "Missing parameters: %s" % ", ".join(parts)
class UnneccessaryValueException(BaseException):
    """Raised when a flag-style option was given a value it cannot take."""
    def __init__(self, val=None):
        self.val = val
    def __str__(self):
        if not self.val:
            return (
                "You did specify a value for a key,"
                " which does not need one."
            )
        return (
            "You did specify a value for key '%s',"
            " but it does not need one." % self.val
        )
class WrongParameterException(BaseException):
    """Raised when an option is not registered for the given mode."""
    def __init__(self, mode, param):
        self.mode = mode
        self.param = param
    def __str__(self):
        return "Parameter '%s' is not allowed for command '%s'." % (
            self.param, self.mode
        )
| 18,607 | 34.92278 | 88 | py |
ccdetection | ccdetection-master/results/shared_data.py | '''
Created on Dec 7, 2015
@author: Tommi Unruh
'''
import os
import cPickle as pickle
from multiprocessing import Value, Manager
class SharedData(object):
"""
Handles shared statistical data, which we want to collect over several
executions of the ccdetection tool.
Only shared values are used, so that multiple child processes
can manipulate them.
"""
KEY_NODES = "nodes_total"
KEY_INPATH = "in_path"
KEY_CLONES = "clones"
KEY_COUNTER = "counter"
KEY_QUERY_TIME = "query_time_total"
KEY_FIRST_COUNTER = "first_query_counter"
KEY_PROJECTS_COUNTER = "projects_counter"
KEY_FIRST_QUERY_TIME = "first_query_time_total"
    def __init__(self, path, lock, in_path=None):
        """
        Set up all statistic fields as shared (between processes) values.

        Args:
            path: Pickle file used for persistence; when it already
                exists, the previous state is restored from it.
            lock: Shared lock serialising saveData() across processes.
            in_path: Root path of the analysed projects (only stored
                when starting fresh).
        """
        self.lock = lock
        self.path = path
        if os.path.isfile(path):
            self.loadData()
        else:
            self.in_path = in_path
            self.clones = Manager().list()
            self.counter = Value("i", 0)
            self.nodes_total = Value("i", 0)
            self.first_counter = Value("i", 0)
            self.query_time_total = Value("d", 0)
            self.projects_counter = Value("i", 0)
            self.first_query_time_total = Value("d", 0)
def incProjectsCounter(self):
"""
Increase the counter of projects analysed.
"""
self.projects_counter.value += 1
def addQuery(self, query_time, first=False):
"""
Add the statistical data of a query that did not find a code clone.
"""
if first:
self.first_counter.value += 1
self.first_query_time_total.value += query_time
else:
self.counter.value += 1
self.query_time_total.value += query_time
def addFoundCodeClone(self, code_clone_data, first=False):
"""
Add the statistical data of a query that did find a code clone.
"""
self.addQuery(code_clone_data.getQueryTime(), first)
self.clones.append(code_clone_data)
def loadData(self):
with open(self.path, "rb") as fh:
data = pickle.load(fh)
# Restore state from load data.
self.in_path = data[self.KEY_INPATH]
self.clones = Manager().list(data[self.KEY_CLONES])
self.counter = Value("i", data[self.KEY_COUNTER])
self.nodes_total = Value("i", data[self.KEY_NODES])
self.first_counter = Value("i", data[self.KEY_FIRST_COUNTER])
self.query_time_total = Value("d", data[self.KEY_QUERY_TIME])
self.projects_counter = Value("i", data[self.KEY_PROJECTS_COUNTER])
self.first_query_time_total = Value("d", data[self.KEY_FIRST_QUERY_TIME])
def saveData(self, queries, code_clones):
"""
Save the data of an analysed project to file.
To avoid conflicts of multiple processes adding and saving data
at the same time, we save all data atomically and using a lock, which
prevents multiple executions at once.
"""
self.lock.acquire()
# Increase projects counter.
self.incProjectsCounter()
# Add all query data.
for query_dict in queries:
self.addQuery(query_dict["query_time"], query_dict["first"])
# Add all data from found code clones
for clone_dict in code_clones:
self.addFoundCodeClone(clone_dict["clone"], clone_dict["first"])
self.saveToFile(self.path)
self.lock.release()
def __str__(self):
try:
avg_query_time_nofirst = (self.query_time_total.value/
float(self.counter.value))
except:
avg_query_time_nofirst = 0
try:
avg_query_time = (
(self.query_time_total.value + self.first_query_time_total.value)/
float(self.counter.value + self.first_counter.value)
)
except:
avg_query_time = 0
try:
avg_first_query_time = (self.first_query_time_total.value/
float(self.first_counter.value))
except:
avg_first_query_time = 0
try:
avg_nodes = self.nodes_total.value/float(self.counter.value)
except:
avg_nodes = 0
data = (
"Projects analysed: %d\n"
"Total queries executed: %d\n"
"Average query time: %fs\n"
"Average query time (without first query): %fs\n"
"Average query time (first query only): %fs\n"
"Average number of nodes in AST: %f\n"
"Code clones found: %d"
) % (
self.projects_counter.value,
self.counter.value + self.first_counter.value, avg_query_time,
avg_query_time_nofirst,
avg_first_query_time,
avg_nodes, len(self.clones)
)
return data
def combineWith(self, shared_data):
self.lock.acquire()
# Add the data of shared_data to this file.
self.in_path = shared_data.in_path
self.nodes_total.value += shared_data.nodes_total.value
for clone in shared_data.clones:
self.clones.append(clone)
self.counter.value += shared_data.counter.value
self.query_time_total.value += shared_data.query_time_total.value
self.first_counter.value += shared_data.first_counter.value
self.projects_counter.value += shared_data.projects_counter.value
self.first_query_time_total.value += (
shared_data.first_query_time_total.value
)
self.lock.release()
def saveToFile(self, out_file):
# Transform data to dictionary for easy pickling.
data = {}
data[self.KEY_INPATH] = self.in_path
data[self.KEY_NODES] = self.nodes_total.value
data[self.KEY_CLONES] = []
for clone in self.clones:
data[self.KEY_CLONES].append(clone)
data[self.KEY_COUNTER] = self.counter.value
data[self.KEY_QUERY_TIME] = self.query_time_total.value
data[self.KEY_FIRST_COUNTER] = self.first_counter.value
data[self.KEY_PROJECTS_COUNTER] = self.projects_counter.value
data[self.KEY_FIRST_QUERY_TIME] = self.first_query_time_total.value
# Save data to file.
with open(out_file, "wb") as fh:
pickle.dump(data, fh, pickle.HIGHEST_PROTOCOL)
def getClones(self):
clones = []
for clone in self.clones:
clones.append(clone)
return clones
def getProjectsCount(self):
return self.projects_counter.value
def getInPath(self):
return self.in_path
def setInPath(self, path):
self.in_path = path | 7,210 | 33.835749 | 86 | py |
ccdetection | ccdetection-master/results/code_clone_data.py | '''
Created on Dec 8, 2015
@author: tommi
'''
import StringIO
class CodeCloneData(object):
    """
    Holds one result of the ccdetection search: where a clone was found,
    which query file produced it, and how long the query took.
    """

    def __init__(self, path="", query_path="", ln_start=-1, ln_end=-1, query_time=-1):
        """Store the clone location and query metadata (all optional)."""
        self.path = path
        self.query_path = query_path
        self.ln_start = ln_start
        self.ln_end = ln_end
        self.query_time = query_time

    def stripDataFromOutput(self, output):
        """
        Extract the data from the ccdetection search output.

        Example output for a found code clone looks like this:
        Found a code clone on lines x to y
        File: filepath
        """
        buf = StringIO.StringIO(output)

        # First line: "Found a code clone on lines <x> to <y>".
        prefix_len = len("Found a code clone on lines ")
        start_str, _, end_str = buf.readline()[prefix_len:].split(" ")

        # Second line: "File: <filepath>".
        _, filepath = buf.readline().split(" ")

        self.ln_start = int(start_str)
        self.ln_end = int(end_str)
        self.path = filepath.strip()

    def setQueryFile(self, path):
        self.query_path = path

    def setQueryTime(self, qt):
        self.query_time = qt

    def getQueryTime(self):
        """Return the query duration; raise if it was never set."""
        if self.query_time <= 0:
            raise Exception(
                "Query time value is wrong. Maybe "
                "it was not set? Query time: %d" % (self.query_time)
            )
        return self.query_time

    def __str__(self):
        return (
            "Code clone of query file '%s' "
            "found in file '%s' "
            "on lines %d to %d." % (
                self.query_path, self.path,
                self.ln_start, self.ln_end
            )
        )
ccdetection | ccdetection-master/results/__init__.py | 0 | 0 | 0 | py | |
ccdetection | ccdetection-master/custom_gremlin_steps/__init__.py | 0 | 0 | 0 | py | |
ccdetection | ccdetection-master/custom_gremlin_steps/syntax/__init__.py | 0 | 0 | 0 | py | |
nmap-generation | nmap-generation-main/generators/heightmap_generator.py | from skimage.color import rgba2rgb, rgb2gray
import numpy as np
from generator import Generator
from sobel_filter import normalized_sobel_filter
class HeightmapGenerator(Generator):
    """Builds a normal map from a heightmap image via a Sobel filter."""

    def __init__(self):
        super().__init__(
            folder='D:/git/nmap-generation/assets',
            img_file='knight_hm.png',
            normal_file_suffix='_heightmap_normal'
        )

    def _apply_filters(self, img, normal_intensity=1):
        # Collapse the RGBA input into a single grayscale channel.
        grayscale = rgb2gray(rgba2rgb(img))
        self._register_img_to_plot([grayscale, 'gray'])

        # The height field is inverted (1 - gray) before the Sobel pass.
        normal_map, vertical, horizontal = normalized_sobel_filter(1 - grayscale, normal_intensity)
        self._register_img_to_plot(vertical, horizontal, normal_map)

        return normal_map
# Script entry point: generate the heightmap-based normal map with a
# normal intensity of 0.9.
if __name__ == '__main__':
    HeightmapGenerator().run(.9)
| 876 | 30.321429 | 106 | py |
nmap-generation | nmap-generation-main/generators/albedo_generator.py | from skimage.color import rgb2gray, rgba2rgb
from matplotlib import pyplot as plt
from generator import Generator
from sobel_filter import normalized_sobel_filter
import numpy as np
class AlbedoGenerator(Generator):
    """Builds a normal map directly from the albedo (color) texture."""

    def __init__(self):
        super().__init__(
            folder='D:/git/nmap-generation/assets',
            img_file='knight.png',
            normal_file_suffix='_albedo_normal'
        )

    def _apply_filters(self, img, normal_intensity=1.0):
        # Collapse the RGBA input into a single grayscale channel.
        grayscale = rgb2gray(rgba2rgb(img))
        self._register_img_to_plot([grayscale, plt.cm.get_cmap('gray')])

        # Sobel pass converts the grayscale image into a normal map.
        normal_map, vertical, horizontal = normalized_sobel_filter(grayscale, normal_intensity)
        self._register_img_to_plot(vertical, horizontal, normal_map)

        return normal_map
# Script entry point: generate the albedo-based normal map with a
# normal intensity of 0.9.
if __name__ == '__main__':
    AlbedoGenerator().run(.9)
| 917 | 29.6 | 102 | py |
nmap-generation | nmap-generation-main/generators/gaussian_filter.py | from scipy.signal import convolve2d as conv2d
import numpy as np
def gaussian_blur(img, intensity=4):
    """Blur *img* with a 3x3 Gaussian kernel.

    The base kernel [[1,2,1],[2,4,2],[1,2,1]] is scaled by intensity/4,
    convolved over the image ('same' output size), and divided by 16 —
    so the default intensity of 4 yields a normalized blur.
    """
    kernel = np.array([
        [1, 2, 1],
        [2, 4, 2],
        [1, 2, 1]
    ]) * intensity / 4
    blurred = conv2d(img, kernel, mode='same')
    return blurred / 16
| 255 | 18.692308 | 45 | py |
nmap-generation | nmap-generation-main/generators/bevel_generator.py | from turtle import shape
from cv2 import threshold
from skimage.color import rgb2gray, rgba2rgb
import numpy as np
from scipy.ndimage import distance_transform_edt
from generator import Generator
from sobel_filter import normalized_sobel_filter, sobel_filter, sobel_gradient
from gaussian_filter import gaussian_blur
class BevelGenerator(Generator):
    """
    Generates a bevel-style normal map from a sprite: silhouette and color
    information are turned into a smooth height gradient via a euclidean
    distance transform, which is then converted to normals with a Sobel
    filter.
    """

    def __init__(self):
        super().__init__(
            folder='D:/git/nmap-generation/assets',
            img_file='knight.png',
            normal_file_suffix='_bevel_normal'
        )

    def _apply_filters(self, img,
            normal_intensity = 1.0, # multiplier
            shape_intensity = 2.0, # multiplier
            info_intensity = 1.0, # multiplier
            gray_treshold = 0.3, # greater == more darker colors will be cut off
            sobel_treshold = 0.75, # greater == more edges will be considered
            edt_mix=0.25 # greater == more shape from silhouette / less == more info from color
        ):
        # grayscale
        img_gray = rgb2gray(rgba2rgb(img))

        ## MASKS
        # ranges 0 ~ 1

        # alpha mask
        # NOTE(review): the ceil(...)/255.0 pattern assumes an 8-bit alpha
        # channel -- confirm the input image's value range.
        img_alpha_mask = np.ceil(img[:,:,3])/255.0
        self._register_img_to_plot(img_alpha_mask)

        # grayscale treshold mask: 1 for pixels brighter than gray_treshold
        img_gray_mask = np.clip(np.ceil(img_gray - gray_treshold), 0, 1)
        self._register_img_to_plot(img_gray_mask)

        # sobel mask: normalize gradient magnitude to max 1, then zero out
        # the strongest edges (those above 1 - sobel_treshold)
        _, sobel_mask_v, sobel_mask_h = sobel_filter(img_gray)
        img_sobel_mask = sobel_gradient(sobel_mask_v, sobel_mask_h)
        img_sobel_mask /= np.amax(img_sobel_mask)
        img_sobel_mask = np.clip(img_sobel_mask + sobel_treshold, 0, 1)
        img_sobel_mask = 1 - np.floor(img_sobel_mask)
        self._register_img_to_plot(img_sobel_mask)

        # final mask 0 ~ 1: intersection (product) of all three masks
        img_final_mask = img_alpha_mask * img_gray_mask * img_sobel_mask
        self._register_img_to_plot(img_final_mask)

        ## euclidean distance transform
        # alpha mask / grayscale mask / final mix
        img_edt_shape = self._apply_intensity(self._apply_edt(img_alpha_mask), shape_intensity)
        self._register_img_to_plot(img_edt_shape)

        img_edt_info = self._apply_intensity(self._apply_edt(img_final_mask), info_intensity)
        self._register_img_to_plot(img_edt_info)

        # blend silhouette-based and detail-based distance fields
        img_edt = img_edt_shape * (edt_mix) + img_edt_info * (1-edt_mix)
        self._register_img_to_plot(img_edt)

        # blur to smooth the distance field before taking gradients
        img_edt_blur = gaussian_blur(img_edt)
        self._register_img_to_plot(img_edt_blur)

        # reverse mask to get correct normal directions
        img_edt_final = 1 - img_edt_blur
        self._register_img_to_plot(img_edt_final)

        # sobel filter to generate normals
        img_sobel, _,_ = normalized_sobel_filter(img_edt_final, normal_intensity)
        self._register_img_to_plot(img_sobel)

        return img_sobel

    def _apply_edt(self, img):
        # Euclidean distance transform of img, normalized into 0 ~ 1
        # (skipped when the transform is all zeros to avoid dividing by 0).
        img_edt = distance_transform_edt(img)
        _max = np.amax(img_edt)

        if _max != 0:
            img_edt = (img_edt/_max) # normalize into 0 ~ 1

        return img_edt

    def _apply_intensity(self, img, intensity):
        # Scale img by intensity and clamp the result to 0 ~ 1.
        return np.clip(img * intensity, 0, 1)
# Script entry point: generate the bevel-based normal map with a
# normal intensity of 0.95.
if __name__ == '__main__':
    BevelGenerator().run(.95)
| 3,244 | 32.802083 | 95 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.