repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
DACBench | DACBench-main/dacbench/agents/dynamic_random_agent.py | from gymnasium import spaces
from dacbench.abstract_agent import AbstractDACBenchAgent
class DynamicRandomAgent(AbstractDACBenchAgent):
    """Agent that resamples a random action every ``switching_interval`` steps.

    Note: the action drawn in ``__init__`` is used until the counter first
    exceeds the interval, so the very first action is held for
    ``switching_interval + 1`` calls to ``act``.
    """

    def __init__(self, env, switching_interval):
        """Store the env's sampler and draw the initial action.

        Parameters
        ----------
        env : gym.Env
            Environment whose action space is sampled
        switching_interval : int
            Number of steps an action is kept before resampling
        """
        self.sample_action = env.action_space.sample
        self.switching_interval = switching_interval
        self.count = 0
        self.action = self.sample_action()
        # 1-D Box actions are unwrapped to scalars in act().
        self.shortbox = (
            isinstance(env.action_space, spaces.Box) and len(env.action_space.low) == 1
        )

    def act(self, state, reward):
        """Return the current action, resampling once the interval has elapsed."""
        if self.count >= self.switching_interval:
            self.count = 0
            self.action = self.sample_action()
        self.count += 1
        return self.action[0] if self.shortbox else self.action

    def train(self, next_state, reward):
        """No-op: this agent does not learn."""
        pass

    def end_episode(self, state, reward):
        """No-op: no per-episode bookkeeping needed."""
        pass
| 884 | 26.65625 | 87 | py |
DACBench | DACBench-main/dacbench/agents/simple_agents.py | from gymnasium import spaces
from dacbench.abstract_agent import AbstractDACBenchAgent
class RandomAgent(AbstractDACBenchAgent):
    """Agent that samples a fresh random action on every step."""

    def __init__(self, env):
        """Store the env's action sampler.

        Parameters
        ----------
        env : gym.Env
            Environment whose action space is sampled
        """
        self.sample_action = env.action_space.sample
        # 1-D Box actions are unwrapped to scalars in act().
        is_box = isinstance(env.action_space, spaces.Box)
        self.shortbox = is_box and len(env.action_space.low) == 1

    def act(self, state, reward):
        """Sample and return a random action."""
        action = self.sample_action()
        return action[0] if self.shortbox else action

    def train(self, next_state, reward):
        """No-op: this agent does not learn."""
        pass

    def end_episode(self, state, reward):
        """No-op: no per-episode bookkeeping needed."""
        pass
class StaticAgent(AbstractDACBenchAgent):
    """Agent that always returns one fixed, pre-configured action."""

    def __init__(self, env, action):
        """Store the constant action; ``env`` is accepted for interface compatibility."""
        self.action = action

    def act(self, state, reward):
        """Return the stored action regardless of observation and reward."""
        return self.action

    def train(self, next_state, reward):
        """No-op: a static policy does not learn."""
        pass

    def end_episode(self, state, reward):
        """No-op: no per-episode bookkeeping needed."""
        pass
| 934 | 23.605263 | 76 | py |
DACBench | DACBench-main/dacbench/agents/__init__.py | from dacbench.agents.dynamic_random_agent import DynamicRandomAgent
from dacbench.agents.generic_agent import GenericAgent
from dacbench.agents.simple_agents import RandomAgent, StaticAgent
__all__ = ["StaticAgent", "RandomAgent", "GenericAgent", "DynamicRandomAgent"]
| 270 | 44.166667 | 78 | py |
DACBench | DACBench-main/dacbench/challenge_benchmarks/reward_quality_challenge/reward_functions.py | import numpy as np
def easy_sigmoid(self):
    """
    Reward for the sigmoid benchmark: digit-weighted difference between the
    taken action and the closest discretized sigmoid action, clipped to the
    environment's reward range.

    Returns
    -------
    float
        Clipped reward value.
    """
    # Fix: iterate slopes first, shifts second, matching both the unpacking
    # names and the argument order used in almost_easy_sigmoid; the original
    # zipped (shifts, slopes) and thereby swapped the _sig arguments.
    sigmoids = [
        np.abs(self._sig(self.c_step, slope, shift))
        for slope, shift in zip(self.slopes, self.shifts)
    ]
    action = []
    for i in range(len(self.action_vals)):
        best_action = None
        dist = 100
        for a in range(self.action_vals[i] + 1):
            # Fix: use the same normalization in comparison and update; the
            # original updated `dist` with denominator action_vals[i] while
            # comparing against action_vals[i] - 1, corrupting later
            # comparisons within this loop.
            new_dist = np.abs(sigmoids[i] - a / (self.action_vals[i] - 1))
            if new_dist < dist:
                dist = new_dist
                best_action = a
        action.append(best_action)
    # Weight disagreement in dimension i by 10**i, then clip into reward_range.
    action_diffs = self.action - action
    r = 0
    for i in range(len(action_diffs)):
        r += 10**i * action_diffs[i]
    r = max(self.reward_range[0], min(self.reward_range[1], r))
    return r
def almost_easy_sigmoid(self):
    """
    Reward: summed closeness between each sigmoid value and the chosen,
    normalised action in every dimension, clipped to the reward range.

    Returns
    -------
    float
        Clipped reward value.
    """
    total = 0
    for slope, shift, act, max_act in zip(
        self.slopes, self.shifts, self.action, self.action_vals
    ):
        total += 1 - np.abs(self._sig(self.c_step, slope, shift) - (act / (max_act - 1)))
    return max(self.reward_range[0], min(self.reward_range[1], total))
def sum_reward(self):
    """
    Accumulate the default reward over an episode and pay it out only at the end.

    Returns
    -------
    float
        Accumulated reward if the episode is done, else 0.
    """
    if self.c_step == 1:
        # First step of an episode: start a fresh accumulator.
        self.rew_sum = 0
    # Fix: the original added the bound method object itself
    # (self.get_default_reward) to the accumulator, which raises a TypeError;
    # it must be called to obtain the reward value.
    self.rew_sum += self.get_default_reward()
    if self.done:
        return self.rew_sum
    else:
        return 0
def random_reward(self):
    """Return a reward drawn uniformly at random from the env's reward range."""
    bounds = self.reward_range
    return np.random.uniform(bounds[0], bounds[1])
def manhattan_distance_reward_geometric(self):
    """
    Reward from the normalised Manhattan distance between the action
    coordinates and the target curve coordinates (1 = perfect match).
    """
    coordinates, action_interval, highest_coords, lowest_actions = self._pre_reward()
    dist = sum(abs(c - a) for c, a in zip(coordinates, action_interval))
    max_dist = sum(abs(lo - hi) for lo, hi in zip(lowest_actions, highest_coords))
    return abs(1 - dist / max_dist)
def quadratic_manhattan_distance_reward_geometric(self):
    """
    Quadratic reward from the normalised Manhattan distance between action
    coordinates and target curve coordinates (sharper gradient near optimum).
    """
    coordinates, action_interval, highest_coords, lowest_actions = self._pre_reward()
    dist = sum(abs(c - a) for c, a in zip(coordinates, action_interval))
    max_dist = sum(abs(lo - hi) for lo, hi in zip(lowest_actions, highest_coords))
    return abs((1 - dist / max_dist) ** 2)
def quadratic_euclidean_distance_reward_geometric(self):
    """
    Quadratic reward from the normalised Euclidean distance between action
    coordinates and target curve coordinates.
    """
    coords, action_coords, highest_coords, lowest_actions = self._pre_reward()
    dist_ratio = np.linalg.norm(action_coords - coords) / np.linalg.norm(
        highest_coords - lowest_actions
    )
    return abs((1 - dist_ratio) ** 2)
def multiply_reward_geometric(self):
    """
    Reward as the product of per-dimension normalised closeness values
    between action coordinates and target curve coordinates.
    """
    coords, action_coords, highest_coords, lowest_actions = self._pre_reward()
    factors = [
        1 - abs(c - a) / abs(lo - hi)
        for c, a, lo, hi in zip(coords, action_coords, lowest_actions, highest_coords)
    ]
    return np.prod(factors)
| 3,014 | 29.454545 | 88 | py |
DACBench | DACBench-main/dacbench/challenge_benchmarks/state_space_challenge/random_states.py | import numpy as np
def small_random_luby_state(self):
    """Return the default Luby state padded with Gaussian noise to 20 entries."""
    state = self.get_default_state(None)
    # Pad with N(3, 2.5) noise until the state has exactly 20 entries.
    while len(state) < 20:
        state.append(np.random.normal(3, 2.5))
    return state
def random_luby_state(self):
    """Return the default Luby state padded with Gaussian noise to 250 entries."""
    state = self.get_default_state(None)
    # Pad with N(3, 2.5) noise until the state has exactly 250 entries.
    while len(state) < 250:
        state.append(np.random.normal(3, 2.5))
    return state
def small_random_sigmoid_state(self):
    """
    Return a length-50 state: the default sigmoid state features interleaved
    (in order) with N(2, 1.5) noise entries that fill the remaining slots.
    """
    remaining = list(self.get_default_state(None))
    noise_budget = 50 - len(remaining)
    padded = []
    for _ in range(50):
        if noise_budget > 0 and remaining:
            # While both noise and real features remain, flip a coin.
            if np.random.choice([0, 1]):
                padded.append(np.random.normal(2, 1.5))
                noise_budget -= 1
            else:
                padded.append(remaining.pop(0))
        elif not remaining:
            # Real features exhausted: fill the rest with noise.
            padded.append(np.random.normal(2, 1.5))
        else:
            # Noise budget exhausted: emit remaining real features in order.
            padded.append(remaining.pop(0))
    return padded
def random_sigmoid_state(self):
    """
    Return a length-500 state: the default sigmoid state features interleaved
    (in order) with N(2, 1.5) noise entries that fill the remaining slots.
    """
    remaining = list(self.get_default_state(None))
    noise_budget = 500 - len(remaining)
    padded = []
    for _ in range(500):
        if noise_budget > 0 and remaining:
            # While both noise and real features remain, flip a coin.
            if np.random.choice([0, 1]):
                padded.append(np.random.normal(2, 1.5))
                noise_budget -= 1
            else:
                padded.append(remaining.pop(0))
        elif not remaining:
            # Real features exhausted: fill the rest with noise.
            padded.append(np.random.normal(2, 1.5))
        else:
            # Noise budget exhausted: emit remaining real features in order.
            padded.append(remaining.pop(0))
    return padded
| 1,928 | 31.15 | 59 | py |
DACBench | DACBench-main/dacbench/instance_sets/geometric/SampleGeometricInstances.py | from __future__ import generators
import os
import random
from typing import Dict
import numpy as np
# Base directory of this module; instance CSVs are written next to it by default.
FILE_PATH = os.path.dirname(__file__)
# How many layers (curves) of each function type to sample per instance.
FUNCTION_CONFIG = {
    "sigmoid": 1,
    "linear": 1,
    "parabel": 1,
    "cubic": 1,
    "logarithmic": 1,
    "constant": 1,
    "sinus": 1,
}
# Each function type takes a fixed number of parameters; shorter parameter
# lists are padded with zeros up to the maximum when written to CSV.
FUNCTION_PARAMETER_NUMBERS = {
    "sigmoid": 2,
    "linear": 2,
    "parabel": 3,
    "cubic": 3,
    "logarithmic": 1,
    "constant": 1,
    "sinus": 1,
}
# Number of instance IDs to sample per generated instance set.
SAMPLE_SIZE = 100
def save_geometric_instances(
    filename: str, config: Dict = FUNCTION_CONFIG, path: str = ""
):
    """
    First delete the old instance set, then create new instances based on config.

    Parameters
    ----------
    filename : str
        name of instance set
    config : Dict, optional
        config that has info about which functions will get selected, by default FUNCTION_CONFIG
    path : str
        path to save to
    """
    base_dir = path if path else FILE_PATH
    csv_path = os.path.join(base_dir, filename)
    # Remove any previous instance set so we always start from scratch.
    if os.path.exists(csv_path):
        os.remove(csv_path)
    max_params = max(FUNCTION_PARAMETER_NUMBERS.values())
    header = (
        "ID,fcn_name"
        + "".join(f",param{i}" for i in range(1, max_params + 1))
        + "\n"
    )
    with open(csv_path, "a") as fh:
        fh.write(header)
        # One row per (instance id, function type, layer).
        for idx in range(SAMPLE_SIZE):
            for fname, layer_count in config.items():
                for _ in range(layer_count):
                    fh.write(_create_csv_string(idx, fname))
def _create_csv_string(index, func_name: str) -> str:
    """
    Create comma separated string with function name and parameter values.
    Irrelevant parameter slots are filled with 0.

    Parameters
    ----------
    index:
        instance index
    func_name : str
        name of function

    Returns
    -------
    str
        comma separated string (newline-terminated)
    """
    n_params = FUNCTION_PARAMETER_NUMBERS[func_name]
    total_slots = max(FUNCTION_PARAMETER_NUMBERS.values())
    # Sigmoid and parabel/cubic draw their parameters from generators so the
    # values come out in a fixed order.
    gen = None
    if func_name == "sigmoid":
        gen = sample_sigmoid_value()
    elif func_name in ("cubic", "parabel"):
        gen = sample_parabel_cubic_value()
    parts = [str(index), func_name]
    for slot in range(total_slots):
        if slot >= n_params:
            # Pad unused slots with 0 so all rows have the same width.
            parts.append("0")
        elif func_name == "sinus":
            parts.append(str(np.round(sample_sinus_value(), 1)))
        elif func_name == "sigmoid":
            parts.append(str(np.round(next(gen), 1)))
        elif func_name in ("cubic", "parabel"):
            parts.append(str(next(gen)))
        else:
            parts.append(str(np.round(np.random.uniform(low=-10.0, high=10.0), 1)))
    return ",".join(parts) + "\n"
def sample_sinus_value():
    """Sample the sinus parameter, uniform in [0.5, 2.0], rounded to one decimal."""
    value = np.random.uniform(low=0.5, high=2.0)
    return np.round(value, 1)
def sample_sigmoid_value():
    """Yield sigmoid parameters in order: scale, then inflection point."""
    yield np.round(np.random.uniform(low=0.1, high=4.0), 1)  # scale
    yield np.round(np.random.uniform(low=0, high=10), 1)  # inflection point
def sample_parabel_cubic_value():
    """Yield parabola/cubic parameters in order: sign, x-intercept, y-intercept."""
    yield random.choice([-1, 1])  # sign
    yield random.choice(list(range(3, 8)))  # x-intercept
    yield random.choice([-50, -20, -10, -5, -1, 0, 1, 5, 10, 20, 50])  # y-intercept
if __name__ == "__main__":
    # Regenerate the default instance set when this module is run as a script.
    # save_geometric_instances("geometric_unit_test.csv", FUNCTION_CONFIG)
    save_geometric_instances("geometric_test.csv", FUNCTION_CONFIG)
| 3,827 | 24.019608 | 103 | py |
DACBench | DACBench-main/dacbench/wrappers/policy_progress_wrapper.py | import matplotlib.pyplot as plt
import numpy as np
from gymnasium import Wrapper
class PolicyProgressWrapper(Wrapper):
    """
    Wrapper to track progress towards optimal policy.
    Can only be used if a way to obtain the optimal policy given an instance can be obtained.
    """

    def __init__(self, env, compute_optimal):
        """
        Initialize wrapper.

        Parameters
        ----------
        env : gym.Env
            Environment to wrap
        compute_optimal : function
            Function to compute optimal policy
        """
        super(PolicyProgressWrapper, self).__init__(env)
        self.compute_optimal = compute_optimal
        self.episode = []
        self.policy_progress = []

    def __setattr__(self, name, value):
        """
        Set attribute in wrapper if available and in env if not.

        Parameters
        ----------
        name : str
            Attribute to set
        value
            Value to set attribute to
        """
        # Only the wrapper's own bookkeeping lives on the wrapper itself;
        # everything else is forwarded to the wrapped env.
        if name in (
            "compute_optimal",
            "env",
            "episode",
            "policy_progress",
            "render_policy_progress",
        ):
            object.__setattr__(self, name, value)
        else:
            setattr(self.env, name, value)

    def __getattribute__(self, name):
        """
        Get attribute value of wrapper if available and of env if not.

        Parameters
        ----------
        name : str
            Attribute to get

        Returns
        -------
        value
            Value of given name
        """
        if name in (
            "step",
            "compute_optimal",
            "env",
            "episode",
            "policy_progress",
            "render_policy_progress",
        ):
            return object.__getattribute__(self, name)
        else:
            return getattr(self.env, name)

    def step(self, action):
        """
        Execute environment step and record distance to the optimal policy.

        Parameters
        ----------
        action : int
            action to execute

        Returns
        -------
        np.array, float, bool, bool, dict
            state, reward, terminated, truncated, metainfo
        """
        observation, reward, terminated, truncated, info = self.env.step(action)
        self.episode.append(action)
        if terminated or truncated:
            # At episode end, compare the action sequence to the optimum.
            optimal = self.compute_optimal(self.env.instance)
            distance = np.linalg.norm(np.array(optimal) - np.array(self.episode))
            self.policy_progress.append(distance)
            self.episode = []
        return observation, reward, terminated, truncated, info

    def render_policy_progress(self):
        """Plot progress."""
        plt.figure(figsize=(12, 6))
        episodes = np.arange(len(self.policy_progress))
        plt.plot(episodes, self.policy_progress)
        plt.title("Policy progress over time")
        plt.xlabel("Episode")
        plt.ylabel("Distance to optimal policy")
        plt.show()
| 2,968 | 25.274336 | 93 | py |
DACBench | DACBench-main/dacbench/wrappers/performance_tracking_wrapper.py | from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb
from gymnasium import Wrapper
sb.set_style("darkgrid")
current_palette = list(sb.color_palette())
class PerformanceTrackingWrapper(Wrapper):
    """
    Wrapper to track episode performance.
    Includes interval mode that returns performance in lists of len(interval) instead of one long list.
    """
    def __init__(
        self,
        env,
        performance_interval=None,
        track_instance_performance=True,
        logger=None,
    ):
        """
        Initialize wrapper.
        Parameters
        ----------
        env : gym.Env
            Environment to wrap
        performance_interval : int
            If not none, mean in given intervals is tracked, too
        track_instance_performance : bool
            Indicates whether to track per-instance performance
        logger : dacbench.logger.ModuleLogger
            logger to write to
        """
        super(PerformanceTrackingWrapper, self).__init__(env)
        self.performance_interval = performance_interval
        self.overall_performance = []
        # Running sum of rewards within the current episode.
        self.episode_performance = 0
        if self.performance_interval:
            self.performance_intervals = []
            self.current_performance = []
        self.track_instances = track_instance_performance
        if self.track_instances:
            # Maps stringified instance -> list of episode returns on it.
            self.instance_performances = defaultdict(lambda: [])
        self.logger = logger
    def __setattr__(self, name, value):
        """
        Set attribute in wrapper if available and in env if not.
        Parameters
        ----------
        name : str
            Attribute to set
        value
            Value to set attribute to
        """
        # Wrapper-owned attributes; everything else is forwarded to the env.
        if name in [
            "performance_interval",
            "track_instances",
            "overall_performance",
            "performance_intervals",
            "current_performance",
            "env",
            "get_performance",
            "step",
            "instance_performances",
            "episode_performance",
            "render_performance",
            "render_instance_performance",
            "logger",
        ]:
            object.__setattr__(self, name, value)
        else:
            setattr(self.env, name, value)
    def __getattribute__(self, name):
        """
        Get attribute value of wrapper if available and of env if not.
        Parameters
        ----------
        name : str
            Attribute to get
        Returns
        -------
        value
            Value of given name
        """
        if name in [
            "performance_interval",
            "track_instances",
            "overall_performance",
            "performance_intervals",
            "current_performance",
            "env",
            "get_performance",
            "step",
            "instance_performances",
            "episode_performance",
            "render_performance",
            "render_instance_performance",
            "logger",
        ]:
            return object.__getattribute__(self, name)
        else:
            return getattr(self.env, name)
    def step(self, action):
        """
        Execute environment step and record performance.
        Parameters
        ----------
        action : int
            action to execute
        Returns
        -------
        np.array, float, bool, dict
            state, reward, done, metainfo
        """
        state, reward, terminated, truncated, info = self.env.step(action)
        self.episode_performance += reward
        if terminated or truncated:
            # Episode finished: flush the accumulated return into the trackers.
            self.overall_performance.append(self.episode_performance)
            if self.logger is not None:
                self.logger.log(
                    "overall_performance",
                    self.episode_performance,
                )
            if self.performance_interval:
                if len(self.current_performance) < self.performance_interval:
                    self.current_performance.append(self.episode_performance)
                else:
                    # Interval full: archive it and start a new one with this episode.
                    self.performance_intervals.append(self.current_performance)
                    self.current_performance = [self.episode_performance]
            if self.track_instances:
                # Instances are keyed by the concatenation of their feature values.
                key = "".join(str(e) for e in self.env.instance)
                self.instance_performances[key].append(self.episode_performance)
            self.episode_performance = 0
        return state, reward, terminated, truncated, info
    def get_performance(self):
        """
        Get state performance.
        Returns
        -------
        np.array or np.array, np.array or np.array, dict or np.array, np.arry, dict
            all states or all states and interval sorted states
        """
        if self.performance_interval and self.track_instances:
            complete_intervals = self.performance_intervals + [self.current_performance]
            return (
                self.overall_performance,
                complete_intervals,
                self.instance_performances,
            )
        elif self.performance_interval:
            complete_intervals = self.performance_intervals + [self.current_performance]
            return self.overall_performance, complete_intervals
        elif self.track_instances:
            return self.overall_performance, self.instance_performances
        else:
            return self.overall_performance
    def render_performance(self):
        """Plot performance."""
        plt.figure(figsize=(12, 6))
        # NOTE(review): this plots only every second episode
        # (overall_performance[1::2]) against len//2 x-values — presumably
        # deliberate subsampling, but worth confirming against the intended
        # "mean performance per episode" title.
        plt.plot(
            np.arange(len(self.overall_performance) // 2),
            self.overall_performance[1::2],
        )
        plt.title("Mean Performance per episode")
        plt.xlabel("Episode")
        plt.ylabel("Reward")
        plt.show()
    def render_instance_performance(self):
        """Plot mean performance for each instance."""
        plt.figure(figsize=(12, 6))
        plt.title("Mean Performance per Instance")
        plt.ylabel("Mean reward")
        plt.xlabel("Instance")
        ax = plt.subplot(111)
        # One bar per distinct instance key, labeled by its enumeration index.
        for k, i in zip(
            self.instance_performances.keys(),
            np.arange(len(self.instance_performances.keys())),
        ):
            ax.bar(str(i), np.mean(self.instance_performances[k]))
        plt.show()
| 6,372 | 29.203791 | 103 | py |
DACBench | DACBench-main/dacbench/wrappers/state_tracking_wrapper.py | import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb
from gymnasium import Wrapper, spaces
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
sb.set_style("darkgrid")
current_palette = list(sb.color_palette())
class StateTrackingWrapper(Wrapper):
    """
    Wrapper to track state changed over time.
    Includes interval mode that returns states in lists of len(interval) instead of one long list.
    """
    def __init__(self, env, state_interval=None, logger=None):
        """
        Initialize wrapper.
        Parameters
        ----------
        env : gym.Env
            Environment to wrap
        state_interval : int
            If not none, mean in given intervals is tracked, too
        logger : dacbench.logger.ModuleLogger
            logger to write to
        """
        super(StateTrackingWrapper, self).__init__(env)
        self.state_interval = state_interval
        self.overall_states = []
        if self.state_interval:
            self.state_intervals = []
            self.current_states = []
        self.episode_states = None
        # Observation space class is used later to pick the rendering strategy.
        self.state_type = type(env.observation_space)
        self.logger = logger
        # Only fetch a state description when logging; benchmarks without
        # benchmark_info simply log without one.
        if self.logger is not None:
            benchmark_info = getattr(env, "benchmark_info", None)
            self.state_description = (
                benchmark_info.get("state_description", None)
                if benchmark_info is not None
                else None
            )
    def __setattr__(self, name, value):
        """
        Set attribute in wrapper if available and in env if not.
        Parameters
        ----------
        name : str
            Attribute to set
        value
            Value to set attribute to
        """
        # Wrapper-owned attributes; everything else is forwarded to the env.
        if name in [
            "state_interval",
            "overall_states",
            "state_intervals",
            "current_states",
            "state_type",
            "env",
            "episode_states",
            "get_states",
            "step",
            "reset",
            "render_state_tracking",
            "logger",
        ]:
            object.__setattr__(self, name, value)
        else:
            setattr(self.env, name, value)
    def __getattribute__(self, name):
        """
        Get attribute value of wrapper if available and of env if not.
        Parameters
        ----------
        name : str
            Attribute to get
        Returns
        -------
        value
            Value of given name
        """
        if name in [
            "state_interval",
            "overall_states",
            "state_intervals",
            "current_states",
            "state_type",
            "env",
            "episode_states",
            "get_states",
            "step",
            "reset",
            "render_state_tracking",
            "logger",
        ]:
            return object.__getattribute__(self, name)
        else:
            return getattr(self.env, name)
    def reset(self):
        """
        Reset environment and record starting state.
        Returns
        -------
        np.array, {}
            state, info
        """
        # NOTE(review): this signature does not forward gymnasium's optional
        # reset kwargs (seed/options) to the wrapped env — confirm callers
        # never pass them through this wrapper.
        state, info = self.env.reset()
        self.overall_states.append(state)
        if self.state_interval:
            if len(self.current_states) < self.state_interval:
                self.current_states.append(state)
            else:
                self.state_intervals.append(self.current_states)
                self.current_states = [state]
        return state, info
    def step(self, action):
        """
        Execute environment step and record state.
        Parameters
        ----------
        action : int
            action to execute
        Returns
        -------
        np.array, float, bool, dict
            state, reward, done, metainfo
        """
        state, reward, terminated, truncated, info = self.env.step(action)
        self.overall_states.append(state)
        if self.logger is not None:
            self.logger.log_space("state", state, self.state_description)
        if self.state_interval:
            if len(self.current_states) < self.state_interval:
                self.current_states.append(state)
            else:
                # Interval full: archive it and start a new one with this state.
                self.state_intervals.append(self.current_states)
                self.current_states = [state]
        return state, reward, terminated, truncated, info
    def get_states(self):
        """
        Get state progression.
        Returns
        -------
        np.array or np.array, np.array
            all states or all states and interval sorted states
        """
        if self.state_interval:
            complete_intervals = self.state_intervals + [self.current_states]
            return self.overall_states, complete_intervals
        else:
            return self.overall_states
    def render_state_tracking(self):
        """
        Render state progression.
        Returns
        -------
        np.array
            RBG data of state tracking
        """
        # Plots one state dimension (or the whole scalar state) on one axis.
        def plot_single(ax=None, index=None, title=None, x=False, y=False):
            if ax is None:
                plt.xlabel("Episode")
                plt.ylabel("State")
            elif x and y:
                ax.set_ylabel("State")
                ax.set_xlabel("Episode")
            elif x:
                ax.set_xlabel("Episode")
            elif y:
                ax.set_ylabel("State")
            if index is not None:
                ys = [state[index] for state in self.overall_states]
            else:
                ys = self.overall_states
            if ax is None:
                p = plt.plot(
                    np.arange(len(self.overall_states)),
                    ys,
                    label="Episode state",
                    color="g",
                )
            else:
                p = ax.plot(
                    np.arange(len(self.overall_states)),
                    ys,
                    label="Episode state",
                    color="g",
                )
            p2 = None
            if self.state_interval:
                if index is not None:
                    y_ints = []
                    for interval in self.state_intervals:
                        y_ints.append([state[index] for state in interval])
                else:
                    y_ints = self.state_intervals
                if ax is None:
                    p2 = plt.plot(
                        np.arange(len(self.state_intervals)),
                        [np.mean(interval) for interval in y_ints],
                        label="Mean interval state",
                        color="orange",
                    )
                    plt.legend(loc="upper left")
                else:
                    p2 = ax.plot(
                        np.arange(len(self.state_intervals)) * self.state_interval,
                        [np.mean(interval) for interval in y_ints],
                        label="Mean interval state",
                        color="orange",
                    )
                    ax.legend(loc="upper left")
            return p, p2
        if self.state_type == spaces.Discrete:
            figure = plt.figure(figsize=(20, 20))
            canvas = FigureCanvas(figure)
            p, p2 = plot_single()
            canvas.draw()
        elif self.state_type == spaces.Dict:
            raise NotImplementedError
        elif self.state_type == spaces.Tuple:
            raise NotImplementedError
        elif (
            self.state_type == spaces.MultiDiscrete
            or self.state_type == spaces.MultiBinary
            or self.state_type == spaces.Box
        ):
            if self.state_type == spaces.MultiDiscrete:
                state_length = len(self.env.observation_space.nvec)
            elif self.state_type == spaces.MultiBinary:
                state_length = self.env.observation_space.n
            else:
                state_length = len(self.env.observation_space.high)
            if state_length == 1:
                figure = plt.figure(figsize=(20, 20))
                canvas = FigureCanvas(figure)
                p, p2 = plot_single()
            elif state_length < 5:
                dim = 1
                figure, axarr = plt.subplots(state_length)
            else:
                # NOTE(review): dim = state_length % 4 is 0 whenever
                # state_length is a multiple of 4 (>= 8), which makes the
                # subplot grid degenerate and `i % dim` divide by zero —
                # likely should be a fixed number of columns; confirm intent.
                dim = state_length % 4
                figure, axarr = plt.subplots(state_length % 4, state_length // dim)
            figure.suptitle("State over time")
            canvas = FigureCanvas(figure)
            for i in range(state_length):
                if state_length == 1:
                    continue
                x = False
                if i % dim == dim - 1:
                    x = True
                if state_length < 5:
                    p, p2 = plot_single(axarr[i], i, y=True, x=x)
                else:
                    y = i % state_length // dim == 0
                    p, p2 = plot_single(axarr[i % dim, i // dim], i, x=x, y=y)
            canvas.draw()
        else:
            raise ValueError("Unknown state type")
        width, height = figure.get_size_inches() * figure.get_dpi()
        # NOTE(review): np.fromstring on binary data is deprecated (and
        # canvas.tostring_rgb was removed in newer matplotlib) — consider
        # np.frombuffer / buffer_rgba when updating dependencies.
        img = np.fromstring(canvas.tostring_rgb(), dtype="uint8").reshape(
            int(height), int(width), 3
        )
        return img
| 9,299 | 30.103679 | 98 | py |
DACBench | DACBench-main/dacbench/wrappers/observation_wrapper.py | import numpy as np
from gymnasium import Wrapper, spaces
class ObservationWrapper(Wrapper):
    """
    Wrapper covert observations spaces to spaces.Box for convenience.
    Currently only supports Dict -> Box
    """

    def __init__(self, env):
        """
        Initialize wrapper.

        Parameters
        ----------
        env : gym.Env
            Environment to wrap
        """
        super(ObservationWrapper, self).__init__(env)
        # Probe the wrapped space once to determine the flattened length.
        sample = self.flatten(self.env.observation_space.sample())
        dim = len(sample)
        self.observation_space = spaces.Box(
            low=-np.inf * np.ones(dim), high=np.inf * np.ones(dim)
        )

    def __setattr__(self, name, value):
        """
        Set attribute in wrapper if available and in env if not.

        Parameters
        ----------
        name : str
            Attribute to set
        value
            Value to set attribute to
        """
        if name in ("observation_space", "step", "env", "flatten", "reset"):
            object.__setattr__(self, name, value)
        else:
            setattr(self.env, name, value)

    def __getattribute__(self, name):
        """
        Get attribute value of wrapper if available and of env if not.

        Parameters
        ----------
        name : str
            Attribute to get

        Returns
        -------
        value
            Value of given name
        """
        if name in ("observation_space", "step", "env", "flatten", "reset"):
            return object.__getattribute__(self, name)
        else:
            return getattr(self.env, name)

    def step(self, action):
        """
        Execute environment step and flatten the resulting observation.

        Parameters
        ----------
        action : int
            action to execute

        Returns
        -------
        np.array, float, bool, bool, dict
            state, reward, terminated, truncated, metainfo
        """
        obs, reward, terminated, truncated, info = self.env.step(action)
        return self.flatten(obs), reward, terminated, truncated, info

    def reset(self):
        """
        Reset the environment and flatten the initial observation.

        Returns
        -------
        np.array, dict
            state, info
        """
        obs, info = self.env.reset()
        return self.flatten(obs), info

    def flatten(self, state_dict):
        """Flatten a dict observation into a 1-D float32 array (keys sorted)."""
        values = []
        for key in sorted(state_dict.keys()):
            entry = state_dict[key]
            if isinstance(entry, np.ndarray):
                values.extend(entry)
            else:
                values.append(entry)
        return np.array(values).astype(np.float32)
| 2,894 | 24.848214 | 76 | py |
DACBench | DACBench-main/dacbench/wrappers/episode_time_tracker.py | import time
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb
from gymnasium import Wrapper
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
sb.set_style("darkgrid")
current_palette = list(sb.color_palette())
class EpisodeTimeWrapper(Wrapper):
    """
    Wrapper to track time spent per episode.
    Includes interval mode that returns times in lists of len(interval) instead of one long list.
    """
    def __init__(self, env, time_interval=None, logger=None):
        """
        Initialize wrapper.
        Parameters
        ----------
        env : gym.Env
            Environment to wrap
        time_interval : int
            If not none, mean in given intervals is tracked, too
        logger : dacbench.logger.ModuleLogger
            logger to write to
        """
        super(EpisodeTimeWrapper, self).__init__(env)
        self.time_interval = time_interval
        # Wall-clock duration of every single step, across all episodes.
        self.all_steps = []
        if self.time_interval:
            self.step_intervals = []
            self.current_step_interval = []
        # Per-episode lists of step durations.
        self.overall_times = []
        self.episode = []
        if self.time_interval:
            self.time_intervals = []
            self.current_times = []
        self.logger = logger
    def __setattr__(self, name, value):
        """
        Set attribute in wrapper if available and in env if not.
        Parameters
        ----------
        name : str
            Attribute to set
        value
            Value to set attribute to
        """
        # Wrapper-owned attributes; everything else is forwarded to the env.
        if name in [
            "time_interval",
            "overall_times",
            "time_intervals",
            "current_times",
            "env",
            "get_times",
            "step",
            "render_step_time",
            "render_episode_time",
            "reset",
            "episode",
            "all_steps",
            "current_step_interval",
            "step_intervals",
            "logger",
        ]:
            object.__setattr__(self, name, value)
        else:
            setattr(self.env, name, value)
    def __getattribute__(self, name):
        """
        Get attribute value of wrapper if available and of env if not.
        Parameters
        ----------
        name : str
            Attribute to get
        Returns
        -------
        value
            Value of given name
        """
        if name in [
            "time_interval",
            "overall_times",
            "time_intervals",
            "current_times",
            "env",
            "get_times",
            "step",
            "render_step_time",
            "render_episode_time",
            "reset",
            "episode",
            "all_steps",
            "current_step_interval",
            "step_intervals",
            "logger",
        ]:
            return object.__getattribute__(self, name)
        else:
            return getattr(self.env, name)
    def step(self, action):
        """
        Execute environment step and record time.
        Parameters
        ----------
        action : int
            action to execute
        Returns
        -------
        np.array, float, bool, bool, dict
            state, reward, terminated, truncated, metainfo
        """
        start = time.time()
        state, reward, terminated, truncated, info = self.env.step(action)
        stop = time.time()
        duration = stop - start
        self.episode.append(duration)
        self.all_steps.append(duration)
        if self.logger is not None:
            self.logger.log("step_duration", duration)
        if self.time_interval:
            if len(self.current_step_interval) < self.time_interval:
                self.current_step_interval.append(duration)
            else:
                self.step_intervals.append(self.current_step_interval)
                self.current_step_interval = [duration]
        if terminated or truncated:
            self.overall_times.append(self.episode)
            if self.logger is not None:
                self.logger.log("episode_duration", sum(self.episode))
            if self.time_interval:
                if len(self.current_times) < self.time_interval:
                    self.current_times.append(self.episode)
                else:
                    self.time_intervals.append(self.current_times)
                    # Bug fix: start the new interval with the episode that
                    # triggered the flush instead of dropping it. This mirrors
                    # the step-interval handling above and
                    # PerformanceTrackingWrapper's interval logic.
                    self.current_times = [self.episode]
            self.episode = []
        return state, reward, terminated, truncated, info
    def get_times(self):
        """
        Get times.
        Returns
        -------
        np.array or np.array, np.array
            all times or all times and interval sorted times
        """
        if self.time_interval:
            complete_intervals = self.time_intervals + [self.current_times]
            complete_step_intervals = self.step_intervals + [self.current_step_interval]
            return (
                self.overall_times,
                self.all_steps,
                complete_intervals,
                complete_step_intervals,
            )
        else:
            # NOTE(review): overall_times is a list of variable-length episode
            # lists; np.array over ragged lists errors on numpy >= 1.24 —
            # confirm episode lengths are uniform for callers of this branch.
            return np.array(self.overall_times), np.array(self.all_steps)
    def render_step_time(self):
        """Render step times."""
        figure = plt.figure(figsize=(12, 6))
        canvas = FigureCanvas(figure)
        plt.title("Time per Step")
        plt.xlabel("Step")
        plt.ylabel("Time (s)")
        plt.plot(
            np.arange(len(self.all_steps)), self.all_steps, label="Step time", color="g"
        )
        if self.time_interval:
            interval_means = [np.mean(interval) for interval in self.step_intervals] + [
                np.mean(self.current_step_interval)
            ]
            plt.plot(
                np.arange(len(self.step_intervals) + 2) * self.time_interval,
                [interval_means[0]] + interval_means,
                label="Mean interval time",
                color="orange",
            )
            plt.legend(loc="upper right")
        canvas.draw()
        width, height = figure.get_size_inches() * figure.get_dpi()
        # np.frombuffer replaces the deprecated np.fromstring for binary data.
        img = np.frombuffer(canvas.tostring_rgb(), dtype="uint8").reshape(
            int(height), int(width), 3
        )
        return img
    def render_episode_time(self):
        """Render episode times."""
        figure = plt.figure(figsize=(12, 6))
        canvas = FigureCanvas(figure)
        plt.title("Time per Episode")
        plt.xlabel("Episode")
        plt.ylabel("Time (s)")
        plt.plot(
            np.arange(len(self.overall_times)),
            [sum(episode) for episode in self.overall_times],
            label="Episode time",
            color="g",
        )
        if self.time_interval:
            interval_sums = []
            for interval in self.time_intervals:
                ep_times = []
                for episode in interval:
                    ep_times.append(sum(episode))
                interval_sums.append(np.mean(ep_times))
            interval_sums += [np.mean([sum(episode) for episode in self.current_times])]
            plt.plot(
                np.arange(len(self.time_intervals) + 2) * self.time_interval,
                [interval_sums[0]] + interval_sums,
                label="Mean interval time",
                color="orange",
            )
            plt.legend(loc="upper right")
        canvas.draw()
        width, height = figure.get_size_inches() * figure.get_dpi()
        # np.frombuffer replaces the deprecated np.fromstring for binary data.
        img = np.frombuffer(canvas.tostring_rgb(), dtype="uint8").reshape(
            int(height), int(width), 3
        )
        return img
| 7,576 | 29.187251 | 97 | py |
DACBench | DACBench-main/dacbench/wrappers/multidiscrete_action_wrapper.py | import itertools
import numpy as np
from gymnasium import Wrapper, spaces
class MultiDiscreteActionWrapper(Wrapper):
    """Wrapper to cast MultiDiscrete action spaces to Discrete. This should improve usability with standard RL libraries."""

    def __init__(self, env):
        """
        Initialize wrapper.

        Parameters
        ----------
        env : gym.Env
            Environment to wrap
        """
        super().__init__(env)
        nvec = self.env.action_space.nvec
        self.n_actions = len(nvec)
        self.action_space = spaces.Discrete(np.prod(nvec))
        # Enumerate the cartesian product of all sub-action choices so each
        # flat Discrete index maps to one MultiDiscrete combination.
        combinations = itertools.product(*(np.arange(v) for v in nvec))
        self.action_mapper = dict(enumerate(combinations))

    def step(self, action):
        """Maps discrete action value to array."""
        return self.env.step(self.action_mapper[action])
| 1,018 | 28.970588 | 124 | py |
DACBench | DACBench-main/dacbench/wrappers/instance_sampling_wrapper.py | import numpy as np
from gymnasium import Wrapper
from scipy.stats import norm
class InstanceSamplingWrapper(Wrapper):
    """
    Wrapper to sample a new instance at a given time point.

    Instances can either be sampled using a given method or a distribution
    inferred from a given list of instances.
    """

    def __init__(self, env, sampling_function=None, instances=None, reset_interval=0):
        """
        Initialize wrapper.

        Either sampling_function or instances must be given.

        Parameters
        ----------
        env : gym.Env
            Environment to wrap
        sampling_function : function
            Function to sample instances from
        instances : list
            List of instances to infer distribution from
        reset_interval : int
            additional episodes for which to keep an instance
        """
        super(InstanceSamplingWrapper, self).__init__(env)
        if sampling_function:
            self.sampling_function = sampling_function
        elif instances:
            self.sampling_function = self.fit_dist(instances)
        else:
            raise Exception("No distribution to sample from given")
        # NOTE: these two names are not in the __setattr__ whitelist below,
        # so they are stored on (and read back from) the wrapped env.
        self.reset_interval = reset_interval
        self.reset_tracker = 0

    def __setattr__(self, name, value):
        """
        Set attribute in wrapper if available and in env if not.

        Parameters
        ----------
        name : str
            Attribute to set
        value
            Value to set attribute to
        """
        if name in ["sampling_function", "env", "fit_dist", "reset"]:
            object.__setattr__(self, name, value)
        else:
            setattr(self.env, name, value)

    def __getattribute__(self, name):
        """
        Get attribute value of wrapper if available and of env if not.

        Parameters
        ----------
        name : str
            Attribute to get

        Returns
        -------
        value
            Value of given name
        """
        if name in ["sampling_function", "env", "fit_dist", "reset"]:
            return object.__getattribute__(self, name)
        else:
            return getattr(self.env, name)

    def reset(self):
        """
        Reset environment and use sampled instance for training.

        Returns
        -------
        np.array
            state
        """
        # BUG FIX: the tracker was never advanced, so with a positive
        # reset_interval a new instance would never be sampled. Count the
        # additional episodes an instance has been kept and resample once
        # the interval is reached (interval 0 keeps the old behaviour of
        # sampling on every reset).
        if self.reset_tracker >= self.reset_interval:
            instance = self.sampling_function()
            self.env.use_next_instance(instance=instance)
            self.reset_tracker = 0
        else:
            self.reset_tracker += 1
        return self.env.reset()

    def fit_dist(self, instances):
        """
        Approximate instance distribution in given instance set.

        Parameters
        ----------
        instances : List
            instance set

        Returns
        -------
        method
            sampling method for new instances
        """
        # Fit an independent normal distribution per instance component.
        dists = []
        for i in range(len(instances[0])):
            component = [instances[k][i] for k in instances.keys()]
            dist = norm.fit(component)
            dists.append(dist)

        def sample():
            # Draw each component from its fitted (mean, std) normal.
            instance = []
            for d in dists:
                instance.append(np.random.normal(d[0], d[1]))
            return instance

        return sample
| 3,253 | 25.672131 | 114 | py |
DACBench | DACBench-main/dacbench/wrappers/reward_noise_wrapper.py | import numpy as np
from gymnasium import Wrapper
class RewardNoiseWrapper(Wrapper):
    """
    Wrapper that perturbs the environment's reward signal with random noise.

    The noise source is either a user-supplied callable or a distribution of
    numpy's random generator, selected by name.
    """

    def __init__(
        self, env, noise_function=None, noise_dist="standard_normal", dist_args=None
    ):
        """
        Initialize wrapper.

        Either noise_function or noise_dist and dist_args need to be given

        Parameters
        ----------
        env : gym.Env
            Environment to wrap
        noise_function : function
            Function to sample noise from
        noise_dist : str
            Name of distribution to sample noise from
        dist_args : list
            Arguments for noise distribution
        """
        super(RewardNoiseWrapper, self).__init__(env)
        if noise_function:
            self.noise_function = noise_function
        elif noise_dist:
            self.noise_function = self.add_noise(noise_dist, dist_args)
        else:
            raise Exception("No distribution to sample noise from given")

    def __setattr__(self, name, value):
        """Store wrapper-own attributes locally, everything else on the env."""
        own = ("noise_function", "env", "add_noise", "step")
        if name in own:
            object.__setattr__(self, name, value)
            return
        setattr(self.env, name, value)

    def __getattribute__(self, name):
        """Read wrapper-own attributes locally, everything else from the env."""
        own = ("noise_function", "env", "add_noise", "step")
        if name in own:
            return object.__getattribute__(self, name)
        return getattr(self.env, name)

    def step(self, action):
        """
        Execute environment step and add noise.

        Parameters
        ----------
        action : int
            action to execute

        Returns
        -------
        np.array, float, bool, bool, dict
            state, reward, terminated, truncated, metainfo
        """
        state, reward, terminated, truncated, info = self.env.step(action)
        low, high = self.env.reward_range
        # Perturb the reward, then clip it back into the declared range.
        noisy_reward = reward + self.noise_function()
        clipped_reward = min(high, max(low, noisy_reward))
        return state, clipped_reward, terminated, truncated, info

    def add_noise(self, dist, args):
        """
        Make noise function from distribution name and arguments.

        Parameters
        ----------
        dist : str
            Name of distribution
        args : list
            List of distribution arguments

        Returns
        -------
        function
            Noise sampling function
        """
        generator = getattr(np.random.default_rng(), dist)

        def sample_noise():
            return generator(*args) if args else generator()

        return sample_noise
| 3,248 | 24.582677 | 97 | py |
DACBench | DACBench-main/dacbench/wrappers/__init__.py | from dacbench.wrappers.action_tracking_wrapper import ActionFrequencyWrapper
from dacbench.wrappers.episode_time_tracker import EpisodeTimeWrapper
from dacbench.wrappers.instance_sampling_wrapper import InstanceSamplingWrapper
from dacbench.wrappers.multidiscrete_action_wrapper import MultiDiscreteActionWrapper
from dacbench.wrappers.observation_wrapper import ObservationWrapper
from dacbench.wrappers.performance_tracking_wrapper import PerformanceTrackingWrapper
from dacbench.wrappers.policy_progress_wrapper import PolicyProgressWrapper
from dacbench.wrappers.reward_noise_wrapper import RewardNoiseWrapper
from dacbench.wrappers.state_tracking_wrapper import StateTrackingWrapper
# Public API of the wrappers package.
# BUG FIX: "PolicyProgressWrapper" was listed twice.
__all__ = [
    "ActionFrequencyWrapper",
    "EpisodeTimeWrapper",
    "InstanceSamplingWrapper",
    "PolicyProgressWrapper",
    "RewardNoiseWrapper",
    "StateTrackingWrapper",
    "PerformanceTrackingWrapper",
    "ObservationWrapper",
    "MultiDiscreteActionWrapper",
]
| 996 | 42.347826 | 85 | py |
DACBench | DACBench-main/dacbench/wrappers/action_tracking_wrapper.py | import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb
from gymnasium import Wrapper, spaces
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
sb.set_style("darkgrid")
current_palette = list(sb.color_palette())


class ActionFrequencyWrapper(Wrapper):
    """
    Wrapper to track action frequency.

    Includes interval mode that returns frequencies in lists of len(interval) instead of one long list.
    """

    def __init__(self, env, action_interval=None, logger=None):
        """
        Initialize wrapper.

        Parameters
        ----------
        env : gym.Env
            Environment to wrap
        action_interval : int
            If not none, mean in given intervals is tracked, too
        logger: logger.ModuleLogger
            logger to write to
        """
        super(ActionFrequencyWrapper, self).__init__(env)
        self.action_interval = action_interval
        self.overall_actions = []
        if self.action_interval:
            self.action_intervals = []
            self.current_actions = []
        self.action_space_type = type(self.env.action_space)
        self.logger = logger

    def __setattr__(self, name, value):
        """
        Set attribute in wrapper if available and in env if not.

        Parameters
        ----------
        name : str
            Attribute to set
        value
            Value to set attribute to
        """
        if name in [
            "action_interval",
            "overall_actions",
            "action_intervals",
            "current_actions",
            "env",
            "get_actions",
            "step",
            "render_action_tracking",
            "logger",
        ]:
            object.__setattr__(self, name, value)
        else:
            setattr(self.env, name, value)

    def __getattribute__(self, name):
        """
        Get attribute value of wrapper if available and of env if not.

        Parameters
        ----------
        name : str
            Attribute to get

        Returns
        -------
        value
            Value of given name
        """
        if name in [
            "action_interval",
            "overall_actions",
            "action_intervals",
            "current_actions",
            "env",
            "get_actions",
            "step",
            "render_action_tracking",
            "logger",
        ]:
            return object.__getattribute__(self, name)
        else:
            return getattr(self.env, name)

    def step(self, action):
        """
        Execute environment step and record the chosen action.

        Parameters
        ----------
        action : int
            action to execute

        Returns
        -------
        np.array, float, bool, bool, dict
            state, reward, terminated, truncated, metainfo
        """
        state, reward, terminated, truncated, info = self.env.step(action)
        self.overall_actions.append(action)
        if self.logger is not None:
            self.logger.log_space("action", action)

        if self.action_interval:
            if len(self.current_actions) < self.action_interval:
                self.current_actions.append(action)
            else:
                # Interval full: archive it and start a new one.
                self.action_intervals.append(self.current_actions)
                self.current_actions = [action]
        return state, reward, terminated, truncated, info

    def get_actions(self):
        """
        Get action progression.

        Returns
        -------
        np.array or np.array, np.array
            all actions or all actions and interval sorted actions
        """
        if self.action_interval:
            complete_intervals = self.action_intervals + [self.current_actions]
            return self.overall_actions, complete_intervals
        else:
            return self.overall_actions

    def render_action_tracking(self):
        """
        Render action progression.

        Returns
        -------
        np.array
            RGB data of action tracking
        """

        def plot_single(ax=None, index=None, title=None, x=False, y=False):
            # Plot one action dimension, either on the global pyplot state
            # (ax=None) or on the given subplot axis.
            if ax is None:
                plt.xlabel("Step")
                plt.ylabel("Action value")
            elif x and y:
                ax.set_ylabel("Action value")
                ax.set_xlabel("Step")
            elif x:
                ax.set_xlabel("Step")
            elif y:
                ax.set_ylabel("Action value")

            if index is not None:
                ys = [action[index] for action in self.overall_actions]
            else:
                ys = self.overall_actions

            if ax is None:
                p = plt.plot(
                    np.arange(len(self.overall_actions)),
                    ys,
                    label="Step actions",
                    color="g",
                )
            else:
                p = ax.plot(
                    np.arange(len(self.overall_actions)),
                    ys,
                    label="Step actions",
                    color="g",
                )
            p2 = None
            if self.action_interval:
                if index is not None:
                    y_ints = []
                    for interval in self.action_intervals:
                        y_ints.append([action[index] for action in interval])
                else:
                    y_ints = self.action_intervals
                if ax is None:
                    p2 = plt.plot(
                        np.arange(len(self.action_intervals)) * self.action_interval,
                        [np.mean(interval) for interval in y_ints],
                        label="Mean interval action",
                        color="orange",
                    )
                    plt.legend(loc="upper left")
                else:
                    p2 = ax.plot(
                        np.arange(len(self.action_intervals)) * self.action_interval,
                        [np.mean(interval) for interval in y_ints],
                        label="Mean interval action",
                        color="orange",
                    )
                    ax.legend(loc="upper left")
            return p, p2

        if self.action_space_type == spaces.Discrete:
            figure = plt.figure(figsize=(12, 6))
            canvas = FigureCanvas(figure)
            plot_single()
            canvas.draw()
        elif self.action_space_type == spaces.Dict:
            raise NotImplementedError
        elif self.action_space_type == spaces.Tuple:
            raise NotImplementedError
        elif (
            self.action_space_type == spaces.MultiDiscrete
            or self.action_space_type == spaces.MultiBinary
            or self.action_space_type == spaces.Box
        ):
            if self.action_space_type == spaces.MultiDiscrete:
                action_size = len(self.env.action_space.nvec)
            elif self.action_space_type == spaces.MultiBinary:
                action_size = self.env.action_space.n
            else:
                action_size = len(self.env.action_space.high)

            if action_size == 1:
                figure = plt.figure(figsize=(12, 6))
                canvas = FigureCanvas(figure)
                plot_single()
            elif action_size < 5:
                dim = 1
                figure, axarr = plt.subplots(action_size)
            else:
                # BUG FIX: this used to be dim = action_size % 4, which is 0
                # (division by zero below) for multiples of four and allocates
                # too few axes for most other sizes. Use four rows and enough
                # columns to hold every action dimension.
                dim = 4
                figure, axarr = plt.subplots(dim, int(np.ceil(action_size / dim)))
            # Label fixed: this wrapper tracks actions, not states.
            figure.suptitle("Actions over time")
            canvas = FigureCanvas(figure)
            for i in range(action_size):
                if action_size == 1:
                    continue
                x = i % dim == dim - 1
                if action_size < 5:
                    plot_single(axarr[i], i, y=True, x=x)
                else:
                    # Only the first column gets a y label.
                    plot_single(axarr[i % dim, i // dim], i, x=x, y=i // dim == 0)
            canvas.draw()
        # buffer_rgba replaces the removed tostring_rgb/np.fromstring APIs;
        # drop alpha to keep the historical (h, w, 3) RGB shape.
        img = np.asarray(canvas.buffer_rgba())[:, :, :3].copy()
        return img
| 8,280 | 30.249057 | 103 | py |
DACBench | DACBench-main/tests/test_multi_agent_interface.py | import unittest
import numpy as np
from dacbench.benchmarks import ModCMABenchmark, SigmoidBenchmark, ToySGDBenchmark
from dacbench.envs import SigmoidEnv, ToySGDEnv
class TestMultiAgentInterface(unittest.TestCase):
    """Tests for the PettingZoo-style multi-agent interface of DACBench envs."""

    def test_make_env(self):
        # Benchmarks with "multi_agent" enabled still build their usual env class.
        bench = SigmoidBenchmark()
        bench.config["multi_agent"] = True
        env = bench.get_environment()
        self.assertTrue(issubclass(type(env), SigmoidEnv))

        bench = ToySGDBenchmark()
        bench.config["multi_agent"] = True
        env = bench.get_environment()
        self.assertTrue(issubclass(type(env), ToySGDEnv))

    def test_empty_reset_step(self):
        # In multi-agent mode reset/step return None; results are read via last().
        bench = ModCMABenchmark()
        bench.config["multi_agent"] = True
        env = bench.get_environment()
        out = env.reset()
        self.assertTrue(out is None)
        env.register_agent(max(env.possible_agents))
        out = env.step(0)
        self.assertTrue(out is None)

    def test_last(self):
        bench = ModCMABenchmark()
        bench.config["multi_agent"] = True
        env = bench.get_environment()
        env.reset()
        state, reward, terminated, truncated, info = env.last()
        self.assertFalse(state is None)
        # No reward exists before the first step.
        self.assertTrue(reward is None)
        self.assertFalse(info is None)
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        env.register_agent(max(env.possible_agents))
        env.step(0)
        _, reward, _, _, info = env.last()
        self.assertFalse(reward is None)
        self.assertFalse(info is None)

    def test_agent_registration(self):
        bench = SigmoidBenchmark()
        bench.config["multi_agent"] = True
        env = bench.get_environment()
        env.reset()
        state, _, _, _, _ = env.last()
        env.register_agent(0)
        env.register_agent(max(env.possible_agents))
        self.assertTrue(len(env.agents) == 2)
        self.assertTrue(0 in env.agents)
        self.assertTrue(max(env.possible_agents) in env.agents)
        self.assertTrue(env.current_agent == 0)
        env.step(0)
        state2, _, _, _, _ = env.last()
        # The state only advances once every registered agent has acted.
        self.assertTrue(np.array_equal(state, state2))
        self.assertTrue(env.current_agent == max(env.possible_agents))
        env.step(1)
        state3, _, _, _, _ = env.last()
        self.assertFalse(np.array_equal(state, state3))
        env.remove_agent(0)
        self.assertTrue(len(env.agents) == 1)
        self.assertFalse(0 in env.agents)
        # String agent ids resolve to their numeric dimension (here: 0).
        env.register_agent("value_dim_0")
        self.assertTrue(len(env.agents) == 2)
        self.assertTrue(0 in env.agents)
| 2,587 | 34.452055 | 82 | py |
DACBench | DACBench-main/tests/test_abstract_benchmark.py | import json
import os
import tempfile
import unittest
import numpy as np
from gymnasium.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.challenge_benchmarks.reward_quality_challenge.reward_functions import (
random_reward,
)
from dacbench.challenge_benchmarks.state_space_challenge.random_states import (
small_random_sigmoid_state,
)
class TestAbstractBenchmark(unittest.TestCase):
    """Tests for AbstractBenchmark configuration handling and (de)serialization."""

    def test_not_implemented_method(self):
        # The base class cannot build an environment itself.
        bench = AbstractBenchmark()
        with self.assertRaises(NotImplementedError):
            bench.get_environment()

    def test_setup(self):
        bench = AbstractBenchmark()
        self.assertTrue(bench.config is None)

    def test_config_file_management(self):
        # NOTE(review): writes temp files into the CWD; a tempdir would be safer.
        bench = AbstractBenchmark()
        bench.config = objdict({"seed": 0})
        test_config = objdict({"seed": 10})

        with open("test_conf.json", "w+") as fp:
            json.dump(test_config, fp)
        self.assertTrue(bench.config.seed == 0)
        bench.read_config_file("test_conf.json")
        self.assertTrue(bench.config.seed == 10)
        self.assertTrue(len(bench.config.keys()) == 1)
        os.remove("test_conf.json")

        # Saving adds bookkeeping, hence two keys in the round-tripped file.
        bench.save_config("test_conf2.json")
        with open("test_conf2.json", "r") as fp:
            recovered = json.load(fp)
        self.assertTrue(recovered["seed"] == 10)
        self.assertTrue(len(recovered.keys()) == 2)
        os.remove("test_conf2.json")

    def test_from_and_to_json(self):
        # Serializing, reloading and re-serializing must be a fixed point.
        bench1 = AbstractBenchmark(config_path="tests/test_config.json")
        json1 = bench1.serialize_config()
        bench2 = AbstractBenchmark(config=objdict(json1))
        json2 = bench2.serialize_config()
        print(json1)
        print(json2)
        self.assertEqual(json1, json2)

    def test_attributes(self):
        # objdict exposes keys as attributes in both directions.
        bench = AbstractBenchmark()
        bench.config = objdict({"seed": 0})
        self.assertTrue(bench.config.seed == bench.config["seed"])
        bench.config.seed = 42
        self.assertTrue(bench.config["seed"] == 42)

    def test_getters_and_setters(self):
        bench = AbstractBenchmark()
        bench.config = objdict({"seed": 0})
        config = bench.get_config()
        self.assertTrue(issubclass(type(config), dict))
        bench.set_seed(100)
        self.assertTrue(bench.config.seed == 100)
        bench.set_action_space("Discrete", [4])
        self.assertTrue(bench.config.action_space == "Discrete")
        self.assertTrue(bench.config.action_space_args == [4])
        bench.set_observation_space("Box", [[1], [0]], float)
        self.assertTrue(bench.config.observation_space == "Box")
        self.assertTrue(bench.config.observation_space_args[0] == [1])
        self.assertTrue(bench.config.observation_space_type == float)

    def test_reading_and_saving_config(self):
        # Functions and wrappers must survive a save/load round trip.
        bench1 = AbstractBenchmark(config_path="tests/test_config.json")
        with tempfile.TemporaryDirectory() as temp_dir:
            config_file = os.path.join(temp_dir, "config.json")
            bench1.save_config(config_file)
            bench2 = AbstractBenchmark()
            bench2.read_config_file(config_file)

        assert bench1.config["state_method"] == bench2.config["state_method"]
        assert bench1.config["state_method"] == small_random_sigmoid_state
        assert bench1.config["reward_function"] == bench2.config["reward_function"]
        assert bench1.config["reward_function"] == random_reward
        assert bench1.jsonify_wrappers() == bench2.jsonify_wrappers()
        assert bench1.jsonify_wrappers() == [["RewardNoiseWrapper", []]]

    def test_jsonify_wrappers_and_dejson_wrappers(self):
        bench = AbstractBenchmark()
        empty_warpper_list = bench.jsonify_wrappers()
        assert empty_warpper_list == []

    def test_space_to_list_and_list_to_space(self):
        def assert_restorable(space):
            # space -> list -> space must reproduce an equal space.
            space_restored = bench.list_to_space(bench.space_to_list(space))
            assert space == space_restored

        bench = AbstractBenchmark()

        space = Box(
            low=np.array([0, 0]),
            high=np.array([1, 1]),
        )
        assert_restorable(space)

        space = Discrete(2)
        assert_restorable(space)

        space = Dict(
            {
                "box": Box(
                    low=np.array([0, 0]),
                    high=np.array([1, 1]),
                ),
                "discrete": Discrete(n=2),
            }
        )
        assert_restorable(space)

        space = MultiDiscrete([2, 3])
        assert_restorable(space)

        space = MultiBinary(3)
        assert_restorable(space)

    def test_objdict(self):
        # Attribute and item access are interchangeable, including deletion.
        d = objdict({"dummy": 0})
        self.assertTrue(d["dummy"] == d.dummy)

        with self.assertRaises(KeyError):
            d["error"]
        with self.assertRaises(AttributeError):
            d.error

        d["error"] = 12
        self.assertTrue(d.error == 12)

        del d.error
        self.assertFalse("error" in d.keys())
        with self.assertRaises(KeyError):
            del d["error"]
        with self.assertRaises(AttributeError):
            del d.error

    def test_objdict_equal(self):
        # Equality also works with numpy array values.
        self.assertEqual(objdict({"dummy": 0}), objdict({"dummy": 0}))
        self.assertEqual(
            objdict({"dummy": np.array([1, 2])}), objdict({"dummy": np.array([1, 2])})
        )
        self.assertNotEqual(
            objdict({"dummy": np.array([1, 2])}), objdict({"dummy": np.array([1, 0])})
        )
        self.assertNotEqual(objdict({"dummy": np.array([1, 2])}), objdict({"dummy": 0}))
| 5,714 | 32.816568 | 88 | py |
DACBench | DACBench-main/tests/test_logger.py | import json
import tempfile
import unittest
from pathlib import Path
import numpy as np
from gymnasium import spaces
from gymnasium.spaces import Box, Dict, Discrete, MultiDiscrete
from dacbench.agents.simple_agents import RandomAgent
from dacbench.benchmarks import SigmoidBenchmark
from dacbench.logger import Logger, ModuleLogger, log2dataframe
class TestLogger(unittest.TestCase):
    """Integration test: log a full benchmark run and validate the log contents."""

    def setUp(self) -> None:
        # Run a random agent on the Sigmoid benchmark for several seeds and
        # episodes, logging step/episode/seed/instance markers along the way.
        self.temp_dir = tempfile.TemporaryDirectory()

        episodes = 80
        seeds = [0, 1, 3, 4, 5]
        experiment_name = "test_env"
        logger = Logger(
            output_path=Path(self.temp_dir.name),
            experiment_name=experiment_name,
            step_write_frequency=None,
            episode_write_frequency=None,
        )
        benchmark = SigmoidBenchmark()
        env = benchmark.get_benchmark()
        agent = RandomAgent(env)
        logger.set_env(env)
        env_logger = logger.add_module(env)

        for seed in seeds:
            env.seed(seed)
            logger.reset_episode()
            for episode in range(episodes):
                state, _ = env.reset()
                terminated, truncated = False, False
                reward = 0
                step = 0
                while not (terminated or truncated):
                    action = agent.act(state, reward)
                    env_logger.log(
                        "logged_step",
                        step,
                    )
                    env_logger.log("logged_seed", env.initial_seed)
                    env_logger.log("logged_instance", env.get_inst_id())
                    env_logger.log(
                        "logged_episode",
                        episode,
                    )
                    next_state, reward, terminated, truncated, _ = env.step(action)
                    env_logger.log(
                        "reward",
                        reward,
                    )
                    env_logger.log(
                        "terminated",
                        terminated,
                    )
                    env_logger.log(
                        "truncated",
                        truncated,
                    )
                    agent.train(next_state, reward)
                    state = next_state
                    logger.next_step()
                    step += 1
                agent.end_episode(state, reward)
                logger.next_episode()
        env.close()
        logger.close()

        self.log_file = env_logger.log_file.name

    def tearDown(self) -> None:
        self.temp_dir.cleanup()

    def test_env_logger(self):
        with open(self.log_file, "r") as log_file:
            logs = list(map(json.loads, log_file))

        for log in logs:
            # todo: check when nan occurs
            if "logged_step" in log:
                self.assertEqual(log["logged_step"]["values"][0], log["step"])

            if "logged_episode" in log:
                self.assertEqual(log["logged_episode"]["values"][0], log["episode"])

            # check that only one seed occurs per episode
            seeds = set(log["logged_seed"]["values"])
            self.assertEqual(len(seeds), 1)
            (seed,) = seeds
            self.assertEqual(seed, log["seed"])

            # check that only one instance occurs per episode
            instances = set(log["logged_instance"]["values"])
            # BUG FIX: this used to re-check len(seeds) instead of len(instances),
            # making the uniqueness assertion for instances a no-op.
            self.assertEqual(len(instances), 1)
            (instance,) = instances
            self.assertEqual(instance, log["instance"])

    def test_data_loading(self):
        with open(self.log_file, "r") as log_file:
            logs = list(map(json.loads, log_file))
        dataframe = log2dataframe(
            logs,
            wide=True,
        )

        # The logged markers must line up with the logger's own bookkeeping.
        self.assertTrue((dataframe.logged_step == dataframe.step).all())
        self.assertTrue((dataframe.logged_episode == dataframe.episode).all())
class TestModuleLogger(unittest.TestCase):
    """Unit tests for ModuleLogger: space logging, numpy handling, basic counters."""

    def setUp(self) -> None:
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self) -> None:
        self.temp_dir.cleanup()

    def test_spaces(self):
        # Samples from every supported gym space type must be loggable and
        # recoverable through log2dataframe in both wide and long format.
        experiment_name = "test_spaces"
        module_name = "module"

        logger = ModuleLogger(
            output_path=Path(self.temp_dir.name),
            experiment_name=experiment_name,
            module=module_name,
            step_write_frequency=None,
            episode_write_frequency=None,
        )

        seed = 3

        # Discrete
        space = Discrete(n=3)
        space.seed(seed)
        logger.log_space("Discrete", space.sample())

        # MultiDiscrete
        space = MultiDiscrete(np.array([3, 2]))
        space.seed(seed)
        logger.log_space("MultiDiscrete", space.sample())

        # Dict
        space = Dict(
            {
                "predictiveChangeVarDiscountedAverage": spaces.Box(
                    low=-np.inf, high=np.inf, shape=(1,)
                ),
                "predictiveChangeVarUncertainty": spaces.Box(
                    low=0, high=np.inf, shape=(1,)
                ),
                "lossVarDiscountedAverage": spaces.Box(
                    low=-np.inf, high=np.inf, shape=(1,)
                ),
                "lossVarUncertainty": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "currentLR": spaces.Box(low=0, high=1, shape=(1,)),
                "trainingLoss": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "validationLoss": spaces.Box(low=0, high=np.inf, shape=(1,)),
            }
        )
        space.seed(seed)
        logger.log_space("Dict", space.sample())

        space = Box(np.array([0, 0]), np.array([2, 2]))
        space.seed(seed)
        logger.log_space("Box", space.sample())

        logger.close()
        with open(logger.get_logfile(), "r") as log_file:
            logs = list(map(json.loads, log_file))

        wide = log2dataframe(logs, wide=True)
        long = log2dataframe(logs, drop_columns=None)

        self.assertEqual(len(wide), 1)
        first_row = wide.iloc[0]

        # Discrete
        self.assertTrue(not np.isnan(first_row.Discrete))

        # MultiDiscrete: both components logged, at the same timestamp.
        self.assertTrue(not np.isnan(first_row.MultiDiscrete_0))
        self.assertTrue(not np.isnan(first_row.MultiDiscrete_1))
        simultaneous_logged = long[
            (long.name == "MultiDiscrete_0") | (long.name == "MultiDiscrete_1")
        ]
        self.assertEqual(len(simultaneous_logged.time.unique()), 1)

        # Dict: each sub-space becomes its own flattened column.
        expected_columns = [
            "Dict_currentLR_0",
            "Dict_lossVarDiscountedAverage_0",
            "Dict_lossVarUncertainty_0",
            "Dict_predictiveChangeVarDiscountedAverage_0",
            "Dict_predictiveChangeVarUncertainty_0",
            "Dict_trainingLoss_0",
        ]
        for expected_column in expected_columns:
            self.assertTrue(not np.isnan(first_row[expected_column]))
        simultaneous_logged = long[long.name.isin(expected_columns)]
        self.assertEqual(len(simultaneous_logged.time.unique()), 1)

        # Box
        self.assertTrue(not np.isnan(first_row.Box_0))
        self.assertTrue(not np.isnan(first_row.Box_1))
        simultaneous_logged = long[(long.name == "Box_0") | (long.name == "Box_1")]
        self.assertEqual(len(simultaneous_logged.time.unique()), 1)

    def test_log_numpy(self):
        # A 1-d numpy array round-trips as a tuple.
        experiment_name = "test_log_numpy"
        module_name = "module"

        logger = ModuleLogger(
            output_path=Path(self.temp_dir.name),
            experiment_name=experiment_name,
            module=module_name,
            step_write_frequency=None,
            episode_write_frequency=None,
        )

        logger.log(
            "state",
            np.array([1, 2, 3]),
        )
        logger.close()

        with open(logger.get_logfile(), "r") as log_file:
            logs = list(map(json.loads, log_file))

        dataframe = log2dataframe(logs, wide=True)

        self.assertEqual(dataframe.iloc[0].state, (1, 2, 3))

    def test_numpy_logging(self):
        # Multi-dimensional arrays in additional info become nested tuples.
        experiment_name = "test_numpy_logging"
        module_name = "module"

        logger = ModuleLogger(
            output_path=Path(self.temp_dir.name),
            experiment_name=experiment_name,
            module=module_name,
            step_write_frequency=None,
            episode_write_frequency=None,
        )

        logger.set_additional_info(np=np.zeros((2, 3, 3)))
        logger.log("test", 0)
        logger.close()

        with open(logger.get_logfile(), "r") as log_file:
            logs = list(map(json.loads, log_file))

        dataframe = log2dataframe(logs, wide=True)

        expected_result = (((0,) * 3,) * 3,) * 2
        self.assertEqual(dataframe.iloc[0].np, expected_result)

    def test_basic_logging(self):
        experiment_name = "test_basic_logging"
        module_name = "module"
        episodes = 10
        steps = 3

        logger = ModuleLogger(
            output_path=Path(self.temp_dir.name),
            experiment_name=experiment_name,
            module=module_name,
            step_write_frequency=None,
            episode_write_frequency=None,
        )

        for episode in range(episodes):
            logger.log(
                "episode_logged",
                episode,
            )
            for step in range(steps):
                logger.log(
                    "step_logged",
                    step,
                )
                logger.next_step()
            logger.next_episode()
        logger.close()  # or logger.write()

        with open(logger.log_file.name, "r") as log_file:
            logs = list(map(json.loads, log_file))

        self.assertEqual(
            episodes * steps,
            len(logs),
            "For each step with logging done in it one line exit",
        )

        for log in logs:
            # BUG FIX: these checks used the keys "logged_step"/"logged_episode",
            # but the values were logged under "step_logged"/"episode_logged",
            # so neither assertion branch ever ran. The episode branch also
            # compared against log["step"] instead of log["episode"].
            if "step_logged" in log:
                self.assertTrue(
                    all(
                        log["step"] == logged_step
                        for logged_step in log["step_logged"]["values"]
                    ),
                )
            if "episode_logged" in log:
                self.assertTrue(
                    all(
                        log["episode"] == logged_episode
                        for logged_episode in log["episode_logged"]["values"]
                    ),
                )
| 10,418 | 31.457944 | 84 | py |
DACBench | DACBench-main/tests/test_runner.py | import os
import tempfile
import unittest
from pathlib import Path
import matplotlib
import numpy as np
import pytest
from gymnasium import spaces
from dacbench.abstract_agent import AbstractDACBenchAgent
# import shutil
from dacbench.runner import run_dacbench # , plot_results
# Headless backend so the runner's plotting works without a display.
matplotlib.use("Agg")


class TestRunner(unittest.TestCase):
    """Tests for the AbstractDACBenchAgent interface and the run_dacbench loop."""

    def test_abstract_agent(self):
        # The abstract agent must refuse to act/train/end without an implementation.
        agent = AbstractDACBenchAgent("dummy")

        with pytest.raises(NotImplementedError):
            agent.act(0, 0)
        with pytest.raises(NotImplementedError):
            agent.train(0, 0)
        with pytest.raises(NotImplementedError):
            agent.end_episode(0, 0)

    def test_loop(self):
        class DummyAgent(AbstractDACBenchAgent):
            # Minimal constant-action agent covering all action space types.
            def __init__(self, env):
                if isinstance(env.action_space, spaces.Discrete):
                    self.num_actions = 1
                elif isinstance(env.action_space, spaces.MultiDiscrete):
                    self.num_actions = len(env.action_space.nvec)
                else:
                    self.num_actions = len(env.action_space.high)

            def act(self, reward, state):
                action = np.ones(self.num_actions)
                if self.num_actions == 1:
                    action = 1
                return action

            def train(self, reward, state):
                pass

            def end_episode(self, reward, state):
                pass

        def make(env):
            return DummyAgent(env)

        with tempfile.TemporaryDirectory() as tmp_dir:
            run_dacbench(
                tmp_dir,
                make,
                1,
                bench=["LubyBenchmark", "SigmoidBenchmark"],
                seeds=[42],
            )
            # Both benchmark result directories must exist and be non-empty.
            path = Path(tmp_dir)
            self.assertFalse(os.stat(path / "LubyBenchmark") == 0)
            self.assertFalse(os.stat(path / "SigmoidBenchmark") == 0)

    # def test_plotting(self):
    #     plot_results("test_run")
    #     shutil.rmtree("test_run", ignore_errors=True)
| 2,053 | 27.136986 | 72 | py |
DACBench | DACBench-main/tests/test_run_baselines.py | import tempfile
import unittest
from pathlib import Path
from dacbench.logger import load_logs, log2dataframe
from dacbench.run_baselines import (
DISCRETE_ACTIONS,
main,
run_dynamic_policy,
run_optimal,
run_random,
run_static,
)
class TestRunBaselines(unittest.TestCase):
def run_random_test_with_benchmark(self, benchmark):
seeds = [42]
fixed = 2
num_episodes = 3
with tempfile.TemporaryDirectory() as temp_dir:
result_path = Path(temp_dir)
run_random(result_path, benchmark, num_episodes, seeds, fixed)
expected_experiment_path = (
result_path / benchmark / f"random_fixed{fixed}_{seeds[0]}"
)
self.assertTrue(expected_experiment_path.exists())
performance_tracking_log = (
expected_experiment_path / "PerformanceTrackingWrapper.jsonl"
)
self.assertTrue(performance_tracking_log.exists())
logs = log2dataframe(load_logs(performance_tracking_log))
self.assertEqual(len(logs), num_episodes)
self.assertTrue((logs["seed"] == seeds[0]).all())
def test_run_random_SigmoidBenchmark(self):
self.run_random_test_with_benchmark("SigmoidBenchmark")
def test_run_random_LubyBenchmark(self):
self.run_random_test_with_benchmark("LubyBenchmark")
def test_run_random_FastDownwardBenchmark(self):
self.run_random_test_with_benchmark("FastDownwardBenchmark")
def test_run_random_CMAESBenchmark(self):
self.run_random_test_with_benchmark("CMAESBenchmark")
@unittest.skip("Due to issue #97")
def test_run_random_SGDBenchmark(self):
self.run_random_test_with_benchmark("SGDBenchmark")
# no get_benchmark method
# def test_run_random_ModeaBenchmark(self):
# self.run_random_test_with_benchmark("ModeaBenchmark")
def run_static_test_with_benchmark(self, benchmark):
seeds = [42]
num_episodes = 3
action = DISCRETE_ACTIONS[benchmark][0]
with tempfile.TemporaryDirectory() as temp_dir:
result_path = Path(temp_dir)
run_static(result_path, benchmark, action, num_episodes, seeds)
expected_experiment_path = (
result_path / benchmark / f"static_{action}_{seeds[0]}"
)
self.assertTrue(expected_experiment_path.exists())
performance_tracking_log = (
expected_experiment_path / "PerformanceTrackingWrapper.jsonl"
)
self.assertTrue(performance_tracking_log.exists())
logs = log2dataframe(load_logs(performance_tracking_log))
self.assertEqual(len(logs), num_episodes)
self.assertTrue((logs["seed"] == seeds[0]).all())
def test_run_static_SigmoidBenchmark(self):
self.run_static_test_with_benchmark("SigmoidBenchmark")
def test_run_static_LubyBenchmark(self):
self.run_static_test_with_benchmark("LubyBenchmark")
def test_run_static_FastDownwardBenchmark(self):
self.run_static_test_with_benchmark("FastDownwardBenchmark")
def test_run_static_CMAESBenchmark(self):
self.run_static_test_with_benchmark("CMAESBenchmark")
@unittest.skip("Due to issue #97")
def test_run_static_SGDBenchmark(self):
self.run_static_test_with_benchmark("SGDBenchmark")
# no get_benchmark method
# def test_run_static_ModeaBenchmark(self):
# self.run_static_test_with_benchmark("ModeaBenchmark")
def test_run_dynamic_policy_CMAESBenchmark(self):
benchmark = "CMAESBenchmark"
seeds = [42]
num_episodes = 3
with tempfile.TemporaryDirectory() as temp_dir:
result_path = Path(temp_dir)
run_dynamic_policy(result_path, benchmark, num_episodes, seeds)
expected_experiment_path = result_path / benchmark / f"csa_{seeds[0]}"
self.assertTrue(expected_experiment_path.exists())
performance_tracking_log = (
expected_experiment_path / "PerformanceTrackingWrapper.jsonl"
)
self.assertTrue(performance_tracking_log.exists())
logs = log2dataframe(load_logs(performance_tracking_log))
self.assertEqual(len(logs), num_episodes)
self.assertTrue((logs["seed"] == seeds[0]).all())
def run_optimal_test_with_benchmark(self, benchmark):
seeds = [42]
num_episodes = 3
with tempfile.TemporaryDirectory() as temp_dir:
result_path = Path(temp_dir)
run_optimal(result_path, benchmark, num_episodes, seeds)
expected_experiment_path = result_path / benchmark / f"optimal_{seeds[0]}"
self.assertTrue(expected_experiment_path.exists())
performance_tracking_log = (
expected_experiment_path / "PerformanceTrackingWrapper.jsonl"
)
self.assertTrue(performance_tracking_log.exists())
logs = log2dataframe(load_logs(performance_tracking_log))
self.assertEqual(len(logs), num_episodes)
self.assertTrue((logs["seed"] == seeds[0]).all())
def test_run_optimal_LubyBenchmark(self):
self.run_optimal_test_with_benchmark("LubyBenchmark")
def test_run_optimal_SigmoidBenchmark(self):
self.run_optimal_test_with_benchmark("SigmoidBenchmark")
    def test_run_optimal_FastDownwardBenchmark(self):
        """Optimal-policy baseline runs end-to-end on FastDownwardBenchmark."""
        self.run_optimal_test_with_benchmark("FastDownwardBenchmark")
    def test_main_help(self):
        """`--help` makes the CLI exit via argparse's SystemExit."""
        with self.assertRaises(SystemExit):
            main(["--help"])
| 5,654 | 35.019108 | 86 | py |
DACBench | DACBench-main/tests/test_abstract_env.py | import unittest
import numpy as np
from gymnasium import spaces
from dacbench.abstract_env import AbstractEnv
class TestAbstractEnv(unittest.TestCase):
    """Tests for construction and bookkeeping of the AbstractEnv base class."""
    def test_not_implemented_methods(self):
        """step and reset are abstract on the base class and must raise."""
        env = self.make_env()
        with self.assertRaises(NotImplementedError):
            env.step(0)
        with self.assertRaises(NotImplementedError):
            env.reset()
    def test_exceptions(self):
        """Malformed configs raise TypeError/KeyError from the constructor."""
        # Case 1: "Dict" observation space class with Box-style args -> TypeError.
        config = {
            "action_space_class": "Discrete",
            "action_space_args": [4],
            "observation_space_class": "Dict",
            "observation_space_type": np.float32,
            "observation_space_args": [
                np.array([-1, -1, -1], dtype=np.float32),
                np.array([1, 1, 1], dtype=np.float32),
            ],
            "reward_range": (-1, 0),
            "cutoff": 30,
            "instance_set": {0: 1, 1: 1},
            "benchmark_info": None,
        }
        with self.assertRaises(TypeError):
            AbstractEnv(config)
        # Case 2: "observation_space_args" missing entirely -> KeyError.
        config = {
            "action_space_class": "Discrete",
            "action_space_args": [4],
            "observation_space_class": "Box",
            "observation_space_type": np.float32,
            "reward_range": (-1, 0),
            "cutoff": 30,
            "benchmark_info": None,
            "instance_set": {0: 1, 1: 1},
        }
        with self.assertRaises(KeyError):
            AbstractEnv(config)
        # Case 3: "observation_space_class" missing -> KeyError.
        config = {
            "action_space_class": "Discrete",
            "action_space_args": [4],
            "observation_space_type": np.float32,
            "observation_space_args": [
                np.array([-1, -1, -1], dtype=np.float32),
                np.array([1, 1, 1], dtype=np.float32),
            ],
            "reward_range": (-1, 0),
            "cutoff": 30,
            "benchmark_info": None,
            "instance_set": {0: 1, 1: 1},
        }
        with self.assertRaises(KeyError):
            AbstractEnv(config)
        # Case 4: "Tuple" action space class fed a float array -> TypeError.
        config = {
            "action_space_class": "Tuple",
            "action_space_args": np.array([4]).astype(np.float32),
            "observation_space_class": "Box",
            "observation_space_type": np.float32,
            "observation_space_args": [
                np.array([-1, -1, -1], dtype=np.float32),
                np.array([1, 1, 1], dtype=np.float32),
            ],
            "reward_range": (-1, 0),
            "cutoff": 30,
            "benchmark_info": None,
            "instance_set": {0: 1, 1: 1},
        }
        with self.assertRaises(TypeError):
            AbstractEnv(config)
        # Case 5: "action_space_class" missing -> KeyError.
        config = {
            "action_space_args": np.array([4]).astype(np.float32),
            "observation_space_class": "Box",
            "observation_space_type": np.float32,
            "observation_space_args": [
                np.array([-1, -1, -1], dtype=np.float32),
                np.array([1, 1, 1], dtype=np.float32),
            ],
            "reward_range": (-1, 0),
            "cutoff": 30,
            "benchmark_info": None,
            "instance_set": {0: 1, 1: 1},
        }
        with self.assertRaises(KeyError):
            AbstractEnv(config)
    def make_env(self):
        """Construct an AbstractEnv from a minimal, well-formed config."""
        config = {
            "action_space_class": "Discrete",
            "action_space_args": [4],
            "observation_space_class": "Box",
            "observation_space_type": np.float32,
            "observation_space_args": [
                np.array([-1, -1, -1], dtype=np.float32),
                np.array([1, 1, 1], dtype=np.float32),
            ],
            "reward_range": (-1, 0),
            "cutoff": 30,
            "benchmark_info": None,
            "instance_set": {0: 1, 1: 1},
        }
        env = AbstractEnv(config)
        return env
    def test_setup(self):
        """Spaces and bookkeeping are built both from class names and instances."""
        env = self.make_env()
        self.assertTrue(len(env.instance_set) >= 1)
        self.assertTrue(env.n_steps > 0)
        self.assertTrue(type(env.reward_range) is tuple)
        self.assertTrue(issubclass(type(env.observation_space), spaces.Space))
        self.assertTrue(issubclass(type(env.action_space), spaces.Space))
        # Passing ready-made space objects must work as well.
        config = {
            "action_space": spaces.Discrete(2),
            "observation_space": spaces.Discrete(2),
            "reward_range": (-1, 0),
            "cutoff": 30,
            "benchmark_info": None,
            "instance_set": {0: 1, 1: 1},
        }
        env = AbstractEnv(config)
        self.assertTrue(len(env.instance_set) >= 1)
        self.assertTrue(env.n_steps > 0)
        self.assertTrue(type(env.reward_range) is tuple)
        self.assertTrue(issubclass(type(env.observation_space), spaces.Space))
        self.assertTrue(issubclass(type(env.action_space), spaces.Space))
    def test_pre_step_and_reset(self):
        """step_ signals cutoff; reset_ advances the instance and zeroes steps."""
        env = self.make_env()
        env.n_steps = 10
        self.assertFalse(env.step_())
        env.n_steps = 1
        self.assertTrue(env.step_())
        env.inst_id = 0
        env.reset_()
        self.assertTrue(env.inst_id == 1)
        self.assertTrue(env.c_step == 0)
    def test_getters_and_setters(self):
        """Instance id, instance and instance set accessors round-trip."""
        env = self.make_env()
        self.assertTrue(env.inst_id == env.get_inst_id())
        env.set_inst_id(1)
        self.assertTrue(1 == env.get_inst_id())
        self.assertTrue(env.instance == env.get_instance())
        env.set_instance(100)
        self.assertTrue(100 == env.get_instance())
        self.assertTrue(
            all(
                [
                    env.instance_set[k] == env.get_instance_set()[k]
                    for k in range(len(env.instance_set))
                ]
            )
        )
        env.set_instance_set({0: 100})
        self.assertTrue(100 == env.get_instance_set()[0])
    def test_seed(self):
        """Repeated seeding yields (mostly) distinct seeds."""
        env = self.make_env()
        seeds = []
        for _ in range(10):
            seeds.append(env.seed()[0])
        # Allow a couple of collisions, but not a constant seed.
        self.assertFalse(len(set(seeds)) < 8)
| 5,931 | 31.955556 | 78 | py |
DACBench | DACBench-main/tests/benchmarks/test_fd_benchmark.py | import json
import os
import unittest
from dacbench.benchmarks import FastDownwardBenchmark
from dacbench.envs import FastDownwardEnv
class TestFDBenchmark(unittest.TestCase):
    """Tests for the FastDownward benchmark wrapper."""

    def test_get_env(self):
        """get_environment yields a FastDownwardEnv for default and custom instance sets."""
        bench = FastDownwardBenchmark()
        env = bench.get_environment()
        self.assertTrue(issubclass(type(env), FastDownwardEnv))
        bench.config.instance_set_path = "../instance_sets/fast_downward/childsnack"
        bench.read_instance_set()
        env = bench.get_environment()
        self.assertTrue(issubclass(type(env), FastDownwardEnv))

    # TODO: This test breaks remote testing, possibly due to too many open ports.
    # Should be investigated
    # def test_scenarios(self):
    #    scenarios = [
    #        "fd_barman.json",
    #        # "fd_blocksworld.json",
    #        # "fd_visitall.json",
    #        # "fd_childsnack.json",
    #        # "fd_sokoban.json",
    #        # "fd_rovers.json",
    #    ]
    #    for s in scenarios:
    #        path = os.path.join("dacbench/additional_configs/fast_downward/", s)
    #        bench = FastDownwardBenchmark(path)
    #        self.assertTrue(bench.config is not None)
    #        env = bench.get_environment()
    #        state, info = env.reset()
    #        self.assertTrue(state is not None)
    #        self.assertTrue(info is not None)
    #        state, _, _, _, _ = env.step(0)
    #        self.assertTrue(state is not None)

    def test_save_conf(self):
        """save_config writes every (JSON-serializable) config key to disk."""
        bench = FastDownwardBenchmark()
        del bench.config["config_space"]
        bench.save_config("test_conf.json")
        with open("test_conf.json", "r") as fp:
            recovered = json.load(fp)
        for k in bench.config.keys():
            self.assertTrue(k in recovered.keys())
        os.remove("test_conf.json")

    def test_read_instances(self):
        """Instance set holds 30 existing file paths, identical in the env."""
        bench = FastDownwardBenchmark()
        bench.read_instance_set()
        self.assertTrue(len(bench.config.instance_set.keys()) == 30)
        # isinstance instead of type(...) == str: idiomatic and subclass-safe.
        self.assertTrue(isinstance(bench.config.instance_set[0], str))
        self.assertTrue(os.path.isfile(bench.config.instance_set[0]))
        path = bench.config.instance_set[0]
        bench2 = FastDownwardBenchmark()
        env = bench2.get_environment()
        self.assertTrue(isinstance(env.instance_set[0], str))
        self.assertTrue(len(env.instance_set.keys()) == 30)
        self.assertTrue(path == env.instance_set[0])

    def test_benchmark_env(self):
        """get_benchmark also yields a FastDownwardEnv."""
        bench = FastDownwardBenchmark()
        env = bench.get_benchmark()
        self.assertTrue(issubclass(type(env), FastDownwardEnv))

    def test_from_to_json(self):
        """Round-tripping through JSON reproduces an equal benchmark."""
        bench = FastDownwardBenchmark()
        restored_bench = FastDownwardBenchmark.from_json(bench.to_json())
        self.assertEqual(bench, restored_bench)
| 2,809 | 36.972973 | 84 | py |
DACBench | DACBench-main/tests/benchmarks/test_luby_benchmark.py | import json
import os
import unittest
import numpy as np
from dacbench.benchmarks import LubyBenchmark
from dacbench.envs import LubyEnv
from dacbench.wrappers import RewardNoiseWrapper
class TestLubyBenchmark(unittest.TestCase):
    """Tests for the Luby benchmark configuration and environments."""

    def test_get_env(self):
        """Default config produces a LubyEnv."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        self.assertTrue(issubclass(type(env), LubyEnv))

    def test_scenarios(self):
        """All shipped difficulty scenarios load and can be stepped once."""
        scenarios = ["luby_hard.json", "luby_harder.json", "luby_very_hard.json"]
        for s in scenarios:
            path = os.path.join("dacbench/additional_configs/luby/", s)
            bench = LubyBenchmark(path)
            self.assertTrue(bench.config is not None)
            env = bench.get_environment()
            state, info = env.reset()
            self.assertTrue(state is not None)
            self.assertTrue(info is not None)
            state, _, _, _, _ = env.step(0)
            self.assertTrue(state is not None)

    def test_save_conf(self):
        """save_config writes every (JSON-serializable) config key to disk."""
        bench = LubyBenchmark()
        del bench.config["config_space"]
        bench.save_config("test_conf.json")
        with open("test_conf.json", "r") as fp:
            recovered = json.load(fp)
        for k in bench.config.keys():
            self.assertTrue(k in recovered.keys())
        os.remove("test_conf.json")

    def test_read_instances(self):
        """Instance set contains exactly the single default instance [0, 0]."""
        bench = LubyBenchmark()
        bench.read_instance_set()
        # (stray debug print removed)
        self.assertTrue(len(bench.config.instance_set.keys()) == 1)
        self.assertTrue(len(bench.config.instance_set[0]) == 2)
        self.assertTrue(bench.config.instance_set[0] == [0, 0])
        bench2 = LubyBenchmark()
        env = bench2.get_environment()
        self.assertTrue(len(env.instance_set[0]) == 2)
        self.assertTrue(env.instance_set[0] == [0, 0])
        self.assertTrue(len(env.instance_set.keys()) == 1)

    def test_benchmark_env(self):
        """get_benchmark wraps the env in reward noise that perturbs rewards."""
        bench = LubyBenchmark()
        env = bench.get_benchmark()
        self.assertTrue(issubclass(type(env), RewardNoiseWrapper))
        env.reset()
        _, r, _, _, _ = env.step(1)
        # Noise should move the reward off the clean values 0 and -1.
        self.assertTrue(r != 0 and r != -1)

    def test_cutoff_setting(self):
        """set_cutoff adjusts both cutoff and the derived action space size."""
        bench = LubyBenchmark()
        bench.set_cutoff(100)
        self.assertTrue(bench.config.cutoff == 100)
        self.assertTrue(bench.config.action_space_args == [int(np.log2(100))])

    def test_history_len_setting(self):
        """set_history_length resizes the observation (history + 1 entries)."""
        bench = LubyBenchmark()
        bench.set_history_length(20)
        self.assertTrue(len(bench.config.observation_space_args[0]) == 21)

    def test_from_to_json(self):
        """Round-tripping through JSON reproduces an equal benchmark."""
        bench = LubyBenchmark()
        restored_bench = LubyBenchmark.from_json(bench.to_json())
        self.assertEqual(bench, restored_bench)
| 2,762 | 34.883117 | 81 | py |
DACBench | DACBench-main/tests/benchmarks/test_sgd_benchmark.py | import json
import os
import unittest
from dacbench.benchmarks import SGDBenchmark
from dacbench.envs import SGDEnv
class TestSGDBenchmark(unittest.TestCase):
    """Tests for the SGD benchmark configuration and environment creation."""

    def test_get_env(self):
        """Default configuration produces an SGDEnv instance."""
        environment = SGDBenchmark().get_environment()
        self.assertTrue(issubclass(type(environment), SGDEnv))

    def test_setup(self):
        """A config file passed to the constructor overrides the defaults."""
        benchmark = SGDBenchmark()
        self.assertIsNotNone(benchmark.config)
        with open("test_conf.json", "w+") as handle:
            json.dump({"dummy": 0}, handle)
        benchmark = SGDBenchmark("test_conf.json")
        self.assertEqual(benchmark.config.dummy, 0)
        os.remove("test_conf.json")

    def test_save_conf(self):
        """save_config writes every configuration key to disk."""
        benchmark = SGDBenchmark()
        del benchmark.config["config_space"]
        benchmark.save_config("test_conf.json")
        with open("test_conf.json", "r") as handle:
            recovered = json.load(handle)
        for key in benchmark.config.keys():
            self.assertIn(key, recovered)
        os.remove("test_conf.json")

    def test_read_instances(self):
        """Benchmark and environment read the same 100-instance set."""
        benchmark = SGDBenchmark()
        benchmark.read_instance_set()
        self.assertEqual(len(benchmark.config.instance_set.keys()), 100)
        instance = benchmark.config.instance_set[0]
        environment = SGDBenchmark().get_environment()
        self.assertEqual(len(environment.instance_set.keys()), 100)
        # [3] instance architecture constructor functionally identical but not comparable
        self.assertEqual(instance[0], environment.instance_set[0][0])
        self.assertEqual(instance[1], environment.instance_set[0][1])

    def test_benchmark_env(self):
        """get_benchmark also yields an SGDEnv."""
        environment = SGDBenchmark().get_benchmark()
        self.assertTrue(issubclass(type(environment), SGDEnv))

    def test_from_to_json(self):
        """Round-tripping through JSON reproduces an equal benchmark."""
        benchmark = SGDBenchmark()
        self.assertEqual(benchmark, SGDBenchmark.from_json(benchmark.to_json()))
| 1,916 | 32.631579 | 89 | py |
DACBench | DACBench-main/tests/benchmarks/test_cma_benchmark.py | import json
import os
import unittest
from dacbench.benchmarks import CMAESBenchmark
from dacbench.envs import CMAESEnv
class TestCMABenchmark(unittest.TestCase):
    """Tests for the CMA-ES benchmark configuration and environment creation."""

    def test_get_env(self):
        """Default configuration produces a CMAESEnv instance."""
        environment = CMAESBenchmark().get_environment()
        self.assertTrue(issubclass(type(environment), CMAESEnv))

    def test_setup(self):
        """A config file passed to the constructor overrides the defaults."""
        benchmark = CMAESBenchmark()
        self.assertIsNotNone(benchmark.config)
        with open("test_conf.json", "w+") as handle:
            json.dump({"dummy": 0}, handle)
        benchmark = CMAESBenchmark("test_conf.json")
        self.assertEqual(benchmark.config.dummy, 0)
        os.remove("test_conf.json")

    def test_save_conf(self):
        """save_config writes every configuration key to disk."""
        benchmark = CMAESBenchmark()
        del benchmark.config["config_space"]
        benchmark.save_config("test_conf.json")
        with open("test_conf.json", "r") as handle:
            recovered = json.load(handle)
        for key in benchmark.config.keys():
            self.assertIn(key, recovered)
        os.remove("test_conf.json")

    def test_from_to_json(self):
        """Round-tripping through JSON reproduces an equal benchmark."""
        benchmark = CMAESBenchmark()
        self.assertEqual(benchmark, CMAESBenchmark.from_json(benchmark.to_json()))

    def test_read_instances(self):
        """Benchmark and environment read the same 100-instance set."""
        benchmark = CMAESBenchmark()
        benchmark.read_instance_set()
        self.assertEqual(len(benchmark.config.instance_set.keys()), 100)
        self.assertEqual(len(benchmark.config.instance_set[0]), 4)
        self.assertEqual(benchmark.config.instance_set[0][2], 0.6445072293504781)
        instance = benchmark.config.instance_set[0]
        environment = CMAESBenchmark().get_environment()
        self.assertEqual(len(environment.instance_set[0]), 4)
        self.assertEqual(len(environment.instance_set.keys()), 100)
        self.assertEqual(instance, environment.instance_set[0])

    def test_benchmark_env(self):
        """get_benchmark also yields a CMAESEnv."""
        environment = CMAESBenchmark().get_benchmark()
        self.assertTrue(issubclass(type(environment), CMAESEnv))
| 1,985 | 33.241379 | 78 | py |
DACBench | DACBench-main/tests/benchmarks/test_sigmoid_benchmark.py | import json
import os
import unittest
from dacbench.benchmarks import SigmoidBenchmark
from dacbench.envs import SigmoidEnv
class TestSigmoidBenchmark(unittest.TestCase):
    """Tests for the Sigmoid benchmark configuration and environments."""

    def test_get_env(self):
        """Default configuration produces a SigmoidEnv instance."""
        environment = SigmoidBenchmark().get_environment()
        self.assertTrue(issubclass(type(environment), SigmoidEnv))

    def test_scenarios(self):
        """All shipped dimension/multiplier scenarios load and can be stepped."""
        scenario_files = [
            "sigmoid_1D3M.json",
            "sigmoid_2D3M.json",
            "sigmoid_3D3M.json",
            "sigmoid_5D3M.json",
        ]
        for scenario in scenario_files:
            config_path = os.path.join(
                "dacbench/additional_configs/sigmoid", scenario
            )
            benchmark = SigmoidBenchmark(config_path)
            self.assertIsNotNone(benchmark.config)
            environment = benchmark.get_environment()
            observation, info = environment.reset()
            self.assertIsNotNone(observation)
            self.assertIsNotNone(info)
            observation, _, _, _, _ = environment.step(
                environment.action_space.sample()
            )
            self.assertIsNotNone(observation)

    def test_save_conf(self):
        """save_config writes every configuration key to disk."""
        benchmark = SigmoidBenchmark()
        del benchmark.config["config_space"]
        benchmark.save_config("test_conf.json")
        with open("test_conf.json", "r") as handle:
            recovered = json.load(handle)
        for key in benchmark.config.keys():
            self.assertIn(key, recovered)
        os.remove("test_conf.json")

    def test_from_to_json(self):
        """Round-tripping through JSON reproduces an equal benchmark."""
        benchmark = SigmoidBenchmark()
        self.assertEqual(benchmark, SigmoidBenchmark.from_json(benchmark.to_json()))

    def test_read_instances(self):
        """Benchmark and environment read the same 300-instance set."""
        benchmark = SigmoidBenchmark()
        benchmark.read_instance_set()
        self.assertEqual(len(benchmark.config.instance_set.keys()), 300)
        self.assertEqual(len(benchmark.config.instance_set[0]), 4)
        first_instance = benchmark.config.instance_set[0]
        environment = SigmoidBenchmark().get_environment()
        self.assertEqual(len(environment.instance_set[0]), 4)
        self.assertEqual(environment.instance_set[0], first_instance)
        self.assertEqual(len(environment.instance_set.keys()), 300)
| 2,104 | 34.083333 | 73 | py |
DACBench | DACBench-main/tests/envs/test_sgd.py | import os
import unittest
import numpy as np
from dacbench import AbstractEnv
from dacbench.abstract_benchmark import objdict
from dacbench.benchmarks.sgd_benchmark import SGD_DEFAULTS, SGDBenchmark
from dacbench.envs.sgd import Reward, SGDEnv
from dacbench.wrappers import ObservationWrapper
class TestSGDEnv(unittest.TestCase):
    """Tests for the learning-rate-control SGD environment.

    Statelessness/reproducibility tests below depend on the exact order of
    RNG draws and env interactions; do not reorder statements.
    """
    def setUp(self):
        """Create a fresh seeded benchmark env for every test."""
        bench = SGDBenchmark()
        self.env = bench.get_benchmark(seed=123)
    @staticmethod
    def data_path(path):
        """Resolve *path* relative to this module's data directory."""
        return os.path.join(os.path.dirname(__file__), "data", path)
    def test_setup(self):
        """Constructor leaves training state unset and copies config values."""
        self.assertTrue(issubclass(type(self.env), AbstractEnv))
        self.assertFalse(self.env.no_cuda)
        self.assertTrue(self.env.model is None)
        self.assertTrue(self.env.current_training_loss is None)
        self.assertTrue(self.env.batch_size == SGD_DEFAULTS["training_batch_size"])
        self.assertTrue(self.env.initial_lr == self.env.current_lr)
    def test_reward_type(self):
        """reward_type accepts enum members and their names; rejects others."""
        benchmark = SGDBenchmark()
        benchmark.config = objdict(SGD_DEFAULTS.copy())
        benchmark.read_instance_set()
        env = SGDEnv(benchmark.config)
        self.assertEqual(env.reward_type, SGD_DEFAULTS.reward_type)
        # Passing the enum member's name string resolves to the same member.
        benchmark.config.reward_type = SGD_DEFAULTS.reward_type.name
        env = SGDEnv(benchmark.config)
        self.assertEqual(env.reward_type, SGD_DEFAULTS.reward_type)
        benchmark.config.reward_type = "invalid_reward"
        with self.assertRaises(ValueError):
            env = SGDEnv(benchmark.config)
        benchmark.config.reward_type = 0
        with self.assertRaises(ValueError):
            env = SGDEnv(benchmark.config)
    def test_reset(self):
        """reset builds the model and both datasets."""
        self.env.reset()
        self.assertFalse(self.env.model is None)
        self.assertFalse(self.env.train_dataset is None)
        self.assertFalse(self.env.validation_dataset is None)
    def test_step(self):
        """Every Reward variant sets its range and yields in-range rewards."""
        benchmark = SGDBenchmark()
        benchmark.config = objdict(SGD_DEFAULTS.copy())
        benchmark.read_instance_set()
        for reward_type in Reward:
            benchmark.config.reward_type = reward_type
            env = SGDEnv(benchmark.config)
            env = ObservationWrapper(env)
            self.assertTrue(env.reward_range == reward_type.func.frange)
            env.reset()
            _, reward, terminated, truncated, meta = env.step(1.0)
            self.assertTrue(reward >= env.reward_range[0])
            self.assertTrue(reward <= env.reward_range[1])
            self.assertFalse(terminated)
            self.assertFalse(truncated)
            self.assertTrue(len(meta.keys()) == 0)
    def test_crash(self):
        """A NaN action crashes the env but keeps the state finite."""
        env = ObservationWrapper(self.env)
        env.reset()
        state, reward, terminated, truncated, _ = env.step(np.nan)
        self.assertTrue(env.crashed)
        self.assertFalse(any(np.isnan(state)))
        self.assertTrue(reward == env.crash_penalty)
    def test_stateless(self):
        """Replaying recorded actions per instance reproduces trajectories."""
        env = ObservationWrapper(self.env)
        rng = np.random.default_rng(123)
        mems = []
        instance_idxs = []
        # Phase 1: record (state, reward, truncated, action) per step.
        for _ in range(3):
            env.reset()
            instance_idxs.append(env.instance_index)
            terminated, truncated = False, False
            mem = []
            step = 0
            while not (terminated or truncated) and step < 5:
                action = np.exp(rng.integers(low=-10, high=1))
                state, reward, terminated, truncated, _ = env.step(action)
                mem.append(np.concatenate([state, [reward, int(truncated), action]]))
                step += 1
            mems.append(np.array(mem))
        rng = np.random.default_rng(123)
        # Phase 2: revisit instances in reverse order and replay the actions.
        for i, idx in enumerate(reversed(instance_idxs)):
            env.instance_index = idx - 1
            env.reset()
            self.assertTrue(env.instance_index == idx)
            terminated, truncated = False, False
            mem = []
            step = 0
            while not (terminated or truncated) and step < 5:
                action = mems[-(i + 1)][step][-1]
                state, reward, terminated, truncated, _ = env.step(action)
                mem.append(np.concatenate([state, [reward, int(truncated), action]]))
                step += 1
            np.testing.assert_allclose(mems[-(i + 1)], np.array(mem))
    def test_reproducibility(self):
        """Two identically seeded runs produce identical trajectories."""
        mems = []
        instances = []
        env = ObservationWrapper(self.env)
        for _ in range(2):
            rng = np.random.default_rng(123)
            env.seed(123)
            env.instance_index = 0
            instances.append(env.get_instance_set())
            env.reset()
            terminated, truncated = False, False
            mem = []
            step = 0
            while not (terminated or truncated) and step < 5:
                action = np.exp(rng.integers(low=-10, high=1))
                state, reward, terminated, truncated, _ = env.step(action)
                mem.append(np.concatenate([state, [reward, int(truncated), action]]))
                step += 1
            mems.append(np.array(mem))
        self.assertEqual(mems[0].size, mems[1].size)
        self.assertEqual(instances[0], instances[1])
        np.testing.assert_allclose(mems[0], mems[1])
    def test_get_default_state(self):
        """The default dict observation has the documented keys and values."""
        self.env.reset()
        state, _, _, _, _ = self.env.step(0.5)
        self.assertTrue(issubclass(type(state), dict))
        self.assertTrue(
            np.array_equal(
                list(state.keys()),
                [
                    "predictiveChangeVarDiscountedAverage",
                    "predictiveChangeVarUncertainty",
                    "lossVarDiscountedAverage",
                    "lossVarUncertainty",
                    "currentLR",
                    "trainingLoss",
                    "validationLoss",
                    "step",
                    "alignment",
                    "crashed",
                ],
            )
        )
        self.assertTrue(state["currentLR"] == 0.5)
        self.assertTrue(state["trainingLoss"] > 0)
        self.assertTrue(state["validationLoss"] > 0)
    def test_close(self):
        """close reports success."""
        self.assertTrue(self.env.close())
    def test_render(self):
        """Only the 'human' render mode is supported."""
        self.env.render("human")
        with self.assertRaises(NotImplementedError):
            self.env.render("random")
| 6,344 | 35.257143 | 85 | py |
DACBench | DACBench-main/tests/envs/test_cma.py | import unittest
import numpy as np
from dacbench import AbstractEnv
from dacbench.benchmarks.cma_benchmark import CMAES_DEFAULTS, CMAESBenchmark
class TestCMAEnv(unittest.TestCase):
    """Tests for the CMA-ES step-size adaptation environment."""

    def make_env(self):
        """Create a CMAESEnv from the default benchmark configuration."""
        bench = CMAESBenchmark()
        env = bench.get_environment()
        return env

    def _assert_default_state(self, env, state):
        """Shared key/shape checks for the default dict observation."""
        self.assertTrue(issubclass(type(state), dict))
        self.assertTrue(
            np.array_equal(
                list(state.keys()),
                [
                    "current_loc",
                    "past_deltas",
                    "current_ps",
                    "current_sigma",
                    "history_deltas",
                    "past_sigma_deltas",
                ],
            )
        )
        self.assertTrue(len(state["current_ps"]) == 1)
        self.assertTrue(len(state["current_sigma"]) == 1)
        self.assertTrue(len(state["current_loc"]) == 10)
        self.assertTrue(len(state["past_deltas"]) == env.history_len)
        self.assertTrue(len(state["past_sigma_deltas"]) == env.history_len)
        self.assertTrue(len(state["history_deltas"]) == 2 * env.history_len)

    def test_setup(self):
        """Constructor leaves run state unset and copies config values."""
        env = self.make_env()
        self.assertTrue(issubclass(type(env), AbstractEnv))
        self.assertTrue(env.fbest is None)
        self.assertTrue(env.solutions is None)
        self.assertTrue(env.b is None)
        self.assertFalse(env.get_state is None)
        self.assertTrue(env.history_len == CMAES_DEFAULTS["hist_length"])
        self.assertTrue(env.popsize == CMAES_DEFAULTS["popsize"])

    def test_reset(self):
        """reset initializes the problem and the underlying optimizer."""
        env = self.make_env()
        env.reset()
        self.assertFalse(env.fcn is None)
        self.assertFalse(env.dim is None)
        self.assertFalse(env.init_sigma is None)
        self.assertFalse(env.cur_loc is None)
        self.assertFalse(env.es is None)

    def test_step(self):
        """One step returns an in-range reward and does not end the episode."""
        env = self.make_env()
        env.reset()
        _, reward, terminated, truncated, meta = env.step([1])
        self.assertTrue(reward >= env.reward_range[0])
        # (stray debug print removed)
        self.assertTrue(reward <= env.reward_range[1])
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        self.assertTrue(len(meta.keys()) == 0)

    def test_get_default_state(self):
        """Observation keeps its dict layout after reset and after steps."""
        env = self.make_env()
        state, info = env.reset()
        self.assertTrue(issubclass(type(info), dict))
        self._assert_default_state(env, state)
        env.step([1])
        state, _, _, _, _ = env.step([1])
        self._assert_default_state(env, state)

    def test_close(self):
        """close reports success."""
        env = self.make_env()
        self.assertTrue(env.close())

    def test_render(self):
        """Only the 'human' render mode is supported."""
        env = self.make_env()
        env.render("human")
        with self.assertRaises(NotImplementedError):
            env.render("random")
| 3,546 | 33.77451 | 76 | py |
DACBench | DACBench-main/tests/envs/test_fd.py | import unittest
from dacbench import AbstractEnv
from dacbench.benchmarks.fast_downward_benchmark import FastDownwardBenchmark
class TestFDEnv(unittest.TestCase):
    """Tests for the FastDownward planning environment."""

    def make_env(self):
        """Create a FastDownwardEnv from the default benchmark."""
        return FastDownwardBenchmark().get_environment()

    def test_setup(self):
        """The environment derives from AbstractEnv."""
        self.assertTrue(issubclass(type(self.make_env()), AbstractEnv))

    def test_reset(self):
        """reset opens the socket and launches the planner process."""
        environment = self.make_env()
        environment.reset()
        self.assertIsNotNone(environment.socket)
        self.assertIsNotNone(environment.fd)

    def test_step(self):
        """One step returns an in-range reward and a 10-dim observation."""
        environment = self.make_env()
        environment.reset()
        state, reward, terminated, truncated, meta = environment.step(1)
        self.assertTrue(reward >= environment.reward_range[0])
        self.assertTrue(reward <= environment.reward_range[1])
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        self.assertEqual(len(meta.keys()), 0)
        self.assertEqual(len(state), 10)

    def test_close(self):
        """close succeeds and tears down connection and socket."""
        environment = self.make_env()
        self.assertTrue(environment.close())
        self.assertIsNone(environment.conn)
        self.assertIsNone(environment.socket)

    def test_render(self):
        """Default rendering does not raise."""
        self.make_env().render()
| 1,239 | 27.837209 | 77 | py |
DACBench | DACBench-main/tests/envs/test_deterministic.py | import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from dacbench import benchmarks, run_baselines
def assert_state_space_equal(state1, state2):
    """Assert two observations are (almost) equal, ignoring *history* keys."""
    assert type(state1) == type(state2)
    if isinstance(state1, np.ndarray):
        assert_almost_equal(state1, state2)
        return
    if isinstance(state1, dict):
        assert state1.keys() == state2.keys()
        # History entries are expected to differ and are deliberately skipped.
        comparable_keys = (k for k in state1 if "history" not in k)
        for key in comparable_keys:
            assert_almost_equal(state1[key], state2[key])
        return
    raise NotImplementedError(f"State space type {type(state1)} not comparable")
class TestDeterministic(unittest.TestCase):
    """Checks that identically seeded benchmark envs behave identically."""

    def run_deterministic_test(self, benchmark_name, seed=42):
        """Create *benchmark_name* twice with the same seed and compare one step.

        Rewards are intentionally not compared by this generic helper; only the
        FastDownward test below checks them.  TODO(review): confirm whether the
        rewards of the benchmarks routed through here should also be asserted.
        """
        bench = getattr(benchmarks, benchmark_name)()
        action = run_baselines.DISCRETE_ACTIONS[benchmark_name][0]
        env1 = bench.get_benchmark(seed=seed)
        init_state1, info1 = env1.reset()
        _, reward1, terminated1, truncated1, info1 = env1.step(action)
        env2 = bench.get_benchmark(seed=seed)
        init_state2, info2 = env2.reset()
        _, reward2, terminated2, truncated2, info2 = env2.step(action)
        assert_state_space_equal(init_state1, init_state2)
        # (duplicate info assertion removed)
        self.assertEqual(info1, info2)
        self.assertEqual(terminated1, terminated2)
        self.assertEqual(truncated1, truncated2)

    def test_LubyBenchmark(self):
        self.run_deterministic_test("LubyBenchmark")

    def test_SigmoidBenchmark(self):
        self.run_deterministic_test("SigmoidBenchmark")

    def test_FastDownwardBenchmark(self):
        """FastDownward additionally asserts post-step state and reward."""
        benchmark_name = "FastDownwardBenchmark"
        seed = 42
        bench = getattr(benchmarks, benchmark_name)()
        action = run_baselines.DISCRETE_ACTIONS[benchmark_name][0]
        env1 = bench.get_benchmark(seed=seed)
        init_state1, info1 = env1.reset()
        state1, reward1, terminated1, truncated1, info1 = env1.step(action)
        env1.close()
        env2 = bench.get_benchmark(seed=seed)
        init_state2, info2 = env2.reset()
        state2, reward2, terminated2, truncated2, info2 = env2.step(action)
        env2.close()
        assert_state_space_equal(init_state1, init_state2)
        assert_state_space_equal(state1, state2)
        self.assertEqual(reward1, reward2)
        # (duplicate info assertion removed)
        self.assertEqual(info1, info2)
        self.assertEqual(terminated1, terminated2)
        self.assertEqual(truncated1, truncated2)

    def test_CMAESBenchmark(self):
        self.run_deterministic_test("CMAESBenchmark")

    # This has no get_benchmark method
    # def test_ModeaBenchmark(self):
    #     self.run_deterministic_test("ModeaBenchmark")

    def test_SGDBenchmark(self):
        self.run_deterministic_test("SGDBenchmark")

    def test_OneLLBenchmark(self):
        ...
        # todo
        # self.run_deterministic_test("OneLLBenchmark")
| 2,971 | 33.16092 | 84 | py |
DACBench | DACBench-main/tests/envs/test_theory_env.py | import unittest
import gymnasium as gym
from dacbench.benchmarks import TheoryBenchmark
class TestTheoryEnv(unittest.TestCase):
    """Tests for the theory (RLS) benchmark environments."""

    def test_discrete_env(self):
        """Discrete variant: observation, action space, cutoff, init solution."""
        bench = TheoryBenchmark(
            config={
                "discrete_action": True,
                "action_choices": [1, 2, 4, 8],
                "instance_set_path": "lo_rls_50.csv",
            }
        )
        env = bench.get_environment()
        # check observation space
        s, _ = env.reset()  # default observation space: n, f(x)
        assert len(s) == 2
        assert s[0] == env.n
        assert s[1] == env.x.fitness
        # check action space
        assert isinstance(env.action_space, gym.spaces.Discrete)
        assert env.action_space.n == 4
        # check instance-specific cutoff time
        assert env.max_evals == int(0.8 * env.n * env.n)
        # check initial solution for various instances (loop index unused:
        # we only cycle through the instance set via reset)
        for _ in range(len(env.instance_set)):
            if env.instance.initObj != "random":
                assert int(env.x.fitness) == int(env.instance.initObj)
            env.reset()

    def test_non_discrete_env(self):
        """Continuous variant: same checks plus out-of-range action handling."""
        bench = TheoryBenchmark(
            config={
                "discrete_action": False,
                "min_action": 1,
                "max_action": 49,
                "instance_set_path": "lo_rls_50.csv",
            }
        )
        env = bench.get_environment()
        # check observation space
        s, _ = env.reset()  # default observation space: n, f(x)
        assert len(s) == 2
        assert s[0] == env.n
        assert s[1] == env.x.fitness
        # check action space
        assert isinstance(env.action_space, gym.spaces.Box)
        # check instance-specific cutoff time
        assert env.max_evals == int(0.8 * env.n * env.n)
        # check initial solution for various instances (loop index unused)
        for _ in range(len(env.instance_set)):
            if env.instance.initObj != "random":
                assert int(env.x.fitness) == int(env.instance.initObj)
            env.reset()
        # check behaviour with out-of-range action
        s, r, terminated, truncated, info = env.step(
            100
        )  # a large negative reward will be returned and the episode will end
        assert r < -1e4
        assert terminated or truncated
if __name__ == "__main__":
    # Manual entry point: invokes the two tests directly rather than via
    # unittest discovery (handy for quick local debugging).
    TestTheoryEnv().test_discrete_env()
    TestTheoryEnv().test_non_discrete_env()
| 2,430 | 30.166667 | 78 | py |
DACBench | DACBench-main/tests/envs/test_luby.py | import unittest
import numpy as np

from dacbench import AbstractEnv
from dacbench.abstract_benchmark import objdict
from dacbench.benchmarks.luby_benchmark import LUBY_DEFAULTS
from dacbench.envs import LubyEnv
class TestLubyEnv(unittest.TestCase):
    """Tests for the Luby sequence environment."""

    def make_env(self):
        """Build a LubyEnv on a single neutral instance.

        Copies LUBY_DEFAULTS first: assigning into the shared module-level
        defaults dict would leak the test's instance set into every other
        user of LUBY_DEFAULTS (same copy pattern as the SGD tests).
        """
        config = objdict(LUBY_DEFAULTS.copy())
        config["instance_set"] = {0: [1, 1]}
        env = LubyEnv(config)
        return env

    def test_setup(self):
        """Constructor wires RNG, generator and config-derived attributes."""
        env = self.make_env()
        self.assertTrue(issubclass(type(env), AbstractEnv))
        self.assertFalse(env.np_random is None)
        self.assertFalse(env._genny is None)
        self.assertFalse(env._next_goal is None)
        self.assertFalse(env._seq is None)
        self.assertTrue(env._ms == LUBY_DEFAULTS["cutoff"])
        self.assertTrue(env._mi == LUBY_DEFAULTS["min_steps"])
        self.assertTrue(env._hist_len == LUBY_DEFAULTS["hist_length"])
        self.assertTrue(env._start_shift == 0)
        self.assertTrue(env._sticky_shif == 0)

    def test_reset(self):
        """reset returns the all -1 initial history observation."""
        env = self.make_env()
        state, info = env.reset()
        self.assertTrue(issubclass(type(info), dict))
        # NOTE(review): assertTrue(x, 1) treats the 1 as the failure message,
        # so these two lines only check truthiness — likely assertEqual was
        # intended; confirm expected shift values before changing.
        self.assertTrue(env._start_shift, 1)
        self.assertTrue(env._sticky_shif, 1)
        self.assertTrue(
            np.array_equal(-1 * np.ones(LUBY_DEFAULTS["hist_length"] + 1), state)
        )

    def test_step(self):
        """Stepping yields an in-range reward and a history-shaped state."""
        env = self.make_env()
        env.reset()
        state, reward, terminated, truncated, meta = env.step(1)
        self.assertTrue(reward >= env.reward_range[0])
        self.assertTrue(reward <= env.reward_range[1])
        self.assertTrue(state[-1] == 0)
        self.assertTrue(state[0] == 1)
        self.assertTrue(np.array_equal(state[1:-1], -1 * np.ones(4)))
        self.assertTrue(len(state) == env._hist_len + 1)
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        self.assertTrue(len(meta.keys()) == 0)
        # Same checks on a shifted instance; again copy the defaults instead
        # of mutating the shared dict.
        config = objdict(LUBY_DEFAULTS.copy())
        config["instance_set"] = {1: [-4, -4]}
        env = LubyEnv(config)
        env.reset()
        state, reward, terminated, truncated, meta = env.step(1)
        self.assertTrue(reward >= env.reward_range[0])
        self.assertTrue(reward <= env.reward_range[1])
        self.assertTrue(state[-1] == 0)
        self.assertTrue(state[0] == 1)
        self.assertTrue(np.array_equal(state[1:-1], -1 * np.ones(4)))
        self.assertTrue(len(state) == env._hist_len + 1)
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        self.assertTrue(len(meta.keys()) == 0)

    def test_close(self):
        """close reports success."""
        env = self.make_env()
        self.assertTrue(env.close())

    def test_render(self):
        """Only the 'human' render mode is supported."""
        env = self.make_env()
        env.render("human")
        with self.assertRaises(NotImplementedError):
            env.render("random")
| 2,769 | 34.512821 | 81 | py |
DACBench | DACBench-main/tests/envs/test_modcma.py | import unittest
import numpy as np
from gymnasium import spaces
from dacbench import AbstractEnv
from dacbench.abstract_benchmark import objdict
from dacbench.envs import ModCMAEnv
class TestModCMAEnv(unittest.TestCase):
    """Tests for the modular CMA-ES environment."""

    def make_env(self):
        """Build a ModCMAEnv on a single hand-crafted instance."""
        settings = objdict(
            {
                "budget": 20,
                "datapath": ".",
                "threshold": 1e-8,
                "instance_set": {2: [10, 12, 0, np.ones(11)]},
                "cutoff": 10,
                "benchmark_info": None,
                "action_space": spaces.MultiDiscrete(
                    [2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3]
                ),
                "observation_space": spaces.Box(
                    low=-np.inf * np.ones(5), high=np.inf * np.ones(5)
                ),
                "reward_range": (-(10**12), 0),
            }
        )
        return ModCMAEnv(settings)

    def test_setup(self):
        """The environment derives from AbstractEnv."""
        self.assertTrue(issubclass(type(self.make_env()), AbstractEnv))

    def test_reset(self):
        """reset yields a non-None observation and an info dict."""
        environment = self.make_env()
        observation, info = environment.reset()
        self.assertTrue(issubclass(type(info), dict))
        self.assertIsNotNone(observation)

    def test_step(self):
        """One step stays in range; the episode eventually terminates."""
        environment = self.make_env()
        environment.reset()
        observation, reward, terminated, truncated, meta = environment.step(
            np.ones(11, dtype=int)
        )
        self.assertTrue(reward >= environment.reward_range[0])
        self.assertTrue(reward <= environment.reward_range[1])
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        self.assertEqual(len(meta.keys()), 0)
        self.assertEqual(len(observation), 5)
        # Run the episode to its end with random actions.
        while not (terminated or truncated):
            _, _, terminated, truncated, _ = environment.step(
                environment.action_space.sample()
            )

    def test_close(self):
        """close reports success."""
        self.assertTrue(self.make_env().close())
| 1,751 | 31.444444 | 85 | py |
DACBench | DACBench-main/tests/envs/test_geometric.py | import os
import unittest
from typing import Dict
import numpy as np
from dacbench import AbstractEnv
from dacbench.abstract_benchmark import objdict
from dacbench.benchmarks import GeometricBenchmark
from dacbench.envs import GeometricEnv
# Directory containing this test file; the render tests write output here.
FILE_PATH = os.path.dirname(__file__)

# Static default configuration for the Geometric benchmark tests below.
DEFAULTS_STATIC = objdict(
    {
        "action_space_class": "Discrete",
        "action_space_args": [],
        "observation_space_class": "Box",
        "observation_space_type": np.float32,
        "observation_space_args": [],
        "reward_range": (0, 1),
        "cutoff": 10,
        "action_values": [],
        "action_value_default": 4,
        "action_values_variable": False,  # if True action value mapping will be used
        "action_interval_mapping": {},  # maps actions to equally sized intervalls in [-1, 1] # clip function value if it is higher than this number
        "derivative_interval": 3,
        "realistic_trajectory": True,
        "instance_set_path": "../instance_sets/geometric/geometric_test.csv",
        "correlation_table": None,
        # (source dim, target dim, sign) triples grouped by correlation strength
        "correlation_info": {
            "high": [(1, 2, "+"), (2, 3, "-"), (1, 5, "+")],
            "middle": [(4, 5, "-")],
            "low": [(4, 7, "+"), (2, 3, "+"), (0, 2, "-")],
        },
        # strength name -> (lower, upper) correlation magnitude interval
        "correlation_mapping": {
            "high": (0.5, 1),
            "middle": (0.1, 0.5),
            "low": (0, 0.1),
        },
        "correlation_depth": 4,
        "correlation_active": True,
        "benchmark_info": "Hallo",
    }
)
class TestGeometricEnv(unittest.TestCase):
    """Tests for the Geometric benchmark environment and its helper functions."""

    def make_env(self, config: Dict):
        """Build a GeometricEnv; derived config pieces come from the benchmark."""
        geo_bench = GeometricBenchmark()
        geo_bench.read_instance_set()
        geo_bench.set_action_values()
        geo_bench.create_correlation_table()
        # Copy the benchmark-derived entries into the test configuration.
        config["action_interval_mapping"] = geo_bench.config.action_interval_mapping
        config["instance_set"] = geo_bench.config.instance_set
        config["action_values"] = geo_bench.config.action_values
        config["config_space"] = geo_bench.config.config_space
        config["observation_space_args"] = geo_bench.config.observation_space_args
        config["correlation_table"] = geo_bench.config.correlation_table
        config["correlation_active"] = True
        env = GeometricEnv(config)
        return env

    def test_setup(self):
        """Construction wires up RNG, step budget and action metadata."""
        env = self.make_env(DEFAULTS_STATIC)
        self.assertTrue(issubclass(type(env), AbstractEnv))
        self.assertFalse(env.np_random is None)
        self.assertTrue(env.n_steps == 10)
        self.assertTrue(env.n_actions == len(env.action_vals))
        self.assertTrue(type(env.action_interval_mapping) == dict)

    def test_reset(self):
        """reset() clears trajectories and puts the cutoff at state[0]."""
        env = self.make_env(DEFAULTS_STATIC)
        state, info = env.reset()
        self.assertTrue(state[0] == DEFAULTS_STATIC["cutoff"])
        self.assertTrue(issubclass(type(info), dict))
        self.assertFalse(env._prev_state)
        self.assertTrue(type(env.action_trajectory) == list)
        self.assertTrue(type(env.action_trajectory_set) == dict)

    def test_step(self):
        """One step decrements the remaining-steps counter and bounds the reward."""
        env = self.make_env(DEFAULTS_STATIC)
        env.reset()
        state, reward, terminated, truncated, meta = env.step(env.action_space.sample())
        self.assertTrue(reward >= env.reward_range[0])
        self.assertTrue(reward <= env.reward_range[1])
        self.assertTrue(state[0] == 9)
        self.assertTrue(type(state) == np.ndarray)
        # State layout: remaining steps + derivative + 2 entries per action dim.
        self.assertTrue(len(state) == 2 + 2 * env.n_actions)
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        self.assertTrue(len(meta.keys()) == 0)

    def test_close(self):
        """close() should report success."""
        env = self.make_env(DEFAULTS_STATIC)
        self.assertTrue(env.close())

    def test_functions(self):
        """Spot-check the analytic helper functions on known values."""
        env = self.make_env(DEFAULTS_STATIC)
        functions = env.functions
        self.assertTrue(functions._sigmoid(1, 0, 0) == 0.5)
        self.assertTrue(functions._linear(5, 2, -3) == 7)
        self.assertTrue(functions._constant(5) == 5)
        self.assertAlmostEqual(functions._logarithmic(2, 2), 1.39, places=2)
        self.assertAlmostEqual(functions._sinus(4, 0.5), 0.91, places=2)

    def test_calculate_norm_values(self):
        """Computing normalization values flips the norm_calculated flag."""
        env = self.make_env(DEFAULTS_STATIC)
        env.functions.calculate_norm_values(env.instance_set)
        self.assertTrue(env.functions.norm_calculated)

    def test_calculate_function_value(self):
        """A linear function info [2, 'linear', 1, 2] evaluates to 2.0 at t=0."""
        env = self.make_env(DEFAULTS_STATIC)
        env.functions.instance_idx = 2
        env.functions.norm_calculated = False
        function_info = [2, "linear", 1, 2]
        self.assertTrue(
            env.functions._calculate_function_value(0, function_info, 0) == 2.0
        )

    def test_calculate_derivative(self):
        """Derivatives over action trajectories of growing length are correct."""
        env = self.make_env(DEFAULTS_STATIC)
        # Single point: derivative is zero.
        trajectory1 = [np.zeros(env.n_actions)]
        self.assertTrue(
            (
                env.functions.calculate_derivative(trajectory1, env.c_step)
                == np.zeros(env.n_actions)
            ).all()
        )
        # Two points one apart: derivative is one.
        env.c_step = 1
        trajectory2 = [np.zeros(env.n_actions), np.ones(env.n_actions)]
        self.assertTrue(
            (
                env.functions.calculate_derivative(trajectory2, env.c_step)
                == np.ones(env.n_actions)
            ).all()
        )
        trajectory2 = [
            np.zeros(env.n_actions),
            np.ones(env.n_actions),
            np.ones(env.n_actions) * 2,
        ]
        env.c_step = 2
        self.assertTrue(
            (
                env.functions.calculate_derivative(trajectory2, env.c_step)
                == np.ones(env.n_actions)
            ).all()
        )
        # Longer trajectory: derivative averages over the derivative interval.
        trajectory3 = [
            np.zeros(env.n_actions),
            np.ones(env.n_actions),
            np.ones(env.n_actions) * 2,
            np.ones(env.n_actions) * 4,
            np.ones(env.n_actions) * 7,
        ]
        env.c_step = 4
        self.assertTrue(
            (
                env.functions.calculate_derivative(trajectory3, env.c_step)
                == np.ones(env.n_actions) * 2
            ).all()
        )

    def test_get_coordinates_at_time_step(self):
        """One coordinate per action dimension is returned for a time step."""
        env = self.make_env(DEFAULTS_STATIC)
        self.assertTrue(
            len(env.functions.get_coordinates_at_time_step(env.c_step)) == env.n_actions
        )

    def test_get_optimal_policy(self):
        """The optimal policy covers every step, as vectors or as scalars."""
        env = self.make_env(DEFAULTS_STATIC)
        self.assertTrue(
            (env.get_optimal_policy()).shape == (env.n_steps, env.n_actions)
        )
        self.assertTrue(len(env.get_optimal_policy(vector_action=False)) == env.n_steps)

    def test_render_dimensions(self):
        """Rendering selected dimensions writes (and we then remove) a jpg."""
        env = self.make_env(DEFAULTS_STATIC)
        dimensions = [1, 2]
        env.render(dimensions, FILE_PATH)
        fig_title = f"GeoBench-Dimensions{len(dimensions)}.jpg"
        self.assertTrue(os.path.exists(os.path.join(FILE_PATH, fig_title)))
        os.remove(os.path.join(FILE_PATH, fig_title))

    def test_render_3d_dimensions(self):
        """3D rendering writes two views; both are checked and cleaned up."""
        env = self.make_env(DEFAULTS_STATIC)
        env.render_3d_dimensions([0, 1], FILE_PATH)
        self.assertTrue(os.path.exists(os.path.join(FILE_PATH, "3D.jpg")))
        self.assertTrue(os.path.exists(os.path.join(FILE_PATH, "3D-90side.jpg")))
        os.remove(os.path.join(FILE_PATH, "3D.jpg"))
        os.remove(os.path.join(FILE_PATH, "3D-90side.jpg"))
| 7,332 | 36.035354 | 149 | py |
DACBench | DACBench-main/tests/envs/test_sigmoid.py | import unittest
from unittest import mock
import numpy as np
from dacbench import AbstractEnv
from dacbench.benchmarks.sigmoid_benchmark import SIGMOID_DEFAULTS
from dacbench.envs import SigmoidEnv
class TestSigmoidEnv(unittest.TestCase):
    """Unit tests for the Sigmoid toy environment."""

    def make_env(self):
        """Create a SigmoidEnv with a single four-component test instance."""
        config = SIGMOID_DEFAULTS
        config["instance_set"] = {20: [0, 1, 2, 3]}
        return SigmoidEnv(config)

    def test_setup(self):
        """Construction applies default shifts/slopes and the action space."""
        env = self.make_env()
        n_values = len(SIGMOID_DEFAULTS["action_values"])
        self.assertTrue(issubclass(type(env), AbstractEnv))
        self.assertFalse(env.np_random is None)
        self.assertTrue(np.array_equal(env.shifts, 5 * np.ones(n_values)))
        self.assertTrue(np.array_equal(env.slopes, -1 * np.ones(n_values)))
        self.assertTrue(env.n_actions == n_values)
        self.assertTrue(env.slope_multiplier == SIGMOID_DEFAULTS["slope_multiplier"])
        self.assertTrue(
            (env.action_space.nvec + 1 == SIGMOID_DEFAULTS["action_values"]).all()
        )

    def test_reset(self):
        """reset() loads the instance into shifts/slopes and builds the state."""
        env = self.make_env()
        state, info = env.reset()
        self.assertTrue(issubclass(type(info), dict))
        self.assertTrue(np.array_equal(env.shifts, [0, 1]))
        self.assertTrue(np.array_equal(env.slopes, [2, 3]))
        self.assertTrue(state[0] == SIGMOID_DEFAULTS["cutoff"])
        self.assertTrue(np.array_equal([state[1], state[3]], env.shifts))
        self.assertTrue(np.array_equal([state[2], state[4]], env.slopes))
        self.assertTrue(np.array_equal(state[5:], -1 * np.ones(2)))

    def test_step(self):
        """One step decrements the counter and returns a bounded reward."""
        env = self.make_env()
        env.reset()
        state, reward, terminated, truncated, meta = env.step([1, 1])
        self.assertTrue(reward >= env.reward_range[0])
        self.assertTrue(reward <= env.reward_range[1])
        self.assertTrue(state[0] == 9)
        self.assertTrue(np.array_equal([state[1], state[3]], env.shifts))
        self.assertTrue(np.array_equal([state[2], state[4]], env.slopes))
        self.assertTrue(len(state) == 7)
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        self.assertTrue(len(meta.keys()) == 0)

    def test_close(self):
        """close() should report success."""
        env = self.make_env()
        self.assertTrue(env.close())

    @mock.patch("dacbench.envs.sigmoid.plt")
    def test_render(self, mock_plt):
        """Only 'human' mode should trigger a matplotlib show() call."""
        env = self.make_env()
        env.render("random")
        self.assertFalse(mock_plt.show.called)
        env.render("human")
        self.assertTrue(mock_plt.show.called)
| 2,676 | 35.175676 | 85 | py |
DACBench | DACBench-main/tests/container/test_container_utils.py | import json
import unittest
from gymnasium.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete, Tuple
from dacbench.container.container_utils import Decoder, Encoder
class TestEncoder(unittest.TestCase):
    """Round-trip gymnasium spaces through the container JSON Encoder/Decoder."""

    def _roundtrip(self, space):
        """Serialize a space with Encoder and decode it back with Decoder."""
        payload = json.dumps(space, cls=Encoder)
        return json.loads(payload, cls=Decoder)

    def test_spaces(self):
        """Flat spaces must survive a JSON round trip unchanged."""
        flat_spaces = [
            Box(low=-1, high=1, shape=(2,)),
            Discrete(2),
            MultiDiscrete([[2, 1], [2, 1]]),
            MultiBinary([2, 2]),
        ]
        for space in flat_spaces:
            with self.subTest(msg=str(type(space)), space=space):
                self.assertEqual(space, self._roundtrip(space))

    def test_recursive_spaces(self):
        """Nested Tuple/Dict spaces must survive a JSON round trip unchanged."""
        pair_space = Tuple(
            (Box(low=-1, high=1, shape=(2,)), Box(low=-1, high=1, shape=(2,)))
        )
        nested_dict_space = Dict(
            {
                "a": Box(low=-1, high=1, shape=(2,)),
                "b": MultiBinary([2, 2]),
                "c": Tuple(
                    (Box(low=-1, high=1, shape=(2,)), Box(low=-1, high=1, shape=(2,)))
                ),
            }
        )
        for space in [pair_space, nested_dict_space]:
            with self.subTest(msg=str(type(space)), space=space):
                self.assertEqual(space, self._roundtrip(space))
| 1,542 | 34.068182 | 86 | py |
DACBench | DACBench-main/tests/agents/test_dynamic_random_agent.py | import unittest
import numpy as np
from dacbench.agents import DynamicRandomAgent
from dacbench.benchmarks import SigmoidBenchmark
from dacbench.wrappers import MultiDiscreteActionWrapper
class MyTestCase(unittest.TestCase):
    """Tests for DynamicRandomAgent on the Sigmoid benchmark.

    The rollout loop that was duplicated across the original tests is factored
    into the `_rollout` helper, and bare `assert` statements were replaced with
    unittest assertions for better failure messages.
    """

    def get_agent(self, switching_interval):
        """Return an (agent, env) pair with a deterministically seeded action space."""
        env = SigmoidBenchmark().get_benchmark()
        env = MultiDiscreteActionWrapper(env)
        env.action_space.seed(0)  # fixed seed -> reproducible sampled actions
        agent = DynamicRandomAgent(env, switching_interval=switching_interval)
        return agent, env

    def _rollout(self, agent, env, n_steps):
        """Run `agent` in `env` for `n_steps` after a reset; return the actions taken."""
        state, _ = env.reset()
        reward = 0
        actions = []
        for _ in range(n_steps):
            action = agent.act(state, reward)
            state, reward, *_ = env.step(action)
            actions.append(action)
        return actions

    def test_init(self):
        """The switching interval passed in is stored on the agent."""
        agent, _ = self.get_agent(switching_interval=4)
        self.assertEqual(agent.switching_interval, 4)

    def test_deterministic(self):
        """Two identically seeded runs must replay the same action sequence."""
        agent, env = self.get_agent(switching_interval=2)
        actions = self._rollout(agent, env, 6)
        agent, env = self.get_agent(switching_interval=2)
        actions2 = self._rollout(agent, env, 6)
        self.assertEqual(actions, actions2)

    def test_switing_interval(self):
        """Within one switching interval the agent repeats the same action."""
        # NOTE: method name keeps the historical typo ("switing") so test IDs stay stable.
        switching_interval = 3
        agent, env = self.get_agent(switching_interval)
        actions = np.array(self._rollout(agent, env, 21))
        actions = actions.reshape((-1, switching_interval))
        self.assertTrue((actions[:, 0] == actions[:, 1]).all())
| 1,788 | 29.322034 | 78 | py |
DACBench | DACBench-main/tests/wrappers/test_state_tracking_wrapper.py | import tempfile
import unittest
from itertools import groupby
from pathlib import Path
import gymnasium as gym
import numpy as np
from dacbench.agents import StaticAgent
from dacbench.benchmarks import CMAESBenchmark, LubyBenchmark
from dacbench.logger import Logger, load_logs, log2dataframe
from dacbench.runner import run_benchmark
from dacbench.wrappers import StateTrackingWrapper
class TestStateTrackingWrapper(unittest.TestCase):
    """Tests for StateTrackingWrapper logging, bookkeeping and rendering."""

    def test_box_logging(self):
        """Box observations are logged as one named column per dimension."""
        temp_dir = tempfile.TemporaryDirectory()
        seed = 0
        episodes = 10
        logger = Logger(
            output_path=Path(temp_dir.name),
            experiment_name="test_box_logging",
            step_write_frequency=None,
            episode_write_frequency=1,
        )
        bench = LubyBenchmark()
        bench.set_seed(seed)
        env = bench.get_environment()
        state_logger = logger.add_module(StateTrackingWrapper)
        wrapped = StateTrackingWrapper(env, logger=state_logger)
        agent = StaticAgent(env, 1)
        logger.set_env(env)
        run_benchmark(wrapped, agent, episodes, logger)
        state_logger.close()
        logs = load_logs(state_logger.get_logfile())
        dataframe = log2dataframe(logs, wide=True)
        # Expected wide-format columns for the Luby state description.
        sate_columns = [
            "state_Action t (current)",
            "state_Step t (current)",
            "state_Action t-1",
            "state_Action t-2",
            "state_Step t-1",
            "state_Step t-2",
        ]
        for state_column in sate_columns:
            self.assertTrue(state_column in dataframe.columns)
            self.assertTrue((~dataframe[state_column].isna()).all())
        temp_dir.cleanup()

    def test_dict_logging(self):
        """Dict observations are flattened into the expected column counts."""
        temp_dir = tempfile.TemporaryDirectory()
        seed = 0
        episodes = 2
        logger = Logger(
            output_path=Path(temp_dir.name),
            experiment_name="test_dict_logging",
            step_write_frequency=None,
            episode_write_frequency=1,
        )
        bench = CMAESBenchmark()
        bench.set_seed(seed)
        env = bench.get_environment()
        state_logger = logger.add_module(StateTrackingWrapper)
        wrapped = StateTrackingWrapper(env, logger=state_logger)
        agent = StaticAgent(env, 3.5)
        logger.set_env(env)
        run_benchmark(wrapped, agent, episodes, logger)
        state_logger.close()
        logs = load_logs(state_logger.get_logfile())
        dataframe = log2dataframe(logs, wide=False)
        # Expected number of logged columns per part of the CMA-ES state dict.
        state_parts = {
            "Loc": 10,
            "Past Deltas": 40,
            "Population Size": 1,
            "Sigma": 1,
            "History Deltas": 80,
            "Past Sigma Deltas": 40,
        }
        names = dataframe.name.unique()

        def field(name: str):
            # Column names look like "state_<part>_<idx...>"; extract <part>.
            state, field_, *idx = name.split("_")
            return field_

        parts = groupby(sorted(names), key=field)
        for part, group_members in parts:
            expected_number = state_parts[part]
            actual_number = len(list(group_members))
            self.assertEqual(expected_number, actual_number)
        temp_dir.cleanup()

    def test_init(self):
        """Construction with and without an interval initializes empty trackers."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = StateTrackingWrapper(env)
        self.assertTrue(len(wrapped.overall_states) == 0)
        self.assertTrue(wrapped.state_interval is None)
        wrapped.instance = [0]
        self.assertTrue(wrapped.instance[0] == 0)
        wrapped2 = StateTrackingWrapper(env, 10)
        self.assertTrue(len(wrapped2.overall_states) == 0)
        self.assertTrue(wrapped2.state_interval == 10)
        self.assertTrue(len(wrapped2.state_intervals) == 0)
        self.assertTrue(len(wrapped2.current_states) == 0)

    def test_step_reset(self):
        """reset() and step() record states; reset rolls the interval over."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = StateTrackingWrapper(env, 2)
        state, info = wrapped.reset()
        self.assertTrue(issubclass(type(info), dict))
        self.assertTrue(len(state) > 1)
        self.assertTrue(len(wrapped.overall_states) == 1)
        state, reward, terminated, truncated, _ = wrapped.step(1)
        self.assertTrue(len(state) > 1)
        self.assertTrue(reward <= 0)
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        self.assertTrue(len(wrapped.overall_states) == 2)
        self.assertTrue(len(wrapped.current_states) == 2)
        self.assertTrue(len(wrapped.state_intervals) == 0)
        state, _ = wrapped.reset()
        self.assertTrue(len(wrapped.overall_states) == 3)
        self.assertTrue(len(wrapped.current_states) == 1)
        self.assertTrue(len(wrapped.state_intervals) == 1)

    def test_get_states(self):
        """get_states() returns overall states and, if configured, intervals."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = StateTrackingWrapper(env)
        wrapped.reset()
        for i in range(4):
            wrapped.step(i)
        wrapped2 = StateTrackingWrapper(env, 2)
        wrapped2.reset()
        for i in range(4):
            wrapped2.step(i)
        overall_states_only = wrapped.get_states()
        overall_states, intervals = wrapped2.get_states()
        # Interval tracking must not change the overall record.
        self.assertTrue(np.array_equal(overall_states, overall_states_only))
        self.assertTrue(len(overall_states_only) == 5)
        self.assertTrue(len(overall_states_only[4]) == 6)
        self.assertTrue(len(intervals) == 3)
        self.assertTrue(len(intervals[0]) == 2)
        self.assertTrue(len(intervals[1]) == 2)
        self.assertTrue(len(intervals[2]) == 1)

    def test_rendering(self):
        """Rendering works for 1D Box and discrete spaces; others raise."""
        bench = CMAESBenchmark()
        env = bench.get_environment()
        wrapped = StateTrackingWrapper(env)
        wrapped.reset()
        # Dict observation spaces are not renderable.
        with self.assertRaises(NotImplementedError):
            wrapped.render_state_tracking()
        bench = CMAESBenchmark()

        def dummy(_):
            return [1, [2, 3]]

        # Tuple observation spaces are not renderable either.
        bench.config.state_method = dummy
        bench.config.observation_space = gym.spaces.Tuple(
            (
                gym.spaces.Discrete(2),
                gym.spaces.Box(low=np.array([-1, 1]), high=np.array([5, 5])),
            )
        )
        env = bench.get_environment()
        wrapped = StateTrackingWrapper(env)
        wrapped.reset()
        with self.assertRaises(NotImplementedError):
            wrapped.render_state_tracking()

        def dummy2(_):
            return [0.5]

        # A one-dimensional Box space renders to an RGB image.
        bench.config.state_method = dummy2
        bench.config.observation_space = gym.spaces.Box(
            low=np.array([0]), high=np.array([1])
        )
        env = bench.get_environment()
        wrapped = StateTrackingWrapper(env)
        wrapped.reset()
        wrapped.step(1)
        wrapped.step(1)
        img = wrapped.render_state_tracking()
        self.assertTrue(img.shape[-1] == 3)
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = StateTrackingWrapper(env, 2)
        wrapped.reset()
        wrapped.step(1)
        wrapped.step(1)
        img = wrapped.render_state_tracking()
        self.assertTrue(img.shape[-1] == 3)

        # Minimal stub env with a Discrete observation space.
        class discrete_obs_env:
            def __init__(self):
                self.observation_space = gym.spaces.Discrete(2)
                self.action_space = gym.spaces.Discrete(2)
                self.reward_range = (1, 2)
                self.metadata = {}

            def reset(self):
                return 1, {}

            def step(self, _):
                return 1, 1, 1, 1, {}

        env = discrete_obs_env()
        wrapped = StateTrackingWrapper(env, 2)
        wrapped.reset()
        wrapped.step(1)
        img = wrapped.render_state_tracking()
        self.assertTrue(img.shape[-1] == 3)

        # Minimal stub env with a MultiDiscrete observation space.
        class multi_discrete_obs_env:
            def __init__(self):
                self.observation_space = gym.spaces.MultiDiscrete([2, 3])
                self.action_space = gym.spaces.Discrete(2)
                self.reward_range = (1, 2)
                self.metadata = {}

            def reset(self):
                return [1, 2], {}

            def step(self, _):
                return [1, 2], 1, 1, 1, {}

        env = multi_discrete_obs_env()
        wrapped = StateTrackingWrapper(env)
        wrapped.reset()
        wrapped.step(1)
        img = wrapped.render_state_tracking()
        self.assertTrue(img.shape[-1] == 3)

        # Minimal stub env with a MultiBinary observation space.
        class multi_binary_obs_env:
            def __init__(self):
                self.observation_space = gym.spaces.MultiBinary(2)
                self.action_space = gym.spaces.Discrete(2)
                self.reward_range = (1, 2)
                self.metadata = {}

            def reset(self):
                return [1, 1], {}

            def step(self, _):
                return [1, 1], 1, 1, 1, {}

        env = multi_binary_obs_env()
        wrapped = StateTrackingWrapper(env)
        wrapped.reset()
        wrapped.step(1)
        img = wrapped.render_state_tracking()
        self.assertTrue(img.shape[-1] == 3)
| 9,003 | 31.157143 | 77 | py |
DACBench | DACBench-main/tests/wrappers/test_instance_sampling_wrapper.py | import unittest
import numpy as np
from sklearn.metrics import mutual_info_score
from dacbench.benchmarks import LubyBenchmark
from dacbench.wrappers import InstanceSamplingWrapper
class TestInstanceSamplingWrapper(unittest.TestCase):
    """Tests for InstanceSamplingWrapper construction, reset and fitting."""

    def test_init(self):
        """Constructing without a sampling function or instances must fail."""
        bench = LubyBenchmark()
        bench.config.instance_update_func = "none"
        env = bench.get_environment()
        with self.assertRaises(Exception):
            InstanceSamplingWrapper(env)

        def constant_instance():
            return [0, 0]

        wrapped = InstanceSamplingWrapper(env, sampling_function=constant_instance)
        self.assertFalse(wrapped.sampling_function is None)

    def test_reset(self):
        """reset() must draw a fresh instance from the sampling function."""
        bench = LubyBenchmark()
        bench.config.instance_update_func = "none"
        env = bench.get_environment()

        def constant_instance():
            return [1, 1]

        wrapped = InstanceSamplingWrapper(env, sampling_function=constant_instance)
        # Before reset the wrapper still holds the benchmark's own instance.
        self.assertFalse(np.array_equal(wrapped.instance, constant_instance()))
        self.assertFalse(
            np.array_equal(list(wrapped.instance_set.values())[0], constant_instance())
        )
        wrapped.reset()
        self.assertTrue(np.array_equal(wrapped.instance, constant_instance()))

    def test_fit(self):
        """Samples drawn from a fitted wrapper share information with the set."""
        bench = LubyBenchmark()
        bench.config.instance_update_func = "none"
        bench.config.instance_set_path = "../instance_sets/luby/luby_train.csv"
        bench.read_instance_set()
        instances = bench.config.instance_set
        env = bench.get_environment()
        wrapped = InstanceSamplingWrapper(env, instances=instances)
        samples = np.array([wrapped.sampling_function() for _ in range(100)])
        reference = np.array(list(instances.values()))
        # High (but not perfect) mutual information in both components.
        for column in (0, 1):
            score = mutual_info_score(reference[:, column], samples[:, column])
            self.assertTrue(score > 0.99)
            self.assertTrue(score != 1)
| 2,074 | 30.439394 | 79 | py |
DACBench | DACBench-main/tests/wrappers/test_time_tracking_wrapper.py | import tempfile
import unittest
from pathlib import Path
import numpy as np
from dacbench.agents import StaticAgent
from dacbench.benchmarks import LubyBenchmark
from dacbench.logger import Logger, load_logs, log2dataframe
from dacbench.runner import run_benchmark
from dacbench.wrappers import EpisodeTimeWrapper
class TestTimeTrackingWrapper(unittest.TestCase):
    """Tests for EpisodeTimeWrapper timing, logging and rendering."""

    def test_logging(self):
        """Step and episode durations are logged and internally consistent."""
        temp_dir = tempfile.TemporaryDirectory()
        episodes = 5
        logger = Logger(
            output_path=Path(temp_dir.name),
            experiment_name="test_logging",
        )
        bench = LubyBenchmark()
        env = bench.get_environment()
        time_logger = logger.add_module(EpisodeTimeWrapper)
        wrapped = EpisodeTimeWrapper(env, logger=time_logger)
        agent = StaticAgent(env=env, action=1)
        run_benchmark(wrapped, agent, episodes, logger)
        logger.close()
        logs = load_logs(time_logger.get_logfile())
        dataframe = log2dataframe(logs, wide=True)
        # all steps must have logged time
        self.assertTrue((~dataframe.step_duration.isna()).all())
        # each episode has a recorded time
        episodes = dataframe.groupby("episode")
        last_steps_per_episode = dataframe.iloc[episodes.step.idxmax()]
        self.assertTrue((~last_steps_per_episode.episode_duration.isna()).all())
        # episode time equals the sum of the steps in episode
        calculated_episode_times = episodes.step_duration.sum()
        recorded_episode_times = last_steps_per_episode.episode_duration
        self.assertListEqual(
            calculated_episode_times.tolist(), recorded_episode_times.tolist()
        )
        temp_dir.cleanup()

    def test_init(self):
        """Construction with and without an interval initializes empty trackers."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = EpisodeTimeWrapper(env)
        self.assertTrue(len(wrapped.overall_times) == 0)
        self.assertTrue(wrapped.time_interval is None)
        wrapped.instance = [0]
        self.assertTrue(wrapped.instance[0] == 0)
        wrapped2 = EpisodeTimeWrapper(env, 10)
        self.assertTrue(len(wrapped2.overall_times) == 0)
        self.assertTrue(wrapped2.time_interval == 10)
        self.assertTrue(len(wrapped2.time_intervals) == 0)
        self.assertTrue(len(wrapped2.current_times) == 0)

    def test_step(self):
        """Stepping records per-step times and fills interval buffers."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = EpisodeTimeWrapper(env, 10)
        state, info = wrapped.reset()
        self.assertTrue(issubclass(type(info), dict))
        self.assertTrue(len(state) > 1)
        state, reward, terminated, truncated, _ = wrapped.step(1)
        self.assertTrue(len(state) > 1)
        self.assertTrue(reward <= 0)
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        self.assertTrue(len(wrapped.all_steps) == 1)
        self.assertTrue(len(wrapped.current_step_interval) == 1)
        self.assertTrue(len(wrapped.step_intervals) == 0)
        # 20 more steps span several episodes and close an interval.
        for _ in range(20):
            wrapped.step(1)
        self.assertTrue(len(wrapped.overall_times) > 2)
        self.assertTrue(len(wrapped.time_intervals) == 1)

    def test_get_times(self):
        """get_times() returns matching overall times and interval buckets."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = EpisodeTimeWrapper(env)
        wrapped.reset()
        for i in range(5):
            wrapped.step(i)
        wrapped2 = EpisodeTimeWrapper(env, 2)
        wrapped2.reset()
        for i in range(5):
            wrapped2.step(i)
        overall_times_only, steps_only = wrapped.get_times()
        overall_times, steps, intervals, step_intervals = wrapped2.get_times()
        # Interval tracking must not change the overall timing record.
        self.assertTrue(
            np.array_equal(
                np.round(overall_times, decimals=2),
                np.round(overall_times_only, decimals=2),
            )
        )
        self.assertTrue(len(step_intervals) == 3)
        self.assertTrue(len(step_intervals[0]) == 2)
        self.assertTrue(len(step_intervals[1]) == 2)
        self.assertTrue(len(step_intervals[2]) == 1)

    def test_rendering(self):
        """Both rendering helpers must produce RGB images."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = EpisodeTimeWrapper(env, 10)
        wrapped.reset()
        for _ in range(30):
            wrapped.step(1)
        img = wrapped.render_step_time()
        self.assertTrue(img.shape[-1] == 3)
        img = wrapped.render_episode_time()
        self.assertTrue(img.shape[-1] == 3)
| 4,449 | 33.765625 | 80 | py |
DACBench | DACBench-main/tests/wrappers/test_performance_tracking_wrapper.py | import unittest
from unittest import mock
import numpy as np
from dacbench.benchmarks import LubyBenchmark
from dacbench.wrappers import PerformanceTrackingWrapper
class TestPerformanceWrapper(unittest.TestCase):
    """Tests for PerformanceTrackingWrapper bookkeeping across episodes.

    The run-episode-to-completion loop that was duplicated throughout the
    original tests is factored into the `_finish_episode` helper.
    """

    @staticmethod
    def _finish_episode(wrapped, action=1):
        """Step `wrapped` with a constant action until the current episode ends."""
        terminated, truncated = False, False
        while not (terminated or truncated):
            _, _, terminated, truncated, _ = wrapped.step(action)

    def test_init(self):
        """Construction with and without an interval initializes empty trackers."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = PerformanceTrackingWrapper(env)
        self.assertTrue(len(wrapped.overall_performance) == 0)
        self.assertTrue(wrapped.performance_interval is None)
        wrapped.instance = [0]
        self.assertTrue(wrapped.instance[0] == 0)
        wrapped2 = PerformanceTrackingWrapper(env, 10)
        self.assertTrue(len(wrapped2.overall_performance) == 0)
        self.assertTrue(wrapped2.performance_interval == 10)
        self.assertTrue(len(wrapped2.performance_intervals) == 0)
        self.assertTrue(len(wrapped2.current_performance) == 0)

    def test_step(self):
        """Episode performances and per-instance stats accumulate correctly."""
        bench = LubyBenchmark()
        bench.config.instance_set = {0: [0, 0], 1: [1, 1], 2: [3, 4], 3: [5, 6]}
        env = bench.get_environment()
        wrapped = PerformanceTrackingWrapper(env, 2)
        state, info = wrapped.reset()
        self.assertTrue(len(state) > 1)
        self.assertTrue(issubclass(type(info), dict))
        state, reward, terminated, truncated, _ = wrapped.step(1)
        self.assertTrue(len(state) > 1)
        self.assertTrue(reward <= 0)
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        # Finish the first episode: one performance entry, one instance seen.
        self._finish_episode(wrapped)
        self.assertTrue(len(wrapped.overall_performance) == 1)
        self.assertTrue(len(wrapped.performance_intervals) == 0)
        self.assertTrue(len(wrapped.current_performance) == 1)
        self.assertTrue(len(wrapped.instance_performances.keys()) == 1)
        # Two more episodes (without reset) complete the first interval of 2.
        self._finish_episode(wrapped)
        self._finish_episode(wrapped)
        self.assertTrue(len(wrapped.performance_intervals) == 1)
        self.assertTrue(len(wrapped.current_performance) == 1)
        # Each reset advances through the instance set.
        wrapped.reset()
        self._finish_episode(wrapped)
        wrapped.reset()
        self._finish_episode(wrapped)
        self.assertTrue(len(wrapped.instance_performances.keys()) == 3)
        wrapped.reset()
        self._finish_episode(wrapped)
        wrapped.reset()
        self._finish_episode(wrapped)
        self.assertTrue(len(wrapped.instance_performances.keys()) == 4)

    def test_get_performance(self):
        """get_performance() return shape matches the tracking configuration."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = PerformanceTrackingWrapper(env)
        wrapped.reset()
        self._finish_episode(wrapped)
        wrapped2 = PerformanceTrackingWrapper(env, 2)
        wrapped2.reset()
        self._finish_episode(wrapped2)
        wrapped3 = PerformanceTrackingWrapper(env, 2, track_instance_performance=False)
        wrapped3.reset()
        for i in range(5):
            wrapped3.step(i)
        wrapped4 = PerformanceTrackingWrapper(env, track_instance_performance=False)
        wrapped4.reset()
        for i in range(5):
            wrapped4.step(i)
        overall, instance_performance = wrapped.get_performance()
        overall_perf, interval_perf, instance_perf = wrapped2.get_performance()
        overall_performance_only = wrapped4.get_performance()
        overall_performance, intervals = wrapped3.get_performance()
        # Tracking options must not change the overall performance record.
        self.assertTrue(
            np.array_equal(
                np.round(overall_performance, decimals=2),
                np.round(overall_performance_only, decimals=2),
            )
        )
        self.assertTrue(
            np.array_equal(
                np.round(overall_perf, decimals=2), np.round(overall, decimals=2)
            )
        )
        self.assertTrue(len(instance_performance.keys()) == 1)
        self.assertTrue(len(list(instance_performance.values())[0]) == 1)
        self.assertTrue(len(instance_perf.keys()) == 1)
        self.assertTrue(len(list(instance_perf.values())[0]) == 1)
        self.assertTrue(len(intervals) == 1)
        self.assertTrue(len(intervals[0]) == 0)
        self.assertTrue(len(interval_perf) == 1)
        self.assertTrue(len(interval_perf[0]) == 1)

    @mock.patch("dacbench.wrappers.performance_tracking_wrapper.plt")
    def test_render(self, mock_plt):
        """Both rendering helpers must call into matplotlib's show()."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        env = PerformanceTrackingWrapper(env)
        for _ in range(10):
            env.reset()
            self._finish_episode(env)
        env.render_performance()
        self.assertTrue(mock_plt.show.called)
        env.render_instance_performance()
        self.assertTrue(mock_plt.show.called)
| 5,701 | 38.874126 | 87 | py |
DACBench | DACBench-main/tests/wrappers/test_observation_wrapper.py | import unittest
import numpy as np
from dacbench import AbstractEnv
from dacbench.benchmarks import CMAESBenchmark
from dacbench.wrappers import ObservationWrapper
class TestObservationTrackingWrapper(unittest.TestCase):
    """Tests for the ObservationWrapper dict-to-array conversion."""

    def get_test_env(self) -> AbstractEnv:
        """Return a seeded CMA-ES benchmark environment."""
        return CMAESBenchmark().get_benchmark(seed=42)

    def test_flatten(self):
        """flatten() concatenates dict values with key 'a' before 'b'."""
        wrapper = ObservationWrapper(self.get_test_env())
        observation = {"b": 0, "a": np.array([0, 1.4, 3])}
        flattened = wrapper.flatten(observation)
        np.testing.assert_array_almost_equal(flattened, np.array([0, 1.4, 3, 0]))

    def test_conversion_wrapper(self):
        """Wrapped reset/step return flat arrays matching the raw dict states."""
        action = 0.2
        raw_env = self.get_test_env()
        raw_reset_state, info = raw_env.reset()
        raw_step_state, *raw_rest = raw_env.step(action)
        self.assertIsInstance(raw_reset_state, dict)
        self.assertTrue(issubclass(type(info), dict))
        wrapper = ObservationWrapper(self.get_test_env())
        wrapped_reset_state, info = wrapper.reset()
        wrapped_step_state, *wrapped_rest = wrapper.step(action)
        self.assertIsInstance(wrapped_reset_state, np.ndarray)
        # The reward entry is skipped (may differ between the two runs).
        self.assertListEqual(raw_rest[1:], wrapped_rest[1:])
        np.testing.assert_array_equal(
            wrapper.flatten(raw_reset_state).shape, wrapped_reset_state.shape
        )
        np.testing.assert_array_equal(
            wrapper.flatten(raw_step_state).shape, wrapped_step_state.shape
        )
| 1,523 | 30.75 | 81 | py |
DACBench | DACBench-main/tests/wrappers/test_reward_noise_wrapper.py | import unittest
from dacbench.benchmarks import LubyBenchmark
from dacbench.wrappers import RewardNoiseWrapper
class TestRewardNoiseWrapper(unittest.TestCase):
    """Tests for RewardNoiseWrapper construction, stepping and attribute access.

    Leftover debug prints in `test_getters_and_setters` were removed, and
    unused assignments inside `assertRaises` blocks were dropped.
    """

    def test_init(self):
        """Valid noise specs build a noise function; invalid specs raise."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = RewardNoiseWrapper(env)
        self.assertFalse(wrapped.noise_function is None)
        # Missing or unknown distribution names must be rejected.
        with self.assertRaises(Exception):
            RewardNoiseWrapper(env, noise_dist=None)
        with self.assertRaises(Exception):
            RewardNoiseWrapper(env, noise_dist="norm")
        wrapped = RewardNoiseWrapper(env, noise_dist="normal", dist_args=[0, 0.3])
        self.assertFalse(wrapped.noise_function is None)

        def dummy():
            return 0

        wrapped = RewardNoiseWrapper(env, noise_function=dummy)
        self.assertFalse(wrapped.noise_function is None)

    def test_step(self):
        """Noised rewards must differ from the raw environment reward."""
        bench = LubyBenchmark()
        bench.config.reward_range = (-10, 10)
        env = bench.get_environment()
        env.reset()
        _, raw_reward, _, _, _ = env.step(1)
        wrapped = RewardNoiseWrapper(env)
        wrapped.reset()
        _, reward, _, _, _ = wrapped.step(1)
        self.assertTrue(reward != raw_reward)
        wrapped = RewardNoiseWrapper(env, noise_dist="normal", dist_args=[0, 0.3])
        wrapped.reset()
        env.reset()
        _, raw_reward, _, _, _ = env.step(1)
        _, reward, _, _, _ = wrapped.step(1)
        self.assertTrue(reward != raw_reward)

        def dummy():
            return 0

        # Zero noise leaves the raw reward untouched (Luby rewards are 0 or -1).
        wrapped = RewardNoiseWrapper(env, noise_function=dummy)
        wrapped.reset()
        _, reward, _, _, _ = wrapped.step(1)
        self.assertTrue(reward == 0 or reward == -1)

    def test_getters_and_setters(self):
        """Attribute access must be forwarded to the wrapped environment."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = RewardNoiseWrapper(env)
        self.assertTrue(wrapped.noise_function == getattr(wrapped, "noise_function"))
        self.assertTrue(wrapped.env == getattr(wrapped, "env"))
        self.assertTrue(wrapped.action_space == getattr(wrapped.env, "action_space"))
        self.assertTrue(
            wrapped.observation_space == getattr(wrapped.env, "observation_space")
        )
        self.assertTrue(wrapped.reward_range == getattr(wrapped.env, "reward_range"))
| 2,460 | 33.661972 | 85 | py |
DACBench | DACBench-main/tests/wrappers/test_action_tracking_wrapper.py | import tempfile
import unittest
from pathlib import Path
import gymnasium as gym
import numpy as np
import pandas as pd
from dacbench.agents import StaticAgent
from dacbench.benchmarks import (
CMAESBenchmark,
FastDownwardBenchmark,
LubyBenchmark,
ModCMABenchmark,
)
from dacbench.logger import Logger, load_logs, log2dataframe
from dacbench.runner import run_benchmark
from dacbench.wrappers import ActionFrequencyWrapper
class TestActionTrackingWrapper(unittest.TestCase):
    """Tests for ``ActionFrequencyWrapper``: logging, bookkeeping, rendering."""
    def test_logging_multi_discrete(self):
        """Multi-dimensional actions are logged with one column per dimension."""
        temp_dir = tempfile.TemporaryDirectory()
        seed = 0
        logger = Logger(
            output_path=Path(temp_dir.name),
            experiment_name="test_multi_discrete_logging",
            step_write_frequency=None,
            episode_write_frequency=1,
        )
        bench = ModCMABenchmark()
        bench.set_seed(seed)
        env = bench.get_environment()
        env.action_space.seed(seed)
        action_logger = logger.add_module(ActionFrequencyWrapper)
        wrapped = ActionFrequencyWrapper(env, logger=action_logger)
        # A static agent replays the same sampled action on every step, so
        # every logged row should contain exactly this action.
        action = env.action_space.sample()
        agent = StaticAgent(env, action)
        logger.set_env(env)
        run_benchmark(wrapped, agent, 1, logger)
        action_logger.close()
        logs = load_logs(action_logger.get_logfile())
        dataframe = log2dataframe(logs, wide=True)
        expected_actions = pd.DataFrame(
            {
                "action_0": [action[0]] * 10,
                "action_1": [action[1]] * 10,
                "action_10": [action[10]] * 10,
                "action_2": [action[2]] * 10,
                "action_3": [action[3]] * 10,
                "action_4": [action[4]] * 10,
                "action_5": [action[5]] * 10,
                "action_6": [action[6]] * 10,
                "action_7": [action[7]] * 10,
                "action_8": [action[8]] * 10,
                "action_9": [action[9]] * 10,
            }
        )
        for column in expected_actions.columns:
            # TODO: seems to be a bug here. Every so often the last action is
            # missing. Double checked: not a logging problem. Could be a
            # seeding issue.
            self.assertListEqual(
                dataframe[column].to_list()[:10],
                expected_actions[column].to_list()[:10],
                f"Column {column}",
            )
        temp_dir.cleanup()
    def test_logging_discrete(self):
        """A scalar discrete action is logged in a single ``action`` column."""
        temp_dir = tempfile.TemporaryDirectory()
        seed = 0
        logger = Logger(
            output_path=Path(temp_dir.name),
            experiment_name="test_discrete_logging",
            step_write_frequency=None,
            episode_write_frequency=1,
        )
        bench = LubyBenchmark()
        bench.set_seed(seed)
        env = bench.get_environment()
        env.action_space.seed(seed)
        action_logger = logger.add_module(ActionFrequencyWrapper)
        wrapped = ActionFrequencyWrapper(env, logger=action_logger)
        action = env.action_space.sample()
        agent = StaticAgent(env, action)
        logger.set_env(env)
        run_benchmark(wrapped, agent, 10, logger)
        action_logger.close()
        logs = load_logs(action_logger.get_logfile())
        dataframe = log2dataframe(logs, wide=True)
        # 80 identical logged actions are expected over the 10 episodes.
        expected_actions = [action] * 80
        self.assertListEqual(dataframe.action.to_list(), expected_actions)
        temp_dir.cleanup()
    def test_init(self):
        """Constructor state, with and without a tracking interval."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = ActionFrequencyWrapper(env)
        self.assertTrue(len(wrapped.overall_actions) == 0)
        self.assertTrue(wrapped.action_interval is None)
        wrapped.instance = [0]
        self.assertTrue(wrapped.instance[0] == 0)
        # Passing an interval additionally sets up per-interval bookkeeping.
        wrapped2 = ActionFrequencyWrapper(env, 10)
        self.assertTrue(len(wrapped2.overall_actions) == 0)
        self.assertTrue(wrapped2.action_interval == 10)
        self.assertTrue(len(wrapped2.action_intervals) == 0)
        self.assertTrue(len(wrapped2.current_actions) == 0)
    def test_step(self):
        """One step records the action both overall and in the open interval."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = ActionFrequencyWrapper(env, 10)
        state, info = wrapped.reset()
        self.assertTrue(issubclass(type(info), dict))
        self.assertTrue(len(state) > 1)
        state, reward, terminated, truncated, _ = wrapped.step(1)
        self.assertTrue(len(state) > 1)
        self.assertTrue(reward <= 0)
        self.assertFalse(terminated)
        self.assertFalse(truncated)
        # The single action is tracked overall and in the current interval;
        # no interval has been closed yet.
        self.assertTrue(len(wrapped.overall_actions) == 1)
        self.assertTrue(wrapped.overall_actions[0] == 1)
        self.assertTrue(len(wrapped.current_actions) == 1)
        self.assertTrue(wrapped.current_actions[0] == 1)
        self.assertTrue(len(wrapped.action_intervals) == 0)
    def test_get_actions(self):
        """get_actions returns overall actions, plus intervals when configured."""
        bench = LubyBenchmark()
        env = bench.get_environment()
        wrapped = ActionFrequencyWrapper(env)
        wrapped.reset()
        for i in range(5):
            wrapped.step(i)
        wrapped2 = ActionFrequencyWrapper(env, 2)
        wrapped2.reset()
        for i in range(5):
            wrapped2.step(i)
        overall_actions_only = wrapped.get_actions()
        overall_actions, intervals = wrapped2.get_actions()
        self.assertTrue(np.array_equal(overall_actions, overall_actions_only))
        self.assertTrue(overall_actions_only == [0, 1, 2, 3, 4])
        # Five actions with interval size 2 -> intervals [0,1], [2,3], [4].
        self.assertTrue(len(intervals) == 3)
        self.assertTrue(len(intervals[0]) == 2)
        self.assertTrue(intervals[0] == [0, 1])
        self.assertTrue(len(intervals[1]) == 2)
        self.assertTrue(intervals[1] == [2, 3])
        self.assertTrue(len(intervals[2]) == 1)
        self.assertTrue(intervals[2] == [4])
    def test_rendering(self):
        """render_action_tracking handles common spaces, rejects Dict/Tuple."""
        bench = FastDownwardBenchmark()
        env = bench.get_environment()
        wrapped = ActionFrequencyWrapper(env, 2)
        wrapped.reset()
        for _ in range(10):
            wrapped.step(1)
        img = wrapped.render_action_tracking()
        # Rendering yields an RGB image (last axis = 3 channels).
        self.assertTrue(img.shape[-1] == 3)
        bench = CMAESBenchmark()
        env = bench.get_environment()
        wrapped = ActionFrequencyWrapper(env, 2)
        wrapped.reset()
        wrapped.step(np.ones(10))
        img = wrapped.render_action_tracking()
        self.assertTrue(img.shape[-1] == 3)
        # Dict action spaces are not supported by the renderer.
        class dict_action_env:
            def __init__(self):
                self.action_space = gym.spaces.Dict(
                    {
                        "one": gym.spaces.Discrete(2),
                        "two": gym.spaces.Box(
                            low=np.array([-1, 1]), high=np.array([1, 5])
                        ),
                    }
                )
                self.observation_space = gym.spaces.Discrete(2)
                self.reward_range = (1, 2)
                self.metadata = {}
            def reset(self):
                return 1, {}
            def step(self, action):
                return 1, 1, 1, 1, {}
        env = dict_action_env()
        wrapped = ActionFrequencyWrapper(env)
        wrapped.reset()
        with self.assertRaises(NotImplementedError):
            wrapped.render_action_tracking()
        # Tuple action spaces are not supported either.
        class tuple_action_env:
            def __init__(self):
                self.action_space = gym.spaces.Tuple(
                    (
                        gym.spaces.Discrete(2),
                        gym.spaces.Box(low=np.array([-1, 1]), high=np.array([1, 5])),
                    )
                )
                self.observation_space = gym.spaces.Discrete(2)
                self.reward_range = (1, 2)
                self.metadata = {}
            def reset(self):
                return 1, {}
            def step(self, action):
                return 1, 1, 1, 1, {}
        env = tuple_action_env()
        wrapped = ActionFrequencyWrapper(env)
        wrapped.reset()
        with self.assertRaises(NotImplementedError):
            wrapped.render_action_tracking()
        # MultiDiscrete, MultiBinary and large Box spaces all render to RGB.
        class multi_discrete_action_env:
            def __init__(self):
                self.action_space = gym.spaces.MultiDiscrete([2, 3])
                self.observation_space = gym.spaces.Discrete(2)
                self.reward_range = (1, 2)
                self.metadata = {}
            def reset(self):
                return 1, {}
            def step(self, action):
                return 1, 1, 1, 1, {}
        env = multi_discrete_action_env()
        wrapped = ActionFrequencyWrapper(env, 5)
        wrapped.reset()
        for _ in range(10):
            wrapped.step([1, 2])
        img = wrapped.render_action_tracking()
        self.assertTrue(img.shape[-1] == 3)
        class multi_binary_action_env:
            def __init__(self):
                self.action_space = gym.spaces.MultiBinary(2)
                self.observation_space = gym.spaces.Discrete(2)
                self.reward_range = (1, 2)
                self.metadata = {}
            def reset(self):
                return 1, {}
            def step(self, action):
                return 1, 1, 1, 1, {}
        env = multi_binary_action_env()
        wrapped = ActionFrequencyWrapper(env)
        wrapped.reset()
        wrapped.step([1, 0])
        img = wrapped.render_action_tracking()
        self.assertTrue(img.shape[-1] == 3)
        class large_action_env:
            def __init__(self):
                self.action_space = gym.spaces.Box(low=np.zeros(15), high=np.ones(15))
                self.observation_space = gym.spaces.Discrete(2)
                self.reward_range = (1, 2)
                self.metadata = {}
            def reset(self):
                return 1, {}
            def step(self, action):
                return 1, 1, 1, 1, {}
        env = large_action_env()
        wrapped = ActionFrequencyWrapper(env)
        wrapped.reset()
        wrapped.step(0.5 * np.ones(15))
        img = wrapped.render_action_tracking()
        self.assertTrue(img.shape[-1] == 3)
| 10,052 | 32.622074 | 87 | py |
DACBench | DACBench-main/tests/wrappers/test_policy_progress_wrapper.py | import unittest
from unittest import mock
import numpy as np
from dacbench.benchmarks import SigmoidBenchmark
from dacbench.wrappers import PolicyProgressWrapper
def _sig(x, scaling, inflection):
return 1 / (1 + np.exp(-scaling * (x - inflection)))
def compute_optimal_sigmoid(instance):
    """Optimal policy for a sigmoid instance: the rounded sigmoid at steps 0..9.

    ``instance[0]`` is the inflection point, ``instance[1]`` the scaling.
    Returns a one-element list wrapping the per-step optimal values.
    """
    inflection, scaling = instance[0], instance[1]
    optimal = [np.around(_sig(step, scaling, inflection)) for step in range(10)]
    return [optimal]
class TestPolicyProgressWrapper(unittest.TestCase):
    """Tests for ``PolicyProgressWrapper`` around the Sigmoid benchmark."""
    def test_init(self):
        """Construction starts with empty progress/episode buffers."""
        bench = SigmoidBenchmark()
        bench.set_action_values((3,))
        env = bench.get_environment()
        wrapped = PolicyProgressWrapper(env, compute_optimal_sigmoid)
        self.assertTrue(len(wrapped.policy_progress) == 0)
        self.assertTrue(len(wrapped.episode) == 0)
        self.assertFalse(wrapped.compute_optimal is None)
    def test_step(self):
        """Steps are buffered per episode and flushed when the episode ends."""
        bench = SigmoidBenchmark()
        bench.set_action_values((3,))
        bench.config.instance_set = {0: [0, 0], 1: [1, 1], 2: [3, 4], 3: [5, 6]}
        env = bench.get_environment()
        wrapped = PolicyProgressWrapper(env, compute_optimal_sigmoid)
        wrapped.reset()
        action = env.action_space.sample()
        _, _, terminated, truncated, _ = wrapped.step(action)
        self.assertTrue(len(wrapped.episode) == 1)
        while not (terminated or truncated):
            _, _, terminated, truncated, _ = wrapped.step(action)
        # After the episode ends, the buffer is cleared and one progress
        # value is stored.
        self.assertTrue(len(wrapped.episode) == 0)
        self.assertTrue(len(wrapped.policy_progress) == 1)
    @mock.patch("dacbench.wrappers.policy_progress_wrapper.plt")
    def test_render(self, mock_plt):
        """render_policy_progress draws via matplotlib (mocked here)."""
        bench = SigmoidBenchmark()
        bench.set_action_values((3,))
        env = bench.get_environment()
        env = PolicyProgressWrapper(env, compute_optimal_sigmoid)
        for _ in range(2):
            terminated, truncated = False, False
            env.reset()
            while not (terminated or truncated):
                _, _, terminated, truncated, _ = env.step(env.action_space.sample())
        env.render_policy_progress()
        self.assertTrue(mock_plt.show.called)
| 2,155 | 35.542373 | 84 | py |
DACBench | DACBench-main/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import automl_sphinx_theme
from dacbench import __version__ as version
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
name = "DACBench"
copyright = "2021, Theresa Eimer, Maximilian Reimer"
author = "Theresa Eimer, Maximilian Reimer"
# The full version, including alpha/beta/rc tags
release = "01.02.2021"
# Options handed to automl_sphinx_theme.set_options below; `version` comes
# from the dacbench import at the top of this file.
options = {
    "copyright": copyright,
    "author": author,
    "version": version,
    "versions": {
        f"v{version}": "#",
    },
    "name": name,
    "html_theme_options": {
        "github_url": "https://github.com/automl/DACBench",
        "twitter_url": "https://twitter.com/automl_org?lang=de",
    },
    # Listing "extensions" explicitly overrides the theme defaults; this is
    # done here to exclude the gallery for examples.
    "extensions": ["myst_parser",
                   "sphinx.ext.autodoc",
                   "sphinx.ext.viewcode",
                   "sphinx.ext.napoleon",  # Enables Sphinx to understand NumPy-style docstrings
                   # "numpydoc",
                   "sphinx.ext.autosummary",
                   "sphinx.ext.autosectionlabel",
                   "sphinx_autodoc_typehints",
                   "sphinx.ext.doctest",
                   ]
}
automl_sphinx_theme.set_options(globals(), options) | 1,883 | 33.888889 | 89 | py |
cvnn | cvnn-master/setup.py | from setuptools import setup
import versioneer
# Runtime dependencies installed alongside the package.
requirements = [
    'tensorflow>=2.0', 'tensorflow-probability',  # tfp for the Batch Norm (covariance)
    # 'tensorflow-addons',
    'numpy', 'six', 'packaging',
    'pandas', 'scipy',  # Data
    'colorlog', 'openpyxl',  # Logging
    'tqdm'
]

# Fix: read the long description through a context manager so the file
# handle is closed promptly (the previous bare open('README.md').read()
# leaked it).  NOTE(review): the read still uses the platform default
# encoding for Python 2 compatibility (2.7 is listed in the classifiers);
# if py2 support is dropped, pass encoding='utf-8' explicitly.
with open('README.md') as readme_file:
    long_description = readme_file.read()

setup(
    name='cvnn',
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="Library to help implement a complex-valued neural network (cvnn) using tensorflow as back-end",
    license="MIT",
    author="J Agustin BARRACHINA",
    author_email='joseagustin.barra@gmail.com',
    url='https://github.com/NEGU93/cvnn',
    packages=['cvnn', 'cvnn.layers'],
    entry_points={
        'console_scripts': [
            'cvnn=cvnn.cli:cli'
        ]
    },
    install_requires=requirements,
    keywords='cvnn',
    setup_requires=['pytest-runner'],
    tests_require=['pytest', 'pytest-cov'],
    classifiers=[
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    long_description_content_type="text/markdown",
    long_description=long_description,
    extras_require={
        'plotter': ['matplotlib', 'seaborn', 'plotly', 'tikzplotlib'],
        'full': ['prettytable', 'matplotlib', 'seaborn', 'plotly', 'tikzplotlib']
    }
)
| 1,413 | 30.422222 | 112 | py |
cvnn | cvnn-master/versioneer.py |
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes.
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
  provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when then
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Instances are populated attribute-by-attribute (VCS, style,
    versionfile_source, ...) by get_config_from_root().
    """
def get_root():
    """Get the project root directory.
    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .
    Raises VersioneerBadRootError when neither the current working
    directory nor the directory of sys.argv[0] contains those files.
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    # Still not found next to either candidate root: give up with a
    # descriptive error instead of guessing.
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        err = ("Versioneer was unable to run the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(me)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        # __file__ is undefined, e.g. when run via exec(); skip the check.
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config.

    *root* is the project root directory (must contain setup.cfg).
    Returns a populated VersioneerConfig instance.
    """
    # This might raise EnvironmentError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    setup_cfg = os.path.join(root, "setup.cfg")
    # Fix: SafeConfigParser and readfp() were deprecated for years and
    # removed in Python 3.12; use ConfigParser/read_file() on Python 3 and
    # fall back to readfp() on Python 2, where read_file() does not exist.
    parser = configparser.ConfigParser()
    with open(setup_cfg, "r") as f:
        try:
            parser.read_file(f)
        except AttributeError:  # Python 2: ConfigParser has readfp() only
            parser.readfp(f)
    VCS = parser.get("versioneer", "VCS")  # mandatory
    def get(parser, name):
        # Optional keys default to None when absent.
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = get(parser, "style") or ""
    cfg.versionfile_source = get(parser, "versionfile_source")
    cfg.versionfile_build = get(parser, "versionfile_build")
    cfg.tag_prefix = get(parser, "tag_prefix")
    # A quoted empty prefix in setup.cfg means "no prefix".
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = get(parser, "parentdir_prefix")
    cfg.verbose = get(parser, "verbose")
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used by the version-discovery helpers to signal "try the next
    strategy" to their callers.
    """
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator factory that files the decorated function under
    HANDLERS[vcs][method] and returns it unchanged."""
    def decorate(f):
        """Record f as the (vcs, method) handler."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Try each name in *commands* until one launches, then run it with *args*.

    Returns a (stdout, returncode) pair.  stdout is stripped (and decoded on
    Python 3); it is None when no command could be started, or when starting
    one failed for a reason other than "not found".
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        cmdline = [candidate] + args
        display = str(cmdline)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(cmdline, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # This spelling of the command does not exist; try the next.
                continue
            if verbose:
                print("unable to run %s" % display)
                print(err)
            return None, None
    if process is None:
        # Every candidate was missing.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % display)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* line by line for the ``git_refnames``,
    ``git_full`` and ``git_date`` assignments and returns whichever of
    them were found as a dict.  An unreadable file yields an empty dict.
    """
    # Map from the assignment prefix in _version.py to the dict key the
    # captured value is stored under.
    prefixes = {
        "git_refnames =": "refnames",
        "git_full =": "full",
        "git_date =": "date",
    }
    keywords = {}
    try:
        with open(versionfile_abs, "r") as handle:
            for line in handle:
                stripped = line.strip()
                for prefix, key in prefixes.items():
                    if stripped.startswith(prefix):
                        match = re.search(r'=\s*"(.*)"', line)
                        if match:
                            keywords[key] = match.group(1)
    except EnvironmentError:
        # file missing or unreadable: return whatever we collected so far
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    *keywords* is the dict produced by git_get_keywords(); *tag_prefix* is
    stripped from matching tag names.  Returns a version-info dict, or
    raises NotThisMethod when the keywords were never expanded (i.e. this
    is not a 'git archive' tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # the export-subst substitution never ran, so these keywords carry
        # no information at all
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date.  Raises NotThisMethod when *root* is not
    under git control or the git invocations fail outright.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False on Windows needs the explicit executable names
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            # a tag matched --match but not our prefix: report, don't guess
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution.  Finishes by 'git add'-ing the
    touched files (MANIFEST.in, the version file, __init__.py if given,
    versioneer.py itself, and .gitattributes when newly written).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        # __file__ may be absent (e.g. frozen interpreters); map compiled
        # extensions back to the .py source
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # check whether .gitattributes already marks the version file
        f = open(".gitattributes", "r")
        for line in f.readlines():
            if line.strip().startswith(versionfile_source):
                if "export-subst" in line.strip().split()[1:]:
                    present = True
        f.close()
    except EnvironmentError:
        pass
    if not present:
        f = open(".gitattributes", "a+")
        f.write("%s export-subst\n" % versionfile_source)
        f.close()
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  Walk up to two directory
    levels above *root* looking for a name that starts with
    *parentdir_prefix*; the remainder of that name is the version.
    """
    tried = []
    level = 0
    while level < 3:
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {
                "version": basename[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # move one directory up
        level += 1
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present.

    Looks first for the JSON blob written by versioneer itself (with either
    LF or CRLF line endings), then falls back to a plain
    ``__version__ = "..."`` assignment.

    Raises NotThisMethod if the file is unreadable or contains neither.
    """
    try:
        with open(filename) as f:
            contents = f.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    mo = re.search(r"version_json = '''\n(.*)'''  # END VERSION_JSON",
                   contents, re.M | re.S)
    if not mo:
        mo = re.search(r"version_json = '''\r\n(.*)'''  # END VERSION_JSON",
                       contents, re.M | re.S)
    if mo:
        return json.loads(mo.group(1))
    # Fall back to a bare __version__ string (e.g. a hand-maintained file).
    # BUG FIX: the previous code called mo.group(1) here without checking
    # that the search matched, raising AttributeError instead of
    # NotThisMethod when the file contained neither form.
    mo = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if mo:
        return {"version": mo.group(1)}
    raise NotThisMethod("no version_json in _version.py")
def write_to_version_file(filename, versions):
    """Overwrite *filename* with a freshly generated _version.py.

    The file is rebuilt from SHORT_VERSION_PY with *versions* embedded as a
    JSON blob, and the new version is announced on stdout.
    """
    # remove the old file first so we never mix with stale content
    os.unlink(filename)
    blob = json.dumps(versions, sort_keys=True, indent=1,
                      separators=(",", ": "))
    with open(filename, "w") as handle:
        handle.write(SHORT_VERSION_PY % blob)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return the local-version separator: "." once a "+" is present, else "+"."""
    tag = pieces.get("closest-tag", "")
    return "." if "+" in tag else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] .  Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: describe produced only a hash
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        return tag
    return "%s.post.dev%d" % (tag, pieces["distance"])
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty.  Note that .dev0 sorts backwards (a dirty tree
    will appear "older" than the corresponding clean one), but you shouldn't
    be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
        return version
    # exception #1
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # an upstream step already failed; surface the error, not a version
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderer(pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
class VersioneerBadRootError(Exception):
    """Raised when the project root directory is unknown or missing key files."""
def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Tries, in order: expanded git-archive keywords, the generated
    _version.py file, the VCS itself ('git describe'), and finally the
    parent directory name.  Returns dict with two keys: 'version' and
    'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    # method 1: expanded keywords (covers 'git archive' tarballs)
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # method 2: a previously generated _version.py (covers sdists)
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    # method 3: ask the VCS directly (covers developer checkouts)
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # method 4: fall back to the unpacked directory's name
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}
def get_version():
    """Return just the version string for this project.

    Convenience wrapper around get_versions().
    """
    version_info = get_versions()
    return version_info["version"]
def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer.

    Returns a dict suitable for passing as ``cmdclass=`` to setup(),
    overriding "version", "build_py" and "sdist" (plus "build_exe" /
    "py2exe" when the corresponding freezing tool is loaded).
    """
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52

    cmds = {}

    # we add "version" to both distutils and setuptools
    from distutils.core import Command

    class cmd_version(Command):
        """'setup.py version': print the computed version and its details."""
        description = "report generated version string"
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version

    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    #  distutils/build -> build_py
    #  distutils/install -> distutils/build ->..
    #  setuptools/bdist_wheel -> distutils/install ->..
    #  setuptools/bdist_egg -> distutils/install_lib -> build_py
    #  setuptools/install -> bdist_egg ->..
    #  setuptools/develop -> ?
    #  pip install:
    #   copies source tree to a tempdir before running egg_info/etc
    #   if .git isn't copied too, 'git describe' will fail
    #   then does setup.py bdist_wheel, or sometimes setup.py install
    #  setup.py egg_info -> ?

    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):
        """build_py that rewrites the built _version.py with a static version."""
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py

    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        #   "version": versioneer.get_version().split("+", 1)[0],  # FILEVERSION
        #   "product_version": versioneer.get_version(),
        #   ...

        class cmd_build_exe(_build_exe):
            """build_exe that freezes a static _version.py, then restores it."""
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)

                _build_exe.run(self)
                os.unlink(target_versionfile)
                # regenerate the full template version file for the checkout
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        del cmds["build_py"]

    if 'py2exe' in sys.modules:  # py2exe enabled?
        try:
            from py2exe.distutils_buildexe import py2exe as _py2exe  # py3
        except ImportError:
            from py2exe.build_exe import py2exe as _py2exe  # py2

        class cmd_py2exe(_py2exe):
            """py2exe that freezes a static _version.py, then restores it."""
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)

                _py2exe.run(self)
                os.unlink(target_versionfile)
                # regenerate the full template version file for the checkout
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["py2exe"] = cmd_py2exe

    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):
        """sdist that embeds a static _version.py in the release tree."""
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)

        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist

    return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
    """Main VCS-independent setup function for installing Versioneer."""
    project_root = get_root()
    try:
        config = get_config_from_root(project_root)
    except (EnvironmentError, configparser.NoSectionError,
            configparser.NoOptionError) as exc:
        # No usable [versioneer] section: seed setup.cfg with a sample
        # section and tell the user what is missing.
        if isinstance(exc, (EnvironmentError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(project_root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    # Generate the runtime _version.py from the VCS-specific template.
    print(" creating %s" % config.versionfile_source)
    with open(config.versionfile_source, "w") as f:
        template = LONG_VERSION_PY[config.VCS]
        f.write(template % {"DOLLAR": "$",
                            "STYLE": config.style,
                            "TAG_PREFIX": config.tag_prefix,
                            "PARENTDIR_PREFIX": config.parentdir_prefix,
                            "VERSIONFILE_SOURCE": config.versionfile_source,
                            })
    # Make sure the package __init__.py re-exports the generated version.
    ipy = os.path.join(os.path.dirname(config.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(project_root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    simple_includes.update(line.split()[1:])
    except EnvironmentError:
        pass
    # MANIFEST.in can express includes in other ways too
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so the
    # scan above may give false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if config.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              config.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % config.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, config.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Returns the number of problems found (0 when setup.py looks correct).
    """
    # Text fragments that a correctly wired setup.py must contain.
    markers = {
        "import versioneer": "import",
        "versioneer.get_cmdclass()": "cmdclass",
        "versioneer.get_version()": "get_version",
    }
    found = set()
    uses_legacy_setters = False
    errors = 0
    with open("setup.py", "r") as f:
        for line in f:
            for needle, tag in markers.items():
                if needle in line:
                    found.add(tag)
            # Pre-setup.cfg versioneer was configured via module attributes;
            # flag that obsolete style.
            if "versioneer.VCS" in line:
                uses_legacy_setters = True
            if "versioneer.versionfile_source" in line:
                uses_legacy_setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if uses_legacy_setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1) | 68,742 | 36.667397 | 79 | py |
# === cvnn-master/debug/having_same_result_two_runs.py ===
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import os
tfds.disable_progress_bar()  # keep logs clean so two runs can be diffed


def normalize_img(image, label):
    """Normalize images: `uint8` -> `float32` in [0, 1]; labels pass through."""
    scaled = tf.cast(image, tf.float32) / 255.
    return scaled, label
def get_dataset():
    """Build the MNIST train/test pipelines (normalized, batched, prefetched)."""
    (ds_train, ds_test), ds_info = tfds.load(
        'mnist',
        split=['train', 'test'],
        shuffle_files=True,
        as_supervised=True,
        with_info=True,
    )
    # Shuffling is intentionally left out of the train pipeline so that two
    # training runs see batches in the same order and can be compared.
    ds_train = (ds_train
                .map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
                .cache()
                .batch(128)
                .prefetch(tf.data.experimental.AUTOTUNE))
    ds_test = (ds_test
               .map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
               .batch(128)
               .cache()
               .prefetch(tf.data.experimental.AUTOTUNE))
    return ds_train, ds_test
def keras_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform'):
    """Train the reference MLP and return its History.

    Model follows https://www.tensorflow.org/datasets/keras_example; the fixed
    global seed plus the injectable initializers make two calls comparable.
    """
    tf.random.set_seed(24)
    layer_stack = [
        tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
        tf.keras.layers.Dense(128, activation='relu', kernel_initializer=init1),
        tf.keras.layers.Dense(10, activation='softmax', kernel_initializer=init2),
    ]
    model = tf.keras.models.Sequential(layer_stack)
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    # shuffle=False keeps the batch order identical between runs.
    return model.fit(
        ds_train,
        epochs=6,
        validation_data=ds_test,
        verbose=verbose, shuffle=False,
    )
def test_mnist():
    """Two identical keras runs (same seed, same frozen inits) must match."""
    # Hide the GPU: CPU execution removes one source of non-determinism.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    if tf.test.gpu_device_name():
        print('GPU found')
    else:
        print("No GPU found")
    # Freeze one Glorot draw per layer so both runs start from exactly the
    # same weights.
    init = tf.keras.initializers.GlorotUniform()
    init1 = tf.constant_initializer(init((784, 128)).numpy())
    init2 = tf.constant_initializer(init((128, 10)).numpy())
    ds_train, ds_test = get_dataset()
    run_a = keras_fit(ds_train, ds_test, init1=init1, init2=init2)
    run_b = keras_fit(ds_train, ds_test, init1=init1, init2=init2)
    assert run_a.history == run_b.history, f"\n{run_a.history}\n!=\n{run_b.history}"


if __name__ == "__main__":
    test_mnist()
# === cvnn-master/debug/ComplexDense_example.py ===
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import datasets
from layers.__init__ import ComplexDense, ComplexFlatten
from pdb import set_trace
# Load MNIST and cast the images to complex64 in [0, 1] (zero imaginary part).
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images = tf.cast(train_images, tf.complex64) / 255.0
test_images = tf.cast(test_images, tf.complex64) / 255.0

# Complex-valued MLP: flatten -> 128 'relu' units -> 10-way softmax.
model = Sequential([
    ComplexFlatten(input_shape=(28, 28, 1)),
    ComplexDense(128, activation='relu', input_shape=(28, 28, 1)),
    ComplexDense(10, activation='softmax')
])
model.compile(
    loss='sparse_categorical_crossentropy',
    optimizer=Adam(0.001),
    metrics=['accuracy'],
)

# Smoke test: inspect the dtype produced by a complex forward pass.
print(model.predict(train_images[:10]).dtype)
# Training call kept for reference, disabled in this debug script:
# model.fit(
#     train_images, train_labels,
#     epochs=6,
#     validation_data=(test_images, test_labels),
# )
# === cvnn-master/debug/conv_memory_script.py ===
import sys
import tensorflow as tf
from tensorflow.keras import datasets
from time import perf_counter
import numpy as np
from pdb import set_trace
import sys
# Feature switches for the experiments below.
ENABLE_MEMORY_GROWTH = True  # https://stackoverflow.com/questions/36927607/how-can-i-solve-ran-out-of-gpu-memory-in-tensorflow
DEBUG_CONV = False
TEST_KERAS_CONV2D = False
TEST_CONV_SPEED = False
if ENABLE_MEMORY_GROWTH:
    # Ask TF to grow GPU memory on demand instead of grabbing it all upfront.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            # Memory growth must be set identically for every GPU, and
            # before any GPU has been initialized.
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            print(e)
if TEST_KERAS_CONV2D:
    # Time a single stock Conv2D forward pass over one CIFAR-10 batch.
    (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
    train_images, test_images = train_images / 255.0, test_images / 255.0  # scale pixels to [0, 1]
    t0 = perf_counter()
    conv2d = tf.keras.layers.Conv2D(1, 3, input_shape=(32, 32, 3))
    k_out = conv2d(train_images[:32].astype(np.float32))
    t1 = perf_counter()
    # Historical timings: ~3.7-13.1s without memory growth, ~1.2-4.1s with it.
    print("Computing time was {} seconds".format(t1 - t0))
    sys.exit()
class Dense:
    """Fully connected layer: ``activation(inputs @ w + b)``."""

    def __init__(self, output_size, input_size, activation=tf.keras.activations.relu):
        """
        :param output_size: number of units
        :param input_size: number of input features per sample
        :param activation: element-wise activation applied to the affine output
        """
        # Glorot-uniform weights and zero bias (same scheme keras uses).
        self.w = tf.Variable(tf.keras.initializers.GlorotUniform()(shape=(input_size, output_size)))
        self.b = tf.Variable(tf.keras.initializers.Zeros()(shape=output_size))
        self.activation = activation

    def __call__(self, inputs):
        # Proper dunder instead of the old `self.__class__.__call__ = self.call`
        # assignment in __init__, which mutated the class on every instantiation.
        return self.call(inputs)

    def call(self, inputs):
        """Forward pass for a batch of row-vector inputs."""
        return self.activation(tf.add(tf.matmul(inputs, self.w), self.b))

    def trainable_variables(self):
        """Return the parameters updated by the training loop."""
        return [self.w, self.b]
class Flatten:
    """Reshape (batch, *input_size) tensors into (batch, prod(input_size))."""

    def __init__(self, input_size):
        """:param input_size: per-sample shape, without the batch axis."""
        self.input_size = input_size
        # Total number of features per sample after flattening.
        self.output_size = np.prod(self.input_size)

    def __call__(self, inputs):
        # Proper dunder instead of the old `self.__class__.__call__ = self.call`
        # assignment in __init__, which mutated the class on every instantiation.
        return self.call(inputs)

    def call(self, inputs):
        """Collapse all non-batch axes of `inputs` into a single axis."""
        return tf.reshape(inputs, (inputs.shape[0], self.output_size))

    def trainable_variables(self):
        """Flatten has no parameters."""
        return []
class ConvND:
    """Naive N-D convolution layer used to reproduce a GPU memory blow-up.

    The forward pass deliberately scatter-writes each output element through
    a full-size mask tensor (see `call`) so that gradients can flow; this is
    the memory behaviour this debug script was written to study.
    """

    def __init__(self, kernels, input_size, kernel_shape=(3, 3), padding=0, stride=1,
                 activation=tf.keras.activations.linear):
        """
        :param kernels: number of filters (output channels)
        :param input_size: input shape WITHOUT the batch axis, channels last
        :param kernel_shape: int or tuple, spatial extent of each filter
        :param padding: int or tuple, zeros added on each spatial border
        :param stride: int or tuple, step of the sliding window
        :param activation: applied to the whole output tensor
        """
        self.filters = kernels
        self.input_size = input_size
        self.activation = activation
        self._calculate_shapes(kernel_shape, padding, stride)
        # NOTE(review): class-level attribute shared by ALL ConvND layers; it
        # records the output size of the most recently constructed instance.
        self.__class__.last_layer_output_size = self.output_size
        self._init_kernel()

    def __call__(self, inputs):
        # Proper dunder instead of the old `self.__class__.__call__ = self.call`
        # assignment in __init__, which mutated the class on every instantiation.
        return self.call(inputs)

    def _init_kernel(self):
        """Create one Glorot-initialised kernel per filter plus a zero bias."""
        self.kernels = []
        this_shape = self.kernel_shape + (self.input_size[-1],)
        for _ in range(self.filters):
            self.kernels.append(tf.Variable(tf.keras.initializers.GlorotUniform()(shape=this_shape)))
        self.bias = tf.Variable(tf.keras.initializers.Zeros()(shape=self.filters))

    @staticmethod
    def _as_tuple(value, rank, what):
        """Normalize an int/tuple/list hyper-parameter to a tuple of length `rank`.

        Exits the process (as the original triplicated branches did) when the
        value is of an unsupported type.
        """
        if isinstance(value, int):
            return (value,) * rank
        if isinstance(value, (tuple, list)):
            return tuple(value)
        print(what + ": " + str(value) + " format not supported. It must be an int or a tuple")
        sys.exit(-1)

    def _calculate_shapes(self, kernel_shape, padding, stride):
        """Resolve kernel/padding/stride tuples and derive the output shape.

        Per spatial dimension (eq. 2.4 of https://arxiv.org/abs/1603.07285):
            out = floor((in + 2*pad - kernel) / stride) + 1
        """
        rank = len(self.input_size) - 1  # last axis is the channel axis
        self.kernel_shape = self._as_tuple(kernel_shape, rank, "Kernel shape")
        self.padding_shape = self._as_tuple(padding, rank, "Padding")
        self.stride_shape = self._as_tuple(stride, rank, "stride")
        out_list = [
            int(np.floor(
                (self.input_size[i] + 2 * self.padding_shape[i] - self.kernel_shape[i]) / self.stride_shape[i]
            ) + 1)
            for i in range(rank)
        ]
        out_list.append(self.filters)  # new channel count == number of filters
        self.output_size = tuple(out_list)
        return self.output_size

    def trainable_variables(self):
        """Return the parameters updated by the training loop."""
        return self.kernels + [self.bias]

    # @tf.function  # kept disabled: tracing this loop made failures worse (see logs below)
    def call(self, inputs):
        """Forward pass: explicit sliding-window convolution.

        Each output element is written through a freshly allocated full-size
        mask Variable so the GradientTape can differentiate through the
        update (plain numpy item assignment would break the tape). This is
        hugely wasteful on purpose — it is the OOM source being debugged.
        """
        inputs = self.apply_padding(inputs)  # add zeros if needed
        output_np = np.zeros(  # np used because tf tensors don't support item assignment
            (inputs.shape[0],) +  # per each image
            self.output_size, dtype=np.float32
        )
        img_index = 0
        progbar = tf.keras.utils.Progbar(inputs.shape[0])
        for image in inputs:
            for filter_index in range(self.filters):
                for i in range(int(np.prod(self.output_size[:-1]))):  # each output element
                    index = np.unravel_index(i, self.output_size[:-1])
                    start_index = tuple(a * b for a, b in zip(index, self.stride_shape))
                    end_index = tuple(a + b for a, b in zip(start_index, self.kernel_shape))
                    sector_slice = tuple(
                        slice(start_index[ind], end_index[ind]) for ind in range(len(start_index))
                    )
                    sector = image[sector_slice]
                    new_value = tf.reduce_sum(sector * self.kernels[filter_index]) + self.bias[filter_index]
                    indices = (img_index,) + index + (filter_index,)
                    # Differentiable "scatter": one full-size mask per element.
                    mask = tf.Variable(tf.fill(output_np.shape, 1))
                    mask = mask[indices].assign(0)
                    mask = tf.cast(mask, dtype=np.float32)
                    output_np = output_np * mask + (1 - mask) * new_value
            img_index += 1
            progbar.update(img_index)
        output = self.activation(output_np)
        return output

    def apply_padding(self, inputs):
        """Zero-pad the spatial axes by `padding_shape` on each side."""
        pad = [[0, 0]]  # no padding over the batch axis
        for p in self.padding_shape:
            pad.append([p, p])
        pad.append([0, 0])  # no padding over the channel axis
        return tf.pad(inputs, tf.constant(pad), "CONSTANT", 0)
# Test conv works: https://www.analyticsvidhya.com/blog/2018/12/guide-convolutional-neural-network-cnn/
# set_trace()
# Prepare to test conv layers
"""
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 425.31 Driver Version: 425.31 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 GeForce GT 735M WDDM | 00000000:01:00.0 N/A | N/A |
| N/A 58C P0 N/A / N/A | 37MiB / 1024MiB | N/A Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
|=============================================================================|
| 0 Not Supported |
+-----------------------------------------------------------------------------+
"""
if DEBUG_CONV:
    # Sanity-check the hand-rolled convolution against the worked example in
    # https://www.analyticsvidhya.com/blog/2018/12/guide-convolutional-neural-network-cnn/
    img1 = np.array([
        [3, 0, 1, 2, 7, 4],
        [1, 5, 8, 9, 3, 1],
        [2, 7, 2, 5, 1, 3],
        [0, 1, 3, 1, 7, 8],
        [4, 2, 1, 6, 2, 8],
        [2, 4, 5, 2, 3, 9],
    ]).astype(np.float32).reshape(1, 6, 6, 1)
    img2 = np.array([
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
    ]).astype(np.float32).reshape(1, 6, 6, 1)
    conv = ConvND(1, kernel_shape=(3, 3), input_size=(6, 6, 1), padding=0)
    # Replace the random init with a vertical-edge detection kernel.
    conv.kernels[0] = np.array([
        [1, 0, -1],
        [1, 0, -1],
        [1, 0, -1],
    ]).reshape(3, 3, 1)
    out1 = conv(img1)
    out2 = conv(img2)
    print(out1[0, ..., 0])
    print(out2[0, ..., 0])
# conv tested
"""
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 425.31 Driver Version: 425.31 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 GeForce GT 735M WDDM | 00000000:01:00.0 N/A | N/A |
| N/A 58C P0 N/A / N/A | 110MiB / 1024MiB | N/A Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
|=============================================================================|
| 0 Not Supported |
+-----------------------------------------------------------------------------+
"""
class Model:
    """Minimal sequential model with a hand-written SGD training loop."""

    def __init__(self, shape):
        """:param shape: ordered list of layer objects exposing call() and trainable_variables()."""
        self.shape = shape

    def __call__(self, x):
        # Proper dunder instead of the old `self.__class__.__call__ = self.call`
        # assignment in __init__, which mutated the class on every instantiation.
        return self.call(x)

    def call(self, x):
        """Apply every layer in order and return the final activations."""
        for layer in self.shape:
            x = layer.call(x)
        return x

    def fit(self, x, y, epochs=10, batch_size=32, learning_rate=0.01):
        """Train with plain SGD over (x, y), one progress bar per epoch."""
        train_dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size=batch_size)
        num_tr_iter = int(x.shape[0] / batch_size)
        for epoch in range(epochs):
            iteration = 0
            tf.print("\nEpoch {0}/{1}".format(epoch + 1, epochs))
            progbar = tf.keras.utils.Progbar(num_tr_iter)
            for x_batch, y_batch in train_dataset.prefetch(tf.data.experimental.AUTOTUNE).cache():
                progbar.update(iteration)
                iteration += 1
                self._train_step(x_batch, y_batch, learning_rate)

    def _apply_loss(self, y_true, y_pred):
        """Mean categorical cross-entropy over the batch."""
        return tf.reduce_mean(input_tensor=tf.keras.losses.categorical_crossentropy(y_true, y_pred))

    @tf.function  # much faster, but breaks set_trace and print (use tf.print)
    def _train_step(self, x_train_batch, y_train_batch, learning_rate):
        """One forward/backward pass plus a manual SGD update."""
        # Only the forward pass and the loss need to be recorded on the tape;
        # gradient computation and the update happen after the tape closes.
        with tf.GradientTape() as tape:
            with tf.name_scope("Forward_Phase"):
                tf.print("Forward mode")
                x_called = self.call(x_train_batch)
            with tf.name_scope("Loss"):
                tf.print("Compute loss")
                current_loss = self._apply_loss(y_train_batch, x_called)
        with tf.name_scope("Gradient"):
            tf.print("Get trainable variables")
            variables = []
            for lay in self.shape:
                variables.extend(lay.trainable_variables())
            tf.print("Compute gradients")
            gradients = tape.gradient(current_loss, variables)
            assert all(g is not None for g in gradients)
        with tf.name_scope("Optimizer"):
            tf.print("Assign values")
            # Vanilla SGD: w <- w - lr * dL/dw
            for i, val in enumerate(variables):
                val.assign(val - learning_rate * gradients[i])
# Prepare the CIFAR-10 dataset, with pixel values scaled to [0, 1].
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
print(train_images.shape)
if TEST_CONV_SPEED:
    # Time the hand-rolled ConvND forward pass on a single 32-image batch.
    tic = perf_counter()
    conv_layer = ConvND(1, kernel_shape=(3, 3), input_size=(32, 32, 3))
    out = conv_layer(train_images[:32].astype(np.float32))
    toc = perf_counter()
    print("Computing time was {} seconds".format(toc - tic))
"""
I sometimes have:
Failed to initialize GPU device #0: unknown error
with @tf.function decorator I have the error:
Failed to initialize GPU device #0: unknown error
2020-06-23 19:11:09.754024: F .\tensorflow/core/kernels/random_op_gpu.h:227] Non-OK-status: GpuLaunchKernel(FillPhiloxRandomKernelLaunch<Distribution>, num_blocks, block_size, 0, d.stream(), gen, data, size, dist) status: Internal: invalid configuration argument
or
Traceback (most recent call last):
TypeError: in converted code:
TypeError: tf__call() takes 2 positional arguments but 3 were given
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 425.31 Driver Version: 425.31 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 GeForce GT 735M WDDM | 00000000:01:00.0 N/A | N/A |
| N/A 65C P0 N/A / N/A | 112MiB / 1024MiB | N/A Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
|=============================================================================|
| 0 Not Supported |
+-----------------------------------------------------------------------------+
"""
# Assemble the toy network: ConvND -> Flatten -> Dense(64, relu) -> Dense(10, softmax).
model_layers = [
    ConvND(1, kernel_shape=(3, 3), input_size=(32, 32, 3)),
    Flatten((30, 30, 1)),
    Dense(64, activation=tf.keras.activations.relu, input_size=900),
    Dense(10, input_size=64, activation=tf.keras.activations.softmax),
]
model = Model(model_layers)
# Train on a 1000-image subset; this is enough to reproduce the OOM trace below.
model.fit(train_images[:1000].astype(np.float32), train_labels[:1000].astype(np.float32),
          epochs=5, batch_size=32)
"""
Epoch 1/5
0/31 [..............................] - ETA: 0sForward mode
1/32 [..............................] - ETA: 10:372020-06-23 19:38:17.893582: W tensorflow/core/common_runtime/bfc_allocator.cc:419] Allocator (GPU_0_bfc) ran out of memory trying to allocate 112.5KiB (rounded to 115200). Current allocation summary follows.
2020-06-23 19:38:17.930516: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (256): Total Chunks: 3940, Chunks in use: 3940. 985.0KiB allocated for chunks. 985.0KiB in use in bin. 215.6KiB client-requested in use in bin.
2020-06-23 19:38:17.977848: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (512): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.017997: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (1024): Total Chunks: 1, Chunks in use: 1. 1.3KiB allocated for chunks. 1.3KiB in use in bin. 1.0KiB client-requested in use in bin.
2020-06-23 19:38:18.058818: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (2048): Total Chunks: 1, Chunks in use: 1. 2.5KiB allocated for chunks. 2.5KiB in use in bin. 2.5KiB client-requested in use in bin.
2020-06-23 19:38:18.100510: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (4096): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.141216: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (8192): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.176846: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (16384): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.207463: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (32768): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.242445: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (65536): Total Chunks: 4908, Chunks in use: 4908. 539.22MiB allocated for chunks. 539.22MiB in use in bin. 539.21MiB client-requested in use in bin.
2020-06-23 19:38:18.280173: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (131072): Total Chunks: 8, Chunks in use: 8. 1.40MiB allocated for chunks. 1.40MiB in use in bin. 1012.5KiB client-requested in use in bin.
2020-06-23 19:38:18.320035: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (262144): Total Chunks: 1, Chunks in use: 1. 450.3KiB allocated for chunks. 450.3KiB in use in bin. 384.0KiB client-requested in use in bin.
2020-06-23 19:38:18.376468: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (524288): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.413848: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (1048576): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.450537: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (2097152): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.486643: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (4194304): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.523049: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (8388608): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.554221: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (16777216): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.590211: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (33554432): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.625964: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (67108864): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.664359: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (134217728): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.701712: I tensorflow/core/common_runtime/bfc_allocator.cc:869] Bin (268435456): Total Chunks: 0, Chunks in use: 0. 0B allocated for chunks. 0B in use in bin. 0B client-requested in use in bin.
2020-06-23 19:38:18.743990: I tensorflow/core/common_runtime/bfc_allocator.cc:885] Bin for 112.5KiB was 64.0KiB, Chunk State:
2020-06-23 19:38:18.760060: I tensorflow/core/common_runtime/bfc_allocator.cc:898] Next region of size 1048576
2020-06-23 19:38:18.778819: I tensorflow/core/common_runtime/bfc_allocator.cc:905] InUse at 0000000600F80000 next 1 of size 1280
2020-06-23 19:38:18.797981: I tensorflow/core/common_runtime/bfc_allocator.cc:905] InUse at 0000000600F80500 next 2 of size 256
.... A very long repetition of this message
2020-06-23 19:42:42.059088: I tensorflow/core/common_runtime/bfc_allocator.cc:905] InUse at 0000000622D51200 next 8858 of size 256
2020-06-23 19:42:42.077451: I tensorflow/core/common_runtime/bfc_allocator.cc:905] InUse at 0000000622D51300 next 18446744073709551615 of size 214528
2020-06-23 19:42:42.097273: I tensorflow/core/common_runtime/bfc_allocator.cc:914] Summary of in-use Chunks by size:
2020-06-23 19:42:42.115466: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 3940 Chunks of size 256 totalling 985.0KiB
2020-06-23 19:42:42.132905: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 1280 totalling 1.3KiB
2020-06-23 19:42:42.149939: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 2560 totalling 2.5KiB
2020-06-23 19:42:42.167957: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 4905 Chunks of size 115200 totalling 538.88MiB
2020-06-23 19:42:42.185877: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 116224 totalling 113.5KiB
2020-06-23 19:42:42.203308: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 117504 totalling 114.8KiB
2020-06-23 19:42:42.222310: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 119296 totalling 116.5KiB
2020-06-23 19:42:42.244583: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 138752 totalling 135.5KiB
2020-06-23 19:42:42.268096: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 155392 totalling 151.8KiB
2020-06-23 19:42:42.288503: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 158720 totalling 155.0KiB
2020-06-23 19:42:42.307893: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 173312 totalling 169.3KiB
2020-06-23 19:42:42.333169: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 195072 totalling 190.5KiB
2020-06-23 19:42:42.355288: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 202240 totalling 197.5KiB
2020-06-23 19:42:42.376818: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 214528 totalling 209.5KiB
2020-06-23 19:42:42.400296: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 230400 totalling 225.0KiB
2020-06-23 19:42:42.431019: I tensorflow/core/common_runtime/bfc_allocator.cc:917] 1 Chunks of size 461056 totalling 450.3KiB
2020-06-23 19:42:42.449943: I tensorflow/core/common_runtime/bfc_allocator.cc:921] Sum Total of in-use chunks: 542.02MiB
2020-06-23 19:42:42.473861: I tensorflow/core/common_runtime/bfc_allocator.cc:923] total_region_allocated_bytes_: 568350976 memory_limit_: 568351129 available bytes: 153 curr_region_allocation_bytes_: 1073741824
2020-06-23 19:42:42.505584: I tensorflow/core/common_runtime/bfc_allocator.cc:929] Stats:
Limit: 568351129
InUse: 568350976
MaxInUse: 568350976
NumAllocs: 12827
MaxAllocSize: 461056
2020-06-23 19:42:42.543755: W tensorflow/core/common_runtime/bfc_allocator.cc:424]
2020-06-23 19:42:42.572182: W tensorflow/core/framework/op_kernel.cc:1622] OP_REQUIRES failed at cast_op.cc:109 : Resource exhausted: OOM when allocating tensor with shape[32,30,30,1] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
2020-06-23 19:42:42.620366: W tensorflow/core/kernels/data/cache_dataset_ops.cc:820] The calling iterator did not fully read the dataset being cached. In order to avoid unexpected truncation of the dataset, the partially cached contents of the dataset will be discarded. This can happen if you have an input pipeline similar to dataset.cache().take(k).repeat(). You should use dataset.take(k).cache().repeat() instead.
tensorflow.python.framework.errors_impl.ResourceExhaustedError: OOM when allocating tensor with shape[32,30,30,1] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc [Op:Cast] name: Forward_Phase/Cast/
""" | 25,639 | 61.689487 | 418 | py |
# === cvnn-master/debug/mwe_testing_learning_algo.py ===
import tensorflow as tf
import numpy as np
from pdb import set_trace
BATCH_SIZE = 10
def get_dataset():
    """Return the Fashion-MNIST splits as ((train_x, train_y), (test_x, test_y))."""
    # load_data() already yields the exact tuple structure the callers expect.
    return tf.keras.datasets.fashion_mnist.load_data()
def get_model(init1='glorot_uniform', init2='glorot_uniform'):
    """Build and compile a small dense classifier for 28x28 inputs.

    :param init1: kernel initializer for the hidden Dense(128) layer.
    :param init2: kernel initializer for the output Dense(10) layer.
    :return: compiled tf.keras.Sequential model.
    """
    tf.random.set_seed(1)  # fixed seed so repeated builds start identically
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu', kernel_initializer=init1),
        tf.keras.layers.Dense(10, kernel_initializer=init2)
    ])
    # NOTE(review): the output layer has no softmax, so it emits logits, yet
    # from_logits=False is used here and in train() — consistent within this
    # MWE but worth confirming it is intentional.
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
                  metrics=['accuracy'])
    return model
def train(model, x_fit, y_fit):
    """Run one epoch of fit() while dumping weights, loss and gradients to .npy files.

    Saves, in order: weights before training, the per-sample loss, the gradients
    computed by a manual tape (before fit runs), and the weights after one epoch.
    """
    np.save("initial_weights.npy", np.array(model.get_weights()))
    with tf.GradientTape() as g:
        y_pred = model(x_fit)
        loss = tf.keras.losses.categorical_crossentropy(y_pred=y_pred, y_true=y_fit)
    np.save("loss.npy", np.array(loss))
    gradients = g.gradient(loss, model.trainable_weights)
    np.save("gradients.npy", np.array(gradients))
    # One epoch over a single batch == exactly one optimizer step.
    model.fit(x_fit, y_fit, epochs=1, batch_size=BATCH_SIZE)
    np.save("final_weights.npy", np.array(model.get_weights()))
if __name__ == "__main__":
    # Reproduce one training step, then check the saved gradients against the
    # weight update (w_init - w_final) * BATCH_SIZE / 0.01 — assumes plain SGD
    # with lr=0.01 and mean reduction over the batch; confirm against get_model().
    (train_images, train_labels), (test_images, test_labels) = get_dataset()
    model = get_model()
    # One-hot encode the first BATCH_SIZE labels.
    y_fit = np.zeros((BATCH_SIZE, 10))
    for i, val in enumerate(train_labels[:BATCH_SIZE]):
        y_fit[i][val] = 1.
    train(model, train_images[:BATCH_SIZE], y_fit)
    results = {
        "loss": np.load("loss.npy", allow_pickle=True),
        "init_weights": np.load("initial_weights.npy", allow_pickle=True),
        "gradients": np.load("gradients.npy", allow_pickle=True),
        "final_weights": np.load("final_weights.npy", allow_pickle=True)
    }
    for i_w, f_w, gr in zip(results["init_weights"], results["final_weights"], results["gradients"]):
        gr = gr.numpy()  # entries were pickled EagerTensors
        print(np.allclose(gr, (i_w - f_w) * BATCH_SIZE / 0.01))
| 2,133 | 37.8 | 101 | py |
cvnn | cvnn-master/debug/testing_learning_algo.py | import numpy as np
from pathlib import Path
from pdb import set_trace
# Offline check of a previous Monte-Carlo run: verify saved gradients against
# the observed weight update of the complex network.
path = Path("/home/barrachina/Documents/cvnn/log/montecarlo/2021/03March/10Wednesday/run-15h12m52")
# init_weight = np.load(path / "initial_weights.npy", allow_pickle=True)
# Index 0 feeds complex_dict, index 1 real_dict — presumably (complex, real)
# model order; verify against the code that wrote this file.
init_debug_weight = np.load(path / "initial_debug_weights.npy", allow_pickle=True)
complex_dict = {
    "init_weights": np.array(init_debug_weight[0]),
    "gradients": np.load(path / "run/iteration0_model0_complex_network/gradients.npy", allow_pickle=True),
    "final_weights": np.load(path / "run/iteration0_model0_complex_network/final_weights.npy", allow_pickle=True)
}
real_dict = {
    "init_weights": init_debug_weight[1]
}
assert len(complex_dict["init_weights"]) == len(complex_dict["gradients"]) == len(complex_dict["final_weights"])
# gradient == (w_init - w_final) / 0.01 — assumes SGD with lr=0.01; confirm.
for i_w, f_w, gr in zip(complex_dict["init_weights"], complex_dict["final_weights"], complex_dict["gradients"]):
    gr = gr.numpy()
    print(np.allclose(gr, (i_w - f_w) / 0.01))
set_trace()
| 990 | 37.115385 | 113 | py |
cvnn | cvnn-master/debug/monte_carlo_tests.py | from cvnn.montecarlo import MonteCarlo
import tensorflow as tf
import layers.__init__ as layers
import numpy as np
# Compare a cvnn (float32) model against an equivalent plain-Keras model on
# Fashion-MNIST via the MonteCarlo runner; both models share the same data.
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# cvnn model restricted to float32 to mirror the Keras baseline below.
own_model = tf.keras.Sequential([
    layers.ComplexFlatten(input_shape=(28, 28)),
    layers.ComplexDense(128, activation='cart_relu', dtype=np.float32),
    layers.ComplexDense(10, dtype=np.float32)
], name="own_model")
own_model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
keras_model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10)
], name="keras_model")
keras_model.compile(optimizer='adam',
                    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                    metrics=['accuracy'])
monte_carlo = MonteCarlo()
monte_carlo.add_model(own_model)
monte_carlo.add_model(keras_model)
monte_carlo.run(x=train_images, y=train_labels, validation_data=(test_images, test_labels), iterations=2)
monte_carlo.monte_carlo_analyzer.do_all()  # generate all analysis outputs for the run
| 1,245 | 34.6 | 105 | py |
cvnn | cvnn-master/debug/gradient_tape_complex.py | import tensorflow as tf
# https://www.tensorflow.org/guide/autodiff
# Minimal check: gradient of the real scalar y = |sum(x)|^2 with respect to a
# complex variable x (the gradient TF returns is itself complex).
x = tf.Variable(tf.complex([2., 2.], [2., 2.]))
with tf.GradientTape() as tape:
    y = tf.abs(tf.reduce_sum(x))**2
    print(y)
dy_dx = tape.gradient(y, x)
print(dy_dx)
| 241 | 19.166667 | 47 | py |
cvnn | cvnn-master/debug/fft_testing.py | import tensorflow as tf
import numpy as np
from layers.__init__ import Convolutional
from pdb import set_trace
import sys
from scipy import signal
from scipy import linalg
COMPARE_TF_AND_NP = False
TWO_DIM_TEST = True
ONE_DIM_TEST = False
STACKOVERFLOW_EXAMPLE = False
if COMPARE_TF_AND_NP:
    # Results are not exactly the same (but fair enough)
    # Compare tf.signal.fft against np.fft.fft on a long complex ramp; the
    # final print is expected to be False due to small numeric differences.
    aaa = np.linspace(1.0, 10000.0, 10000)
    x = aaa + 1j * aaa
    x_tensor = tf.convert_to_tensor(x)
    tf_fft = tf.signal.fft(x_tensor)
    np_fft = np.fft.fft(x)
    print(tf_fft.dtype)
    print(np.all(tf_fft.numpy() == np_fft))
    set_trace()
if ONE_DIM_TEST:
    # 1-D convolution of b (len 10) with c (len 3) via FFT: zero-pad both to
    # the full-convolution length 12, multiply spectra, inverse-transform.
    b = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    c = [1, 0, 1]
    # conv = Convolutional(1, (3,), (10, 1), padding=2, input_dtype=np.float32)
    # conv.kernels = []
    # conv.kernels.append(tf.reshape(tf.cast(tf.Variable(c, name="kernel" + str(0) + "_f" + str(0)),
    #                                        dtype=np.float32), (3, 1)))
    # std_out = conv([b])[..., 0]
    b_pad = tf.cast(tf.pad(b, tf.constant([[0, 2]])), tf.complex64)
    I = tf.signal.fft(tf.cast(b_pad, tf.complex64))
    paddings = tf.constant([[0, 9]])
    c_pad = tf.cast(tf.pad(c, paddings), tf.complex64)
    C = tf.signal.fft(c_pad)
    F = tf.math.multiply(I, C)  # pointwise product == circular convolution
    f = tf.signal.ifft(F)
    f_real = tf.cast(f, tf.int32)
    # print("std_out: " + str(std_out))
    print("f_real: " + str(f_real))
if TWO_DIM_TEST:
    # 2-D convolution of a 6x6 step image with a 3x3 vertical-edge kernel,
    # computed three ways: the cvnn Convolutional layer, tf.nn.conv2d, and an
    # explicit FFT product (padded to the full-convolution size 8x8).
    img2 = np.array([
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0],
        [10, 10, 10, 0, 0, 0]
    ]).astype(np.float32)
    k = np.array([
        [1., 0., -1.],
        [1., 0., -1.],
        [1., 0., -1.]
    ]).astype(np.float32)
    mode = 'full'
    # cvnn layer with the kernel forced to k so all methods use the same filter.
    conv = Convolutional(1, (3, 3), (6, 6, 1), padding=2, input_dtype=np.float32)
    conv.kernels = []
    conv.kernels.append(tf.reshape(tf.cast(tf.Variable(k, name="kernel" + str(0) + "_f" + str(0)), dtype=np.float32),
                                   (3, 3, 1)))
    std_out = conv([img2])[..., 0]
    print("std_out: " + str(std_out))
    img_tf = tf.constant(tf.reshape(img2, (1, 6, 6, 1)), dtype=tf.float32)
    k_tf = tf.constant(tf.reshape(k, (3, 3, 1, 1)), dtype=tf.float32)
    conv_tf = tf.nn.conv2d(img_tf, k_tf, strides=[1, 1], padding="SAME")[0, ..., 0]
    print("conv_tf: " + str(conv_tf))
    # set_trace()
    img2_pad = tf.pad(img2, tf.constant([[0, 2], [0, 2]]))  # 6x6 -> 8x8
    k_pad = tf.cast(tf.pad(k, tf.constant([[0, 5], [0, 5]])), tf.complex64)  # 3x3 -> 8x8
    I = tf.signal.fft2d(tf.cast(img2_pad, tf.complex64))
    K = tf.signal.fft2d(k_pad)
    F = tf.math.multiply(I, K)
    f = tf.signal.ifft2d(F)
    f_real = tf.cast(f, tf.int32)
    print("f_real: " + str(f_real))
np_fft_conv = np.array(signal.fftconvolve(img2, k, mode=mode) , np.int32)
print("sp_fft_conv_" + mode + ":\n" + str(np_fft_conv))
np_conv = np.array(signal.convolve2d(img2 , k, mode), np.int32)
print("sp_fft_conv_" + mode + ":\n" + str(np_conv))
# set_trace()
"""
# Check numpy implementation
I = np.fft.fft2(img2)
K = np.fft.fft2(tf.pad(k, tf.constant([[0, 5], [0, 5]])))
F = np.multiply(I, K)
f = np.fft.ifft2(F)
print("f_np_real: " + str(np.round(f.astype(np.float32))))
"""
if STACKOVERFLOW_EXAMPLE:
    # https://stackoverflow.com/questions/40703751/using-fourier-transforms-to-do-convolution
    # Full 2-D convolution via FFT: pad both arrays to the next power of two
    # above (s1 + s2 - 1), multiply spectra, then slice back to the full size.
    x = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 3, 0], [0, 0, 0, 1]]
    x = np.array(x)
    y = [[4, 5], [3, 4]]
    y = np.array(y)
    print("conv:\n", signal.convolve2d(x, y, 'full'))
    s1 = np.array(x.shape)
    s2 = np.array(y.shape)
    size = s1 + s2 - 1  # output size of a 'full' convolution
    fsize = 2 ** np.ceil(np.log2(size)).astype(int)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    new_x = np.fft.fft2(x, fsize)
    new_y = np.fft.fft2(y, fsize)
    result = np.fft.ifft2(new_x*new_y)[fslice].copy()
    print("fft for my method:\n", np.array(result.real, np.int32))
    print("fft:\n" , np.array(signal.fftconvolve(x, y), np.int32))
| 4,086 | 31.181102 | 117 | py |
cvnn | cvnn-master/examples/u_net_example.py | import tensorflow as tf
from cvnn import layers
from pdb import set_trace
import tensorflow_datasets as tfds
# https://medium.com/analytics-vidhya/training-u-net-from-scratch-using-tensorflow2-0-fad541e2eaf1
BATCH_SIZE = 64
BUFFER_SIZE = 1000
INPUT_SIZE = (572, 572)
MASK_SIZE = (388, 388)
def _downsample_tf(inputs, units):
    """One Keras U-Net encoder stage: two 3x3 ReLU convs followed by 2x2 max-pool.

    Returns all three intermediate tensors so skip connections can reuse them.
    """
    first_conv = tf.keras.layers.Conv2D(units, activation='relu', kernel_size=3)(inputs)
    second_conv = tf.keras.layers.Conv2D(units, activation='relu', kernel_size=3)(first_conv)
    pooled = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(second_conv)
    return first_conv, second_conv, pooled
def _downsample_cvnn(inputs, units, dtype=tf.float32):
    """One cvnn U-Net encoder stage: two 3x3 cart_relu convs then 2x2 max-pool.

    Mirrors _downsample_tf but with cvnn layers; returns the two conv outputs
    (for skip connections) and the pooled tensor.
    """
    c0 = layers.ComplexConv2D(units, activation='cart_relu', kernel_size=3, dtype=dtype)(inputs)
    c1 = layers.ComplexConv2D(units, activation='cart_relu', kernel_size=3, dtype=dtype)(c0)
    c2 = layers.ComplexMaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', dtype=dtype)(c1)
    return c0, c1, c2
def _upsample_tf(in1, in2, units, crop):
    """One Keras U-Net decoder stage: upsample, crop skip tensor, concat, two convs.

    :param in1: tensor from the deeper level, upsampled by a 2x2 transposed conv.
    :param in2: skip-connection tensor, cropped symmetrically by `crop` to match.
    :param units: number of filters for every conv in the stage.
    :param crop: symmetric crop (pixels per side) applied to in2.
    :return: the two conv outputs (second one feeds the next stage).
    """
    t01 = tf.keras.layers.Conv2DTranspose(units, kernel_size=2, strides=(2, 2), activation='relu')(in1)
    crop01 = tf.keras.layers.Cropping2D(cropping=(crop, crop))(in2)
    concat01 = tf.keras.layers.concatenate([t01, crop01], axis=-1)
    out1 = tf.keras.layers.Conv2D(units, activation='relu', kernel_size=3)(concat01)
    out2 = tf.keras.layers.Conv2D(units, activation='relu', kernel_size=3)(out1)
    return out1, out2
def _upsample_cvnn(in1, in2, units, crop, dtype=tf.float32):
    """One cvnn U-Net decoder stage: upsample, crop skip tensor, concat, two convs.

    Mirrors _upsample_tf with cvnn layers. NOTE(review): these layers use
    activation='relu' while _downsample_cvnn uses 'cart_relu', and the crop is
    done with the real-valued tf.keras Cropping2D — both look fine for the
    default float32 dtype but worth confirming for a complex dtype.
    """
    t01 = layers.ComplexConv2DTranspose(units, kernel_size=2, strides=(2, 2), activation='relu', dtype=dtype)(in1)
    crop01 = tf.keras.layers.Cropping2D(cropping=(crop, crop))(in2)
    concat01 = tf.keras.layers.concatenate([t01, crop01], axis=-1)
    out1 = layers.ComplexConv2D(units, activation='relu', kernel_size=3, dtype=dtype)(concat01)
    out2 = layers.ComplexConv2D(units, activation='relu', kernel_size=3, dtype=dtype)(out1)
    return out1, out2
def normalize(input_image, input_mask):
    """Scale pixel values from [0, 255] to [0, 1]; the mask passes through unchanged."""
    scaled_image = tf.cast(input_image, tf.float32) / 255.0
    return scaled_image, input_mask
def load_image(datapoint):
    """Resize a tfds oxford_iiit_pet sample to the network's input/mask sizes and normalize.

    The image is padded-resized to INPUT_SIZE and the segmentation mask to
    MASK_SIZE (U-Net output is smaller than its input due to valid convolutions).
    """
    input_image = tf.image.resize_with_pad(datapoint['image'], INPUT_SIZE[0], INPUT_SIZE[1])
    input_mask = tf.image.resize_with_pad(datapoint['segmentation_mask'], MASK_SIZE[0], MASK_SIZE[1])
    input_image, input_mask = normalize(input_image, input_mask)
    return input_image, input_mask
def get_dataset():
    """Load a 1% slice of oxford_iiit_pet, map load_image, and batch.

    :return: (train_batches, test_batches) tf.data pipelines of (image, mask)
             pairs resized to INPUT_SIZE / MASK_SIZE.
    """
    # `info` is required by with_info=True; the previously computed
    # train_length / steps_per_epoch locals were never used and are removed.
    (train_images, test_images), info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True,
                                                  split=['train[:1%]', 'test[:1%]'])
    train_images = train_images.map(load_image)
    test_images = test_images.map(load_image)
    train_batches = train_images.batch(BATCH_SIZE).prefetch(buffer_size=tf.data.AUTOTUNE)
    test_batches = test_images.batch(BATCH_SIZE)
    # set_trace()
    return train_batches, test_batches
def get_cvnn_model(dtype=tf.float32):
    """Build and compile the cvnn U-Net (4 encoder + 4 decoder stages, 4-class output).

    :param dtype: dtype for every cvnn layer (float32 by default).
    :return: compiled tf.keras.Model named "u-net-cvnn".
    """
    tf.random.set_seed(1)  # same seed as get_tf_model so both start identically
    inputs = layers.complex_input(shape=INPUT_SIZE + (3,), dtype=dtype)
    # inputs = tf.keras.layers.InputLayer(input_shape=INPUT_SIZE + (3,), dtype=dtype)
    # inputs = tf.keras.layers.Input(shape=INPUT_SIZE + (3,))
    c0, c1, c2 = _downsample_cvnn(inputs, 64, dtype)
    c3, c4, c5 = _downsample_cvnn(c2, 128, dtype)
    c6, c7, c8 = _downsample_cvnn(c5, 256, dtype)
    c9, c10, c11 = _downsample_cvnn(c8, 512, dtype)
    # Bottleneck: two 1024-filter convolutions.
    c12 = layers.ComplexConv2D(1024, activation='relu', kernel_size=3, dtype=dtype)(c11)
    c13 = layers.ComplexConv2D(1024, activation='relu', kernel_size=3, padding='valid', dtype=dtype)(c12)
    # Decoder: each stage consumes the matching encoder skip tensor.
    c14, c15 = _upsample_cvnn(c13, c10, 512, 4, dtype)
    c16, c17 = _upsample_cvnn(c15, c7, 256, 16, dtype)
    c18, c19 = _upsample_cvnn(c17, c4, 128, 40, dtype)
    c20, c21 = _upsample_cvnn(c19, c1, 64, 88, dtype)
    outputs = layers.ComplexConv2D(4, kernel_size=1, dtype=dtype)(c21)
    model = tf.keras.Model(inputs=inputs, outputs=outputs, name="u-net-cvnn")
    model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer="adam", metrics=["accuracy"])
    return model
def get_tf_model():
    """Build and compile the plain-Keras U-Net baseline (mirrors get_cvnn_model).

    :return: compiled tf.keras.Model named "u-net-tf".
    """
    tf.random.set_seed(1)  # same seed as get_cvnn_model so both start identically
    inputs = tf.keras.layers.Input(shape=INPUT_SIZE + (3,))
    c0, c1, c2 = _downsample_tf(inputs, 64)
    c3, c4, c5 = _downsample_tf(c2, 128)
    c6, c7, c8 = _downsample_tf(c5, 256)
    c9, c10, c11 = _downsample_tf(c8, 512)
    # Bottleneck: two 1024-filter convolutions.
    c12 = tf.keras.layers.Conv2D(1024, activation='relu', kernel_size=3)(c11)
    c13 = tf.keras.layers.Conv2D(1024, activation='relu', kernel_size=3, padding='valid')(c12)
    # Decoder: each stage consumes the matching encoder skip tensor.
    c14, c15 = _upsample_tf(c13, c10, 512, 4)
    c16, c17 = _upsample_tf(c15, c7, 256, 16)
    c18, c19 = _upsample_tf(c17, c4, 128, 40)
    c20, c21 = _upsample_tf(c19, c1, 64, 88)
    outputs = tf.keras.layers.Conv2D(4, kernel_size=1)(c21)
    model = tf.keras.Model(inputs=inputs, outputs=outputs, name="u-net-tf")
    model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  optimizer="adam", metrics=["accuracy"])
    return model
def test_model(model, train_batches, test_batches):
    """Train a compiled model for 2 epochs and return (history, logs).

    `logs` currently only carries the weights captured before training; the
    loss/gradient capture is kept commented out for future debugging.
    """
    weigths = model.get_weights()
    # with tf.GradientTape() as tape:
    #     # for elem, label in iter(ds_train):
    #     loss = model.compiled_loss(y_true=tf.convert_to_tensor(test_labels), y_pred=model(test_images))
    # gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    logs = {
        'weights': weigths,
        # 'loss': loss,
        # 'gradients': gradients
    }
    history = model.fit(train_batches, epochs=2, validation_data=test_batches)
    return history, logs
def test_unet():
    """Train the cvnn and Keras U-Nets on the same data and assert equal histories.

    Both builders use tf.random.set_seed(1), so with identical data order the
    training curves are expected to match exactly.
    """
    train_batches, test_batches = get_dataset()
    history_own, logs_own = test_model(get_cvnn_model(), train_batches, test_batches)
    history_keras, logs_keras = test_model(get_tf_model(), train_batches, test_batches)
    assert history_keras.history == history_own.history, f"\n{history_keras.history}\n !=\n{history_own.history}"
if __name__ == '__main__':
    # Force CPU execution for deterministic comparison.
    from importlib import reload
    import os
    import tensorflow
    reload(tensorflow)
    # NOTE(review): setting CUDA_VISIBLE_DEVICES after tensorflow has been
    # imported at module top may not take effect — confirm the GPU is hidden.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    test_unet()
| 6,265 | 38.1625 | 114 | py |
cvnn | cvnn-master/examples/fashion_mnist_example.py | # TensorFlow and tf.keras
import tensorflow as tf
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
from cvnn import layers
print(tf.__version__)
def get_fashion_mnist_dataset():
    """Return Fashion-MNIST as ((train_images, train_labels), (test_images, test_labels))."""
    # load_data() already returns the nested-tuple layout callers unpack.
    return tf.keras.datasets.fashion_mnist.load_data()
def keras_fit(train_images, train_labels, test_images, test_labels,
              init1='glorot_uniform', init2='glorot_uniform', epochs=10):
    """Train the plain-Keras baseline classifier and return its fit history.

    :param init1: kernel initializer for the hidden Dense(128) layer.
    :param init2: kernel initializer for the output Dense(10) layer.
    :param epochs: number of training epochs (shuffle disabled for determinism).
    """
    tf.random.set_seed(1)  # same seed as own_fit so both runs are comparable
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu', kernel_initializer=init1),
        tf.keras.layers.Dense(10, kernel_initializer=init2)
    ])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    history = model.fit(train_images, train_labels, epochs=epochs, shuffle=False)
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    print('\nTest accuracy:', test_acc)
    return history
def own_fit(train_images, train_labels, test_images, test_labels,
            init1='glorot_uniform', init2='glorot_uniform', epochs=10):
    """Train the equivalent cvnn (float32) classifier and return its fit history.

    Layer-for-layer mirror of keras_fit using cvnn's Complex* layers restricted
    to float32, so with the same initializers/seed results should match.
    """
    tf.random.set_seed(1)  # same seed as keras_fit so both runs are comparable
    model = tf.keras.Sequential([
        layers.ComplexFlatten(input_shape=(28, 28)),
        layers.ComplexDense(128, activation='cart_relu', dtype=np.float32, kernel_initializer=init1),
        layers.ComplexDense(10, dtype=np.float32, kernel_initializer=init2)
    ])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    history = model.fit(train_images, train_labels, epochs=epochs, shuffle=False)
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    print('\nTest accuracy:', test_acc)
    return history
def test_fashion_mnist():
    """Check the cvnn float32 model reproduces the Keras baseline exactly.

    Builds fixed constant initializers from one seeded GlorotUniform draw so
    both models start from identical weights, then compares fit histories.
    """
    visible_devices = tf.config.get_visible_devices()
    for device in visible_devices:
        assert device.device_type != 'GPU', "Using GPU not good for debugging"
    seed = 117
    epochs = 3
    # Freeze one Glorot draw per layer into constant initializers.
    init = tf.keras.initializers.GlorotUniform(seed=seed)
    init1 = tf.constant_initializer(init((784, 128)).numpy())
    init2 = tf.constant_initializer(init((128, 10)).numpy())
    (train_images, train_labels), (test_images, test_labels) = get_fashion_mnist_dataset()
    keras = keras_fit(train_images, train_labels, test_images, test_labels, init1=init1, init2=init2, epochs=epochs)
    # keras1 = keras_fit(train_images, train_labels, test_images, test_labels, init1=init1, init2=init2, epochs=epochs)
    own = own_fit(train_images, train_labels, test_images, test_labels, init1=init1, init2=init2, epochs=epochs)
    assert keras.history == own.history, f"{keras.history } != {own.history }"
if __name__ == "__main__":
    # https://www.tensorflow.org/tutorials/keras/classification
    # Force CPU execution for deterministic comparison.
    from importlib import reload
    import os
    import tensorflow
    reload(tensorflow)
    # NOTE(review): setting CUDA_VISIBLE_DEVICES after tensorflow is already
    # imported at module top may not take effect — confirm the GPU is hidden.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    test_fashion_mnist()
| 3,211 | 41.826667 | 119 | py |
cvnn | cvnn-master/examples/random_noise_publication.py | from cvnn.montecarlo import run_gaussian_dataset_montecarlo
# Publication experiments: 20-iteration Monte-Carlo on the Gaussian dataset
# (m=10000 samples, n=128 features). First run: single hidden layer of 64 units.
run_gaussian_dataset_montecarlo(iterations=20, m=10000, n=128, param_list=None, validation_split=0.2,
                                epochs=150, batch_size=100, display_freq=1, optimizer='sgd',
                                shape_raw=[64], activation='cart_relu', debug=False, capacity_equivalent=False,
                                polar=False, do_all=True, dropout=0.5, tensorboard=False)
# Second run: two hidden layers of 128 and 40 units, otherwise identical.
run_gaussian_dataset_montecarlo(iterations=20, m=10000, n=128, param_list=None, validation_split=0.2,
                                epochs=150, batch_size=100, display_freq=1, optimizer='sgd',
                                shape_raw=[128, 40], activation='cart_relu', debug=False, capacity_equivalent=False,
                                polar=False, do_all=True, dropout=0.5, tensorboard=False)
| 860 | 77.272727 | 116 | py |
cvnn | cvnn-master/examples/cifar410_example.py | import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import cvnn.layers as complex_layers
import numpy as np
from pdb import set_trace
# CIFAR-10 is loaded once at import time and shared by every fit function below.
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images.astype(dtype=np.float32) / 255.0, test_images.astype(dtype=np.float32) / 255.0
def keras_fit(epochs=10, use_bias=True):
    """Train the plain-Keras CIFAR-10 CNN and return (history, logs).

    :param epochs: number of training epochs.
    :param use_bias: whether dense/conv layers carry a bias term (disabled when
        comparing against the complex model, which zeroes only the imaginary part).
    :return: (fit history, dict with initial/final weights, loss and gradients).
    """
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)  # shared seeded init for reproducibility
    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3), kernel_initializer=init,
                            use_bias=use_bias))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer=init, use_bias=use_bias))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer=init, use_bias=use_bias))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu', kernel_initializer=init, use_bias=use_bias))
    model.add(layers.Dense(10, kernel_initializer=init, use_bias=use_bias))
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    # Capture weights and one manual gradient pass (on the test set) before fit.
    weigths = model.get_weights()
    with tf.GradientTape() as tape:
        # for elem, label in iter(ds_train):
        loss = model.compiled_loss(y_true=tf.convert_to_tensor(test_labels), y_pred=model(test_images))
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    history = model.fit(train_images, train_labels, epochs=epochs, validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    logs = {
        'weights_at_init': weigths,
        'loss': loss,
        'gradients': gradients,
        'weights_at_end': model.get_weights()
    }
    return history, logs
def own_fit(epochs=10):
    """Train the cvnn CIFAR-10 CNN restricted to float32 and return its history.

    Layer-for-layer mirror of keras_fit (with biases) using cvnn Complex* layers
    forced to np.float32, so with the same seed results should match Keras.
    """
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)  # same seeded init as keras_fit
    model = models.Sequential()
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu', input_shape=(32, 32, 3),
                                           dtype=np.float32, kernel_initializer=init))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2), dtype=np.float32))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', dtype=np.float32,
                                           kernel_initializer=init))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2), dtype=np.float32))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', dtype=np.float32,
                                           kernel_initializer=init))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_relu', dtype=np.float32, kernel_initializer=init))
    model.add(complex_layers.ComplexDense(10, dtype=np.float32, kernel_initializer=init))
    # model.summary()
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    history = model.fit(train_images, train_labels, epochs=epochs, validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    return history
def own_complex_fit(epochs=10):
    """Train the truly complex cvnn CIFAR-10 CNN and return (history, logs).

    Uses init_technique='zero_imag' (real part seeded like keras_fit, imaginary
    part zero) and no biases, with a final 'cast_to_real' activation, so the
    forward pass should coincide with the real Keras model.
    :return: (fit history, dict with initial/final weights, loss and gradients).
    """
    tf.random.set_seed(1)
    init = tf.keras.initializers.GlorotUniform(seed=117)  # same seeded init as keras_fit
    model = models.Sequential()
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu', input_shape=(32, 32, 3),
                                           kernel_initializer=init, use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', kernel_initializer=init,
                                           use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu', kernel_initializer=init,
                                           use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_relu', kernel_initializer=init,
                                          use_bias=False, init_technique='zero_imag'))
    model.add(complex_layers.ComplexDense(10, activation='cast_to_real', kernel_initializer=init,
                                          use_bias=False, init_technique='zero_imag'))
    # model.summary()
    model.compile(optimizer='sgd',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    # Capture weights and one manual gradient pass (on the test set) before fit.
    weigths = model.get_weights()
    with tf.GradientTape() as tape:
        loss = model.compiled_loss(y_true=tf.convert_to_tensor(test_labels), y_pred=model(test_images))
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    history = model.fit(train_images, train_labels, epochs=epochs, validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    logs = {
        'weights_at_init': weigths,
        'loss': loss,
        'gradients': gradients,
        'weights_at_end': model.get_weights()
    }
    return history, logs
def test_cifar10():
    """Compare Keras vs cvnn CIFAR-10 models: complex (zero-imag) then float32.

    First phase checks, against the bias-free Keras model, that the complex
    model's real weight parts match, imaginary parts stay zero, and the initial
    loss/gradients agree. Second phase compares full fit histories of the
    float32 cvnn model against the default Keras model.
    """
    epochs = 3
    assert not tf.test.gpu_device_name(), "Using GPU not good for debugging"
    keras, keras_logs = keras_fit(epochs=epochs, use_bias=False)
    # keras1 = keras_fit(epochs=epochs)
    own, own_logs = own_complex_fit(epochs=epochs)
    # cvnn weight lists interleave real/imag parts: [::2] real, [1::2] imag.
    assert np.all([np.all(k_w == o_w) for k_w, o_w in zip(keras_logs['weights_at_init'],
                                                          own_logs['weights_at_init'][::2])])  # real part equal
    assert np.all([np.all(o_w == 0) for o_w in own_logs['weights_at_init'][1::2]])  # imag part at zero
    assert np.all([np.all(o_w == 0) for o_w in own_logs['weights_at_end'][1::2]])
    assert own_logs['loss'] == keras_logs['loss']  # same loss
    assert np.all([np.allclose(k, o) for k, o in zip(keras_logs['gradients'], own_logs['gradients'][::2])])
    # assert keras.history == own.history, f"\n{keras.history}\n !=\n{own.history}"
    keras, _ = keras_fit(epochs=epochs)
    # keras1 = keras_fit(epochs=epochs)
    own = own_fit(epochs=epochs)
    assert keras.history == own.history, f"\n{keras.history}\n !=\n{own.history}"
    # for k, k2, o in zip(keras.history.values(), keras1.history.values(), own.history.values()):
    #     if np.all(np.array(k) == np.array(k2)):
    #         assert np.all(np.array(k) == np.array(o)), f"\n{keras.history}\n !=\n{own.history}"
if __name__ == "__main__":
    # Force CPU execution for deterministic comparison.
    from importlib import reload
    import os
    import tensorflow
    reload(tensorflow)
    # NOTE(review): setting CUDA_VISIBLE_DEVICES after tensorflow is already
    # imported at module top may not take effect — confirm the GPU is hidden.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    test_cifar10()
| 7,167 | 52.492537 | 119 | py |
cvnn | cvnn-master/examples/mnist_dataset_example.py | import tensorflow as tf
import tensorflow_datasets as tfds
from cvnn import layers
import numpy as np
import timeit
import datetime
from pdb import set_trace
# Plotly is optional: flag its availability instead of failing at import time.
try:
    import plotly.graph_objects as go
    import plotly
    PLOTLY = True
except ModuleNotFoundError:
    PLOTLY = False
# tf.enable_v2_behavior()
# tfds.disable_progress_bar()
# Default interaction options for any Plotly figure produced by this script.
PLOTLY_CONFIG = {
    'scrollZoom': True,
    'editable': True
}
def cast_to_complex(image, label):
    """Cast the image tensor to complex64; the label passes through unchanged."""
    complex_image = tf.cast(image, tf.complex64)
    return complex_image, label
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32` in [0, 1]; label is untouched."""
    as_float = tf.cast(image, tf.float32)
    return as_float / 255., label
def get_dataset():
    """Load MNIST via tfds, normalize to float32 [0,1], and batch (128).

    Shuffling is disabled everywhere so repeated runs see the same data order.
    :return: (ds_train, ds_test) tf.data pipelines of (image, label) pairs.
    """
    (ds_train, ds_test), ds_info = tfds.load(
        'mnist',
        split=['train', 'test'],
        shuffle_files=False,
        as_supervised=True,
        with_info=True,
    )
    ds_train = ds_train.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_train = ds_train.cache()
    # ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
    ds_train = ds_train.batch(128)
    ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
    ds_test = ds_test.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_test = ds_test.batch(128)
    ds_test = ds_test.cache()
    ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
    return ds_train, ds_test
def keras_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform', train_bias=True):
    """Train the plain-Keras MNIST baseline; return (history, elapsed_seconds, logs).

    :param init1: kernel initializer for the hidden Dense(128) layer.
    :param init2: kernel initializer for the output Dense(10) layer.
    :param train_bias: whether the dense layers carry biases (disabled when
        comparing to the complex model).
    :return: (fit history, wall-clock fit time, dict with initial weights, one
        loss value and its gradients computed on the first test batch).
    """
    tf.random.set_seed(24)
    # https://www.tensorflow.org/datasets/keras_example
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28, 1), dtype=np.float32),
        tf.keras.layers.Dense(128, activation='relu', kernel_initializer=init1, dtype=np.float32, use_bias=train_bias),
        tf.keras.layers.Dense(10, activation='softmax', kernel_initializer=init2, dtype=np.float32, use_bias=train_bias)
    ])
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    # Capture weights and one manual gradient pass (first test batch) before fit.
    weigths = model.get_weights()
    with tf.GradientTape() as tape:
        # for elem, label in iter(ds_train):
        elem, label = next(iter(ds_test))
        loss = model.compiled_loss(y_true=label, y_pred=model(elem))  # calculate loss
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    logs = {
        'weights': weigths,
        'loss': loss,
        'gradients': gradients
    }
    start = timeit.default_timer()
    history = model.fit(
        ds_train,
        epochs=6,
        validation_data=ds_test,
        verbose=verbose, shuffle=False
    )
    stop = timeit.default_timer()
    return history, stop - start, logs
def own_complex_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform'):
    """Train the complex64 cvnn MNIST model; return (history, elapsed_seconds, logs).

    Uses init_technique='zero_imag' (real part seeded, imaginary part zero), no
    biases, and a 'cast_to_real' output followed by a real softmax, so the
    forward pass should coincide with the bias-free Keras model.
    :return: (fit history, wall-clock fit time, dict with initial weights, one
        loss value and its gradients computed on the first test batch).
    """
    tf.random.set_seed(24)
    model = tf.keras.models.Sequential([
        layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.complex64),
        layers.ComplexDense(128, activation='cart_relu', dtype=np.complex64, kernel_initializer=init1,
                            use_bias=False, init_technique='zero_imag'),
        layers.ComplexDense(10, activation='cast_to_real', dtype=np.complex64, kernel_initializer=init2,
                            use_bias=False, init_technique='zero_imag'),
        tf.keras.layers.Activation('softmax')
    ])
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    # ds_train = ds_train.map(cast_to_complex)
    # ds_test = ds_test.map(cast_to_complex)
    # Capture weights and one manual gradient pass (first test batch) before fit.
    weigths = model.get_weights()
    with tf.GradientTape() as tape:
        # for elem, label in iter(ds_train):
        elem, label = next(iter(ds_test))
        loss = model.compiled_loss(y_true=label, y_pred=model(elem))  # calculate loss
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    logs = {
        'weights': weigths,
        'loss': loss,
        'gradients': gradients
    }
    start = timeit.default_timer()
    history = model.fit(
        ds_train,
        epochs=6,
        validation_data=ds_test,
        verbose=verbose, shuffle=False
    )
    stop = timeit.default_timer()
    return history, stop - start, logs
def own_fit(ds_train, ds_test, verbose=True, init1='glorot_uniform', init2='glorot_uniform'):
    """Train the float32 cvnn MNIST model; return (history, elapsed_seconds, logs).

    Layer-for-layer mirror of keras_fit (with biases) built from cvnn layers
    restricted to np.float32.
    :return: (fit history, wall-clock fit time, dict with initial weights, one
        loss value and its gradients computed on the first test batch).
    """
    tf.random.set_seed(24)
    model = tf.keras.models.Sequential([
        layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.float32),
        layers.ComplexDense(128, activation='cart_relu', dtype=np.float32, kernel_initializer=init1),
        layers.ComplexDense(10, activation='softmax_real_with_abs', dtype=np.float32, kernel_initializer=init2)
    ])
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    # Capture weights and one manual gradient pass (first test batch) before fit.
    weigths = model.get_weights()
    with tf.GradientTape() as tape:
        # for elem, label in iter(ds_train):
        elem, label = next(iter(ds_test))
        loss = model.compiled_loss(y_true=label, y_pred=model(elem))  # calculate loss
    gradients = tape.gradient(loss, model.trainable_weights)  # back-propagation
    logs = {
        'weights': weigths,
        'loss': loss,
        'gradients': gradients
    }
    start = timeit.default_timer()
    history = model.fit(
        ds_train,
        epochs=6,
        validation_data=ds_test,
        verbose=verbose, shuffle=False
    )
    stop = timeit.default_timer()
    return history, stop - start, logs
def test_mnist():
    """Compare Keras vs cvnn MNIST models: complex (zero-imag) phase, then float32.

    Phase 1 (bias-free): check the complex model's real weight parts equal the
    Keras weights, imaginary parts are zero, and initial loss/gradients agree.
    Phase 2 (with bias): initial weights and full fit histories must match.
    """
    visible_devices = tf.config.get_visible_devices()
    for device in visible_devices:
        assert device.device_type != 'GPU', "Using GPU not good for debugging"
    ds_train, ds_test = get_dataset()
    # Don't use bias becase complex model gets a complex bias with imag not zero.
    keras_hist, keras_time, keras_logs = keras_fit(ds_train, ds_test, train_bias=False)
    keras_weigths = keras_logs['weights']
    own_cvnn_hist, own_cvnn_time, own_cvnn_logs = own_complex_fit(ds_train, ds_test)
    own_cvnn_weigths = own_cvnn_logs['weights']
    # cvnn weight lists interleave real/imag parts: [::2] real, [1::2] imag.
    assert np.all([np.all(k_w == o_w) for k_w, o_w in zip(keras_weigths, own_cvnn_weigths[::2])])
    assert np.all([np.all(o_w == 0) for o_w in own_cvnn_weigths[1::2]])
    assert own_cvnn_logs['loss'] == keras_logs['loss']
    assert np.allclose(own_cvnn_logs['gradients'][2], keras_logs['gradients'][1])
    # for k, o in zip(keras_hist.history.values(), own_cvnn_hist.history.values()):
    #     assert np.allclose(k, o), f"\n{keras_hist.history}\n !=\n{own_cvnn_hist.history}"
    # DO AGAIN TO USE BIAS
    keras_hist, keras_time, keras_logs = keras_fit(ds_train, ds_test)
    keras_weigths = keras_logs['weights']
    own_hist, own_time, own_logs = own_fit(ds_train, ds_test)
    own_weigths = own_logs['weights']
    # Bug fix: the original `assert [ ... ]` asserted a non-empty list literal,
    # which is always truthy — the per-weight comparison was never checked.
    assert all(np.all(k_w == o_w) for k_w, o_w in zip(keras_weigths, own_weigths))
    assert keras_hist.history == own_hist.history, f"\n{keras_hist.history}\n !=\n{own_hist.history}"
    assert own_logs['loss'] == keras_logs['loss']
    # for k, k2, o in zip(keras_hist.history.values(), keras2_hist.history.values(), own_hist.history.values()):
    #     if np.all(np.array(k) == np.array(k2)):
    #         assert np.all(np.array(k) == np.array(o)), f"\n{keras_hist.history}\n !=\n{own_hist.history}"
if __name__ == "__main__":
    # Force CPU execution for deterministic comparison.
    from importlib import reload
    import os
    import tensorflow
    reload(tensorflow)
    # NOTE(review): setting CUDA_VISIBLE_DEVICES after tensorflow is already
    # imported at module top may not take effect — confirm the GPU is hidden.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    test_mnist()
    # test_mnist_montecarlo()
    # ds_train, ds_test = get_dataset()
    # keras_fit(ds_train, ds_test, train_bias=False)
    # own_fit(ds_train, ds_test)
| 7,789 | 36.63285 | 118 | py |
cvnn | cvnn-master/tests/test_dropout.py | import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
from pdb import set_trace
import cvnn.layers as complex_layers
from cvnn.montecarlo import run_montecarlo
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32` in [0, 1]; label is untouched."""
    as_float = tf.cast(image, tf.float32)
    return as_float / 255., label
def get_dataset():
    """Load MNIST via tfds, normalize to float32 [0,1], and batch (128).

    Shuffling is disabled so repeated runs see the same data order.
    :return: (ds_train, ds_test) tf.data pipelines of (image, label) pairs.
    """
    (ds_train, ds_test), ds_info = tfds.load(
        'mnist',
        split=['train', 'test'],
        shuffle_files=False,
        as_supervised=True,
        with_info=True,
    )
    ds_train = ds_train.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_train = ds_train.cache()
    # ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
    ds_train = ds_train.batch(128)
    ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
    ds_test = ds_test.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_test = ds_test.batch(128)
    ds_test = ds_test.cache()
    ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
    return ds_train, ds_test
@tf.autograph.experimental.do_not_convert
def simple_random_example():
    """Check ComplexDropout against fixed expected values and Keras Dropout.

    With seed 0 and rate 0.2: training=False must be the identity, training=True
    must match the hard-coded mask (kept values scaled by 1/0.8), and the real
    part must equal what tf.keras.layers.Dropout produces under the same seed.
    """
    tf.random.set_seed(0)
    layer = complex_layers.ComplexDropout(.2, input_shape=(2,), seed=0)
    data = np.arange(10).reshape(5, 2).astype(np.float32)
    data = tf.complex(data, data)
    outputs = layer(data, training=True)
    expected_out = np.array([[0. + 0.j, 0. + 0.j],
                             [0. + 0.j, 3.75 + 3.75j],
                             [5. + 5.j, 6.25 + 6.25j],
                             [7.5 + 7.5j, 8.75 + 8.75j],
                             [10. + 10.j, 11.25 + 11.25j]])
    assert np.all(data == layer(data, training=False))
    assert np.all(outputs == expected_out)
    tf.random.set_seed(0)  # reset so the Keras layer draws the same mask
    layer = tf.keras.layers.Dropout(.2, input_shape=(2,), seed=0)
    real_outputs = layer(tf.math.real(data), training=True)
    assert np.all(real_outputs == tf.math.real(outputs))
def get_real_mnist_model():
    """Build the real-valued reference MNIST classifier.

    Uses cvnn-registered activations ('cart_relu', 'softmax_real_with_abs')
    and the "ComplexGlorotUniform" initializer inside plain keras layers so
    that the weights/activations match the complex model exactly.

    :return: ``(real_model, real_intermediate_model)`` where the second model
        exposes the post-dropout activations for comparison.
    """
    in1 = tf.keras.layers.Input(shape=(28, 28, 1))
    flat = tf.keras.layers.Flatten(input_shape=(28, 28, 1))(in1)
    dense = tf.keras.layers.Dense(128, activation='cart_relu')(flat)
    # drop = complex_layers.ComplexDropout(rate=0.5)(dense)
    drop = tf.keras.layers.Dropout(0.5)(dense)
    out = tf.keras.layers.Dense(10, activation='softmax_real_with_abs', kernel_initializer="ComplexGlorotUniform")(drop)
    real_model = tf.keras.Model(in1, out, name="tf_rvnn")
    real_model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    # Second model shares the same layers/weights up to (and including) dropout.
    real_intermediate_model = tf.keras.Model(in1, drop)
    return real_model, real_intermediate_model
def get_complex_mnist_model():
    """Build the cvnn MNIST classifier with ``dtype=float32`` so it is
    numerically comparable to the keras reference model.

    :return: ``(complex_model, complex_intermediate_model)`` where the second
        model exposes the post-dropout activations for comparison.
    """
    inputs = complex_layers.complex_input(shape=(28, 28, 1), dtype=np.float32)
    flat = complex_layers.ComplexFlatten(input_shape=(28, 28, 1), dtype=np.float32)(inputs)
    dense = complex_layers.ComplexDense(128, activation='cart_relu', dtype=np.float32)(flat)
    drop = complex_layers.ComplexDropout(rate=0.5)(dense)
    out = complex_layers.ComplexDense(10, activation='softmax_real_with_abs', dtype=np.float32)(drop)
    complex_model = tf.keras.Model(inputs, out, name="rvnn")
    complex_model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=tf.keras.optimizers.Adam(0.001),
        metrics=['accuracy'],
    )
    # Second model shares the same layers/weights up to (and including) dropout.
    complex_intermediate_model = tf.keras.Model(inputs, drop)
    return complex_model, complex_intermediate_model
def dropout():
    """Exhaustively compare the cvnn model against the keras reference:
    forward passes (training on/off), weights, evaluation, loss and
    gradients must all match.

    The repeated ``tf.random.set_seed(0)`` calls are load-bearing: dropout
    draws from the global RNG, so the seed must be reset right before each
    paired call for the two models to drop the same units.
    """
    ds_train, ds_test = get_dataset()
    # NOTE(review): train_images/test_images are computed but never used below
    # (the gradient check re-converts ds_test); presumably leftover code.
    train_images, train_labels = convert_to_numpy(ds_train)
    test_images, test_labels = convert_to_numpy(ds_test)
    img, label = next(iter(ds_test))
    tf.random.set_seed(0)
    complex_model, complex_intermediate_model = get_complex_mnist_model()
    tf.random.set_seed(0)
    real_model, real_intermediate_model = get_real_mnist_model()
    # 1) Inference mode: dropout is the identity, outputs must match exactly.
    c_before_train_eval = complex_intermediate_model(img, training=False)
    r_before_train_eval = real_intermediate_model(img, training=False)
    assert np.all(r_before_train_eval == c_before_train_eval), f"Results are not equal after drop with training=False"
    assert np.all(real_model.layers[2].get_weights()[0] == complex_model.layers[2].get_weights()[
        0]), f"Output layer weights are not equal before any call"
    assert np.all(real_model.layers[-1].get_weights()[0] == complex_model.layers[-1].get_weights()[
        0]), f"Output layer weights are not equal before any call"
    c_before_train_eval = complex_model(img, training=False)
    r_before_train_eval = real_model(img, training=False)
    assert np.all(r_before_train_eval == c_before_train_eval), f"Results are not equal with training=False"
    # 2) Training mode: re-seed before each call so both dropouts agree.
    tf.random.set_seed(0)
    c_before_train_eval = complex_intermediate_model(img, training=True)
    tf.random.set_seed(0)
    r_before_train_eval = real_intermediate_model(img, training=True)
    assert np.all(r_before_train_eval == c_before_train_eval), f"Results are not equal after drop with training=True"
    tf.random.set_seed(0)
    c_before_train_eval = complex_model(img, training=True)
    tf.random.set_seed(0)
    r_before_train_eval = real_model(img, training=True)
    assert np.all(r_before_train_eval == c_before_train_eval), f"Results are not equal with training=True"
    # 3) Full evaluation over the test set.
    tf.random.set_seed(0)
    complex_eval = complex_model.evaluate(ds_test, verbose=False)
    tf.random.set_seed(0)
    real_eval = real_model.evaluate(ds_test, verbose=False)
    assert np.all(real_eval == complex_eval), f"\n{real_eval}\n !=\n{complex_eval}"
    # 4) Loss and gradients on the whole test set must also agree.
    elem, label = convert_to_numpy(ds_test)
    label = tf.convert_to_tensor(label)
    # elem, label = next(iter(ds_test))
    # set_trace()
    tf.random.set_seed(0)
    with tf.GradientTape() as tape:
        r_loss = real_model.compiled_loss(y_true=label, y_pred=real_model(elem, training=True))  # calculate loss
        real_gradients = tape.gradient(r_loss, real_model.trainable_weights)  # back-propagation
    tf.random.set_seed(0)
    with tf.GradientTape() as tape:
        c_loss = complex_model.compiled_loss(y_true=label, y_pred=complex_model(elem, training=True))  # calculate loss
        complex_gradients = tape.gradient(c_loss, complex_model.trainable_weights)  # back-propagation
    assert r_loss == c_loss, f"\nReal loss:\t\t {r_loss};\nComplex loss:\t {c_loss}"
    # Next assertions showed a rounding error with my library.
    assert np.all([np.allclose(c_g, r_g) for c_g, r_g in zip(complex_gradients, real_gradients)])
def convert_to_numpy(ds):
    """Materialize a supervised ``tf.data`` dataset into two numpy arrays.

    :param ds: dataset yielding ``(images, labels)`` batches.
    :return: ``(images, labels)`` concatenated over all batches, or
        ``(None, None)`` if the dataset is empty (same as the previous
        incremental implementation).
    """
    image_batches, label_batches = [], []
    for img_batch, label_batch in tfds.as_numpy(ds):
        image_batches.append(img_batch)
        label_batches.append(label_batch)
    if not image_batches:
        return None, None
    # One concatenation at the end is O(n); the previous version called
    # np.concatenate per batch, copying all accumulated data each time (O(n^2)).
    return np.concatenate(image_batches, axis=0), np.concatenate(label_batches, axis=0)
def mnist(tf_data: bool = True):
    """Train the real and complex MNIST models identically and assert their
    training histories are exactly equal.

    :param tf_data: if True, feed ``tf.data`` pipelines to ``fit``; otherwise
        feed numpy arrays. Both code paths must produce the same history.
    """
    ds_train, ds_test = get_dataset()
    tf.random.set_seed(0)
    complex_model, _ = get_complex_mnist_model()
    tf.random.set_seed(0)
    real_model, _ = get_real_mnist_model()
    if tf_data:
        r_history = real_model.fit(ds_train, epochs=6, validation_data=ds_test,
                                   verbose=False, shuffle=False)
        c_history = complex_model.fit(ds_train, epochs=6, validation_data=ds_test,
                                      verbose=False, shuffle=False)
    else:
        # Only materialize the datasets as numpy when this branch needs them
        # (the previous version converted unconditionally, wasting time/memory
        # on the tf_data=True path).
        train_images, train_labels = convert_to_numpy(ds_train)
        test_images, test_labels = convert_to_numpy(ds_test)
        r_history = real_model.fit(train_images, train_labels, epochs=6, validation_data=(test_images, test_labels),
                                   verbose=False, shuffle=False)
        c_history = complex_model.fit(train_images, train_labels, epochs=6, validation_data=(test_images, test_labels),
                                      verbose=False, shuffle=False)
    assert r_history.history == c_history.history, f"{r_history.history} != {c_history.history}"
def get_fashion_mnist_dataset():
    """Fetch Fashion-MNIST and return ``((train_x, train_y), (test_x, test_y))``."""
    train_split, test_split = tf.keras.datasets.fashion_mnist.load_data()
    return train_split, test_split
def fashion_mnist():
    """Train both models on Fashion-MNIST and assert identical histories.

    NOTE(review): ``shuffle=True`` draws from the global RNG, and the two
    ``fit`` calls run back-to-back without re-seeding; presumably the shuffle
    sequence still matches between them -- confirm this assertion is stable.
    """
    (train_images, train_labels), (test_images, test_labels) = get_fashion_mnist_dataset()
    tf.random.set_seed(0)
    complex_model, _ = get_complex_mnist_model()
    tf.random.set_seed(0)
    real_model, _ = get_real_mnist_model()
    c_history = complex_model.fit(train_images, train_labels, epochs=10, shuffle=True, verbose=False,
                                  validation_data=(test_images, test_labels))
    r_history = real_model.fit(train_images, train_labels, epochs=10, shuffle=True, verbose=False,
                               validation_data=(test_images, test_labels))
    assert r_history.history == c_history.history, f"{r_history.history} != {c_history.history}"
def montecarlo():
    """Run cvnn's Monte-Carlo comparison between the complex and real models."""
    train_set, test_set = get_dataset()
    cvnn_model, _ = get_complex_mnist_model()
    rvnn_model, _ = get_real_mnist_model()
    run_montecarlo(models=[cvnn_model, rvnn_model],
                   dataset=train_set,
                   iterations=30,
                   epochs=20,
                   validation_data=test_set,
                   do_all=True,
                   validation_split=0.0,
                   preprocess_data=False)
def test_dropout():
    """Entry point: run every dropout comparison, forcing CPU-only execution
    so real/complex runs are bit-reproducible across devices.

    (Removed the unused ``importlib.reload`` / ``import tensorflow`` leftovers;
    the reload call itself was already commented out.)
    """
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # hide all GPUs
    dropout()
    mnist(True)
    mnist(False)
    fashion_mnist()
    simple_random_example()
if __name__ == "__main__":
    # Allow running this test module directly, without pytest.
    test_dropout()
| 9,706 | 42.142222 | 120 | py |
cvnn | cvnn-master/tests/test_doc_cvnn_example.py | import numpy as np
import cvnn.layers as complex_layers
import tensorflow as tf
from pdb import set_trace
def get_dataset():
    """Load CIFAR-10 with images cast to complex64 and scaled to [0, 1]."""
    (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
    # Casting to complex64 keeps a zero imaginary part; only the scale changes.
    train_images = train_images.astype(dtype=np.complex64) / 255.0
    test_images = test_images.astype(dtype=np.complex64) / 255.0
    return (train_images, train_labels), (test_images, test_labels)
def test_cifar():
    """Documentation example: train a small complex CNN on CIFAR-10 for one
    epoch and evaluate it (smoke test, no accuracy threshold)."""
    (train_images, train_labels), (test_images, test_labels) = get_dataset()
    # Create your model
    model = tf.keras.models.Sequential()
    model.add(complex_layers.ComplexInput(input_shape=(32, 32, 3)))  # Always use ComplexInput at the start
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(complex_layers.ComplexAvgPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_relu'))
    # Last layer maps the complex output to real class scores via |z|.
    model.add(complex_layers.ComplexDense(10, activation='convert_to_real_with_abs'))
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    # model.summary()
    history = model.fit(train_images, train_labels, epochs=1, validation_data=(test_images, test_labels))
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
def test_regression():
    """A complex dense network fed random complex input must output complex64."""
    batch_shape = (4, 28, 28, 3)
    z = tf.cast(tf.random.normal(batch_shape), tf.complex64)
    net = tf.keras.models.Sequential([
        complex_layers.ComplexInput(input_shape=batch_shape[1:]),
        complex_layers.ComplexFlatten(),
        complex_layers.ComplexDense(units=64, activation='cart_relu'),
        complex_layers.ComplexDense(units=10, activation='linear'),
    ])
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    prediction = net(z)
    assert prediction.dtype == np.complex64
def test_functional_api():
    """Documentation example: build (but do not train) a small U-Net-like
    graph with the functional API, including a transposed-conv upsample and a
    skip connection via ``concatenate``."""
    inputs = complex_layers.complex_input(shape=(128, 128, 3))
    c0 = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(inputs)
    c1 = complex_layers.ComplexConv2D(32, activation='cart_relu', kernel_size=3)(c0)
    c2 = complex_layers.ComplexMaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(c1)
    # Upsample back to c1's spatial size, then concatenate as a skip connection.
    t01 = complex_layers.ComplexConv2DTranspose(5, kernel_size=2, strides=(2, 2), activation='cart_relu')(c2)
    concat01 = tf.keras.layers.concatenate([t01, c1], axis=-1)
    c3 = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(concat01)
    out = complex_layers.ComplexConv2D(4, activation='cart_relu', kernel_size=3)(c3)
    model = tf.keras.Model(inputs, out)
if __name__ == '__main__':
    # Run the documentation examples directly (pytest also collects the test_* names).
    test_functional_api()
    test_regression()
    test_cifar()
| 2,912 | 44.515625 | 109 | py |
cvnn | cvnn-master/tests/test_functional_api.py | from cvnn.layers import ComplexUnPooling2D, complex_input, ComplexMaxPooling2DWithArgmax, \
ComplexUpSampling2D, ComplexMaxPooling2D
import tensorflow as tf
import numpy as np
from pdb import set_trace
def get_img():
    """Return a fixed batch of two 3x3 single-channel complex images,
    shape ``(2, 3, 3, 1)``, used as deterministic test input."""
    img_r = np.array([[
        [0, 1, 2],
        [0, 2, 2],
        [0, 5, 7]
    ], [
        [0, 7, 5],
        [3, 7, 9],
        [4, 5, 3]
    ]]).astype(np.float32)
    img_i = np.array([[
        [0, 4, 5],
        [3, 7, 9],
        [4, 5, 3]
    ], [
        [0, 4, 5],
        [3, 2, 2],
        [4, 8, 9]
    ]]).astype(np.float32)
    img = img_r + 1j * img_i
    # Add the trailing channel dimension expected by 2D layers.
    img = np.reshape(img, (2, 3, 3, 1))
    return img
def unpooling_example():
    """Build and run a functional-API model that max-pools with argmax and
    then unpools back to the input shape.

    :return: the built ``tf.keras.Model`` (already called once on the image).
    """
    x = get_img()
    inputs = complex_input(shape=x.shape[1:])
    # The argmax output is needed to place pooled values back during unpooling.
    max_pool_o, max_arg = ComplexMaxPooling2DWithArgmax(strides=1, data_format="channels_last", name="argmax")(inputs)
    # max_pool_o = ComplexMaxPooling2D(strides=1, data_format="channels_last")(inputs)
    max_unpool = ComplexUnPooling2D(x.shape[1:])
    outputs = max_unpool([max_pool_o, max_arg])
    model = tf.keras.Model(inputs=inputs, outputs=outputs, name="pooling_model")
    # model.summary()
    model(x)
    # print(model(x)[..., 0])
    # set_trace()
    return model
def upsampling_example():
    """Build and run a functional-API model that max-pools and then upsamples
    with nearest-neighbour ``ComplexUpSampling2D``.

    :return: the built ``tf.keras.Model`` (already called once on the image).
    """
    x = get_img()
    inputs = complex_input(shape=x.shape[1:])
    max_pool_o = ComplexMaxPooling2D(data_format="channels_last")(inputs)
    upsampling = ComplexUpSampling2D(size=(2, 2))
    outputs = upsampling(max_pool_o)
    model = tf.keras.Model(inputs=inputs, outputs=outputs, name="pooling_model")
    # model.summary()
    model(x)
    # print(model(x)[..., 0])
    # set_trace()
    return model
def test_functional_api():
    """Smoke-test both functional-API pooling models (build + forward pass)."""
    upsampling_example()
    unpooling_example()
if __name__ == "__main__":
    # Allow running this test module directly, without pytest.
    test_functional_api()
cvnn | cvnn-master/tests/test_initialization.py | import tensorflow as tf
from cvnn import logger
import cvnn.initializers as initializers
from pdb import set_trace
import sys
# Weight-tensor shape and dtype shared by every initializer comparison below.
shape = (3, 3, 3)
dtype = tf.dtypes.float32
def compare(key, tf_init, my_init):
    """Instantiate both initializers with the same seed and verify they
    generate bit-identical tensors.

    :param key: human-readable initializer name used in messages.
    :param tf_init: tensorflow initializer class (reference).
    :param my_init: cvnn initializer class expected to match it.
    :raises AssertionError: if the generated tensors differ. (Previously this
        called ``sys.exit(-1)``, which kills the whole interpreter instead of
        letting pytest report a normal test failure.)
    """
    tf_version = tf_init(seed=100)(shape=shape, dtype=dtype)
    my_version = my_init(seed=100)(shape=shape, dtype=dtype)
    comparison = tf_version.numpy() == my_version.numpy()
    if comparison.all():
        print(f"{key} initialization works fine")
    else:
        logger.error(f"ERROR! FAIL! {key} initialization does not work!")
        raise AssertionError(
            f"{key}: tensorflow version {tf_version} != own version {my_version}")
# Initializer name -> [tensorflow reference class, cvnn counterpart].
tests = {
    "He Uniform": [tf.initializers.he_uniform, initializers.ComplexHeUniform],
    "Glorot Uniform": [tf.initializers.GlorotUniform, initializers.ComplexGlorotUniform],
    "He Normal": [tf.initializers.he_normal, initializers.ComplexHeNormal],
    "Glorot Normal": [tf.initializers.GlorotNormal, initializers.ComplexGlorotNormal]
}
def test_inits():
    """Compare every cvnn initializer against its tensorflow reference."""
    for name, (tf_init, own_init) in tests.items():
        compare(name, tf_init, own_init)
if __name__ == "__main__":
    # Allow running this test module directly, without pytest.
    test_inits()
| 1,191 | 28.8 | 89 | py |
cvnn | cvnn-master/tests/test_custom_layers.py | import numpy as np
from cvnn.layers import ComplexDense, ComplexFlatten, ComplexInput, ComplexConv2D, ComplexMaxPooling2D, \
ComplexAvgPooling2D, ComplexConv2DTranspose, ComplexUnPooling2D, ComplexMaxPooling2DWithArgmax, \
ComplexUpSampling2D, ComplexBatchNormalization, ComplexAvgPooling1D, ComplexPolarAvgPooling2D
import cvnn.layers as complex_layers
from tensorflow.keras.models import Sequential
import tensorflow as tf
import tensorflow_datasets as tfds
from pdb import set_trace
"""
This module tests:
Correct result of Complex AVG and MAX pooling layers.
Init ComplexConv2D layer and verifies output dtype and shape.
Trains using:
ComplexDense
ComplexFlatten
ComplexInput
ComplexDropout
"""
@tf.autograph.experimental.do_not_convert
def dense_example():
    """ComplexFlatten + ComplexDense must keep the complex dtype and produce
    the expected output shapes, both when called directly and inside a
    Sequential model."""
    img_r = np.array([[
        [0, 1, 2],
        [0, 2, 2],
        [0, 5, 7]
    ], [
        [0, 4, 5],
        [3, 7, 9],
        [4, 5, 3]
    ]]).astype(np.float32)
    img_i = np.array([[
        [0, 4, 5],
        [3, 7, 9],
        [4, 5, 3]
    ], [
        [0, 4, 5],
        [3, 7, 9],
        [4, 5, 3]
    ]]).astype(np.float32)
    img = img_r + 1j * img_i
    c_flat = ComplexFlatten()
    c_dense = ComplexDense(units=10)
    # Direct layer calls: batch of 2 flattened 3x3 images -> (2, 10) complex.
    res = c_dense(c_flat(img.astype(np.complex64)))
    assert res.shape == [2, 10]
    assert res.dtype == tf.complex64
    model = tf.keras.models.Sequential()
    model.add(ComplexInput(input_shape=(3, 3)))
    model.add(ComplexFlatten())
    model.add(ComplexDense(32, activation='cart_relu'))
    model.add(ComplexDense(32))
    assert model.output_shape == (None, 32)
    res = model(img.astype(np.complex64))
@tf.autograph.experimental.do_not_convert
def serial_layers():
    """Build Sequential models out of complex layers and run a forward pass
    (shape smoke test; no assertions)."""
    model = Sequential()
    model.add(ComplexDense(32, activation='relu', input_shape=(32, 32, 3)))
    model.add(ComplexDense(32))
    print(model.output_shape)
    img_r = np.array([[
        [0, 1, 2],
        [0, 2, 2],
        [0, 5, 7]
    ], [
        [0, 4, 5],
        [3, 7, 9],
        [4, 5, 3]
    ]]).astype(np.float32)
    img_i = np.array([[
        [0, 4, 5],
        [3, 7, 9],
        [4, 5, 3]
    ], [
        [0, 4, 5],
        [3, 7, 9],
        [4, 5, 3]
    ]]).astype(np.float32)
    img = img_r + 1j * img_i
    model = Sequential()
    # model.add(ComplexInput(img.shape[1:]))
    # Input shape is given to ComplexFlatten instead of a ComplexInput layer.
    model.add(ComplexFlatten(input_shape=img.shape[1:]))
    model.add(ComplexDense(units=10))
    res = model(img)
@tf.autograph.experimental.do_not_convert
def shape_ad_dtype_of_conv2d():
    """ComplexConv2D with 'same' padding keeps spatial dims and complex dtype."""
    batch_shape = (4, 28, 28, 3)
    z = tf.cast(tf.random.normal(batch_shape), tf.complex64)
    out = ComplexConv2D(2, 3, activation='cart_relu', padding="same",
                        input_shape=batch_shape[1:], dtype=z.dtype)(z)
    assert out.shape == (4, 28, 28, 2)
    assert out.dtype == tf.complex64
@tf.autograph.experimental.do_not_convert
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32` in the [0, 1] range."""
    normalized = tf.cast(image, tf.float32) / 255.
    return normalized, label
def get_dataset():
    """Load MNIST as two batched, normalized ``tf.data`` pipelines.

    :return: tuple ``(ds_train, ds_test)``; shuffling is disabled so runs
        are deterministic.
    """
    (ds_train, ds_test), ds_info = tfds.load(
        'mnist',
        split=['train', 'test'],
        shuffle_files=False,
        as_supervised=True,
        with_info=True,
    )
    ds_train = ds_train.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_train = ds_train.cache()
    # ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
    ds_train = ds_train.batch(128)
    ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
    # Test split caches AFTER batching (already-batched tensors are cached).
    ds_test = ds_test.map(normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_test = ds_test.batch(128)
    ds_test = ds_test.cache()
    ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
    return ds_train, ds_test
def get_img():
    """Return a fixed batch of two 3x3 single-channel complex images,
    shape ``(2, 3, 3, 1)``, used as deterministic input by the pooling tests."""
    img_r = np.array([[
        [0, 1, 2],
        [0, 2, 2],
        [0, 5, 7]
    ], [
        [0, 7, 5],
        [3, 7, 9],
        [4, 5, 3]
    ]]).astype(np.float32)
    img_i = np.array([[
        [0, 4, 5],
        [3, 7, 9],
        [4, 5, 3]
    ], [
        [0, 4, 5],
        [3, 2, 2],
        [4, 8, 9]
    ]]).astype(np.float32)
    img = img_r + 1j * img_i
    # Add the trailing channel dimension expected by 2D layers.
    img = np.reshape(img, (2, 3, 3, 1))
    return img
@tf.autograph.experimental.do_not_convert
def complex_avg_pool_1d():
    """ComplexAvgPooling1D must match keras AveragePooling1D on real input
    (several stride/padding combinations) and produce the hand-computed
    result on complex input."""
    x = tf.constant([1., 2., 3., 4., 5.])
    x = tf.reshape(x, [1, 5, 1])
    avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2, strides=1, padding='valid')
    tf_res = avg_pool_1d(x)
    own_res = ComplexAvgPooling1D(pool_size=2, strides=1, padding='valid')(x)
    assert np.all(tf_res.numpy() == own_res.numpy())
    avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2, strides=2, padding='valid')
    tf_res = avg_pool_1d(x)
    own_res = ComplexAvgPooling1D(pool_size=2, strides=2, padding='valid')(x)
    assert np.all(tf_res.numpy() == own_res.numpy())
    avg_pool_1d = tf.keras.layers.AveragePooling1D(pool_size=2, strides=1, padding='same')
    tf_res = avg_pool_1d(x)
    own_res = ComplexAvgPooling1D(pool_size=2, strides=1, padding='same')(x)
    assert np.all(tf_res.numpy() == own_res.numpy())
    # Complex case: real and imaginary parts are averaged independently.
    img_r = np.array([[
        [0, 1, 2, 0, 2, 2, 0, 5, 7]
    ], [
        [0, 4, 5, 3, 7, 9, 4, 5, 3]
    ]]).astype(np.float32)
    img_i = np.array([[
        [0, 4, 5, 3, 7, 9, 4, 5, 3]
    ], [
        [0, 4, 5, 3, 2, 2, 4, 8, 9]
    ]]).astype(np.float32)
    img = img_r + 1j * img_i
    img = np.reshape(img, (2, 9, 1))
    avg_pool = ComplexAvgPooling1D()
    res = avg_pool(img.astype(np.complex64))
    expected = tf.expand_dims(tf.convert_to_tensor([[0.5 + 2.j, 1. + 4.j, 2. + 8.j, 2.5 + 4.5j],
                                                    [2. + 2.j, 4. + 4.j, 8. + 2.j, 4.5 + 6.j]], dtype=tf.complex64),
                              axis=-1)
    assert np.all(res.numpy() == expected.numpy())
@tf.autograph.experimental.do_not_convert
def complex_max_pool_2d(test_unpool=True):
    """Max pooling on complex images: both cvnn variants must agree, match a
    hand-computed result, optionally unpool back to the expected sparse
    tensor, and match keras MaxPooling2D on real input.

    :param test_unpool: if True also check ComplexUnPooling2D round-trip.
    """
    img = get_img()
    max_pool = ComplexMaxPooling2DWithArgmax(strides=1, data_format="channels_last")
    max_pool_2 = ComplexMaxPooling2D(strides=1, data_format="channels_last")
    res, argmax = max_pool(img.astype(np.complex64))
    res2 = max_pool_2(img.astype(np.complex64))
    expected_res = np.array([
        [[
            [2. + 7.j],
            [2. + 9.j]],
            [[2. + 7.j],
             [2. + 9.j]]],
        [[
            [7. + 4.j],
            [9. + 2.j]],
            [
                [5. + 8.j],
                [3. + 9.j]]]
    ])
    assert np.all(res.numpy() == res2.numpy())
    assert (res.numpy() == expected_res.astype(np.complex64)).all()
    if test_unpool:
        max_unpooling = ComplexUnPooling2D(img.shape[1:])
        unpooled = max_unpooling([res, argmax])
        # Pooled values land at their argmax positions; duplicate argmax
        # positions accumulate (hence 4.+14.j etc.); everything else is 0.
        expected_unpooled = np.array([[[0. + 0.j, 0. + 0.j, 0. + 0.j],
                                       [0. + 0.j, 4. + 14.j, 4. + 18.j],
                                       [0. + 0.j, 0. + 0.j, 0. + 0.j]],
                                      [[0. + 0.j, 7. + 4.j, 0. + 0.j],
                                       [0. + 0.j, 0. + 0.j, 9. + 2.j],
                                       [0. + 0.j, 5. + 8.j, 3. + 9.j]]]).reshape(2, 3, 3, 1)
        assert np.all(unpooled.numpy() == expected_unpooled)
    x = tf.constant([[1., 2., 3.],
                     [4., 5., 6.],
                     [7., 8., 9.]])
    x = tf.reshape(x, [1, 3, 3, 1])
    max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid')
    # NOTE: this local shadows the enclosing function's name (harmless here).
    complex_max_pool_2d = ComplexMaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid')
    assert np.all(max_pool_2d(x) == complex_max_pool_2d(x))
def new_max_unpooling_2d_test():
    """Max-pool-with-argmax followed by unpooling on a 2-channel complex
    image: the unpooled tensor must restore the input shape and dtype.

    (The previous version computed ``unpooled`` but asserted nothing.)
    """
    img = get_img()
    # Stack the same image twice along the channel axis -> shape (2, 3, 3, 2).
    new_imag = tf.stack((img.reshape((2, 3, 3)), img.reshape((2, 3, 3))), axis=-1)
    max_pool = ComplexMaxPooling2DWithArgmax(strides=1, data_format="channels_last")
    res, argmax = max_pool(tf.cast(new_imag, dtype=np.complex64))
    max_unpooling = ComplexUnPooling2D(new_imag.shape[1:])
    unpooled = max_unpooling([res, argmax])
    assert unpooled.shape == new_imag.shape
    assert unpooled.dtype == tf.complex64
@tf.autograph.experimental.do_not_convert
def complex_avg_pool():
    """ComplexAvgPooling2D with stride 1 must reproduce the hand-computed
    2x2 window averages (real and imaginary parts averaged independently)."""
    img = get_img()
    avg_pool = ComplexAvgPooling2D(strides=1)
    res = avg_pool(img.astype(np.complex64))
    expected_res = np.array([[[[0.75 + 3.5j], [1.75 + 6.25j]], [[1.75 + 4.75j], [4. + 6.j]]],
                             [[[4.25 + 2.25j], [7 + 3.25j]], [[4.75 + 4.25j], [6. + 5.25j]]]])
    assert (res.numpy() == expected_res.astype(np.complex64)).all()
@tf.autograph.experimental.do_not_convert
def complex_polar_avg_pool():
    """ComplexPolarAvgPooling2D on unit-magnitude inputs: the pooled output
    must keep magnitude 1 (averaging is done in polar form, not cartesian),
    and the phase must be the average of the input phases."""
    avg_pool = ComplexPolarAvgPooling2D(strides=1)
    # All inputs lie on the unit circle (values in {1, -1, j, -j, 0+...}).
    img_r = np.array([
        [0, 1, -1],
        [0, 1, 0],
        [0, 1, 0]
    ]).astype(np.float32)
    img_i = np.array([
        [1, 0, 0],
        [1, 0, -1],
        [1, 0, -1]
    ]).astype(np.float32)
    img = img_r + 1j * img_i
    img = np.reshape(img, (1, 3, 3, 1))
    res = avg_pool(img.astype(np.complex64))
    assert np.allclose(tf.math.abs(res).numpy(), 1.)
    img_r = np.array([
        [1, 0],
        [-1, 0]
    ]).astype(np.float32)
    img_i = np.array([
        [0, 1],
        [0, -1]
    ]).astype(np.float32)
    img = img_r + 1j * img_i
    img = np.reshape(img, (1, 2, 2, 1))
    res = avg_pool(img.astype(np.complex64))
    assert np.allclose(tf.math.abs(res).numpy(), 1.)
    img_r = np.array([
        [1, 0],
        [-1, 0]
    ]).astype(np.float32)
    img_i = np.array([
        [0, 1],
        [0, 1]
    ]).astype(np.float32)
    img = img_r + 1j * img_i
    img = np.reshape(img, (1, 2, 2, 1))
    res = avg_pool(img.astype(np.complex64))
    assert np.allclose(tf.math.abs(res).numpy(), 1.)
    # Phases 0, pi/2, pi, pi/2 average to pi/2.
    assert np.allclose(tf.math.angle(res).numpy(), 1.57079632679)  # pi/2
@tf.autograph.experimental.do_not_convert
def complex_conv_2d_transpose():
    """ComplexConv2DTranspose with constant kernels must reproduce
    hand-computed transposed-convolution outputs (real case) and keep
    complex64 dtype in the complex case."""
    value = [[1, 2, 1], [2, 1, 2], [1, 1, 2]]
    init = tf.constant_initializer(value)
    transpose_2 = ComplexConv2DTranspose(1, kernel_size=3, kernel_initializer=init, dtype=np.float32)
    input = np.array([[55, 52], [57, 50]]).astype(np.float32).reshape((1, 2, 2, 1))
    expected = np.array([
        [55., 162., 159., 52.],
        [167., 323., 319., 154.],
        [169., 264., 326., 204.],
        [57., 107., 164., 100.]
    ], dtype=np.float32)
    assert np.allclose(transpose_2(input).numpy().reshape((4, 4)), expected)  # TODO: Check why the difference
    value = [[1, 2], [2, 1]]
    init = tf.constant_initializer(value)
    transpose_3 = ComplexConv2DTranspose(1, kernel_size=2, kernel_initializer=init, dtype=np.float32)
    expected = np.array([
        [55., 162., 104],
        [167., 323., 152],
        [114., 157, 50]
    ], dtype=np.float32)
    assert np.allclose(transpose_3(input).numpy().reshape((3, 3)), expected)
    # Complex input with zero imaginary part: output dtype must be complex64.
    complex_transpose = ComplexConv2DTranspose(1, kernel_size=2, dtype=np.complex64)
    complex_input = (input + 1j * np.zeros(input.shape)).astype(np.complex64)
    assert complex_transpose(complex_input).dtype == tf.complex64
@tf.autograph.experimental.do_not_convert
def upsampling_near_neighbour():
    """Nearest-neighbour ComplexUpSampling2D: check hand-computed outputs for
    asymmetric sizes, equality with keras UpSampling2D via the real
    equivalent, and the channels_first data format."""
    input_shape = (2, 2, 1, 3)
    x = np.arange(np.prod(input_shape)).reshape(input_shape).astype(np.float32)
    z = tf.complex(real=x, imag=x)
    upsample = ComplexUpSampling2D(size=(2, 3))
    y = upsample(z)
    expected = np.array([[[[0. + 0.j, 1. + 1.j, 2. + 2.j],
                           [0. + 0.j, 1. + 1.j, 2. + 2.j],
                           [0. + 0.j, 1. + 1.j, 2. + 2.j]],
                          [[0. + 0.j, 1. + 1.j, 2. + 2.j],
                           [0. + 0.j, 1. + 1.j, 2. + 2.j],
                           [0. + 0.j, 1. + 1.j, 2. + 2.j]],
                          [[3. + 3.j, 4. + 4.j, 5. + 5.j],
                           [3. + 3.j, 4. + 4.j, 5. + 5.j],
                           [3. + 3.j, 4. + 4.j, 5. + 5.j]],
                          [[3. + 3.j, 4. + 4.j, 5. + 5.j],
                           [3. + 3.j, 4. + 4.j, 5. + 5.j],
                           [3. + 3.j, 4. + 4.j, 5. + 5.j]]],
                         [[[6. + 6.j, 7. + 7.j, 8. + 8.j],
                           [6. + 6.j, 7. + 7.j, 8. + 8.j],
                           [6. + 6.j, 7. + 7.j, 8. + 8.j]],
                          [[6. + 6.j, 7. + 7.j, 8. + 8.j],
                           [6. + 6.j, 7. + 7.j, 8. + 8.j],
                           [6. + 6.j, 7. + 7.j, 8. + 8.j]],
                          [[9. + 9.j, 10. + 10.j, 11. + 11.j],
                           [9. + 9.j, 10. + 10.j, 11. + 11.j],
                           [9. + 9.j, 10. + 10.j, 11. + 11.j]],
                          [[9. + 9.j, 10. + 10.j, 11. + 11.j],
                           [9. + 9.j, 10. + 10.j, 11. + 11.j],
                           [9. + 9.j, 10. + 10.j, 11. + 11.j]]]])
    assert np.all(y.numpy() == expected)
    # Upsample only along the width axis.
    upsample = ComplexUpSampling2D(size=(1, 3))
    y = upsample(z)
    expected = np.array([[[[0. + 0.j, 1. + 1.j, 2. + 2.j],
                           [0. + 0.j, 1. + 1.j, 2. + 2.j],
                           [0. + 0.j, 1. + 1.j, 2. + 2.j]],
                          [[3. + 3.j, 4. + 4.j, 5. + 5.j],
                           [3. + 3.j, 4. + 4.j, 5. + 5.j],
                           [3. + 3.j, 4. + 4.j, 5. + 5.j]]],
                         [[[6. + 6.j, 7. + 7.j, 8. + 8.j],
                           [6. + 6.j, 7. + 7.j, 8. + 8.j],
                           [6. + 6.j, 7. + 7.j, 8. + 8.j]],
                          [[9. + 9.j, 10. + 10.j, 11. + 11.j],
                           [9. + 9.j, 10. + 10.j, 11. + 11.j],
                           [9. + 9.j, 10. + 10.j, 11. + 11.j]]]])
    assert np.all(y.numpy() == expected)
    upsample = ComplexUpSampling2D(size=(1, 2))
    y = upsample(z)
    # print(y)
    # The real-equivalent layer must match keras' UpSampling2D exactly.
    y_tf = tf.keras.layers.UpSampling2D(size=(1, 2))(x)
    my_y = upsample.get_real_equivalent()(x)
    assert np.all(my_y == y_tf)
    x = tf.convert_to_tensor([[[[1., 2.], [3., 4.]]]])
    upsample = ComplexUpSampling2D(size=2, data_format='channels_first')
    my_y = upsample(x)
    y_tf = tf.keras.layers.UpSampling2D(size=(2, 2), data_format='channels_first')(x)
    assert np.all(my_y == y_tf)
@tf.autograph.experimental.do_not_convert
def upsampling_bilinear_corners_aligned():
    """Bilinear ComplexUpSampling2D with ``align_corners=True`` must match
    reference values computed with pytorch's ``nn.Upsample`` (tf has no
    align_corners mode, hence the external references)."""
    # Pytorch examples
    # https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html
    x = tf.convert_to_tensor([[[[1., 2.], [3., 4.]]]])
    z = tf.complex(real=x, imag=x)
    expected = np.array([[[[1.0000, 1.3333, 1.6667, 2.0000],
                           [1.6667, 2.0000, 2.3333, 2.6667],
                           [2.3333, 2.6667, 3.0000, 3.3333],
                           [3.0000, 3.3333, 3.6667, 4.0000]]]])
    upsample = ComplexUpSampling2D(size=2, interpolation='bilinear', data_format='channels_first', align_corners=True)
    y_complex = upsample(z)
    assert np.allclose(expected, tf.math.real(y_complex).numpy(), 0.0001)
    x = tf.convert_to_tensor([[[[1., 2., 0.],
                                [3., 4., 0.],
                                [0., 0., 0.]]]])
    expected = np.array([[[[1.0000, 1.4000, 1.8000, 1.6000, 0.8000, 0.0000],
                           [1.8000, 2.2000, 2.6000, 2.2400, 1.1200, 0.0000],
                           [2.6000, 3.0000, 3.4000, 2.8800, 1.4400, 0.0000],
                           [2.4000, 2.7200, 3.0400, 2.5600, 1.2800, 0.0000],
                           [1.2000, 1.3600, 1.5200, 1.2800, 0.6400, 0.0000],
                           [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]]]])
    upsample = ComplexUpSampling2D(size=2, interpolation='bilinear', data_format='channels_first', align_corners=True)
    y = upsample(x)
    assert np.allclose(expected, tf.math.real(y).numpy(), 0.00001)
    # https://blogs.sas.com/content/iml/2020/05/18/what-is-bilinear-interpolation.html#:~:text=Bilinear%20interpolation%20is%20a%20weighted,the%20point%20and%20the%20corners.&text=The%20only%20important%20formula%20is,x%20%5B0%2C1%5D.
    x = tf.convert_to_tensor([[[[0., 4.], [2., 1.]]]])
    z = tf.complex(real=x, imag=x)
    upsample = ComplexUpSampling2D(size=3, interpolation='bilinear', data_format='channels_first', align_corners=True)
    y_complex = upsample(z)
    expected = np.array([[[[0. + 0.j, 0.8 + 0.8j,
                            1.6 + 1.6j, 2.4 + 2.4j,
                            3.2 + 3.2j, 4. + 4.j],
                           [0.4 + 0.4j, 1. + 1.j,
                            1.6 + 1.6j, 2.2 + 2.2j,
                            2.8 + 2.8j, 3.4 + 3.4j],
                           [0.8 + 0.8j, 1.2 + 1.2j,
                            1.6 + 1.6j, 2. + 2.j,
                            2.4 + 2.4j, 2.8 + 2.8j],
                           [1.2 + 1.2j, 1.4 + 1.4j,
                            1.6 + 1.6j, 1.8 + 1.8j,
                            2. + 2.j, 2.2 + 2.2j],
                           [1.6 + 1.6j, 1.6 + 1.6j,
                            1.6 + 1.6j, 1.6 + 1.6j,
                            1.6 + 1.6j, 1.6 + 1.6j],
                           [2. + 2.j, 1.8 + 1.8j,
                            1.6 + 1.6j, 1.4 + 1.4j,
                            1.2 + 1.2j, 1. + 1.j]]]])
    assert np.allclose(expected, y_complex.numpy(), 0.000001)
@tf.autograph.experimental.do_not_convert
def upsampling_bilinear_corner_not_aligned():
    """Bilinear ComplexUpSampling2D (default ``align_corners=False``) must
    match keras' UpSampling2D on the real part, for several integer scale
    factors and both data formats."""
    # Pytorch
    # https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html
    x = tf.convert_to_tensor([[[[1., 2.], [3., 4.]]]])
    z = tf.complex(real=x, imag=x)
    y_tf = tf.keras.layers.UpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(x)
    y_own = ComplexUpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(z)
    # set_trace()
    assert np.all(y_tf == tf.math.real(y_own).numpy())
    x = tf.convert_to_tensor([[[[1., 2., 0.],
                                [3., 4., 0.],
                                [0., 0., 0.]]]])
    z = tf.complex(real=x, imag=x)
    y_tf = tf.keras.layers.UpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(x)
    y_own = ComplexUpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(z)
    assert np.all(y_tf == tf.math.real(y_own).numpy())
    x = tf.convert_to_tensor([[[[1., 2.], [3., 4.]]]])
    z = tf.complex(real=x, imag=x)
    # Larger scale factors: allclose instead of exact equality (float rounding).
    y_tf = tf.keras.layers.UpSampling2D(size=3, interpolation='bilinear', data_format='channels_first')(x)
    y_own = ComplexUpSampling2D(size=3, interpolation='bilinear', data_format='channels_first')(z)
    assert np.allclose(y_tf, tf.math.real(y_own).numpy())
    y_tf = tf.keras.layers.UpSampling2D(size=6, interpolation='bilinear', data_format='channels_first')(x)
    y_own = ComplexUpSampling2D(size=6, interpolation='bilinear', data_format='channels_first')(z)
    assert np.allclose(y_tf, tf.math.real(y_own).numpy())
    y_tf = tf.keras.layers.UpSampling2D(size=8, interpolation='bilinear', data_format='channels_first')(x)
    y_own = ComplexUpSampling2D(size=8, interpolation='bilinear', data_format='channels_first')(z)
    assert np.all(y_tf == tf.math.real(y_own).numpy())
    # to test bicubic= https://discuss.pytorch.org/t/what-we-should-use-align-corners-false/22663/17
    # https://www.tensorflow.org/api_docs/python/tf/keras/layers/UpSampling2D
    input_shape = (2, 2, 1, 3)
    x = np.arange(np.prod(input_shape)).reshape(input_shape)
    y_tf = tf.keras.layers.UpSampling2D(size=(1, 2), interpolation='bilinear')(x)
    y_own = ComplexUpSampling2D(size=(1, 2), interpolation='bilinear')(x)
    assert np.all(y_tf == y_own)
@tf.autograph.experimental.do_not_convert
def upsampling():
    """Run all the upsampling checks (sanity comparison + sub-tests)."""
    x = tf.convert_to_tensor([[[[1., 2.], [3., 4.]]]])
    z = tf.complex(real=x, imag=x)
    y_tf = tf.keras.layers.UpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(x)
    y_cvnn = ComplexUpSampling2D(size=2, interpolation='bilinear', data_format='channels_first')(z)
    assert np.all(y_tf == tf.math.real(y_cvnn).numpy())
    upsampling_near_neighbour()
    # NOTE(review): the align_corners sub-test is disabled (and the commented
    # name does not match `upsampling_bilinear_corners_aligned`) -- confirm
    # whether it should be re-enabled.
    # test_upsampling_bilinear_corners_aligned()
    upsampling_bilinear_corner_not_aligned()
def check_proximity(x1, x2, name: str, th: float = 0.1) -> bool:
    """Return True when ``x1`` and ``x2`` are element-wise equal within ``th``.

    Prints a notice when the operands differ but remain within tolerance.

    :param x1: first array-like operand.
    :param x2: second array-like operand (must broadcast against ``x1``).
    :param name: label used in the informational message.
    :param th: maximum tolerated absolute difference (previously hard-coded
        to 0.1; the default preserves the old behaviour).
    :return: True if ``max(|x1 - x2|) < th``, False otherwise.
    """
    diff = np.max(np.abs(x1 - x2))
    if 0 < diff < th:
        print(f"{name} are equal with an error of {diff}")
    # NOTE(review): a NaN diff falls through to True (NaN >= th is False),
    # matching the original behaviour -- confirm whether that is intended.
    if diff >= th:
        return False
    return True
def batch_norm():
    """ComplexBatchNormalization on real float32 input must track keras'
    BatchNormalization: outputs, moving statistics, gamma and beta must all
    stay within `check_proximity`'s tolerance, both before and after a
    training step; the two covariance methods must also agree."""
    # z = tf.transpose(tf.convert_to_tensor([[[-1, 1] * 10] * 20] * 2))
    # c_bn = ComplexBatchNormalization(dtype=np.float32)
    # c_out = c_bn(z, training=True)
    # # set_trace()
    # assert check_proximity(c_out, z, "Normalized input")
    z = np.random.rand(3, 43, 12, 10)  # + np.random.rand(3, 43, 12, 75)*1j
    # z = np.random.rand(100, 10)
    bn = tf.keras.layers.BatchNormalization(epsilon=0)
    c_bn = ComplexBatchNormalization(dtype=np.float32)  # If I use the complex64 then the init is different
    c_bn_2 = ComplexBatchNormalization(dtype=np.float32, cov_method=2)
    # NOTE: `input` shadows the builtin; kept as-is for minimal diff.
    input = tf.convert_to_tensor(z.astype(np.float32), dtype=np.float32)
    # Inference mode: compare output and all tracked statistics.
    out = bn(input, training=False)
    c_out = c_bn(input, training=False)
    assert check_proximity(out, c_out, "Results before training")
    assert check_proximity(bn.moving_mean, c_bn.moving_mean, "Moving mean before training")
    assert check_proximity(bn.moving_variance, c_bn.moving_var[..., 0, 0], "Moving variance before training")
    assert check_proximity(bn.gamma, c_bn.gamma, "Gamma before training")
    assert check_proximity(bn.beta, c_bn.beta, "Beta before training")
    # Training mode: moving statistics get updated; must still agree.
    out = bn(input, training=True)
    c_out = c_bn(input, training=True)
    assert check_proximity(out, c_out, "Results after training")
    assert check_proximity(bn.moving_mean, c_bn.moving_mean, "Moving mean after training")
    assert check_proximity(bn.moving_variance, c_bn.moving_var[..., 0, 0], "Moving variance after training")
    assert check_proximity(bn.gamma, c_bn.gamma, "Gamma after training")
    assert check_proximity(bn.beta, c_bn.beta, "Beta after training")
    # Both covariance methods must produce the same results and statistics.
    c_out_2 = c_bn_2(input, training=True)
    assert check_proximity(c_out, c_out_2, "Method comparison results after training")
    assert check_proximity(c_bn_2.moving_mean, c_bn.moving_mean, "Method comparison Moving mean after training")
    assert check_proximity(c_bn_2.moving_var, c_bn.moving_var, "Method comparison Moving variance after training")
    assert check_proximity(c_bn_2.gamma, c_bn.gamma, "Method comparison Gamma after training")
    assert check_proximity(c_bn_2.beta, c_bn.beta, "Method comparison Beta after training")
def pooling_layers():
    """Run every pooling-layer unit check in one call."""
    for pooling_check in (complex_polar_avg_pool,
                          complex_max_pool_2d,
                          complex_avg_pool_1d,
                          complex_avg_pool):
        pooling_check()
@tf.autograph.experimental.do_not_convert
def test_layers():
    """Pytest entry point: run all layer checks defined in this module.

    NOTE(review): ``serial_layers`` is never called here -- confirm whether
    that omission is intentional.
    """
    pooling_layers()
    new_max_unpooling_2d_test()
    batch_norm()
    upsampling()
    complex_conv_2d_transpose()
    shape_ad_dtype_of_conv2d()
    dense_example()
if __name__ == "__main__":
    # Allow running this test module directly, without pytest.
    test_layers()
| 22,749 | 40.288566 | 234 | py |
cvnn | cvnn-master/tests/test_tf_vs_cvnn.py | from examples.cifar410_example import test_cifar10
from examples.fashion_mnist_example import test_fashion_mnist
from examples.mnist_dataset_example import test_mnist
from examples.u_net_example import test_unet
from importlib import reload
import os
import tensorflow as tf
def test_tf_vs_cvnn():
    """
    Compares cvnn against plain tensorflow when working with float (real) numbers:
    in that case the cvnn examples are expected to give the exact same values as tensorflow.
    """
    reload(tf)  # fresh tf module state so the device-visibility settings below can still be applied
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # hide GPUs at the CUDA level ...
    tf.config.set_visible_devices([], 'GPU')  # ... and at the tf level: run on CPU for exact reproducibility
    visible_devices = tf.config.get_visible_devices()
    for device in visible_devices:
        assert device.device_type != 'GPU'  # sanity check: no GPU is visible
    test_unet()
    test_mnist()
    test_fashion_mnist()
    test_cifar10()
if __name__ == '__main__':
    test_tf_vs_cvnn()
| 798 | 27.535714 | 115 | py |
cvnn | cvnn-master/tests/test_output_dtype.py | import tensorflow as tf
import numpy as np
import cvnn.layers as complex_layers
from pdb import set_trace
def all_layers_model():
    """
    Creates a model using all possible layers to assert no layer changes the dtype to real.
    """
    input_shape = (4, 28, 28, 3)  # (batch, height, width, channels)
    x = tf.cast(tf.random.normal(input_shape), tf.complex64)  # random complex input (imaginary part zero)
    model = tf.keras.models.Sequential()
    model.add(complex_layers.ComplexInput(input_shape=input_shape[1:]))  # Always use ComplexInput at the start
    model.add(complex_layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(complex_layers.ComplexAvgPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(64, (3, 3), activation='cart_sigmoid'))
    model.add(complex_layers.ComplexDropout(0.5))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2DTranspose(32, (2, 2)))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(64, activation='cart_tanh'))
    model.compile(loss=tf.keras.losses.MeanAbsoluteError(), optimizer='adam', metrics=['accuracy'])
    y = model(x)
    assert y.dtype == np.complex64  # output must still be complex after passing through every layer
    return model
def test_output_dtype():
    # Building and running the model performs the dtype assertion above.
    model = all_layers_model()
if __name__ == "__main__":
    test_output_dtype()
| 1,280 | 34.583333 | 111 | py |
cvnn | cvnn-master/tests/test_losses.py | from cvnn.losses import ComplexAverageCrossEntropy, ComplexWeightedAverageCrossEntropy, \
ComplexAverageCrossEntropyIgnoreUnlabeled
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from cvnn.layers import ComplexDense, complex_input
from pdb import set_trace
def to_categorical_unlabeled(sparse, classes=2):
    """Convert a sparse (integer) label array to one-hot, treating 0 as "unlabeled".

    Label value ``v > 0`` maps to a one-hot vector with a 1 at channel ``v - 1``;
    label 0 yields an all-zero vector (no class set), which the losses/metrics
    under test interpret as an unlabeled pixel.

    :param sparse: integer array-like of labels. Any rank is supported
        (the original loop assumed rank 3, i.e. a batch of 2D images).
    :param classes: number of channels of the one-hot encoding.
    :return: float ndarray of shape ``sparse.shape + (classes,)``.
    """
    sparse = np.asarray(sparse)  # also accepts nested lists now
    cat = np.zeros(shape=sparse.shape + (classes,))
    labeled = np.nonzero(sparse)  # indices of every labeled (non-zero) entry
    # Fancy-indexed scatter: set channel (v - 1) for each labeled entry in one
    # vectorized step, replacing the original triple Python loop.
    cat[labeled + (sparse[labeled] - 1,)] = 1
    return cat
def averaging_method():
    # Here, I see that the loss is not computed per image, but per pixel:
    # reducing the per-pixel cross-entropy map with one global mean gives the
    # same value as tf's CategoricalCrossentropy class.
    y_true = np.array([
        [[1, 1], [1, 1]],
        [[0, 0], [0, 2]]
    ])
    y_pred = np.array([
        [[1, 1], [1, 2]],
        [[1, 1], [1, 1]]
    ])
    y_true = to_categorical_unlabeled(y_true)
    y_pred = to_categorical_unlabeled(y_pred)
    class_loss_result = CategoricalCrossentropy()(y_pred=y_pred, y_true=y_true)
    fun_loss_result = tf.keras.metrics.categorical_crossentropy(y_pred=y_pred, y_true=y_true)
    # Averaging per-image first and then across images equals the global mean.
    two_dim_mean = np.mean(fun_loss_result.numpy(), axis=(1, 2))
    mean = np.mean(fun_loss_result.numpy())
    assert np.allclose(mean, class_loss_result)
    assert np.mean(two_dim_mean) == mean
def no_label_test():
    # Pixels whose one-hot target is all zeros (label 0 = unlabeled) must not
    # contribute to ComplexAverageCrossEntropyIgnoreUnlabeled: the partially
    # labeled target (where the labeled pixel matches the prediction) must give
    # the same loss as the fully labeled target equal to the prediction.
    y_true = np.array([
        [[0, 0], [0, 2]]
    ])
    y_true_2 = np.array([
        [[1, 2], [1, 2]]
    ])
    y_pred = np.array([
        [[1, 2], [1, 2]]
    ])
    y_true = to_categorical_unlabeled(y_true)
    y_pred = to_categorical_unlabeled(y_pred)
    y_true_2 = to_categorical_unlabeled(y_true_2)
    tf_result = ComplexAverageCrossEntropyIgnoreUnlabeled()(y_pred=tf.complex(y_pred, y_pred), y_true=y_true)
    tf_result_2 = ComplexAverageCrossEntropyIgnoreUnlabeled()(y_pred=tf.complex(y_pred, y_pred), y_true=y_true_2)
    assert tf_result == tf_result_2
def ace():
    # ComplexAverageCrossEntropy must match tf's CategoricalCrossentropy both for
    # real-valued predictions and for complex predictions whose real and
    # imaginary parts are equal (the ACE averages the two parts' losses).
    y_pred = np.random.rand(3, 43, 12, 10)
    y_true = np.random.rand(3, 43, 12, 10)
    tf_result = CategoricalCrossentropy()(y_pred=y_pred, y_true=y_true)
    own_result = ComplexAverageCrossEntropy()(y_pred=tf.complex(y_pred, y_pred), y_true=y_true)
    own_real_result = ComplexAverageCrossEntropy()(y_pred=tf.convert_to_tensor(y_pred, dtype=np.float64),
                                                   y_true=y_true)
    assert tf_result == own_real_result, f"ComplexCrossentropy {own_real_result} != CategoricalCrossentropy {tf_result}"
    assert tf_result == own_result, f"ComplexCrossentropy {own_result} != CategoricalCrossentropy {tf_result}"
def weighted_loss():
    # With the minority class (second column) weighted 9x, the weighted loss
    # must be strictly larger than the unweighted ACE on the same mistake.
    # NOTE: currently commented out in test_losses().
    y_true = np.array([
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [0., 1.]
    ])
    y_pred = np.array([
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.]
    ])
    ace = ComplexAverageCrossEntropy()(y_pred=tf.complex(y_pred, y_pred), y_true=y_true)
    wace = ComplexWeightedAverageCrossEntropy(weights=[1., 9.])(y_pred=tf.complex(y_pred, y_pred), y_true=y_true)
    assert ace.numpy() < wace.numpy(), f"ACE {ace.numpy()} > WACE {wace.numpy()}"
def test_losses():
    # Entry point for pytest. weighted_loss is deliberately disabled below.
    no_label_test()
    averaging_method()
    # weighted_loss()
    ace()
if __name__ == "__main__":
    test_losses()
| 3,533 | 31.722222 | 120 | py |
cvnn | cvnn-master/tests/test_metrics.py | import numpy as np
from tensorflow.keras.metrics import CategoricalAccuracy
import tensorflow as tf
from pdb import set_trace
from cvnn.metrics import ComplexAverageAccuracy, ComplexCategoricalAccuracy
def test_with_tf():
    """
    Cross-checks ComplexCategoricalAccuracy against tf.keras' CategoricalAccuracy:
    on fully labeled inputs (or with ignore_unlabeled=False) both metrics must
    give exactly the same result.
    """
    classes = 3
    y_true = tf.cast(tf.random.uniform(shape=(34, 54, 12), maxval=classes), dtype=tf.int32)
    y_pred = tf.nn.softmax(tf.cast(tf.random.uniform(shape=(34, 54, 12, classes), maxval=1), dtype=tf.float64))
    y_pred_one_hot = y_pred  # tf.one_hot(y_pred, depth=classes)
    y_true_one_hot = tf.one_hot(y_true, depth=classes)
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true_one_hot, y_pred_one_hot)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true_one_hot, y_pred_one_hot)
    # set_trace()
    assert own_metric.result().numpy() == tf_metric.result().numpy()
    y_true = np.array([
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [0., 0., 0., 0.],  # This shows tf does not ignore cases with [0. 0. 0. 0.] (unlabeled)
        [0., 0., 1., 0.],  # 3
        [0., 0., 1., 0.],  # 3
        [0., 0., 0., 0.],  # 3
        [0., 0., 1., 0.]  # 3
    ])
    y_pred = np.array([
        [1., 0., 0., 0.],  # 1
        [.8, 0., 0.2, 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [0., 0., 0.1, .9],  # 4
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.]  # 4
    ])
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true, y_pred)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true, y_pred, ignore_unlabeled=False)  # to make it as tf
    assert own_metric.result().numpy() == tf_metric.result().numpy()
    y_true = np.array([
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [0., 1.]
    ])
    y_pred = np.array([
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.]
    ])
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true, y_pred)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true, y_pred, ignore_unlabeled=False)  # to make it as tf
    assert own_metric.result().numpy() == tf_metric.result().numpy()
def test_metric():
    """
    Checks ComplexCategoricalAccuracy (overall accuracy) and
    ComplexAverageAccuracy (per-class average accuracy) on hand-made cases
    with known expected values.
    """
    y_true = [[0, 0, 0],
              [0, 0, 1],
              [0, 1, 0], [0, 1, 0],
              [1, 0, 0]]
    y_pred = [[0.1, 0.9, 0.8],
              [0.1, 0.9, 0.8],
              [0.05, 0.95, 0], [0.95, 0.05, 0],
              [0, 1, 0]]
    m = ComplexCategoricalAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == 0.25
    m = ComplexAverageAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](1/6)  # I want 0.5/3 = 1/6
    # Heavily unbalanced case: always predicting the majority class gives 90%
    # overall accuracy but only 50% average (per-class) accuracy.
    y_true = np.array([
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [0., 1.]
    ])
    y_pred = np.array([
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.]
    ])
    m = ComplexCategoricalAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](.9)
    m = ComplexAverageAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](0.5)
def test_null_label():
    """
    Checks that rows whose one-hot target is all zeros (unlabeled samples) are
    ignored by ComplexCategoricalAccuracy / ComplexAverageAccuracy by default.
    """
    y_true = np.array([
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [0., 0., 1., 0.],  # 3
        [0., 0., 1., 0.],  # 3
        [0., 0., 1., 0.],  # 3
        [0., 0., 1., 0.]  # 3
    ])
    y_pred = np.array([
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.]  # 4
    ])
    # Class 1 fully correct, class 3 fully wrong -> average accuracy 0.5.
    m = ComplexAverageAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](0.5)
    y_true = np.array([
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [0., 0.],
        [0., 0.],
        [0., 0.],
        [0., 0.],
        [0., 1.]
    ])
    y_pred = np.array([
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [1., 0.],
        [0., 1.],
        [0., 1.],
        [0., 1.],
        [0., 1.],
        [1., 0.]
    ])
    # The four unlabeled rows are skipped: accuracy is computed over 10 rows.
    m = ComplexCategoricalAccuracy()
    m.update_state(y_true, y_pred)
    # tf_metric = CategoricalAccuracy()
    # tf_metric.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](.9)
    # assert m.result().numpy() == tf_metric.result().numpy()
    m = ComplexAverageAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](0.5)
    y_true = np.array([
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [0., 0., 1., 0.],  # 3
        [0., 0., 1., 0.],  # 3
        [0., 0., 1., 0.],  # 3
        [0., 0., 0., 1.],  # 4
        [0., 1., 0., 0.]  # 2
    ])
    y_pred = np.array([
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.],  # 4
        [0., 1., 0., 0.]  # 2
    ])
    # Per-class accuracies: 1 -> 1.0, 2 -> 1.0, 3 -> 0.0, 4 -> 1.0 => mean 0.75.
    m = ComplexAverageAccuracy()
    m.update_state(y_true, y_pred)
    assert m.result().numpy() == np.cast['float32'](0.75)
if __name__ == "__main__":
    test_null_label()
    test_with_tf()
    test_metric()
| 6,381 | 27.364444 | 111 | py |
cvnn | cvnn-master/tests/test_dataset_conversion.py | from cvnn.utils import transform_to_real_map_function
import numpy as np
from pdb import set_trace
import tensorflow as tf
def test_image_real_conversion():
    """
    Checks transform_to_real_map_function on a tf.data pipeline: a complex image
    with C channels must become a real image with 2C channels, where the real
    part goes to the even channels and the imaginary part to the odd ones.
    """
    img_r = np.array([[
        [0, 1, 2],
        [0, 2, 2],
        [0, 5, 7]
    ], [
        [0, 7, 5],
        [3, 7, 9],
        [4, 5, 3]
    ]]).astype(np.float32)
    img_i = np.array([[
        [0, 4, 5],
        [3, 7, 9],
        [4, 5, 3]
    ], [
        [0, 4, 5],
        [3, 2, 2],
        [4, 8, 9]
    ]]).astype(np.float32)
    img = img_r + 1j * img_i
    img = np.reshape(img, (2, 3, 3, 1))  # batch of 2 single-channel 3x3 complex images
    label = img
    dataset = tf.data.Dataset.from_tensor_slices((img, label))
    real_dataset = dataset.map(transform_to_real_map_function)
    c_elem, c_label = next(iter(dataset))
    r_elem, r_label = next(iter(real_dataset))
    assert np.all(c_elem.shape[:-1] == r_elem.shape[:-1])  # spatial dims unchanged
    assert 2 * c_elem.shape[-1] == r_elem.shape[-1]  # channel count doubled
    assert np.all(tf.math.real(c_elem)[:, :, 0] == r_elem[:, :, 0])
    assert np.all(tf.math.imag(c_elem)[:, :, 0] == r_elem[:, :, 1])
if __name__ == '__main__':
    test_image_real_conversion()
| 1,114 | 24.340909 | 67 | py |
cvnn | cvnn-master/tests/test_capacity_real_equivalent.py | import numpy as np
import cvnn.layers as layers
from time import sleep
from cvnn.layers import ComplexDense
from cvnn.real_equiv_tools import get_real_equivalent_multiplier
from tensorflow.keras.models import Sequential
from tensorflow.keras.losses import categorical_crossentropy
def shape_tst(input_size, output_size, shape_raw, classifier=True, equiv_technique='alternate_tp', expected_result=None):
    """
    Builds a complex fully-connected network and checks the per-layer size
    multiplier computed by `get_real_equivalent_multiplier`.

    :param input_size: number of input features.
    :param output_size: number of output units (last layer).
    :param shape_raw: list of hidden-layer sizes (may be empty).
    :param classifier: forwarded to get_real_equivalent_multiplier.
    :param equiv_technique: equivalence technique name, forwarded as-is.
    :param expected_result: if given, raise AssertionError when the computed
        multipliers differ from it; if None, just print the result.
    :raises AssertionError: when `expected_result` is given and does not match.
    """
    shape = [
        layers.ComplexInput(input_shape=input_size, dtype=np.complex64)
    ]
    if len(shape_raw) == 0:
        # No hidden layers: input goes straight to the output layer.
        shape.append(
            ComplexDense(units=output_size, activation='softmax_real_with_abs', dtype=np.complex64)
        )
    else:  # len(shape_raw) > 0:
        for s in shape_raw:
            shape.append(ComplexDense(units=s, activation='cart_relu'))  # Add dropout!
        shape.append(ComplexDense(units=output_size, activation='softmax_real_with_abs'))
    complex_network = Sequential(shape, name="complex_network")
    complex_network.compile(optimizer='sgd', loss=categorical_crossentropy, metrics=['accuracy'])
    result = get_real_equivalent_multiplier(complex_network.layers, classifier=classifier,
                                            equiv_technique=equiv_technique)
    if expected_result is not None:
        if not np.all(expected_result == result):
            # BUG FIX: `raise f"..."` raised a plain string, which is itself a
            # TypeError in Python 3 (exceptions must derive from BaseException).
            raise AssertionError(f"Expecting result {expected_result} but got {result}.")
    else:
        print(result)
def test_shape():
    """
    Exercises shape_tst over the supported equivalence techniques
    ('ratio_tp', 'alternate_tp' default, 'np', 'none') with pre-computed
    expected multipliers. The sleep(2) calls space out the Keras model builds.
    """
    # Ratio
    # The bigger the middle, it will tend to sqrt(2) = 1.4142135623730951
    shape_tst(4, 2, [1, 30, 500, 400, 60, 50, 3], classifier=True, equiv_technique='ratio_tp')
    sleep(2)
    # this is 1 for regression
    shape_tst(4, 2, [64], classifier=False, equiv_technique='ratio_tp', expected_result=[1., 2])
    sleep(2)
    # this is 2*(in+out)/(2*in+out) = 1.2
    shape_tst(4, 2, [64], classifier=True, equiv_technique='ratio_tp', expected_result=[1.2, 1])
    sleep(2)
    # shape_tst(100, 2, [100, 30, 50, 40, 60, 50, 30], classifier=True, equiv_technique='ratio')
    # sleep(2)
    # shape_tst(100, 2, [100, 30, 50, 60, 50, 30], classifier=True, equiv_technique='ratio')
    # sleep(2)
    # shape_tst(100, 2, [100, 30, 50, 60, 50, 30], classifier=False, equiv_technique='ratio')
    # sleep(2)
    # shape_tst(100, 2, [100, 30, 50, 40, 60, 50, 30], classifier=False, equiv_technique='ratio')
    # sleep(2)
    # shape_tst(100, 2, [100, 30, 50, 40, 60, 50, 30], capacity_equivalent=False, equiv_technique='ratio')
    # sleep(2)
    # Alternate
    shape_tst(100, 2, [], expected_result=[1])
    sleep(2)
    shape_tst(100, 2, [64], expected_result=[204/202, 1])
    sleep(2)
    shape_tst(100, 2, [100, 64], expected_result=[1, 2, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 64], expected_result=[1, 328/228, 2, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 40, 50], expected_result=[1, 2, 1, 2, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 40, 60, 30], expected_result=[1, 2, 180/120, 1, 2, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 40, 60, 50, 30], expected_result=[1, 2, 1, 2, 1, 2, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 40, 60, 50, 30, 60], expected_result=[1, 2, 1, 180/140, 2, 1, 2, 1])
    # Not capacity equivalent
    sleep(2)
    shape_tst(100, 2, [], equiv_technique='np', expected_result=[1])
    sleep(2)
    shape_tst(100, 2, [64], equiv_technique='np', expected_result=[2, 1])
    sleep(2)
    shape_tst(100, 2, [100, 64], equiv_technique='np', expected_result=[2, 2, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 64], equiv_technique='np', expected_result=[2, 2, 2, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 40, 50], equiv_technique='np', expected_result=[2, 2, 2, 2, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 40, 60, 50, 30], equiv_technique='np', expected_result=[2, 2, 2, 2, 2, 2, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 40, 60, 50, 30], classifier=False, equiv_technique='np',
              expected_result=[2, 2, 2, 2, 2, 2, 2])
    # Not capacity equivalent
    sleep(2)
    shape_tst(100, 2, [], equiv_technique='none', expected_result=[1])
    sleep(2)
    shape_tst(100, 2, [64], equiv_technique='none', expected_result=[1, 1])
    sleep(2)
    shape_tst(100, 2, [100, 64], equiv_technique='none', expected_result=[1, 1, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 64], equiv_technique='none', expected_result=[1, 1, 1, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 40, 50], equiv_technique='none', expected_result=[1, 1, 1, 1, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 40, 60, 50, 30], equiv_technique='none', expected_result=[1, 1, 1, 1, 1, 1, 1])
    sleep(2)
    shape_tst(100, 2, [100, 30, 40, 60, 50, 30], classifier=False, equiv_technique='none',
              expected_result=[1, 1, 1, 1, 1, 1, 1])
if __name__ == '__main__':
    test_shape()
| 5,079 | 43.955752 | 121 | py |
cvnn | cvnn-master/tests/test_several_datasets.py | import tensorflow as tf
import numpy as np
import os
import tensorflow_datasets as tfds
from tensorflow.keras import datasets, models
from cvnn.initializers import ComplexGlorotUniform
from cvnn.layers import ComplexDense, ComplexFlatten, ComplexInput
import cvnn.layers as complex_layers
from cvnn import layers
from pdb import set_trace
from cvnn.montecarlo import run_gaussian_dataset_montecarlo
def normalize_img(image, label):
    """Normalizes images: `uint8` -> `float32` scaled into [0, 1]; label passes through."""
    return tf.cast(image, tf.float32) / 255., label
def mnist_example():
    """
    Trains a small real-dtype model (built from cvnn layers with dtype=tf.float32)
    on MNIST via tensorflow_datasets for 2 epochs — smoke test that the complex
    layers also work in the purely real case.
    """
    (ds_train, ds_test), ds_info = tfds.load(
        'mnist',
        split=['train', 'test'],
        shuffle_files=True,
        as_supervised=True,
        with_info=True,
    )
    # Standard tf.data input pipeline: normalize, cache, shuffle, batch, prefetch.
    ds_train = ds_train.map(
        normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_train = ds_train.cache()
    ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
    ds_train = ds_train.batch(128)
    ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
    ds_test = ds_test.map(
        normalize_img, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds_test = ds_test.batch(128)
    ds_test = ds_test.cache()
    ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
    model = tf.keras.models.Sequential([
        ComplexFlatten(input_shape=(28, 28, 1)),
        ComplexDense(128, activation='relu', dtype=tf.float32),
        ComplexDense(10, dtype=tf.float32)
    ])
    model.compile(
        optimizer=tf.keras.optimizers.Adam(0.001),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
    )
    model.fit(
        ds_train,
        epochs=2,
        validation_data=ds_test,
    )
def fashion_mnist_example():
    """
    Trains a complex-dtype dense model on Fashion-MNIST for 2 epochs.
    The (real) images are simply cast to complex64; the last layer casts back
    to real via 'convert_to_real_with_abs' so the crossentropy loss is defined.
    """
    dtype_1 = np.complex64
    fashion_mnist = tf.keras.datasets.fashion_mnist
    (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
    train_images = train_images.astype(dtype_1)
    test_images = test_images.astype(dtype_1)
    train_labels = train_labels.astype(dtype_1)
    test_labels = test_labels.astype(dtype_1)
    model = tf.keras.Sequential([
        ComplexInput(input_shape=(28, 28)),
        ComplexFlatten(),
        ComplexDense(128, activation='cart_relu', kernel_initializer=ComplexGlorotUniform(seed=0)),
        ComplexDense(10, activation='convert_to_real_with_abs', kernel_initializer=ComplexGlorotUniform(seed=0))
    ])
    model.summary()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy']
                  )
    model.fit(train_images, train_labels, epochs=2)
    # import pdb; pdb.set_trace()
def cifar10_test():
    """
    Builds the same complex CNN twice — once with the Sequential API and once
    with the functional API — under the same random seed, and asserts both
    produce identical training histories on CIFAR-10.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force CPU so the two runs are bit-reproducible
    dtype_1 = 'complex64'
    (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
    # Normalize pixel values to be between 0 and 1
    train_images, test_images = train_images / 255.0, test_images / 255.0
    train_images = train_images.astype(dtype_1)
    test_images = test_images.astype(dtype_1)
    train_labels = train_labels.astype(dtype_1)
    test_labels = test_labels.astype(dtype_1)
    tf.random.set_seed(1)  # same seed before each build so weight inits match
    hist1 = cifar10_test_model_1(train_images, train_labels, test_images, test_labels, dtype_1)
    tf.random.set_seed(1)
    hist2 = cifar10_test_model_2(train_images, train_labels, test_images, test_labels, dtype_1)
    assert hist1.history == hist2.history, f"\n{hist1.history}\n !=\n{hist2.history}"
def cifar10_test_model_1(train_images, train_labels, test_images, test_labels, dtype_1='complex64'):
    """Sequential-API version of the complex CNN; returns the Keras History of a 2-epoch fit."""
    model = models.Sequential()
    model.add(layers.ComplexInput(input_shape=(32, 32, 3), dtype=dtype_1))  # Never forget this!!!
    model.add(layers.ComplexConv2D(32, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexMaxPooling2D((2, 2)))
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexAvgPooling2D((2, 2)))
    model.add(layers.ComplexConv2D(64, (3, 3), activation='cart_relu'))
    model.add(layers.ComplexFlatten())
    model.add(layers.ComplexDense(64, activation='cart_relu'))
    model.add(layers.ComplexDense(10, activation='convert_to_real_with_abs'))
    model.summary()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    # shuffle=False so the run is comparable with cifar10_test_model_2
    return model.fit(train_images, train_labels, epochs=2, validation_data=(test_images, test_labels), shuffle=False)
def cifar10_test_model_2(train_images, train_labels, test_images, test_labels, dtype_1='complex64'):
    """Functional-API version of the same complex CNN as cifar10_test_model_1."""
    x = layers.complex_input(shape=(32, 32, 3), dtype=dtype_1)
    conv1 = layers.ComplexConv2D(32, (3, 3), activation='cart_relu')(x)
    pool1 = layers.ComplexMaxPooling2D((2, 2))(conv1)
    conv2 = layers.ComplexConv2D(64, (3, 3), activation='cart_relu')(pool1)
    pool2 = layers.ComplexAvgPooling2D((2, 2))(conv2)
    conv3 = layers.ComplexConv2D(64, (3, 3), activation='cart_relu')(pool2)
    flat = layers.ComplexFlatten()(conv3)
    dense1 = layers.ComplexDense(64, activation='cart_relu')(flat)
    y = layers.ComplexDense(10, activation='convert_to_real_with_abs')(dense1)
    model = models.Model(inputs=[x], outputs=[y])
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    model.summary()
    # shuffle=False so the run is comparable with cifar10_test_model_1
    return model.fit(train_images, train_labels, epochs=2, validation_data=(test_images, test_labels), shuffle=False)
def random_dataset():
    """
    Trains a complex CNN classifier on purely random complex data (5 classes):
    a smoke test that the full conv/pool/dropout/dense pipeline fits and evaluates.
    """
    x_train = np.complex64(tf.complex(tf.random.uniform([640, 65, 82, 1]), tf.random.uniform([640, 65, 82, 1])))
    x_test = np.complex64(tf.complex(tf.random.uniform([200, 65, 82, 1]), tf.random.uniform([200, 65, 82, 1])))
    y_train = np.uint8(np.random.randint(5, size=(640, 1)))
    y_test = np.uint8(np.random.randint(5, size=(200, 1)))
    model = tf.keras.models.Sequential()
    model.add(complex_layers.ComplexInput(input_shape=(65, 82, 1)))  # Always use ComplexInput at the start
    model.add(complex_layers.ComplexConv2D(8, (5, 5), activation='cart_relu'))
    model.add(complex_layers.ComplexMaxPooling2D((2, 2)))
    model.add(complex_layers.ComplexConv2D(16, (5, 5), activation='cart_relu'))
    model.add(complex_layers.ComplexFlatten())
    model.add(complex_layers.ComplexDense(256, activation='cart_relu'))
    model.add(complex_layers.ComplexDropout(0.1))
    model.add(complex_layers.ComplexDense(64, activation='cart_relu'))
    model.add(complex_layers.ComplexDropout(0.1))
    model.add(complex_layers.ComplexDense(5, activation='convert_to_real_with_abs'))
    # An activation that casts to real must be used at the last layer.
    # The loss function cannot minimize a complex number
    # Compile it
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'],
                  # run_eagerly=True  # (debug option, left disabled)
                  )
    model.summary()
    # Train and evaluate
    history = model.fit(x_train, y_train, epochs=2, validation_data=(x_test, y_test))
    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
def test_datasets():
    # Entry point for pytest: runs each dataset smoke test in turn.
    # cifar10_test is deliberately disabled below (long runtime).
    run_gaussian_dataset_montecarlo(epochs=2, iterations=1)
    random_dataset()
    fashion_mnist_example()
    mnist_example()
    # cifar10_test()
if __name__ == '__main__':
    test_datasets()
| 7,601 | 41.233333 | 117 | py |
cvnn | cvnn-master/tests/test_activation_functions.py | import tensorflow as tf
from cvnn import layers, activations
if __name__ == '__main__':
    # Smoke test: build a tiny 2-layer model with every registered activation
    # name to verify each entry of act_dispatcher is usable as a layer activation.
    for activation in activations.act_dispatcher.keys():
        print(activation)
        model = tf.keras.Sequential([
            layers.ComplexInput(4),
            layers.ComplexDense(1, activation=activation),
            layers.ComplexDense(1, activation='linear')
        ])
cvnn | cvnn-master/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Name of the root document (old Sphinx versions default to 'contents').
master_doc = 'index'
# -- Project information -----------------------------------------------------
project = 'cvnn'
copyright = '2020, J Agustin BARRACHINA'
author = 'J Agustin BARRACHINA'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones. recommonmark enables Markdown sources; nbsphinx renders notebooks.
extensions = ['recommonmark', 'nbsphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'  # 'classic'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 2,002 | 35.418182 | 79 | py |
cvnn | cvnn-master/cvnn/initializers.py | from abc import abstractmethod
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.keras.initializers import Initializer
import sys
from pdb import set_trace
# Typing
from typing import Optional
INIT_TECHNIQUES = {'zero_imag', 'mirror'}
def _compute_fans(shape):
"""
Taken from https://github.com/tensorflow/tensorflow/blob/2b96f3662bd776e277f86997659e61046b56c315/tensorflow/python/ops/init_ops_v2.py#L994
Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1.
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1.
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
class _RandomGenerator(object):
    """
    Random generator that selects appropriate random ops.
    https://github.com/tensorflow/tensorflow/blob/2b96f3662bd776e277f86997659e61046b56c315/tensorflow/python/ops/init_ops_v2.py#L1041
    When a seed is given, the stateless (deterministic) tf random ops are used;
    otherwise the regular stateful ones.
    """
    def __init__(self, seed=None):
        super(_RandomGenerator, self).__init__()
        if seed is not None:
            # Stateless random ops requires 2-int seed.
            self.seed = [seed, 0]
        else:
            self.seed = None
    def random_normal(self, shape, mean=0.0, stddev=1, dtype=tf.dtypes.float32):
        """A deterministic random normal if seed is passed."""
        if self.seed:
            op = stateless_random_ops.stateless_random_normal
        else:
            op = random_ops.random_normal
        return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
    def random_uniform(self, shape, minval, maxval, dtype):
        """A deterministic random uniform if seed is passed."""
        if self.seed:
            op = stateless_random_ops.stateless_random_uniform
        else:
            op = random_ops.random_uniform
        return op(shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)
    def truncated_normal(self, shape, mean, stddev, dtype):
        """A deterministic truncated normal if seed is passed."""
        if self.seed:
            op = stateless_random_ops.stateless_truncated_normal
        else:
            op = random_ops.truncated_normal
        return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
class ComplexInitializer(Initializer):
    """
    Base class for the complex Glorot/He initializers below.
    Subclasses only provide `_compute_limit`, the distribution scale derived
    from (fan_in, fan_out). For a complex dtype the scale is divided by sqrt(2)
    so the per-component (real/imaginary) variances sum to the real-case variance.
    NOTE(review): __call__ always returns a REAL tensor (dtype.real_dtype) —
    presumably it is invoked separately for the real and imaginary weight parts;
    confirm against the layer code.
    """
    def __init__(self, distribution: str = "uniform", seed: Optional[int] = None):
        # :param distribution: "uniform" or "normal" (case-insensitive).
        # :param seed: optional int; when given, deterministic stateless ops are used.
        if distribution.lower() not in {"uniform", "normal"}:
            raise ValueError("Invalid `distribution` argument:", distribution)
        else:
            self.distribution = distribution.lower()
        self._random_generator = _RandomGenerator(seed)
    def _call_random_generator(self, shape, arg, dtype):
        # Draw from U[-arg, arg] or from a truncated normal whose final stddev is `arg`.
        if self.distribution == "uniform":
            return self._random_generator.random_uniform(shape=shape, minval=-arg, maxval=arg, dtype=dtype)
        elif self.distribution == "normal":
            # I make this magic number division because that's what tf does on this case
            # (0.87962566103423978 ~ stddev of a standard normal truncated to [-2, 2],
            # so after truncation the samples end up with stddev `arg`).
            return self._random_generator.truncated_normal(shape=shape, mean=0.0, stddev=arg / .87962566103423978,
                                                           dtype=dtype)
    @abstractmethod
    def _compute_limit(self, fan_in, fan_out):
        # Subclasses return the distribution scale (uniform limit or normal stddev).
        pass
    def __call__(self, shape, dtype=tf.dtypes.complex64, **kwargs):
        fan_in, fan_out = _compute_fans(shape)
        arg = self._compute_limit(fan_in, fan_out)
        dtype = tf.dtypes.as_dtype(dtype)
        if dtype.is_complex:
            arg = arg / np.sqrt(2)  # split the variance evenly between real and imaginary parts
        return self._call_random_generator(shape=shape, arg=arg, dtype=dtype.real_dtype)
    def get_config(self):  # To support serialization
        return {"seed": self._random_generator.seed}
class ComplexGlorotUniform(ComplexInitializer):
    """
    The Glorot uniform initializer, also called Xavier uniform initializer.
    Reference: http://proceedings.mlr.press/v9/glorot10a.html
    Draws samples from a uniform distribution:
    - Real case: `x ~ U[-limit, limit]` where `limit = sqrt(6 / (fan_in + fan_out))`
    - Complex case: `z / Re{z} = Im{z} ~ U[-limit, limit]` where `limit = sqrt(3 / (fan_in + fan_out))`
    where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units.
    ```
    # Standalone usage:
    import cvnn
    initializer = cvnn.initializers.ComplexGlorotUniform()
    values = initializer(shape=(2, 2))  # Returns a complex Glorot Uniform tensor of shape (2, 2)
    ```
    ```
    # Usage in a cvnn layer:
    import cvnn
    initializer = cvnn.initializers.ComplexGlorotUniform()
    layer = cvnn.layers.ComplexDense(units=10, kernel_initializer=initializer)
    ```
    """
    __name__ = "Complex Glorot Uniform"
    def __init__(self, seed: Optional[int] = None):
        super(ComplexGlorotUniform, self).__init__(distribution="uniform", seed=seed)
    def _compute_limit(self, fan_in, fan_out):
        # Real-valued limit; the base class divides by sqrt(2) for complex dtypes.
        return tf.math.sqrt(6. / (fan_in + fan_out))
class ComplexGlorotNormal(ComplexInitializer):
    """
    The Glorot normal initializer, also called Xavier normal initializer.
    Reference: http://proceedings.mlr.press/v9/glorot10a.html
    *Note: The reference actually refers to the uniform case but it's analysis was adapted for a normal distribution
    Draws samples from a truncated normal distribution centered on 0 with
    - Real case: `stddev = sqrt(2 / (fan_in + fan_out))`
    - Complex case: real part stddev = complex part stddev = `1 / sqrt(fan_in + fan_out)`
    where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units.
    ```
    # Standalone usage:
    import cvnn
    initializer = cvnn.initializers.ComplexGlorotNormal()
    values = initializer(shape=(2, 2))  # Returns a complex Glorot Normal tensor of shape (2, 2)
    ```
    ```
    # Usage in a cvnn layer:
    import cvnn
    initializer = cvnn.initializers.ComplexGlorotNormal()
    layer = cvnn.layers.ComplexDense(units=10, kernel_initializer=initializer)
    ```
    """
    __name__ = "Complex Glorot Normal"
    def __init__(self, seed: Optional[int] = None):
        super(ComplexGlorotNormal, self).__init__(distribution="normal", seed=seed)
    def _compute_limit(self, fan_in, fan_out):
        # Real-valued stddev; the base class divides by sqrt(2) for complex dtypes.
        return tf.math.sqrt(2. / (fan_in + fan_out))
class ComplexHeUniform(ComplexInitializer):
    """
    The He Uniform initializer.
    Reference: https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html
    Draws samples from a uniform distribution:
    - Real case: `x ~ U[-limit, limit]` where `limit = sqrt(6 / fan_in)`
    - Complex case: `z / Re{z} = Im{z} ~ U[-limit, limit]` where `limit = sqrt(3 / fan_in)`
    where `fan_in` is the number of input units in the weight tensor.
    ```
    # Standalone usage:
    import cvnn
    initializer = cvnn.initializers.ComplexHeUniform()
    values = initializer(shape=(2, 2))  # Returns a real He Uniform tensor of shape (2, 2)
    ```
    ```
    # Usage in a cvnn layer:
    import cvnn
    initializer = cvnn.initializers.ComplexHeUniform()
    layer = cvnn.layers.ComplexDense(units=10, kernel_initializer=initializer)
    ```
    """
    __name__ = "Complex He Uniform"
    def __init__(self, seed: Optional[int] = None):
        super(ComplexHeUniform, self).__init__(distribution="uniform", seed=seed)
    def _compute_limit(self, fan_in, fan_out):
        # He scaling depends only on fan_in; base class divides by sqrt(2) for complex dtypes.
        return tf.math.sqrt(6. / fan_in)
class ComplexHeNormal(ComplexInitializer):
    """
    He normal initializer.
    Reference: https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html
    It draws samples from a truncated normal distribution centered on 0 with
    - Real case: `stddev = sqrt(2 / fan_in)`
    - Complex case: real part stddev = complex part stddev = `1 / sqrt(fan_in)`
    where fan_in is the number of input units in the weight tensor.
    ```
    # Standalone usage:
    import cvnn
    initializer = cvnn.initializers.ComplexHeNormal()
    values = initializer(shape=(2, 2))  # Returns a complex He Normal tensor of shape (2, 2)
    ```
    ```
    # Usage in a cvnn layer:
    import cvnn
    initializer = cvnn.initializers.ComplexHeNormal()
    layer = cvnn.layers.ComplexDense(units=10, kernel_initializer=initializer)
    ```
    """
    __name__ = "Complex He Normal"
    def __init__(self, seed: Optional[int] = None):
        # He Normal samples weights from a (truncated) normal distribution.
        super(ComplexHeNormal, self).__init__(distribution="normal", seed=seed)
    def _compute_limit(self, fan_in, fan_out):
        # Real-case stddev = sqrt(2 / fan_in); `fan_out` is not used by the He scheme.
        return tf.math.sqrt(2. / fan_in)
class Zeros:
    """
    Creates a tensor with all elements set to zero.
    ```
    >>> cvnn.initializers.Zeros()(shape=(2,2))
    <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
    array([[0.+0.j, 0.+0.j],
    [0.+0.j, 0.+0.j]], dtype=float32)>
    ```
    ```
    # Usage in a cvnn layer:
    import cvnn
    initializer = cvnn.initializers.Zeros()
    layer = cvnn.layers.ComplexDense(units=10, bias_initializer=initializer)
    ```
    """
    __name__ = "Zeros"
    def __call__(self, shape, dtype=tf.dtypes.complex64):
        # NOTE(review): this returns a tensor of `dtype`'s *real* counterpart
        # (e.g. float32 for complex64), which contradicts the complex-valued
        # example in the docstring above — confirm which behavior is intended.
        return tf.zeros(shape, dtype=tf.dtypes.as_dtype(dtype).real_dtype)
class Ones:
    """Initializer that creates a tensor of ones, using the real counterpart of `dtype`."""
    __name__ = "Ones"
    def __call__(self, shape, dtype=tf.dtypes.complex64):
        real_dtype = tf.dtypes.as_dtype(dtype).real_dtype
        return tf.ones(shape, dtype=real_dtype)
# Maps initializer names to their classes, so initializers can be resolved
# from string identifiers.
init_dispatcher = {
    "ComplexGlorotUniform": ComplexGlorotUniform,
    "ComplexGlorotNormal": ComplexGlorotNormal,
    "ComplexHeUniform": ComplexHeUniform,
    "ComplexHeNormal": ComplexHeNormal
}
if __name__ == '__main__':
# Nothing yet
set_trace()
__author__ = 'J. Agustin BARRACHINA'
__version__ = '0.0.13'
__maintainer__ = 'J. Agustin BARRACHINA'
__email__ = 'joseagustin.barra@gmail.com; jose-agustin.barrachina@centralesupelec.fr'
| 10,508 | 35.237931 | 143 | py |
cvnn | cvnn-master/cvnn/tb.py | from tensorflow.keras.callbacks import TensorBoard
from tensorflow import GradientTape
import tensorflow as tf
# This extends TensorBoard to save gradients as histogram
# ExtendedTensorBoard can then be used in replace of tf.keras.callbacks.TensorBoard.
class ExtendedTensorBoard(TensorBoard):
    """TensorBoard callback extended to also log weight gradients as histograms."""
    def _log_gradients(self, epoch):
        # Logs one histogram per trainable weight containing the gradient of the
        # compiled loss with respect to that weight.
        writer = self._writers['train']
        with writer.as_default(), GradientTape() as g:
            # here we use test data to calculate the gradients
            # NOTE(review): `dataset` is not defined in this module — this code
            # assumes a global `dataset` exists at call time; confirm / inject it.
            features, y_true = list(dataset.batch(100).take(1))[0]
            y_pred = self.model(features)  # forward-propagation
            loss = self.model.compiled_loss(y_true=y_true, y_pred=y_pred)  # calculate loss
            gradients = g.gradient(loss, self.model.trainable_weights)  # back-propagation
            # In eager mode, grads does not have name, so we get names from model.trainable_weights
            for weights, grads in zip(self.model.trainable_weights, gradients):
                tf.summary.histogram(
                    weights.name.replace(':', '_') + '_grads', data=grads, step=epoch)
        writer.flush()
    def on_epoch_end(self, epoch, logs=None):
        # This function overwrites the on_epoch_end in tf.keras.callbacks.TensorBoard
        # but we do need to run the original on_epoch_end, so here we use the super function.
        super(ExtendedTensorBoard, self).on_epoch_end(epoch, logs=logs)
        if self.histogram_freq and epoch % self.histogram_freq == 0:
            self._log_gradients(epoch)
| 1,546 | 44.5 | 99 | py |
cvnn | cvnn-master/cvnn/losses.py | import tensorflow as tf
from tensorflow.keras import backend
from tensorflow.keras.losses import Loss, categorical_crossentropy
class ComplexAverageCrossEntropy(Loss):
    """Categorical cross-entropy averaged over the real and imaginary parts of y_pred.

    For real-valued predictions this reduces to the standard categorical cross-entropy.
    """
    def call(self, y_true, y_pred):
        loss_re = categorical_crossentropy(y_true, tf.math.real(y_pred))
        if y_pred.dtype.is_complex:
            loss_im = categorical_crossentropy(y_true, tf.math.imag(y_pred))
        else:
            loss_im = loss_re
        return (loss_re + loss_im) / 2.
class ComplexAverageCrossEntropyIgnoreUnlabeled(ComplexAverageCrossEntropy):
    """Average cross-entropy that drops samples whose one-hot label vector is all zeros."""
    def call(self, y_true, y_pred):
        labeled = tf.reduce_any(tf.cast(y_true, tf.bool), axis=-1)
        return super(ComplexAverageCrossEntropyIgnoreUnlabeled, self).call(
            tf.boolean_mask(y_true, labeled), tf.boolean_mask(y_pred, labeled))
class ComplexMeanSquareError(Loss):
    """Mean square error on the complex plane: mean(|y_true - y_pred|^2, axis=-1)."""
    def call(self, y_true, y_pred):
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex and not y_true.dtype.is_complex:  # Complex pred but real true
            # Real labels are lifted to the diagonal y + j*y so both parts are penalized.
            y_true = tf.complex(y_true, y_true)
        y_true = tf.cast(y_true, y_pred.dtype)
        # |.|^2 yields a real loss; cast it back to the real dtype of the prediction.
        return tf.cast(backend.mean(tf.math.square(tf.math.abs(y_true - y_pred)), axis=-1),
                       dtype=y_pred.dtype.real_dtype)
class ComplexWeightedAverageCrossEntropy(ComplexAverageCrossEntropy):
    """Average cross-entropy with per-class weights.

    `weights` is indexed by class; each sample is weighted by the weight of its
    one-hot label.
    """
    def __init__(self, weights, **kwargs):
        self.class_weights = weights
        super(ComplexWeightedAverageCrossEntropy, self).__init__(**kwargs)
    def call(self, y_true, y_pred):
        # https://stackoverflow.com/questions/44560549/unbalanced-data-and-weighted-cross-entropy
        # One weight per sample: dot product of the one-hot label with the class weights.
        weights = tf.reduce_sum(self.class_weights * y_true, axis=-1)
        unweighted_losses = super(ComplexWeightedAverageCrossEntropy, self).call(y_true, y_pred)
        weighted_losses = unweighted_losses * tf.cast(weights, dtype=unweighted_losses.dtype)
        return weighted_losses
class ComplexWeightedAverageCrossEntropyIgnoreUnlabeled(ComplexWeightedAverageCrossEntropy):
    """Weighted average cross-entropy that drops samples whose one-hot label is all zeros.

    Fix: the original subclassed ComplexAverageCrossEntropy, so the class
    weights stored in __init__ were never applied. Subclassing the weighted
    variant restores the weighting after the unlabeled samples are masked out.
    """
    def __init__(self, weights, **kwargs):
        super(ComplexWeightedAverageCrossEntropyIgnoreUnlabeled, self).__init__(weights, **kwargs)
    def call(self, y_true, y_pred):
        mask = tf.reduce_any(tf.cast(y_true, tf.bool), axis=-1)
        y_true = tf.boolean_mask(y_true, mask)
        y_pred = tf.boolean_mask(y_pred, mask)
        return super(ComplexWeightedAverageCrossEntropyIgnoreUnlabeled, self).call(y_true, y_pred)
if __name__ == "__main__":
import numpy as np
y_true = np.random.randint(0, 2, size=(2, 3)).astype("float32")
y_pred = tf.complex(np.random.random(size=(2, 3)).astype("float32"),
np.random.random(size=(2, 3)).astype("float32"))
loss = ComplexMeanSquareError().call(y_true, y_pred)
expected_loss = np.mean(np.square(np.abs(tf.complex(y_true, y_true) - y_pred)), axis=-1)
# import pdb; pdb.set_trace()
assert np.all(loss == expected_loss)
| 3,061 | 40.378378 | 100 | py |
cvnn | cvnn-master/cvnn/__main__.py | from cvnn import cli
import sys
import logging
from pdb import set_trace
cli.cli()
| 84 | 11.142857 | 25 | py |
cvnn | cvnn-master/cvnn/utils.py | import numpy as np
from datetime import datetime
from pathlib import Path
from pdb import set_trace
import sys
from tensorflow.python.keras import Model
import tensorflow as tf # TODO: Imported only for dtype
import os
from os.path import join
from scipy.io import loadmat
# To test logger:
import cvnn
import logging
from typing import Type
logger = logging.getLogger(cvnn.__name__)
# Number of real channels produced per complex channel by each real-cast mode
# (see transform_to_real / transform_to_real_map_function below).
REAL_CAST_MODES = {
    'real_imag': 2,
    'amplitude_phase': 2,
    'amplitude_only': 1,
    'real_only': 1
}
def reset_weights(model: Type[Model]):
    """
    Re-initializes every weight of `model` in place by re-running each layer's
    stored initializers (kernel, bias, recurrent kernel, ...). Nested models
    are handled recursively.
    :param model: keras Model whose weights are reset
    """
    # https://github.com/keras-team/keras/issues/341#issuecomment-539198392
    for layer in model.layers:
        if isinstance(layer, tf.keras.Model): #if you're using a model as a layer
            reset_weights(layer) #apply function recursively
            continue
        #where are the initializers?
        if hasattr(layer, 'cell'):
            init_container = layer.cell
        else:
            init_container = layer
        for key, initializer in init_container.__dict__.items():
            if "initializer" not in key: #is this item an initializer?
                  continue #if no, skip it
            # find the corresponding variable, like the kernel or the bias
            if key == 'recurrent_initializer': #special case check
                var = getattr(init_container, 'recurrent_kernel')
            else:
                var = getattr(init_container, key.replace("_initializer", ""))
            var.assign(initializer(var.shape, var.dtype))
def load_matlab_matrices(fname="data_cnn1dT.mat", path="/media/barrachina/data/gilles_data/"):
    """
    Opens a Matlab matrix file (.mat) as a dict of numpy arrays.
    :param fname: file name to be opened
    :param path: path to the file
    :return: dict with the Matlab matrix information
    """
    full_path = join(path, fname)
    return loadmat(full_path)
def create_folder(root_path, now=None):
    """
    Creates a date-stamped folder chain under root_path.
    :param root_path: root path where to create the folder chain
    :param now: datetime used to name the folders. If None, the current time is used
    :return: the created path as a pathlib.Path (portable across different OS)
    """
    stamp = now if now is not None else datetime.today()
    # path = Path(__file__).parents[1].absolute() / Path(root_path + now.strftime("%Y/%m%B/%d%A/run-%Hh%Mm%S/"))
    # Last line was to create inside cvnn. I prefer now to save stuff on each project folder and not on libraries folder
    path = Path(root_path + stamp.strftime("%Y/%m%B/%d%A/run-%Hh%Mm%S/"))
    # exist_ok avoids failures when several runs execute in parallel
    os.makedirs(path, exist_ok=True)
    return path
def cast_to_path(path):
    """Coerces a str into a pathlib.Path; exits with an error for any other type."""
    if isinstance(path, Path):
        return path
    if isinstance(path, str):
        return Path(path)
    logger.error("Path datatype not recognized")
    sys.exit(-1)
def get_func_name(fun):
    """
    Returns the name of a function passed as parameter being either a function itself or a string with the function name
    :param fun: function or function name
    :return: function name
    """
    if isinstance(fun, str):
        return fun
    if callable(fun):
        return fun.__name__
    logger.error("Function not recognizable", stack_info=True)
    sys.exit(-1)
def transform_to_real_map_function(image, label, mode: str = "real_imag"):
    """
    tf.data map-style function that casts a complex `image` to a real representation.
    :param image: complex-valued tensor
    :param label: label, passed through unchanged
    :param mode: one of REAL_CAST_MODES ('real_imag', 'amplitude_phase',
        'amplitude_only', 'real_only')
    :return: tuple (real-cast image, label)
    """
    if mode not in REAL_CAST_MODES:
        raise KeyError(f"Unknown real cast mode {mode}")
    if mode == 'real_imag':
        # Concatenate real and imaginary parts along the channel axis (doubles channels).
        ret_value = tf.concat([tf.math.real(image), tf.math.imag(image)], axis=-1)
    elif mode == 'amplitude_phase':
        ret_value = tf.concat([tf.math.abs(image), tf.math.angle(image)], axis=-1)
    elif mode == 'amplitude_only':
        ret_value = tf.math.abs(image)
    elif mode == 'real_only':
        ret_value = tf.math.real(image)
    else:
        raise KeyError(f"Real cast mode {mode} not implemented")
    return ret_value, label
def transform_to_real(x_complex, mode: str = "real_imag"):
    """
    Transforms a complex input matrix into a real value matrix (double size)
    :param x_complex: Complex-valued matrix of size mxn
    :param mode: Mode on how to transform to real. One of the following:
        - real_imag: Separate x_complex into real and imaginary making the size of the return double x_complex
        - amplitude_phase: Separate x_complex into amplitude and phase making the size of the return double x_complex
        - amplitude_only: Apply the absolute value to x_complex. Shape remains the same.
    :return: real-valued matrix of real valued cast of x_complex

    Fix: the original body referenced an undefined name `image` instead of
    `x_complex` and returned the undefined name `x_real`, raising NameError
    for every complex input.
    """
    if not tf.dtypes.as_dtype(x_complex.dtype).is_complex:
        # Input was not complex, nothing to do
        return x_complex
    if mode not in REAL_CAST_MODES:
        raise KeyError(f"Unknown real cast mode {mode}")
    if mode == 'real_imag':
        ret_value = np.concatenate([np.real(x_complex), np.imag(x_complex)], axis=-1)
    elif mode == 'amplitude_phase':
        ret_value = np.concatenate([np.abs(x_complex), np.angle(x_complex)], axis=-1)
    elif mode == 'amplitude_only':
        ret_value = np.abs(x_complex)
    elif mode == 'real_only':
        ret_value = np.real(x_complex)
    else:
        raise KeyError(f"Real cast mode {mode} not implemented")
    return ret_value
def cart2polar(z):
    """
    Converts a complex (cartesian) value to polar coordinates.
    :param z: complex input
    :return: tuple (magnitude, phase) of z
    """
    rho = np.abs(z)
    phase = np.angle(z)
    return rho, phase
def polar2cart(rho, angle):
    """
    Converts polar coordinates back to a complex (cartesian) value.
    :param rho: absolute value (magnitude)
    :param angle: phase in radians
    :return: rho * exp(j * angle)
    """
    phase_factor = np.exp(1j * angle)
    return rho * phase_factor
def randomize(x, y):
    """
    Randomizes the order of data samples and their corresponding labels
    :param x: data (numpy array or tf.data.Dataset)
    :param y: data labels
    :return: Tuple of (shuffled_x, shuffled_y) maintaining coherence of elements labels
    """
    if isinstance(x, tf.data.Dataset):
        # tf datasets cannot be index-shuffled; use their own shuffle (buffer 1000).
        # Labels are assumed to live inside the dataset, so y is returned untouched.
        return x.shuffle(1000), y
    # Same permutation applied to data and labels keeps them aligned.
    permutation = np.random.permutation(y.shape[0])
    shuffled_x = x[permutation, :]
    shuffled_y = y[permutation]
    return shuffled_x, shuffled_y
def normalize(x):
    """Rescales x linearly to the [0, 1] range (also works for complex values)."""
    lowest = np.amin(x)
    span = np.abs(np.amax(x) - lowest)
    return (x - lowest) / span
def standarize(x):
    """Centers x to zero mean and scales it to unit standard deviation."""
    mu = np.mean(x)
    sigma = np.std(x)
    return (x - mu) / sigma
def tensorflow_argmax_np_equivalent(x, num_classes):
    """
    One-hot encodes the argmax (over axis 1) of x, mimicking tf argmax + one-hot.
    :param x: 2D array of scores, shape (samples, scores_per_sample)
    :param num_classes: number of columns of the returned one-hot matrix
    :return: array of shape (x.shape[0], num_classes) with a single 1 per row
    """
    winners = np.argmax(x, 1)
    res = np.zeros((winners.shape[0], num_classes))
    # Vectorized one-hot assignment replaces the original per-row Python loop.
    res[np.arange(winners.shape[0]), winners] = 1
    return res
def compute_accuracy(x, y):
    """Fraction of rows in which x and y agree on every element."""
    row_matches = np.equal(x, y).all(axis=1)
    return np.average(row_matches)
def median_error(q_75: float, q_25: float, n: int) -> float:
    """
    Estimates the standard error of the median from the inter-quartile range.
    :param q_75: 75th percentile (upper quartile), must be >= q_25 >= 0
    :param q_25: 25th percentile (lower quartile)
    :param n: number of samples
    :return: estimated median error 1.57 * (q_75 - q_25) / sqrt(n)

    Fix: the return annotation said `int`, but the value is a float.
    """
    assert q_75 >= q_25 >= 0.0, f"q_75 {q_75} < q_25 {q_25}"
    return 1.57*(q_75-q_25)/np.sqrt(n)
if __name__ == "__main__":
logger.warning("Testing logger")
__author__ = 'J. Agustin BARRACHINA'
__version__ = '0.0.28'
__maintainer__ = 'J. Agustin BARRACHINA'
__email__ = 'joseagustin.barra@gmail.com; jose-agustin.barrachina@centralesupelec.fr'
| 7,066 | 31.869767 | 120 | py |
cvnn | cvnn-master/cvnn/_version.py | __version__ = '2.0'
| 20 | 9.5 | 19 | py |
cvnn | cvnn-master/cvnn/cli.py | from argparse import ArgumentParser
from cvnn import __version__
def cli(args=None):
    """Entry point of the cvnn command-line interface."""
    parser = ArgumentParser(
        description="Library to help implement a complex-valued neural network (cvnn) using tensorflow as back-end",
        conflict_handler='resolve'
    )
    parser.add_argument(
        '-V', '--version',
        action='version',
        version="cvnn %s" % __version__,
        help='Show the conda-prefix-replacement version number and exit.',
    )
    parser.parse_args(args)
    # do something with the args
    print("CLI template - fix me up!")
    # No return value means no error.
    # Return a value of 1 or higher to signify an error.
    # See https://docs.python.org/3/library/sys.html#sys.exit
if __name__ == '__main__':
import sys
cli(sys.argv[1:])
| 791 | 26.310345 | 116 | py |
cvnn | cvnn-master/cvnn/metrics.py | import tensorflow as tf
from tensorflow.keras.metrics import Accuracy, CategoricalAccuracy, Precision, Recall, Mean
from tensorflow_addons.metrics import F1Score, CohenKappa
from tensorflow.python.keras import backend
class ComplexAccuracy(Accuracy):
    """Accuracy metric accepting complex-valued predictions and labels.

    Complex y_pred is reduced to (real + imag) / 2 before delegating to the
    real-valued parent metric.
    """
    def __init__(self, name='complex_accuracy', dtype=tf.complex64, **kwargs):
        super(ComplexAccuracy, self).__init__(name=name, dtype=dtype, **kwargs)
    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        if ignore_unlabeled:  # WARNING, this will overwrite sample_weight!
            # Samples whose one-hot label is all zeros are treated as unlabeled.
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # Fix: the original asserted on y_pred (already real at this point,
            # so imag == 0 and the assert failed spuriously) and never cast
            # y_true, letting complex labels reach the parent metric.
            # Mirrors the handling in ComplexAverageAccuracy.
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), \
                "y_true must have equal real and imaginary parts"
            y_true = tf.math.real(y_true)
        super(ComplexAccuracy, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
class ComplexCategoricalAccuracy(CategoricalAccuracy):
    """Categorical accuracy accepting complex-valued predictions and labels.

    Complex y_pred is reduced to (real + imag) / 2 before delegating to the
    real-valued parent metric.
    """
    def __init__(self, name='complex_categorical_accuracy', **kwargs):
        super(ComplexCategoricalAccuracy, self).__init__(name=name, **kwargs)
    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        if ignore_unlabeled:  # WARNING, this will overwrite sample_weight!
            # Samples whose one-hot label is all zeros are treated as unlabeled.
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # Fix: the original asserted on y_pred (already real here) and never
            # cast y_true. Mirrors the handling in ComplexAverageAccuracy.
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), \
                "y_true must have equal real and imaginary parts"
            y_true = tf.math.real(y_true)
        super(ComplexCategoricalAccuracy, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
class ComplexPrecision(Precision):
    """Precision metric accepting complex-valued predictions and labels.

    Complex y_pred is reduced to (real + imag) / 2 before delegating to the
    real-valued parent metric.
    """
    def __init__(self, name='complex_precision', **kwargs):
        super(ComplexPrecision, self).__init__(name=name, **kwargs)
    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        if ignore_unlabeled:  # WARNING, this will overwrite sample_weight!
            # Samples whose one-hot label is all zeros are treated as unlabeled.
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # Fix: the original asserted on y_pred (already real here) and never
            # cast y_true. Mirrors the handling in ComplexAverageAccuracy.
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), \
                "y_true must have equal real and imaginary parts"
            y_true = tf.math.real(y_true)
        super(ComplexPrecision, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
class ComplexRecall(Recall):
    """Recall metric accepting complex-valued predictions and labels.

    Complex y_pred is reduced to (real + imag) / 2 before delegating to the
    real-valued parent metric.
    """
    def __init__(self, name='complex_recall', **kwargs):
        super(ComplexRecall, self).__init__(name=name, **kwargs)
    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        if ignore_unlabeled:  # WARNING, this will overwrite sample_weight!
            # Samples whose one-hot label is all zeros are treated as unlabeled.
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # Fix: the original asserted on y_pred (already real here) and never
            # cast y_true. Mirrors the handling in ComplexAverageAccuracy.
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), \
                "y_true must have equal real and imaginary parts"
            y_true = tf.math.real(y_true)
        super(ComplexRecall, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
class ComplexCohenKappa(CohenKappa):
    """Cohen's kappa metric accepting complex-valued predictions and labels.

    Complex y_pred is reduced to (real + imag) / 2 before delegating to the
    real-valued parent metric.
    """
    def __init__(self, name='complex_cohen_kappa', **kwargs):
        super(ComplexCohenKappa, self).__init__(name=name, **kwargs)
    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        if ignore_unlabeled:  # WARNING, this will overwrite sample_weight!
            # Samples whose one-hot label is all zeros are treated as unlabeled.
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # Fix: the original asserted on y_pred (already real here) and never
            # cast y_true. Mirrors the handling in ComplexAverageAccuracy.
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), \
                "y_true must have equal real and imaginary parts"
            y_true = tf.math.real(y_true)
        super(ComplexCohenKappa, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
class ComplexF1Score(F1Score):
    """F1 score metric accepting complex-valued predictions and labels.

    Complex y_pred is reduced to (real + imag) / 2 before delegating to the
    real-valued parent metric.
    """
    def __init__(self, name='complex_f1_score', **kwargs):
        super(ComplexF1Score, self).__init__(name=name, **kwargs)
    def update_state(self, y_true, y_pred, sample_weight=None, ignore_unlabeled=True):
        if ignore_unlabeled:  # WARNING, this will overwrite sample_weight!
            # Samples whose one-hot label is all zeros are treated as unlabeled.
            sample_weight = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:
            # Fix: the original asserted on y_pred (already real here) and never
            # cast y_true. Mirrors the handling in ComplexAverageAccuracy.
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), \
                "y_true must have equal real and imaginary parts"
            y_true = tf.math.real(y_true)
        super(ComplexF1Score, self).update_state(y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
def _accuracy(y_true, y_pred):
    # Element-wise match ratio along the last axis; divide_no_nan makes the
    # empty-axis case return 0 instead of NaN.
    y_true.shape.assert_is_compatible_with(y_pred.shape)
    if y_true.dtype != y_pred.dtype:
        y_pred = tf.cast(y_pred, y_true.dtype)
    reduced_sum = tf.reduce_sum(tf.cast(tf.math.equal(y_true, y_pred), backend.floatx()), axis=-1)
    return tf.math.divide_no_nan(reduced_sum, tf.cast(tf.shape(y_pred)[-1], reduced_sum.dtype))
def custom_average_accuracy(y_true, y_pred):
    """
    Average accuracy: the mean of the per-class accuracies over one-hot encoded
    labels, ignoring samples whose label vector is all zeros (unlabeled).
    Implemented with tf ops (TensorArray) so it can run in graph mode.
    """
    # Mask to remove the labels (y_true) that are zero: ex. [0, 0, 0]
    remove_zeros_mask = tf.math.logical_not(tf.math.reduce_all(tf.math.logical_not(tf.cast(y_true, bool)), axis=-1))
    y_true = tf.boolean_mask(y_true, remove_zeros_mask)
    y_pred = tf.boolean_mask(y_pred, remove_zeros_mask)
    num_cls = y_true.shape[-1]  # get total amount of classes
    y_pred = tf.math.argmax(y_pred, axis=-1)  # one hot encoded to sparse
    y_true = tf.math.argmax(y_true, axis=-1)  # ex. [0, 0, 1] -> [2]
    accuracies = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
    for i in range(0, num_cls):
        cls_mask = y_true == i
        cls_y_true = tf.boolean_mask(y_true, cls_mask)
        # Classes absent from this batch are skipped so they do not drag the average down.
        if tf.not_equal(tf.size(cls_y_true), 0):
            new_acc = _accuracy(y_true=cls_y_true, y_pred=tf.boolean_mask(y_pred, cls_mask))
            accuracies = accuracies.write(accuracies.size(), new_acc)
    # import pdb; pdb.set_trace()
    accuracies = accuracies.stack()
    # return tf.cast(len(accuracies), dtype=accuracies.dtype)
    return tf.math.reduce_sum(accuracies) / tf.cast(len(accuracies), dtype=accuracies.dtype)
class ComplexAverageAccuracy(Mean):
    """Average (mean per-class) accuracy supporting complex y_true / y_pred.

    Complex y_pred is reduced to (real + imag) / 2; complex y_true must have
    equal real and imaginary parts and is cast to its real part.
    """
    def __init__(self, name='complex_average_accuracy', dtype=None):
        self._fn = custom_average_accuracy
        super(ComplexAverageAccuracy, self).__init__(name, dtype=dtype)
    def update_state(self, y_true, y_pred, sample_weight=None):
        # WARNING: sample_weights will not be used
        y_pred = tf.convert_to_tensor(y_pred)
        y_true = tf.convert_to_tensor(y_true)
        if y_pred.dtype.is_complex:  # make y_pred real valued
            y_pred = (tf.math.real(y_pred) + tf.math.imag(y_pred)) / 2
        if y_true.dtype.is_complex:  # make y_true real valued
            # Fix: the assertion message wrongly referred to y_pred.
            assert tf.math.reduce_all(tf.math.real(y_true) == tf.math.imag(y_true)), \
                "y_true must have equal real and imaginary parts"
            y_true = tf.math.real(y_true)
        matches = self._fn(y_true, y_pred)
        return super(ComplexAverageAccuracy, self).update_state(matches)
if __name__ == '__main__':
m = ComplexAccuracy()
m.update_state([[1+1j], [2+1j], [3+1j], [4+1j]], [[1+1j], [2+1j], [3+5j], [4+5j]])
print(m.result().numpy())
| 8,397 | 50.521472 | 120 | py |
cvnn | cvnn-master/cvnn/activations.py | import tensorflow as tf
from tensorflow.keras.layers import Activation
from typing import Union, Callable, Optional
from tensorflow import Tensor
from numpy import pi
"""
This module contains many complex-valued activation functions to be used by CVNN class.
"""
# logger = logging.getLogger(cvnn.__name__)
t_activation = Union[str, Callable] # TODO: define better
# Regression
def linear(z: Tensor) -> Tensor:
    """
    Does not apply any activation function. It just outputs the input.
    :param z: Input tensor variable
    :return: z
    """
    return z
def modrelu(z: Tensor, b: float = 1., c: float = 1e-3) -> Tensor:
    """
    mod ReLU presented in "Unitary Evolution Recurrent Neural Networks"
    from M. Arjovsky et al. (2016)
    URL: https://arxiv.org/abs/1511.06464
    A variation of the ReLU named modReLU. It is a pointwise nonlinearity,
    modReLU(z) : C -> C, which affects only the absolute
    value of a complex number, defined:
        modReLU(z) = ReLU(|z|+b)*z/|z|
    :param b: bias added to the modulus before the ReLU.
    :param c: small constant added to the denominator to avoid dividing by zero.
    TODO: See how to check the non zero abs.
    """
    abs_z = tf.math.abs(z)
    return tf.cast(tf.keras.activations.relu(abs_z + b), dtype=z.dtype) * z / tf.cast(abs_z + c, dtype=z.dtype)
def zrelu(z: Tensor, epsilon=1e-7) -> Tensor:
    """
    zReLU presented in "On Complex Valued Convolutional Neural Networks"
    from Nitzan Guberman (2016).
    This method lets the output equal the input if both real and imaginary parts are positive.
    https://stackoverflow.com/questions/49412717/advanced-custom-activation-function-in-keras-tensorflow
    """
    imag_relu = tf.nn.relu(tf.math.imag(z))
    real_relu = tf.nn.relu(tf.math.real(z))
    # The product imag_relu*real_relu is non-zero only when BOTH parts are
    # positive; dividing it back by each (+ epsilon) recovers (approximately)
    # the original part in that case and 0 otherwise.
    ret_real = imag_relu*real_relu / (imag_relu + epsilon)
    ret_imag = imag_relu*real_relu / (real_relu + epsilon)
    ret_val = tf.complex(ret_real, ret_imag)
    return ret_val
def crelu(z: Tensor, alpha: float = 0.0, max_value: Optional[float] = None, threshold: float = 0) -> Tensor:
    """
    Mirror of cart_relu (defined later in this module).
    """
    return cart_relu(z, alpha, max_value, threshold)
def complex_cardioid(z: Tensor) -> Tensor:
    """
    Complex cardioid presented in "Better than Real: Complex-valued Neural Nets for MRI Fingerprinting"
    from V. Patrick (2017).
    This function maintains the phase information while attenuating the magnitude based on the phase itself.
    For real-valued inputs, it reduces to the ReLU.
    """
    # Scales z by (1 + cos(angle(z))) / 2, which keeps the phase untouched.
    return tf.cast(1 + tf.math.cos(tf.math.angle(z)), dtype=z.dtype) * z / 2.
"""
Complex input, real output
"""
def cast_to_real(z: Tensor) -> Tensor:
    # Keeps only the real component of z via a dtype cast.
    return tf.cast(z, z.dtype.real_dtype)
def sigmoid_real(z: Tensor) -> Tensor:
    # Real-valued sigmoid applied to the sum of the real and imaginary parts.
    return tf.keras.activations.sigmoid(tf.math.real(z) + tf.math.imag(z))
def softmax_real_with_abs(z: Tensor, axis=-1) -> Tensor:
    """
    Applies the softmax function to the modulus of z (or directly to z when it is real).
    The softmax transforms the outputs so that all values are in range (0, 1) and sum to 1,
    so the result can be interpreted as a probability distribution.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if not z.dtype.is_complex:
        return tf.keras.activations.softmax(z, axis)
    return tf.keras.activations.softmax(tf.math.abs(z), axis)
def softmax_real_with_avg(z: Tensor, axis=-1) -> Tensor:
    """
    Averages the softmax of the real part and the softmax of the imaginary part of z.
    For a real-valued input it reduces to a single softmax.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if z.dtype.is_complex:
        # Fix: the original averaged softmax(real) with itself; the second term
        # must be computed from the imaginary part.
        return 0.5 * (tf.keras.activations.softmax(tf.math.real(z), axis) + tf.keras.activations.softmax(
            tf.math.imag(z), axis))
    else:
        return tf.keras.activations.softmax(z, axis)
def softmax_real_with_mult(z: Tensor, axis=-1) -> Tensor:
    """
    Multiplies element-wise the softmax of the real part by the softmax of the
    imaginary part of z. For a real-valued input it reduces to a single softmax.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if z.dtype.is_complex:
        # Fix: the original multiplied softmax(real) by itself; the second
        # factor must be computed from the imaginary part.
        return tf.keras.activations.softmax(tf.math.real(z), axis) * tf.keras.activations.softmax(tf.math.imag(z), axis)
    else:
        return tf.keras.activations.softmax(z, axis)
def softmax_of_softmax_real_with_mult(z: Tensor, axis=-1) -> Tensor:
    """
    Applies a second softmax to the element-wise product of the softmax of the
    real part and the softmax of the imaginary part of z.
    For a real-valued input it reduces to a single softmax.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if z.dtype.is_complex:
        # Fix: the original multiplied softmax(real) by itself; the second
        # factor must be computed from the imaginary part.
        return tf.keras.activations.softmax(
            tf.keras.activations.softmax(tf.math.real(z), axis) * tf.keras.activations.softmax(tf.math.imag(z), axis),
            axis)
    else:
        return tf.keras.activations.softmax(z, axis)
def softmax_of_softmax_real_with_avg(z: Tensor, axis=-1) -> Tensor:
    """
    Applies a second softmax to the element-wise sum of the softmax of the
    real part and the softmax of the imaginary part of z.
    For a real-valued input it reduces to a single softmax.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if z.dtype.is_complex:
        # Fix: the original added softmax(real) to itself; the second term
        # must be computed from the imaginary part.
        return tf.keras.activations.softmax(
            tf.keras.activations.softmax(tf.math.real(z), axis) + tf.keras.activations.softmax(tf.math.imag(z), axis),
            axis)
    else:
        return tf.keras.activations.softmax(z, axis)
def softmax_real_by_parameter(z: Tensor, axis=-1, params: Optional[dict] = None) -> Tensor:
    """
    Applies a real-valued softmax to each selected representation of z
    (abs / angle / real / imag) and stacks the results into one tensor.
    :param z: Input tensor.
    :param axis: Axis along which each softmax is computed.
    :param params: dict mapping 'abs', 'angle', 'real', 'imag' to booleans that
        select which representations to include. Defaults to all True.
    :return: Tensor stacking one softmax per selected representation.
    """
    if params is None:
        params = {
            'abs': True,
            'angle': True,
            'real': True,
            'imag': True
        }
    result = []
    # Fix: iterating a dict directly yields keys only, so the original
    # `for k, v in params` raised ValueError; iterate over items() instead.
    for k, v in params.items():
        if k == 'abs' and v:
            result.append(tf.keras.activations.softmax(tf.math.abs(z), axis))
        if k == 'angle' and v:
            result.append(tf.keras.activations.softmax(tf.math.angle(z), axis))
        if k == 'real' and v:
            result.append(tf.keras.activations.softmax(tf.math.real(z), axis))
        if k == 'imag' and v:
            result.append(tf.keras.activations.softmax(tf.math.imag(z), axis))
    return tf.convert_to_tensor(result)
def convert_to_real_with_abs(z: Tensor) -> Tensor:
    """
    Returns the absolute value of z when z is complex; real inputs pass through unchanged.
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    return tf.math.abs(z) if z.dtype.is_complex else z
def softmax_real_with_polar(z: Tensor, axis=-1) -> Tensor:
    """
    Averages the softmax of the modulus of z and the softmax of the phase of z.
    For a real-valued input it reduces to a single softmax.
    The softmax activation function transforms the outputs so that all values are in range (0, 1) and sum to 1.
    The softmax of x is calculated by exp(x)/tf.reduce_sum(exp(x)).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :return: Real-valued tensor of the applied activation function
    """
    if z.dtype.is_complex:
        return 0.5 * (tf.keras.activations.softmax(tf.math.abs(z), axis) + tf.keras.activations.softmax(tf.math.angle(z),
                                                                                                        axis))
    else:
        return tf.keras.activations.softmax(z, axis)
"""
etf Functions
"""
def etf_circular_tan(z: Tensor) -> Tensor:
    # Elementary transcendental function (ETF): complex circular tangent.
    return tf.math.tan(z)
def etf_circular_sin(z: Tensor) -> Tensor:
    # ETF: complex circular sine.
    return tf.math.sin(z)
def etf_inv_circular_atan(z: Tensor) -> Tensor:
    # ETF: inverse circular tangent (arctan).
    return tf.math.atan(z)
def etf_inv_circular_asin(z: Tensor) -> Tensor:
    # ETF: inverse circular sine (arcsin).
    return tf.math.asin(z)
def etf_inv_circular_acos(z: Tensor) -> Tensor:
    # ETF: inverse circular cosine (arccos).
    return tf.math.acos(z)
def etf_circular_tanh(z: Tensor) -> Tensor:
    # ETF: hyperbolic tangent.
    return tf.math.tanh(z)
def etf_circular_sinh(z: Tensor) -> Tensor:
    # ETF: hyperbolic sine.
    return tf.math.sinh(z)
def etf_inv_circular_atanh(z: Tensor) -> Tensor:
    # ETF: inverse hyperbolic tangent.
    return tf.math.atanh(z)
def etf_inv_circular_asinh(z: Tensor) -> Tensor:
    # ETF: inverse hyperbolic sine.
    return tf.math.asinh(z)
"""
Phasor Networks
"""
def georgiou_cdbp(z:Tensor, r: float = 1, c: float = 1e-3) -> Tensor:
    """
    Activation function proposed by G. M. Georgioy and C. Koutsougeras in
    https://ieeexplore.ieee.org/abstract/document/142037
    :param z: Input tensor.
    :param r: radius scaling of the attenuation.
    :param c: small constant keeping the denominator away from zero at z = 0.
    """
    # Keeps the phase of z and squashes its magnitude by 1 / (c + |z| / r).
    return z / tf.cast(c + tf.math.abs(z)/r, dtype=z.dtype)
def complex_signum(z: Tensor, k: Optional[int] = None) -> Tensor:
    """
    Complex signum activation function is very similar to mvn_activation.
    For a detailed explanation refer to:
    https://ieeexplore.ieee.org/abstract/document/548176
    :param z: Input tensor.
    :param k: optional number of phase sectors; when given, the phase is
        quantized to multiples of 2*pi/k before projecting onto the unit circle.
    """
    if k:
        # values = np.linspace(pi / k, 2 * pi - pi / k, k)
        # floor-bins the phase into k sectors. NOTE(review): tf.math.angle
        # returns values in (-pi, pi], so negative angles give negative sector
        # indices — confirm this matches the intended sector numbering.
        angle_cast = tf.math.floor(tf.math.angle(z) * k / (2 * pi))
        # import pdb; pdb.set_trace()
        return tf.math.exp(tf.complex(
            tf.zeros(tf.shape(z), dtype=z.dtype.real_dtype), angle_cast * 2 * pi / k))
    else:
        # No quantization: project z onto the unit circle, keeping its phase.
        return tf.math.exp(tf.complex(tf.zeros(tf.shape(z), dtype=z.dtype.real_dtype), tf.math.angle(z)))
def mvn_activation(z: Tensor, k: Optional[int] = None) -> Tensor:
    """
    Function inspired by Naum Aizenberg.
    A multi-valued neuron (MVN) is a neural element with n inputs and one output lying on the unit circle,
    and with complex-valued weights.
    Works:
        https://link.springer.com/article/10.1007%2FBF01068667
        http://pefmath2.etf.rs/files/93/399.pdf
    :param z: Input tensor.
    :param k: optional number of phase sectors; unlike complex_signum, the
        output phase is shifted to the middle of each sector (the +0.5 below).
    """
    if k:
        # values = np.linspace(pi / k, 2 * pi - pi / k, k)
        angle_cast = tf.math.floor(tf.math.angle(z) * k / (2 * pi))
        # import pdb; pdb.set_trace()
        return tf.math.exp(tf.complex(
            tf.zeros(tf.shape(z), dtype=z.dtype.real_dtype), (angle_cast + 0.5) * 2 * pi / k))
    else:
        return tf.math.exp(tf.complex(tf.zeros(tf.shape(z), dtype=z.dtype.real_dtype), tf.math.angle(z)))
"""
TYPE A: Cartesian form.
"""
# TODO: shall I use tf.nn or tf.keras.activation modules?
# https://stackoverflow.com/questions/54761088/tf-nn-relu-vs-tf-keras-activations-relu
# nn has leaky relu, activation doesn't
def cart_sigmoid(z: Tensor) -> Tensor:
    """
    Cartesian (Type A) sigmoid: the real sigmoid is applied independently to the
    real and imaginary parts, i.e. sigmoid(x) + j * sigmoid(y) where z = x + j * y.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/sigmoid
    :param z: Tensor to be used as input of the activation function
    :return: Tensor result of the applied activation function
    """
    real_part = tf.keras.activations.sigmoid(tf.math.real(z))
    imag_part = tf.keras.activations.sigmoid(tf.math.imag(z))
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
def cart_elu(z: Tensor, alpha=1.0) -> Tensor:
    """
    Cartesian ELU: the "Exponential linear unit" (x if x > 0, alpha * (exp(x) - 1)
    if x < 0) is applied independently to the real and imaginary parts of z.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/elu
    :param z: Input tensor.
    :param alpha: A scalar, slope of negative section.
    :return: Tensor result of the applied activation function
    """
    elu = tf.keras.activations.elu
    real_part = elu(tf.math.real(z), alpha)
    imag_part = elu(tf.math.imag(z), alpha)
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
def cart_exponential(z: Tensor) -> Tensor:
    """
    Cartesian exponential: exp(x) is applied independently to the real and
    imaginary parts of z.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/exponential
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    exponential = tf.keras.activations.exponential
    real_part = exponential(tf.math.real(z))
    imag_part = exponential(tf.math.imag(z))
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
def cart_hard_sigmoid(z: Tensor) -> Tensor:
    """
    Cartesian hard sigmoid: applied independently to the real and imaginary parts.
    Faster to compute than the exact sigmoid. Piece-wise definition:
        0               if x < -2.5
        1               if x > 2.5
        0.2 * x + 0.5   if -2.5 <= x <= 2.5
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/hard_sigmoid
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    hard_sigmoid = tf.keras.activations.hard_sigmoid
    real_part = hard_sigmoid(tf.math.real(z))
    imag_part = hard_sigmoid(tf.math.imag(z))
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
def cart_relu(z: Tensor, alpha: float = 0.0, max_value: Optional[float] = None, threshold: float = 0) -> Tensor:
    """
    Cartesian ReLU: the rectified linear unit is applied independently to the real
    and imaginary parts of z. With default arguments it is element-wise max(x, 0);
    in general:
        f(x) = max_value               for x >= max_value
        f(x) = x                       for threshold <= x < max_value
        f(x) = alpha * (x - threshold) otherwise
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/relu
    :param z: Tensor -- Input tensor.
    :param alpha: float -- Slope for values lower than the threshold (default 0.0).
    :param max_value: Optional float -- Saturation threshold, the largest value the
        function will return (default None, i.e. unbounded).
    :param threshold: float -- Threshold below which values are damped or zeroed (default 0).
    :return: Tensor result of the applied activation function
    """
    def _real_relu(part):
        # Same parametrized ReLU for both components.
        return tf.keras.activations.relu(part, alpha, max_value, threshold)
    return tf.cast(tf.complex(_real_relu(tf.math.real(z)), _real_relu(tf.math.imag(z))), dtype=z.dtype)
def cart_leaky_relu(z: Tensor, alpha=0.2, name=None) -> Tensor:
    """
    Cartesian leaky ReLU: tf.nn.leaky_relu is applied independently to the real
    and imaginary parts of z.
    https://www.tensorflow.org/api_docs/python/tf/nn/leaky_relu
    http://robotics.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf
    :param z: Input tensor.
    :param alpha: Slope of the activation function at x < 0. Default: 0.2
    :param name: A name for the operation (optional).
    :return: Tensor result of the applied activation function
    """
    real_part = tf.nn.leaky_relu(tf.math.real(z), alpha, name)
    imag_part = tf.nn.leaky_relu(tf.math.imag(z), alpha, name)
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
def cart_selu(z: Tensor) -> Tensor:
    """
    Cartesian SELU: the Scaled Exponential Linear Unit, scale * elu(x, alpha),
    is applied independently to the real and imaginary parts of z.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/selu
    https://arxiv.org/abs/1706.02515
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    selu = tf.keras.activations.selu
    real_part = selu(tf.math.real(z))
    imag_part = selu(tf.math.imag(z))
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
def cart_softplus(z: Tensor) -> Tensor:
    """
    Cartesian softplus: log(exp(x) + 1) applied independently to the real and
    imaginary parts of z.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softplus
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    softplus = tf.keras.activations.softplus
    real_part = softplus(tf.math.real(z))
    imag_part = softplus(tf.math.imag(z))
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
def cart_softsign(z: Tensor) -> Tensor:
    """
    Cartesian softsign: x / (abs(x) + 1) applied independently to the real and
    imaginary parts of z.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softsign
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    softsign = tf.keras.activations.softsign
    real_part = softsign(tf.math.real(z))
    imag_part = softsign(tf.math.imag(z))
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
def cart_tanh(z: Tensor) -> Tensor:
    """
    Cartesian tanh: the hyperbolic tangent,
        tanh(x) = sinh(x)/cosh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)),
    is applied independently to the real and imaginary parts of z.
    Its derivative is 1 - tanh^2, so it is cheap to compute for backprop.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/tanh
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    tanh = tf.keras.activations.tanh
    real_part = tanh(tf.math.real(z))
    imag_part = tanh(tf.math.imag(z))
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
# Classification
def cart_softmax(z: Tensor, axis=-1) -> Tensor:
    """
    Cartesian softmax: exp(x) / reduce_sum(exp(x)) applied independently to the
    real and imaginary parts of z. All output components are in (0, 1) and each
    part sums to 1 along `axis`, so it is often used as the last layer of a
    classification network (the result reads as a probability distribution).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/softmax
    :param z: Input tensor.
    :param axis: Axis along which the softmax normalization is applied (default -1).
    :return: Tensor result of the applied activation function
    """
    softmax = tf.keras.activations.softmax
    real_part = softmax(tf.math.real(z), axis)
    imag_part = softmax(tf.math.imag(z), axis)
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
"""
TYPE B: Polar form.
"""
# For all ReLU functions, the polar form makes no real sense. If we keep the phase because abs(z) > 0
def _apply_pol(z: Tensor, amp_fun: Callable[[Tensor], Tensor],
               pha_fun: Optional[Callable[[Tensor], Tensor]] = None) -> Tensor:
    """Apply `amp_fun` to |z| (and optionally `pha_fun` to angle(z)), then rebuild the complex value."""
    modulus = amp_fun(tf.math.abs(z))
    phase = tf.math.angle(z)
    if pha_fun is not None:
        phase = pha_fun(phase)
    real_part = modulus * tf.math.cos(phase)
    imag_part = modulus * tf.math.sin(phase)
    return tf.cast(tf.complex(real_part, imag_part), dtype=z.dtype)
def pol_tanh(z: Tensor) -> Tensor:
    """
    Applies Hyperbolic Tangent (tanh) activation function to the amplitude of the complex number
    leaving the phase untouched.
    The derivative of tanh is computed as 1 - tanh^2 so it should be fast to compute for backprop.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/tanh
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    """
    return _apply_pol(z, tf.keras.activations.tanh)
def pol_sigmoid(z: Tensor) -> Tensor:
    """
    Applies the sigmoid function to the amplitude of the complex number leaving the phase untouched.
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/sigmoid
    :param z: Tensor to be used as input of the activation function
    :return: Tensor result of the applied activation function
    """
    return _apply_pol(z, tf.keras.activations.sigmoid)
def pol_selu(z: Tensor) -> Tensor:
    """
    Applies Scaled Exponential Linear Unit (SELU) to the absolute value of z, keeping the phase unchanged.
    The scaled exponential unit activation: scale * elu(x, alpha).
    https://www.tensorflow.org/api_docs/python/tf/keras/activations/selu
    https://arxiv.org/abs/1706.02515
    :param z: Input tensor.
    :return: Tensor result of the applied activation function
    Logic:
        I must maintain the phase (angle) so: cos(theta) = x_0/r_0 = x_1/r_1.
        For real case, x_0 = r_0 so it also works.
    """
    r_0 = tf.abs(z)
    r_1 = tf.keras.activations.selu(r_0)
    # Bug fix: r_1 / r_0 was 0/0 = NaN at z == 0. Since selu(0) = 0, the correct
    # output there is 0, which divide_no_nan returns for the 0/0 case.
    factor = tf.math.divide_no_nan(r_1, r_0)
    return tf.cast(tf.complex(tf.math.real(z) * factor, tf.math.imag(z) * factor), dtype=z.dtype)
# Name -> callable lookup table for every activation defined in this module.
# It is passed to tf.keras.utils.get_custom_objects().update(...) in
# cvnn/__init__.py, so each activation can be referenced by its string name.
act_dispatcher = {
    'linear': linear,
    # Complex input, real output
    'cast_to_real': cast_to_real,
    'convert_to_real_with_abs': convert_to_real_with_abs,
    'sigmoid_real': sigmoid_real,
    'softmax_real_with_abs': softmax_real_with_abs,
    'softmax_real_with_avg': softmax_real_with_avg,
    'softmax_real_with_mult': softmax_real_with_mult,
    'softmax_of_softmax_real_with_mult': softmax_of_softmax_real_with_mult,
    'softmax_of_softmax_real_with_avg': softmax_of_softmax_real_with_avg,
    'softmax_real_with_polar': softmax_real_with_polar,
    # Phasor networks
    'georgiou_cdbp': georgiou_cdbp,
    'mvn_activation': mvn_activation,
    'complex_signum': complex_signum,
    # Type A (cartesian)
    'cart_sigmoid': cart_sigmoid,
    'cart_elu': cart_elu,
    'cart_exponential': cart_exponential,
    'cart_hard_sigmoid': cart_hard_sigmoid,
    'cart_relu': cart_relu,
    'cart_leaky_relu': cart_leaky_relu,
    'cart_selu': cart_selu,
    'cart_softplus': cart_softplus,
    'cart_softsign': cart_softsign,
    'cart_tanh': cart_tanh,
    'cart_softmax': cart_softmax,
    # Type B (polar)
    'pol_tanh': pol_tanh,
    'pol_sigmoid': pol_sigmoid,
    'pol_selu': pol_selu,
    # Elementary Transcendental Functions (ETF)
    'etf_circular_tan': etf_circular_tan,
    'etf_circular_sin': etf_circular_sin,
    'etf_inv_circular_atan': etf_inv_circular_atan,
    'etf_inv_circular_asin': etf_inv_circular_asin,
    'etf_inv_circular_acos': etf_inv_circular_acos,
    'etf_circular_tanh': etf_circular_tanh,
    'etf_circular_sinh': etf_circular_sinh,
    'etf_inv_circular_atanh': etf_inv_circular_atanh,
    'etf_inv_circular_asinh': etf_inv_circular_asinh,
    # ReLU
    'modrelu': modrelu,
    'crelu': crelu,
    'zrelu': zrelu,
    'complex_cardioid': complex_cardioid
}
if __name__ == '__main__':
    # Manual smoke test: run each ReLU-family activation on a small complex
    # tensor (results are intentionally discarded; this only checks they run).
    x = tf.constant([-2, 1.0, 0.0, 1.0, -3, 0.8, 0.1], dtype=tf.float32)
    y = tf.constant([-2.5, -1.5, 0.0, 1.0, 2, 0.4, -0.4], dtype=tf.float32)
    z = tf.complex(x, y)
    result = crelu(z)
    result = modrelu(z, 4)
    result = zrelu(z)
    result = complex_cardioid(z)
    # The string below is a disabled matplotlib visualization of georgiou_cdbp
    # (kept as-is for manual experimentation).
    """import matplotlib.pyplot as plt
    import numpy as np
    x = tf.constant([-2, 1.0, 0.0, 1.0, -3, 0.8, 0.1], dtype=tf.float32)
    y = tf.constant([-2.5, -1.5, 0.0, 1.0, 2, 0.4, -0.4], dtype=tf.float32)
    z = tf.complex(x, y)
    result = georgiou_cdbp(z)
    ax = plt.axes()
    ax.scatter(tf.math.real(z), tf.math.imag(z), color='red')
    ax.scatter(tf.math.real(result), tf.math.imag(result), color='blue')
    for x, y, dx, dy in zip(tf.math.real(z), tf.math.imag(z),
                            tf.math.real(result) - tf.math.real(z),
                            tf.math.imag(result) - tf.math.imag(z)):
        ax.arrow(x, y, dx, dy, length_includes_head=True, head_width=0.1)
    t = np.linspace(0, np.pi * 2, 100)
    ax.plot(np.cos(t), np.sin(t), linewidth=1)
    yabs_max = abs(max(ax.get_ylim(), key=abs))
    xabs_max = abs(max(ax.get_xlim(), key=abs))
    axis_max = max(yabs_max, xabs_max)
    ax.set_ylim(ymin=-axis_max, ymax=axis_max)
    ax.set_xlim(xmin=-axis_max, xmax=axis_max)
    plt.show()"""
# Module metadata.
__author__ = 'J. Agustin BARRACHINA'
__version__ = '0.0.21'
__maintainer__ = 'J. Agustin BARRACHINA'
__email__ = 'joseagustin.barra@gmail.com; jose-agustin.barrachina@centralesupelec.fr'
| 24,929 | 39.080386 | 127 | py |
cvnn | cvnn-master/cvnn/__init__.py | import logging
import colorlog
import re
import os
from cvnn.utils import create_folder
from tensorflow.keras.utils import get_custom_objects
from cvnn.activations import act_dispatcher
from cvnn.initializers import init_dispatcher
get_custom_objects().update(act_dispatcher)  # Register complex activations so Keras resolves them by string name
get_custom_objects().update(init_dispatcher)  # Same for the complex weight initializers
def get_version() -> str:
    """
    Return the package version string parsed from ``_version.py`` (located next
    to this file).

    Looks first for a ``__version__ = "..."`` assignment and falls back to a
    JSON-style ``"version": "..."`` entry.

    :raises RuntimeError: if neither pattern matches.
    """
    versionfile = os.path.split(os.path.realpath(__file__))[0] + "/_version.py"
    # Bug fix: the file handle was previously leaked (open(...).read() with no close).
    with open(versionfile, "rt") as version_fp:
        verstrline = version_fp.read()
    mo = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", verstrline, re.M)
    if mo is None:
        # Fall back to a JSON-style entry, e.g. {"version": "1.2.3"}.
        mo = re.search(r"\"version\": ['\"]([^'\"]*)['\"]", verstrline, re.M)
    if mo is None:
        raise RuntimeError("Unable to find version string in %s." % (versionfile,))
    return mo.group(1)
# How to comment script header
# https://medium.com/@rukavina.andrei/how-to-write-a-python-script-header-51d3cec13731
# Package metadata. The `{...}` values are unfilled template placeholders.
__author__ = 'J. Agustin BARRACHINA'
__copyright__ = 'Copyright 2020, {project_name}'
__credits__ = ['{credit_list}']
__license__ = '{license}'
__version__ = get_version()
__maintainer__ = 'J. Agustin BARRACHINA'
__email__ = 'joseagustin.barra@gmail.com; jose-agustin.barrachina@centralesupelec.fr'
__status__ = '{dev_status}'
# logging.getLogger('tensorflow').disabled = True # Removes https://github.com/tensorflow/tensorflow/issues/41557
# Log record layout shared by all handlers.
STRING_FORMATTER = "%(asctime)s — %(levelname)s - %(module)s::%(funcName)s line %(lineno)s — %(message)s"
# File logging is currently disabled; uncomment to also write ./log/logs/logs.log:
# file_handler = logging.FileHandler(create_folder("./log/logs/") / "logs.log")
# formatter = logging.Formatter(STRING_FORMATTER)
# file_handler.setFormatter(formatter)
# Colored console logging, see:
# https://github.com/borntyping/python-colorlog
# https://stackoverflow.com/a/23964880/5931672
console_handler = colorlog.StreamHandler()
console_handler.setFormatter(colorlog.ColoredFormatter('%(log_color)s' + STRING_FORMATTER))
logger = colorlog.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
# logger.addHandler(file_handler)
| 2,139 | 32.968254 | 117 | py |
cvnn | cvnn-master/cvnn/real_equiv_tools.py | import sys
import numpy as np
from tensorflow.keras import Sequential
from pdb import set_trace
from cvnn import logger
import cvnn.layers as layers
from cvnn.layers.core import ComplexLayer
from typing import Type, List
from typing import Optional
# Strategies available to size the real-valued equivalent of a complex model:
#   "np"           - double every layer (except the output for classifiers)
#   "alternate_tp" - alternate x2 / x1 multipliers (https://arxiv.org/abs/1811.12351)
#   "ratio_tp"     - one constant ratio for every hidden layer
#   "none"         - keep the complex layer sizes unchanged
EQUIV_TECHNIQUES = {
    "np", "alternate_tp", "ratio_tp", "none"
}


def get_real_equivalent_multiplier(layers_shape, classifier, equiv_technique, bias_adjust: bool = False):
    """
    Returns an array (output_multiplier) with one entry per dense layer by which
    one must multiply the real valued equivalent layer.
    In other words, the real valued equivalent layer 'i' will have:
        neurons_real_valued_layer[i] = output_multiplier[i] * neurons_complex_valued_layer[i]

    :param layers_shape: List of Keras layers; only ComplexDense layers are considered.
    :param classifier: Boolean, whether the model's task is to classify (True) or
        a regression task (False).
    :param equiv_technique: Strategy for the capacity equivalent model, one of
        EQUIV_TECHNIQUES:
        - 'np': double all layer sizes (except the last one if classifier=True)
        - 'ratio_tp': neurons_real_valued_layer[i] = r * neurons_complex_valued_layer[i],
          'r' constant for all 'i'
        - 'alternate_tp': method described in https://arxiv.org/abs/1811.12351 where one
          alternates between multiplying by 2 or 1. A special case in the middle is
          treated as a compromise between the two.
        - 'none': keep all layer sizes unchanged
    :param bias_adjust: Boolean, whether to count the bias as a trainable parameter
        when matching capacities (only used by 'ratio_tp').
    :return: output_multiplier
    """
    dense_layers = [d for d in layers_shape if isinstance(d, layers.ComplexDense)]  # Keep only dense layers
    return get_real_equivalent_multiplier_from_shape(_parse_sizes(dense_layers), classifier=classifier,
                                                     equiv_technique=equiv_technique, bias_adjust=bias_adjust)


def get_real_equivalent_multiplier_from_shape(layers_shape: List[int], equiv_technique: str,
                                              classifier: bool = True, bias_adjust: bool = False):
    """
    Same as get_real_equivalent_multiplier but takes the list of neuron counts
    (input size, hidden sizes..., output size) directly.

    :param layers_shape: List of neuron counts including input and output sizes.
    :param equiv_technique: One of EQUIV_TECHNIQUES (case-insensitive).
    :param classifier: True (default) for classification models (output size kept).
    :param bias_adjust: Count the bias when matching capacities ('ratio_tp' only).
    :return: Array of multipliers, one per weight layer (len(layers_shape) - 1).
    :raises ValueError: if equiv_technique is not in EQUIV_TECHNIQUES.
    """
    equiv_technique = equiv_technique.lower()
    if equiv_technique not in EQUIV_TECHNIQUES:
        raise ValueError(f"Unknown equiv_technique {equiv_technique}")
    if equiv_technique == "alternate_tp":
        output_multiplier = _get_alternate_capacity_equivalent(layers_shape, classifier)
    elif equiv_technique == "ratio_tp":
        output_multiplier = _get_ratio_capacity_equivalent(layers_shape, classifier,
                                                           bias_adjust=bias_adjust)
    elif equiv_technique == "np":
        output_multiplier = 2 * np.ones(len(layers_shape) - 1).astype(int)
        if classifier:
            output_multiplier[-1] = 1  # A classifier keeps the number of output classes.
    else:  # "none": keep every layer size unchanged.
        output_multiplier = np.ones(len(layers_shape) - 1).astype(int)
    return output_multiplier
def get_real_equivalent(complex_model: Type[Sequential], classifier: bool = True, capacity_equivalent: bool = True,
                        equiv_technique: str = 'ratio', name: Optional[str] = None):
    """
    Builds and compiles a real-valued Sequential model equivalent to `complex_model`.

    :param complex_model: Sequential complex-valued model to convert.
    :param classifier: True (default) if the model is a classifier (output size is
        then kept identical); False for regression (output size is doubled).
    :param capacity_equivalent: If True (default), hidden layer sizes are chosen so
        the real model has the same number of real trainable parameters; if False,
        every layer is simply doubled ('np' technique).
    :param equiv_technique: 'ratio' (default) or 'alternate'; the strategy used to
        distribute the extra neurons when capacity_equivalent=True.
    :param name: Optional name for the new model (defaults to
        "<complex_model.name>_real_equiv").
    :return: A compiled real-valued Sequential model.
    """
    assert isinstance(complex_model, Sequential), "Sorry, only sequential models supported for the moment"
    equiv_technique = equiv_technique.lower()
    if equiv_technique not in {"ratio", "alternate"}:
        logger.error("Invalid `equivalent_technique` argument: " + equiv_technique)
        sys.exit(-1)
    real_input_shape = [inp for inp in complex_model.layers[0].input_shape if inp is not None]
    real_input_shape[-1] = real_input_shape[-1]*2  # Real and imaginary parts are stacked on the last axis.
    real_shape = [layers.ComplexInput(input_shape=real_input_shape,
                                      dtype=complex_model.layers[0].input.dtype.real_dtype)]
    # Bug fix: `capacity_equivalent` (a bool) was previously passed positionally in
    # place of `equiv_technique`, crashing inside
    # get_real_equivalent_multiplier_from_shape (bool has no .lower()). Map the
    # (capacity_equivalent, equiv_technique) pair to a valid EQUIV_TECHNIQUES key.
    full_technique = equiv_technique + "_tp" if capacity_equivalent else "np"
    output_multiplier = get_real_equivalent_multiplier(complex_model.layers, classifier=classifier,
                                                       equiv_technique=full_technique)
    counter = 0
    for layer in complex_model.layers:
        if isinstance(layer, ComplexLayer):
            if isinstance(layer, layers.ComplexDense):  # TODO: Check if I can do this with kargs or sth
                real_shape.append(layer.get_real_equivalent(
                    output_multiplier=output_multiplier[counter]))
                counter += 1
            else:
                real_shape.append(layer.get_real_equivalent())
        else:
            sys.exit("Layer " + str(layer) + " unknown")
    assert counter == len(output_multiplier)
    if name is None:
        name = f"{complex_model.name}_real_equiv"
    real_equiv = Sequential(real_shape, name=name)
    real_equiv.compile(optimizer=complex_model.optimizer.__class__(), loss=complex_model.loss,
                       metrics=['accuracy'])
    return real_equiv
def _parse_sizes(dense_layers):
assert len(dense_layers[0].input_shape) == 2, "Possibly a bug of cvnn. Please report it to github issues"
model_in_c = dense_layers[0].input_shape[-1] # -1 not to take the None part
model_out_c = dense_layers[-1].units
x_c = [dense_layers[i].units for i in range(len(dense_layers[:-1]))]
x_c.insert(0, model_in_c)
x_c.append(model_out_c)
return x_c
def _get_ratio_capacity_equivalent(layers_shape, classification: bool = True, bias_adjust: bool = True):
"""
Generates output_multiplier keeping not only the same capacity but keeping a constant ratio between the
model's layers
This helps keeps the 'aspect' or shape of the model my making:
neurons_real_layer_i = ratio * neurons_complex_layer_i
:param layers_shape:
:param classification: True (default) if the model is a classification model. False otherwise.
:param bias_adjust: True (default) if taking into account the bias as a trainable parameter. If not it will
only match the real valued parameters of the weights
"""
p_c = 0
for i in range(len(layers_shape[:-1])):
p_c += 2 * layers_shape[i] * layers_shape[i+1]
model_in_c = layers_shape[0]
model_out_c = layers_shape[-1]
x_c = layers_shape[1:-1]
if bias_adjust:
p_c = p_c + 2 * np.sum(x_c) + 2 * model_out_c
model_in_r = 2 * model_in_c
model_out_r = model_out_c if classification else 2 * model_out_c
# Quadratic equation
if len(x_c) > 1:
quadratic_c = float(-p_c)
quadratic_b = float(model_in_r * x_c[0] + model_out_r * x_c[-1])
if bias_adjust:
quadratic_b = quadratic_b + np.sum(x_c) + model_out_c
quadratic_a = float(np.sum([x_c[i] * x_c[i + 1] for i in range(len(x_c) - 1)]))
# The result MUST be positive so I use the '+' solution
ratio = (-quadratic_b + np.sqrt(quadratic_b ** 2 - 4 * quadratic_c * quadratic_a)) / (2 * quadratic_a)
if not 1 <= ratio < 2:
logger.error("Ratio {} has a weird value. This function must have a bug.".format(ratio))
else:
ratio = 2 * (model_in_c + model_out_c) / (model_in_r + model_out_r)
return [ratio] * len(x_c) + [1 if classification else 2]
def _get_alternate_capacity_equivalent(layers_shape, classification: bool = True):
    """
    Generates output_multiplier using the alternate method described in https://arxiv.org/abs/1811.12351 which
    doubles or not the layer if it's neighbor was doubled or not (making the opposite).
    The code fills output_multiplier from both senses:
        output_multiplier = [ ... , .... ]
                              --->   <---
    If when both ends meet there's not a coincidence (example: [..., 1, 1, ...]) then
    the code will find a compromise between the two to keep the same real valued trainable parameters.

    :param layers_shape: Neuron counts [input, hidden..., output] of the complex model.
    :param classification: True (default) to keep the output layer size; False doubles it.
    :return: Multipliers for every weight layer (len(layers_shape) - 1 entries).
    """
    output_multiplier = np.zeros(len(layers_shape))
    output_multiplier[0] = 2  # Sets input multiplier
    output_multiplier[-1] = 1 if classification else 2  # Output multiplier
    i: int = 1
    while i < (len(layers_shape) - i):  # Fill the hidden layers (from 1 to len()-1)
        output_multiplier[i] = 2 if output_multiplier[i - 1] == 1 else 1  # From beginning
        output_multiplier[-1 - i] = 2 if output_multiplier[-i] == 1 else 1  # From the end
        # Clash detection when the two fills meet.
        # NOTE(review): `i == len(layers_shape) - i` can never hold inside this loop
        # (the loop condition guarantees i < len - i), so the second clause looks
        # unreachable — verify the intended condition for even-length shapes.
        index_in_middle_with_diff_borders = i == len(layers_shape) - i - 1 and output_multiplier[i - 1] != output_multiplier[i + 1]
        subsequent_indexes_are_equal = i == len(layers_shape) - i and output_multiplier[i] == output_multiplier[i + 1]
        if index_in_middle_with_diff_borders or subsequent_indexes_are_equal:
            m_inf = layers_shape[i - 1]  # This is because dense_layers are len(output_multiplier) - 1
            m_sup = layers_shape[i + 1]
            if i == len(layers_shape) - i - 1:  # index_in_middle_with_diff_borders
                coef_sup = output_multiplier[i + 1]
                coef_inf = output_multiplier[i - 1]
            else:  # subsequent_indexes_are_equal
                coef_sup = output_multiplier[i + 1]
                coef_inf = output_multiplier[i]
            # Non-integer compromise multiplier chosen so the real parameter count matches.
            output_multiplier[i] = 2 * (m_inf + m_sup) / (coef_inf * m_inf + coef_sup * m_sup)
        i += 1
    return output_multiplier[1:]
| 9,341 | 53.631579 | 131 | py |
cvnn | cvnn-master/cvnn/layers/pooling.py | import tensorflow as tf
from packaging import version
from tensorflow.keras.layers import Layer
from tensorflow.python.keras import backend
from tensorflow.python.keras.utils import conv_utils
if version.parse(tf.__version__) < version.parse("2.6.0"):
from tensorflow.python.keras.engine.input_spec import InputSpec
else:
from tensorflow.keras.layers import InputSpec
from tensorflow.python.framework import tensor_shape
from abc import abstractmethod
# Typing
from typing import Union, Optional, Tuple
# Own models
from cvnn.layers.core import ComplexLayer
from cvnn.layers.core import DEFAULT_COMPLEX_TYPE
class ComplexPooling2D(Layer, ComplexLayer):
    """
    Pooling layer for arbitrary pooling functions, for 2D inputs (e.g. images).
    Abstract class. This class only exists for code reuse. It will never be an exposed API.
    Subclasses implement `pool_function` (max / average, complex-aware).
    """

    def __init__(self, pool_size: Union[int, Tuple[int, int]] = (2, 2),
                 strides: Optional[Union[int, Tuple[int, int]]] = None,
                 padding: str = 'valid', data_format: Optional[str] = None,
                 name: Optional[str] = None, dtype=DEFAULT_COMPLEX_TYPE, **kwargs):
        """
        :param pool_size: An integer or tuple/list of 2 integers: (pool_height, pool_width)
            specifying the size of the pooling window.
            Can be a single integer to specify the same value for all spatial dimensions.
        :param strides: An integer or tuple/list of 2 integers, specifying the strides of the pooling operation.
            Can be a single integer to specify the same value for all spatial dimensions.
            Defaults to `pool_size` (the usual Keras convention).
        :param padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive.
        :param data_format: A string, one of `channels_last` (default) or `channels_first`.
            The ordering of the dimensions in the inputs.
            `channels_last` corresponds to inputs with shape
            `(batch, height, width, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, height, width)`.
        :param name: A string, the name of the layer.
        :param dtype: The layer dtype (complex by default); stored as `self.my_dtype`.
        """
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        super(ComplexPooling2D, self).__init__(name=name, **kwargs)
        if data_format is None:
            data_format = backend.image_data_format()
        if strides is None:
            strides = pool_size  # Default stride equals the pool size (non-overlapping windows).
        self.pool_size = conv_utils.normalize_tuple(pool_size, 2,
                                                    'pool_size')  # Values are checked here. No need to check them later
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=4)  # Expect (batch, H, W, C) or (batch, C, H, W).

    @abstractmethod
    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Perform the actual pooling; implemented by concrete subclasses."""
        pass

    def call(self, inputs, **kwargs):
        """Builds the 4D ksize/stride tuples expected by tf.nn pooling ops and delegates to pool_function."""
        if self.data_format == 'channels_last':
            pool_shape = (1,) + self.pool_size + (1,)
            strides = (1,) + self.strides + (1,)
        else:
            pool_shape = (1, 1) + self.pool_size
            strides = (1, 1) + self.strides
        outputs = self.pool_function(
            inputs,
            ksize=pool_shape,
            strides=strides,
            padding=self.padding.upper(),
            data_format=conv_utils.convert_data_format(self.data_format, 4))
        return outputs

    def compute_output_shape(self, input_shape):
        """Return the pooled output shape for the given 4D input shape."""
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        if self.data_format == 'channels_first':
            rows = input_shape[2]
            cols = input_shape[3]
        else:
            rows = input_shape[1]
            cols = input_shape[2]
        rows = conv_utils.conv_output_length(rows, self.pool_size[0], self.padding,
                                             self.strides[0])
        cols = conv_utils.conv_output_length(cols, self.pool_size[1], self.padding,
                                             self.strides[1])
        if self.data_format == 'channels_first':
            return tensor_shape.TensorShape(
                [input_shape[0], input_shape[1], rows, cols])
        else:
            return tensor_shape.TensorShape(
                [input_shape[0], rows, cols, input_shape[3]])

    def get_config(self):
        """Serialize the layer configuration (enables model saving/cloning)."""
        config = super(ComplexPooling2D, self).get_config()
        config.update({
            'pool_size': self.pool_size,
            'padding': self.padding,
            'strides': self.strides,
            'data_format': self.data_format,
            'dtype': self.my_dtype
        })
        return config
class ComplexMaxPooling2D(ComplexPooling2D):
    """
    Max pooling operation for 2D spatial data.
    Works for complex dtype using the absolute value to get the max: the window
    winner is chosen by modulus and the original complex value is returned.
    """

    def __init__(self, pool_size: Union[int, Tuple[int, int]] = (2, 2),
                 strides: Optional[Union[int, Tuple[int, int]]] = None,
                 padding: str = 'valid', data_format: Optional[str] = None,
                 name: Optional[str] = None, **kwargs):
        super(ComplexMaxPooling2D, self).__init__(pool_size=pool_size, strides=strides, padding=padding,
                                                  data_format=data_format, name=name, **kwargs)
        self.argmax = None  # Flattened winner indices of the latest call (see get_max_index).

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Max-pool by modulus and gather the original (possibly complex) values at the winning indices."""
        # The max is calculated with the absolute value. This will still work on real values.
        if inputs.dtype.is_complex:
            abs_in = tf.math.abs(inputs)
        else:
            abs_in = inputs
        output, argmax = tf.nn.max_pool_with_argmax(input=abs_in, ksize=ksize, strides=strides,
                                                    padding=padding, data_format=data_format,
                                                    include_batch_in_index=True)
        self.argmax = argmax
        shape = tf.shape(output)
        # argmax holds flat indices into `inputs`, so gather on the flattened tensor.
        tf_res = tf.reshape(tf.gather(tf.reshape(inputs, [-1]), argmax), shape)
        # assert np.all(tf_res == output)  # For debugging when the input is real only!
        assert tf_res.dtype == inputs.dtype
        return tf_res

    def get_real_equivalent(self):
        """Return a max-pooling layer with identical configuration (already handles real inputs)."""
        return ComplexMaxPooling2D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                   data_format=self.data_format, name=self.name + "_real_equiv")

    def get_max_index(self):
        """Return the argmax indices of the most recent call; raises if the layer was never called."""
        if self.argmax is None:
            raise AttributeError("Variable argmax did not exist, call at least once the max-pooling layer")
        return self.argmax  # TODO: Shall I check this is use only once?
class ComplexMaxPooling2DWithArgmax(ComplexMaxPooling2D):
    """
    Max pooling operation for 2D spatial data and outputs both max values and indices.
    This class is equivalent to ComplexMaxPooling2D but that also outputs indices:
    since `pool_function` returns a (values, argmax) tuple, calling the layer
    yields that tuple. Useful to perform Max Unpooling using ComplexUnPooling2D.
    Works for complex dtype using the absolute value to get the max.
    """

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """
        :param inputs: A Tensor. Input to pool over.
        :param ksize: An int or list of ints that has length 1, 2 or 4.
            The size of the window for each dimension of the input tensor.
        :param strides: An int or list of ints that has length 1, 2 or 4.
            The stride of the sliding window for each dimension of the input tensor.
        :param padding: A string from: "SAME", "VALID". The type of padding algorithm to use.
        :param data_format: An optional string, must be set to "NHWC". Defaults to "NHWC".
            Specify the data format of the input and output data.
        :return: A tuple of Tensor objects (output, argmax).
            - output A Tensor. Has the same type as input.
            - argmax A Tensor. The indices in argmax are flattened (Complains directly to TensorFlow)
        """
        # The max is calculated with the absolute value. This will still work on real values.
        if inputs.dtype.is_complex:
            abs_in = tf.math.abs(inputs)
        else:
            abs_in = inputs
        output, argmax = tf.nn.max_pool_with_argmax(input=abs_in, ksize=ksize, strides=strides,
                                                    padding=padding, data_format=data_format,
                                                    include_batch_in_index=True)
        shape = tf.shape(output)
        # Gather the original (possibly complex) values at the winning flat indices.
        tf_res = tf.reshape(tf.gather(tf.reshape(inputs, [-1]), argmax), shape)
        # assert np.all(tf_res == output)  # For debugging when the input is real only!
        assert tf_res.dtype == inputs.dtype
        return tf_res, argmax
class ComplexAvgPooling2D(ComplexPooling2D):
    """Average pooling for 2D data; complex inputs are pooled component-wise (real and imaginary parts separately)."""

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Average-pool the real and imaginary channels independently, then recombine."""
        pooled_real = tf.nn.avg_pool2d(input=tf.math.real(inputs), ksize=ksize, strides=strides,
                                       padding=padding, data_format=data_format)
        pooled_imag = tf.nn.avg_pool2d(input=tf.math.imag(inputs), ksize=ksize, strides=strides,
                                       padding=padding, data_format=data_format)
        if inputs.dtype.is_complex:
            return tf.complex(pooled_real, pooled_imag)
        # Real input: the imaginary part is all zeros, return the real pooling only.
        return pooled_real

    def get_real_equivalent(self):
        """Return an average-pooling layer with identical configuration (already handles real inputs)."""
        return ComplexAvgPooling2D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                   data_format=self.data_format, name=self.name + "_real_equiv")
class ComplexCircularAvgPooling2D(ComplexPooling2D):
    """
    Average pooling intended to average complex values in polar form using a
    circular mean of the phases (https://en.wikipedia.org/wiki/Circular_mean).
    Not implemented yet: calling it raises NotImplementedError.
    """

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Not implemented; raises NotImplementedError."""
        # Bug fix: the original body evaluated `tf.math.phase(inputs)` (an op that
        # does not exist in TensorFlow; tf.math.angle is the correct one) before
        # raising a misspelled `NotImplemetedError`, so callers got a confusing
        # AttributeError/NameError. Raise the intended exception up front instead.
        raise NotImplementedError("Still not implemented")  # https://en.wikipedia.org/wiki/Circular_mean

    def get_real_equivalent(self):
        """Return a plain average-pooling layer with the same configuration."""
        return ComplexAvgPooling2D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                   data_format=self.data_format, name=self.name + "_real_equiv")
class ComplexPolarAvgPooling2D(ComplexPooling2D):
    """
    Average pooling in polar form: the magnitudes are averaged arithmetically and
    the phases are averaged with a circular mean
    (https://en.wikipedia.org/wiki/Circular_mean).
    """

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Pool magnitudes arithmetically and phases via circular mean, then recombine."""
        inputs_abs = tf.math.abs(inputs)
        output_abs = tf.nn.avg_pool2d(input=inputs_abs, ksize=ksize, strides=strides,
                                      padding=padding, data_format=data_format)
        # Use circular mean
        inputs_angle = tf.math.angle(inputs)
        unit_x = tf.math.cos(inputs_angle)  # Convert all angles to corresponding points on the unit circle
        unit_y = tf.math.sin(inputs_angle)  # convert polar coordinates to Cartesian coordinates.
        avg_unit_x = tf.nn.avg_pool2d(input=unit_x, ksize=ksize, strides=strides,  # Then compute the arithmetic
                                      padding=padding, data_format=data_format)  # mean of these points.
        avg_unit_y = tf.nn.avg_pool2d(input=unit_y, ksize=ksize, strides=strides,
                                      padding=padding, data_format=data_format)
        # The angle is a reasonable mean of the input angles.
        output_angle = tf.math.angle(tf.complex(avg_unit_x, avg_unit_y))
        # Unknown result. If the angles are uniformly distributed on the circle,
        # then the resulting radius will be 0, and there is no circular mean.
        if inputs.dtype.is_complex:
            output = tf.complex(output_abs * tf.math.cos(output_angle), output_abs * tf.math.sin(output_angle))
        else:
            output = output_abs
        return output

    def get_real_equivalent(self):
        """Return a polar average-pooling layer with identical configuration."""
        return ComplexPolarAvgPooling2D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                        data_format=self.data_format, name=self.name + "_real_equiv")
class ComplexUnPooling2D(Layer, ComplexLayer):
    """
    Performs UnPooling as explained in:
    https://www.oreilly.com/library/view/hands-on-convolutional-neural/9781789130331/6476c4d5-19f2-455f-8590-c6f99504b7a5.xhtml
    This class was inspired to recreate the CV-FCN model of https://www.mdpi.com/2072-4292/11/22/2653

    The layer scatters each input value back to the position recorded by a
    pooling `argmax`, filling every other output position with zero.
    As far as I am concerned this class should work for any dimensional input but I have not tested it
    (and you need the argmax which I only implemented for the 2D case).
    """

    def __init__(self, desired_output_shape=None, upsampling_factor: Optional[int] = None, name=None,
                 dtype=DEFAULT_COMPLEX_TYPE, dynamic=False, **kwargs):
        """
        :param desired_output_shape: tf.TensorShape (or equivalent like tuple or list).
            The expected output shape without the batch size.
            Meaning that for a 2D image to be enlarged, this is size 3 of the form HxWxC or CxHxW.
            Must be fully defined if given.
        :param upsampling_factor: Integer. The factor to which enlarge the image.
            For example, if upsampling_factor=2, an input image of size 32x32 will be 64x64.
            This parameter is ignored if desired_output_shape is used or if the output shape is
            given to the call function.
        :raises ValueError: if desired_output_shape is partially defined or not of size 3,
            or if upsampling_factor is neither None nor an int.
        """
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        if desired_output_shape is not None:
            if not tf.TensorShape(desired_output_shape).is_fully_defined():
                raise ValueError(f"desired_output_shape must be fully defined, got {desired_output_shape}")
            elif len(desired_output_shape) != 3:
                raise ValueError(f"desired_output_shape expected to be size 3 and got size {len(desired_output_shape)}")
        self.desired_output_shape = desired_output_shape
        if upsampling_factor is None or isinstance(upsampling_factor, int):
            self.upsampling_factor = upsampling_factor
        else:
            raise ValueError(f"Unsuported upsampling_factor = {upsampling_factor}")
        # The layer has no trainable weights; the base Layer is built with the
        # real dtype because the scattered values keep their own dtype anyway.
        super(ComplexUnPooling2D, self).__init__(trainable=False, name=name, dtype=self.my_dtype.real_dtype,
                                                 dynamic=dynamic, **kwargs)

    def call(self, inputs, **kwargs):
        """
        TODO: Still has a bug, if argmax has coincident indexes. Don't think this is desired (but might).
        :param inputs: A tuple of Tensor objects (input, argmax).
            - input A Tensor.
            - argmax A Tensor. The indices in argmax are flattened (Complains directly to TensorFlow)
            - output_shape (Optional) A tf.TensorShape (or equivalent like tuple or list).
                The expected output shape without the batch size.
                Meaning that for a 2D image to be enlarged, this is size 3 of the form HxWxC or CxHxW
        :return: The unpooled tensor of shape (batch,) + output_shape.
        :raises ValueError: if inputs is not a list of 2 or 3 elements, or no output
            shape can be determined.
        """
        if not isinstance(inputs, list):
            raise ValueError('This layer should be called on a list of inputs.')
        if len(inputs) == 2:
            inputs_values, unpool_mat = inputs
            output_shape = self.desired_output_shape
        elif len(inputs) == 3:
            inputs_values, unpool_mat, output_shape = inputs
        else:
            raise ValueError(f'inputs = {inputs} must have size 2 or 3 and had size {len(inputs)}')
        # https://stackoverflow.com/a/42549265/5931672
        # https://github.com/tensorflow/addons/issues/632#issuecomment-482580850
        # This is for the case I don't know the expected output shape so I used the upsampling factor
        if not tf.TensorShape(output_shape).is_fully_defined():
            if self.upsampling_factor is None:
                raise ValueError('output_shape should be passed as 3rd element or either desired_output_shape '
                                 'or upsampling_factor should be passed on construction')
            if inputs_values.get_shape()[1:].is_fully_defined():
                # Derive the static output shape by tiling the (known) input shape.
                output_shape = tf.tile(inputs_values,
                                       [1, self.upsampling_factor, self.upsampling_factor, 1]).get_shape()[1:]
            else:
                output_shape = tf.shape(inputs_values)[1:]
        elif self.upsampling_factor is not None:
            tf.print("WARNING: Ignoring self.upsampling_factor parameter")
        # Scatter the pooled values into a flat buffer at the argmax positions,
        # then reshape back to (batch,) + output_shape.
        flat_output_shape = tf.reduce_prod(output_shape)
        shape = (tf.shape(inputs_values)[0] * flat_output_shape,)
        updates = tf.reshape(inputs_values, [-1])
        indices = tf.expand_dims(tf.reshape(unpool_mat, [-1]), axis=-1)
        ret = tf.scatter_nd(indices, updates, shape=shape)
        desired_output_shape_with_batch = tf.concat([[tf.shape(inputs_values)[0]], output_shape], axis=0)
        ret = tf.reshape(ret, shape=desired_output_shape_with_batch)
        return ret

    def get_real_equivalent(self):
        # BUG FIX: `dynamic` used to receive `self.dtype` (a dtype) instead of a
        # boolean, and `upsampling_factor` was dropped from the copy.
        return ComplexUnPooling2D(desired_output_shape=self.desired_output_shape,
                                  upsampling_factor=self.upsampling_factor, name=self.name,
                                  dtype=self.my_dtype.real_dtype, dynamic=self.dynamic)

    def get_config(self):
        config = super(ComplexUnPooling2D, self).get_config()
        config.update({
            'desired_output_shape': self.desired_output_shape,
            # BUG FIX: upsampling_factor was missing, so it was lost when the
            # layer was serialized and re-created via from_config.
            'upsampling_factor': self.upsampling_factor,
            'name': self.name,
            'dtype': self.my_dtype,
            'dynamic': False,
        })
        return config
"""
3D Pooling
"""
class ComplexPooling3D(Layer, ComplexLayer):
    """
    Abstract base class for complex-valued 3D pooling layers.
    Subclasses must implement :meth:`pool_function`.

    :param pool_size: Tuple of 3 integers, size of the pooling window.
    :param strides: Tuple of 3 integers or None. None defaults to `pool_size`
        (non-overlapping windows).
    :param padding: One of 'valid' or 'same' (case-insensitive).
    :param data_format: 'channels_last' or 'channels_first'.
    """

    def __init__(self, pool_size=(2, 2, 1), strides=None,
                 padding='valid', data_format='channels_last',
                 name=None, dtype=DEFAULT_COMPLEX_TYPE, **kwargs):
        self.my_dtype = dtype
        super(ComplexPooling3D, self).__init__(name=name, **kwargs)
        if data_format is None:
            data_format = backend.image_data_format()
        if strides is None:
            strides = pool_size
        self.pool_size = conv_utils.normalize_tuple(pool_size, 3, 'pool_size')
        self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        # 5D input: batch + 3 spatial dims + channels.
        self.input_spec = InputSpec(ndim=5)

    @abstractmethod
    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Performs the actual pooling; implemented by subclasses."""
        pass

    def call(self, inputs, **kwargs):
        outputs = self.pool_function(
            inputs,
            self.pool_size,
            strides=self.strides,
            padding=self.padding.upper(),
            data_format=conv_utils.convert_data_format(self.data_format, 5))
        return outputs

    def compute_output_shape(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        if self.data_format == 'channels_first':
            deps = input_shape[-3]
            rows = input_shape[-2]
            cols = input_shape[-1]
        else:
            deps = input_shape[-4]
            rows = input_shape[-3]
            cols = input_shape[-2]
        # BUG FIX: the depth dimension was previously left un-pooled and
        # rows/cols used pool_size[0]/[1]. All three spatial dimensions must be
        # reduced, each with its own pool_size/stride component (this matches
        # keras' Pooling3D.compute_output_shape).
        deps = conv_utils.conv_output_length(deps, self.pool_size[0], self.padding, self.strides[0])
        rows = conv_utils.conv_output_length(rows, self.pool_size[1], self.padding, self.strides[1])
        cols = conv_utils.conv_output_length(cols, self.pool_size[2], self.padding, self.strides[2])
        if self.data_format == 'channels_first':
            return tensor_shape.TensorShape(
                [input_shape[0], input_shape[1], deps, rows, cols])
        else:
            return tensor_shape.TensorShape(
                [input_shape[0], deps, rows, cols, input_shape[-1]])

    def get_config(self):
        config = super(ComplexPooling3D, self).get_config()
        config.update({
            'strides': self.strides,
            'pool_size': self.pool_size,
            'padding': self.padding,
            'data_format': self.data_format,
            'dtype': self.my_dtype
        })
        return config
class ComplexAvgPooling3D(ComplexPooling3D):
    """Complex-valued 3D average pooling: real and imaginary parts are pooled independently."""

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        def _pool(component):
            return tf.nn.avg_pool3d(input=component, ksize=ksize, strides=strides,
                                    padding=padding, data_format=data_format)

        pooled_real = _pool(tf.math.real(inputs))
        if not inputs.dtype.is_complex:
            # Real input: tf.math.real is the identity, nothing more to do.
            return pooled_real
        pooled_imag = _pool(tf.math.imag(inputs))
        return tf.complex(pooled_real, pooled_imag)

    def get_real_equivalent(self):
        # No trainable weights: the real equivalent is a configuration copy.
        return ComplexAvgPooling3D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                   data_format=self.data_format, name=self.name + "_real_equiv")
"""
1D Pooling
"""
class ComplexPooling1D(Layer, ComplexLayer):
    """
    Abstract base class for complex-valued 1D pooling layers (temporal data).
    Subclasses must implement :meth:`pool_function`.
    """

    def __init__(self, pool_size=2, strides=None,
                 padding='valid', data_format='channels_last',
                 name=None, dtype=DEFAULT_COMPLEX_TYPE, **kwargs):
        self.my_dtype = dtype
        super(ComplexPooling1D, self).__init__(name=name, **kwargs)
        if data_format is None:
            data_format = backend.image_data_format()
        # Default the stride to the pool size (non-overlapping windows).
        if strides is None:
            strides = pool_size
        self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
        self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        # 3D input: batch + steps + channels (or batch + channels + steps).
        self.input_spec = InputSpec(ndim=3)

    @abstractmethod
    def pool_function(self, inputs, ksize, strides, padding, data_format):
        """Performs the actual pooling; implemented by subclasses."""
        pass

    def call(self, inputs, **kwargs):
        tf_data_format = conv_utils.convert_data_format(self.data_format, 3)
        return self.pool_function(inputs,
                                  self.pool_size,
                                  strides=self.strides,
                                  padding=self.padding.upper(),
                                  data_format=tf_data_format)

    def compute_output_shape(self, input_shape):
        shape = tf.TensorShape(input_shape).as_list()
        if self.data_format == 'channels_first':
            steps_axis, features_axis = 2, 1
        else:
            steps_axis, features_axis = 1, 2
        length = conv_utils.conv_output_length(shape[steps_axis],
                                               self.pool_size[0],
                                               self.padding,
                                               self.strides[0])
        if self.data_format == 'channels_first':
            return tf.TensorShape([shape[0], shape[features_axis], length])
        return tf.TensorShape([shape[0], length, shape[features_axis]])

    def get_config(self):
        config = super(ComplexPooling1D, self).get_config()
        config.update({
            'strides': self.strides,
            'pool_size': self.pool_size,
            'padding': self.padding,
            'data_format': self.data_format,
            'dtype': self.my_dtype
        })
        return config
class ComplexAvgPooling1D(ComplexPooling1D):
    """Complex-valued 1D average pooling: real and imaginary parts are pooled independently."""

    def pool_function(self, inputs, ksize, strides, padding, data_format):
        def _pool(component):
            return tf.nn.avg_pool1d(input=component, ksize=ksize, strides=strides,
                                    padding=padding, data_format=data_format)

        pooled_real = _pool(tf.math.real(inputs))
        if not inputs.dtype.is_complex:
            # Real input: tf.math.real is the identity, nothing more to do.
            return pooled_real
        pooled_imag = _pool(tf.math.imag(inputs))
        return tf.complex(pooled_real, pooled_imag)

    def get_real_equivalent(self):
        # No trainable weights: the real equivalent is a configuration copy.
        return ComplexAvgPooling1D(pool_size=self.pool_size, strides=self.strides, padding=self.padding,
                                   data_format=self.data_format, name=self.name + "_real_equiv")
| 24,753 | 47.253411 | 138 | py |
cvnn | cvnn-master/cvnn/layers/convolutional.py | import six
import functools
import tensorflow as tf
from packaging import version
from tensorflow.keras import activations
from tensorflow.keras import backend
from tensorflow.keras import constraints
from tensorflow.keras import initializers
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Layer
from tensorflow.python.keras.utils import conv_utils
if version.parse(tf.__version__) < version.parse("2.6.0"):
from tensorflow.python.keras.engine.input_spec import InputSpec
else:
from tensorflow.keras.layers import InputSpec
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
# Own modules
from cvnn.layers.core import ComplexLayer
from cvnn.initializers import ComplexGlorotUniform, Zeros, ComplexInitializer, INIT_TECHNIQUES
from cvnn import logger
from cvnn.layers.core import DEFAULT_COMPLEX_TYPE
class ComplexConv(Layer, ComplexLayer):
    """
    Abstract N-D complex convolution layer (private, used as implementation base).

    Almost exact copy of
    https://github.com/tensorflow/tensorflow/blob/v2.4.0/tensorflow/python/keras/layers/convolutional.py#L52

    This layer creates a convolution kernel that is convolved
    (actually cross-correlated) with the layer input to produce a tensor of
    outputs. If `use_bias` is True (and a `bias_initializer` is provided),
    a bias vector is created and added to the outputs. Finally, if
    `activation` is not `None`, it is applied to the outputs as well.
    Note: layer attributes cannot be modified after the layer has been called
    once (except the `trainable` attribute).

    Arguments:
        rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
        filters: Integer, the dimensionality of the output space (i.e. the number
            of filters in the convolution).
        kernel_size: An integer or tuple/list of n integers, specifying the
            length of the convolution window.
        strides: An integer or tuple/list of n integers,
            specifying the stride length of the convolution.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input such that output has the same
            height/width dimension as the input. `"causal"` results in causal
            (dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`.
        data_format: A string, one of `channels_last` (default) or `channels_first`.
            The ordering of the dimensions in the inputs.
            `channels_last` corresponds to inputs with shape
            `(batch_size, ..., channels)` while `channels_first` corresponds to
            inputs with shape `(batch_size, channels, ...)`.
        dilation_rate: An integer or tuple/list of n integers, specifying
            the dilation rate to use for dilated convolution.
            Currently, specifying any `dilation_rate` value != 1 is
            incompatible with specifying any `strides` value != 1.
        groups: A positive integer specifying the number of groups in which the
            input is split along the channel axis. Each group is convolved
            separately with `filters / groups` filters. The output is the
            concatenation of all the `groups` results along the channel axis.
            Input channels and `filters` must both be divisible by `groups`.
        activation: Activation function to use.
            If you don't specify anything, no activation is applied.
        use_bias: Boolean, whether the layer uses a bias.
        kernel_initializer: An initializer for the convolution kernel.
        bias_initializer: An initializer for the bias vector. If None, the default
            initializer will be used.
        kernel_regularizer: Optional regularizer for the convolution kernel.
            ATTENTION: Not yet implemented! This parameter will have no effect.
        bias_regularizer: Optional regularizer for the bias vector.
            ATTENTION: Not yet implemented! This parameter will have no effect.
        activity_regularizer: Optional regularizer function for the output.
        kernel_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The function
            must take as input the unprojected variable and must return the
            projected variable (which must have the same shape). Constraints are
            not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
        init_technique: One of 'mirror' or 'zero_imag'. Tells the layer how to
            initialize complex weights when the given initializer is one of
            tensorflow's built-in initializers (which do not support complex numbers):
            - 'mirror': Uses the initializer for both real and imaginary part.
              Note that some initializers such as Glorot or He will lose their
              property if initialized this way.
            - 'zero_imag': Initializes the real part and sets the imaginary part to zero.
    """
    def __init__(self, rank, filters, kernel_size, dtype=DEFAULT_COMPLEX_TYPE, strides=1, padding='valid', data_format=None, dilation_rate=1,
                 groups=1, activation=None, use_bias=True,
                 kernel_initializer=ComplexGlorotUniform(), bias_initializer=Zeros(),
                 kernel_regularizer=None, bias_regularizer=None,  # TODO: Not yet working
                 activity_regularizer=None, kernel_constraint=None, bias_constraint=None,
                 init_technique: str = 'mirror',
                 trainable=True, name=None, conv_op=None, **kwargs):
        # Regularizers are accepted for API compatibility but are not applied yet.
        if kernel_regularizer is not None or bias_regularizer is not None:
            logger.warning(f"Sorry, regularizers are not implemented yet, this parameter will take no effect")
        super(ComplexConv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.rank = rank
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        # I use no default dtype to make sure I don't forget to give it to my ComplexConv layers
        if isinstance(filters, float):
            filters = int(filters)
        self.filters = filters
        self.groups = groups or 1
        # Normalize scalar/tuple arguments to rank-sized tuples.
        self.kernel_size = conv_utils.normalize_tuple(
            kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)
        self._validate_init()
        # Cached flags used in call().
        self._is_causal = self.padding == 'causal'
        self._channels_first = self.data_format == 'channels_first'
        self._tf_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)
        self.init_technique = init_technique.lower()
    def _validate_init(self):
        # Sanity-check constructor arguments; raises ValueError on bad combinations.
        if self.filters is not None and self.filters % self.groups != 0:
            raise ValueError(
                'The number of filters must be evenly divisible by the number of '
                'groups. Received: groups={}, filters={}'.format(
                    self.groups, self.filters))
        if not all(self.kernel_size):
            raise ValueError('The argument `kernel_size` cannot contain 0(s). '
                             'Received: %s' % (self.kernel_size,))
        if (self.padding == 'causal' and not isinstance(self, (ComplexConv1D))):
            raise ValueError('Causal padding is only supported for `Conv1D`'
                             'and `SeparableConv1D`.')
    def build(self, input_shape):
        """
        Creates the kernel and bias variables.
        For a complex `my_dtype` the kernel and bias are stored as pairs of
        variables (`*_r` / `*_i`) holding the real and imaginary parts.
        """
        input_shape = tf.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        if input_channel % self.groups != 0:
            raise ValueError(
                f'The number of input channels must be evenly divisible by the number '
                f'of groups. Received groups={self.groups}, but the input has {input_channel} channels '
                f'(full input shape is {input_shape}).')
        kernel_shape = self.kernel_size + (input_channel // self.groups, self.filters)
        if self.my_dtype.is_complex:
            # A ComplexInitializer receives the complex dtype; plain tf
            # initializers only understand the real dtype.
            i_kernel_dtype = self.my_dtype if isinstance(self.kernel_initializer,
                                                         ComplexInitializer) else self.my_dtype.real_dtype
            i_bias_dtype = self.my_dtype if isinstance(self.bias_initializer,
                                                       ComplexInitializer) else self.my_dtype.real_dtype
            i_kernel_initializer = self.kernel_initializer
            i_bias_initializer = self.bias_initializer
            if not isinstance(self.kernel_initializer, ComplexInitializer):
                tf.print(f"WARNING: you are using a Tensorflow Initializer for complex numbers. "
                         f"Using {self.init_technique} method.")
                if self.init_technique in INIT_TECHNIQUES:
                    if self.init_technique == 'zero_imag':
                        # This section is done to initialize with tf initializers, making imaginary part zero
                        i_kernel_initializer = initializers.Zeros()
                        i_bias_initializer = initializers.Zeros()
                else:
                    raise ValueError(f"Unsuported init_technique {self.init_technique}, "
                                     f"supported techniques are {INIT_TECHNIQUES}")
            self.kernel_r = tf.Variable(
                initial_value=self.kernel_initializer(shape=kernel_shape, dtype=i_kernel_dtype),
                name='kernel_r',
                constraint=self.kernel_constraint,
                trainable=True
            )  # TODO: regularizer=self.kernel_regularizer,
            self.kernel_i = tf.Variable(
                initial_value=i_kernel_initializer(shape=kernel_shape, dtype=i_kernel_dtype),
                name='kernel_i',
                constraint=self.kernel_constraint,
                trainable=True
            )  # TODO: regularizer=self.kernel_regularizer
            if self.use_bias:
                self.bias_r = tf.Variable(
                    initial_value=self.bias_initializer(shape=(self.filters,), dtype=i_bias_dtype),
                    name='bias_r',
                    trainable=True
                )
                self.bias_i = tf.Variable(
                    initial_value=i_bias_initializer(shape=(self.filters,), dtype=i_bias_dtype),
                    name='bias_i',
                    constraint=self.bias_constraint,
                    trainable=True
                )  # TODO: regularizer=self.bias_regularizer
        else:
            # Real dtype: use the standard Keras add_weight path.
            self.kernel = self.add_weight(
                name='kernel',
                shape=kernel_shape,
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
                trainable=True,
                dtype=self.my_dtype)
            if self.use_bias:
                self.bias = self.add_weight(
                    name='bias',
                    shape=(self.filters,),
                    initializer=self.bias_initializer,
                    regularizer=self.bias_regularizer,
                    constraint=self.bias_constraint,
                    trainable=True,
                    dtype=self.my_dtype)
            if not self.use_bias:
                self.bias = None
        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(min_ndim=self.rank + 2,
                                    axes={channel_axis: input_channel})
        self.built = True
    def convolution_op(self, inputs, kernel):
        """Runs one real-valued N-D convolution of `inputs` with `kernel`."""
        # Convert Keras formats to TF native formats.
        if self.padding == 'causal':
            tf_padding = 'VALID'  # Causal padding handled in `call`.
        elif isinstance(self.padding, str):
            tf_padding = self.padding.upper()
        else:
            tf_padding = self.padding
        return tf.nn.convolution(
            inputs,
            kernel,
            strides=list(self.strides),
            padding=tf_padding,
            dilations=list(self.dilation_rate),
            data_format=self._tf_data_format,
            name=self.__class__.__name__)
    def call(self, inputs):
        """
        Calls convolution, this function is divided in 4:
            1. Input parser/verification
            2. Convolution
            3. Bias
            4. Activation Function
        :returns: A tensor of rank 4+ representing `activation(conv2d(inputs, kernel) + bias)`.
        """
        # 1. Input verification: warn and cast when the input dtype does not
        #    match the layer's dtype.
        if inputs.dtype != self.my_dtype:
            tf.print(f"WARNING: {self.name} - Expected input to be {self.my_dtype}, but received {inputs.dtype}.")
            if self.my_dtype.is_complex and inputs.dtype.is_floating:
                tf.print("\tThis is normally fixed using ComplexInput() "
                         "at the start (tf casts input automatically to real).")
            inputs = tf.cast(inputs, self.my_dtype)
        if self._is_causal:  # Apply causal padding to inputs for Conv1D.
            inputs = tf.pad(inputs, self._compute_causal_padding(inputs))
        # 2. Convolution: the complex product (x_r + i*x_i) * (k_r + i*k_i) is
        #    implemented with four real-valued convolutions.
        inputs_r = tf.math.real(inputs)
        inputs_i = tf.math.imag(inputs)
        if self.my_dtype.is_complex:
            kernel_r = self.kernel_r
            kernel_i = self.kernel_i
            if self.use_bias:
                bias = tf.complex(self.bias_r, self.bias_i)
        else:
            kernel_r = tf.math.real(self.kernel)
            kernel_i = tf.math.imag(self.kernel)  # TODO: Check they are all zero
            if self.use_bias:
                bias = self.bias
        real_outputs = self.convolution_op(inputs_r, kernel_r) - self.convolution_op(inputs_i, kernel_i)
        imag_outputs = self.convolution_op(inputs_r, kernel_i) + self.convolution_op(inputs_i, kernel_r)
        outputs = tf.cast(tf.complex(real_outputs, imag_outputs), dtype=self.my_dtype)
        # 3. Add bias
        if self.use_bias:
            output_rank = outputs.shape.rank
            if self.rank == 1 and self._channels_first:
                # tf.nn.bias_add does not accept a 1D input tensor.
                bias = tf.reshape(bias, (1, self.filters, 1))
                outputs += bias
            else:
                # Handle multiple batch dimensions.
                if output_rank is not None and output_rank > 2 + self.rank:
                    def _apply_fn(o):
                        # TODO: Will this bias be visible? Horrible
                        return tf.nn.bias_add(o, bias, data_format=self._tf_data_format)
                    outputs = nn_ops.squeeze_batch_dims(
                        outputs, _apply_fn, inner_rank=self.rank + 1)
                else:
                    outputs = tf.nn.bias_add(
                        outputs, bias, data_format=self._tf_data_format)
        # 4. Activation function
        if self.activation is not None:
            outputs = self.activation(outputs)
        return outputs
    def _spatial_output_shape(self, spatial_input_shape):
        # Output length of each spatial dimension given kernel/stride/dilation.
        return [
            conv_utils.conv_output_length(
                length,
                self.kernel_size[i],
                padding=self.padding,
                stride=self.strides[i],
                dilation=self.dilation_rate[i])
            for i, length in enumerate(spatial_input_shape)
        ]
    def compute_output_shape(self, input_shape):
        """Computes the output shape, supporting extra leading batch dimensions."""
        input_shape = tf.TensorShape(input_shape).as_list()
        batch_rank = len(input_shape) - self.rank - 1
        if self.data_format == 'channels_last':
            return tf.TensorShape(
                input_shape[:batch_rank]
                + self._spatial_output_shape(input_shape[batch_rank:-1])
                + [self.filters])
        else:
            return tf.TensorShape(
                input_shape[:batch_rank] + [self.filters] +
                self._spatial_output_shape(input_shape[batch_rank + 1:]))
    def _recreate_conv_op(self, inputs):  # pylint: disable=unused-argument
        # Kept for API compatibility with the Keras base implementation.
        return False
    def get_config(self):
        """Returns the layer configuration for serialization."""
        config = super(ComplexConv, self).get_config()
        config.update({
            'filters': self.filters,
            'kernel_size': self.kernel_size,
            'strides': self.strides,
            'padding': self.padding,
            'data_format': self.data_format,
            'dilation_rate': self.dilation_rate,
            'groups': self.groups,
            'activation': activations.serialize(self.activation),
            'use_bias': self.use_bias,
            'kernel_initializer': initializers.serialize(self.kernel_initializer),
            'bias_initializer': initializers.serialize(self.bias_initializer),
            'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
            'bias_regularizer': regularizers.serialize(self.bias_regularizer),
            'activity_regularizer': regularizers.serialize(self.activity_regularizer),
            'kernel_constraint': constraints.serialize(self.kernel_constraint),
            'bias_constraint': constraints.serialize(self.bias_constraint),
            'dtype': self.my_dtype
        })
        return config
    def _compute_causal_padding(self, inputs):
        """Calculates padding for 'causal' option for 1-d conv layers."""
        left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
        if getattr(inputs.shape, 'ndims', None) is None:
            batch_rank = 1
        else:
            batch_rank = len(inputs.shape) - 2
        if self.data_format == 'channels_last':
            causal_padding = [[0, 0]] * batch_rank + [[left_pad, 0], [0, 0]]
        else:
            causal_padding = [[0, 0]] * batch_rank + [[0, 0], [left_pad, 0]]
        return causal_padding
    def _get_channel_axis(self):
        # Channel axis as a negative index, depending on the data format.
        if self.data_format == 'channels_first':
            return -1 - self.rank
        else:
            return -1
    def _get_input_channel(self, input_shape):
        # Returns the (static) number of input channels; raises if undefined.
        channel_axis = self._get_channel_axis()
        if input_shape.dims[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        return int(input_shape[channel_axis])
    def _get_padding_op(self):
        # 'causal' is implemented as explicit padding followed by a VALID conv.
        if self.padding == 'causal':
            op_padding = 'valid'
        else:
            op_padding = self.padding
        if not isinstance(op_padding, (list, tuple)):
            op_padding = op_padding.upper()
        return op_padding
    def get_real_equivalent(self):
        """Returns a real-dtype copy of this layer's configuration."""
        # TODO: Shall I check it's not already complex?
        return ComplexConv(rank=self.rank, filters=self.filters, kernel_size=self.kernel_size,
                           dtype=self.my_dtype.real_dtype, strides=self.strides, padding=self.padding,
                           data_format=self.data_format, dilation_rate=self.dilation_rate, groups=self.groups,
                           activation=self.activation, use_bias=self.use_bias,
                           kernel_initializer=self.kernel_initializer, bias_initializer=self.bias_initializer,
                           kernel_regularizer=self.kernel_regularizer, bias_regularizer=self.bias_regularizer,
                           activity_regularizer=self.activity_regularizer, kernel_constraint=self.kernel_constraint,
                           bias_constraint=self.bias_constraint, trainable=self.trainable,
                           name=self.name + "_real_equiv")
class ComplexConv1D(ComplexConv):
    """Complex-valued 1D convolution layer (e.g. temporal convolution).

    Thin wrapper over :class:`ComplexConv` with ``rank=1``; see that class for
    the meaning of every argument.
    """

    def __init__(self,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='valid', dtype=DEFAULT_COMPLEX_TYPE,
                 data_format='channels_last',
                 dilation_rate=1,
                 groups=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer=ComplexGlorotUniform(),
                 bias_initializer=Zeros(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        # Resolve the Keras "get by identifier" arguments once, then delegate
        # to the N-D base class.
        resolved = dict(
            activation=activations.get(activation),
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
        )
        super(ComplexConv1D, self).__init__(
            rank=1, dtype=dtype, filters=filters, kernel_size=kernel_size,
            strides=strides, padding=padding, data_format=data_format,
            dilation_rate=dilation_rate, groups=groups, use_bias=use_bias,
            **resolved, **kwargs)
class ComplexConv2D(ComplexConv):
    """2D convolution layer (e.g. spatial convolution over images).

    Complex-valued counterpart of `tf.keras.layers.Conv2D`: a convolution
    kernel is convolved with the (possibly complex) layer input to produce a
    tensor of outputs. If `use_bias` is True, a bias vector is created and
    added to the outputs. Finally, if `activation` is not `None`, it is applied
    to the outputs as well.

    When using this layer as the first layer in a model, provide the keyword
    argument `input_shape` (tuple of integers, does not include the sample
    axis), e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in
    `data_format="channels_last"`.

    Input shape:
        4+D tensor with shape `batch_shape + (channels, rows, cols)` if
        `data_format='channels_first'`, or `batch_shape + (rows, cols, channels)`
        if `data_format='channels_last'`.
    Output shape:
        4+D tensor with shape `batch_shape + (filters, new_rows, new_cols)` or
        `batch_shape + (new_rows, new_cols, filters)` respectively; `rows` and
        `cols` values might have changed due to padding.
    Returns:
        A tensor of rank 4+ representing `activation(conv2d(inputs, kernel) + bias)`.
    Raises:
        ValueError: if `padding` is `"causal"`.
        ValueError: when both `strides > 1` and `dilation_rate > 1`.
    """

    def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1),
                 groups=1, activation=None, use_bias=True, dtype=DEFAULT_COMPLEX_TYPE,
                 kernel_initializer=ComplexGlorotUniform(), bias_initializer=Zeros(),
                 kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None, **kwargs):
        """
        :param filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        :param kernel_size: Integer or tuple/list of 2 integers, the height and
            width of the 2D convolution window.
        :param strides: Integer or tuple/list of 2 integers, strides along
            height and width. Any stride value != 1 is incompatible with any
            `dilation_rate` value != 1.
        :param padding: one of `"valid"` or `"same"` (case-insensitive).
        :param data_format: `channels_last` or `channels_first`; defaults to the
            `image_data_format` value in your Keras config (`channels_last` if never set).
        :param dilation_rate: Integer or tuple/list of 2 integers, dilation rate
            for dilated convolution.
        :param groups: Positive integer, number of groups the input is split
            into along the channel axis; input channels and `filters` must both
            be divisible by `groups`.
        :param activation: Activation function to use; None means no activation.
            For complex `dtype`, this must be a `cvnn.activations` module function.
        :param use_bias: Boolean, whether the layer uses a bias vector.
        :param kernel_initializer: Initializer for the `kernel` weights matrix.
        :param bias_initializer: Initializer for the bias vector.
        :param kernel_regularizer: Regularizer for the kernel (see `keras.regularizers`).
        :param bias_regularizer: Regularizer for the bias (see `keras.regularizers`).
        :param activity_regularizer: Regularizer applied to the layer output.
        :param kernel_constraint: Constraint applied to the kernel matrix.
        :param bias_constraint: Constraint applied to the bias vector.
        """
        # Resolve the Keras "get by identifier" arguments once, then delegate
        # to the N-D base class with rank=2.
        resolved = dict(
            activation=activations.get(activation),
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
        )
        super(ComplexConv2D, self).__init__(
            rank=2, dtype=dtype, filters=filters, kernel_size=kernel_size,
            strides=strides, padding=padding, data_format=data_format,
            dilation_rate=dilation_rate, groups=groups, use_bias=use_bias,
            **resolved, **kwargs)
class ComplexConv3D(ComplexConv):
    """3D convolution layer (e.g. spatial convolution over volumes) with complex support.

    A convolution kernel is convolved with the layer input to produce a tensor of
    outputs. If `use_bias` is True, a bias vector is created and added to the
    outputs. Finally, if `activation` is not `None`, it is applied to the outputs
    as well.

    When using this layer as the first layer in a model, provide the keyword
    argument `input_shape` (tuple of integers, sample axis excluded), e.g.
    `input_shape=(128, 128, 128, 1)` for 128x128x128 single-channel volumes with
    `data_format="channels_last"`.

    Examples:
        >>> # The inputs are 28x28x28 volumes with a single channel, and the
        >>> # batch size is 4
        >>> input_shape = (4, 28, 28, 28, 1)
        >>> x = tf.random.normal(input_shape)
        >>> y = tf.keras.layers.Conv3D(
        ...     2, 3, activation='relu', input_shape=input_shape[1:])(x)
        >>> print(y.shape)
        (4, 26, 26, 26, 2)

    Arguments:
        filters: Integer, dimensionality of the output space (i.e. the number of
            output filters in the convolution).
        kernel_size: An integer or tuple/list of 3 integers: depth, height and
            width of the 3D convolution window. A single integer applies the same
            value to every spatial dimension.
        strides: An integer or tuple/list of 3 integers, convolution stride per
            spatial dimension. Any stride value != 1 is incompatible with any
            `dilation_rate` value != 1.
        padding: one of `"valid"` (no padding) or `"same"` (case-insensitive).
        data_format: A string, `channels_last` (default) or `channels_first`,
            giving the ordering of the dimensions in the inputs. Defaults to the
            `image_data_format` value found in your Keras config
            (`~/.keras/keras.json`).
        dilation_rate: an integer or tuple/list of 3 integers specifying the
            dilation rate for dilated convolution.
        groups: A positive integer; the input is split along the channel axis into
            `groups` groups, each convolved with `filters / groups` filters. Input
            channels and `filters` must both be divisible by `groups`.
        activation: Activation function to use; `None` means no activation. For
            complex dtypes, use a `cvnn.activations` function.
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix.
        bias_initializer: Initializer for the bias vector.
        kernel_regularizer: Regularizer applied to the `kernel` weights matrix.
        bias_regularizer: Regularizer applied to the bias vector.
        activity_regularizer: Regularizer applied to the layer's output.
        kernel_constraint: Constraint applied to the kernel matrix.
        bias_constraint: Constraint applied to the bias vector.

    Input shape:
        5+D tensor `batch_shape + (conv_dim1, conv_dim2, conv_dim3, channels)`
        if `data_format='channels_last'`, channels-leading otherwise.

    Output shape:
        5+D tensor with `channels` replaced by `filters`; the spatial dimensions
        may have changed due to padding.

    Returns:
        A tensor of rank 5+ representing
        `activation(conv3d(inputs, kernel) + bias)`.

    Raises:
        ValueError: if `padding` is "causal".
        ValueError: when both `strides > 1` and `dilation_rate > 1`.
    """

    def __init__(self, filters, kernel_size, dtype=DEFAULT_COMPLEX_TYPE, strides=(1, 1, 1),
                 padding='valid', data_format=None, dilation_rate=(1, 1, 1), groups=1,
                 activation=None, use_bias=True, kernel_initializer=ComplexGlorotUniform(),
                 bias_initializer=Zeros(), kernel_regularizer=None, bias_regularizer=None,
                 activity_regularizer=None, kernel_constraint=None, bias_constraint=None,
                 **kwargs):
        """Delegate to :class:`ComplexConv` with ``rank=3`` (see class docstring for parameters)."""
        # Resolve string/None specs to concrete objects, exactly as Keras' getters do.
        conv_params = dict(
            rank=3,
            dtype=dtype,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            groups=groups,
            activation=activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
        )
        super(ComplexConv3D, self).__init__(**conv_params, **kwargs)
class ComplexConv2DTranspose(ComplexConv2D):
    """
    Transposed convolution layer (sometimes — wrongly — called deconvolution), with complex support.

    The need for transposed convolutions generally arises from the desire to use
    a transformation going in the opposite direction of a normal convolution,
    i.e., from something that has the shape of the output of some convolution to
    something that has the shape of its input, while maintaining a connectivity
    pattern that is compatible with said convolution.

    For a complex dtype, the complex transposed convolution is implemented with
    four real-valued transposed convolutions combined as
    ``real = T(x_r, k_r) - T(x_i, k_i)`` and ``imag = T(x_i, k_r) + T(x_r, k_i)``
    (see :meth:`call`).

    When using this layer as the first layer in a model, provide the keyword
    argument `input_shape` (tuple of integers, does not include the sample
    axis), e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures in
    `data_format="channels_last"`.

    Arguments:
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        kernel_size: An integer or tuple/list of 2 integers: height and width of
            the 2D convolution window.
        strides: An integer or tuple/list of 2 integers, strides along height
            and width. Any stride value != 1 is incompatible with any
            `dilation_rate` value != 1.
        padding: one of `"valid"` or `"same"` (case-insensitive).
        output_padding: An integer or tuple/list of 2 integers, the amount of
            padding along the height and width of the output tensor. The amount
            of output padding along a given dimension must be lower than the
            stride along that same dimension. If set to `None` (default), the
            output shape is inferred.
        data_format: A string, `channels_last` (default) or `channels_first`.
        dilation_rate: an integer or tuple/list of 2 integers, the dilation rate
            for dilated convolution.
        activation: Activation function to use; `None` means no activation.
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix.
        bias_initializer: Initializer for the bias vector.
        kernel_regularizer: Regularizer applied to the `kernel` weights matrix.
        bias_regularizer: Regularizer applied to the bias vector.
        activity_regularizer: Regularizer applied to the layer's output.
        kernel_constraint: Constraint applied to the kernel matrix.
        bias_constraint: Constraint applied to the bias vector.

    Input shape:
        4D tensor `(batch_size, rows, cols, channels)` if
        `data_format='channels_last'` (channels-leading otherwise).

    Output shape:
        4D tensor with `channels` replaced by `filters`; `rows` and `cols` may
        have changed due to padding. If `output_padding` is specified:
        ```
        new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +
        output_padding[0])
        new_cols = ((cols - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +
        output_padding[1])
        ```

    Returns:
        A tensor of rank 4 representing
        `activation(conv2dtranspose(inputs, kernel) + bias)`.

    Raises:
        ValueError: if `padding` is "causal".
        ValueError: when both `strides` > 1 and `dilation_rate` > 1.

    References:
        - [A guide to convolution arithmetic for deep
          learning](https://arxiv.org/abs/1603.07285v1)
        - [Deconvolutional
          Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
    """
    def __init__(self,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid', dtype=DEFAULT_COMPLEX_TYPE,
                 output_padding=None,
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer=ComplexGlorotUniform(),
                 bias_initializer=Zeros(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """See the class docstring for parameter semantics."""
        super(ComplexConv2DTranspose, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding, dtype=dtype,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs)
        self.output_padding = output_padding
        if self.output_padding is not None:
            self.output_padding = conv_utils.normalize_tuple(self.output_padding, 2, 'output_padding')
            # Output padding must leave room inside each stride window.
            for stride, out_pad in zip(self.strides, self.output_padding):
                if out_pad >= stride:
                    raise ValueError(f'Stride {self.strides} must be greater than output padding {self.output_padding}')

    def build(self, input_shape):
        """Create kernel (and bias) variables once the input channel count is known.

        For a complex `dtype`, kernel and bias are stored as separate real and
        imaginary variables (`kernel_r`/`kernel_i`, `bias_r`/`bias_i`);
        otherwise a single `kernel` (and `bias`) variable is used.
        """
        input_shape = tf.TensorShape(input_shape)
        if len(input_shape) != 4:
            raise ValueError(f'Inputs should have rank 4. Received input shape: {input_shape}')
        channel_axis = self._get_channel_axis()
        if input_shape.dims[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
        # Transposed-convolution kernels are laid out (h, w, filters, in_channels).
        kernel_shape = self.kernel_size + (self.filters, input_dim)
        if self.my_dtype.is_complex:
            self.kernel_r = tf.Variable(
                initial_value=self.kernel_initializer(shape=kernel_shape, dtype=self.my_dtype),
                name='kernel_r',
                constraint=self.kernel_constraint,
                trainable=True
            )  # TODO: regularizer=self.kernel_regularizer,
            self.kernel_i = tf.Variable(
                initial_value=self.kernel_initializer(shape=kernel_shape, dtype=self.my_dtype),
                name='kernel_i',
                constraint=self.kernel_constraint,
                trainable=True
            )  # TODO: regularizer=self.kernel_regularizer
            if self.use_bias:
                self.bias_r = tf.Variable(
                    initial_value=self.bias_initializer(shape=(self.filters,), dtype=self.my_dtype),
                    name='bias_r',
                    # BUG FIX: the constraint was previously applied only to bias_i,
                    # leaving the real part of the bias unconstrained.
                    constraint=self.bias_constraint,
                    trainable=True
                )
                self.bias_i = tf.Variable(
                    initial_value=self.bias_initializer(shape=(self.filters,), dtype=self.my_dtype),
                    name='bias_i',
                    constraint=self.bias_constraint,
                    trainable=True
                )  # TODO: regularizer=self.bias_regularizer
        else:
            self.kernel = self.add_weight(
                name='kernel',
                shape=kernel_shape,
                initializer=self.kernel_initializer,
                regularizer=self.kernel_regularizer,
                constraint=self.kernel_constraint,
                trainable=True,
                dtype=self.my_dtype)
            if self.use_bias:
                self.bias = self.add_weight(
                    name='bias',
                    shape=(self.filters,),
                    initializer=self.bias_initializer,
                    regularizer=self.bias_regularizer,
                    constraint=self.bias_constraint,
                    trainable=True,
                    dtype=self.my_dtype)
        if not self.use_bias:
            self.bias = None
        self.built = True

    def call(self, inputs):
        """Apply the (complex) transposed convolution, bias and activation to `inputs`."""
        inputs_shape = tf.shape(inputs)
        batch_size = inputs_shape[0]
        if self.data_format == 'channels_first':
            h_axis, w_axis = 2, 3
        else:
            h_axis, w_axis = 1, 2
        # Use the constant height and weight when possible.
        # to all convolutional layers, which currently lost the static shape information due to tf.shape().
        height, width = None, None
        if inputs.shape.rank is not None:
            dims = inputs.shape.as_list()
            height = dims[h_axis]
            width = dims[w_axis]
        height = height if height is not None else inputs_shape[h_axis]
        width = width if width is not None else inputs_shape[w_axis]
        kernel_h, kernel_w = self.kernel_size
        stride_h, stride_w = self.strides
        if self.output_padding is None:
            out_pad_h = out_pad_w = None
        else:
            out_pad_h, out_pad_w = self.output_padding
        # Infer the dynamic output shape:
        out_height = conv_utils.deconv_output_length(height,
                                                     kernel_h,
                                                     padding=self.padding,
                                                     output_padding=out_pad_h,
                                                     stride=stride_h,
                                                     dilation=self.dilation_rate[0])
        out_width = conv_utils.deconv_output_length(width,
                                                    kernel_w,
                                                    padding=self.padding,
                                                    output_padding=out_pad_w,
                                                    stride=stride_w,
                                                    dilation=self.dilation_rate[1])
        if self.data_format == 'channels_first':
            output_shape = (batch_size, self.filters, out_height, out_width)
        else:
            output_shape = (batch_size, out_height, out_width, self.filters)
        output_shape_tensor = tf.stack(output_shape)
        # Deconvolution part: split input and kernel into real/imaginary parts so the
        # complex product can be computed with four real-valued transposed convolutions.
        inputs_r = tf.math.real(inputs)
        inputs_i = tf.math.imag(inputs)
        if self.my_dtype.is_complex:
            kernel_r = self.kernel_r
            kernel_i = self.kernel_i
            if self.use_bias:
                bias = tf.complex(self.bias_r, self.bias_i)
        else:
            kernel_r = tf.math.real(self.kernel)
            kernel_i = tf.math.imag(self.kernel)
            if self.use_bias:
                bias = self.bias
        real_outputs_ri_rk = backend.conv2d_transpose(
            inputs_r,
            kernel_r,
            output_shape_tensor,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        real_outputs_ii_ik = backend.conv2d_transpose(
            inputs_i,
            kernel_i,
            output_shape_tensor,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        real_outputs_ri_ik = backend.conv2d_transpose(
            inputs_r,
            kernel_i,
            output_shape_tensor,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        real_outputs_ii_rk = backend.conv2d_transpose(
            inputs_i,
            kernel_r,
            output_shape_tensor,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        # (x_r + i*x_i) * (k_r + i*k_i) = (x_r*k_r - x_i*k_i) + i*(x_i*k_r + x_r*k_i)
        real_outputs = real_outputs_ri_rk - real_outputs_ii_ik
        imag_outputs = real_outputs_ii_rk + real_outputs_ri_ik
        outputs = tf.cast(tf.complex(real_outputs, imag_outputs), dtype=self.my_dtype)
        if not tf.executing_eagerly():
            # Infer the static output shape:
            out_shape = self.compute_output_shape(inputs.shape)
            outputs.set_shape(out_shape)
        # Apply bias
        if self.use_bias:
            outputs = tf.nn.bias_add(outputs, bias, data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
        # Apply activation function
        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        """Compute the (static) output shape from `input_shape` using the deconv length formula."""
        input_shape = tf.TensorShape(input_shape).as_list()
        output_shape = list(input_shape)
        if self.data_format == 'channels_first':
            c_axis, h_axis, w_axis = 1, 2, 3
        else:
            c_axis, h_axis, w_axis = 3, 1, 2
        kernel_h, kernel_w = self.kernel_size
        stride_h, stride_w = self.strides
        if self.output_padding is None:
            out_pad_h = out_pad_w = None
        else:
            out_pad_h, out_pad_w = self.output_padding
        output_shape[c_axis] = self.filters
        output_shape[h_axis] = conv_utils.deconv_output_length(
            output_shape[h_axis],
            kernel_h,
            padding=self.padding,
            output_padding=out_pad_h,
            stride=stride_h,
            dilation=self.dilation_rate[0])
        output_shape[w_axis] = conv_utils.deconv_output_length(
            output_shape[w_axis],
            kernel_w,
            padding=self.padding,
            output_padding=out_pad_w,
            stride=stride_w,
            dilation=self.dilation_rate[1])
        return tf.TensorShape(output_shape)

    def get_config(self):
        """Return the layer configuration, extending the base config with `output_padding`."""
        config = super(ComplexConv2DTranspose, self).get_config()
        config['output_padding'] = self.output_padding
        return config
| 51,395 | 49.636453 | 141 | py |
cvnn | cvnn-master/cvnn/layers/core.py | from abc import ABC, abstractmethod
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Flatten, Dense, InputLayer, Layer
from tensorflow.python.keras import backend as K
from tensorflow.keras import initializers
import tensorflow_probability as tfp
from tensorflow import TensorShape, Tensor
# from keras.utils import control_flow_util
# typing
from typing import Optional, Union, List, Tuple
# Own modules
from cvnn.activations import t_activation
from cvnn.initializers import ComplexGlorotUniform, Zeros, Ones, ComplexInitializer, INIT_TECHNIQUES
# Tensor-like values accepted by layer `call` methods.
t_input = Union[Tensor, tuple, list]
# A single shape, or a list of shapes for multi-input layers.
t_input_shape = Union[TensorShape, List[TensorShape]]
# Library-wide default dtype: single-precision complex.
DEFAULT_COMPLEX_TYPE = tf.as_dtype(np.complex64)
class ComplexLayer(ABC):
    """Interface implemented by every cvnn layer: it must know how to build a real-valued copy of itself."""

    @abstractmethod
    def get_real_equivalent(self):
        """
        :return: Gets a real-valued COPY of the Complex Layer.
        """
        ...
def complex_input(shape=None, batch_size=None, name=None, dtype=DEFAULT_COMPLEX_TYPE,
                  sparse=False, tensor=None, ragged=False, **kwargs):
    """
    `complex_input()` is used to instantiate a (complex-dtype) Keras tensor.

    A Keras tensor is a TensorFlow symbolic tensor object, augmented with
    attributes that allow building a Keras model just by knowing the inputs and
    outputs of the model. For instance, if `a`, `b` and `c` are Keras tensors,
    it becomes possible to do: `model = Model(input=[a, b], output=c)`.

    Arguments:
        shape: A shape tuple (integers), not including the batch size. Elements
            may be None for dimensions whose size is not known.
        batch_size: optional static batch size (integer).
        name: An optional, model-unique name string for the layer
            (autogenerated if not provided).
        dtype: The data type expected by the input (complex by default).
        sparse: A boolean specifying whether the placeholder to be created is
            sparse. Only one of 'ragged' and 'sparse' can be True.
        tensor: Optional existing tensor to wrap into the `Input` layer. If
            set, the layer will use the `tf.TypeSpec` of this tensor rather
            than creating a new placeholder tensor.
        ragged: A boolean specifying whether the placeholder to be created is
            ragged. Only one of 'ragged' and 'sparse' can be True.
        **kwargs: deprecated arguments support (`batch_shape`,
            `batch_input_shape`).

    Returns:
        A `tensor`.

    Example:
    ```python
    x = complex_input(shape=(32,))
    y = Dense(16, activation='softmax')(x)
    model = Model(x, y)
    ```

    Raises:
        ValueError: If both `sparse` and `ragged` are provided.
        ValueError: If both `shape` and (`batch_input_shape` or `batch_shape`) are provided.
        ValueError: If both `shape` and `tensor` are None.
        ValueError: if any unrecognized parameters are provided.
    """
    # Argument validation (same order as the error priority).
    if sparse and ragged:
        raise ValueError(
            'Cannot set both sparse and ragged to True in a Keras input.')
    dtype = tf.as_dtype(dtype)
    layer_config = dict(name=name, dtype=dtype.name, sparse=sparse,
                        ragged=ragged, input_tensor=tensor)
    # `batch_shape` is the deprecated spelling; both keys are always consumed,
    # with `batch_input_shape` taking precedence when both are given.
    legacy_batch_shape = kwargs.pop('batch_shape', None)
    batch_input_shape = kwargs.pop('batch_input_shape', legacy_batch_shape)
    if shape is not None and batch_input_shape is not None:
        raise ValueError('Only provide the `shape` OR `batch_input_shape` argument '
                         'to Input, not both at the same time.')
    if batch_input_shape is None and shape is None and tensor is None:
        raise ValueError('Please provide to Input either a `shape`'
                         ' or a `tensor` argument. Note that '
                         '`shape` does not include the batch '
                         'dimension.')
    if kwargs:
        raise ValueError('Unrecognized keyword arguments:', kwargs.keys())
    if batch_input_shape:
        shape = batch_input_shape[1:]
        layer_config['batch_input_shape'] = batch_input_shape
    else:
        layer_config['batch_size'] = batch_size
        layer_config['input_shape'] = shape
    input_layer = ComplexInput(**layer_config)
    # Return tensor including `_keras_history`.
    # Note that in this case train_output and test_output are the same pointer.
    outputs = input_layer._inbound_nodes[0].output_tensors
    if isinstance(outputs, list) and len(outputs) == 1:
        return outputs[0]
    return outputs
class ComplexInput(InputLayer, ComplexLayer):
    """Keras `InputLayer` whose default dtype is complex (see also `complex_input()`)."""

    def __init__(self, input_shape=None, batch_size=None, dtype=DEFAULT_COMPLEX_TYPE, input_tensor=None, sparse=False,
                 name=None, ragged=False, **kwargs):
        """Same parameters as `tf.keras.layers.InputLayer`, but `dtype` defaults to complex."""
        super(ComplexInput, self).__init__(input_shape=input_shape, batch_size=batch_size, dtype=dtype,
                                           input_tensor=input_tensor, sparse=sparse,
                                           name=name, ragged=ragged, **kwargs
                                           )

    def get_real_equivalent(self):
        """Return a real-valued copy of this input layer.

        The last (channel) dimension is doubled so the real and imaginary parts
        can be stacked side by side, and the dtype becomes the real counterpart
        of this layer's dtype (e.g. complex64 -> float32); already-real dtypes
        are left unchanged.
        """
        # NOTE(review): assumes InputLayer exposes batch_size/input_tensor/sparse/ragged
        # attributes as used by the original implementation — TODO confirm per TF version.
        real_input_shape = self.input_shape[:-1] + (self.input_shape[-1] * 2,)
        # BUG FIX: previously the complex dtype was kept, so the "real equivalent"
        # input layer still expected complex data. Use the real counterpart dtype,
        # consistent with ComplexDense.get_real_equivalent.
        return ComplexInput(input_shape=real_input_shape, batch_size=self.batch_size,
                            dtype=tf.as_dtype(self.dtype).real_dtype,
                            input_tensor=self.input_tensor, sparse=self.sparse,
                            name=self.name + "_real_equiv", ragged=self.ragged)
class ComplexFlatten(Flatten, ComplexLayer):
    """`Flatten` that also accepts complex tensors by flattening real and imaginary parts independently."""

    def call(self, inputs: t_input):
        """Flatten `inputs` (batch axis handled by `Flatten`) while keeping its dtype."""
        flatten = super(ComplexFlatten, self).call
        flat_real = flatten(tf.math.real(inputs))
        flat_imag = flatten(tf.math.imag(inputs))
        # Recombine and cast back to the original (possibly real) input dtype.
        return tf.cast(tf.complex(flat_real, flat_imag), inputs.dtype)

    def get_real_equivalent(self):
        # Flattening is dtype agnostic, so a fresh instance is enough.
        return ComplexFlatten(name=self.name + "_real_equiv")
class ComplexDense(Dense, ComplexLayer):
    """
    Fully connected complex-valued layer.

    Implements the operation:
        activation(input * weights + bias)

    * where data types can be either complex or real.
    * activation is the element-wise activation function passed as the activation argument,
    * weights is a matrix created by the layer
    * bias is a bias vector created by the layer

    For a complex dtype, the kernel (and bias) are stored as two real variables
    (real and imaginary parts) that are combined with `tf.complex` at call time.
    """

    def __init__(self, units: int, activation: t_activation = None, use_bias: bool = True,
                 kernel_initializer="ComplexGlorotUniform",
                 bias_initializer="Zeros",
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 dtype=DEFAULT_COMPLEX_TYPE,
                 init_technique: str = 'mirror',
                 **kwargs):
        """
        :param units: Positive integer, dimensionality of the output space.
        :param activation: Activation function to use.
            Either from keras.activations or cvnn.activations. For complex dtype, only cvnn.activations module supported.
            If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x).
        :param use_bias: Boolean, whether the layer uses a bias vector.
        :param kernel_initializer: Initializer for the kernel weights matrix.
            Recommended to use a `ComplexInitializer` such as `cvnn.initializers.ComplexGlorotUniform()` (default)
        :param bias_initializer: Initializer for the bias vector.
            Recommended to use a `ComplexInitializer` such as `cvnn.initializers.Zeros()` (default)
        :param dtype: Dtype of the input and layer.
        :param init_technique: One of 'mirror' or 'zero_imag'. Tells the initializer how to init complex number if
            the initializer was tensorflow's built in initializers (not supporting complex numbers).
            - 'mirror': Uses the initializer for both real and imaginary part.
                Note that some initializers such as Glorot or He will lose it's property if initialized this way.
            - 'zero_imag': Initializer real part and let imaginary part to zero.
        """
        if activation is None:
            activation = "linear"
        super(ComplexDense, self).__init__(units, activation=activation, use_bias=use_bias,
                                           kernel_initializer=kernel_initializer,
                                           bias_initializer=bias_initializer,
                                           kernel_constraint=kernel_constraint, kernel_regularizer=kernel_regularizer,
                                           **kwargs)
        # Keras' `dtype` property is read-only, so the (possibly complex) dtype is kept separately.
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        self.init_technique = init_technique.lower()

    def build(self, input_shape):
        """Create the layer weights.

        Complex dtype: two real variables per tensor (`kernel_r`/`kernel_i`,
        `bias_r`/`bias_i`). When a plain TensorFlow initializer is given,
        `init_technique` decides how the imaginary parts are initialized.
        """
        if self.my_dtype.is_complex:
            i_bias_dtype = self.my_dtype if isinstance(self.bias_initializer,
                                                       ComplexInitializer) else self.my_dtype.real_dtype
            i_kernel_initializer = self.kernel_initializer
            i_bias_initializer = self.bias_initializer
            if not isinstance(self.kernel_initializer, ComplexInitializer):
                tf.print(f"WARNING: you are using a Tensorflow Initializer for complex numbers. "
                         f"Using {self.init_technique} method.")
                if self.init_technique in INIT_TECHNIQUES:
                    if self.init_technique == 'zero_imag':
                        # Real parts keep the given initializer; imaginary parts start at zero.
                        i_kernel_initializer = initializers.Zeros()
                        i_bias_initializer = initializers.Zeros()
                else:
                    raise ValueError(f"Unsuported init_technique {self.init_technique}, "
                                     f"supported techniques are {INIT_TECHNIQUES}")
            self.w_r = self.add_weight('kernel_r',
                                       shape=(input_shape[-1], self.units),
                                       dtype=self.my_dtype.real_dtype,
                                       initializer=self.kernel_initializer,
                                       trainable=True,
                                       constraint=self.kernel_constraint, regularizer=self.kernel_regularizer)
            # BUG FIX: the imaginary kernel previously used `self.kernel_initializer`,
            # which silently ignored init_technique='zero_imag' for the kernel
            # (only the bias got a zero imaginary part). Use the resolved
            # `i_kernel_initializer`; identical to before for 'mirror'.
            self.w_i = self.add_weight('kernel_i',
                                       shape=(input_shape[-1], self.units),
                                       dtype=self.my_dtype.real_dtype,
                                       initializer=i_kernel_initializer,
                                       trainable=True,
                                       constraint=self.kernel_constraint, regularizer=self.kernel_regularizer)
            if self.use_bias:
                self.b_r = tf.Variable(
                    name='bias_r',
                    initial_value=self.bias_initializer(shape=(self.units,), dtype=i_bias_dtype),
                    trainable=self.use_bias
                )
                self.b_i = tf.Variable(
                    name='bias_i',
                    initial_value=i_bias_initializer(shape=(self.units,), dtype=i_bias_dtype),
                    trainable=self.use_bias
                )
        else:
            # TODO: For Complex you should probably want to use MY init for real keras. DO sth! at least error message
            self.w = self.add_weight('kernel',
                                     shape=(input_shape[-1], self.units),
                                     dtype=self.my_dtype,
                                     initializer=self.kernel_initializer,
                                     trainable=True,
                                     constraint=self.kernel_constraint, regularizer=self.kernel_regularizer)
            if self.use_bias:
                self.b = self.add_weight('bias', shape=(self.units,), dtype=self.my_dtype,
                                         initializer=self.bias_initializer, trainable=self.use_bias)

    def call(self, inputs: t_input):
        """Apply `activation(inputs @ w + b)`, casting `inputs` to the layer dtype if needed."""
        if inputs.dtype != self.my_dtype:
            tf.print(f"WARNING: {self.name} - Expected input to be {self.my_dtype}, but received {inputs.dtype}.")
            if self.my_dtype.is_complex and inputs.dtype.is_floating:
                tf.print("\tThis is normally fixed using ComplexInput() "
                         "at the start (tf casts input automatically to real).")
            inputs = tf.cast(inputs, self.my_dtype)
        if self.my_dtype.is_complex:
            # Re-assemble the complex weights from their real/imaginary variables.
            w = tf.complex(self.w_r, self.w_i)
            if self.use_bias:
                b = tf.complex(self.b_r, self.b_i)
        else:
            w = self.w
            if self.use_bias:
                b = self.b
        out = tf.matmul(inputs, w)
        if self.use_bias:
            out = out + b
        return self.activation(out)

    def get_real_equivalent(self, output_multiplier=2):
        """Return a real-valued copy of this layer.

        :param output_multiplier: Factor applied to `units`; defaults to 2
            because one complex unit carries two real degrees of freedom.
        """
        return ComplexDense(units=int(round(self.units * output_multiplier)),
                            activation=self.activation, use_bias=self.use_bias,
                            kernel_initializer=self.kernel_initializer, bias_initializer=self.bias_initializer,
                            kernel_constraint=self.kernel_constraint, kernel_regularizer=self.kernel_regularizer,
                            dtype=self.my_dtype.real_dtype, name=self.name + "_real_equiv")

    def get_config(self):
        """Return the layer configuration, adding the dtype and init technique."""
        config = super(ComplexDense, self).get_config()
        config.update({
            'dtype': self.my_dtype,
            'init_technique': self.init_technique
        })
        return config
class ComplexDropout(Layer, ComplexLayer):
    """
    Applies Dropout to the input.
    It works also with complex inputs!

    The Dropout layer randomly sets input units to 0 with a frequency of `rate`
    at each step during training time, which helps prevent overfitting.
    Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over
    all inputs is unchanged.

    Note that the Dropout layer only applies when `training` is set to True
    such that no values are dropped during inference. When using `model.fit`,
    `training` will be appropriately set to True automatically, and in other
    contexts, you can set the kwarg explicitly to True when calling the layer.
    (This is in contrast to setting `trainable=False` for a Dropout layer.
    `trainable` does not affect the layer's behavior, as Dropout does
    not have any variables/weights that can be frozen during training.)
    """

    def __init__(self, rate: float, noise_shape=None, seed: Optional[int] = None, **kwargs):
        """
        :param rate: Float between 0 and 1. Fraction of the input units to drop.
        :param noise_shape: 1D integer tensor representing the shape of the binary dropout mask that
            will be multiplied with the input.
            For instance, if your inputs have shape `(batch_size, timesteps, features)` and you want
            the dropout mask to be the same for all timesteps, you can use
            `noise_shape=(batch_size, 1, features)`. Entries that are `None` are resolved to the
            corresponding input dimension at call time.
        :param seed: A Python integer to use as random seed.
        """
        super(ComplexDropout, self).__init__(**kwargs)  # trainable=False,
        if isinstance(rate, (int, float)) and not 0 <= rate <= 1:
            raise ValueError(f'Invalid value {rate} received for `rate`, expected a value between 0 and 1.')
        self.rate = rate
        self.seed = seed
        self.noise_shape = noise_shape

    def _get_noise_shape(self, inputs):
        # Resolve `None` entries of self.noise_shape against the concrete input
        # shape, mirroring keras.layers.Dropout. This allows custom noise shapes
        # with dynamically sized inputs.
        if self.noise_shape is None:
            return None
        concrete_inputs_shape = tf.shape(inputs)
        noise_shape = []
        for i, value in enumerate(self.noise_shape):
            noise_shape.append(concrete_inputs_shape[i] if value is None else value)
        return tf.convert_to_tensor(noise_shape)

    def call(self, inputs, training=None):
        """
        :param inputs: Input tensor (of any rank).
        :param training: Python boolean indicating whether the layer should behave in training mode
            (adding dropout) or in inference mode (doing nothing).
        """
        if training is None:
            training = K.learning_phase()
            tf.print(f"Training was None and now is {training}")
            # Debug aid: it is unclear WHEN this happens; we trust
            # K.learning_phase() to return a correct boolean.
        if not training:
            return inputs
        # Build a real-valued {0, 1/(1-rate)} mask and apply it to both real and
        # imaginary parts via complex multiplication.
        # BUGFIX: resolve the noise shape through _get_noise_shape() so `None`
        # entries are supported (previously the raw self.noise_shape was passed
        # straight to tf.nn.dropout and the helper above was dead code).
        drop_filter = tf.nn.dropout(tf.ones(tf.shape(inputs)), rate=self.rate,
                                    noise_shape=self._get_noise_shape(inputs), seed=self.seed)
        y_out = tf.multiply(tf.cast(drop_filter, dtype=inputs.dtype), inputs)
        y_out = tf.cast(y_out, dtype=inputs.dtype)
        return y_out

    def compute_output_shape(self, input_shape):
        # Dropout never changes the shape of its input.
        return input_shape

    def get_real_equivalent(self):
        """Return a real-valued twin of this layer (dropout is dtype-agnostic)."""
        return ComplexDropout(rate=self.rate, seed=self.seed, noise_shape=self.noise_shape,
                              name=self.name + "_real_equiv")

    def get_config(self):
        """Serialise rate/noise_shape/seed on top of the base layer config."""
        config = super(ComplexDropout, self).get_config()
        config.update({
            'rate': self.rate,
            'noise_shape': self.noise_shape,
            'seed': self.seed
        })
        return config
class ComplexBatchNormalization(Layer, ComplexLayer):
    """
    Complex Batch-Normalization as defined in section 3.5 of https://arxiv.org/abs/1705.09792

    The (real, imag) parts of each feature are whitened jointly with the inverse
    square root of their 2x2 covariance matrix, then optionally re-scaled by
    gamma (``scale``) and re-centred by beta (``center``).
    """

    def __init__(self, axis: Union[List[int], Tuple[int], int] = -1, momentum: float = 0.99,
                 center: bool = True, scale: bool = True, epsilon: float = 0.001,
                 beta_initializer=Zeros(), gamma_initializer=Ones(), dtype=DEFAULT_COMPLEX_TYPE,
                 moving_mean_initializer=Zeros(), moving_variance_initializer=Ones(), cov_method: int = 2,
                 **kwargs):
        """
        :param axis: Axis (or list of axes) holding the features to be normalised. Default: last axis.
        :param momentum: Momentum for the moving mean / moving variance.
        :param center: If True, add the learned offset beta to the normalised tensor.
        :param scale: If True, multiply the normalised tensor by the learned gamma.
        :param epsilon: Small constant added to the covariance diagonal for numerical stability.
        :param cov_method: Internal choice (1 or 2) of how the 2x2 covariance is computed;
            both are mathematically equivalent.
        """
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        self.epsilon = epsilon
        self.cov_method = cov_method
        if isinstance(axis, int):
            axis = [axis]
        self.axis = list(axis)
        super(ComplexBatchNormalization, self).__init__(**kwargs)
        self.momentum = momentum
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(moving_mean_initializer)
        self.moving_variance_initializer = initializers.get(moving_variance_initializer)
        self.center = center
        self.scale = scale

    def build(self, input_shape):
        self.epsilon_matrix = tf.eye(2, dtype=self.my_dtype.real_dtype) * self.epsilon
        # Cast the negative indices to positive
        self.axis = [len(input_shape) + ax if ax < 0 else ax for ax in self.axis]
        self.used_axis = [ax for ax in range(0, len(input_shape)) if ax not in self.axis]
        desired_shape = [input_shape[ax] for ax in self.axis]
        if self.my_dtype.is_complex:
            self.gamma_r = tf.Variable(
                name='gamma_r',
                initial_value=self.gamma_initializer(shape=tuple(desired_shape), dtype=self.my_dtype),
                trainable=True
            )
            self.gamma_i = tf.Variable(
                name='gamma_i',
                initial_value=Zeros()(shape=tuple(desired_shape), dtype=self.my_dtype),
                trainable=True
            )   # Only scaling with gamma is needed, so the imaginary part defaults to zero.
            self.beta_r = tf.Variable(
                name="beta_r",
                initial_value=self.beta_initializer(shape=desired_shape, dtype=self.my_dtype),
                trainable=True
            )
            self.beta_i = tf.Variable(
                name="beta_i",
                initial_value=self.beta_initializer(shape=desired_shape, dtype=self.my_dtype),
                trainable=True
            )
            self.moving_mean = tf.Variable(
                name='moving_mean',
                initial_value=tf.complex(real=self.moving_mean_initializer(shape=desired_shape,
                                                                           dtype=self.my_dtype),
                                         imag=self.moving_mean_initializer(shape=desired_shape,
                                                                           dtype=self.my_dtype)),
                trainable=False
            )
            self.moving_var = tf.Variable(
                name='moving_var',
                initial_value=tf.eye(2) * self.moving_variance_initializer(shape=tuple(desired_shape) + (2, 2),
                                                                           dtype=self.my_dtype) / tf.math.sqrt(2.),
                trainable=False
            )
        else:
            self.gamma = tf.Variable(
                name='gamma',
                initial_value=self.gamma_initializer(shape=tuple(desired_shape), dtype=self.my_dtype),
                trainable=True
            )
            self.beta = tf.Variable(
                name="beta",
                initial_value=self.beta_initializer(shape=desired_shape, dtype=self.my_dtype),
                trainable=True
            )
            self.moving_mean = tf.Variable(
                name='moving_mean',
                initial_value=self.moving_mean_initializer(shape=desired_shape, dtype=self.my_dtype),
                trainable=False
            )
            self.moving_var = tf.Variable(
                name='moving_var',
                initial_value=tf.eye(2, dtype=self.my_dtype) * self.moving_variance_initializer(
                    shape=tuple(desired_shape) + (2, 2),
                    dtype=self.my_dtype),
                trainable=False
            )

    def call(self, inputs, training=None):
        if inputs.dtype != self.my_dtype:
            tf.print(f"Warning: Expecting input dtype {self.my_dtype} but got {inputs.dtype}. "
                     f"Automatic cast will be done.")
            inputs = tf.cast(inputs, dtype=self.my_dtype)
        if training is None:
            training = K.learning_phase()
            tf.print(f"Training was None and now is {training}")
            # Debug aid: it is unclear WHEN this happens; we trust
            # K.learning_phase() to return a correct boolean.
        if training:
            # Batch statistics: mean and the 2x2 (real, imag) covariance.
            mean = tf.math.reduce_mean(inputs, axis=self.used_axis)
            if self.cov_method == 1:
                X_20 = tf.concat((tf.math.real(inputs), tf.math.imag(inputs)), axis=-1)
                var_20_20 = tfp.stats.covariance(X_20, sample_axis=self.used_axis, event_axis=-1)
                valu = int(var_20_20.shape[-1] / 2)
                indices = [([[i, i], [i, i + valu]], [[i + valu, i], [i + valu, i + valu]]) for i in range(0, valu)]
                var = tf.gather_nd(var_20_20, indices=indices)
            elif self.cov_method == 2:
                X_10_2 = tf.stack((tf.math.real(inputs), tf.math.imag(inputs)), axis=-1)
                var = tfp.stats.covariance(X_10_2, sample_axis=self.used_axis, event_axis=-1)
            else:
                # BUGFIX: previously read the non-existent attribute self.method,
                # which raised AttributeError instead of the intended ValueError.
                raise ValueError(f"Method {self.cov_method} not implemented")
            # Update the moving statistics with the batch statistics.
            self.moving_mean.assign(self.momentum * self.moving_mean + (1. - self.momentum) * mean)
            self.moving_var.assign(self.moving_var * self.momentum + var * (1. - self.momentum))
            out = self._normalize(inputs, var, mean)
        else:
            out = self._normalize(inputs, self.moving_var, self.moving_mean)
        if self.scale:
            if self.my_dtype.is_complex:
                gamma = tf.complex(self.gamma_r, self.gamma_i)  # TODO: Should this be real valued?
            else:
                gamma = self.gamma
            out = gamma * out
        if self.center:
            if self.my_dtype.is_complex:
                beta = tf.complex(self.beta_r, self.beta_i)
            else:
                beta = self.beta
            out = out + beta
        return out

    def _normalize(self, inputs, var, mean):
        """
        Whiten ``inputs`` with the given statistics.

        :param inputs: Tensor to normalise.
        :param var: Tensor of shape [..., 2, 2]; if inputs dtype is real, var[slice] = [[var_slice, 0], [0, 0]].
        :param mean: Tensor with the mean in the corresponding dtype (same shape as inputs).
        """
        complex_zero_mean = inputs - mean
        # Inv and sqrtm act on the 2 innermost dimensions [..., M, M], so [..., 2, 2] here.
        inv_sqrt_var = tf.linalg.sqrtm(tf.linalg.inv(var + self.epsilon_matrix))  # var^(-1/2)
        # Separate real and imag so shape goes from [...] to [..., 2].
        zero_mean = tf.stack((tf.math.real(complex_zero_mean), tf.math.imag(complex_zero_mean)), axis=-1)
        # Expand dims: [..., 2, 2] @ [..., 2, 1] -> [..., 2, 1].
        inputs_hat = tf.matmul(inv_sqrt_var, tf.expand_dims(zero_mean, axis=-1))
        # Drop the trailing 1: [..., 2, 1] -> [..., 2]. Use reshape rather than
        # squeeze so a genuine size-1 channel axis is not accidentally removed.
        squeeze_inputs_hat = tf.reshape(inputs_hat, shape=tf.shape(inputs_hat)[:-1])
        # Recombine into a complex tensor.
        complex_inputs_hat = tf.cast(tf.complex(squeeze_inputs_hat[..., 0], squeeze_inputs_hat[..., 1]),
                                     dtype=self.my_dtype)
        return complex_inputs_hat

    def get_real_equivalent(self):
        # BUGFIX: pass the scalar epsilon (previously a row of epsilon_matrix was
        # passed, producing a malformed epsilon in the twin layer) and preserve
        # the covariance method.
        return ComplexBatchNormalization(axis=self.axis, momentum=self.momentum, center=self.center,
                                         scale=self.scale, epsilon=self.epsilon,
                                         beta_initializer=self.beta_initializer,
                                         gamma_initializer=self.gamma_initializer, dtype=self.my_dtype,
                                         moving_mean_initializer=self.moving_mean_initializer,
                                         moving_variance_initializer=self.moving_variance_initializer,
                                         cov_method=self.cov_method)

    def get_config(self):
        """Serialise all constructor arguments so from_config() can round-trip."""
        config = super(ComplexBatchNormalization, self).get_config()
        config.update({
            'axis': self.axis,
            'momentum': self.momentum,
            'center': self.center,
            'scale': self.scale,
            'epsilon': self.epsilon,
            'cov_method': self.cov_method,
            'beta_initializer': self.beta_initializer,
            'gamma_initializer': self.gamma_initializer,
            'dtype': self.my_dtype,
            'moving_mean_initializer': self.moving_mean_initializer,
            'moving_variance_initializer': self.moving_variance_initializer
        })
        return config
| 29,931 | 49.560811 | 135 | py |
cvnn | cvnn-master/cvnn/layers/upsampling.py | import tensorflow as tf
from tensorflow.keras import backend
from tensorflow.keras.layers import UpSampling2D
from typing import Optional, Union, Tuple
from cvnn.layers.core import ComplexLayer
from cvnn.layers.core import DEFAULT_COMPLEX_TYPE
class ComplexUpSampling2D(UpSampling2D, ComplexLayer):
    """Complex-valued UpSampling2D: real and imaginary parts are resized independently."""

    def __init__(self, size: Union[int, Tuple[int, int]] = (2, 2),
                 data_format: Optional[str] = None, interpolation: str = 'nearest',
                 align_corners: bool = False, dtype=DEFAULT_COMPLEX_TYPE, **kwargs):
        """
        :param size: Int, or tuple of 2 integers. The upsampling factors for rows and columns.
        :param data_format: string, one of channels_last (default) or channels_first.
            The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape
            (batch_size, height, width, channels) while channels_first corresponds to inputs with shape
            (batch_size, channels, height, width).
        :param interpolation: A string, one of nearest or bilinear.
        :param align_corners: if True, the corner pixels of the input and output tensors are aligned,
            and thus preserving the values at those pixels.
            Example of align corners: https://discuss.pytorch.org/t/what-we-should-use-align-corners-false/22663/9
            NOTE(review): this flag is stored but backend.resize_images does not consume it —
            confirm whether corner alignment is actually honoured.
        """
        self.factor_upsample = size
        self.align_corners = align_corners
        self.my_dtype = tf.dtypes.as_dtype(dtype)
        super(ComplexUpSampling2D, self).__init__(size=size, data_format=data_format, interpolation=interpolation,
                                                  dtype=self.my_dtype.real_dtype, **kwargs)

    def call(self, inputs):
        # Resize real and imaginary parts separately, then recombine.
        result = tf.complex(
            backend.resize_images(tf.math.real(inputs), self.size[0], self.size[1], self.data_format,
                                  interpolation=self.interpolation),
            backend.resize_images(tf.math.imag(inputs), self.size[0], self.size[1], self.data_format,
                                  interpolation=self.interpolation),
        )
        # Integer inputs cannot hold the interpolated values; promote them to float32.
        casted_value = inputs.dtype if not inputs.dtype.is_integer else tf.float32
        return tf.cast(result, dtype=casted_value)

    def get_real_equivalent(self):
        """Return a real-dtype twin of this layer with the same geometry settings."""
        return ComplexUpSampling2D(size=self.factor_upsample, data_format=self.data_format,
                                   interpolation=self.interpolation, align_corners=self.align_corners,
                                   dtype=self.my_dtype.real_dtype)

    def get_config(self):
        config = super(ComplexUpSampling2D, self).get_config()
        # BUGFIX: the parent config already serialises `size`; the synthetic
        # `factor_upsample` key emitted before broke from_config() round-trips
        # because __init__ has no such argument.
        config.update({
            'dtype': self.my_dtype,
            'align_corners': self.align_corners
        })
        return config
if __name__ == "__main__":
    # Smoke test: upsample a complex identity-like image and report the result shape.
    image = tf.constant([
        [1., 0., 0, 0, 0],
        [0, 1., 0, 0, 0],
        [0, 0, 1., 0, 0],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
    ])
    image = tf.complex(image, image)
    image = image[tf.newaxis, ..., tf.newaxis]
    result = ComplexUpSampling2D([3, 5])(image)
    # BUGFIX: removed leftover `pdb.set_trace()` debugger breakpoint.
    print(result.shape)
| 3,007 | 43.895522 | 114 | py |
cvnn | cvnn-master/cvnn/layers/__init__.py | # https://stackoverflow.com/questions/24100558/how-can-i-split-a-module-into-multiple-files-without-breaking-a-backwards-compa/24100645
from cvnn.layers.pooling import ComplexMaxPooling2D, ComplexAvgPooling2D, ComplexAvgPooling3D, ComplexPolarAvgPooling2D
from cvnn.layers.pooling import ComplexUnPooling2D, ComplexMaxPooling2DWithArgmax, ComplexAvgPooling1D
from cvnn.layers.convolutional import ComplexConv2D, ComplexConv1D, ComplexConv3D
from cvnn.layers.convolutional import ComplexConv2DTranspose
from cvnn.layers.core import ComplexInput, ComplexDense, ComplexFlatten, ComplexDropout, complex_input
from cvnn.layers.upsampling import ComplexUpSampling2D
from cvnn.layers.core import ComplexBatchNormalization
__author__ = 'J. Agustin BARRACHINA'
__copyright__ = 'Copyright 2020, {project_name}'
__credits__ = ['{credit_list}']
__license__ = '{license}'
__version__ = '1.0.15'
__maintainer__ = 'J. Agustin BARRACHINA'
__email__ = 'joseagustin.barra@gmail.com; jose-agustin.barrachina@centralesupelec.fr'
__status__ = '{dev_status}'
| 1,039 | 53.736842 | 135 | py |
eco-dqn | eco-dqn-master/src/__init__.py | 0 | 0 | 0 | py | |
eco-dqn | eco-dqn-master/src/networks/mpnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class MPNN(nn.Module):
    """Message-passing network over spin-system observations.

    The observation packs, per node, `n_obs_in` feature columns followed by one
    adjacency column per node. After `n_layers` rounds of message passing a
    readout head emits one scalar (Q-value) per node.
    """

    def __init__(self,
                 n_obs_in=7,
                 n_layers=3,
                 n_features=64,
                 tied_weights=False,
                 n_hid_readout=[],):
        """
        :param n_obs_in: Number of raw per-node observation features.
        :param n_layers: Number of message-passing rounds.
        :param n_features: Embedding width.
        :param tied_weights: If True, one update layer is shared across rounds.
        :param n_hid_readout: Hidden widths of the readout MLP.
        """
        super().__init__()

        self.n_obs_in = n_obs_in
        self.n_layers = n_layers
        self.n_features = n_features
        self.tied_weights = tied_weights

        self.node_init_embedding_layer = nn.Sequential(
            nn.Linear(n_obs_in, n_features, bias=False),
            nn.ReLU()
        )

        self.edge_embedding_layer = EdgeAndNodeEmbeddingLayer(n_obs_in, n_features)

        if self.tied_weights:
            self.update_node_embedding_layer = UpdateNodeEmbeddingLayer(n_features)
        else:
            self.update_node_embedding_layer = nn.ModuleList(
                [UpdateNodeEmbeddingLayer(n_features) for _ in range(self.n_layers)])

        self.readout_layer = ReadoutLayer(n_features, n_hid_readout)

    @torch.no_grad()
    def get_normalisation(self, adj):
        # Per-node degree (count of incident non-zero edges), clamped to 1 so
        # isolated nodes do not cause a divide-by-zero when averaging.
        norm = torch.sum((adj != 0), dim=1).unsqueeze(-1)
        norm[norm == 0] = 1
        return norm.float()

    def forward(self, obs):
        if obs.dim() == 2:
            obs = obs.unsqueeze(0)

        # BUGFIX: use the out-of-place transpose; the original transpose_()
        # silently mutated the caller's observation tensor.
        obs = obs.transpose(-1, -2)

        # Calculate features to be used in the MPNN
        node_features = obs[:, :, 0:self.n_obs_in]

        # Get graph adj matrix.
        adj = obs[:, :, self.n_obs_in:]

        norm = self.get_normalisation(adj)

        init_node_embeddings = self.node_init_embedding_layer(node_features)
        edge_embeddings = self.edge_embedding_layer(node_features, adj, norm)

        # Initialise embeddings.
        current_node_embeddings = init_node_embeddings

        if self.tied_weights:
            for _ in range(self.n_layers):
                current_node_embeddings = self.update_node_embedding_layer(current_node_embeddings,
                                                                           edge_embeddings,
                                                                           norm,
                                                                           adj)
        else:
            for i in range(self.n_layers):
                current_node_embeddings = self.update_node_embedding_layer[i](current_node_embeddings,
                                                                              edge_embeddings,
                                                                              norm,
                                                                              adj)

        out = self.readout_layer(current_node_embeddings)
        out = out.squeeze()

        return out
class EdgeAndNodeEmbeddingLayer(nn.Module):
    """Embeds every edge (weight + destination-node features) and pools the
    embeddings per node, appending the scaled node degree."""

    def __init__(self, n_obs_in, n_features):
        super().__init__()
        self.n_obs_in = n_obs_in
        self.n_features = n_features
        # NOTE: module creation order fixes the parameter-initialisation order.
        self.edge_embedding_NN = nn.Linear(int(n_obs_in + 1), n_features - 1, bias=False)
        self.edge_feature_NN = nn.Linear(n_features, n_features, bias=False)

    def forward(self, node_features, adj, norm):
        # Pair every edge weight with the destination node's raw features:
        # raw_edges[b, i, j] = [w_ij, features_j].
        weights = adj.unsqueeze(-1)
        neighbour_feats = node_features.unsqueeze(-2).transpose(-2, -3).repeat(1, adj.shape[-2], 1, 1)
        raw_edges = torch.cat([weights, neighbour_feats], dim=-1)
        # Mask out entries that do not correspond to an actual edge.
        raw_edges = raw_edges * (weights != 0).float()

        # Flatten the (node, node) grid, embed each edge, then restore the grid.
        batch = raw_edges.shape[0]
        n_nodes = raw_edges.shape[1]
        flat_edges = torch.reshape(raw_edges, (batch, n_nodes * n_nodes, raw_edges.shape[-1]))
        flat_embedded = F.relu(self.edge_embedding_NN(flat_edges))
        embedded = torch.reshape(flat_embedded,
                                 (adj.shape[0], adj.shape[1], adj.shape[1], self.n_features - 1))

        # Mean over incident edges, then append the (max-scaled) degree.
        pooled = embedded.sum(dim=2) / norm
        return F.relu(self.edge_feature_NN(torch.cat([pooled, norm / norm.max()], dim=-1)))
class UpdateNodeEmbeddingLayer(nn.Module):
    """One round of message passing: aggregate neighbour embeddings, form a
    message with the edge embeddings, then update each node embedding."""

    def __init__(self, n_features):
        super().__init__()
        self.message_layer = nn.Linear(2 * n_features, n_features, bias=False)
        self.update_layer = nn.Linear(2 * n_features, n_features, bias=False)

    def forward(self, current_node_embeddings, edge_embeddings, norm, adj):
        # Degree-normalised mean of neighbouring node embeddings.
        aggregated = torch.matmul(adj, current_node_embeddings) / norm
        # Combine the aggregate with the static edge embeddings into a message...
        message = F.relu(self.message_layer(torch.cat([aggregated, edge_embeddings], dim=-1)))
        # ...and fold the message back into each node's embedding.
        return F.relu(self.update_layer(torch.cat([current_node_embeddings, message], dim=-1)))
class ReadoutLayer(nn.Module):
    """Maps per-node embeddings to one scalar per node, conditioning every node
    on a pooled (graph-level) representation."""

    def __init__(self, n_features, n_hid=[], bias_pool=False, bias_readout=True):
        super().__init__()

        self.layer_pooled = nn.Linear(int(n_features), int(n_features), bias=bias_pool)

        if type(n_hid) != list:
            n_hid = [n_hid]
        # Readout input is [pooled || local] (2*n_features); output is a scalar.
        widths = [2 * n_features] + n_hid + [1]

        readout = []
        for width_in, width_out in zip(widths, widths[1:]):
            readout.append(nn.Linear(width_in, width_out, bias=bias_readout))
        self.layers_readout = nn.ModuleList(readout)

    def forward(self, node_embeddings):
        f_local = node_embeddings

        # Graph summary: mean-pool the nodes, project, then broadcast the same
        # pooled vector back to every node position.
        h_pooled = self.layer_pooled(node_embeddings.sum(dim=1) / node_embeddings.shape[1])
        f_pooled = h_pooled.repeat(1, 1, node_embeddings.shape[1]).view(node_embeddings.shape)

        features = F.relu(torch.cat([f_pooled, f_local], dim=-1))
        last = len(self.layers_readout) - 1
        for i, layer in enumerate(self.layers_readout):
            features = layer(features)
            # No activation after the final (scalar-output) layer.
            if i < last:
                features = F.relu(features)
        return features
eco-dqn | eco-dqn-master/src/networks/__init__.py | 0 | 0 | 0 | py | |
eco-dqn | eco-dqn-master/src/envs/core.py | from src.envs.spinsystem import SpinSystemFactory
def make(id, *args, **kwargs):
    """Instantiate an environment by name; only "SpinSystem" is recognised.

    Extra positional/keyword arguments are forwarded to SpinSystemFactory.get.
    Raises NotImplementedError for any unknown id.
    """
    if id != "SpinSystem":
        raise NotImplementedError()
    return SpinSystemFactory.get(*args, **kwargs)
eco-dqn | eco-dqn-master/src/envs/spinsystem.py | from abc import ABC, abstractmethod
from collections import namedtuple
from operator import matmul
import numpy as np
import torch.multiprocessing as mp
from numba import jit, float64, int64
from src.envs.utils import (EdgeType,
RewardSignal,
ExtraAction,
OptimisationTarget,
Observable,
SpinBasis,
DEFAULT_OBSERVABLES,
GraphGenerator,
RandomGraphGenerator,
HistoryBuffer)
# A container for get_result function below. Works just like tuple, but prettier.
ActionResult = namedtuple("action_result", ("snapshot","observation","reward","is_done","info"))
class SpinSystemFactory(object):
    '''
    Factory class for returning new SpinSystem.
    '''

    @staticmethod
    def get(graph_generator=None,
            max_steps=20,
            observables=DEFAULT_OBSERVABLES,
            reward_signal=RewardSignal.DENSE,
            extra_action=ExtraAction.PASS,
            optimisation_target=OptimisationTarget.ENERGY,
            spin_basis=SpinBasis.SIGNED,
            norm_rewards=False,
            memory_length=None,  # None means an infinite memory.
            horizon_length=None,  # None means an infinite horizon.
            stag_punishment=None,  # None means no punishment for re-visiting states.
            basin_reward=None,  # None means no reward for reaching a local minima.
            reversible_spins=True,  # Whether the spins can be flipped more than once (i.e. True-->Georgian MDP).
            init_snap=None,
            seed=None):
        """Build a biased or unbiased SpinSystem, chosen by the graph generator."""
        system_cls = SpinSystemBiased if graph_generator.biased else SpinSystemUnbiased
        return system_cls(graph_generator, max_steps,
                          observables, reward_signal, extra_action, optimisation_target, spin_basis,
                          norm_rewards, memory_length, horizon_length, stag_punishment, basin_reward,
                          reversible_spins,
                          init_snap, seed)
class SpinSystemBase(ABC):
'''
SpinSystemBase implements the functionality of a SpinSystem that is common to both
biased and unbiased systems. Methods that require significant enough changes between
these two case to not readily be served by an 'if' statement are left abstract, to be
implemented by a specialised subclass.
'''
# Note these are defined at the class level of SpinSystem to ensure that SpinSystem
# can be pickled.
class action_space():
def __init__(self, n_actions):
self.n = n_actions
self.actions = np.arange(self.n)
def sample(self, n=1):
return np.random.choice(self.actions, n)
class observation_space():
def __init__(self, n_spins, n_observables):
self.shape = [n_spins, n_observables]
def __init__(self,
graph_generator=None,
max_steps=20,
observables=DEFAULT_OBSERVABLES,
reward_signal = RewardSignal.DENSE,
extra_action = ExtraAction.PASS,
optimisation_target=OptimisationTarget.ENERGY,
spin_basis=SpinBasis.SIGNED,
norm_rewards=False,
memory_length=None, # None means an infinite memory.
horizon_length=None, # None means an infinite horizon.
stag_punishment=None,
basin_reward=None,
reversible_spins=False,
init_snap=None,
seed=None):
'''
Init method.
Args:
graph_generator: A GraphGenerator (or subclass thereof) object.
max_steps: Maximum number of steps before termination.
reward_signal: RewardSignal enum determining how and when rewards are returned.
extra_action: ExtraAction enum determining if and what additional action is allowed,
beyond simply flipping spins.
init_snap: Optional snapshot to load spin system into pre-configured state for MCTS.
seed: Optional random seed.
'''
if seed != None:
np.random.seed(seed)
# Ensure first observable is the spin state.
# This allows us to access the spins as self.state[0,:self.n_spins.]
assert observables[0] == Observable.SPIN_STATE, "First observable must be Observation.SPIN_STATE."
self.observables = list(enumerate(observables))
self.extra_action = extra_action
if graph_generator!=None:
assert isinstance(graph_generator,GraphGenerator), "graph_generator must be a GraphGenerator implementation."
self.gg = graph_generator
else:
# provide a default graph generator if one is not passed
self.gg = RandomGraphGenerator(n_spins=20,
edge_type=EdgeType.DISCRETE,
biased=False,
extra_action=(extra_action!=extra_action.NONE))
self.n_spins = self.gg.n_spins # Total number of spins in episode
self.max_steps = max_steps # Number of actions before reset
self.reward_signal = reward_signal
self.norm_rewards = norm_rewards
self.n_actions = self.n_spins
if extra_action != ExtraAction.NONE:
self.n_actions+=1
self.action_space = self.action_space(self.n_actions)
self.observation_space = self.observation_space(self.n_spins, len(self.observables))
self.current_step = 0
if self.gg.biased:
self.matrix, self.bias = self.gg.get()
else:
self.matrix = self.gg.get()
self.bias = None
self.optimisation_target = optimisation_target
self.spin_basis = spin_basis
self.memory_length = memory_length
self.horizon_length = horizon_length if horizon_length is not None else self.max_steps
self.stag_punishment = stag_punishment
self.basin_reward = basin_reward
self.reversible_spins = reversible_spins
self.reset()
self.score = self.calculate_score()
if self.reward_signal == RewardSignal.SINGLE:
self.init_score = self.score
self.best_score = self.score
self.best_spins = self.state[0,:]
if init_snap != None:
self.load_snapshot(init_snap)
def reset(self, spins=None):
"""
Explanation here
"""
self.current_step = 0
if self.gg.biased:
# self.matrix, self.bias = self.gg.get(with_padding=(self.extra_action != ExtraAction.NONE))
self.matrix, self.bias = self.gg.get()
else:
# self.matrix = self.gg.get(with_padding=(self.extra_action != ExtraAction.NONE))
self.matrix = self.gg.get()
self._reset_graph_observables()
spinsOne = np.array([1] * self.n_spins)
local_rewards_available = self.get_immeditate_rewards_avaialable(spinsOne)
local_rewards_available = local_rewards_available[np.nonzero(local_rewards_available)]
if local_rewards_available.size == 0:
# We've generated an empty graph, this is pointless, try again.
self.reset()
else:
self.max_local_reward_available = np.max(local_rewards_available)
self.state = self._reset_state(spins)
self.score = self.calculate_score()
if self.reward_signal == RewardSignal.SINGLE:
self.init_score = self.score
self.best_score = self.score
self.best_obs_score = self.score
self.best_spins = self.state[0, :self.n_spins].copy()
self.best_obs_spins = self.state[0, :self.n_spins].copy()
if self.memory_length is not None:
self.score_memory = np.array([self.best_score] * self.memory_length)
self.spins_memory = np.array([self.best_spins] * self.memory_length)
self.idx_memory = 1
self._reset_graph_observables()
if self.stag_punishment is not None or self.basin_reward is not None:
self.history_buffer = HistoryBuffer()
return self.get_observation()
def _reset_graph_observables(self):
# Reset observed adjacency matrix
if self.extra_action != self.extra_action.NONE:
# Pad adjacency matrix for disconnected extra-action spins of value 0.
self.matrix_obs = np.zeros((self.matrix.shape[0] + 1, self.matrix.shape[0] + 1))
self.matrix_obs [:-1, :-1] = self.matrix
else:
self.matrix_obs = self.matrix
# Reset observed bias vector,
if self.gg.biased:
if self.extra_action != self.extra_action.NONE:
# Pad bias for disconnected extra-action spins of value 0.
self.bias_obs = np.concatenate((self.bias, [0]))
else:
self.bias_obs = self.bias
def _reset_state(self, spins=None):
state = np.zeros((self.observation_space.shape[1], self.n_actions))
if spins is None:
if self.reversible_spins:
# For reversible spins, initialise randomly to {+1,-1}.
state[0, :self.n_spins] = 2 * np.random.randint(2, size=self.n_spins) - 1
else:
# For irreversible spins, initialise all to +1 (i.e. allowed to be flipped).
state[0, :self.n_spins] = 1
else:
state[0, :] = self._format_spins_to_signed(spins)
state = state.astype('float')
# If any observables other than "immediate energy available" require setting to values other than
# 0 at this stage, we should use a 'for k,v in enumerate(self.observables)' loop.
for idx, obs in self.observables:
if obs==Observable.IMMEDIATE_REWARD_AVAILABLE:
state[idx, :self.n_spins] = self.get_immeditate_rewards_avaialable(spins=state[0, :self.n_spins]) / self.max_local_reward_available
elif obs==Observable.NUMBER_OF_GREEDY_ACTIONS_AVAILABLE:
immeditate_rewards_avaialable = self.get_immeditate_rewards_avaialable(spins=state[0, :self.n_spins])
state[idx, :self.n_spins] = 1 - np.sum(immeditate_rewards_avaialable <= 0) / self.n_spins
return state
def _get_spins(self, basis=SpinBasis.SIGNED):
spins = self.state[0, :self.n_spins]
if basis == SpinBasis.SIGNED:
pass
elif basis == SpinSystemBiased:
# convert {1,-1} --> {0,1}
spins[0, :] = (1 - spins[0, :]) / 2
else:
raise NotImplementedError("Unrecognised SpinBasis")
return spins
def calculate_best_energy(self):
if self.n_spins <= 10:
# Generally, for small systems the time taken to start multiple processes is not worth it.
res = self.calculate_best_brute()
else:
# Start up processing pool
n_cpu = int(mp.cpu_count()) / 2
pool = mp.Pool(mp.cpu_count())
# Split up state trials across the number of cpus
iMax = 2 ** (self.n_spins)
args = np.round(np.linspace(0, np.ceil(iMax / n_cpu) * n_cpu, n_cpu + 1))
arg_pairs = [list(args) for args in zip(args, args[1:])]
# Try all the states.
# res = pool.starmap(self._calc_over_range, arg_pairs)
try:
res = pool.starmap(self._calc_over_range, arg_pairs)
# Return the best solution,
idx_best = np.argmin([e for e, s in res])
res = res[idx_best]
except Exception as e:
# Falling back to single-thread implementation.
# res = self.calculate_best_brute()
res = self._calc_over_range(0, 2 ** (self.n_spins))
finally:
# No matter what happens, make sure we tidy up after outselves.
pool.close()
if self.spin_basis == SpinBasis.BINARY:
# convert {1,-1} --> {0,1}
best_score, best_spins = res
best_spins = (1 - best_spins) / 2
res = best_score, best_spins
if self.optimisation_target == OptimisationTarget.CUT:
best_energy, best_spins = res
best_cut = self.calculate_cut(best_spins)
res = best_cut, best_spins
elif self.optimisation_target == OptimisationTarget.ENERGY:
pass
else:
raise NotImplementedError()
return res
    def seed(self, seed):
        # NOTE(review): this returns `self.seed`, which is this bound method
        # itself until set_seed() has run (set_seed assigns the attribute
        # `self.seed = seed`, shadowing this method). The `seed` argument is
        # ignored here — confirm the intended gym-style semantics.
        return self.seed
    def set_seed(self, seed):
        # Store the seed (note: this attribute assignment shadows the `seed`
        # method defined on the class) and seed numpy's global RNG.
        self.seed = seed
        np.random.seed(seed)
    def step(self, action):
        """Apply one action and return (observation, reward, done, info).

        `action` is a spin index to flip, or n_spins for the extra action
        (PASS / RANDOMISE) when one is configured.

        NOTE(review): the `RewardSignal.SINGLE and done` branch below can never
        fire — `done` is still False at that point and is only set in section 4;
        confirm whether the single terminal reward was ever emitted as intended.
        """
        done = False

        rew = 0 # Default reward to zero.
        randomised_spins = False
        self.current_step += 1

        if self.current_step > self.max_steps:
            print("The environment has already returned done. Stop it!")
            raise NotImplementedError

        new_state = np.copy(self.state)

        ############################################################
        # 1. Performs the action and calculates the score change. #
        ############################################################

        if action==self.n_spins:
            # The chosen action is the extra action (beyond flipping any spin).
            if self.extra_action == ExtraAction.PASS:
                delta_score = 0
            if self.extra_action == ExtraAction.RANDOMISE:
                # Randomise the spin configuration.
                randomised_spins = True
                random_actions = np.random.choice([1, -1], self.n_spins)
                new_state[0, :] = self.state[0, :] * random_actions
                new_score = self.calculate_score(new_state[0, :])
                delta_score = new_score - self.score
                self.score = new_score
        else:
            # Perform the action and calculate the score change.
            new_state[0,action] = -self.state[0,action]

            if self.gg.biased:
                delta_score = self._calculate_score_change(new_state[0,:self.n_spins], self.matrix, self.bias, action)
            else:
                delta_score = self._calculate_score_change(new_state[0,:self.n_spins], self.matrix, action)
            self.score += delta_score

        #############################################################################################
        # 2. Calculate reward for action and update any memory buffers.                             #
        #   a) Calculate reward (always w.r.t best observable score).                               #
        #   b) If new global best has been found: update best ever score and spin parameters.       #
        #   c) If the memory buffer is finite (i.e. self.memory_length is not None):                #
        #          - Add score/spins to their respective buffers.                                   #
        #          - Update best observable score and spins w.r.t. the new buffers.                 #
        #      else (if the memory is infinite):                                                    #
        #          - If new best has been found: update best observable score and spin parameters.  #
        #############################################################################################

        self.state = new_state
        immeditate_rewards_avaialable = self.get_immeditate_rewards_avaialable()

        if self.score > self.best_obs_score:
            if self.reward_signal == RewardSignal.BLS:
                rew = self.score - self.best_obs_score
            elif self.reward_signal == RewardSignal.CUSTOM_BLS:
                rew = self.score - self.best_obs_score
                rew = rew / (rew + 0.1)

        if self.reward_signal == RewardSignal.DENSE:
            rew = delta_score
        elif self.reward_signal == RewardSignal.SINGLE and done:
            rew = self.score - self.init_score

        if self.norm_rewards:
            rew /= self.n_spins

        if self.stag_punishment is not None or self.basin_reward is not None:
            visiting_new_state = self.history_buffer.update(action)

        if self.stag_punishment is not None:
            if not visiting_new_state:
                rew -= self.stag_punishment

        if self.basin_reward is not None:
            if np.all(immeditate_rewards_avaialable <= 0):
                # No immediate score improvement is available <--> local minimum.
                if visiting_new_state:
                    rew += self.basin_reward

        if self.score > self.best_score:
            self.best_score = self.score
            self.best_spins = self.state[0, :self.n_spins].copy()

        if self.memory_length is not None:
            # For case of finite memory length.
            self.score_memory[self.idx_memory] = self.score
            self.spins_memory[self.idx_memory] = self.state[0, :self.n_spins]
            self.idx_memory = (self.idx_memory + 1) % self.memory_length
            self.best_obs_score = self.score_memory.max()
            self.best_obs_spins = self.spins_memory[self.score_memory.argmax()].copy()
        else:
            self.best_obs_score = self.best_score
            self.best_obs_spins = self.best_spins.copy()

        #############################################################################################
        # 3. Updates the state of the system (except self.state[0,:] as this is always the spin     #
        #    configuration and has already been done.                                               #
        #   a) Update self.state local features to reflect the chosen action.                       #
        #   b) Update global features in self.state (always w.r.t. best observable score/spins)     #
        #############################################################################################

        for idx, observable in self.observables:

            ### Local observables ###
            if observable==Observable.IMMEDIATE_REWARD_AVAILABLE:
                self.state[idx, :self.n_spins] = immeditate_rewards_avaialable / self.max_local_reward_available

            elif observable==Observable.TIME_SINCE_FLIP:
                self.state[idx, :] += (1. / self.max_steps)
                if randomised_spins:
                    # Reset the clock only for the spins the randomisation flipped.
                    self.state[idx, :] = self.state[idx, :] * (random_actions > 0)
                else:
                    self.state[idx, action] = 0

            ### Global observables ###
            elif observable==Observable.EPISODE_TIME:
                self.state[idx, :] += (1. / self.max_steps)

            elif observable==Observable.TERMINATION_IMMANENCY:
                # Update 'Immanency of episode termination'
                self.state[idx, :] = max(0, ((self.current_step - self.max_steps) / self.horizon_length) + 1)

            elif observable==Observable.NUMBER_OF_GREEDY_ACTIONS_AVAILABLE:
                self.state[idx, :] = 1 - np.sum(immeditate_rewards_avaialable <= 0) / self.n_spins

            elif observable==Observable.DISTANCE_FROM_BEST_SCORE:
                self.state[idx, :] = np.abs(self.score - self.best_obs_score) / self.max_local_reward_available

            elif observable==Observable.DISTANCE_FROM_BEST_STATE:
                self.state[idx, :self.n_spins] = np.count_nonzero(self.best_obs_spins[:self.n_spins] - self.state[0, :self.n_spins])

        #############################################################################################
        # 4. Check termination criteria.                                                            #
        #############################################################################################
        if self.current_step == self.max_steps:
            # Maximum number of steps taken --> done.
            done = True

        if not self.reversible_spins:
            if len((self.state[0, :self.n_spins] > 0).nonzero()[0]) == 0:
                # If no more spins to flip --> done.
                done = True

        return (self.get_observation(), rew, done, None)
def get_observation(self):
    """Return the full observation: per-spin state features stacked on top
    of the (pre-computed) matrix observation and, for biased graphs, the
    bias observation."""
    obs = self.state.copy()
    if self.spin_basis == SpinBasis.BINARY:
        # Map signed spins {1,-1} onto the binary basis {0,1}.
        obs[0, :] = (1 - obs[0, :]) / 2
    layers = [obs, self.matrix_obs]
    if self.gg.biased:
        layers.append(self.bias_obs)
    return np.vstack(layers)
def get_immeditate_rewards_avaialable(self, spins=None):
    """Return, per spin, the immediate reward obtained by flipping that spin.

    (Method name misspelling is kept for API compatibility with callers.)

    Parameters
    ----------
    spins : array-like, optional
        Spin configuration to evaluate; defaults to the system's current spins.

    Returns
    -------
    numpy array of per-spin reward deltas (negated energy deltas when the
    optimisation target is ENERGY, cut deltas when it is CUT).

    Raises
    ------
    NotImplementedError
        If the configured optimisation target is not recognised.
    """
    if spins is None:
        spins = self._get_spins()

    if self.optimisation_target == OptimisationTarget.ENERGY:
        # Rewards are negated energies.
        immediate_reward_function = lambda *args: -1 * self._get_immeditate_energies_avaialable_jit(*args)
    elif self.optimisation_target == OptimisationTarget.CUT:
        immediate_reward_function = self._get_immeditate_cuts_avaialable_jit
    else:
        # Bug fix: previously formatted the message with the non-existent
        # attribute `self.optimisation_ta`, which raised AttributeError
        # instead of the intended NotImplementedError.
        raise NotImplementedError("Optimisation target {} not recognised.".format(self.optimisation_target))

    # The jitted kernels require float64 inputs.
    spins = spins.astype('float64')
    matrix = self.matrix.astype('float64')
    if self.gg.biased:
        bias = self.bias.astype('float64')
        return immediate_reward_function(spins, matrix, bias)
    else:
        return immediate_reward_function(spins, matrix)
def get_allowed_action_states(self):
    """Return the spin value(s) on which an action may be taken.

    Reversible systems allow acting on spins in either state; irreversible
    systems only allow acting on spins that have not yet been flipped.
    """
    is_binary = self.spin_basis == SpinBasis.BINARY
    is_signed = self.spin_basis == SpinBasis.SIGNED

    if self.reversible_spins:
        # Both spin states are actionable.
        if is_binary:
            return (0, 1)
        elif is_signed:
            return (1, -1)
    else:
        # Only the initial (un-flipped) state is actionable.
        if is_binary:
            return 0
        if is_signed:
            return 1
def calculate_score(self, spins=None):
    """Return the score of ``spins``: the cut value, or the negated energy,
    depending on the configured optimisation target."""
    target = self.optimisation_target
    if target == OptimisationTarget.CUT:
        return self.calculate_cut(spins)
    if target == OptimisationTarget.ENERGY:
        return -1. * self.calculate_energy(spins)
    raise NotImplementedError
def _calculate_score_change(self, new_spins, matrix, action):
    """Return the score delta from having flipped spin ``action``
    (cut delta, or negated energy delta, per the optimisation target)."""
    target = self.optimisation_target
    if target == OptimisationTarget.CUT:
        return self._calculate_cut_change(new_spins, matrix, action)
    if target == OptimisationTarget.ENERGY:
        return -1. * self._calculate_energy_change(new_spins, matrix, action)
    raise NotImplementedError
def _format_spins_to_signed(self, spins):
    """Validate ``spins`` against the configured basis and return them in
    the signed {-1,+1} representation.

    Raises
    ------
    Exception
        If the values do not match the configured spin basis.
    """
    if self.spin_basis == SpinBasis.BINARY:
        if not np.isin(spins, [0, 1]).all():
            raise Exception("SpinSystem is configured for binary spins ([0,1]).")
        # Map {0,1} -> {-1,+1} for the calculation.
        return 2 * spins - 1
    if self.spin_basis == SpinBasis.SIGNED:
        if not np.isin(spins, [-1, 1]).all():
            raise Exception("SpinSystem is configured for signed spins ([-1,1]).")
    return spins
@abstractmethod
def calculate_energy(self, spins=None):
    """Return the energy of ``spins`` (the current spins if None).

    Implemented by the biased/unbiased subclasses below.
    """
    raise NotImplementedError
@abstractmethod
def calculate_cut(self, spins=None):
    """Return the cut value of ``spins`` (the current spins if None).

    Only defined for unbiased systems; the biased subclass raises.
    """
    raise NotImplementedError
@abstractmethod
def get_best_cut(self):
    """Return the best cut found so far (subclass hook; only meaningful
    when the optimisation target is CUT)."""
    raise NotImplementedError
@abstractmethod
def _calc_over_range(self, i0, iMax):
    """Brute-force evaluation over spin configurations with integer indices
    in [i0, iMax); subclasses return (best energy, best spins)."""
    raise NotImplementedError
@abstractmethod
def _calculate_energy_change(self, new_spins, matrix, action):
    """Return the energy delta from having flipped spin ``action``
    (``new_spins`` is the post-flip configuration). Subclass hook."""
    raise NotImplementedError
@abstractmethod
def _calculate_cut_change(self, new_spins, matrix, action):
    """Return the cut delta from having flipped spin ``action``
    (``new_spins`` is the post-flip configuration). Subclass hook."""
    raise NotImplementedError
##########
# Classes for implementing the calculation methods with/without biases.
##########
class SpinSystemUnbiased(SpinSystemBase):
    """Spin system whose energy depends on couplings only (no per-spin bias).

    The numerical kernels are numba-jitted static methods with eager
    float64 signatures; they must not reference ``self`` and require
    float64 inputs.
    """

    def calculate_energy(self, spins=None):
        """Return the Ising energy of ``spins`` (current spins if None)."""
        if spins is None:
            spins = self._get_spins()
        else:
            # Externally supplied spins are validated and converted to the
            # signed {-1,+1} basis.
            spins = self._format_spins_to_signed(spins)

        spins = spins.astype('float64')
        matrix = self.matrix.astype('float64')

        return self._calculate_energy_jit(spins, matrix)

    def calculate_cut(self, spins=None):
        """Return the cut value of ``spins`` (current spins if None)."""
        if spins is None:
            spins = self._get_spins()
        else:
            spins = self._format_spins_to_signed(spins)

        # Standard cut value: (1/4) * sum_ij M_ij * (1 - s_i * s_j).
        return (1/4) * np.sum( np.multiply( self.matrix, 1 - np.outer(spins, spins) ) )

    def get_best_cut(self):
        """Return the best observed score, interpreted as a cut value."""
        if self.optimisation_target==OptimisationTarget.CUT:
            return self.best_score
        else:
            raise NotImplementedError("Can't return best cut when optimisation target is set to energy.")

    def _calc_over_range(self, i0, iMax):
        # Enumerate candidate configurations i0..iMax-1 by interpreting each
        # integer's binary representation as a {0,1} string and mapping it
        # to signed spins {-1,+1}.
        list_spins = [2 * np.array([int(x) for x in list_string]) - 1
                      for list_string in
                      [list(np.binary_repr(i, width=self.n_spins))
                       for i in range(int(i0), int(iMax))]]
        matrix = self.matrix.astype('float64')
        return self.__calc_over_range_jit(list_spins, matrix)

    @staticmethod
    @jit(float64(float64[:],float64[:,:],int64), nopython=True)
    def _calculate_energy_change(new_spins, matrix, action):
        # Energy delta of the flip that produced `new_spins` at index `action`.
        return -2 * new_spins[action] * matmul(new_spins.T, matrix[:, action])

    @staticmethod
    @jit(float64(float64[:],float64[:,:],int64), nopython=True)
    def _calculate_cut_change(new_spins, matrix, action):
        # Cut delta of the flip that produced `new_spins` at index `action`.
        return -1 * new_spins[action] * matmul(new_spins.T, matrix[:, action])

    @staticmethod
    @jit(float64(float64[:],float64[:,:]), nopython=True)
    def _calculate_energy_jit(spins, matrix):
        # Ising energy: -(1/2) * s^T M s.
        return - matmul(spins.T, matmul(matrix, spins)) / 2

    @staticmethod
    @jit(parallel=True)
    def __calc_over_range_jit(list_spins, matrix):
        # Brute-force search for the minimum-energy configuration.
        energy = 1e50
        best_spins = None
        for spins in list_spins:
            spins = spins.astype('float64')
            # This is self._calculate_energy_jit without calling to the class or self so jit can do its thing.
            current_energy = - matmul(spins.T, matmul(matrix, spins)) / 2
            if current_energy < energy:
                energy = current_energy
                best_spins = spins
        return energy, best_spins

    @staticmethod
    @jit(float64[:](float64[:],float64[:,:]), nopython=True)
    def _get_immeditate_energies_avaialable_jit(spins, matrix):
        # Per-spin values 2 * s_i * (M s)_i, consumed by the reward logic.
        return 2 * spins * matmul(matrix, spins)

    @staticmethod
    @jit(float64[:](float64[:],float64[:,:]), nopython=True)
    def _get_immeditate_cuts_avaialable_jit(spins, matrix):
        # Per-spin values s_i * (M s)_i, consumed by the reward logic.
        return spins * matmul(matrix, spins)
class SpinSystemBiased(SpinSystemBase):
    """Spin system with per-spin bias (local field) terms as well as couplings.

    MaxCut is not defined for biased systems, so every cut-related method
    raises NotImplementedError.  The numerical kernels are numba-jitted
    static methods requiring float64 inputs.
    """

    def calculate_energy(self, spins=None):
        """Return the energy of ``spins`` (the system's current spins if None).

        NOTE(review): unlike SpinSystemUnbiased.calculate_energy, externally
        supplied spins are not validated/converted via
        ``_format_spins_to_signed`` -- confirm callers always pass signed spins.
        """
        # Idiom fix: `spins is None` replaces `type(spins) == type(None)`
        # (identical behaviour, standard Python).
        if spins is None:
            spins = self._get_spins()

        spins = spins.astype('float64')
        matrix = self.matrix.astype('float64')
        bias = self.bias.astype('float64')

        return self._calculate_energy_jit(spins, matrix, bias)

    def calculate_cut(self, spins=None):
        raise NotImplementedError("MaxCut not defined/implemented for biased SpinSystems.")

    def get_best_cut(self):
        raise NotImplementedError("MaxCut not defined/implemented for biased SpinSystems.")

    def _calc_over_range(self, i0, iMax):
        # Enumerate candidate configurations i0..iMax-1 by interpreting each
        # integer's binary representation as a {0,1} string and mapping it
        # to signed spins {-1,+1}.
        list_spins = [2 * np.array([int(x) for x in list_string]) - 1
                      for list_string in
                      [list(np.binary_repr(i, width=self.n_spins))
                       for i in range(int(i0), int(iMax))]]
        matrix = self.matrix.astype('float64')
        bias = self.bias.astype('float64')
        return self.__calc_over_range_jit(list_spins, matrix, bias)

    @staticmethod
    @jit(nopython=True)
    def _calculate_energy_change(new_spins, matrix, bias, action):
        # NOTE(review): the sign convention differs from the unbiased variant
        # (+2 here vs -2 there), presumably to match the un-negated energy in
        # _calculate_energy_jit below -- confirm before reusing elsewhere.
        return 2 * new_spins[action] * (matmul(new_spins.T, matrix[:, action]) + bias[action])

    @staticmethod
    @jit(nopython=True)
    def _calculate_cut_change(new_spins, matrix, bias, action):
        raise NotImplementedError("MaxCut not defined/implemented for biased SpinSystems.")

    @staticmethod
    @jit(nopython=True)
    def _calculate_energy_jit(spins, matrix, bias):
        # Energy: (1/2) s^T M s + s^T b.
        # NOTE(review): not negated, unlike the unbiased variant -- confirm.
        return matmul(spins.T, matmul(matrix, spins))/2 + matmul(spins.T, bias)

    @staticmethod
    @jit(parallel=True)
    def __calc_over_range_jit(list_spins, matrix, bias):
        # Brute-force search for the configuration minimising
        # -((1/2) s^T M s + s^T b).
        energy = 1e50
        best_spins = None
        for spins in list_spins:
            spins = spins.astype('float64')
            # Inlined (rather than calling the class method) so jit can
            # compile it.  NOTE(review): this is the NEGATION of
            # _calculate_energy_jit above -- confirm which sign is intended.
            current_energy = -( matmul(spins.T, matmul(matrix, spins))/2 + matmul(spins.T, bias))
            if current_energy < energy:
                energy = current_energy
                best_spins = spins
        return energy, best_spins

    @staticmethod
    @jit(nopython=True)
    def _get_immeditate_energies_avaialable_jit(spins, matrix, bias):
        # Per-spin values -(2 * s_i * ((M s)_i + b_i)), consumed by the
        # reward logic.
        return - (2 * spins * (matmul(matrix, spins) + bias))

    @staticmethod
    @jit(nopython=True)
    def _get_immeditate_cuts_avaialable_jit(spins, matrix, bias):
        raise NotImplementedError("MaxCut not defined/implemented for biased SpinSystems.")
eco-dqn | eco-dqn-master/src/envs/utils.py | import random
from abc import ABC, abstractmethod
from enum import Enum
import networkx as nx
import numpy as np
class EdgeType(Enum):
    """How edge weights are drawn when a graph is generated.

    (See the graph generators below for the concrete distributions.)
    """
    UNIFORM = 1   # every edge has weight +1
    DISCRETE = 2  # weights drawn from {+1, -1}
    RANDOM = 3    # weights drawn uniformly from [-1, 1)
class RewardSignal(Enum):
    """Reward-signal variants; their semantics are applied where rewards are
    computed (not visible in this module)."""
    DENSE = 1
    BLS = 2
    SINGLE = 3
    CUSTOM_BLS = 4
class ExtraAction(Enum):
    """Optional extra (non-flip) action variants for the environment; their
    semantics are applied in the environment logic (not visible here)."""
    PASS = 1
    RANDOMISE = 2
    NONE = 3
class OptimisationTarget(Enum):
    """Quantity the agent optimises: the graph cut value, or the (negated)
    Ising energy (see ``calculate_score``)."""
    CUT = 1
    ENERGY = 2
class SpinBasis(Enum):
    """Numerical representation of spin states."""
    SIGNED = 1  # spins take values {+1, -1}
    BINARY = 2  # spins take values {0, 1}
class Observable(Enum):
    """Features that can be stacked into the environment's observation."""
    # Local observations that differ between nodes.
    SPIN_STATE = 1
    IMMEDIATE_REWARD_AVAILABLE = 2
    TIME_SINCE_FLIP = 3

    # Global observations that are the same for all nodes.
    EPISODE_TIME = 4
    TERMINATION_IMMANENCY = 5
    NUMBER_OF_GREEDY_ACTIONS_AVAILABLE = 6
    DISTANCE_FROM_BEST_SCORE = 7
    DISTANCE_FROM_BEST_STATE = 8
# Default observation stack: the first three entries are local (per-spin)
# features, the remainder are global features shared by all spins.
DEFAULT_OBSERVABLES = [Observable.SPIN_STATE,
                       Observable.IMMEDIATE_REWARD_AVAILABLE,
                       Observable.TIME_SINCE_FLIP,
                       Observable.DISTANCE_FROM_BEST_SCORE,
                       Observable.DISTANCE_FROM_BEST_STATE,
                       Observable.NUMBER_OF_GREEDY_ACTIONS_AVAILABLE,
                       Observable.TERMINATION_IMMANENCY]
class GraphGenerator(ABC):
    """Abstract base class for graph (coupling-matrix) generators.

    Subclasses implement :meth:`get`, returning an adjacency/coupling matrix
    (and, for biased generators, a bias vector as well).
    """

    def __init__(self, n_spins, edge_type, biased=False):
        # n_spins   : number of vertices/spins in generated graphs.
        # edge_type : EdgeType controlling the edge-weight distribution.
        # biased    : whether the generator also produces a per-spin bias.
        self.n_spins = n_spins
        self.edge_type = edge_type
        self.biased = biased

    def pad_matrix(self, matrix):
        """Return a copy of ``matrix`` padded with one trailing row and
        column of zeros."""
        dim = matrix.shape[0]
        m = np.zeros((dim + 1, dim + 1))
        m[:-1, :-1] = matrix
        # Bug fix: previously returned the un-padded input `matrix` instead
        # of the freshly built padded matrix `m`, so padding was a no-op.
        return m

    def pad_bias(self, bias):
        """Return ``bias`` extended with a single trailing zero."""
        return np.concatenate((bias, [0]))

    @abstractmethod
    def get(self, with_padding=False):
        """Return a (possibly padded) coupling matrix; subclass hook."""
        raise NotImplementedError
###################
# Unbiased graphs #
###################
class RandomGraphGenerator(GraphGenerator):
    """Generates random graphs with a freshly sampled edge density on every
    call to :meth:`get`; edge weights follow the configured ``edge_type``.

    Bug fix: removed unreachable copy-paste residue (referencing
    ``self.matrix``/``self.bias``, which this class never defines) that
    followed the return statements in :meth:`get`.
    """

    def __init__(self, n_spins=20, edge_type=EdgeType.DISCRETE, biased=False):
        super().__init__(n_spins, edge_type, biased)

        # Select the per-edge weight sampler for the configured edge type.
        if self.edge_type == EdgeType.UNIFORM:
            self.get_w = lambda: 1
        elif self.edge_type == EdgeType.DISCRETE:
            self.get_w = lambda: np.random.choice([+1, -1])
        elif self.edge_type == EdgeType.RANDOM:
            self.get_w = lambda: np.random.uniform(-1, 1)
        else:
            raise NotImplementedError()

    def get(self, with_padding=False):
        """Return a symmetric coupling matrix (and bias vector if biased)."""
        g_size = self.n_spins
        density = np.random.uniform()  # fresh edge density per graph

        matrix = np.zeros((g_size, g_size))
        for i in range(self.n_spins):
            for j in range(i):
                if np.random.uniform() < density:
                    w = self.get_w()
                    matrix[i, j] = w
                    matrix[j, i] = w

        matrix = self.pad_matrix(matrix) if with_padding else matrix

        if self.biased:
            # Each spin gets a bias with the same probability as an edge.
            bias = np.array([self.get_w() if np.random.uniform() < density else 0
                             for _ in range(self.n_spins)])
            bias = self.pad_bias(bias) if with_padding else bias
            return matrix, bias
        else:
            return matrix
class RandomErdosRenyiGraphGenerator(GraphGenerator):
    """Generates Erdos-Renyi graphs; the connection probability is drawn
    from a normal distribution ``p_connection = [mean, std]`` on each call,
    and edge weights follow the configured ``edge_type``."""

    def __init__(self, n_spins=20, p_connection=[0.1,0], edge_type=EdgeType.DISCRETE):
        super().__init__(n_spins, edge_type, False)

        if type(p_connection) not in [list, tuple]:
            p_connection = [p_connection, 0]
        assert len(p_connection) == 2, "p_connection must have length 2"
        self.p_connection = p_connection

        if self.edge_type == EdgeType.UNIFORM:
            self.get_connection_mask = self._uniform_mask
        elif self.edge_type == EdgeType.DISCRETE:
            self.get_connection_mask = self._discrete_mask
        elif self.edge_type == EdgeType.RANDOM:
            self.get_connection_mask = self._random_mask
        else:
            raise NotImplementedError()

    def _uniform_mask(self):
        # All edges keep weight +1.
        return np.ones((self.n_spins, self.n_spins))

    def _discrete_mask(self):
        # Symmetric matrix of random {-1, +1} entries.
        mask = 2. * np.random.randint(2, size=(self.n_spins, self.n_spins)) - 1.
        return np.tril(mask) + np.triu(mask.T, 1)

    def _random_mask(self):
        # Symmetric matrix of entries drawn uniformly from [-1, 1).
        mask = 2. * np.random.rand(self.n_spins, self.n_spins) - 1
        return np.tril(mask) + np.triu(mask.T, 1)

    def get(self, with_padding=False):
        """Return a symmetric coupling matrix for a fresh random graph."""
        # Sample the connection probability, clipped to a valid range.
        p = np.clip(np.random.normal(*self.p_connection), 0, 1)
        graph = nx.erdos_renyi_graph(self.n_spins, p)
        adj = np.multiply(nx.to_numpy_array(graph), self.get_connection_mask())
        np.fill_diagonal(adj, 0)  # no self-connections (in-place)
        return self.pad_matrix(adj) if with_padding else adj
class RandomBarabasiAlbertGraphGenerator(GraphGenerator):
    """Generates Barabasi-Albert (preferential attachment) graphs with
    ``m_insertion_edges`` edges per new node; edge weights follow the
    configured ``edge_type``."""

    def __init__(self, n_spins=20, m_insertion_edges=4, edge_type=EdgeType.DISCRETE):
        super().__init__(n_spins, edge_type, False)
        self.m_insertion_edges = m_insertion_edges

        if self.edge_type == EdgeType.UNIFORM:
            self.get_connection_mask = self._uniform_mask
        elif self.edge_type == EdgeType.DISCRETE:
            self.get_connection_mask = self._discrete_mask
        elif self.edge_type == EdgeType.RANDOM:
            self.get_connection_mask = self._random_mask
        else:
            raise NotImplementedError()

    def _uniform_mask(self):
        # All edges keep weight +1.
        return np.ones((self.n_spins, self.n_spins))

    def _discrete_mask(self):
        # Symmetric matrix of random {-1, +1} entries.
        mask = 2. * np.random.randint(2, size=(self.n_spins, self.n_spins)) - 1.
        return np.tril(mask) + np.triu(mask.T, 1)

    def _random_mask(self):
        # Symmetric matrix of entries drawn uniformly from [-1, 1).
        mask = 2. * np.random.rand(self.n_spins, self.n_spins) - 1
        return np.tril(mask) + np.triu(mask.T, 1)

    def get(self, with_padding=False):
        """Return a symmetric coupling matrix for a fresh random graph."""
        graph = nx.barabasi_albert_graph(self.n_spins, self.m_insertion_edges)
        adj = np.multiply(nx.to_numpy_array(graph), self.get_connection_mask())
        np.fill_diagonal(adj, 0)  # no self-connections (in-place)
        return self.pad_matrix(adj) if with_padding else adj
class RandomRegularGraphGenerator(GraphGenerator):
    """Generates random d-regular graphs; the degree is drawn from a normal
    distribution ``d_node = [mean, std]`` on each call, and edge weights
    follow the configured ``edge_type``."""

    def __init__(self, n_spins=20, d_node=[2,0], edge_type=EdgeType.DISCRETE, biased=False):
        super().__init__(n_spins, edge_type, biased)

        if type(d_node) not in [list, tuple]:
            d_node = [d_node, 0]
        assert len(d_node) == 2, "k_neighbours must have length 2"
        self.d_node = d_node

        if self.edge_type == EdgeType.UNIFORM:
            self.get_connection_mask = self._uniform_mask
        elif self.edge_type == EdgeType.DISCRETE:
            self.get_connection_mask = self._discrete_mask
        elif self.edge_type == EdgeType.RANDOM:
            self.get_connection_mask = self._random_mask
        else:
            raise NotImplementedError()

    def _uniform_mask(self):
        # All edges keep weight +1.
        return np.ones((self.n_spins, self.n_spins))

    def _discrete_mask(self):
        # Symmetric matrix of random {-1, +1} entries.
        mask = 2. * np.random.randint(2, size=(self.n_spins, self.n_spins)) - 1.
        return np.tril(mask) + np.triu(mask.T, 1)

    def _random_mask(self):
        # Symmetric matrix of entries drawn uniformly from [-1, 1).
        mask = 2. * np.random.rand(self.n_spins, self.n_spins) - 1
        return np.tril(mask) + np.triu(mask.T, 1)

    def get(self, with_padding=False):
        """Return a symmetric coupling matrix for a fresh random graph."""
        # Sample the node degree, clipped to [0, n_spins].
        degree = np.clip(int(np.random.normal(*self.d_node)), 0, self.n_spins)
        graph = nx.random_regular_graph(degree, self.n_spins)
        adj = np.multiply(nx.to_numpy_array(graph), self.get_connection_mask())
        if not self.biased:
            np.fill_diagonal(adj, 0)  # no self-connections (in-place)
        return self.pad_matrix(adj) if with_padding else adj
class RandomWattsStrogatzGraphGenerator(GraphGenerator):
    """Generates Watts-Strogatz ring graphs (rewiring probability 0); the
    neighbour count is drawn from a normal distribution
    ``k_neighbours = [mean, std]`` on each call, and edge weights follow
    the configured ``edge_type``."""

    def __init__(self, n_spins=20, k_neighbours=[2,0], edge_type=EdgeType.DISCRETE, biased=False):
        super().__init__(n_spins, edge_type, biased)

        if type(k_neighbours) not in [list, tuple]:
            k_neighbours = [k_neighbours, 0]
        assert len(k_neighbours) == 2, "k_neighbours must have length 2"
        self.k_neighbours = k_neighbours

        if self.edge_type == EdgeType.UNIFORM:
            self.get_connection_mask = self._uniform_mask
        elif self.edge_type == EdgeType.DISCRETE:
            self.get_connection_mask = self._discrete_mask
        elif self.edge_type == EdgeType.RANDOM:
            self.get_connection_mask = self._random_mask
        else:
            raise NotImplementedError()

    def _uniform_mask(self):
        # All edges keep weight +1.
        return np.ones((self.n_spins, self.n_spins))

    def _discrete_mask(self):
        # Symmetric matrix of random {-1, +1} entries.
        mask = 2. * np.random.randint(2, size=(self.n_spins, self.n_spins)) - 1.
        return np.tril(mask) + np.triu(mask.T, 1)

    def _random_mask(self):
        # Symmetric matrix of entries drawn uniformly from [-1, 1).
        mask = 2. * np.random.rand(self.n_spins, self.n_spins) - 1
        return np.tril(mask) + np.triu(mask.T, 1)

    def get(self, with_padding=False):
        """Return a symmetric coupling matrix for a fresh random graph."""
        # Sample the neighbour count, clipped to [0, n_spins].
        k = np.clip(int(np.random.normal(*self.k_neighbours)), 0, self.n_spins)
        graph = nx.watts_strogatz_graph(self.n_spins, k, 0)
        adj = np.multiply(nx.to_numpy_array(graph), self.get_connection_mask())
        if not self.biased:
            np.fill_diagonal(adj, 0)  # no self-connections (in-place)
        return self.pad_matrix(adj) if with_padding else adj
################
# Known graphs #
################
class SingleGraphGenerator(GraphGenerator):
    """Always serves the same fixed graph (and optional bias vector)."""

    def __init__(self, matrix, bias=None):
        n_spins = matrix.shape[0]

        # Infer the edge-weight class from the matrix entries.
        if np.isin(matrix, [0, 1]).all():
            edge_type = EdgeType.UNIFORM
        elif np.isin(matrix, [0, -1, 1]).all():
            edge_type = EdgeType.DISCRETE
        else:
            edge_type = EdgeType.RANDOM

        super().__init__(n_spins, edge_type, bias is not None)

        self.matrix = matrix
        self.bias = bias

    def get(self, with_padding=False):
        """Return the stored matrix (and bias, if any), optionally padded."""
        matrix = self.pad_matrix(self.matrix) if with_padding else self.matrix
        if not self.biased:
            return matrix
        bias = self.pad_bias(self.bias) if with_padding else self.bias
        return matrix, bias
class SetGraphGenerator(GraphGenerator):
    """Serves graphs from a fixed, pre-supplied set, either cyclically
    (``ordered=True``) or by uniform random sampling."""

    def __init__(self, matrices, biases=None, ordered=False):
        # All matrices must share the same dimension (the constant -1 offset
        # does not affect the uniqueness test).
        if len(set([m.shape[0]-1 for m in matrices]))==1:
            n_spins = matrices[0].shape[0]
        else:
            raise NotImplementedError("All graphs in SetGraphGenerator must have the same dimension.")

        # Classify the edge weights to pick the corresponding EdgeType.
        if all([np.isin(m,[0,1]).all() for m in matrices]):
            edge_type=EdgeType.UNIFORM
        elif all([np.isin(m,[0,-1,1]).all() for m in matrices]):
            edge_type=EdgeType.DISCRETE
        else:
            edge_type = EdgeType.RANDOM

        super().__init__(n_spins, edge_type, biases is not None)

        if not self.biased:
            self.graphs = matrices
        else:
            assert len(matrices)==len(biases), "Must pass through the same number of matrices and biases."
            # NOTE(review): expects biases of length n_spins+1 (i.e. already
            # padded?) -- confirm against how biased graph sets are built.
            assert all([len(b)==self.n_spins+1 for b in biases]), "All biases and must have the same dimension as the matrices."
            self.graphs = list(zip(matrices, biases))

        self.ordered = ordered
        if self.ordered:
            self.i = 0  # index of the next graph to serve

    def get(self, with_padding=False):
        if self.ordered:
            m = self.graphs[self.i]
            self.i = (self.i + 1)%len(self.graphs)  # wrap around cyclically
        else:
            m = random.sample(self.graphs, k=1)[0]
        # NOTE(review): for biased sets `m` is a (matrix, bias) tuple, which
        # pad_matrix cannot handle -- confirm with_padding is only ever used
        # with unbiased sets.
        return self.pad_matrix(m) if with_padding else m
class PerturbedGraphGenerator(GraphGenerator):
    """Serves graphs from a fixed set, adding fresh symmetric Gaussian noise
    to the existing edges on every call to :meth:`get`."""

    def __init__(self, matrices, perturb_mean=0, perturb_std=0.01, biases=None, ordered=False):
        if type(matrices) != list:
            matrices = list(matrices)

        if biases is not None:
            if type(biases) != list:
                biases = list(biases)

        # All matrices must share the same dimension (the constant -1 offset
        # does not affect the uniqueness test).
        if len(set([m.shape[0] - 1 for m in matrices])) == 1:
            n_spins = matrices[0].shape[0]
        else:
            raise NotImplementedError("All graphs passed to PerturbedGraphGenerator must have the same dimension.")

        super().__init__(n_spins, EdgeType.RANDOM, biases is not None)

        self.perturb_mean = perturb_mean
        self.perturb_std = perturb_std

        if not self.biased:
            self.graphs = matrices
        else:
            raise NotImplementedError("Not implemented PerturbedGraphGenerator for biased graphs yet.")

        self.ordered = ordered
        if self.ordered:
            self.i = 0  # index of the next graph to serve

    def get(self, with_padding=False):
        if self.ordered:
            m = self.graphs[self.i]
            self.i = (self.i + 1)%len(self.graphs)
            if self.biased:
                # NOTE(review): dead in practice -- biased construction raises
                # in __init__; also `b` is unpacked here but never used or
                # returned.
                m, b = m
        else:
            if not self.biased:
                m = random.sample(self.graphs, k=1)[0]
            else:
                m, b = random.sample(self.graphs, k=1)[0]

        # Sample noise.
        noise = np.random.normal(self.perturb_mean, self.perturb_std, size=m.shape)

        # Set noise to 0 for non-edges in the adjacency matrix.
        np.putmask(noise, m == 0, 0)

        # Ensure noise is symmetric.
        noise = np.tril(noise) + np.triu(noise.T, 1)

        m = m + noise

        return self.pad_matrix(m) if with_padding else m
class HistoryBuffer:
    """Tracks which sets of flipped actions (spin configurations relative to
    the initial state) have already been visited.

    ``update`` applies one action and reports whether the resulting
    configuration is new.  Visited configurations are bucketed by set size
    so membership tests only scan sets of the matching size.
    """

    def __init__(self):
        self.buffer = {}                    # size -> list of visited action sets
        self.current_action_hist = set([])  # actions currently "on" (toggled odd times)
        self.current_action_hist_len = 0    # cached size of the set above

    def update(self, action):
        """Toggle ``action`` and return True iff the new configuration has
        not been seen before (it is then recorded)."""
        candidate = set(self.current_action_hist)
        if action in candidate:
            candidate.remove(action)
            self.current_action_hist_len -= 1
        else:
            candidate.add(action)
            self.current_action_hist_len += 1

        seen_at_this_size = self.buffer.get(self.current_action_hist_len)
        if seen_at_this_size is not None and candidate in seen_at_this_size:
            # Revisiting a known configuration.
            self.current_action_hist = candidate
            return False

        if seen_at_this_size is None:
            seen_at_this_size = []
            self.buffer[self.current_action_hist_len] = seen_at_this_size
        seen_at_this_size.append(candidate)
        self.current_action_hist = candidate
        return True
eco-dqn | eco-dqn-master/src/envs/__init__.py | 0 | 0 | 0 | py | |
eco-dqn | eco-dqn-master/src/agents/__init__.py | 0 | 0 | 0 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.