repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
deephyper | deephyper-master/deephyper/nas/run/_run_horovod.py | """The :func:`deephyper.nas.run.horovod.run` function is used to evaluate a deep neural network by enabling data-parallelism with Horovod to the :func:`deephyper.nas.run.alpha.run` function. This function will automatically apply the linear scaling rule to the learning rate and batch size given the current number of ranks (i.e., the initial learning rate and batch size are scaled by the number of ranks).
"""
import os
import traceback
import logging
import numpy as np
import tensorflow as tf
from deephyper.keras.callbacks import import_callback
import horovod.tensorflow.keras as hvd
import deephyper.nas.trainer._arch as a
from deephyper.nas.trainer import HorovodTrainer
from deephyper.nas.run._util import (
compute_objective,
load_config,
preproc_trainer,
save_history,
setup_data,
get_search_space,
)
logger = logging.getLogger(__name__)
# Default keyword arguments for each Keras callback accepted by this
# run-function; user-supplied settings are merged over these defaults before
# the callback is instantiated.
default_callbacks_config = {
    "EarlyStopping": dict(
        monitor="val_loss", min_delta=0, mode="min", verbose=0, patience=0
    ),
    "ModelCheckpoint": dict(
        monitor="val_loss",
        mode="min",
        save_best_only=True,
        verbose=1,
        filepath="model.h5",
        save_weights_only=False,
    ),
    "TensorBoard": dict(
        log_dir="",
        histogram_freq=0,
        batch_size=32,
        write_graph=False,
        write_grads=False,
        write_images=False,
        update_freq="epoch",
    ),
    "CSVLogger": dict(filename="training.csv", append=True),
    "CSVExtendedLogger": dict(filename="training.csv", append=True),
    "TimeStopping": dict(),
    "ReduceLROnPlateau": dict(patience=5, verbose=0),
}
# Names of callbacks reserved for the root rank only (they write files, so
# only rank 0 should run them).
# Fixed: "Tensorboard" did not match the "TensorBoard" key used in
# `default_callbacks_config`, so the TensorBoard callback was created on
# every rank instead of only on the root.
hvd_root_cb = ["ModelCheckpoint", "TensorBoard", "CSVLogger", "CSVExtendedLogger"]
def run_horovod(config: dict) -> float:
    """Evaluate a neural network configuration with Horovod data-parallelism.

    The batch size and learning rate are scaled linearly with the number of
    Horovod ranks (linear scaling rule) and the learning rate is warmed up
    over the first epochs.

    Args:
        config (dict): The JSON encoded configuration generated by DeepHyper.

    Returns:
        float: the objective computed from the training history, ``-1`` when
        the model could not be created; the result is clipped below at -10.
    """
    hvd.init()

    # Threading configuration
    if os.environ.get("OMP_NUM_THREADS", None) is not None:
        logger.debug(f"OMP_NUM_THREADS is {os.environ.get('OMP_NUM_THREADS')}")
        num_intra = int(os.environ.get("OMP_NUM_THREADS"))
        tf.config.threading.set_intra_op_parallelism_threads(num_intra)
        tf.config.threading.set_inter_op_parallelism_threads(2)

    # Pin each rank to a single GPU from the visible devices.
    if os.environ.get("CUDA_VISIBLE_DEVICES") is not None:
        devices = os.environ.get("CUDA_VISIBLE_DEVICES").split(",")
        os.environ["CUDA_VISIBLE_DEVICES"] = devices[hvd.rank()]

    # Fixed: removed the stray no-op statement `config["seed"]`.
    seed = config["seed"]
    if seed is not None:
        np.random.seed(seed)
        tf.random.set_seed(seed)

    load_config(config)

    # Scale batch size and learning rate according to the number of ranks
    initial_lr = config[a.hyperparameters][a.learning_rate]
    batch_size = config[a.hyperparameters][a.batch_size] * hvd.size()
    learning_rate = config[a.hyperparameters][a.learning_rate] * hvd.size()
    logger.info(
        f"Scaled: 'batch_size' from {config[a.hyperparameters][a.batch_size]} to {batch_size} "
    )
    logger.info(
        f"Scaled: 'learning_rate' from {config[a.hyperparameters][a.learning_rate]} to {learning_rate} "
    )
    config[a.hyperparameters][a.batch_size] = batch_size
    config[a.hyperparameters][a.learning_rate] = learning_rate

    input_shape, output_shape = setup_data(config)
    search_space = get_search_space(config, input_shape, output_shape, seed=seed)

    model_created = False
    try:
        model = search_space.sample(config["arch_seq"])
        model_created = True
    except Exception:
        logger.info("Error: Model creation failed...")
        logger.info(traceback.format_exc())

    if model_created:
        callbacks = [
            # Horovod: broadcast initial variable states from rank 0 to all other processes.
            # This is necessary to ensure consistent initialization of all workers when
            # training is started with random weights or restored from a checkpoint.
            hvd.callbacks.BroadcastGlobalVariablesCallback(0),
            # Horovod: average metrics among workers at the end of every epoch.
            #
            # Note: This callback must be in the list before the ReduceLROnPlateau,
            # TensorBoard or other metrics-based callbacks.
            hvd.callbacks.MetricAverageCallback(),
            # Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
            # accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
            # the first five epochs. See https://arxiv.org/abs/1706.02677 for details.
            # !initial_lr argument is not available in horovod==0.19.0
            hvd.callbacks.LearningRateWarmupCallback(
                warmup_epochs=5, verbose=0, initial_lr=initial_lr
            ),
        ]
        cb_requires_valid = False  # Callbacks requires validation data
        callbacks_config = config[a.hyperparameters].get(a.callbacks, {})
        if callbacks_config is not None:
            for cb_name, cb_conf in callbacks_config.items():
                if cb_name not in default_callbacks_config:
                    logger.error(f"'{cb_name}' is not an accepted callback!")
                    continue
                # Callbacks in `hvd_root_cb` are only created on the root rank.
                if cb_name in hvd_root_cb and hvd.rank() != 0:
                    continue
                # Fixed: merge over the defaults instead of mutating the shared
                # module-level `default_callbacks_config`, so settings from one
                # evaluation do not leak into the next.
                cb_params = {**default_callbacks_config[cb_name], **cb_conf}
                # Import and create corresponding callback
                Callback = import_callback(cb_name)
                callbacks.append(Callback(**cb_params))
                if cb_name == "EarlyStopping":
                    # Use the merged params so a missing user "monitor" falls
                    # back to the default instead of raising KeyError.
                    cb_requires_valid = "val" in cb_params["monitor"].split("_")

        trainer = HorovodTrainer(config=config, model=model)
        trainer.callbacks.extend(callbacks)

        last_only, with_pred = preproc_trainer(config)
        last_only = last_only and not cb_requires_valid

        history = trainer.train(with_pred=with_pred, last_only=last_only)

        # Only the root rank persists the history.
        if hvd.rank() == 0:
            save_history(config.get("log_dir", None), history, config)

        result = compute_objective(config["objective"], history)
    else:
        # penalising actions if model cannot be created
        result = -1

    if result < -10:
        result = -10
    return result
| 6,420 | 37.680723 | 407 | py |
deephyper | deephyper-master/deephyper/nas/run/_run_distributed_base_trainer.py | """The :func:`deephyper.nas.run.tf_distributed.run` function is used to deploy a data-distributed training (on a single node) with ``tensorflow.distribute.MirroredStrategy``. It follows the same training pipeline as :func:`deephyper.nas.run.alpha.run`. Two hyperparameters arguments can be used to activate or deactivate the linear scaling rule (aka ``lsr``) for the learning rate and batch size, respectively:
.. code-block:: python
Problem.hyperparameters(
...
lsr_batch_size=True,
lsr_learning_rate=True,
warmup_lr=True,
warmup_epochs=5
...
)
The hyperparameters of the form ``patience_{Callback}`` such as ``patience_EarlyStopping`` and ``patience_ReduceLROnPlateau`` are valid when the corresponding callback is declared:
.. code-block:: python
Problem.hyperparameters(
...
patience_ReduceLROnPlateau=5,
patience_EarlyStopping=10,
callbacks=dict(
ReduceLROnPlateau=dict(monitor="val_r2", mode="max", verbose=0),
EarlyStopping=dict(monitor="val_r2", min_delta=0, mode="max", verbose=0),
),
...
)
"""
import traceback
import logging
import numpy as np
import tensorflow as tf
from deephyper.keras.callbacks import import_callback
from deephyper.keras.callbacks import LearningRateWarmupCallback
from deephyper.nas.run._util import (
compute_objective,
load_config,
preproc_trainer,
save_history,
setup_data,
get_search_space,
default_callbacks_config,
)
from deephyper.nas.trainer import BaseTrainer
import deephyper.nas.trainer._arch as a
logger = logging.getLogger(__name__)
def run_distributed_base_trainer(config):
    """Evaluate a configuration with single-node data-distributed training
    using ``tf.distribute.MirroredStrategy``.

    The linear scaling rule is applied to the batch size and/or the learning
    rate when the ``lsr_batch_size`` / ``lsr_learning_rate`` hyperparameters
    are set; an optional learning-rate warmup is controlled by ``warmup_lr``
    and ``warmup_epochs``.

    Args:
        config (dict): The JSON encoded configuration generated by DeepHyper.

    Returns:
        float: the objective computed from the training history, ``-1`` when
        the model could not be created; values below -10 or NaN map to -10.
    """
    physical_devices = tf.config.list_physical_devices("GPU")
    try:
        for device in physical_devices:
            tf.config.experimental.set_memory_growth(device, True)
    except Exception:
        # Invalid device or cannot modify virtual devices once initialized.
        pass

    distributed_strategy = tf.distribute.MirroredStrategy()
    n_replicas = distributed_strategy.num_replicas_in_sync

    seed = config["seed"]
    if seed is not None:
        np.random.seed(seed)
        tf.random.set_seed(seed)

    load_config(config)

    # Scale batch size and learning rate according to the number of replicas
    initial_lr = config[a.hyperparameters][a.learning_rate]
    if config[a.hyperparameters].get("lsr_batch_size"):
        batch_size = config[a.hyperparameters][a.batch_size] * n_replicas
    else:
        batch_size = config[a.hyperparameters][a.batch_size]
    if config[a.hyperparameters].get("lsr_learning_rate"):
        learning_rate = config[a.hyperparameters][a.learning_rate] * n_replicas
    else:
        learning_rate = config[a.hyperparameters][a.learning_rate]
    logger.info(
        f"Scaled: 'batch_size' from {config[a.hyperparameters][a.batch_size]} to {batch_size} "
    )
    logger.info(
        f"Scaled: 'learning_rate' from {config[a.hyperparameters][a.learning_rate]} to {learning_rate} "
    )
    config[a.hyperparameters][a.batch_size] = batch_size
    config[a.hyperparameters][a.learning_rate] = learning_rate

    input_shape, output_shape = setup_data(config)
    search_space = get_search_space(config, input_shape, output_shape, seed=seed)

    model_created = False
    with distributed_strategy.scope():
        try:
            model = search_space.sample(config["arch_seq"])
            model_created = True
        except Exception:
            logger.info("Error: Model creation failed...")
            logger.info(traceback.format_exc())

        if model_created:
            # Setup callbacks
            callbacks = []
            cb_requires_valid = False  # Callbacks requires validation data
            callbacks_config = config["hyperparameters"].get("callbacks")
            if callbacks_config is not None:
                for cb_name, cb_conf in callbacks_config.items():
                    if cb_name not in default_callbacks_config:
                        logger.error(f"'{cb_name}' is not an accepted callback!")
                        continue
                    # Fixed: merge over the defaults instead of mutating the
                    # shared module-level `default_callbacks_config`, so
                    # settings do not leak across evaluations.
                    cb_params = {**default_callbacks_config[cb_name], **cb_conf}
                    # Special dynamic parameters for callbacks
                    if cb_name == "ModelCheckpoint":
                        cb_params["filepath"] = f'best_model_{config["id"]}.h5'
                    # replace patience hyperparameter
                    if "patience" in cb_params:
                        patience = config["hyperparameters"].get(f"patience_{cb_name}")
                        if patience is not None:
                            cb_params["patience"] = patience
                    # Import and create corresponding callback
                    Callback = import_callback(cb_name)
                    callbacks.append(Callback(**cb_params))
                    if cb_name in ["EarlyStopping"]:
                        # Merged params so a missing user "monitor" falls back
                        # to the default instead of raising KeyError.
                        cb_requires_valid = "val" in cb_params["monitor"].split("_")

            # WarmupLR: ramp the scaled learning rate over the first epochs.
            if config[a.hyperparameters].get("warmup_lr"):
                warmup_epochs = config[a.hyperparameters].get("warmup_epochs", 5)
                callbacks.append(
                    LearningRateWarmupCallback(
                        n_replicas=n_replicas,
                        warmup_epochs=warmup_epochs,
                        verbose=0,
                        initial_lr=initial_lr,
                    )
                )

            trainer = BaseTrainer(config=config, model=model)
            trainer.callbacks.extend(callbacks)
            last_only, with_pred = preproc_trainer(config)
            last_only = last_only and not cb_requires_valid

    if model_created:
        history = trainer.train(with_pred=with_pred, last_only=last_only)
        # save history
        save_history(config.get("log_dir", None), history, config)
        result = compute_objective(config["objective"], history)
    else:
        # penalising actions if model cannot be created
        result = -1

    if result < -10 or np.isnan(result):
        result = -10
    return result
| 6,405 | 37.359281 | 410 | py |
deephyper | deephyper-master/deephyper/nas/run/_run_debug.py | """The :func:`deephyper.nas.run.quick_random.run` function is a function used to check the good behaviour of an hyperparameter or neural architecture search algorithm. It will simply return an objective of the sum of hyperparameters combined with a random sample to check the good reproducibility of DeepHyper experiments while setting a random seed in the problem definition.
"""
import numpy as np
def run_debug(config: dict) -> float:
    """Debug run-function returning a reproducible pseudo-random objective.

    The objective is the sum of the architecture encoding (or of all the
    configuration values when no ``"arch_seq"`` key is present) plus one draw
    from a random stream seeded by ``config["seed"]``.
    """
    rng = np.random.RandomState(config.get("seed"))
    noise = rng.random()
    if "arch_seq" in config:
        return sum(config["arch_seq"]) + noise
    return sum(config.values()) + noise
| 645 | 52.833333 | 376 | py |
deephyper | deephyper-master/deephyper/nas/run/_run_debug_arch.py | """The :func:`deephyper.nas.run.quick.run` function is a function used to check the good behaviour of a neural architecture search algorithm. It will simply return the sum of the scalar values encoding a neural architecture in the ``config["arch_seq"]`` key.
"""
def run_debug_arch(config: dict) -> float:
    """Debug run-function: the objective is the sum of the architecture encoding."""
    total = 0
    for component in config["arch_seq"]:
        total += component
    return total
| 343 | 48.142857 | 258 | py |
deephyper | deephyper-master/deephyper/nas/run/_run_debug_hp_arch.py | """The :func:`deephyper.nas.run.quick2.run` function is a function used to check the good behaviour of a mixed hyperparameter and neural architecture search algorithm. It will simply return an objective combining the sum of the scalar values encoding a neural architecture in the ``config["arch_seq"]`` key then divide this sum by the ``batch_size`` hyperparameter and scale it by the ``learning_rate`` hyperparameter::
(sum(arch_seq) + randn() * noise_level) / batch_size * learning_rate
"""
import numpy as np
def run_debug_hp_arch(config: dict) -> float:
    """Debug run-function for joint hyperparameter + architecture search.

    Returns ``(sum(arch_seq) + noise) / batch_size * learning_rate``.

    Fixed: the noise is now drawn from a stream seeded by the optional
    ``config["seed"]`` (consistent with :func:`run_debug`) so experiments are
    reproducible; without a seed the behavior is unchanged (random noise).
    """
    noise_level = 1.0
    random = np.random.RandomState(config.get("seed"))
    lr = config["hyperparameters"]["learning_rate"]
    bs = config["hyperparameters"]["batch_size"]
    return (sum(config["arch_seq"]) + random.randn() * noise_level) / bs * lr
| 770 | 54.071429 | 419 | py |
deephyper | deephyper-master/deephyper/nas/run/_util.py | """Utilitaries functions to ease the processing of a configuration (``dict``) generated by a neural architecture search algorithm.
"""
import logging
import copy
import json
import os
import pathlib
import uuid
from datetime import datetime
import numpy as np
import tensorflow as tf
from deephyper.core.exceptions.problem import WrongProblemObjective
from deephyper.evaluator._encoder import Encoder
from deephyper.core.utils import load_attr
from deephyper.nas.lr_scheduler import exponential_decay
# Default keyword arguments for each supported Keras callback; user-supplied
# settings from the problem definition are merged over these defaults by the
# run-functions before instantiating the callback.
default_callbacks_config = {
    "EarlyStopping": dict(
        monitor="val_loss", min_delta=0, mode="min", verbose=0, patience=0
    ),
    "ModelCheckpoint": dict(
        monitor="val_loss",
        mode="min",
        save_best_only=True,
        verbose=1,
        filepath="model.h5",
        save_weights_only=False,
    ),
    "TensorBoard": dict(
        log_dir="",
        histogram_freq=0,
        batch_size=32,
        write_graph=False,
        write_grads=False,
        write_images=False,
        update_freq="epoch",
    ),
    "CSVLogger": dict(filename="training.csv", append=True),
    "CSVExtendedLogger": dict(filename="training.csv", append=True),
    "TimeStopping": dict(),
    "ReduceLROnPlateau": dict(monitor="val_loss", mode="auto", verbose=0, patience=5),
    "LearningRateScheduler": dict(schedule=exponential_decay),
    "LearningRateWarmupCallback": dict(
        n_replicas=1,
        initial_lr=1e-3,
        warmup_epochs=5,
        momentum_correction=True,
        steps_per_epoch=None,
        verbose=0,
    ),
}
def load_config(config: dict) -> None:
    """Load in place the different Python objects serialized as "str" in the
    configuration. The ``"load_data", "augment", "create_search_space",
    "preprocessing", "objective"`` keys of ``config`` are handled.

    Args:
        config (dict): The JSON encoded configuration generated by DeepHyper.
    """
    # Deserialize the data-loading function.
    config["load_data"]["func"] = load_attr(config["load_data"]["func"])
    # Deserialize the optional data-augmentation strategy.
    augment = config.get("augment")
    if augment is not None:
        augment["func"] = load_attr(augment["func"])
    # Deserialize the class building the search space.
    config["search_space"]["class"] = load_attr(config["search_space"]["class"])
    # Deserialize the optional preprocessing function; normalize a missing
    # entry to an explicit None.
    preprocessing = config.get("preprocessing")
    if preprocessing is not None:
        preprocessing["func"] = load_attr(preprocessing["func"])
    else:
        config["preprocessing"] = None
    # An objective given as a dotted path is resolved to a callable.
    objective = config["objective"]
    if type(objective) is str and "." in objective:
        config["objective"] = load_attr(objective)
def setup_data(config: dict, add_to_config: bool = True) -> tuple:
    """Load the data defined by the ``"load_data"`` key in the ``config`` dictionary.

    The ``load_data`` function has to return numpy arrays of the form
    ``(X_train, y_train), (X_valid, y_valid)`` or a dictionary defining
    generators such as:

    .. code-block:: python

        def train_gen():
            for x0, x1, y in zip(tX0, tX1, ty):
                yield ({
                    "input_0": x0,
                    "input_1": x1
                }, y)

        def valid_gen():
            for x0, x1, y in zip(vX0, vX1, vy):
                yield ({
                    "input_0": x0,
                    "input_1": x1
                }, y)

        res = {
            "train_gen": train_gen,
            "train_size": len(ty),
            "valid_gen": valid_gen,
            "valid_size": len(vy),
            "types": ({"input_0": tf.float64, "input_1": tf.float64}, tf.float64),
            "shapes": ({"input_0": (5, ), "input_1": (5, )}, (1, ))
        }

    Args:
        config (dict): The JSON encoded configuration generated by DeepHyper.
        add_to_config (bool, optional): If True the loaded data is stored under
            the ``"data"`` key of the ``config`` dictionary. Defaults to True.

    Raises:
        RuntimeError: if the value returned by the ``load_data`` function has an
            unsupported type or structure.

    Returns:
        tuple: ``(input_shape, output_shape)`` when ``add_to_config`` is True,
        otherwise ``(input_shape, output_shape, data)``.
    """
    # Loading data
    load_data = config["load_data"]["func"]
    kwargs = config["load_data"].get("kwargs")
    data = load_data() if kwargs is None else load_data(**kwargs)
    logging.info(f"Data loaded with kwargs: {kwargs}")
    # Set data shape
    if type(data) is tuple:
        if len(data) != 2:
            raise RuntimeError(
                f"Loaded data are tuple, should ((training_input, training_output), (validation_input, validation_output)) but length=={len(data)}"
            )
        (t_X, t_y), (v_X, v_y) = data
        # Single input, single output: read shapes directly from the arrays
        # (the first axis is the number of samples and is dropped).
        if (
            type(t_X) is np.ndarray
            and type(t_y) is np.ndarray
            and type(v_X) is np.ndarray
            and type(v_y) is np.ndarray
        ):
            input_shape = np.shape(t_X)[1:]
            output_shape = np.shape(t_y)[1:]
        # Multiple inputs, single output.
        elif (
            type(t_X) is list
            and type(t_y) is np.ndarray
            and type(v_X) is list
            and type(v_y) is np.ndarray
        ):
            # interested in shape of data not in length
            input_shape = [np.shape(itX)[1:] for itX in t_X]
            output_shape = np.shape(t_y)[1:]
        # Single input, multiple outputs.
        elif (
            type(t_X) is np.ndarray
            and type(t_y) is list
            and type(v_X) is np.ndarray
            and type(v_y) is list
        ):
            # interested in shape of data not in length
            input_shape = np.shape(t_X)[1:]
            output_shape = [np.shape(ity)[1:] for ity in t_y]
        # Multiple inputs, multiple outputs.
        elif (
            type(t_X) is list
            and type(t_y) is list
            and type(v_X) is list
            and type(v_y) is list
        ):
            # interested in shape of data not in length
            input_shape = [np.shape(itX)[1:] for itX in t_X]
            output_shape = [np.shape(ity)[1:] for ity in t_y]
        else:
            raise RuntimeError(
                f"Data returned by load_data function are of a wrong type: type(t_X)=={type(t_X)}, type(t_y)=={type(t_y)}, type(v_X)=={type(v_X)}, type(v_y)=={type(v_y)}"
            )
        if add_to_config:
            config["data"] = {
                "train_X": t_X,
                "train_Y": t_y,
                "valid_X": v_X,
                "valid_Y": v_y,
            }
    # Generator-based data: shapes are read from the "shapes" entry.
    elif type(data) is dict:
        if add_to_config:
            config["data"] = data
        if len(data["shapes"][0]) == 1:
            input_shape = data["shapes"][0]["input_0"]
        else:
            input_shape = [
                data["shapes"][0][f"input_{i}"] for i in range(len(data["shapes"][0]))
            ]
        output_shape = data["shapes"][1]
    else:
        raise RuntimeError(
            f"Data returned by load_data function are of an unsupported type: {type(data)}"
        )
    if (
        output_shape == ()
    ):  # basically means data with shape=(num_elements) == (num_elements, 1)
        output_shape = (1,)
    logging.info(f"input_shape: {input_shape}")
    logging.info(f"output_shape: {output_shape}")
    if add_to_config:
        return input_shape, output_shape
    else:
        return input_shape, output_shape, data
def get_search_space(config, input_shape, output_shape, seed):
    """Instantiate and build the search space declared in ``config["search_space"]``.

    Args:
        config (dict): configuration with a ``"search_space"`` entry holding the
            class and its optional keyword arguments.
        input_shape: shape(s) of the inputs, forwarded to the class.
        output_shape: shape(s) of the outputs, forwarded to the class.
        seed: random seed forwarded to the class.

    Returns:
        the built search-space instance.
    """
    space_cls = config["search_space"]["class"]
    extra_kwargs = config["search_space"].get("kwargs") or {}
    space = space_cls(input_shape, output_shape, seed=seed, **extra_kwargs)
    space.build()
    return space
def compute_objective(objective, history: dict) -> float:
    """Compute an objective based on the history.

    Args:
        objective (str|callable): the definition of the objective. If ``str``
            it has to be one of the metrics' name (e.g. ``"acc"``). It can have
            a prefix ``"-"`` to ask for the negative scalar, and a suffix
            ``__max``, ``__min`` or ``__last`` choosing the maximum, minimum or
            last epoch value (last by default). If it is a callable, it will be
            passed the ``history`` and has to return a scalar value.
        history (dict): The training history of the model.

    Raises:
        WrongProblemObjective: raised when the value of ``objective`` is not correct.

    Returns:
        float: the deducted objective from ``history``.
    """
    if type(objective) is str:
        # A leading "-" asks for the negative of the metric.
        sign = -1 if objective[0] == "-" else 1
        if sign == -1:
            objective = objective[1:]
        if "__" in objective or objective in history:
            parts = objective.split("__")
            metric_name = parts[0]
            mode = parts[1] if len(parts) > 1 else "last"
            series = history[metric_name]
            if mode == "min":
                value = min(series)
            elif mode == "max":
                value = max(series)
            else:
                # "last" or anything else: take the final epoch value.
                value = series[-1]
            return sign * value
    elif callable(objective):
        return objective(history)
    raise WrongProblemObjective(objective)
def preproc_trainer(config):
    """Deduce trainer flags from the objective definition.

    Returns:
        tuple: ``(last_only, with_pred)`` — whether only the last epoch matters
        (objective name contains ``"__last"``) and whether predictions are
        needed (callable objective whose name contains ``"with_pred"``).
    """
    objective = config["objective"]
    if type(objective) is str:
        last_only = "__last" in objective
        with_pred = False
    else:
        # A callable objective: inspect its function name instead.
        last_only = "__last" in objective.__name__
        with_pred = "with_pred" in objective.__name__
    return last_only, with_pred
def hash_arch_seq(arch_seq: list) -> str:
    """Encode an architecture sequence as an underscore-joined string."""
    return "_".join(map(str, arch_seq))
def set_memory_growth_for_visible_gpus(enable=True):
    """Toggle TensorFlow memory growth on every visible GPU (if any)."""
    # GPU Configuration if available
    for gpu in tf.config.list_physical_devices("GPU"):
        tf.config.experimental.set_memory_growth(gpu, enable)
class HistorySaver:
    """Persist the history, configuration and model of an evaluation.

    Files are written under ``<save_dir>/{history,model,config}`` and are all
    named after the job id (``config["job_id"]``, falling back to a UUID).
    """

    def __init__(
        self,
        config: dict,
        save_dir="save",
        history_dir="history",
        model_dir="model",
        config_dir="config",
    ):
        # Job identifier used as the file stem for every artifact.
        self.id = config.get("job_id", uuid.uuid1())
        self.date = HistorySaver.get_date()
        # Keep a private copy so later mutations of `config` are not saved.
        self.config = copy.deepcopy(config)
        self.save_dir = save_dir
        self.history_dir = os.path.join(self.save_dir, history_dir)
        self.model_dir = os.path.join(self.save_dir, model_dir)
        self.config_dir = os.path.join(self.save_dir, config_dir)

    @property
    def name(self) -> str:
        return f"{self.id}"

    @property
    def model_path(self) -> str:
        return os.path.join(self.model_dir, f"{self.name}.h5")

    @property
    def history_path(self) -> str:
        return os.path.join(self.history_dir, f"{self.name}.json")

    @property
    def config_path(self) -> str:
        return os.path.join(self.config_dir, f"{self.name}.json")

    @staticmethod
    def get_date() -> str:
        """Return the current timestamp formatted for file names."""
        return datetime.now().strftime("%d-%b-%Y_%H-%M-%S")

    def write_history(self, history: dict) -> None:
        """Dump ``history`` as JSON, creating the directory if needed."""
        if not os.path.exists(self.history_dir):
            pathlib.Path(self.history_dir).mkdir(parents=True, exist_ok=True)
        logging.info(f"Saving history at: {self.history_path}")
        with open(self.history_path, "w") as f:
            json.dump(history, f, cls=Encoder)

    def write_config(self):
        """Dump the saved configuration as JSON."""
        if not os.path.exists(self.config_dir):
            pathlib.Path(self.config_dir).mkdir(parents=True, exist_ok=True)
        with open(self.config_path, "w") as f:
            json.dump(self.config, f, cls=Encoder)

    def write_model(self, model):
        # Only prepares the directory; nothing is written for the model here
        # (presumably the model file is produced elsewhere via `model_path`
        # — TODO confirm against callers).
        if not os.path.exists(self.model_dir):
            pathlib.Path(self.model_dir).mkdir(parents=True, exist_ok=True)
def save_history(log_dir: str, history: dict, config: dict):
    """Save a training history as JSON under ``<log_dir>/history``.

    The file name combines a timestamp and the hashed architecture sequence.
    Does nothing when ``log_dir`` is None.

    Args:
        log_dir (str): directory under which the ``history`` folder is created.
        history (dict): the training history to serialize.
        config (dict): the evaluated configuration (its ``"arch_seq"`` names the file).
    """
    if log_dir is None:
        return
    history_dir = os.path.join(log_dir, "history")
    # Fixed: parents=True/exist_ok=True so a second evaluation writing to the
    # same log_dir no longer crashes with FileExistsError (the original used
    # parents=False, exist_ok=False). Consistent with HistorySaver.
    pathlib.Path(history_dir).mkdir(parents=True, exist_ok=True)
    now = datetime.now().strftime("%d-%b-%Y_%H-%M-%S")
    history_path = os.path.join(
        history_dir, f"{now}oo{hash_arch_seq(config['arch_seq'])}.json"
    )
    logging.info(f"Saving history at: {history_path}")
    with open(history_path, "w") as f:
        json.dump(history, f, cls=Encoder)
| 12,348 | 33.785915 | 410 | py |
deephyper | deephyper-master/deephyper/nas/run/_run_debug_slow.py | """The :func:`deephyper.nas.run.quick_random.run` function is a function used to check the good behaviour of an hyperparameter or neural architecture search algorithm. It will simply return an objective of the sum of hyperparameters combined with a random sample to check the good reproducibility of DeepHyper experiments while setting a random seed in the problem definition.
"""
import time
import numpy as np
def run_debug_slow(config: dict) -> float:
    """Debug run-function that sleeps one second to emulate a slow evaluation.

    The objective is the sum of the architecture encoding (or of all the
    configuration values when ``"arch_seq"`` is absent) plus one draw from a
    random stream seeded by ``config["seed"]``.
    """
    time.sleep(1)
    rng = np.random.RandomState(config.get("seed"))
    noise = rng.random()
    if "arch_seq" in config:
        return sum(config["arch_seq"]) + noise
    return sum(config.values()) + noise
| 680 | 47.642857 | 376 | py |
deephyper | deephyper-master/deephyper/nas/run/_run_base_trainer.py | """The :func:`deephyper.nas.run.alpha.run` function is used to evaluate a deep neural network by loading the data, building the model, training the model and returning a scalar value corresponding to the objective defined in the used :class:`deephyper.problem.NaProblem`.
"""
import os
import traceback
import logging
import numpy as np
import tensorflow as tf
from deephyper.keras.callbacks import import_callback
from deephyper.nas.run._util import (
compute_objective,
load_config,
preproc_trainer,
setup_data,
get_search_space,
default_callbacks_config,
HistorySaver,
)
from deephyper.nas.trainer import BaseTrainer
logger = logging.getLogger(__name__)
def run_base_trainer(job):
    """Evaluate a neural network configuration with :class:`BaseTrainer`.

    Loads the data, builds the model sampled from
    ``job.parameters["arch_seq"]``, trains it and returns the objective
    computed from the training history.

    Args:
        job: a DeepHyper job; ``job.parameters`` is the configuration and
            ``job.id`` identifies the evaluation.

    Returns:
        float: the objective, or ``-inf`` when the model could not be created
        or the computed objective is NaN.
    """
    config = job.parameters
    config["job_id"] = job.id

    tf.keras.backend.clear_session()
    # tf.config.optimizer.set_jit(True)

    # setup history saver
    if config.get("log_dir") is None:
        config["log_dir"] = "."
    save_dir = os.path.join(config["log_dir"], "save")
    saver = HistorySaver(config, save_dir)
    saver.write_config()
    saver.write_model(None)

    # GPU Configuration if available
    physical_devices = tf.config.list_physical_devices("GPU")
    try:
        for device in physical_devices:
            tf.config.experimental.set_memory_growth(device, True)
    except Exception:
        # Invalid device or cannot modify virtual devices once initialized.
        logger.info("error memory growth for GPU device")

    # Threading configuration (CPU-only runs honour OMP_NUM_THREADS)
    if (
        len(physical_devices) == 0
        and os.environ.get("OMP_NUM_THREADS", None) is not None
    ):
        logger.info(f"OMP_NUM_THREADS is {os.environ.get('OMP_NUM_THREADS')}")
        num_intra = int(os.environ.get("OMP_NUM_THREADS"))
        try:
            tf.config.threading.set_intra_op_parallelism_threads(num_intra)
            tf.config.threading.set_inter_op_parallelism_threads(2)
        except RuntimeError:  # Session already initialized
            pass
    tf.config.set_soft_device_placement(True)

    seed = config.get("seed")
    if seed is not None:
        np.random.seed(seed)
        tf.random.set_seed(seed)

    load_config(config)
    input_shape, output_shape = setup_data(config)
    search_space = get_search_space(config, input_shape, output_shape, seed=seed)

    model_created = False
    try:
        model = search_space.sample(config["arch_seq"])
        model_created = True
    except Exception:
        logger.info("Error: Model creation failed...")
        logger.info(traceback.format_exc())

    if model_created:
        # Setup callbacks
        callbacks = []
        cb_requires_valid = False  # Callbacks requires validation data
        callbacks_config = config["hyperparameters"].get("callbacks")
        if callbacks_config is not None:
            for cb_name, cb_conf in callbacks_config.items():
                if cb_name not in default_callbacks_config:
                    logger.error(f"'{cb_name}' is not an accepted callback!")
                    continue
                # Fixed: merge over the defaults instead of mutating the shared
                # module-level `default_callbacks_config`, so settings from one
                # job do not leak into the next.
                cb_params = {**default_callbacks_config[cb_name], **cb_conf}
                # Special dynamic parameters for callbacks
                if cb_name == "ModelCheckpoint":
                    cb_params["filepath"] = saver.model_path
                # replace patience hyperparameter
                if "patience" in cb_params:
                    patience = config["hyperparameters"].get(f"patience_{cb_name}")
                    if patience is not None:
                        cb_params["patience"] = patience
                # Import and create corresponding callback
                Callback = import_callback(cb_name)
                callbacks.append(Callback(**cb_params))
                if cb_name in ["EarlyStopping"]:
                    # Merged params so a missing user "monitor" falls back to
                    # the default instead of raising KeyError.
                    cb_requires_valid = "val" in cb_params["monitor"].split("_")

        trainer = BaseTrainer(config=config, model=model)
        trainer.callbacks.extend(callbacks)

        last_only, with_pred = preproc_trainer(config)
        last_only = last_only and not cb_requires_valid

        history = trainer.train(with_pred=with_pred, last_only=last_only)

        # save history
        saver.write_history(history)

        result = compute_objective(config["objective"], history)
    else:
        # penalising actions if model cannot be created
        logger.info("Model could not be created returning -Inf!")
        result = -float("inf")

    if np.isnan(result):
        logger.info("Computed objective is NaN returning -Inf instead!")
        result = -float("inf")

    return result
| 4,751 | 34.2 | 271 | py |
deephyper | deephyper-master/deephyper/nas/run/__init__.py | """The :mod:`deephyper.nas.run` sub-package provides a set of functions which can evaluates configurations generated by search algorithms of DeepHyper.
"""
from ._run_base_trainer import run_base_trainer
from ._run_distributed_base_trainer import run_distributed_base_trainer
from ._run_debug_arch import run_debug_arch
from ._run_debug_hp_arch import run_debug_hp_arch
from ._run_debug import run_debug
from ._run_debug_slow import run_debug_slow
__all__ = [
"run_base_trainer",
"run_distributed_base_trainer",
"run_debug",
"run_debug_arch",
"run_debug_hp_arch",
"run_debug_slow",
]
try:
from ._run_horovod import run_horovod # noqa: F401
__all__.append("run_horovod")
except Exception:
pass
| 733 | 28.36 | 151 | py |
deephyper | deephyper-master/deephyper/nas/run/_test_horovod.py | """The :func:`deephyper.nas.run.test_horovod.run` function is used to check the good behaviour of a call made by within an Horovod context.
"""
import os
import time
import random
import horovod.tensorflow as hvd
def run(config: dict) -> float:
    """Stateless smoke-test run-function for an Horovod context.

    Initializes Horovod, reports the rank and visible CUDA devices, sleeps a
    few random seconds and returns 0.
    """
    print("hvd init...", end="", flush=True)
    hvd.init()
    print("OK", flush=True)
    print(
        "hvd rank: ",
        hvd.rank(),
        " - CUDA_VISIBLE: ",
        os.environ.get("CUDA_VISIBLE_DEVICES"),
    )
    sleep_time = random.choice([3, 4, 5])
    print(f"sleep {sleep_time}...", end="", flush=True)
    time.sleep(sleep_time)
    print("OK", flush=True)
    return 0
| 722 | 23.1 | 139 | py |
deephyper | deephyper-master/deephyper/nas/preprocessing/_base.py | """The preprocessing module provides a few functions which returns a preprocessing pipeline following the Scikit-Learn API.
"""
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
def stdscaler() -> Pipeline:
    """Standardization: each feature is centered to zero mean and scaled to
    unit standard deviation.

    Returns:
        Pipeline: a pipeline with one step ``StandardScaler``.
    """
    return Pipeline([("stdscaler", StandardScaler())])
def minmaxscaler() -> Pipeline:
    """Min-max scaling where each feature is rescaled to the [0, 1] range.

    Returns:
        Pipeline: a pipeline with one step ``MinMaxScaler``.
    """
    preprocessor = Pipeline([("minmaxscaler", MinMaxScaler())])
    return preprocessor
def minmaxstdscaler() -> Pipeline:
    """Min-max scaling to [0, 1] followed by standardization.

    Returns:
        Pipeline: a pipeline with two steps ``[MinMaxScaler, StandardScaler]``.
    """
    steps = [
        ("minmaxscaler", MinMaxScaler()),
        ("stdscaler", StandardScaler()),
    ]
    return Pipeline(steps)
| 1,247 | 30.2 | 123 | py |
deephyper | deephyper-master/deephyper/nas/preprocessing/__init__.py | from ._base import minmaxstdscaler, stdscaler
__all__ = ["minmaxstdscaler", "stdscaler"]
| 90 | 21.75 | 45 | py |
deephyper | deephyper-master/deephyper/nas/operation/_merge.py | import deephyper as dh
import tensorflow as tf
from ._base import Operation
class Concatenate(Operation):
    """Concatenation operation over the last axis of the incoming tensors.

    When inputs mix 2D and 3D tensors, 2D inputs are expanded with a trailing
    dimension of 1; 3D inputs are then zero-padded along their second
    dimension so all inputs can be concatenated on the last axis.

    Args:
        search_space: the search space the operation belongs to.
        stacked_nodes (list(Node)): nodes to concatenate.
        axis (int): axis to concatenate.
    """

    def __init__(self, search_space, stacked_nodes=None, axis=-1):
        self.search_space = search_space
        self.node = None  # current_node of the operation
        self.stacked_nodes = stacked_nodes
        # NOTE(review): `self.axis` is stored but `__call__` hard-codes
        # axis=-1 for the Keras Concatenate layer — confirm intended.
        self.axis = axis

    def __str__(self):
        return "Concatenate"

    def init(self, current_node):
        # Record the consumer node and wire every stacked node to it.
        self.node = current_node
        if self.stacked_nodes is not None:
            for n in self.stacked_nodes:
                self.search_space.connect(n, self.node)

    def __call__(self, values, **kwargs):
        # case where there is no inputs
        if len(values) == 0:
            return []
        # Maximum shape length among inputs (includes the batch dimension).
        len_shp = max([len(x.get_shape()) for x in values])
        if len_shp > 4:
            raise RuntimeError(
                f"This concatenation is for 2D or 3D tensors only but a {len_shp-1}D is passed!"
            )
        # zeros padding
        if len(values) > 1:
            if all(
                map(
                    lambda x: len(x.get_shape()) == len_shp
                    or len(x.get_shape()) == (len_shp - 1),
                    values,
                )
            ):  # all tensors should have same number of dimensions 2d or 3d, but we can also accept a mix of 2d and 3d tensors
                # we have a mix of 2d and 3d tensors so we are expanding 2d tensors to be 3d with last_dim==1
                for i, v in enumerate(values):
                    if len(v.get_shape()) < len_shp:
                        values[i] = tf.keras.layers.Reshape(
                            (*tuple(v.get_shape()[1:]), 1)
                        )(v)
                # for 3d tensors concatenation is applied along last dim (axis=-1), so we are applying a zero padding to make 2nd dimensions (ie. shape()[1]) equals
                if len_shp == 3:
                    max_len = max(map(lambda x: int(x.get_shape()[1]), values))
                    paddings = map(lambda x: max_len - int(x.get_shape()[1]), values)
                    for i, (p, v) in enumerate(zip(paddings, values)):
                        # Split the padding between left and right sides.
                        lp = p // 2
                        rp = p - lp
                        values[i] = tf.keras.layers.ZeroPadding1D(padding=(lp, rp))(v)
                # elif len_shp == 2 nothing to do
            else:
                raise RuntimeError(
                    f"All inputs of concatenation operation should have same shape length:\n"
                    f"number_of_inputs=={len(values)}\n"
                    f"shape_of_inputs=={[str(x.get_shape()) for x in values]}"
                )
        # concatenation
        if len(values) > 1:
            out = tf.keras.layers.Concatenate(axis=-1)(values)
        else:
            out = values[0]
        return out
class AddByPadding(Operation):
    """Add operation. If tensor are of different shapes a padding will be applied before adding them.

    Args:
        search_space (KSearchSpace): [description]. Defaults to None.
        activation ([type], optional): Activation function to apply after adding ('relu', tanh', 'sigmoid'...). Defaults to None.
        stacked_nodes (list(Node)): nodes to add.
        axis (int): axis to concatenate.
    """

    def __init__(self, search_space, stacked_nodes=None, activation=None, axis=-1):
        self.search_space = search_space
        self.node = None  # current_node of the operation
        self.stacked_nodes = stacked_nodes
        self.activation = activation
        self.axis = axis

    def init(self, current_node):
        # Register graph edges from every stacked node to the current node.
        self.node = current_node
        if self.stacked_nodes is not None:
            for n in self.stacked_nodes:
                self.search_space.connect(n, self.node)

    def __call__(self, values, **kwargs):
        # case where there is no inputs
        if len(values) == 0:
            return []
        # Shallow copy so the caller's list is not mutated in place.
        values = values[:]
        max_len_shp = max([len(x.get_shape()) for x in values])
        # zeros padding
        if len(values) > 1:
            # Expand lower-rank tensors with trailing singleton dimensions so
            # every tensor reaches the maximum rank before padding.
            for i, v in enumerate(values):
                if len(v.get_shape()) < max_len_shp:
                    values[i] = tf.keras.layers.Reshape(
                        (
                            *tuple(v.get_shape()[1:]),
                            *tuple(1 for i in range(max_len_shp - len(v.get_shape()))),
                        )
                    )(v)

            # Maximum size of dimension i over all tensors.
            def max_dim_i(i):
                return max(map(lambda x: int(x.get_shape()[i]), values))

            # max_dims[0] is None because the batch dimension is never padded.
            max_dims = [None] + list(map(max_dim_i, range(1, max_len_shp)))

            # Padding needed by each tensor along dimension i.
            def paddings_dim_i(i):
                return list(map(lambda x: max_dims[i] - int(x.get_shape()[i]), values))

            paddings_dim = list(map(paddings_dim_i, range(1, max_len_shp)))

            for i in range(len(values)):
                paddings = list()
                for j in range(len(paddings_dim)):
                    p = paddings_dim[j][i]
                    # Split the padding between the two sides of the dimension.
                    lp = p // 2
                    rp = p - lp
                    paddings.append([lp, rp])
                # Only apply a Padding layer when some padding is non-zero.
                if sum(map(sum, paddings)) != 0:
                    values[i] = dh.layers.Padding(paddings)(values[i])
        # concatenation
        if len(values) > 1:
            out = tf.keras.layers.Add()(values)
            if self.activation is not None:
                out = tf.keras.layers.Activation(self.activation)(out)
        else:
            out = values[0]
        return out
class AddByProjecting(Operation):
    """Add operation. If tensors are of different shapes a projection will be applied before adding them.

    Args:
        search_space (KSearchSpace): [description]. Defaults to None.
        activation ([type], optional): Activation function to apply after adding ('relu', tanh', 'sigmoid'...). Defaults to None.
        stacked_nodes (list(Node)): nodes to add.
        axis (int): axis to concatenate.
    """

    def __init__(self, search_space, stacked_nodes=None, activation=None, axis=-1):
        self.search_space = search_space
        self.node = None  # current_node of the operation
        self.stacked_nodes = stacked_nodes
        self.activation = activation
        self.axis = axis

    def init(self, current_node):
        # Register graph edges from every stacked node to the current node.
        self.node = current_node
        if self.stacked_nodes is not None:
            for n in self.stacked_nodes:
                self.search_space.connect(n, self.node)

    def __call__(self, values, seed=None, **kwargs):
        # case where there is no inputs
        if len(values) == 0:
            return []
        # Shallow copy so the caller's list is not mutated in place.
        values = values[:]
        max_len_shp = max([len(x.get_shape()) for x in values])
        # projection
        if len(values) > 1:
            # Expand lower-rank tensors with trailing singleton dimensions.
            for i, v in enumerate(values):
                if len(v.get_shape()) < max_len_shp:
                    values[i] = tf.keras.layers.Reshape(
                        (
                            *tuple(v.get_shape()[1:]),
                            *tuple(1 for i in range(max_len_shp - len(v.get_shape()))),
                        )
                    )(v)
            # Project every tensor to the size of the first tensor along
            # self.axis with a Dense layer so the shapes match for Add.
            proj_size = values[0].get_shape()[self.axis]
            for i in range(len(values)):
                if values[i].get_shape()[self.axis] != proj_size:
                    values[i] = tf.keras.layers.Dense(
                        units=proj_size,
                        kernel_initializer=tf.keras.initializers.glorot_uniform(
                            seed=seed
                        ),
                    )(values[i])
        # concatenation
        if len(values) > 1:
            out = tf.keras.layers.Add()(values)
            if self.activation is not None:
                out = tf.keras.layers.Activation(self.activation)(out)
        else:
            out = values[0]
        return out
| 8,004 | 35.221719 | 164 | py |
deephyper | deephyper-master/deephyper/nas/operation/_base.py | import tensorflow as tf
class Operation:
    """Interface of an operation.

    >>> import tensorflow as tf
    >>> from deephyper.nas.space.op import Operation
    >>> Operation(layer=tf.keras.layers.Dense(10))
    Dense

    Args:
        layer (Layer): a ``tensorflow.keras.layers.Layer``.
    """

    def __init__(self, layer: tf.keras.layers.Layer):
        assert isinstance(layer, tf.keras.layers.Layer)
        # Marker checked by __repr__ to display the wrapped layer's class name.
        self.from_keras_layer = True
        self._layer = layer

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        if hasattr(self, "from_keras_layer"):
            return type(self._layer).__name__
        else:
            # Subclasses built without a Keras layer fall back to their own
            # __str__; if that fails, use the class name instead.
            try:
                return str(self)
            except Exception:
                return type(self).__name__

    def __call__(self, tensors: list, seed: int = None, **kwargs):
        """
        Args:
            tensors (list): a list of incoming tensors.

        Returns:
            tensor: an output tensor.
        """
        # A Keras layer accepts either a single tensor or a list of tensors.
        if len(tensors) == 1:
            out = self._layer(tensors[0])
        else:
            out = self._layer(tensors)
        return out

    def init(self, current_node):
        """Preprocess the current operation."""
def operation(cls):
    """Dynamically creates a sub-class of Operation from a Keras layer.

    Args:
        cls (tf.keras.layers.Layer): takes a Keras layer class as input and return an operation class corresponding to this layer.
    """

    def __init__(self, *args, **kwargs):
        # Record the constructor arguments; the wrapped layer is built lazily
        # on the first call so each operation instance owns a single layer.
        self._args = args
        self._kwargs = kwargs
        self._layer = None

    def __repr__(self):
        return cls.__name__

    def __call__(self, inputs, **kwargs):
        if self._layer is None:
            self._layer = cls(*self._args, **self._kwargs)
        # A Keras layer accepts a single tensor or a list of tensors.
        out = self._layer(inputs[0]) if len(inputs) == 1 else self._layer(inputs)
        return out

    attributes = {
        "__init__": __init__,
        "__repr__": __repr__,
        "__call__": __call__,
    }
    return type(cls.__name__, (Operation,), attributes)
class Identity(Operation):
    """Pass-through operation: forwards its single input tensor unchanged."""

    def __init__(self):
        pass

    def __call__(self, inputs, **kwargs):
        n_inputs = len(inputs)
        assert (
            n_inputs == 1
        ), f"{type(self).__name__} as {n_inputs} inputs when 1 is required."
        return inputs[0]
class Tensor(Operation):
    """Operation wrapping a constant tensor, returned as-is on every call."""

    def __init__(self, tensor, *args, **kwargs):
        self.tensor = tensor

    def __str__(self):
        return str(self.tensor)

    def __call__(self, *args, **kwargs):
        return self.tensor
class Zero(Operation):
    """Null operation: always returns an empty list (i.e. no tensor)."""

    def __init__(self):
        self.tensor = []

    def __str__(self):
        return "Zero"

    def __call__(self, *args, **kwargs):
        return self.tensor
class Connect(Operation):
    """Connection node.

    Represents a possibility to create a connection between n1 -> n2.

    Args:
        graph (nx.DiGraph): a graph
        source_node (Node): source, either a single node or a list of nodes
    """

    def __init__(self, search_space, source_node, *args, **kwargs):
        self.search_space = search_space
        self.source_node = source_node
        self.destin_node = None  # set later by init()

    def __str__(self):
        # Build "Connect_<source ids>-><destination id>" for debugging.
        if type(self.source_node) is list:
            if len(self.source_node) > 0:
                ids = str(self.source_node[0].id)
                for n in self.source_node[1:]:
                    ids += "," + str(n.id)
            else:
                ids = "None"
        else:
            ids = self.source_node.id
        # "?" marks a connection whose destination is not decided yet.
        if self.destin_node is None:
            return f"{type(self).__name__}_{ids}->?"
        else:
            return f"{type(self).__name__}_{ids}->{self.destin_node.id}"

    def init(self, current_node):
        """Set the connection in the search_space graph from n1 -> n2."""
        self.destin_node = current_node
        if type(self.source_node) is list:
            for n in self.source_node:
                self.search_space.connect(n, self.destin_node)
        else:
            self.search_space.connect(self.source_node, self.destin_node)

    def __call__(self, value, *args, **kwargs):
        # The connection is transparent at call time: it forwards its input.
        return value
| 4,141 | 25.382166 | 130 | py |
deephyper | deephyper-master/deephyper/nas/operation/__init__.py | """Operations for neural architecture search space definition."""
from ._base import Connect, Identity, Operation, Tensor, Zero, operation
from ._merge import AddByPadding, AddByProjecting, Concatenate
__all__ = [
"AddByPadding",
"AddByProjecting",
"Concatenate",
"Connect",
"Identity",
"Operation",
"operation",
"Tensor",
"Zero",
]
| 370 | 22.1875 | 72 | py |
deephyper | deephyper-master/deephyper/problem/_hyperparameter.py | import copy
import json
import ConfigSpace as cs
import ConfigSpace.hyperparameters as csh
import numpy as np
from ConfigSpace.read_and_write import json as cs_json
import deephyper.core.exceptions as dh_exceptions
import deephyper.skopt
def convert_to_skopt_dim(cs_hp, surrogate_model=None):
    """Convert a single ConfigSpace hyperparameter into a scikit-optimize dimension.

    Args:
        cs_hp: a ``ConfigSpace.hyperparameters.Hyperparameter`` instance.
        surrogate_model (str, optional): the type of surrogate model/base
            estimator, used to choose the encoding of categorical variables.
            Defaults to None.

    Raises:
        TypeError: if the hyperparameter type is not supported.

    Returns:
        A ``deephyper.skopt.space`` dimension (``Integer``, ``Real`` or ``Categorical``).
    """
    if surrogate_model in ["RF", "ET", "GBRT"]:
        # models not sensitive to the metric space such as trees
        surrogate_model_type = "rule_based"
    else:
        # models sensitive to the metric space such as GP, neural networks
        surrogate_model_type = "distance_based"
    if isinstance(cs_hp, csh.UniformIntegerHyperparameter):
        skopt_dim = deephyper.skopt.space.Integer(
            low=cs_hp.lower,
            high=cs_hp.upper,
            prior="log-uniform" if cs_hp.log else "uniform",
            name=cs_hp.name,
        )
    elif isinstance(cs_hp, csh.UniformFloatHyperparameter):
        skopt_dim = deephyper.skopt.space.Real(
            low=cs_hp.lower,
            high=cs_hp.upper,
            prior="log-uniform" if cs_hp.log else "uniform",
            name=cs_hp.name,
        )
    elif isinstance(cs_hp, csh.CategoricalHyperparameter):
        # the transform is important if we don't want the complexity of trees
        # to explode with categorical variables
        skopt_dim = deephyper.skopt.space.Categorical(
            categories=cs_hp.choices,
            name=cs_hp.name,
            transform="onehot" if surrogate_model_type == "distance_based" else "label",
        )
    elif isinstance(cs_hp, csh.OrdinalHyperparameter):
        categories = list(cs_hp.sequence)
        # Purely numeric ordinals keep their raw values; otherwise fall back
        # to label encoding.
        if all(
            isinstance(x, (int, np.integer)) or isinstance(x, (float, np.floating))
            for x in categories
        ):
            transform = "identity"
        else:
            transform = "label"
        skopt_dim = deephyper.skopt.space.Categorical(
            categories=categories, name=cs_hp.name, transform=transform
        )
    else:
        raise TypeError(f"Cannot convert hyperparameter of type {type(cs_hp)}")
    return skopt_dim
def convert_to_skopt_space(cs_space, surrogate_model=None):
    """Convert a ConfigurationSpace to a scikit-optimize Space.

    Args:
        cs_space (ConfigurationSpace): the ``ConfigurationSpace`` to convert.
        surrogate_model (str, optional): the type of surrogate model/base estimator used to perform Bayesian optimization. Defaults to None.

    Raises:
        TypeError: if the input space is not a ConfigurationSpace.
        RuntimeError: if the input space contains forbiddens.
        RuntimeError: if the input space contains conditions.

    Returns:
        deephyper.skopt.space.Space: a scikit-optimize Space.
    """
    # Verify pre-conditions: only flat, unconstrained spaces can be converted.
    if not isinstance(cs_space, cs.ConfigurationSpace):
        raise TypeError("Input space should be of type ConfigurationSpace")
    if len(cs_space.get_conditions()) > 0:
        raise RuntimeError("Cannot convert a ConfigSpace with Conditions!")
    if len(cs_space.get_forbiddens()) > 0:
        raise RuntimeError("Cannot convert a ConfigSpace with Forbiddens!")

    # Map every hyperparameter to its scikit-optimize dimension.
    dimensions = [
        convert_to_skopt_dim(hp, surrogate_model)
        for hp in cs_space.get_hyperparameters()
    ]
    return deephyper.skopt.space.Space(dimensions)
def check_hyperparameter(parameter, name=None, default_value=None):
    """Check if the passed parameter is a valid description of an hyperparameter.

    :meta private:

    Args:
        parameter (str|Hyperparameter): an instance of ``ConfigSpace.hyperparameters.hyperparameter`` or a synthetic description (e.g., ``list``, ``tuple``).
        name (str): the name of the hyperparameter. Only required when the parameter is not a ``ConfigSpace.hyperparameters.hyperparameter``.
        default_value: a default value for the hyperparameter.

    Returns:
        Hyperparameter: the ConfigSpace hyperparameter instance corresponding to the ``parameter`` description.
    """
    # Objects that already are ConfigSpace hyperparameters pass through.
    if isinstance(parameter, csh.Hyperparameter):
        return parameter

    if not isinstance(parameter, (list, tuple, np.ndarray, dict)):
        raise ValueError(
            "Shortcut definition of an hyper-parameter has to be a list, tuple, array or dict."
        )

    if not (type(name) is str):
        raise ValueError("The 'name' of an hyper-parameter should be a string!")

    kwargs = {}
    if default_value is not None:
        kwargs["default_value"] = default_value

    if type(parameter) is tuple:  # Range of reals or integers
        # A range is either (low, high) or (low, high, prior).
        if len(parameter) == 2:
            prior = "uniform"
        elif len(parameter) == 3:
            prior = parameter[2]
            assert prior in [
                "uniform",
                "log-uniform",
            ], f"Prior has to be 'uniform' or 'log-uniform' when {prior} was given for parameter '{name}'"
            parameter = parameter[:2]
        else:
            # Bug fix: a tuple of any other length previously crashed with an
            # UnboundLocalError on 'prior'; raise an explicit error instead.
            raise ValueError(
                f"Tuple definition of hyper-parameter '{name}' should have length 2 or 3, got {len(parameter)}."
            )
        log = prior == "log-uniform"
        # All-int bounds define an integer range; any float makes it real.
        if all([isinstance(p, int) for p in parameter]):
            return csh.UniformIntegerHyperparameter(
                name=name, lower=parameter[0], upper=parameter[1], log=log, **kwargs
            )
        elif any([isinstance(p, float) for p in parameter]):
            return csh.UniformFloatHyperparameter(
                name=name, lower=parameter[0], upper=parameter[1], log=log, **kwargs
            )
    elif type(parameter) is list:  # Categorical
        # Strings/booleans imply an unordered categorical variable; purely
        # numeric values imply an ordered (ordinal) variable.
        if any(
            [isinstance(p, (str, bool)) or isinstance(p, np.bool_) for p in parameter]
        ):
            return csh.CategoricalHyperparameter(name, choices=parameter, **kwargs)
        elif all([isinstance(p, (int, float)) for p in parameter]):
            return csh.OrdinalHyperparameter(name, sequence=parameter, **kwargs)
    elif type(parameter) is dict:  # Integer or Real distribution
        # Normal
        if "mu" in parameter and "sigma" in parameter:
            if type(parameter["mu"]) is float:
                return csh.NormalFloatHyperparameter(name=name, **parameter, **kwargs)
            elif type(parameter["mu"]) is int:
                return csh.NormalIntegerHyperparameter(name=name, **parameter, **kwargs)
            else:
                raise ValueError(
                    "Wrong hyperparameter definition! 'mu' should be either a float or an integer."
                )

    # Anything that did not match a branch above is an invalid description.
    raise ValueError(
        f"Invalid dimension {name}: {parameter}. Read the documentation for"
        f" supported types."
    )
class HpProblem:
    """Class to define an hyperparameter problem.

    >>> from deephyper.problem import HpProblem
    >>> problem = HpProblem()

    Args:
        config_space (ConfigurationSpace, optional): In case the ``HpProblem`` is defined from a `ConfigurationSpace`.
    """

    def __init__(self, config_space=None):
        if config_space is not None and not (
            isinstance(config_space, cs.ConfigurationSpace)
        ):
            raise ValueError(
                "Parameter 'config_space' should be an instance of ConfigurationSpace!"
            )
        # Deep-copy so the caller's ConfigurationSpace is never mutated.
        if config_space:
            self._space = copy.deepcopy(config_space)
        else:
            self._space = cs.ConfigurationSpace()
        self.references = []  # starting points

    def __str__(self):
        return repr(self)

    def __repr__(self):
        # Delegate to the underlying ConfigurationSpace representation.
        prob = repr(self._space)
        return prob

    def add_hyperparameter(
        self, value, name: str = None, default_value=None
    ) -> csh.Hyperparameter:
        """Add an hyperparameter to the ``HpProblem``.

        Hyperparameters can be added to a ``HpProblem`` with a short syntax:

        >>> problem.add_hyperparameter((0, 10), "discrete", default_value=5)
        >>> problem.add_hyperparameter((0.0, 10.0), "real", default_value=5.0)
        >>> problem.add_hyperparameter([0, 10], "categorical", default_value=0)

        Sampling distributions can be provided:

        >>> problem.add_hyperparameter((0.0, 10.0, "log-uniform"), "real", default_value=5.0)

        It is also possible to use `ConfigSpace <https://automl.github.io/ConfigSpace/master/API-Doc.html#hyperparameters>`_ ``Hyperparameters``:

        >>> import ConfigSpace.hyperparameters as csh
        >>> csh_hp = csh.UniformIntegerHyperparameter(
        ...     name='uni_int', lower=10, upper=100, log=False)
        >>> problem.add_hyperparameter(csh_hp)

        Args:
            value (tuple or list or ConfigSpace.Hyperparameter): a valid hyperparametr description.
            name (str): The name of the hyperparameter to add.
            default_value (float or int or str): A default value for the corresponding hyperparameter.

        Returns:
            ConfigSpace.Hyperparameter: a ConfigSpace ``Hyperparameter`` object corresponding to the ``(value, name, default_value)``.
        """
        if not (type(name) is str or name is None):
            raise dh_exceptions.problem.SpaceDimNameOfWrongType(name)
        # Normalize the shortcut syntax into a ConfigSpace hyperparameter.
        csh_parameter = check_hyperparameter(value, name, default_value=default_value)
        self._space.add_hyperparameter(csh_parameter)
        return csh_parameter

    def add_hyperparameters(self, hp_list):
        """Add a list of hyperparameters. It can be useful when a list of ``ConfigSpace.Hyperparameter`` are defined and we need to add them to the ``HpProblem``.

        Args:
            hp_list (ConfigSpace.Hyperparameter): a list of ConfigSpace hyperparameters.

        Returns:
            list: The list of added hyperparameters.
        """
        return [self.add_hyperparameter(hp) for hp in hp_list]

    def add_forbidden_clause(self, clause):
        """Add a `forbidden clause <https://automl.github.io/ConfigSpace/master/API-Doc.html#forbidden-clauses>`_ to the ``HpProblem``.

        For example if we want to optimize :math:`\\frac{1}{x}` where :math:`x` cannot be equal to 0:

        >>> from deephyper.problem import HpProblem
        >>> import ConfigSpace as cs
        >>> problem = HpProblem()
        >>> x = problem.add_hyperparameter((0.0, 10.0), "x")
        >>> problem.add_forbidden_clause(cs.ForbiddenEqualsClause(x, 0.0))

        Args:
            clause: a ConfigSpace forbidden clause.
        """
        self._space.add_forbidden_clause(clause)

    def add_condition(self, condition):
        """Add a `condition <https://automl.github.io/ConfigSpace/master/API-Doc.html#conditions>`_ to the ``HpProblem``.

        >>> from deephyper.problem import HpProblem
        >>> import ConfigSpace as cs
        >>> problem = HpProblem()
        >>> x = problem.add_hyperparameter((0.0, 10.0), "x")
        >>> y = problem.add_hyperparameter((1e-4, 1.0), "y")
        >>> problem.add_condition(cs.LessThanCondition(y, x, 1.0))

        Args:
            condition: A ConfigSpace condition.
        """
        self._space.add_condition(condition)

    def add_conditions(self, conditions: list) -> None:
        """Add a list of `condition <https://automl.github.io/ConfigSpace/master/API-Doc.html#conditions>`_ to the ``HpProblem``.

        Args:
            conditions (list): A list of ConfigSpace conditions.
        """
        self._space.add_conditions(conditions)

    @property
    def space(self):
        """The wrapped ConfigSpace object."""
        return self._space

    @property
    def hyperparameter_names(self):
        """The list of hyperparameters names."""
        return self._space.get_hyperparameter_names()

    def check_configuration(self, parameters: dict):
        """Check if a configuration is valid. Raise an error if not."""
        # Constructing the Configuration triggers ConfigSpace validation too.
        config = cs.Configuration(self._space, parameters)
        self._space.check_configuration(config)

    @property
    def default_configuration(self):
        """The default configuration as a dictionnary."""
        config = self._space.get_default_configuration().get_dictionary()
        return config

    def to_json(self):
        """Returns a dict version of the space which can be saved as JSON."""
        json_format = json.loads(cs_json.write(self._space))
        return json_format
| 12,175 | 37.653968 | 162 | py |
deephyper | deephyper-master/deephyper/problem/_neuralarchitecture.py | from collections import OrderedDict
from copy import deepcopy
from inspect import signature
import ConfigSpace.hyperparameters as csh
import tensorflow as tf
from deephyper.core.exceptions.problem import (
NaProblemError,
ProblemLoadDataIsNotCallable,
ProblemPreprocessingIsNotCallable,
SearchSpaceBuilderMissingParameter,
WrongProblemObjective,
)
from deephyper.nas.run._util import get_search_space, setup_data
from deephyper.problem import HpProblem
class NaProblem:
"""A Neural Architecture Problem specification for Neural Architecture Search.
>>> from deephyper.problem import NaProblem
>>> from deephyper.nas.preprocessing import minmaxstdscaler
>>> from deepspace.tabular import OneLayerSpace
>>> Problem = NaProblem()
>>> Problem.load_data(load_data)
>>> Problem.preprocessing(minmaxstdscaler)
>>> Problem.search_space(OneLayerSpace)
>>> Problem.hyperparameters(
... batch_size=100,
... learning_rate=0.1,
... optimizer='adam',
... num_epochs=10,
... callbacks=dict(
... EarlyStopping=dict(
... monitor='val_r2',
... mode='max',
... verbose=0,
... patience=5
... )
... )
... )
>>> Problem.loss('mse')
>>> Problem.metrics(['r2'])
>>> Problem.objective('val_r2__last')
"""
def __init__(self):
self._space = OrderedDict()
self._hp_space = HpProblem()
self._space["metrics"] = []
self._space["hyperparameters"] = dict(verbose=0)
    def __repr__(self):
        # NOTE(review): assumes 'search_space', 'load_data' and 'objective'
        # have been defined; repr raises KeyError otherwise — confirm intended.
        # Resolve the preprocessing function location, if one was defined.
        preprocessing = (
            None
            if self._space.get("preprocessing") is None
            else module_location(self._space["preprocessing"]["func"])
        )
        # One bullet line per defined hyperparameter.
        hps = "".join(
            [
                f"\n * {h}: {self._space['hyperparameters'][h]}"
                for h in self._space["hyperparameters"]
            ]
        )
        # Metrics are stored either as a list or as a dict (per output name).
        if type(self._space["metrics"]) is list:
            metrics = "".join([f"\n * {m}" for m in self._space["metrics"]])
        else:
            metrics = "".join(
                [f"\n * {m[0]}: {m[1]}" for m in self._space["metrics"].items()]
            )
        # A callable objective is displayed through its module location.
        objective = self._space["objective"]
        if not type(objective) is str:
            objective = module_location(objective)
        out = (
            f"Problem is:\n"
            f" - search space : {module_location(self._space['search_space']['class'])}\n"
            f" - data loading : {module_location(self._space['load_data']['func'])}\n"
            f" - preprocessing : {preprocessing}\n"
            f" - hyperparameters: {hps}\n"
            f" - loss : {self._space['loss']}\n"
            f" - metrics : {metrics}\n"
            f" - objective : {objective}\n"
        )
        return out
def load_data(self, func: callable, **kwargs):
"""Define the function loading the data.
.. code-block:: python
Problem.load_data(load_data, load_data_kwargs)
This ``load_data`` callable can follow two different interfaces: Numpy arrays or generators.
1. **Numpy arrays**:
In the case of Numpy arrays, the callable passed to ``Problem.load_data(...)`` has to return the following tuple: ``(X_train, y_train), (X_valid, y_valid)``. In the most simple case where the model takes a single input, each of these elements is a Numpy array. Generally, ``X_train`` and ``y_train`` have to be of the same length (i.e., same ``array.shape[0]``) which is also the case for ``X_valid`` and ``y_valid``. Similarly, the shape of the elements of ``X_train`` and ``X_valid`` which is also the case for ``y_train`` and ``y_valid``. An example ``load_data`` function can be
.. code-block:: python
import numpy as np
def load_data(N=100):
X = np.zeros((N, 1))
y = np.zeros((N,1))
return (X, y), (X, y)
It is also possible for the model to take several inputs. In fact, experimentaly it can be notices that separating some inputs with different inputs can significantly help the learning of the model. Also, sometimes different inputs may be of the "types" for example two molecular fingerprints. In this case, it can be very interesting to share the weights of the model to process these two inputs. In the case of multi-inputs models the ``load_data`` function will also return ``(X_train, y_train), (X_valid, y_valid)`` bu where ``X_train`` and ``X_valid`` are two lists of Numpy arrays. For example, the following is correct:
.. code-block:: python
import numpy as np
def load_data(N=100):
X = np.zeros((N, 1))
y = np.zeros((N,1))
return ([X, X], y), ([X, X], y)
2. **Generators**:
Returning generators with a single input:
.. code-block:: python
def load_data(N=100):
tX, ty = np.zeros((N,1)), np.zeros((N,1))
vX, vy = np.zeros((N,1)), np.zeros((N,1))
def train_gen():
for x, y in zip(tX, ty):
yield ({"input_0": x}, y)
def valid_gen():
for x, y in zip(vX, vy):
yield ({"input_0": x}, y)
res = {
"train_gen": train_gen,
"train_size": N,
"valid_gen": valid_gen,
"valid_size": N,
"types": ({"input_0": tf.float64}, tf.float64),
"shapes": ({"input_0": (1, )}, (1, ))
}
return res
Returning generators with multiple inputs:
.. code-block:: python
def load_data(N=100):
tX0, tX1, ty = np.zeros((N,1)), np.zeros((N,1)), np.zeros((N,1)),
vX0, vX1, vy = np.zeros((N,1)), np.zeros((N,1)), np.zeros((N,1)),
def train_gen():
for x0, x1, y in zip(tX0, tX1, ty):
yield ({
"input_0": x0,
"input_1": x1
}, y)
def valid_gen():
for x0, x1, y in zip(vX0, vX1, vy):
yield ({
"input_0": x0,
"input_1": x1
}, y)
res = {
"train_gen": train_gen,
"train_size": N,
"valid_gen": valid_gen,
"valid_size": N,
"types": ({"input_0": tf.float64, "input_1": tf.float64}, tf.float64),
"shapes": ({"input_0": (5, ), "input_1": (5, )}, (1, ))
}
return res
Args:
func (callable): the load data function.
"""
if not callable(func):
raise ProblemLoadDataIsNotCallable(func)
self._space["load_data"] = {"func": func, "kwargs": kwargs}
def augment(self, func: callable, **kwargs):
"""
:meta private:
"""
if not callable(func):
raise ProblemLoadDataIsNotCallable(func)
self._space["augment"] = {"func": func, "kwargs": kwargs}
def search_space(self, space_class, **kwargs):
"""Set a search space for neural architecture search.
Args:
space_class (KSearchSpace): an object of type ``KSearchSpace`` which has to implement the ``build()`` method.
Raises:
SearchSpaceBuilderMissingParameter: raised when either of ``(input_shape, output_shape)`` are missing parameters of ``func``.
"""
sign = signature(space_class)
if "input_shape" not in sign.parameters:
raise SearchSpaceBuilderMissingParameter("input_shape")
if "output_shape" not in sign.parameters:
raise SearchSpaceBuilderMissingParameter("output_shape")
self._space["search_space"] = {"class": space_class, "kwargs": kwargs}
def add_hyperparameter(
self, value, name: str = None, default_value=None
) -> csh.Hyperparameter:
"""Add hyperparameters to search the neural architecture search problem.
>>> Problem.hyperparameters(
... batch_size=problem.add_hyperparameter((32, 256), "batch_size")
... )
Args:
value: a hyperparameter description.
name: a name of the defined hyperparameter, the same as the current key.
default_value (Optional): a default value of the hyperparameter.
Returns:
Hyperparameter: the defined hyperparameter.
"""
return self._hp_space.add_hyperparameter(value, name, default_value)
def preprocessing(self, func: callable):
"""Define how to preprocess your data.
Args:
func (callable): a function which returns a preprocessing scikit-learn ``Pipeline``.
"""
if not callable(func):
raise ProblemPreprocessingIsNotCallable(func)
self._space["preprocessing"] = {"func": func}
def hyperparameters(self, **kwargs):
"""Define hyperparameters used to evaluate generated architectures.
Hyperparameters can be defined such as:
.. code-block:: python
Problem.hyperparameters(
batch_size=256,
learning_rate=0.01,
optimizer="adam",
num_epochs=20,
verbose=0,
callbacks=dict(...),
)
"""
if self._space.get("hyperparameters") is None:
self._space["hyperparameters"] = dict()
self._space["hyperparameters"].update(kwargs)
def loss(self, loss, loss_weights=None, class_weights=None):
"""Define the loss used to train generated architectures.
It can be a ``str`` corresponding to a Keras loss function:
.. code-block:: python
problem.loss("categorical_crossentropy")
A custom loss function can also be defined:
.. code-block:: python
def NLL(y, rv_y):
return -rv_y.log_prob(y)
problem.loss(NLL)
The loss can be automatically searched:
.. code-block:: python
problem.loss(
problem.add_hyperparameter(
["mae", "mse", "huber_loss", "log_cosh", "mape", "msle"], "loss"
)
)
It is possible to define a different loss for each output:
.. code-block:: python
problem.loss(
loss={"output_0": "mse", "output_1": "mse"},
loss_weights={"output_0": 0.0, "output_1": 1.0},
)
Args:
loss (str or callable orlist): a string indicating a specific loss function.
loss_weights (list): Optional.
class_weights (dict): Optional.
"""
if not (type(loss) is csh.CategoricalHyperparameter):
if not type(loss) is str and not callable(loss) and not type(loss) is dict:
raise RuntimeError(
f"The loss should be either a str, dict or a callable when it's of type {type(loss)}"
)
if (
type(loss) is dict
and loss_weights is not None
and len(loss) != len(loss_weights)
):
raise RuntimeError(
f"The losses list (len={len(loss)}) and the weights list (len={len(loss_weights)}) should be of same length!"
)
self._space["loss"] = loss
if loss_weights is not None:
self._space["loss_weights"] = loss_weights
if class_weights is not None:
self._space["class_weights"] = class_weights
def metrics(self, metrics=None):
"""Define a list of metrics for the training of generated architectures.
A list of metrics can be defined to be monitored or used as an objective. It can be a keyword or a callable. For example, if it is a keyword:
.. code-block:: python
problem.metrics(["acc"])
In case you need multiple metrics:
.. code-block:: python
problem.metrics["mae", "mse"]
In case you want to use a custom metric:
.. code-block:: python
def sparse_perplexity(y_true, y_pred):
cross_entropy = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
perplexity = tf.pow(2.0, cross_entropy)
return perplexity
problem.metrics([sparse_perplexity])
Args:
metrics (list(str or callable) or dict): If ``str`` the metric should be defined in Keras or in DeepHyper. If ``callable`` it should take 2 arguments ``(y_pred, y_true)`` which are a prediction and a true value respectively.
"""
if metrics is None:
metrics = []
self._space["metrics"] = metrics
def check_objective(self, objective):
"""
:meta private:
"""
if not type(objective) is str and not callable(objective):
raise WrongProblemObjective(objective)
# elif type(objective) is str:
# list_suffix = ["__min", "__max", "__last"]
# for suffix in list_suffix:
# if suffix in objective:
# objective = objective.replace(suffix, "")
# break # only one suffix autorized
# objective = objective.replace("val_", "")
# possible_names = list()
# if type(self._space["metrics"]) is dict:
# metrics = list(self._space["metrics"].values())
# for k in self._space["metrics"].keys():
# objective = objective.replace(f"{k}_", "")
# else: # assuming it s a list
# metrics = self._space["metrics"]
# for val in ["loss"] + metrics:
# if callable(val):
# possible_names.append(val.__name__)
# else:
# possible_names.append(val)
# if not (objective in possible_names):
# raise WrongProblemObjective(objective, possible_names)
def objective(self, objective):
"""Define the objective you want to maximize for the search.
If you want to use the validation accuracy at the last epoch:
.. code-block:: python
problem.objective("val_acc")
.. note:: Be sure to define ``acc`` in the ``problem.metrics(["acc"])``.
It can accept some prefix and suffix such as ``__min, __max, __last``:
.. code-block:: python
problem.objective("-val_acc__max")
It can be a ``callable``:
.. code-block:: python
def myobjective(history: dict) -> float:
return history["val_acc"][-1]
problem.objective(myobjective)
Args:
objective (str or callable): The objective will be maximized. If ``objective`` is ``str`` then it should be either 'loss' or a defined metric. You can use the ``'val_'`` prefix when you want to select the objective on the validation set. You can use one of ``['__min', '__max', '__last']`` which respectively means you want to select the min, max or last value among all epochs. Using '__last' will save a consequent compute time because the evaluation will not compute metrics on validation set for all epochs but the last. If ``objective`` is callable it should return a scalar value (i.e. float) and it will take a ``dict`` parameter. The ``dict`` will contain keys corresponding to loss and metrics such as ``['loss', 'val_loss', 'r2', 'val_r2]``, it will also contains ``'n_parameters'`` which corresponds to the number of trainable parameters of the current model, ``'training_time'`` which corresponds to the time required to train the model, ``'predict_time'`` which corresponds to the time required to make a prediction over the whole validation set. If this callable has a ``'__last'`` suffix then the evaluation will only compute validation loss/metrics for the last epoch. If this callable has contains 'with_pred' in its name then the ``dict`` will have two other keys ``['y_pred', 'y_true']`` where ``'y_pred`` corresponds to prediction of the model on validation set and ``'y_true'`` corresponds to real prediction.
Raise:
WrongProblemObjective: raised when the objective is of a wrong definition.
"""
if (
not self._space.get("loss") is None
and not self._space.get("metrics") is None
):
self.check_objective(objective)
else:
raise NaProblemError(
".loss and .metrics should be defined before .objective!"
)
self._space["objective"] = objective
@property
def space(self):
keys = list(self._space.keys())
keys.sort()
space = OrderedDict(**{d: self._space[d] for d in keys})
return space
def build_search_space(self, seed=None):
"""Build and return a search space object using the infered data shapes after loading data.
Returns:
KSearchSpace: A search space instance.
"""
config = self.space
input_shape, output_shape, _ = setup_data(config, add_to_config=False)
search_space = get_search_space(config, input_shape, output_shape, seed=seed)
return search_space
def get_keras_model(self, arch_seq: list) -> tf.keras.Model:
"""Get a keras model object from a set of decisions in the current search space.
Args:
arch_seq (list): a list of int of floats describing a choice of operations for the search space defined in the current problem.
"""
search_space = self.build_search_space()
return search_space.sample(arch_seq)
def gen_config(self, arch_seq: list, hp_values: list) -> dict:
"""Generate a ``dict`` configuration from the ``arch_seq`` and ``hp_values`` passed.
Args:
arch_seq (list): a valid embedding of a neural network described by the search space of the current ``NaProblem``.
hp_values (list): a valid list of hyperparameters corresponding to the defined hyperparameters of the current ``NaProblem``.
"""
config = deepcopy(self.space)
# architecture DNA
config["arch_seq"] = arch_seq
# replace hp values in the config
hp_names = self._hp_space._space.get_hyperparameter_names()
for hp_name, hp_value in zip(hp_names, hp_values):
if hp_name == "loss":
config["loss"] = hp_value
else:
config["hyperparameters"][hp_name] = hp_value
return config
def extract_hp_values(self, config):
"""Extract the value of hyperparameters present in ``config`` based on the defined hyperparameters in the current ``NaProblem``"""
hp_names = self.hyperparameter_names
hp_values = []
for hp_name in hp_names:
if hp_name == "loss":
hp_values.append(config["loss"])
else:
hp_values.append(config["hyperparameters"][hp_name])
return hp_values
    @property
    def hyperparameter_names(self):
        """The list of hyperparameters names.

        Delegates to the wrapped hyperparameter space object.
        """
        return self._hp_space.hyperparameter_names
    @property
    def default_hp_configuration(self):
        """The default hyperparameter configuration as a dictionary.

        Delegates to the wrapped hyperparameter space object.
        """
        return self._hp_space.default_configuration
def module_location(attr):
    """Return the fully-qualified ``module.name`` location of *attr*.

    :meta private:
    """
    return ".".join((attr.__module__, attr.__name__))
| 19,985 | 36.287313 | 1,442 | py |
deephyper | deephyper-master/deephyper/problem/__init__.py | """This module provides tools to define hyperparameter and neural architecture search problems. Some features of this module are based on the `ConfigSpace <https://automl.github.io/ConfigSpace/master/>`_ project.
"""
from ConfigSpace import * # noqa: F401, F403
from ._hyperparameter import HpProblem
__all__ = ["HpProblem"]
# make import of NaProblem optional: it requires heavy optional dependencies
try:
    from ._neuralarchitecture import NaProblem  # noqa: F401

    __all__.append("NaProblem")
except ModuleNotFoundError as e:
    # Silently skip NaProblem when one of its optional dependencies is
    # missing, but re-raise any other import failure.
    if "tensorflow" in str(e):
        pass
    elif "networkx" in str(e):
        pass
    else:
        raise e
# NOTE: the trailing `__all__ = ["HpProblem"]` that used to follow this block
# was removed — it unconditionally clobbered the "NaProblem" entry appended
# above, so NaProblem was never exported via `__all__`.
| 636 | 26.695652 | 212 | py |
deephyper | deephyper-master/deephyper/keras/utils.py | 0 | 0 | 0 | py | |
deephyper | deephyper-master/deephyper/keras/__init__.py | 0 | 0 | 0 | py | |
deephyper | deephyper-master/deephyper/keras/callbacks/learning_rate_warmup.py | """
Adapted from Horovod implementation: https://github.com/horovod/horovod/blob/master/horovod/keras/callbacks.py
"""
import tensorflow as tf
class LearningRateScheduleCallback(tf.keras.callbacks.Callback):
    """Schedule the optimizer learning rate as ``initial_lr * multiplier(epoch)``.

    Adapted from Horovod's Keras callbacks (see module docstring).

    Args:
        initial_lr (float): base learning rate the multiplier is applied to.
        multiplier (float or callable): constant factor, or a function mapping
            a (possibly fractional) epoch to a factor.
        start_epoch (int): first epoch on which the schedule is active.
        end_epoch (int): first epoch on which the schedule stops being applied
            (``None`` means no upper bound).
        staircase (bool): if True, adjust the rate only on the first batch of
            each epoch; otherwise interpolate per batch.
        momentum_correction (bool): also rescale the optimizer momentum by the
            learning-rate ratio (restored after each batch).
        steps_per_epoch (int): needed for non-staircase mode when it cannot be
            auto-detected from the fit parameters.
    """

    def __init__(
        self,
        initial_lr,
        multiplier,
        start_epoch=0,
        end_epoch=None,
        staircase=True,
        momentum_correction=True,
        steps_per_epoch=None,
        *args
    ):
        super(LearningRateScheduleCallback, self).__init__(*args)
        self.start_epoch = start_epoch
        self.end_epoch = end_epoch
        self.staircase = staircase
        self.momentum_correction = momentum_correction
        self.initial_lr = initial_lr
        self.restore_momentum = None
        self.steps_per_epoch = steps_per_epoch
        self.current_epoch = None

        # A constant multiplier is equivalent to a staircase schedule.
        if not callable(multiplier):
            self.staircase = True
            self.multiplier = lambda epoch: multiplier
        else:
            self.multiplier = multiplier

        if self.initial_lr is None:
            raise ValueError("Parameter `initial_lr` is required")

    def _autodetect_steps_per_epoch(self):
        # Infer the number of steps per epoch from Keras' fit parameters.
        if self.params.get("steps"):
            # The number of steps is provided in the parameters.
            return self.params["steps"]
        elif self.params.get("samples") and self.params.get("batch_size"):
            # Compute the number of steps per epoch using # of samples and a batch size.
            return self.params["samples"] // self.params["batch_size"]
        else:
            raise ValueError(
                "Could not autodetect the number of steps per epoch. "
                "Please specify the steps_per_epoch parameter to the "
                "%s() or upgrade to the latest version of Keras."
                % self.__class__.__name__
            )

    def _adjust_learning_rate(self, epoch):
        # Set lr to initial_lr * multiplier(epoch), optionally rescaling the
        # optimizer momentum by the same ratio (momentum correction).
        old_lr = tf.keras.backend.get_value(self.model.optimizer.lr)
        new_lr = self.initial_lr * self.multiplier(epoch)
        tf.keras.backend.set_value(self.model.optimizer.lr, new_lr)

        if hasattr(self.model.optimizer, "momentum") and self.momentum_correction:
            # See the paper cited above for more information about momentum correction.
            self.restore_momentum = tf.keras.backend.get_value(
                self.model.optimizer.momentum
            )
            tf.keras.backend.set_value(
                self.model.optimizer.momentum, self.restore_momentum * new_lr / old_lr
            )

    def _restore_momentum_if_needed(self):
        # Undo the per-batch momentum correction applied above.
        if self.restore_momentum:
            tf.keras.backend.set_value(
                self.model.optimizer.momentum, self.restore_momentum
            )
            self.restore_momentum = None

    def on_train_begin(self, logs=None):
        if self.initial_lr is None:
            self.initial_lr = tf.keras.backend.get_value(self.model.optimizer.lr)
        if not self.staircase and not self.steps_per_epoch:
            self.steps_per_epoch = self._autodetect_steps_per_epoch()

    def on_epoch_begin(self, epoch, logs=None):
        self.current_epoch = epoch

    def on_batch_begin(self, batch, logs=None):
        if self.current_epoch < self.start_epoch or (
            self.end_epoch is not None and self.current_epoch >= self.end_epoch
        ):
            # Outside of the adjustment scope.
            return

        if self.staircase and batch == 0:
            # Do on first batch of every epoch.
            self._adjust_learning_rate(self.current_epoch)
        elif not self.staircase:
            # Fractional epoch for smooth per-batch interpolation.
            epoch = self.current_epoch + float(batch) / self.steps_per_epoch
            self._adjust_learning_rate(epoch)

    def on_batch_end(self, batch, logs=None):
        self._restore_momentum_if_needed()

    def on_epoch_end(self, epoch, logs=None):
        if logs is not None:
            # Log current learning rate.
            logs["lr"] = tf.keras.backend.get_value(self.model.optimizer.lr)
class LearningRateWarmupCallback(LearningRateScheduleCallback):
    """Gradually warm the learning rate up over the first epochs.

    The multiplier ramps linearly from ``1 / n_replicas`` at epoch 0 up to 1
    at ``warmup_epochs``, so training starts at ``initial_lr / n_replicas``
    and reaches ``initial_lr`` when the warmup ends.

    Args:
        n_replicas (int): number of data-parallel replicas.
        initial_lr (float): target learning rate after warmup.
        warmup_epochs (int): number of epochs over which to warm up.
        momentum_correction (bool): see ``LearningRateScheduleCallback``.
        steps_per_epoch (int): see ``LearningRateScheduleCallback``.
        verbose (int): if > 0, print a message when the warmup finishes.
    """

    def __init__(
        self,
        n_replicas,
        initial_lr,
        warmup_epochs=5,
        momentum_correction=True,
        steps_per_epoch=None,
        verbose=0,
        *args
    ):
        def multiplier(epoch):
            # Adjust epoch to produce round numbers at the end of each epoch, so that TensorBoard
            # learning rate graphs look better.
            epoch += 1.0 / self.steps_per_epoch
            return 1.0 / n_replicas * (epoch * (n_replicas - 1) / warmup_epochs + 1)

        super(LearningRateWarmupCallback, self).__init__(
            initial_lr,
            multiplier,
            start_epoch=0,
            end_epoch=warmup_epochs,
            staircase=False,
            momentum_correction=momentum_correction,
            steps_per_epoch=steps_per_epoch,
            *args
        )
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs=None):
        super(LearningRateWarmupCallback, self).on_epoch_end(epoch, logs)

        # Announce the end of the warmup once, on its last epoch.
        if epoch == self.end_epoch - 1 and self.verbose > 0:
            new_lr = tf.keras.backend.get_value(self.model.optimizer.lr)
            print(
                "\nEpoch %d: finished gradual learning rate warmup to %g."
                % (epoch + 1, new_lr)
            )
| 5,317 | 35.930556 | 110 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/utils.py | from typing import Type
import deephyper
import deephyper.core.exceptions
import tensorflow as tf
def import_callback(cb_name: str) -> Type[tf.keras.callbacks.Callback]:
    """Import a callback class from its name.

    The name is looked up first in ``tensorflow.keras.callbacks`` and then in
    ``deephyper.keras.callbacks``.

    Args:
        cb_name (str): class name of the callback to import fron ``tensorflow.keras.callbacks`` or ``deephyper.keras.callbacks``.

    Raises:
        DeephyperRuntimeError: raised if the class name of the callback is not registered in corresponding packages.

    Returns:
        tensorflow.keras.callbacks.Callback: the class corresponding to the given class name.
    """
    for module in (tf.keras.callbacks, deephyper.keras.callbacks):
        if cb_name in dir(module):
            return getattr(module, cb_name)
    raise deephyper.core.exceptions.DeephyperRuntimeError(
        f"Callback '{cb_name}' is not registered in tensorflow.keras and deephyper.keras.callbacks."
    )
| 1,000 | 34.75 | 129 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/stop_if_unfeasible.py | import time
import tensorflow as tf
class StopIfUnfeasible(tf.keras.callbacks.Callback):
    """Stop training when the estimated training time exceeds a limit.

    Batch durations are recorded and extrapolated to the full epoch; once at
    least ``patience`` batches have been timed, training is stopped if the
    estimate exceeds ``time_limit``.

    Args:
        time_limit (int): maximum allowed (estimated) training time, seconds.
        patience (int): number of batches to time before trusting the estimate.
    """

    def __init__(self, time_limit=600, patience=20):
        super().__init__()
        self.time_limit = time_limit
        self.timing = list()
        # boolean set to True if the model training has been stopped due to time_limit condition
        self.stopped = False
        self.patience = patience

    def set_params(self, params):
        """Store Keras' fit parameters and derive the steps per epoch."""
        self.params = params

        if self.params["steps"] is None:
            # Derive from sample count, rounding up for the last partial
            # batch. (A duplicated copy of this statement was removed.)
            self.steps = self.params["samples"] // self.params["batch_size"]
            if self.steps * self.params["batch_size"] < self.params["samples"]:
                self.steps += 1
        else:
            self.steps = self.params["steps"]

    def on_batch_begin(self, batch, logs=None):
        """Called at the beginning of a training batch in `fit` methods.

        Subclasses should override for any actions to run.

        Args:
            batch (int): index of batch within the current epoch.
            logs (dict): has keys `batch` and `size` representing the current
                batch number and the size of the batch.
        """
        # Record the batch start time; converted to a duration in on_batch_end.
        self.timing.append(time.time())

    def on_batch_end(self, batch, logs=None):
        """Called at the end of a training batch in `fit` methods.

        Subclasses should override for any actions to run.

        Args:
            batch (int): index of batch within the current epoch.
            logs (dict): metric results for this batch.
        """
        # Replace the stored start timestamp with the batch duration.
        self.timing[-1] = time.time() - self.timing[-1]
        self.avr_batch_time = sum(self.timing) / len(self.timing)
        # Elapsed time plus the average batch time for the remaining steps.
        self.estimate_training_time = sum(self.timing) + self.avr_batch_time * (
            self.steps - len(self.timing)
        )

        if (
            len(self.timing) >= self.patience
            and self.estimate_training_time > self.time_limit
        ):
            self.stopped = True
            self.model.stop_training = True
| 2,047 | 36.236364 | 118 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/stop_on_timeout.py | from datetime import datetime
from tensorflow.keras.callbacks import Callback
class TerminateOnTimeOut(Callback):
    """Stop training when a wall-clock timeout is reached.

    The timeout is checked at the end of every batch.

    Args:
        timeout_in_min (int): maximum training duration in minutes.
            NOTE(review): disabling via ``-1`` only works if
            ``timeout_in_sec`` itself equals ``-1``, which the constructor's
            ``* 60`` scaling never produces — confirm intended semantics.
    """

    def __init__(self, timeout_in_min=10):
        super(TerminateOnTimeOut, self).__init__()
        self.run_timestamp = None  # set when training starts
        self.timeout_in_sec = timeout_in_min * 60

    def on_train_begin(self, logs=None):
        self.run_timestamp = datetime.now()

    def on_batch_end(self, epoch, logs=None):
        run_duration = datetime.now() - self.run_timestamp
        run_in_sec = run_duration.total_seconds()
        if self.timeout_in_sec != -1:
            if run_in_sec >= self.timeout_in_sec:
                print(
                    " - timeout: training time = %2.3fs/%2.3fs"
                    % (run_in_sec, self.timeout_in_sec)
                )
                self.model.stop_training = True
| 1,364 | 40.363636 | 102 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/csv_extended_logger.py | import collections
import io
import time
import csv
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.compat import collections_abc
class CSVExtendedLogger(tf.keras.callbacks.Callback):
    """Callback that streams epoch results to a csv file.

    Extends Keras' CSVLogger by also recording a timestamp and the duration
    of each epoch. Supports all values that can be represented as a string,
    including 1D iterables such as np.ndarray.

    Example:

    .. code-block:: python

        csv_logger = CSVExtendedLogger('training.log')
        model.fit(X_train, Y_train, callbacks=[csv_logger])

    Args:
        filename: filename of the csv file, e.g. 'run/log.csv'.
        separator: string used to separate elements in the csv file.
        append: True: append if file exists (useful for continuing
            training). False: overwrite existing file,
    """

    def __init__(self, filename, separator=",", append=False):
        self.sep = separator
        self.filename = filename
        self.append = append
        self.writer = None
        self.keys = None
        self.append_header = True
        if six.PY2:
            self.file_flags = "b"
            self._open_args = {}
        else:
            self.file_flags = ""
            self._open_args = {"newline": "\n"}
        self.timestamp = None  # start time of the current epoch
        super(CSVExtendedLogger, self).__init__()

    def on_train_begin(self, logs=None):
        if self.append:
            if file_io.file_exists(self.filename):
                # Only write the header if the existing file is empty.
                with open(self.filename, "r" + self.file_flags) as f:
                    self.append_header = not bool(len(f.readline()))
            mode = "a"
        else:
            mode = "w"
        self.csv_file = io.open(
            self.filename, mode + self.file_flags, **self._open_args
        )

    def on_epoch_begin(self, epoch, logs=None):
        self.timestamp = time.time()

    def on_epoch_end(self, epoch, logs=None):
        timestamp = time.time()
        duration = timestamp - self.timestamp  # duration of current epoch
        logs = logs or {}

        def handle_value(k):
            # 1D iterables are rendered as a quoted "[a, b, ...]" cell.
            is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
            if isinstance(k, six.string_types):
                return k
            elif isinstance(k, collections_abc.Iterable) and not is_zero_dim_ndarray:
                return '"[%s]"' % (", ".join(map(str, k)))
            else:
                return k

        if self.keys is None:
            self.keys = sorted(logs.keys())

        if self.model.stop_training:
            # We set NA so that csv parsers do not fail for this last epoch.
            # BUGFIX: the value was previously the tuple ``(k, "NA")``
            # (mis-ported from CSVLogger's generator of pairs), which
            # rendered as '"[k, NA]"' in the csv; it must be the string "NA".
            logs = {k: logs[k] if k in logs else "NA" for k in self.keys}

        if not self.writer:

            class CustomDialect(csv.excel):
                delimiter = self.sep

            fieldnames = ["epoch", "timestamp", "duration"] + self.keys
            if six.PY2:
                fieldnames = [f"{x}" for x in fieldnames]
            self.writer = csv.DictWriter(
                self.csv_file, fieldnames=fieldnames, dialect=CustomDialect
            )
            if self.append_header:
                self.writer.writeheader()

        row_dict = collections.OrderedDict(
            {"epoch": epoch, "timestamp": timestamp, "duration": duration}
        )
        row_dict.update((key, handle_value(logs[key])) for key in self.keys)
        self.writer.writerow(row_dict)
        self.csv_file.flush()

    def on_train_end(self, logs=None):
        self.csv_file.close()
        self.writer = None
| 3,539 | 30.891892 | 85 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/__init__.py | from deephyper.keras.callbacks.utils import import_callback
from deephyper.keras.callbacks.stop_if_unfeasible import StopIfUnfeasible
from deephyper.keras.callbacks.csv_extended_logger import CSVExtendedLogger
from deephyper.keras.callbacks.time_stopping import TimeStopping
from deephyper.keras.callbacks.learning_rate_warmup import (
LearningRateScheduleCallback,
LearningRateWarmupCallback,
)
# Public API of deephyper.keras.callbacks.
__all__ = [
    "import_callback",
    "StopIfUnfeasible",
    "CSVExtendedLogger",
    "TimeStopping",
    "LearningRateScheduleCallback",
    "LearningRateWarmupCallback",
]
| 581 | 31.333333 | 75 | py |
deephyper | deephyper-master/deephyper/keras/callbacks/time_stopping.py | """Callback that stops training when a specified amount of time has passed.
source: https://github.com/tensorflow/addons/blob/master/tensorflow_addons/callbacks/time_stopping.py
"""
import datetime
import time
import tensorflow as tf
class TimeStopping(tf.keras.callbacks.Callback):
    """Stop training when a specified amount of time has passed.

    Args:
        seconds: maximum amount of time before stopping.
            Defaults to 86400 (1 day).
        verbose: verbosity mode. Defaults to 0.
    """

    def __init__(self, seconds: int = 86400, verbose: int = 0):
        super().__init__()
        self.seconds = seconds
        self.verbose = verbose
        # Epoch index at which training was stopped; None while running.
        self.stopped_epoch = None

    def on_train_begin(self, logs=None):
        # Absolute wall-clock deadline, computed once at the start of training.
        self.stopping_time = time.time() + self.seconds

    def on_epoch_end(self, epoch, logs={}):
        # The deadline is only checked at epoch boundaries, so a long epoch
        # can overshoot `seconds`.
        if time.time() >= self.stopping_time:
            self.model.stop_training = True
            self.stopped_epoch = epoch

    def on_train_end(self, logs=None):
        if self.stopped_epoch is not None and self.verbose > 0:
            formatted_time = datetime.timedelta(seconds=self.seconds)
            msg = "Timed stopping at epoch {} after training for {}".format(
                self.stopped_epoch + 1, formatted_time
            )
            print(msg)

    def get_config(self):
        # Serializable configuration so the callback can be re-instantiated.
        config = {"seconds": self.seconds, "verbose": self.verbose}
        base_config = super().get_config()
        return {**base_config, **config}
| 1,499 | 30.25 | 101 | py |
deephyper | deephyper-master/deephyper/keras/layers/_mpnn.py | import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import activations
from tensorflow.keras.layers import Dense
class SparseMPNN(tf.keras.layers.Layer):
    """Message passing cell.

    Embeds node features to ``state_dim`` channels, then applies ``T``
    rounds of message passing.

    Args:
        state_dim (int): number of output channels.
        T (int): number of message passing repetition.
        attn_heads (int): number of attention heads.
        attn_method (str): type of attention methods.
        aggr_method (str): type of aggregation methods.
        activation (str): type of activation functions.
        update_method (str): type of update functions.
    """

    def __init__(
        self,
        state_dim,
        T,
        aggr_method,
        attn_method,
        update_method,
        attn_head,
        activation,
    ):
        # BUGFIX: previously `super().__init__(self)` passed the layer
        # instance as Layer.__init__'s first positional argument
        # (`trainable`); call it without arguments instead.
        super(SparseMPNN, self).__init__()
        self.state_dim = state_dim
        self.T = T
        self.activation = activations.get(activation)
        self.aggr_method = aggr_method
        self.attn_method = attn_method
        self.attn_head = attn_head
        self.update_method = update_method

    def build(self, input_shape):
        # Input embedding and the shared message-passing block.
        self.embed = tf.keras.layers.Dense(self.state_dim, activation=self.activation)
        self.MP = MessagePassing(
            self.state_dim,
            self.aggr_method,
            self.activation,
            self.attn_method,
            self.attn_head,
            self.update_method,
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor (batch size * # nodes * # node features)
                A (tensor): edge pair tensor (batch size * # edges * 2), one is source ID, one is target ID
                E (tensor): edge feature tensor (batch size * # edges * # edge features)
                mask (tensor): node mask tensor to mask out non-existent nodes (batch size * # nodes)
                degree (tensor): node degree tensor for GCN attention (batch size * # edges)

        Returns:
            X (tensor): results after several repetitions of edge network, attention, aggregation and update function (batch size * # nodes * # node features)
        """
        # the input contains a list of five tensors
        X, A, E, mask, degree = inputs
        # edge pair needs to be in the int format
        A = tf.cast(A, tf.int32)
        # this is a limitation of MPNN in general, the node feature is mapped to (batch size * # nodes * # node
        # features)
        X = self.embed(X)
        # run T times message passing with the same MessagePassing block
        for _ in range(self.T):
            X = self.MP([X, A, E, mask, degree])
        return X
class MessagePassing(tf.keras.layers.Layer):
    """Message passing layer.

    Runs one round of message passing followed by the update function, using
    the node mask to zero out non-existent (padded) nodes.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
        attn_method (str): type of attention methods.
        aggr_method (str): type of aggregation methods.
        activation (str): type of activation functions.
        update_method (str): type of update functions.
    """

    def __init__(
        self, state_dim, aggr_method, activation, attn_method, attn_head, update_method
    ):
        # BUGFIX: previously `super().__init__(self)` passed the layer
        # instance as Layer.__init__'s first positional argument
        # (`trainable`); call it without arguments instead.
        super(MessagePassing, self).__init__()
        self.state_dim = state_dim
        self.aggr_method = aggr_method
        self.activation = activation
        self.attn_method = attn_method
        self.attn_head = attn_head
        self.update_method = update_method

    def build(self, input_shape):
        self.message_passer = MessagePasserNNM(
            self.state_dim,
            self.attn_head,
            self.attn_method,
            self.aggr_method,
            self.activation,
        )
        # Dispatch on the configured update function.
        if self.update_method == "gru":
            self.update_functions = UpdateFuncGRU(self.state_dim)
        elif self.update_method == "mlp":
            self.update_functions = UpdateFuncMLP(self.state_dim, self.activation)
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor (batch size * # nodes * state dimension)
                A (tensor): edge pair tensor (batch size * # edges * 2), one is source ID, one is target ID
                E (tensor): edge feature tensor (batch size * # edges * # edge features)
                mask (tensor): node mask tensor to mask out non-existent nodes (batch size * # nodes)
                degree (tensor): node degree tensor for GCN attention (batch size * # edges)

        Returns:
            updated_nodes (tensor): results after edge network, attention, aggregation and update function (batch size * # nodes * state dimension)
        """
        # the input contains a list of five tensors
        X, A, E, mask, degree = inputs
        # use the message passing to generate aggregated results
        # agg_m (batch size * # nodes * state dimension)
        agg_m = self.message_passer([X, A, E, degree])
        # expand the mask to (batch size * # nodes * state dimension)
        mask = tf.tile(mask[..., None], [1, 1, self.state_dim])
        # use the mask to screen out non-existent nodes
        agg_m = tf.multiply(agg_m, mask)
        # update function using the old node feature X and new aggregated node feature agg_m
        updated_nodes = self.update_functions([X, agg_m])
        # use the mask to screen out non-existent nodes
        updated_nodes = tf.multiply(updated_nodes, mask)
        return updated_nodes
class MessagePasserNNM(tf.keras.layers.Layer):
    """Message passing kernel.

    Maps edge features to per-edge weight matrices with a small MLP (the
    "edge network"), applies them to source-node features, modulates the
    messages with an attention function, and aggregates them per target node.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
        attn_method (str): type of attention methods.
        aggr_method (str): type of aggregation methods.
        activation (str): type of activation functions.
    """

    def __init__(self, state_dim, attn_heads, attn_method, aggr_method, activation):
        super(MessagePasserNNM, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads
        self.attn_method = attn_method
        self.aggr_method = aggr_method
        self.activation = activation

    def build(self, input_shape):
        # Edge network (nn1 -> nn2 -> nn3), attention function and bias.
        self.nn1 = tf.keras.layers.Dense(units=32, activation=tf.nn.relu)
        self.nn2 = tf.keras.layers.Dense(units=32, activation=tf.nn.relu)
        self.nn3 = tf.keras.layers.Dense(
            units=self.attn_heads * self.state_dim * self.state_dim,
            activation=tf.nn.relu,
        )
        # Dispatch on the configured attention method.
        if self.attn_method == "gat":
            self.attn_func = AttentionGAT(self.state_dim, self.attn_heads)
        elif self.attn_method == "sym-gat":
            self.attn_func = AttentionSymGAT(self.state_dim, self.attn_heads)
        elif self.attn_method == "cos":
            self.attn_func = AttentionCOS(self.state_dim, self.attn_heads)
        elif self.attn_method == "linear":
            self.attn_func = AttentionLinear(self.state_dim, self.attn_heads)
        elif self.attn_method == "gen-linear":
            self.attn_func = AttentionGenLinear(self.state_dim, self.attn_heads)
        elif self.attn_method == "const":
            self.attn_func = AttentionConst(self.state_dim, self.attn_heads)
        elif self.attn_method == "gcn":
            self.attn_func = AttentionGCN(self.state_dim, self.attn_heads)
        # NOTE(review): an unrecognized attn_method leaves self.attn_func
        # undefined, failing later in call() — confirm upstream validation.
        self.bias = self.add_weight(
            name="attn_bias", shape=[self.state_dim], initializer="zeros"
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor (batch size * # nodes * state dimension)
                A (tensor): edge pair tensor (batch size * # edges * 2), one is source ID, one is target ID
                E (tensor): edge feature tensor (batch size * # edges * # edge features)
                degree (tensor): node degree tensor for GCN attention (batch size * # edges)

        Returns:
            output (tensor): results after edge network, attention and aggregation (batch size * # nodes * state dimension)
        """
        # Edge network to transform edge information to message weight
        # the input contains a list of four tensors
        X, A, E, degree = inputs
        # N is the number of nodes (scalar)
        N = K.int_shape(X)[1]
        # extract target and source IDs from the edge pair
        # targets (batch size * # edges)
        # sources (batch size * # edges)
        targets, sources = A[..., -2], A[..., -1]
        # the first edge network layer that maps edge features to a weight tensor W
        # W (batch size * # edges * 128)
        W = self.nn1(E)
        # W (batch size * # edges * 128)
        W = self.nn2(W)
        # W (batch size * # edges * state dimension ** 2)
        W = self.nn3(W)
        # reshape W to (batch size * # edges * #attention heads * state dimension * state dimension)
        W = tf.reshape(
            W, [-1, tf.shape(E)[1], self.attn_heads, self.state_dim, self.state_dim]
        )
        # expand the dimension of node features to
        # (batch size * # nodes * state dimension * #attention heads)
        X = tf.tile(X[..., None], [1, 1, 1, self.attn_heads])
        # transpose the node features to
        # (batch size * # nodes * #attention heads * node features)
        X = tf.transpose(X, [0, 1, 3, 2])
        # attention added to the message weight
        # attn_coef (batch size * # edges * #attention heads * state dimension)
        attn_coef = self.attn_func([X, N, targets, sources, degree])
        # gather source node features
        # The batch_dims argument lets you gather different items from each element of a batch.
        # Using batch_dims=1 is equivalent to having an outer loop over the first axis of params and indices:
        # Here is an example from https://www.tensorflow.org/api_docs/python/tf/gather
        # params = tf.constant([
        #     [0, 0, 1, 0, 2],
        #     [3, 0, 0, 0, 4],
        #     [0, 5, 0, 6, 0]])
        # indices = tf.constant([
        #     [2, 4],
        #     [0, 4],
        #     [1, 3]])
        # tf.gather(params, indices, axis=1, batch_dims=1).numpy()
        # array([[1, 2],
        #        [3, 4],
        #        [5, 6]], dtype=int32)
        # messages (batch size * # edges * #attention heads * state dimension)
        messages = tf.gather(X, sources, batch_dims=1, axis=1)
        # messages (batch size * # edges * #attention heads * state dimension * 1)
        messages = messages[..., None]
        # W (batch size * # edges * #attention heads * state dimension * state dimension)
        # messages (batch size * # edges * #attention heads * state dimension * 1)
        # --> messages (batch size * # edges * #attention heads * state dimension * 1)
        messages = tf.matmul(W, messages)
        # messages (batch size * # edges * #attention heads * state dimension)
        messages = messages[..., 0]
        # attn_coef (batch size * # edges * # attention heads * state dimension)
        # messages (batch size * # edges * # attention heads * state dimension)
        # --> output (batch size * # edges * # attention heads * state dimension)
        output = attn_coef * messages
        # batch size
        num_rows = tf.shape(targets)[0]
        # [0, ..., batch size] (batch size)
        rows_idx = tf.range(num_rows)
        # N is # nodes, add this to distinguish each batch
        segment_ids_per_row = targets + N * tf.expand_dims(rows_idx, axis=1)
        # Aggregation to summarize neighboring node messages
        # output (batch size * # nodes * # attention heads * state dimension)
        if self.aggr_method == "max":
            output = tf.math.unsorted_segment_max(
                output, segment_ids_per_row, N * num_rows
            )
        elif self.aggr_method == "mean":
            output = tf.math.unsorted_segment_mean(
                output, segment_ids_per_row, N * num_rows
            )
        elif self.aggr_method == "sum":
            output = tf.math.unsorted_segment_sum(
                output, segment_ids_per_row, N * num_rows
            )
        # output the mean of all attention heads
        # output (batch size * # nodes * # attention heads * state dimension)
        output = tf.reshape(output, [-1, N, self.attn_heads, self.state_dim])
        # output (batch size * # nodes * state dimension)
        output = tf.reduce_mean(output, axis=-2)
        # add bias, output (batch size * # nodes * state dimension)
        output = K.bias_add(output, self.bias)
        return output
class UpdateFuncGRU(tf.keras.layers.Layer):
    """Gated recurrent unit update function.

    Check details here https://arxiv.org/abs/1412.3555

    Args:
        state_dim (int): number of output channels.
    """

    def __init__(self, state_dim):
        super(UpdateFuncGRU, self).__init__()
        self.state_dim = state_dim

    def build(self, input_shape):
        # Concatenate along the sequence axis so [old_state, agg_messages]
        # forms a length-2 sequence for the GRU.
        self.concat_layer = tf.keras.layers.Concatenate(axis=1)
        self.GRU = tf.keras.layers.GRU(self.state_dim)
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                old_state (tensor): node hidden feature tensor (batch size * # nodes * state dimension)
                agg_messages (tensor): node hidden feature tensor (batch size * # nodes * state dimension)

        Returns:
            activation (tensor): activated tensor from update function (batch size * # nodes * state dimension)
        """
        old_state, agg_messages = inputs
        # N = # nodes, F = state dimension; the batch-size components of the
        # static shapes are unused, so they are discarded (previously bound
        # to unused locals B, B1, N1).
        _, N, F = K.int_shape(old_state)
        F1 = K.int_shape(agg_messages)[-1]
        # Flatten batch and node axes so the GRU sees one length-1 sequence
        # per node: shapes become (batch*nodes, 1, F).
        old_state = tf.reshape(old_state, [-1, 1, F])
        agg_messages = tf.reshape(agg_messages, [-1, 1, F1])
        # (batch*nodes, 2, F): old state followed by the aggregated message.
        concat = self.concat_layer([old_state, agg_messages])
        # Apply the GRU -> (batch*nodes, F), then restore (batch, nodes, F).
        activation = self.GRU(concat)
        activation = tf.reshape(activation, [-1, N, F])
        return activation
class UpdateFuncMLP(tf.keras.layers.Layer):
    """Multi-layer perceptron update function.

    Args:
        state_dim (int): number of output channels.
        activation (str): the type of activation functions.
    """

    def __init__(self, state_dim, activation):
        super(UpdateFuncMLP, self).__init__()
        self.state_dim = state_dim
        self.activation = activation

    def build(self, input_shape):
        # Concatenate along the feature axis, then project back to state_dim.
        self.concat_layer = tf.keras.layers.Concatenate(axis=-1)
        self.dense = tf.keras.layers.Dense(
            self.state_dim, activation=self.activation, kernel_initializer="zeros"
        )
        # Consistency fix: mark the layer as built like every sibling layer's
        # build() method does (this flag was previously missing here).
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                old_state (tensor): node hidden feature tensor
                agg_messages (tensor): node hidden feature tensor

        Returns:
            activation (tensor): activated tensor from update function
        """
        old_state, agg_messages = inputs
        concat = self.concat_layer([old_state, agg_messages])
        activation = self.dense(concat)
        return activation
class AttentionGAT(tf.keras.layers.Layer):
    """GAT Attention. Check details here https://arxiv.org/abs/1710.10903

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::
        \\text{LeakyReLU}(\\textbf{a}(\\textbf{Wh}_i||\\textbf{Wh}_j))

    where :math:`\\textbf{a}` is a trainable vector, and :math:`||` represents concatenation.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionGAT, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def build(self, input_shape):
        # One attention vector per head for the target ("self") node and one
        # for the source (adjacent) node.
        self.attn_kernel_self = self.add_weight(
            name="attn_kernel_self",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.attn_kernel_adjc = self.add_weight(
            name="attn_kernel_adjc",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention)

        Returns:
            attn_coef (tensor): attention coefficient tensor
        """
        X, N, targets, sources, _ = inputs
        attn_kernel_self = tf.transpose(self.attn_kernel_self, (2, 1, 0))
        attn_kernel_adjc = tf.transpose(self.attn_kernel_adjc, (2, 1, 0))
        # Per-edge logits: a_self . h_target + a_adjc . h_source
        attn_for_self = tf.reduce_sum(X * attn_kernel_self[None, ...], -1)
        attn_for_self = tf.gather(attn_for_self, targets, batch_dims=1)
        attn_for_adjc = tf.reduce_sum(X * attn_kernel_adjc[None, ...], -1)
        attn_for_adjc = tf.gather(attn_for_adjc, sources, batch_dims=1)
        attn_coef = attn_for_self + attn_for_adjc
        attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)
        # Numerically stable segment softmax over each target node's incoming
        # edges: subtract the per-segment max before exponentiating...
        attn_coef = tf.exp(
            attn_coef
            - tf.gather(tf.math.unsorted_segment_max(attn_coef, targets, N), targets)
        )
        # BUGFIX: ...and normalize by the per-segment *sum* of the
        # exponentials (the softmax denominator). This previously used
        # unsorted_segment_max again, which is not a softmax.
        # NOTE(review): unlike MessagePasserNNM, the segment ids here are not
        # offset per batch row, so segments may mix across the batch — confirm.
        attn_coef /= tf.gather(
            tf.math.unsorted_segment_sum(attn_coef, targets, N) + 1e-9, targets
        )
        attn_coef = tf.nn.dropout(attn_coef, 0.5)
        attn_coef = attn_coef[..., None]
        return attn_coef
class AttentionSymGAT(tf.keras.layers.Layer):
    """GAT Symmetry Attention.

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::

        \\alpha_{ij} + \\alpha_{ji}

    based on GAT.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionSymGAT, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def build(self, input_shape):
        self.attn_kernel_self = self.add_weight(
            name="attn_kernel_self",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.attn_kernel_adjc = self.add_weight(
            name="attn_kernel_adjc",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention, unused here)

        Returns:
            attn_coef (tensor): attention coefficient tensor
        """
        X, N, targets, sources, _ = inputs
        attn_kernel_self = tf.transpose(self.attn_kernel_self, (2, 1, 0))
        attn_kernel_adjc = tf.transpose(self.attn_kernel_adjc, (2, 1, 0))
        # Keep the *per-node* scores so both edge directions can be gathered.
        # BUG FIX: the per-node tensors were previously overwritten by their
        # per-edge gathers before computing the reverse terms, and the reverse
        # "adjc" term gathered the wrong kernel's score.
        scores_self = tf.reduce_sum(X * attn_kernel_self[None, ...], -1)
        scores_adjc = tf.reduce_sum(X * attn_kernel_adjc[None, ...], -1)
        # Forward edge (source j -> target i): a_self.h_i + a_adjc.h_j
        attn_for_self = tf.gather(scores_self, targets, batch_dims=1)
        attn_for_adjc = tf.gather(scores_adjc, sources, batch_dims=1)
        # Reverse edge (i -> j): a_self.h_j + a_adjc.h_i
        attn_for_self_reverse = tf.gather(scores_self, sources, batch_dims=1)
        attn_for_adjc_reverse = tf.gather(scores_adjc, targets, batch_dims=1)
        attn_coef = (
            attn_for_self
            + attn_for_adjc
            + attn_for_self_reverse
            + attn_for_adjc_reverse
        )
        attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)
        # Numerically-stable segment softmax over each target node's edges.
        attn_coef = tf.exp(
            attn_coef
            - tf.gather(tf.math.unsorted_segment_max(attn_coef, targets, N), targets)
        )
        # BUG FIX: normalize by the per-segment sum (previously divided by the
        # per-segment max of exponentiated values, which is identically 1).
        attn_coef /= tf.gather(
            tf.math.unsorted_segment_sum(attn_coef, targets, N) + 1e-9, targets
        )
        # NOTE(review): dropout applied unconditionally -- confirm intended.
        attn_coef = tf.nn.dropout(attn_coef, 0.5)
        attn_coef = attn_coef[..., None]
        return attn_coef
class AttentionCOS(tf.keras.layers.Layer):
    """COS Attention.

    Check details here https://arxiv.org/abs/1803.07294

    The attention coefficient between node :math:`i` and :math:`j` is calculated
    as the element-wise product of the two projected scores
    :math:`(\\textbf{a}_l\\textbf{Wh}_i) \\odot (\\textbf{a}_r\\textbf{Wh}_j)`
    where :math:`\\textbf{a}_l` and :math:`\\textbf{a}_r` are trainable vectors.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionCOS, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def build(self, input_shape):
        self.attn_kernel_self = self.add_weight(
            name="attn_kernel_self",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.attn_kernel_adjc = self.add_weight(
            name="attn_kernel_adjc",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention, unused here)

        Returns:
            attn_coef (tensor): attention coefficient tensor (batch, E, H, 1)
        """
        X, N, targets, sources, _ = inputs
        attn_kernel_self = tf.transpose(self.attn_kernel_self, (2, 1, 0))
        attn_kernel_adjc = tf.transpose(self.attn_kernel_adjc, (2, 1, 0))
        attn_for_self = tf.reduce_sum(X * attn_kernel_self[None, ...], -1)
        attn_for_self = tf.gather(attn_for_self, targets, batch_dims=1)
        attn_for_adjc = tf.reduce_sum(X * attn_kernel_adjc[None, ...], -1)
        attn_for_adjc = tf.gather(attn_for_adjc, sources, batch_dims=1)
        # Cosine-style score: product of the two projections.
        attn_coef = tf.multiply(attn_for_self, attn_for_adjc)
        attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)
        # Numerically-stable segment softmax over each target node's edges.
        attn_coef = tf.exp(
            attn_coef
            - tf.gather(tf.math.unsorted_segment_max(attn_coef, targets, N), targets)
        )
        # BUG FIX: normalize by the per-segment sum (previously divided by the
        # per-segment max of exponentiated values, which is identically 1).
        attn_coef /= tf.gather(
            tf.math.unsorted_segment_sum(attn_coef, targets, N) + 1e-9, targets
        )
        # NOTE(review): dropout applied unconditionally -- confirm intended.
        attn_coef = tf.nn.dropout(attn_coef, 0.5)
        attn_coef = attn_coef[..., None]
        return attn_coef
class AttentionLinear(tf.keras.layers.Layer):
    """Linear Attention.

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::

        \\text{tanh} (\\textbf{a}_r\\textbf{Wh}_j)

    where :math:`\\textbf{a}_r` is a trainable vector (only the source-node
    term is used in this implementation).

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionLinear, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def build(self, input_shape):
        self.attn_kernel_adjc = self.add_weight(
            name="attn_kernel_adjc",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention, unused here)

        Returns:
            attn_coef (tensor): attention coefficient tensor
        """
        X, N, targets, sources, _ = inputs
        attn_kernel_adjc = tf.transpose(self.attn_kernel_adjc, (2, 1, 0))
        attn_for_adjc = tf.reduce_sum(X * attn_kernel_adjc[None, ...], -1)
        attn_for_adjc = tf.gather(attn_for_adjc, sources, batch_dims=1)
        attn_coef = attn_for_adjc
        attn_coef = tf.nn.tanh(attn_coef)
        # Numerically-stable segment softmax over each target node's edges.
        attn_coef = tf.exp(
            attn_coef
            - tf.gather(tf.math.unsorted_segment_max(attn_coef, targets, N), targets)
        )
        # BUG FIX: normalize by the per-segment sum (previously divided by the
        # per-segment max of exponentiated values, which is identically 1).
        attn_coef /= tf.gather(
            tf.math.unsorted_segment_sum(attn_coef, targets, N) + 1e-9, targets
        )
        # NOTE(review): dropout applied unconditionally -- confirm intended.
        attn_coef = tf.nn.dropout(attn_coef, 0.5)
        attn_coef = attn_coef[..., None]
        return attn_coef
class AttentionGenLinear(tf.keras.layers.Layer):
    """Generalized Linear Attention.

    Check details here https://arxiv.org/abs/1802.00910

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::

        \\textbf{W}_G \\text{tanh} (\\textbf{Wh}_i + \\textbf{Wh}_j)

    where :math:`\\textbf{W}_G` is a trainable matrix.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super(AttentionGenLinear, self).__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def build(self, input_shape):
        self.attn_kernel_self = self.add_weight(
            name="attn_kernel_self",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        self.attn_kernel_adjc = self.add_weight(
            name="attn_kernel_adjc",
            shape=[self.state_dim, self.attn_heads, 1],
            initializer="glorot_uniform",
        )
        # Head-mixing projection W_G applied after the tanh non-linearity.
        self.gen_nn = tf.keras.layers.Dense(
            units=self.attn_heads, kernel_initializer="glorot_uniform", use_bias=False
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list):
                X (tensor): node feature tensor
                N (int): number of nodes
                targets (tensor): target node index tensor
                sources (tensor): source node index tensor
                degree (tensor): node degree sqrt tensor (for GCN attention, unused here)

        Returns:
            attn_coef (tensor): attention coefficient tensor
        """
        X, N, targets, sources, _ = inputs
        attn_kernel_self = tf.transpose(self.attn_kernel_self, (2, 1, 0))
        attn_kernel_adjc = tf.transpose(self.attn_kernel_adjc, (2, 1, 0))
        attn_for_self = tf.reduce_sum(X * attn_kernel_self[None, ...], -1)
        attn_for_self = tf.gather(attn_for_self, targets, batch_dims=1)
        attn_for_adjc = tf.reduce_sum(X * attn_kernel_adjc[None, ...], -1)
        attn_for_adjc = tf.gather(attn_for_adjc, sources, batch_dims=1)
        attn_coef = attn_for_self + attn_for_adjc
        attn_coef = tf.nn.tanh(attn_coef)
        attn_coef = self.gen_nn(attn_coef)
        # Numerically-stable segment softmax over each target node's edges.
        attn_coef = tf.exp(
            attn_coef
            - tf.gather(tf.math.unsorted_segment_max(attn_coef, targets, N), targets)
        )
        # BUG FIX: normalize by the per-segment sum (previously divided by the
        # per-segment max of exponentiated values, which is identically 1).
        attn_coef /= tf.gather(
            tf.math.unsorted_segment_sum(attn_coef, targets, N) + 1e-9, targets
        )
        # NOTE(review): dropout applied unconditionally -- confirm intended.
        attn_coef = tf.nn.dropout(attn_coef, 0.5)
        attn_coef = attn_coef[..., None]
        return attn_coef
class AttentionGCN(tf.keras.layers.Layer):
    """GCN Attention.

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::

        \\frac{1}{\\sqrt{|\\mathcal{N}(i)||\\mathcal{N}(j)|}}

    where :math:`\\mathcal{N}(i)` is the number of neighboring nodes of node :math:`i`.

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super().__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list): ``[X, N, targets, sources, degree]``; only the
                precomputed ``degree`` tensor is used here.

        Returns:
            tensor: attention coefficient tensor, replicated over the heads.
        """
        degree = inputs[4]
        # Expand to (..., 1, 1) then replicate the same coefficient per head.
        expanded = degree[..., None, None]
        return tf.tile(expanded, [1, 1, self.attn_heads, 1])
class AttentionConst(tf.keras.layers.Layer):
    """Constant Attention.

    The attention coefficient between node :math:`i` and :math:`j` is calculated as:

    .. math::

        \\alpha_{ij} = 1

    Args:
        state_dim (int): number of output channels.
        attn_heads (int): number of attention heads.
    """

    def __init__(self, state_dim, attn_heads):
        super().__init__()
        self.state_dim = state_dim
        self.attn_heads = attn_heads

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (list): ``[X, N, targets, sources, degree]``; only the
                shape of ``targets`` is used here.

        Returns:
            tensor: all-ones attention coefficient tensor.
        """
        targets = inputs[2]
        edge_shape = tf.shape(targets)
        # One constant coefficient per edge and per head.
        return tf.ones((edge_shape[0], edge_shape[1], self.attn_heads, 1))
class GlobalAttentionPool(tf.keras.layers.Layer):
    """Global Attention Pool.

    A gated attention global pooling layer as presented by [Li et al. (2017)](https://arxiv.org/abs/1511.05493). Details can be seen from https://github.com/danielegrattarola/spektral

    Args:
        state_dim (int): number of output channels.
    """

    def __init__(self, state_dim, **kwargs):
        super().__init__()
        self.state_dim = state_dim
        self.kwargs = kwargs

    def __str__(self):
        return "GlobalAttentionPool"

    def build(self, input_shape):
        # Linear projection of the features and a sigmoid gate over them.
        self.features_layer = Dense(self.state_dim, name="features_layer")
        self.attention_layer = Dense(
            self.state_dim, name="attention_layer", activation="sigmoid"
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (tensor): the node feature tensor

        Returns:
            tensor: gated features summed over the node axis.
        """
        projected = self.features_layer(inputs)
        gate = self.attention_layer(inputs)
        return K.sum(projected * gate, axis=-2, keepdims=False)
class GlobalAttentionSumPool(tf.keras.layers.Layer):
    """Global Attention Summation Pool.

    Pools a graph by learning attention coefficients to sum node features.
    Details can be seen from https://github.com/danielegrattarola/spektral
    """

    def __init__(self, **kwargs):
        super().__init__()
        self.kwargs = kwargs

    def __str__(self):
        return "GlobalAttentionSumPool"

    def build(self, input_shape):
        num_features = int(input_shape[-1])
        # One scalar attention score per node.
        self.attn_kernel = self.add_weight(
            shape=(num_features, 1), initializer="glorot_uniform", name="attn_kernel"
        )
        self.built = True

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (tensor): the node feature tensor

        Returns:
            tensor: attention-weighted sum of the node features.
        """
        scores = K.squeeze(K.dot(inputs, self.attn_kernel), -1)
        weights = K.softmax(scores)
        return K.batch_dot(weights, inputs)
class GlobalAvgPool(tf.keras.layers.Layer):
    """Global Average Pool.

    Takes the average over all the nodes or features.
    Details can be seen from https://github.com/danielegrattarola/spektral

    Args:
        axis (int): the axis to take average.
    """

    def __init__(self, axis=-2, **kwargs):
        super().__init__()
        self.axis = axis
        self.kwargs = kwargs

    def __str__(self):
        return "GlobalAvgPool"

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (tensor): the node feature tensor

        Returns:
            tensor: the mean of ``inputs`` along the configured axis.
        """
        pooled = tf.reduce_mean(inputs, axis=self.axis)
        return pooled
class GlobalMaxPool(tf.keras.layers.Layer):
    """Global Max Pool.

    Takes the max value over all the nodes or features.
    Details can be seen from https://github.com/danielegrattarola/spektral

    Args:
        axis (int): the axis to take the max value.
    """

    def __init__(self, axis=-2, **kwargs):
        super().__init__()
        self.axis = axis
        self.kwargs = kwargs

    def __str__(self):
        return "GlobalMaxPool"

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (tensor): the node feature tensor

        Returns:
            tensor: the maximum of ``inputs`` along the configured axis.
        """
        pooled = tf.reduce_max(inputs, axis=self.axis)
        return pooled
class GlobalSumPool(tf.keras.layers.Layer):
    """Global Summation Pool.

    Takes the summation over all the nodes or features.
    Details can be seen from https://github.com/danielegrattarola/spektral

    Args:
        axis (int): the axis to take summation.
    """

    def __init__(self, axis=-2, **kwargs):
        super().__init__()
        self.axis = axis
        self.kwargs = kwargs

    def __str__(self):
        return "GlobalSumPool"

    def call(self, inputs, **kwargs):
        """Apply the layer on input tensors.

        Args:
            inputs (tensor): the node feature tensor

        Returns:
            tensor: the sum of ``inputs`` along the configured axis.
        """
        pooled = tf.reduce_sum(inputs, axis=self.axis)
        return pooled
| 36,142 | 35.471241 | 183 | py |
deephyper | deephyper-master/deephyper/keras/layers/__init__.py | from deephyper.keras.layers._mpnn import (
AttentionConst,
AttentionCOS,
AttentionGAT,
AttentionGCN,
AttentionGenLinear,
AttentionLinear,
AttentionSymGAT,
GlobalAttentionPool,
GlobalAttentionSumPool,
GlobalAvgPool,
GlobalMaxPool,
GlobalSumPool,
MessagePasserNNM,
MessagePassing,
SparseMPNN,
UpdateFuncGRU,
UpdateFuncMLP,
)
from deephyper.keras.layers._padding import Padding
# Public names re-exported by ``deephyper.keras.layers``.
__all__ = [
    "AttentionConst",
    "AttentionCOS",
    "AttentionGAT",
    "AttentionGCN",
    "AttentionGenLinear",
    "AttentionLinear",
    "AttentionSymGAT",
    "GlobalAttentionPool",
    "GlobalAttentionSumPool",
    "GlobalAvgPool",
    "GlobalMaxPool",
    "GlobalSumPool",
    "MessagePasserNNM",
    "MessagePassing",
    "SparseMPNN",
    "UpdateFuncGRU",
    "UpdateFuncMLP",
]
# When loading models with: "model.load('file.h5', custom_objects=custom_objects)"
# NOTE(review): only ``Padding`` is registered here; the MPNN layers listed in
# ``__all__`` are not included -- confirm whether they should also be
# deserializable through this mapping.
custom_objects = {"Padding": Padding}
| 960 | 20.840909 | 82 | py |
deephyper | deephyper-master/deephyper/keras/layers/_padding.py | import tensorflow as tf
class Padding(tf.keras.layers.Layer):
    """Multi-dimensions padding layer.

    This operation pads a tensor according to the paddings you specify. paddings is an
    integer tensor with shape [n-1, 2], where n is the rank of tensor. For each dimension
    D of input, paddings[D, 0] indicates how many values to add before the contents of
    tensor in that dimension, and paddings[D, 1] indicates how many values to add after
    the contents of tensor in that dimension. The first dimension corresponding to the
    batch size cannot be padded.

    Args:
        padding (list(list(int))): e.g. [[1, 1]]
        mode (str): 'CONSTANT', 'REFLECT' or 'SYMMETRIC'
        constant_values (scalar): value used to fill when ``mode`` is 'CONSTANT'.
    """

    def __init__(self, padding, mode="CONSTANT", constant_values=0, **kwargs):
        super(Padding, self).__init__(**kwargs)
        # Prepend [0, 0] so the batch dimension is never padded.
        self.padding = [[0, 0]] + padding
        self.mode = mode
        self.constant_values = constant_values

    def call(self, x, mask=None):
        padding = tf.constant(self.padding)
        return tf.pad(
            tensor=x,
            paddings=padding,
            mode=self.mode,
            constant_values=self.constant_values,
        )

    def compute_output_shape(self, input_shape):
        # Each known dimension grows by the total padding added before and
        # after it; unknown (None) dimensions stay unknown.
        return tf.TensorShape(
            [
                None if dim is None else dim + sum(pad)
                for dim, pad in zip(input_shape, self.padding)
            ]
        )

    def get_config(self):
        # Strip the leading batch entry so the config round-trips through
        # __init__ unchanged.
        config = {
            "padding": self.padding[1:],
            "mode": self.mode,
            "constant_values": self.constant_values,
        }
        base_config = super(Padding, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 1,788 | 32.12963 | 89 | py |
deephyper | deephyper-master/deephyper/search/_search.py | import abc
import copy
import functools
import os
import pathlib
import numpy as np
import pandas as pd
import yaml
from deephyper.core.exceptions import SearchTerminationError
from deephyper.core.utils._introspection import get_init_params_as_json
from deephyper.core.utils._timeout import terminate_on_timeout
from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback
class Search(abc.ABC):
    """Abstract class which represents a search algorithm.

    Args:
        problem: object describing the search/optimization problem.
        evaluator: object describing the evaluation process (an ``Evaluator``
            instance or a callable which is wrapped in a serial ``Evaluator``).
        random_state (Union[int, np.random.RandomState], optional): random seed
            or random state. Defaults to ``None`` for non-reproducible randomness.
        log_dir (str, optional): path of the directory where results are stored.
            Defaults to ``"."``.
        verbose (int, optional): verbosity level of the search. Defaults to ``0``.
    """

    def __init__(
        self, problem, evaluator, random_state=None, log_dir=".", verbose=0, **kwargs
    ):
        # get the __init__ parameters
        self._init_params = locals()
        self._call_args = []

        self._problem = copy.deepcopy(problem)

        # if a callable is directly passed wrap it around the serial evaluator
        self.check_evaluator(evaluator)

        self._seed = None
        if type(random_state) is int:
            self._seed = random_state
            self._random_state = np.random.RandomState(random_state)
        elif isinstance(random_state, np.random.RandomState):
            self._random_state = random_state
        else:
            self._random_state = np.random.RandomState()

        # Create logging directory if does not exist
        # NOTE(review): parents=False assumes the parent of log_dir already
        # exists -- confirm this is intended.
        self._log_dir = os.path.abspath(log_dir)
        pathlib.Path(log_dir).mkdir(parents=False, exist_ok=True)

        self._verbose = verbose

    def check_evaluator(self, evaluator):
        """Set ``self._evaluator``, wrapping a plain callable in a serial ``Evaluator``.

        Raises:
            TypeError: if ``evaluator`` is neither an ``Evaluator`` nor a callable.
        """
        if not (isinstance(evaluator, Evaluator)):
            if callable(evaluator):
                self._evaluator = Evaluator.create(
                    evaluator,
                    method="serial",
                    method_kwargs={"callbacks": [TqdmCallback()]},
                )
            else:
                raise TypeError(
                    f"The evaluator should be an instance of deephyper.evaluator.Evaluator but is {type(evaluator)}!"
                )
        else:
            self._evaluator = evaluator

    def to_json(self):
        """Returns a json version of the search object."""
        json_self = {
            "search": {
                "type": type(self).__name__,
                **get_init_params_as_json(self),
            },
            "calls": self._call_args,
        }
        return json_self

    def dump_context(self):
        """Dumps the context in the log folder."""
        context = self.to_json()
        path_context = os.path.join(self._log_dir, "context.yaml")
        with open(path_context, "w") as file:
            yaml.dump(context, file)

    def _set_timeout(self, timeout=None):
        """If the `timeout` parameter is valid. Run the search in an other thread and trigger a timeout when this thread exhaust the allocated time budget."""
        if timeout is not None:
            if type(timeout) is not int:
                raise ValueError(
                    f"'timeout' should be of type 'int' but is of type '{type(timeout)}'!"
                )
            if timeout <= 0:
                raise ValueError("'timeout' should be > 0!")

        if np.isscalar(timeout) and timeout > 0:
            self._evaluator.set_timeout(timeout)
            self._search = functools.partial(
                terminate_on_timeout, timeout, self._search
            )

    def search(self, max_evals: int = -1, timeout: int = None):
        """Execute the search algorithm.

        Args:
            max_evals (int, optional): The maximum number of evaluations of the run function to perform before stopping the search. Defaults to ``-1``, will run indefinitely.
            timeout (int, optional): The time budget (in seconds) of the search before stopping. Defaults to ``None``, will not impose a time budget.

        Returns:
            DataFrame: a pandas DataFrame containing the evaluations performed or ``None`` if the search could not evaluate any configuration.
        """
        self._set_timeout(timeout)

        # save the search call arguments for the context
        self._call_args.append({"timeout": timeout, "max_evals": max_evals})
        # save the context in the log folder
        self.dump_context()
        # init tqdm callback
        if max_evals > 1:
            for cb in self._evaluator._callbacks:
                if isinstance(cb, TqdmCallback):
                    cb.set_max_evals(max_evals)

        try:
            self._search(max_evals, timeout)
        except SearchTerminationError:
            # the time budget is exhausted: persist what was evaluated so far
            if "saved_keys" in dir(self):
                self._evaluator.dump_evals(saved_keys=self.saved_keys)
            else:
                self._evaluator.dump_evals(log_dir=self._log_dir)

        try:
            path_results = os.path.join(self._log_dir, "results.csv")
            df_results = pd.read_csv(path_results)
            return df_results
        except FileNotFoundError:
            return None

    @abc.abstractmethod
    def _search(self, max_evals, timeout):
        """Search algorithm to be implemented.

        Args:
            max_evals (int, optional): The maximum number of evaluations of the run function to perform before stopping the search. Defaults to -1, will run indefinitely.
            timeout (int, optional): The time budget of the search before stopping. Defaults to None, will not impose a time budget.
        """

    @property
    def search_id(self):
        """The identifier of the search used by the evaluator."""
        return self._evaluator._search_id
| 5,785 | 35.620253 | 174 | py |
deephyper | deephyper-master/deephyper/search/__init__.py | """
The ``search`` module brings a modular way to implement new search algorithms and two sub modules. One is for hyperparameter search ``deephyper.search.hps`` and one is for neural architecture search ``deephyper.search.nas``.
The ``Search`` class is abstract and has different subclasses such as: ``deephyper.search.hps.CBO`` and ``deephyper.search.nas.AgEBO``.
"""
from deephyper.search._search import Search
__all__ = ["Search"]
| 436 | 47.555556 | 224 | py |
deephyper | deephyper-master/deephyper/search/hps/_mpi_dbo.py | import logging
import mpi4py
import numpy as np
import scipy.stats
# !To avoid initializing MPI when module is imported (MPI is optional)
mpi4py.rc.initialize = False
mpi4py.rc.finalize = True
from mpi4py import MPI # noqa: E402
from deephyper.evaluator import Evaluator # noqa: E402
from deephyper.evaluator.callback import TqdmCallback # noqa: E402
from deephyper.evaluator.storage import Storage # noqa: E402
from deephyper.search.hps._cbo import CBO # noqa: E402
from deephyper.stopper import Stopper # noqa: E402
MAP_acq_func = {
"UCB": "LCB",
}
class MPIDistributedBO(CBO):
    """Distributed Bayesian Optimization Search using MPI to launch parallel search instances.

    Args:
        problem (HpProblem): Hyperparameter problem describing the search space to explore.
        evaluator (Evaluator): An ``Evaluator`` instance responsible of distributing the tasks.
        random_state (int, optional): Random seed. Defaults to ``None``.
        log_dir (str, optional): Log directory where search's results are saved. Defaults to ``"."``.
        verbose (int, optional): Indicate the verbosity level of the search. Defaults to ``0``.
        surrogate_model (Union[str,sklearn.base.RegressorMixin], optional): Surrogate model used by the Bayesian optimization. Can be a value in ``["RF", "GP", "ET", "GBRT", "DUMMY"]`` or a sklearn regressor. ``"RF"`` is for Random-Forest which is the best compromise between speed and quality when performing a lot of parallel evaluations, i.e., reaching more than hundreds of evaluations. ``"GP"`` is for Gaussian-Process which is the best choice when maximizing the quality of iteration but quickly slow down when reaching hundreds of evaluations, also it does not support conditional search space. ``"ET"`` is for Extra-Tree, faster than random forest but with worse mean estimate and poor uncertainty quantification capabilities. ``"GBRT"`` is for Gradient-Boosting Regression Tree, it has better mean estimate than other tree-based method worse uncertainty quantification capabilities and slower than ``"RF"``. Defaults to ``"RF"``.
        acq_func (str, optional): Acquisition function used by the Bayesian optimization. Can be a value in ``["UCB", "EI", "PI", "gp_hedge"]``. Defaults to ``"UCB"``.
        acq_optimizer (str, optional): Method used to minimze the acquisition function. Can be a value in ``["sampling", "lbfgs"]``. Defaults to ``"auto"``.
        kappa (float, optional): Manage the exploration/exploitation tradeoff for the "UCB" acquisition function. Defaults to ``1.96`` which corresponds to 95% of the confidence interval.
        xi (float, optional): Manage the exploration/exploitation tradeoff of ``"EI"`` and ``"PI"`` acquisition function. Defaults to ``0.001``.
        n_points (int, optional): The number of configurations sampled from the search space to infer each batch of new evaluated configurations.
        filter_duplicated (bool, optional): Force the optimizer to sample unique points until the search space is "exhausted" in the sens that no new unique points can be found given the sampling size ``n_points``. Defaults to ``True``.
        multi_point_strategy (str, optional): Definition of the constant value use for the Liar strategy. Can be a value in ``["cl_min", "cl_mean", "cl_max", "qUCB"]``. All ``"cl_..."`` strategies follow the constant-liar scheme, where if $N$ new points are requested, the surrogate model is re-fitted $N-1$ times with lies (respectively, the minimum, mean and maximum objective found so far; for multiple objectives, these are the minimum, mean and maximum of the individual objectives) to infer the acquisition function. Constant-Liar strategy have poor scalability because of this repeated re-fitting. The ``"qUCB"`` strategy is much more efficient by sampling a new $kappa$ value for each new requested point without re-fitting the model, but it is only compatible with ``acq_func == "UCB"``. Defaults to ``"cl_max"``.
        n_jobs (int, optional): Number of parallel processes used to fit the surrogate model of the Bayesian optimization. A value of ``-1`` will use all available cores. Not used in ``surrogate_model`` if passed as own sklearn regressor. Defaults to ``1``.
        n_initial_points (int, optional): Number of collected objectives required before fitting the surrogate-model. Defaults to ``10``.
        initial_point_generator (str, optional): Sets an initial points generator. Can be either ``["random", "sobol", "halton", "hammersly", "lhs", "grid"]``. Defaults to ``"random"``.
        initial_points (List[Dict], optional): A list of initial points to evaluate where each point is a dictionnary where keys are names of hyperparameters and values their corresponding choice. Defaults to ``None`` for them to be generated randomly from the search space.
        sync_communcation (bool, optional): Performs the search in a batch-synchronous manner. Defaults to ``False`` for asynchronous updates.
        filter_failures (str, optional): Replace objective of failed configurations by ``"min"`` or ``"mean"``. If ``"ignore"`` is passed then failed configurations will be filtered-out and not passed to the surrogate model. For multiple objectives, failure of any single objective will lead to treating that configuration as failed and each of these multiple objective will be replaced by their individual ``"min"`` or ``"mean"`` of past configurations. Defaults to ``"mean"`` to replace by failed configurations by the running mean of objectives.
        max_failures (int, optional): Maximum number of failed configurations allowed before observing a valid objective value when ``filter_failures`` is not equal to ``"ignore"``. Defaults to ``100``.
        moo_scalarization_strategy (str, optional): Scalarization strategy used in multiobjective optimization. Can be a value in ``["Linear", "Chebyshev", "AugChebyshev", "PBI", "Quadratic", "rLinear", "rChebyshev", "rAugChebyshev", "rPBI", "rQuadratic"]``. Defaults to ``"Chebyshev"``.
        moo_scalarization_weight (list, optional): Scalarization weights to be used in multiobjective optimization with length equal to the number of objective functions. Defaults to ``None``.
        scheduler (dict, callable, optional): a method to manage the the value of ``kappa, xi`` with iterations. Defaults to ``None`` which does not use any scheduler.
        objective_scaler (str, optional): a way to map the objective space to some other support for example to normalize it. Defaults to ``"auto"`` which automatically set it to "identity" for any surrogate model except "RF" which will use "minmaxlog".
        stopper (Stopper, optional): a stopper to leverage multi-fidelity when evaluating the function. Defaults to ``None`` which does not use any stopper.
        comm (Comm, optional): communicator used with MPI. Defaults to ``None`` for ``COMM_WORLD``.
    """

    def __init__(
        self,
        problem,
        evaluator,
        random_state: int = None,
        log_dir: str = ".",
        verbose: int = 0,
        surrogate_model="RF",
        acq_func: str = "UCB",
        acq_optimizer: str = "auto",
        kappa: float = 1.96,
        xi: float = 0.001,
        n_points: int = 10000,
        filter_duplicated: bool = True,
        update_prior: bool = False,  # TODO: check what this is doing?
        multi_point_strategy: str = "cl_max",
        n_jobs: int = 1,
        n_initial_points: int = 10,
        initial_point_generator: str = "random",
        initial_points=None,
        sync_communication: bool = False,
        filter_failures: str = "mean",
        max_failures: int = 100,
        moo_scalarization_strategy: str = "Chebyshev",
        moo_scalarization_weight=None,
        scheduler=None,
        objective_scaler="auto",
        stopper: Stopper = None,
        comm: MPI.Comm = None,
        **kwargs,
    ):
        # get the __init__ parameters
        _init_params = locals()

        if not MPI.Is_initialized():
            MPI.Init_thread()

        self.comm = comm if comm else MPI.COMM_WORLD
        self.rank = self.comm.Get_rank()
        self.size = self.comm.Get_size()

        self.check_evaluator(evaluator)

        if type(random_state) is int:
            random_state = np.random.RandomState(random_state)
        elif isinstance(random_state, np.random.RandomState):
            random_state = random_state
        else:
            random_state = np.random.RandomState()

        if acq_optimizer == "auto":
            if acq_func[0] == "q":
                acq_optimizer = "sampling"
            elif acq_func[0] == "b":
                # BUG FIX: was a comparison (`==`) with no effect, leaving
                # acq_optimizer set to "auto".
                acq_optimizer = "boltzmann_sampling"
            else:
                acq_optimizer = "sampling"

        if acq_func[0] == "q":
            # Draw per-rank kappa/xi values to diversify acquisition across
            # ranks. BUG FIX: indexed with self.rank (this process' MPI rank)
            # instead of self._evaluator.rank, which the evaluator may not
            # define.
            kappa = scipy.stats.expon.rvs(
                size=self.size, scale=kappa, random_state=random_state
            )[self.rank]
            xi = scipy.stats.expon.rvs(
                size=self.size, scale=xi, random_state=random_state
            )[self.rank]
            acq_func = acq_func[1:]
        elif acq_func[0] == "b":
            # BUG FIX: was `acq_func[0] = acq_func[1:]`, which raises
            # TypeError because strings do not support item assignment.
            acq_func = acq_func[1:]

        # set random state for given rank
        random_state = np.random.RandomState(
            random_state.randint(low=0, high=2**32, size=self.size)[self.rank]
        )

        # The CBO initialization is identical for all ranks; gather the
        # arguments once instead of duplicating the call site.
        cbo_kwargs = dict(
            problem=problem,
            evaluator=evaluator,
            random_state=random_state,
            log_dir=log_dir,
            verbose=verbose,
            surrogate_model=surrogate_model,
            acq_func=acq_func,
            acq_optimizer=acq_optimizer,
            kappa=kappa,
            xi=xi,
            n_points=n_points,
            filter_duplicated=filter_duplicated,
            update_prior=update_prior,
            multi_point_strategy=multi_point_strategy,
            n_jobs=n_jobs,
            n_initial_points=n_initial_points,
            initial_point_generator=initial_point_generator,
            initial_points=initial_points,
            sync_communication=sync_communication,
            filter_failures=filter_failures,
            max_failures=max_failures,
            moo_scalarization_strategy=moo_scalarization_strategy,
            moo_scalarization_weight=moo_scalarization_weight,
            scheduler=scheduler,
            objective_scaler=objective_scaler,
            stopper=stopper,
            **kwargs,
        )

        # The root rank initializes first (e.g., creating shared files), then
        # the other ranks follow after the barrier.
        if self.rank == 0:
            super().__init__(**cbo_kwargs)
        self.comm.Barrier()
        if self.rank > 0:
            super().__init__(**cbo_kwargs)
        self.comm.Barrier()

        # Replace CBO _init_params by DBO _init_params
        self._init_params = _init_params

        logging.info(
            f"MPIDistributedBO rank {self.rank} has {self._evaluator.num_workers} local worker(s)"
        )

    def check_evaluator(self, evaluator):
        """Set ``self._evaluator``, bootstrapping a serial evaluator with a shared
        Redis storage when a plain callable is passed.

        Raises:
            TypeError: if ``evaluator`` is neither an ``Evaluator`` nor a callable.
        """
        if not (isinstance(evaluator, Evaluator)):
            if callable(evaluator):
                self._evaluator = self.bootstrap_evaluator(
                    run_function=evaluator,
                    evaluator_type="serial",
                    storage_type="redis",
                    comm=self.comm,
                    root=0,
                )
            else:
                raise TypeError(
                    f"The evaluator should be an instance of deephyper.evaluator.Evaluator but is {type(evaluator)}!"
                )
        else:
            self._evaluator = evaluator

    @staticmethod
    def bootstrap_evaluator(
        run_function,
        evaluator_type: str = "serial",
        evaluator_kwargs: dict = None,
        storage_type: str = "redis",
        storage_kwargs: dict = None,
        comm=None,
        root=0,
    ):
        """Create an evaluator wired to a shared storage and search id.

        Args:
            run_function (callable): the function to evaluate.
            evaluator_type (str, optional): evaluator method. Defaults to ``"serial"``.
            evaluator_kwargs (dict, optional): keyword arguments of the evaluator. Defaults to ``None``.
            storage_type (str, optional): storage backend. Defaults to ``"redis"``.
            storage_kwargs (dict, optional): keyword arguments of the storage. Defaults to ``None``.
            comm (Comm, optional): MPI communicator. Defaults to ``None`` for ``COMM_WORLD``.
            root (int, optional): rank of the root process. Defaults to ``0``.

        Returns:
            Evaluator: the configured evaluator.
        """
        # BUG FIX: the condition was inverted (`comm if comm is None else
        # MPI.COMM_WORLD`), which discarded any communicator passed in.
        comm = comm if comm is not None else MPI.COMM_WORLD
        rank = comm.Get_rank()

        evaluator_kwargs = evaluator_kwargs if evaluator_kwargs else {}
        storage_kwargs = storage_kwargs if storage_kwargs else {}

        storage = Storage.create(storage_type, storage_kwargs).connect()
        search_id = None
        if rank == root:
            search_id = storage.create_new_search()
        search_id = comm.bcast(search_id)

        callbacks = []
        if "callbacks" in evaluator_kwargs:
            callbacks = evaluator_kwargs["callbacks"]
        # Only the root rank displays the progress bar.
        if rank == root and not (any(isinstance(cb, TqdmCallback) for cb in callbacks)):
            callbacks.append(TqdmCallback())
        evaluator_kwargs["callbacks"] = callbacks

        # all processes are using the same search_id
        evaluator_kwargs["storage"] = storage
        evaluator_kwargs["search_id"] = search_id

        evaluator = Evaluator.create(
            run_function,
            method=evaluator_type,
            method_kwargs=evaluator_kwargs,
        )

        # all ranks synchronise with timestamp on root rank
        evaluator.timestamp = comm.bcast(evaluator.timestamp)

        # replace dump_evals of evaluator by an empty function to avoid
        # concurrent writes to the results file
        if rank != root:

            def dumps_evals(*args, **kwargs):
                pass

            evaluator.dump_evals = dumps_evals

        return evaluator
| 14,475 | 52.025641 | 938 | py |
deephyper | deephyper-master/deephyper/search/hps/__init__.py | """Hyperparameter search algorithms.
"""
from deephyper.search.hps._cbo import CBO, AMBS
__all__ = ["CBO", "AMBS"]
try:
from deephyper.search.hps._mpi_dbo import MPIDistributedBO # noqa: F401
__all__.append("MPIDistributedBO")
except ImportError:
pass
| 268 | 19.692308 | 76 | py |
deephyper | deephyper-master/deephyper/search/hps/_cbo.py | import functools
import logging
import time
import warnings
import ConfigSpace as CS
import ConfigSpace.hyperparameters as csh
import numpy as np
import pandas as pd
import deephyper.core.exceptions
import deephyper.skopt
from deephyper.problem._hyperparameter import convert_to_skopt_space
from deephyper.search._search import Search
from deephyper.skopt.moo import non_dominated_set, non_dominated_set_ranked
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.base import is_regressor
from deephyper.skopt.utils import use_named_args
# Adapt minimization -> maximization with DeepHyper
MAP_multi_point_strategy = {"cl_min": "cl_max", "cl_max": "cl_min", "qUCB": "qLCB"}
MAP_acq_func = {"UCB": "LCB", "qUCB": "qLCB"}
MAP_filter_failures = {"min": "max"}
# schedulers
def scheduler_periodic_exponential_decay(i, eta_0, periode, rate, delay):
    """Periodic exponential decay scheduler for exploration-exploitation.

    Decays ``eta_0`` by ``exp(-rate * step)`` where ``step`` restarts from 0
    every ``periode`` iterations, after an initial ``delay``.
    """
    step = (i - 1 - delay) % periode
    return eta_0 * np.exp(-rate * step)
class CBO(Search):
"""Centralized Bayesian Optimisation Search, previously named as "Asynchronous Model-Based Search" (AMBS). It follows a manager-workers architecture where the manager runs the Bayesian optimization loop and workers execute parallel evaluations of the black-box function.
Example Usage:
>>> search = CBO(problem, evaluator)
>>> results = search.search(max_evals=100, timeout=120)
Args:
problem (HpProblem): Hyperparameter problem describing the search space to explore.
evaluator (Evaluator): An ``Evaluator`` instance responsible of distributing the tasks.
random_state (int, optional): Random seed. Defaults to ``None``.
log_dir (str, optional): Log directory where search's results are saved. Defaults to ``"."``.
verbose (int, optional): Indicate the verbosity level of the search. Defaults to ``0``.
surrogate_model (Union[str,sklearn.base.RegressorMixin], optional): Surrogate model used by the Bayesian optimization. Can be a value in ``["RF", "GP", "ET", "GBRT", "DUMMY"]`` or a sklearn regressor. ``"RF"`` is for Random-Forest which is the best compromise between speed and quality when performing a lot of parallel evaluations, i.e., reaching more than hundreds of evaluations. ``"GP"`` is for Gaussian-Process which is the best choice when maximizing the quality of iteration but quickly slow down when reaching hundreds of evaluations, also it does not support conditional search space. ``"ET"`` is for Extra-Tree, faster than random forest but with worse mean estimate and poor uncertainty quantification capabilities. ``"GBRT"`` is for Gradient-Boosting Regression Tree, it has better mean estimate than other tree-based method worse uncertainty quantification capabilities and slower than ``"RF"``. Defaults to ``"RF"``.
acq_func (str, optional): Acquisition function used by the Bayesian optimization. Can be a value in ``["UCB", "EI", "PI", "gp_hedge"]``. Defaults to ``"UCB"``.
acq_optimizer (str, optional): Method used to minimze the acquisition function. Can be a value in ``["sampling", "lbfgs"]``. Defaults to ``"auto"``.
kappa (float, optional): Manage the exploration/exploitation tradeoff for the "UCB" acquisition function. Defaults to ``1.96`` which corresponds to 95% of the confidence interval.
xi (float, optional): Manage the exploration/exploitation tradeoff of ``"EI"`` and ``"PI"`` acquisition function. Defaults to ``0.001``.
n_points (int, optional): The number of configurations sampled from the search space to infer each batch of new evaluated configurations.
filter_duplicated (bool, optional): Force the optimizer to sample unique points until the search space is "exhausted" in the sens that no new unique points can be found given the sampling size ``n_points``. Defaults to ``True``.
multi_point_strategy (str, optional): Definition of the constant value use for the Liar strategy. Can be a value in ``["cl_min", "cl_mean", "cl_max", "qUCB"]``. All ``"cl_..."`` strategies follow the constant-liar scheme, where if $N$ new points are requested, the surrogate model is re-fitted $N-1$ times with lies (respectively, the minimum, mean and maximum objective found so far; for multiple objectives, these are the minimum, mean and maximum of the individual objectives) to infer the acquisition function. Constant-Liar strategy have poor scalability because of this repeated re-fitting. The ``"qUCB"`` strategy is much more efficient by sampling a new $kappa$ value for each new requested point without re-fitting the model, but it is only compatible with ``acq_func == "UCB"``. Defaults to ``"cl_max"``.
n_jobs (int, optional): Number of parallel processes used to fit the surrogate model of the Bayesian optimization. A value of ``-1`` will use all available cores. Not used in ``surrogate_model`` if passed as own sklearn regressor. Defaults to ``1``.
n_initial_points (int, optional): Number of collected objectives required before fitting the surrogate-model. Defaults to ``10``.
initial_point_generator (str, optional): Sets an initial points generator. Can be either ``["random", "sobol", "halton", "hammersly", "lhs", "grid"]``. Defaults to ``"random"``.
initial_points (List[Dict], optional): A list of initial points to evaluate where each point is a dictionnary where keys are names of hyperparameters and values their corresponding choice. Defaults to ``None`` for them to be generated randomly from the search space.
        sync_communication (bool, optional): Performs the search in a batch-synchronous manner. Defaults to ``False`` for asynchronous updates.
filter_failures (str, optional): Replace objective of failed configurations by ``"min"`` or ``"mean"``. If ``"ignore"`` is passed then failed configurations will be filtered-out and not passed to the surrogate model. For multiple objectives, failure of any single objective will lead to treating that configuration as failed and each of these multiple objective will be replaced by their individual ``"min"`` or ``"mean"`` of past configurations. Defaults to ``"mean"`` to replace by failed configurations by the running mean of objectives.
max_failures (int, optional): Maximum number of failed configurations allowed before observing a valid objective value when ``filter_failures`` is not equal to ``"ignore"``. Defaults to ``100``.
moo_scalarization_strategy (str, optional): Scalarization strategy used in multiobjective optimization. Can be a value in ``["Linear", "Chebyshev", "AugChebyshev", "PBI", "Quadratic", "rLinear", "rChebyshev", "rAugChebyshev", "rPBI", "rQuadratic"]``. Defaults to ``"Chebyshev"``.
moo_scalarization_weight (list, optional): Scalarization weights to be used in multiobjective optimization with length equal to the number of objective functions. Defaults to ``None``.
        scheduler (dict, callable, optional): a method to manage the value of ``kappa, xi`` with iterations. Defaults to ``None`` which does not use any scheduler.
objective_scaler (str, optional): a way to map the objective space to some other support for example to normalize it. Defaults to ``"auto"`` which automatically set it to "identity" for any surrogate model except "RF" which will use "minmaxlog".
stopper (Stopper, optional): a stopper to leverage multi-fidelity when evaluating the function. Defaults to ``None`` which does not use any stopper.
"""
def __init__(
self,
problem,
evaluator,
random_state: int = None,
log_dir: str = ".",
verbose: int = 0,
surrogate_model="RF",
acq_func: str = "UCB",
acq_optimizer: str = "auto",
kappa: float = 1.96,
xi: float = 0.001,
n_points: int = 10000,
filter_duplicated: bool = True,
update_prior: bool = False,
multi_point_strategy: str = "cl_max",
n_jobs: int = 1, # 32 is good for Theta
n_initial_points: int = 10,
initial_point_generator: str = "random",
initial_points=None,
sync_communication: bool = False,
filter_failures: str = "mean",
max_failures: int = 100,
moo_scalarization_strategy: str = "Chebyshev",
moo_scalarization_weight=None,
scheduler=None,
objective_scaler="auto",
stopper=None,
**kwargs,
):
super().__init__(problem, evaluator, random_state, log_dir, verbose)
# get the __init__ parameters
self._init_params = locals()
# check input parameters
if not (type(n_jobs) is int):
raise ValueError(f"Parameter n_jobs={n_jobs} should be an integer value!")
surrogate_model_allowed = ["RF", "ET", "GBRT", "DUMMY", "GP", "MF"]
if surrogate_model in surrogate_model_allowed:
base_estimator = self._get_surrogate_model(
surrogate_model,
n_jobs,
random_state=self._random_state.randint(0, 2**32),
)
elif is_regressor(surrogate_model):
base_estimator = surrogate_model
else:
raise ValueError(
f"Parameter 'surrogate_model={surrogate_model}' should have a value in {surrogate_model_allowed}, or be a sklearn regressor!"
)
acq_func_allowed = ["UCB", "EI", "PI", "gp_hedge", "qUCB"]
if not (acq_func in acq_func_allowed):
raise ValueError(
f"Parameter 'acq_func={acq_func}' should have a value in {acq_func_allowed}!"
)
if not (np.isscalar(kappa)):
raise ValueError("Parameter 'kappa' should be a scalar value!")
if not (np.isscalar(xi)):
raise ValueError("Parameter 'xi' should be a scalar value!")
if not (type(n_points) is int):
raise ValueError("Parameter 'n_points' shoud be an integer value!")
if not (type(filter_duplicated) is bool):
raise ValueError(
f"Parameter filter_duplicated={filter_duplicated} should be a boolean value!"
)
if not (type(max_failures) is int):
raise ValueError(
f"Parameter max_failures={max_failures} should be an integer value!"
)
moo_scalarization_strategy_allowed = [
"Linear",
"Chebyshev",
"AugChebyshev",
"PBI",
"Quadratic",
]
moo_scalarization_strategy_allowed = moo_scalarization_strategy_allowed + [
f"r{s}" for s in moo_scalarization_strategy_allowed
]
if not (moo_scalarization_strategy in moo_scalarization_strategy_allowed):
raise ValueError(
f"Parameter 'moo_scalarization_strategy={acq_func}' should have a value in {moo_scalarization_strategy_allowed}!"
)
self._moo_scalarization_strategy = moo_scalarization_strategy
self._moo_scalarization_weight = moo_scalarization_weight
multi_point_strategy_allowed = [
"cl_min",
"cl_mean",
"cl_max",
"topk",
"boltzmann",
"qUCB",
]
if not (multi_point_strategy in multi_point_strategy_allowed):
raise ValueError(
f"Parameter multi_point_strategy={multi_point_strategy} should have a value in {multi_point_strategy_allowed}!"
)
self._n_initial_points = n_initial_points
self._initial_points = []
if initial_points is not None and len(initial_points) > 0:
for point in initial_points:
if isinstance(point, list):
self._initial_points.append(point)
elif isinstance(point, dict):
self._initial_points.append(
[point[hp_name] for hp_name in problem.hyperparameter_names]
)
else:
raise ValueError(
f"Initial points should be dict or list but {type(point)} was given!"
)
self._multi_point_strategy = MAP_multi_point_strategy.get(
multi_point_strategy, multi_point_strategy
)
self._fitted = False
# check if it is possible to convert the ConfigSpace to standard skopt Space
if (
isinstance(self._problem.space, CS.ConfigurationSpace)
and len(self._problem.space.get_forbiddens()) == 0
and len(self._problem.space.get_conditions()) == 0
):
self._opt_space = convert_to_skopt_space(
self._problem.space, surrogate_model=surrogate_model
)
else:
self._opt_space = self._problem.space
self._opt = None
self._opt_kwargs = dict(
dimensions=self._opt_space,
base_estimator=base_estimator,
# optimizer
initial_point_generator=initial_point_generator,
acq_optimizer=acq_optimizer,
acq_optimizer_kwargs={
"n_points": n_points,
"filter_duplicated": filter_duplicated,
"update_prior": update_prior,
"n_jobs": n_jobs,
"filter_failures": MAP_filter_failures.get(
filter_failures, filter_failures
),
"max_failures": max_failures,
"boltzmann_gamma": 1,
},
# acquisition function
acq_func=MAP_acq_func.get(acq_func, acq_func),
acq_func_kwargs={"xi": xi, "kappa": kappa},
n_initial_points=self._n_initial_points,
initial_points=self._initial_points,
random_state=self._random_state,
moo_scalarization_strategy=self._moo_scalarization_strategy,
moo_scalarization_weight=self._moo_scalarization_weight,
objective_scaler=objective_scaler,
)
self._gather_type = "ALL" if sync_communication else "BATCH"
# scheduler policy
self.scheduler = None
if type(scheduler) is dict:
scheduler = scheduler.copy()
scheduler_type = scheduler.pop("type", None)
assert scheduler_type in ["periodic-exp-decay"]
if scheduler_type == "periodic-exp-decay":
scheduler_params = {
"periode": 25,
"rate": 0.1,
"delay": n_initial_points,
}
scheduler_func = scheduler_periodic_exponential_decay
scheduler_params.update(scheduler)
eta_0 = np.array([kappa, xi])
self.scheduler = functools.partial(
scheduler_func, eta_0=eta_0, **scheduler_params
)
logging.info(
f"Set up scheduler '{scheduler_type}' with parameters '{scheduler_params}'"
)
# stopper
self._evaluator._stopper = stopper
def _setup_optimizer(self):
if self._fitted:
self._opt_kwargs["n_initial_points"] = 0
self._opt = deephyper.skopt.Optimizer(**self._opt_kwargs)
def _apply_scheduler(self, i):
"""Apply scheduler policy and update corresponding values in Optimizer."""
if self.scheduler is not None:
kappa, xi = self.scheduler(i)
values = {"kappa": kappa, "xi": xi}
logging.info(
f"Updated exploration-exploitation policy with {values} from scheduler"
)
self._opt.acq_func_kwargs.update(values)
def _search(self, max_evals, timeout):
if self._opt is None:
self._setup_optimizer()
num_evals_done = 0
num_local_evals_done = 0
logging.info(f"Asking {self._evaluator.num_workers} initial configurations...")
t1 = time.time()
new_X = self._opt.ask(n_points=self._evaluator.num_workers)
logging.info(f"Asking took {time.time() - t1:.4f} sec.")
# Transform list to dict configurations
logging.info("Transforming configurations to dict...")
t1 = time.time()
new_batch = []
for x in new_X:
new_cfg = self._to_dict(x)
new_batch.append(new_cfg)
logging.info(f"Transformation took {time.time() - t1:.4f} sec.")
# submit new configurations
logging.info(f"Submitting {len(new_batch)} configurations...")
t1 = time.time()
self._evaluator.submit(new_batch)
logging.info(f"Submition took {time.time() - t1:.4f} sec.")
# Main loop
while max_evals < 0 or num_evals_done < max_evals:
# Collecting finished evaluations
logging.info("Gathering jobs...")
t1 = time.time()
new_results = self._evaluator.gather(self._gather_type, size=1)
if isinstance(new_results, tuple) and len(new_results) == 2:
local_results, other_results = new_results
new_results = local_results + other_results
num_new_local_results = len(local_results)
num_new_other_results = len(other_results)
logging.info(
f"Gathered {num_new_local_results} local job(s) and {num_new_other_results} other job(s) in {time.time() - t1:.4f} sec."
)
else:
num_new_local_results = len(new_results)
logging.info(
f"Gathered {num_new_local_results} job(s) in {time.time() - t1:.4f} sec."
)
num_local_evals_done += num_new_local_results
if num_new_local_results > 0:
logging.info("Dumping evaluations...")
t1 = time.time()
self._evaluator.dump_evals(log_dir=self._log_dir)
logging.info(f"Dumping took {time.time() - t1:.4f} sec.")
num_evals_done += len(new_results)
if max_evals > 0 and num_evals_done >= max_evals:
break
# Transform configurations to list to fit optimizer
logging.info("Transforming received configurations to list...")
t1 = time.time()
opt_X = [] # input configuration
opt_y = [] # objective value
opt_b = [] # budget (optional)
# for cfg, obj in new_results:
for job_i in new_results:
cfg, obj = job_i
x = list(cfg.values())
# retrieve budget consumed by job with multiple observations
if job_i.observations is not None:
# # TODO: use ALC to reduce the problem to a scalar maximization/estimation
from deephyper.stopper._lcmodel_stopper import (
area_learning_curve,
)
# z_values: are the steps z from the budget function b(z)
# the job observations returns the observed (budgets, objectives)
# steps can be deduced from the length of these lists
# y_values: are the objective f(b(z))
_, y_values = np.array(job_i.observations)
z_values = np.arange(len(y_values)) + 1
y_values = -y_values
y = area_learning_curve(
z_values, y_values, z_max=self._evaluator._stopper.max_steps
)
opt_X.append(x)
opt_y.append(y)
# TODO: the following approach will not scale!
# for b, y in zip(*job_i.observations):
# opt_X.append(x)
# opt_b.append(b)
# opt_y.append(-y)
# single observation returned without budget
else:
if np.all(np.isreal(obj)):
opt_X.append(x)
opt_y.append(np.negative(obj).tolist()) # !maximizing
elif (type(obj) is str and "F" == obj[0]) or np.any(
type(objval) is str and "F" == objval[0] for objval in obj
):
if (
self._opt_kwargs["acq_optimizer_kwargs"][
"filter_failures"
]
== "ignore"
):
continue
else:
opt_X.append(x)
opt_y.append("F")
logging.info(f"Transformation took {time.time() - t1:.4f} sec.")
# apply scheduler
self._apply_scheduler(i=num_local_evals_done)
logging.info("Fitting the optimizer...")
t1 = time.time()
if len(opt_y) > 0:
opt_b = None if len(opt_b) == 0 else opt_b
self._opt.tell(opt_X, opt_y, budget=opt_b)
logging.info(f"Fitting took {time.time() - t1:.4f} sec.")
logging.info(f"Asking {num_new_local_results} new configurations...")
t1 = time.time()
new_X = self._opt.ask(
n_points=num_new_local_results, strategy=self._multi_point_strategy
)
logging.info(f"Asking took {time.time() - t1:.4f} sec.")
# Transform list to dict configurations
logging.info("Transforming configurations to dict...")
t1 = time.time()
new_batch = []
for x in new_X:
new_cfg = self._to_dict(x)
new_batch.append(new_cfg)
logging.info(f"Transformation took {time.time() - t1:.4f} sec.")
# submit new configurations
logging.info(f"Submitting {len(new_batch)} configurations...")
t1 = time.time()
self._evaluator.submit(new_batch)
logging.info(f"Submition took {time.time() - t1:.4f} sec.")
def _get_surrogate_model(
self, name: str, n_jobs: int = None, random_state: int = None
):
"""Get a surrogate model from Scikit-Optimize.
Args:
name (str): name of the surrogate model.
n_jobs (int): number of parallel processes to distribute the computation of the surrogate model.
Raises:
ValueError: when the name of the surrogate model is unknown.
"""
accepted_names = ["RF", "ET", "GBRT", "DUMMY", "GP", "MF"]
if not (name in accepted_names):
raise ValueError(
f"Unknown surrogate model {name}, please choose among {accepted_names}."
)
if name == "RF":
surrogate = deephyper.skopt.learning.RandomForestRegressor(
# n_estimators=100,
# max_features=1,
# min_samples_leaf=3,
n_jobs=n_jobs,
random_state=random_state,
)
elif name == "ET":
surrogate = deephyper.skopt.learning.ExtraTreesRegressor(
# n_estimators=100,
# min_samples_leaf=3,
n_jobs=n_jobs,
random_state=random_state,
)
elif name == "GBRT":
gbrt = GradientBoostingRegressor(n_estimators=30, loss="quantile")
surrogate = deephyper.skopt.learning.GradientBoostingQuantileRegressor(
base_estimator=gbrt, n_jobs=n_jobs, random_state=random_state
)
elif name == "MF":
try:
surrogate = deephyper.skopt.learning.MondrianForestRegressor(
n_estimators=100, n_jobs=n_jobs, random_state=random_state
)
except AttributeError:
raise deephyper.core.exceptions.MissingRequirementError(
"Installing 'deephyper/scikit-garden' is required to use MondrianForest (MF) regressor as a surrogate model!"
)
else: # for DUMMY and GP
surrogate = name
return surrogate
def _return_cond(self, cond, cst_new):
parent = cst_new.get_hyperparameter(cond.parent.name)
child = cst_new.get_hyperparameter(cond.child.name)
if type(cond) == CS.EqualsCondition:
value = cond.value
cond_new = CS.EqualsCondition(child, parent, cond.value)
elif type(cond) == CS.GreaterThanCondition:
value = cond.value
cond_new = CS.GreaterThanCondition(child, parent, value)
elif type(cond) == CS.NotEqualsCondition:
value = cond.value
cond_new = CS.GreaterThanCondition(child, parent, value)
elif type(cond) == CS.LessThanCondition:
value = cond.value
cond_new = CS.GreaterThanCondition(child, parent, value)
elif type(cond) == CS.InCondition:
values = cond.values
cond_new = CS.GreaterThanCondition(child, parent, values)
else:
print("Not supported type" + str(type(cond)))
return cond_new
def _return_forbid(self, cond, cst_new):
if type(cond) == CS.ForbiddenEqualsClause or type(cond) == CS.ForbiddenInClause:
hp = cst_new.get_hyperparameter(cond.hyperparameter.name)
if type(cond) == CS.ForbiddenEqualsClause:
value = cond.value
cond_new = CS.ForbiddenEqualsClause(hp, value)
elif type(cond) == CS.ForbiddenInClause:
values = cond.values
cond_new = CS.ForbiddenInClause(hp, values)
else:
print("Not supported type" + str(type(cond)))
return cond_new
def fit_surrogate(self, df):
"""Fit the surrogate model of the search from a checkpointed Dataframe.
Args:
df (str|DataFrame): a checkpoint from a previous search.
Example Usage:
>>> search = CBO(problem, evaluator)
>>> search.fit_surrogate("results.csv")
"""
if type(df) is str and df[-4:] == ".csv":
df = pd.read_csv(df)
assert isinstance(df, pd.DataFrame)
self._fitted = True
if self._opt is None:
self._setup_optimizer()
hp_names = [f"p:{name}" for name in self._problem.hyperparameter_names]
try:
x = df[hp_names].values.tolist()
# check single or multiple objectives
if "objective" in df.columns:
y = df.objective.tolist()
else:
y = df.filter(regex=r"^objective_\d+$").values.tolist()
except KeyError:
raise ValueError(
"Incompatible dataframe 'df' to fit surrogate model of CBO."
)
self._opt.tell(x, [np.negative(yi).tolist() for yi in y])
    def fit_generative_model(self, df, q=0.90, n_iter_optimize=0, n_samples=100):
        """Learn the distribution of hyperparameters for the top-``(1-q)x100%`` configurations and sample from this distribution. It can be used for transfer learning. For multiobjective problems, this function computes the top-``(1-q)x100%`` configurations in terms of their ranking with respect to pareto efficiency: all points on the first non-dominated pareto front have rank 1 and in general, points on the k'th non-dominated front have rank k.

        Example Usage:

        >>> search = CBO(problem, evaluator)
        >>> search.fit_surrogate("results.csv")

        Args:
            df (str|DataFrame): a dataframe or path to CSV from a previous search.
            q (float, optional): the quantile defined the set of top configurations used to bias the search. Defaults to ``0.90`` which select the top-10% configurations from ``df``.
            n_iter_optimize (int, optional): the number of iterations used to optimize the generative model which samples the data for the search. Defaults to ``0`` with no optimization for the generative model.
            n_samples (int, optional): the number of samples used to score the generative model.

        Returns:
            tuple: ``score, model`` which are a metric which measures the quality of the learned generated-model and the generative model respectively.
        """
        # to make sdv optional
        try:
            import sdv
        except ModuleNotFoundError:
            raise deephyper.core.exceptions.MissingRequirementError(
                "Installing 'sdv' is required to use 'fit_generative_model' please run 'pip install \"deephyper[sdv]\"'"
            )

        if type(df) is str and df[-4:] == ".csv":
            df = pd.read_csv(df)
        assert isinstance(df, pd.DataFrame)

        if len(df) < 10:
            raise ValueError(
                f"The passed DataFrame contains only {len(df)} results when a minimum of 10 is required!"
            )

        # !avoid error linked to `n_components=10` a parameter of generative model used
        # i.e., never keep fewer than 10 rows after quantile filtering
        q_max = 1 - 10 / len(df)
        if q_max < q:
            warnings.warn(
                f"The value of q={q} is replaced by q_max={q_max} because a minimum of 10 results are required to perform transfer-learning!",
                category=UserWarning,
            )
            q = q_max

        # check single or multiple objectives
        hp_cols = [k for k in df.columns if "p:" == k[:2]]
        if "objective" in df.columns:
            # filter failures (objectives stored as strings starting with "F")
            if pd.api.types.is_string_dtype(df.objective):
                df = df[~df.objective.str.startswith("F")]
                df.objective = df.objective.astype(float)

            q_val = np.quantile(df.objective.values, q)
            req_df = df.loc[df["objective"] > q_val]
        else:
            # filter failures
            objcol = list(df.filter(regex=r"^objective_\d+$").columns)
            for col in objcol:
                if pd.api.types.is_string_dtype(df[col]):
                    df = df[~df[col].str.startswith("F")]
                    df[col] = df[col].astype(float)

            # rank by pareto-front membership; keep the top (1-q) fraction
            top = non_dominated_set_ranked(-np.asarray(df[objcol]), 1.0 - q)
            req_df = df.loc[top]

        # keep only hyperparameter columns and strip the "p:" prefix
        req_df = req_df[hp_cols]
        req_df = req_df.rename(columns={k: k[2:] for k in hp_cols})

        # constraints
        scalar_constraints = []
        for hp_name in self._problem.space:
            if hp_name in req_df.columns:
                hp = self._problem.space.get_hyperparameter(hp_name)

                # TODO: Categorical and Ordinal are both considered non-ordered for SDV
                # TODO: it could be useful to use the "category" type of Pandas and the ordered=True/False argument
                # TODO: to extend the capability of SDV
                if isinstance(hp, csh.CategoricalHyperparameter) or isinstance(
                    hp, csh.OrdinalHyperparameter
                ):
                    req_df[hp_name] = req_df[hp_name].astype("O")
                else:
                    # numerical hyperparameters keep their original bounds
                    scalar_constraints.append(
                        sdv.constraints.ScalarRange(
                            hp_name, hp.lower, hp.upper, strict_boundaries=True
                        )
                    )

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            model = sdv.tabular.TVAE(constraints=scalar_constraints)
            model.fit(req_df)
            synthetic_data = model.sample(n_samples)
            score = sdv.evaluation.evaluate(synthetic_data, req_df)

            if n_iter_optimize > 0:
                # hyperparameter space of the TVAE generative model itself
                space = [
                    deephyper.skopt.space.Integer(1, 20, name="epochs"),
                    # deephyper.skopt.space.Integer(1, np.floor(req_df.shape[0]/10), name='batch_size'),
                    deephyper.skopt.space.Integer(1, 8, name="embedding_dim"),
                    deephyper.skopt.space.Integer(1, 8, name="compress_dims"),
                    deephyper.skopt.space.Integer(1, 8, name="decompress_dims"),
                    deephyper.skopt.space.Real(
                        10**-8, 10**-4, "log-uniform", name="l2scale"
                    ),
                    deephyper.skopt.space.Integer(1, 5, name="loss_factor"),
                ]

                def model_fit(params):
                    # rescale the integer-encoded search values to TVAE units
                    params["epochs"] = 10 * params["epochs"]
                    # params['batch_size'] = 10*params['batch_size']
                    params["embedding_dim"] = 2 ** params["embedding_dim"]
                    params["compress_dims"] = [
                        2 ** params["compress_dims"],
                        2 ** params["compress_dims"],
                    ]
                    params["decompress_dims"] = [
                        2 ** params["decompress_dims"],
                        2 ** params["decompress_dims"],
                    ]
                    model = sdv.tabular.TVAE(**params)
                    model.fit(req_df)
                    synthetic_data = model.sample(n_samples)
                    score = sdv.evaluation.evaluate(synthetic_data, req_df)
                    # negated because the optimizer below minimizes
                    return -score, model

                @use_named_args(space)
                def objective(**params):
                    score, _ = model_fit(params)
                    return score

                # run sequential optimization of generative model hyperparameters
                opt = deephyper.skopt.Optimizer(space)
                for i in range(n_iter_optimize):
                    x = opt.ask()
                    y = objective(x)
                    opt.tell(x, y)
                    logging.info(f"iteration {i}: {x} -> {y}")

                min_index = np.argmin(opt.yi)
                best_params = opt.Xi[min_index]
                logging.info(
                    f"Min-Score of the SDV generative model: {opt.yi[min_index]}"
                )

                best_params = {d.name: v for d, v in zip(space, best_params)}
                logging.info(
                    f"Best configuration for SDV generative model: {best_params}"
                )

                # refit with the best found configuration
                score, model = model_fit(best_params)

        # we pass the learned generative model from sdv to the
        # skopt Optimizer
        self._opt_kwargs["model_sdv"] = model

        return score, model
    def fit_search_space(self, df, fac_numerical=0.125, fac_categorical=10):
        """Apply prior-guided transfer learning based on a DataFrame of results.

        Example Usage:

        >>> search = CBO(problem, evaluator)
        >>> search.fit_surrogate("results.csv")

        Args:
            df (str|DataFrame): a checkpoint from a previous search.
            fac_numerical (float): the factor used to compute the sigma of a truncated normal distribution based on ``sigma = max(1.0, (upper - lower) * fac_numerical)``. A small large factor increase exploration while a small factor increase exploitation around the best-configuration from the ``df`` parameter.
            fac_categorical (float): the weight given to a categorical feature part of the best configuration. A large weight ``> 1`` increase exploitation while a small factor close to ``1`` increase exploration.
        """
        if type(df) is str and df[-4:] == ".csv":
            df = pd.read_csv(df)
        assert isinstance(df, pd.DataFrame)

        # check single or multiple objectives
        if "objective" in df.columns:
            # filter failures (objectives stored as strings starting with "F")
            if pd.api.types.is_string_dtype(df.objective):
                df = df[~df.objective.str.startswith("F")]
                df.objective = df.objective.astype(float)
        else:
            # filter failures
            objcol = df.filter(regex=r"^objective_\d+$").columns
            for col in objcol:
                if pd.api.types.is_string_dtype(df[col]):
                    df = df[~df[col].str.startswith("F")]
                    df[col] = df[col].astype(float)

        cst = self._problem.space
        if type(cst) != CS.ConfigurationSpace:
            # NOTE(review): only logs an error but continues; "trainsfer" typo
            # in the log message — fix separately.
            logging.error(f"{type(cst)}: not supported for trainsfer learning")

        res_df = df
        res_df_names = res_df.columns.values
        # identify the best configuration to center the new priors on
        if "objective" in df.columns:
            best_index = np.argmax(res_df["objective"].values)
            best_param = res_df.iloc[best_index]
        else:
            # first configuration of the first pareto front
            best_index = non_dominated_set(
                -np.asarray(res_df[objcol]), return_mask=False
            )[0]
            best_param = res_df.iloc[best_index]

        cst_new = CS.ConfigurationSpace(seed=self._random_state.randint(0, 2**32))
        hp_names = cst.get_hyperparameter_names()
        for hp_name in hp_names:
            hp = cst.get_hyperparameter(hp_name)
            if hp_name in res_df_names:
                if (
                    type(hp) is csh.UniformIntegerHyperparameter
                    or type(hp) is csh.UniformFloatHyperparameter
                ):
                    # replace the uniform prior by a normal prior centered on
                    # the best value, with spread controlled by fac_numerical
                    mu = best_param[hp.name]
                    lower = hp.lower
                    upper = hp.upper
                    sigma = max(1.0, (upper - lower) * fac_numerical)
                    if type(hp) is csh.UniformIntegerHyperparameter:
                        param_new = csh.NormalIntegerHyperparameter(
                            name=hp.name,
                            default_value=mu,
                            mu=mu,
                            sigma=sigma,
                            lower=lower,
                            upper=upper,
                        )
                    else:  # type is csh.UniformFloatHyperparameter:
                        param_new = csh.NormalFloatHyperparameter(
                            name=hp.name,
                            default_value=mu,
                            mu=mu,
                            sigma=sigma,
                            lower=lower,
                            upper=upper,
                        )
                    cst_new.add_hyperparameter(param_new)
                elif (
                    type(hp) is csh.CategoricalHyperparameter
                    or type(hp) is csh.OrdinalHyperparameter
                ):
                    if type(hp) is csh.OrdinalHyperparameter:
                        choices = hp.sequence
                    else:
                        choices = hp.choices
                    # up-weight the best configuration's choice
                    weights = len(choices) * [1.0]
                    index = choices.index(best_param[hp.name])
                    weights[index] = fac_categorical
                    norm_weights = [float(i) / sum(weights) for i in weights]
                    param_new = csh.CategoricalHyperparameter(
                        name=hp.name, choices=choices, weights=norm_weights
                    )
                    cst_new.add_hyperparameter(param_new)
                else:
                    logging.warning(f"Not fitting {hp} because it is not supported!")
                    cst_new.add_hyperparameter(hp)
            else:
                logging.warning(
                    f"Not fitting {hp} because it was not found in the dataframe!"
                )
                cst_new.add_hyperparameter(hp)

        # For conditions
        for cond in cst.get_conditions():
            if type(cond) == CS.AndConjunction or type(cond) == CS.OrConjunction:
                cond_list = []
                for comp in cond.components:
                    cond_list.append(self._return_cond(comp, cst_new))
                if type(cond) is CS.AndConjunction:
                    cond_new = CS.AndConjunction(*cond_list)
                elif type(cond) is CS.OrConjunction:
                    cond_new = CS.OrConjunction(*cond_list)
                else:
                    logging.warning(f"Condition {type(cond)} is not implemented!")
            else:
                cond_new = self._return_cond(cond, cst_new)
            cst_new.add_condition(cond_new)

        # For forbiddens
        for cond in cst.get_forbiddens():
            if type(cond) is CS.ForbiddenAndConjunction:
                cond_list = []
                for comp in cond.components:
                    cond_list.append(self._return_forbid(comp, cst_new))
                cond_new = CS.ForbiddenAndConjunction(*cond_list)
            elif (
                type(cond) is CS.ForbiddenEqualsClause
                or type(cond) is CS.ForbiddenInClause
            ):
                cond_new = self._return_forbid(cond, cst_new)
            else:
                logging.warning(f"Forbidden {type(cond)} is not implemented!")
            cst_new.add_forbidden_clause(cond_new)

        # the biased space is used the next time the optimizer is (re)built
        self._opt_kwargs["dimensions"] = cst_new
def _to_dict(self, x: list) -> dict:
"""Transform a list of hyperparameter values to a ``dict`` where keys are hyperparameters names and values are hyperparameters values.
Args:
x (list): a list of hyperparameter values.
Returns:
dict: a dictionnary of hyperparameter names and values.
"""
res = {}
hps_names = self._problem.hyperparameter_names
# to enforce native python types instead of numpy types
x = map(lambda xi: getattr(xi, "tolist", lambda: xi)(), x)
for hps_name, xi in zip(hps_names, x):
res[hps_name] = xi
return res
class AMBS(CBO):
    """AMBS is now deprecated and will be removed in the future use 'CBO' (Centralized Bayesian Optimization) instead!"""

    def __init__(
        self,
        problem,
        evaluator,
        random_state: int = None,
        log_dir: str = ".",
        verbose: int = 0,
        surrogate_model="RF",
        acq_func: str = "UCB",
        acq_optimizer: str = "auto",
        kappa: float = 1.96,
        xi: float = 0.001,
        n_points: int = 10000,
        filter_duplicated: bool = True,
        update_prior: bool = False,
        multi_point_strategy: str = "cl_max",
        n_jobs: int = 1,
        n_initial_points=10,
        initial_points=None,
        sync_communication: bool = False,
        filter_failures: str = "mean",
        **kwargs,
    ):
        # Forward every argument, positionally and unchanged, to CBO.
        forwarded = (
            problem,
            evaluator,
            random_state,
            log_dir,
            verbose,
            surrogate_model,
            acq_func,
            acq_optimizer,
            kappa,
            xi,
            n_points,
            filter_duplicated,
            update_prior,
            multi_point_strategy,
            n_jobs,
            n_initial_points,
            initial_points,
            sync_communication,
            filter_failures,
        )
        super().__init__(*forwarded, **kwargs)

        # Warn only after construction succeeds, matching historical behavior.
        import warnings

        warnings.warn(
            "'AMBS' is now deprecated and will be removed in the future use 'CBO' (Centralized Bayesian Optimization) instead!",
            category=DeprecationWarning,
        )
| 43,289 | 45.349036 | 938 | py |
deephyper | deephyper-master/deephyper/search/nas/_agebo.py | import collections
import deephyper.skopt
import numpy as np
from deephyper.search.nas._regevo import RegularizedEvolution
# Adapt minimization -> maximization with DeepHyper: DeepHyper maximizes the
# objective while the underlying skopt optimizer minimizes it, so user-facing
# names are flipped before being handed to skopt.
MAP_liar_strategy = {
    "cl_min": "cl_max",
    "cl_max": "cl_min",
}
# User-facing "UCB" (upper confidence bound, maximization) corresponds to
# skopt's "LCB" (lower confidence bound, minimization).
MAP_acq_func = {
    "UCB": "LCB",
}
class AgEBO(RegularizedEvolution):
    """`Aging evolution with Bayesian Optimization <https://arxiv.org/abs/2010.16358>`_.
    This algorithm build on the `Regularized Evolution <https://arxiv.org/abs/1802.01548>`_. It cumulates Hyperparameter optimization with Bayesian optimisation and Neural architecture search with regularized evolution.
    Args:
        problem (NaProblem): Neural architecture search problem describing the search space to explore.
        evaluator (Evaluator): An ``Evaluator`` instance responsible of distributing the tasks.
        random_state (int, optional): Random seed. Defaults to None.
        log_dir (str, optional): Log directory where search's results are saved. Defaults to ".".
        verbose (int, optional): Indicate the verbosity level of the search. Defaults to 0.
        population_size (int, optional): the number of individuals to keep in the population. Defaults to ``100``.
        sample_size (int, optional): the number of individuals that should participate in each tournament. Defaults to ``10``.
        n_initial_points (int, optional): Number of collected objectives required before fitting the surrogate-model. Defaults to ``10``.
        initial_points (List[Dict], optional): A list of initial points to evaluate where each point is a dictionnary where keys are names of hyperparameters and values their corresponding choice. Defaults to ``None`` for them to be generated randomly from the search space.
        surrogate_model (str, optional): Surrogate model used by the Bayesian optimization. Can be a value in ``["RF", "ET", "GBRT", "DUMMY"]``. Defaults to ``"RF"``.
        acq_func (str, optional): Acquisition function used by the Bayesian optimization. Can be a value in ``["UCB", "EI", "PI", "gp_hedge"]``. Defaults to ``"UCB"``.
        kappa (float, optional): Manage the exploration/exploitation tradeoff for the "UCB" acquisition function. Defaults to ``0.001`` for strong exploitation.
        xi (float, optional): Manage the exploration/exploitation tradeoff of ``"EI"`` and ``"PI"`` acquisition function. Defaults to ``0.000001`` for strong exploitation.
        n_points (int, optional): The number of configurations sampled from the search space to infer each batch of new evaluated configurations. Defaults to ``10000``.
        liar_strategy (str, optional): Definition of the constant value use for the Liar strategy. Can be a value in ``["cl_min", "cl_mean", "cl_max"]`` . Defaults to ``"cl_max"``.
        n_jobs (int, optional): Number of parallel processes used to fit the surrogate model of the Bayesian optimization. A value of ``-1`` will use all available cores. Defaults to ``1``.
        sync_communcation (bool, optional): Performs the search in a batch-synchronous manner. Defaults to ``False`` for asynchronous updates.
    """
    def __init__(
        self,
        problem,
        evaluator,
        random_state: int = None,
        log_dir: str = ".",
        verbose: int = 0,
        # RE (regularized-evolution parameters)
        population_size: int = 100,
        sample_size: int = 10,
        # BO (Bayesian-optimization parameters)
        n_initial_points: int = 10,
        initial_points=None,
        surrogate_model: str = "RF",
        acq_func: str = "UCB",
        kappa: float = 0.001,
        xi: float = 0.000001,
        n_points: int = 10000,
        liar_strategy: str = "cl_max",
        n_jobs: int = 1,
        sync_communication: bool = False,
    ):
        super().__init__(
            problem,
            evaluator,
            random_state,
            log_dir,
            verbose,
            population_size,
            sample_size,
        )
        # AgEBO is the BO extension of RegularizedEvolution: it requires a
        # non-empty hyperparameter space for the surrogate to optimize.
        if len(self._problem._hp_space._space) == 0:
            raise ValueError(
                "No hyperparameter space was defined for this problem use 'RegularizedEvolution' instead!"
            )
        # Validate input parameters early with explicit error messages.
        surrogate_model_allowed = ["RF", "ET", "GBRT", "DUMMY"]
        if not (surrogate_model in surrogate_model_allowed):
            raise ValueError(
                f"Parameter 'surrogate_model={surrogate_model}' should have a value in {surrogate_model_allowed}!"
            )
        acq_func_allowed = ["UCB", "EI", "PI", "gp_hedge"]
        if not (acq_func in acq_func_allowed):
            raise ValueError(
                f"Parameter 'acq_func={acq_func}' should have a value in {acq_func_allowed}!"
            )
        if not (np.isscalar(kappa)):
            raise ValueError("Parameter 'kappa' should be a scalar value!")
        if not (np.isscalar(xi)):
            raise ValueError("Parameter 'xi' should be a scalar value!")
        if not (type(n_points) is int):
            raise ValueError("Parameter 'n_points' shoud be an integer value!")
        liar_strategy_allowed = ["cl_min", "cl_mean", "cl_max"]
        if not (liar_strategy in liar_strategy_allowed):
            raise ValueError(
                f"Parameter 'liar_strategy={liar_strategy}' should have a value in {liar_strategy_allowed}!"
            )
        if not (type(n_jobs) is int):
            raise ValueError("Parameter 'n_jobs' should be an integer value!")
        self._n_initial_points = n_initial_points
        # Normalize initial points to lists of values ordered like the
        # problem's hyperparameter names.
        self._initial_points = []
        if initial_points is not None and len(initial_points) > 0:
            for point in initial_points:
                if isinstance(point, list):
                    self._initial_points.append(point)
                elif isinstance(point, dict):
                    self._initial_points.append(
                        [point[hp_name] for hp_name in problem.hyperparameter_names]
                    )
                else:
                    raise ValueError(
                        f"Initial points should be dict or list but {type(point)} was given!"
                    )
        # Map the user-facing (maximization) liar strategy to skopt's
        # minimization counterpart (see MAP_liar_strategy above).
        self._liar_strategy = MAP_liar_strategy.get(liar_strategy, liar_strategy)
        base_estimator = self._get_surrogate_model(
            surrogate_model, n_jobs, random_state=self._random_state.randint(0, 2**32)
        )
        # The skopt optimizer is created lazily in _setup_hp_optimizer().
        self._hp_opt = None
        self._hp_opt_kwargs = dict(
            acq_optimizer="sampling",
            acq_optimizer_kwargs={
                "n_points": n_points,
                "filter_duplicated": False,
            },
            dimensions=self._problem._hp_space._space,
            base_estimator=base_estimator,
            acq_func=MAP_acq_func.get(acq_func, acq_func),
            acq_func_kwargs={"xi": xi, "kappa": kappa},
            n_initial_points=self._n_initial_points,
            initial_points=self._initial_points,
            random_state=self._random_state,
        )
        # "ALL" gathers batch-synchronously, "BATCH" asynchronously (per the
        # ``sync_communcation`` argument documented above).
        self._gather_type = "ALL" if sync_communication else "BATCH"
    def _setup_hp_optimizer(self):
        # Instantiate the skopt optimizer for the hyperparameter space.
        self._hp_opt = deephyper.skopt.Optimizer(**self._hp_opt_kwargs)
    def _saved_keys(self, job):
        # Columns persisted by Evaluator.dump_evals for each finished job:
        # the architecture sequence plus every hyperparameter value.
        res = {"arch_seq": str(job.config["arch_seq"])}
        hp_names = self._problem._hp_space._space.get_hyperparameter_names()
        for hp_name in hp_names:
            if hp_name == "loss":
                res["loss"] = job.config["loss"]
            else:
                res[hp_name] = job.config["hyperparameters"][hp_name]
        return res
    def _search(self, max_evals, timeout):
        if self._hp_opt is None:
            self._setup_hp_optimizer()
        num_evals_done = 0
        # Aging population: appending past maxlen drops the oldest member.
        population = collections.deque(maxlen=self._population_size)
        # Filling available nodes at start
        batch = self._gen_random_batch(size=self._evaluator.num_workers)
        self._evaluator.submit(batch)
        # Main loop (max_evals < 0 means no budget limit)
        while max_evals < 0 or num_evals_done < max_evals:
            # Collecting finished evaluations
            new_results = list(self._evaluator.gather(self._gather_type, size=1))
            if len(new_results) > 0:
                population.extend(new_results)
                self._evaluator.dump_evals(
                    saved_keys=self._saved_keys, log_dir=self._log_dir
                )
                num_received = len(new_results)
                num_evals_done += num_received
                hp_results_X, hp_results_y = [], []
                # If the population is big enough evolve the population
                if len(population) == self._population_size:
                    children_batch = []
                    # For each new parent/result we create a child from it
                    for new_i in range(len(new_results)):
                        # select_sample: tournament of `sample_size` members
                        indexes = self._random_state.choice(
                            self._population_size, self._sample_size, replace=False
                        )
                        sample = [population[i] for i in indexes]
                        # select_parent: best objective within the tournament
                        parent = self._select_parent(sample)
                        # copy_mutate_parent
                        child = self._copy_mutate_arch(parent)
                        # add child to batch
                        children_batch.append(child)
                        # collect infos for hp optimization
                        new_i_hp_values = self._problem.extract_hp_values(
                            config=new_results[new_i][0]
                        )
                        new_i_y = new_results[new_i][1]
                        hp_results_X.append(new_i_hp_values)
                        # negate: DeepHyper maximizes, skopt minimizes
                        hp_results_y.append(-new_i_y)
                    self._hp_opt.tell(hp_results_X, hp_results_y)  # !fit: costly
                    new_hps = self._hp_opt.ask(
                        n_points=len(new_results), strategy=self._liar_strategy
                    )
                    # Pair each suggested hyperparameter set with one child.
                    new_configs = []
                    for hp_values, child_arch_seq in zip(new_hps, children_batch):
                        new_config = self._problem.gen_config(child_arch_seq, hp_values)
                        new_configs.append(new_config)
                    # submit_childs
                    if len(new_results) > 0:
                        self._evaluator.submit(new_configs)
                else:  # If the population is too small keep increasing it
                    # For each new parent/result we create a child from it
                    for new_i in range(len(new_results)):
                        new_i_hp_values = self._problem.extract_hp_values(
                            config=new_results[new_i][0]
                        )
                        new_i_y = new_results[new_i][1]
                        hp_results_X.append(new_i_hp_values)
                        # negate: DeepHyper maximizes, skopt minimizes
                        hp_results_y.append(-new_i_y)
                    self._hp_opt.tell(hp_results_X, hp_results_y)  # !fit: costly
                    new_hps = self._hp_opt.ask(
                        n_points=len(new_results), strategy=self._liar_strategy
                    )
                    new_batch = self._gen_random_batch(
                        size=len(new_results), hps=new_hps
                    )
                    self._evaluator.submit(new_batch)
    def _gen_random_batch(self, size: int, hps: list = None) -> list:
        # Build `size` configurations with random architectures; the
        # hyperparameters come either from the BO optimizer (hps is None) or
        # from an explicitly passed list of suggestions.
        batch = []
        if hps is None:
            points = self._hp_opt.ask(n_points=size)
            for hp_values in points:
                arch_seq = self._random_search_space()
                config = self._problem.gen_config(arch_seq, hp_values)
                batch.append(config)
        else:  # passed hps are used
            assert size == len(hps)
            for hp_values in hps:
                arch_seq = self._random_search_space()
                config = self._problem.gen_config(arch_seq, hp_values)
                batch.append(config)
        return batch
    def _copy_mutate_arch(self, parent_arch: list) -> list:
        """Copy the parent architecture and mutate exactly one position.
        # ! Time performance is critical because called sequentialy
        Args:
            parent_arch (list(int)): embedding of the parent's architecture.
        Returns:
            list(int): embedding of the mutated architecture of the child.
        """
        i = self._random_state.choice(len(parent_arch))
        child_arch = parent_arch[:]
        range_upper_bound = self.space_list[i][1]
        elements = [j for j in range(range_upper_bound + 1) if j != child_arch[i]]
        # The mutation has to create a different search_space!
        sample = self._random_state.choice(elements, 1)[0]
        child_arch[i] = sample
        return child_arch
    def _get_surrogate_model(
        self, name: str, n_jobs: int = None, random_state: int = None
    ):
        """Get a surrogate model from Scikit-Optimize.
        Args:
            name (str): name of the surrogate model.
            n_jobs (int): number of parallel processes to distribute the computation of the surrogate model.
        Raises:
            ValueError: when the name of the surrogate model is unknown.
        """
        accepted_names = ["RF", "ET", "GBRT", "DUMMY"]
        if not (name in accepted_names):
            raise ValueError(
                f"Unknown surrogate model {name}, please choose among {accepted_names}."
            )
        if name == "RF":
            surrogate = deephyper.skopt.learning.RandomForestRegressor(
                n_jobs=n_jobs, random_state=random_state
            )
        elif name == "ET":
            surrogate = deephyper.skopt.learning.ExtraTreesRegressor(
                n_jobs=n_jobs, random_state=random_state
            )
        elif name == "GBRT":
            surrogate = deephyper.skopt.learning.GradientBoostingQuantileRegressor(
                n_jobs=n_jobs, random_state=random_state
            )
        else:  # for DUMMY and GP: pass the name through to skopt
            surrogate = name
        return surrogate
| 14,057 | 41.343373 | 274 | py |
deephyper | deephyper-master/deephyper/search/nas/_ambsmixed.py | import logging
import ConfigSpace as CS
import numpy as np
import deephyper.skopt
from deephyper.problem import HpProblem
from deephyper.search.nas._base import NeuralArchitectureSearch
# Adapt minimization -> maximization with DeepHyper: DeepHyper maximizes the
# objective while the underlying skopt optimizer minimizes it, so user-facing
# names are flipped before being handed to skopt.
MAP_liar_strategy = {
    "cl_min": "cl_max",
    "cl_max": "cl_min",
}
# User-facing "UCB" (upper confidence bound, maximization) corresponds to
# skopt's "LCB" (lower confidence bound, minimization).
MAP_acq_func = {
    "UCB": "LCB",
}
class AMBSMixed(NeuralArchitectureSearch):
    """Asynchronous Model-Based Search baised on the `Scikit-Optimized Optimizer <https://scikit-optimize.github.io/stable/modules/generated/deephyper.skopt.Optimizer.html#deephyper.skopt.Optimizer>`_. It is extended to the case of joint hyperparameter and neural architecture search.
    Args:
        problem (NaProblem): Neural architecture search problem describing the search space to explore.
        evaluator (Evaluator): An ``Evaluator`` instance responsible of distributing the tasks.
        random_state (int, optional): Random seed. Defaults to None.
        log_dir (str, optional): Log directory where search's results are saved. Defaults to ".".
        verbose (int, optional): Indicate the verbosity level of the search. Defaults to 0.
        surrogate_model (str, optional): Surrogate model used by the Bayesian optimization. Can be a value in ``["RF", "ET", "GBRT", "DUMMY"]``. Defaults to ``"RF"``.
        acq_func (str, optional): Acquisition function used by the Bayesian optimization. Can be a value in ``["UCB", "EI", "PI", "gp_hedge"]``. Defaults to ``"UCB"``.
        kappa (float, optional): Manage the exploration/exploitation tradeoff for the "UCB" acquisition function. Defaults to ``1.96`` for a balance between exploitation and exploration.
        xi (float, optional): Manage the exploration/exploitation tradeoff of ``"EI"`` and ``"PI"`` acquisition function. Defaults to ``0.001`` for a balance between exploitation and exploration.
        n_points (int, optional): The number of configurations sampled from the search space to infer each batch of new evaluated configurations. Defaults to ``10000``.
        liar_strategy (str, optional): Definition of the constant value use for the Liar strategy. Can be a value in ``["cl_min", "cl_mean", "cl_max"]`` . Defaults to ``"cl_max"``.
        n_jobs (int, optional): Number of parallel processes used to fit the surrogate model of the Bayesian optimization. A value of ``-1`` will use all available cores. Defaults to ``1``.
    """
    def __init__(
        self,
        problem,
        evaluator,
        random_state=None,
        log_dir=".",
        verbose=0,
        surrogate_model: str = "RF",
        acq_func: str = "UCB",
        kappa: float = 1.96,
        xi: float = 0.001,
        n_points: int = 10000,
        liar_strategy: str = "cl_max",
        n_jobs: int = 1,
        **kwargs,
    ):
        super().__init__(problem, evaluator, random_state, log_dir, verbose)
        # Setup the search space: one joint ConfigSpace combining the
        # hyperparameter space and one integer dimension per variable node of
        # the neural architecture search space.
        na_search_space = self._problem.build_search_space()
        self.hp_space = self._problem._hp_space  # !hyperparameters
        self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
        self.na_space = HpProblem()
        self.na_space._space.seed(self._random_state.get_state()[1][0])
        for i, vnode in enumerate(na_search_space.variable_nodes):
            self.na_space.add_hyperparameter(
                (0, vnode.num_ops - 1), name=f"vnode_{i:05d}"
            )
        # Joint space: prefix "1" = hyperparameters, prefix "2" = architecture.
        self._space = CS.ConfigurationSpace(seed=self._random_state.get_state()[1][0])
        self._space.add_configuration_space(
            prefix="1", configuration_space=self.hp_space.space
        )
        self._space.add_configuration_space(
            prefix="2", configuration_space=self.na_space.space
        )
        # check input parameters
        surrogate_model_allowed = ["RF", "ET", "GBRT", "DUMMY"]
        if not (surrogate_model in surrogate_model_allowed):
            raise ValueError(
                f"Parameter 'surrogate_model={surrogate_model}' should have a value in {surrogate_model_allowed}!"
            )
        acq_func_allowed = ["UCB", "EI", "PI", "gp_hedge"]
        if not (acq_func in acq_func_allowed):
            raise ValueError(
                f"Parameter 'acq_func={acq_func}' should have a value in {acq_func_allowed}!"
            )
        if not (np.isscalar(kappa)):
            raise ValueError("Parameter 'kappa' should be a scalar value!")
        if not (np.isscalar(xi)):
            raise ValueError("Parameter 'xi' should be a scalar value!")
        if not (type(n_points) is int):
            raise ValueError("Parameter 'n_points' shoud be an integer value!")
        liar_strategy_allowed = ["cl_min", "cl_mean", "cl_max"]
        if not (liar_strategy in liar_strategy_allowed):
            raise ValueError(
                f"Parameter 'liar_strategy={liar_strategy}' should have a value in {liar_strategy_allowed}!"
            )
        if not (type(n_jobs) is int):
            raise ValueError("Parameter 'n_jobs' should be an integer value!")
        # One initial random point per available worker.
        self._n_initial_points = self._evaluator.num_workers
        # Map the user-facing (maximization) liar strategy to skopt's
        # minimization counterpart (see MAP_liar_strategy above).
        self._liar_strategy = MAP_liar_strategy.get(liar_strategy, liar_strategy)
        base_estimator = self._get_surrogate_model(
            surrogate_model, n_jobs, random_state=self._random_state.get_state()[1][0]
        )
        # The skopt optimizer is created lazily in _setup_optimizer().
        self._opt = None
        self._opt_kwargs = dict(
            dimensions=self._space,
            base_estimator=base_estimator,
            acq_func=MAP_acq_func.get(acq_func, acq_func),
            acq_optimizer="sampling",
            acq_func_kwargs={"xi": xi, "kappa": kappa, "n_points": n_points},
            n_initial_points=self._n_initial_points,
            random_state=self._random_state,
        )
    def _setup_optimizer(self):
        # Instantiate the skopt optimizer for the joint search space.
        self._opt = deephyper.skopt.Optimizer(**self._opt_kwargs)
    def _saved_keys(self, job):
        # Columns persisted by Evaluator.dump_evals for each finished job:
        # the architecture sequence plus every hyperparameter value.
        res = {"arch_seq": str(job.config["arch_seq"])}
        hp_names = self._problem._hp_space._space.get_hyperparameter_names()
        for hp_name in hp_names:
            if hp_name == "loss":
                res["loss"] = job.config["loss"]
            else:
                res[hp_name] = job.config["hyperparameters"][hp_name]
        return res
    def _search(self, max_evals, timeout):
        if self._opt is None:
            self._setup_optimizer()
        num_evals_done = 0
        # Filling available nodes at start
        logging.info(f"Generating {self._evaluator.num_workers} initial points...")
        self._evaluator.submit(self._get_random_batch(size=self._n_initial_points))
        # Main loop (max_evals < 0 means no budget limit)
        while max_evals < 0 or num_evals_done < max_evals:
            # Collecting finished evaluations
            new_results = list(self._evaluator.gather("BATCH", size=1))
            num_received = len(new_results)
            if num_received > 0:
                self._evaluator.dump_evals(
                    saved_keys=self._saved_keys, log_dir=self._log_dir
                )
                num_evals_done += num_received
                if num_evals_done >= max_evals:
                    break
                # Transform configurations to list to fit optimizer: the flat
                # vector is hyperparameter values followed by the arch_seq.
                opt_X = []
                opt_y = []
                for cfg, obj in new_results:
                    arch_seq = cfg["arch_seq"]
                    hp_val = self._problem.extract_hp_values(cfg)
                    x = replace_nan(hp_val + arch_seq)
                    opt_X.append(x)
                    opt_y.append(-obj)  # !maximizing
                self._opt.tell(opt_X, opt_y)  # !fit: costly
                new_X = self._opt.ask(
                    n_points=len(new_results), strategy=self._liar_strategy
                )
                new_batch = []
                for x in new_X:
                    # First hp_size entries are hyperparameters, the rest is
                    # the architecture sequence.
                    new_cfg = self._problem.gen_config(
                        x[self.hp_size :], x[: self.hp_size]
                    )
                    new_batch.append(new_cfg)
                # submit_childs
                if len(new_results) > 0:
                    self._evaluator.submit(new_batch)
    def _get_surrogate_model(
        self, name: str, n_jobs: int = None, random_state: int = None
    ):
        """Get a surrogate model from Scikit-Optimize.
        Args:
            name (str): name of the surrogate model.
            n_jobs (int): number of parallel processes to distribute the computation of the surrogate model.
        Raises:
            ValueError: when the name of the surrogate model is unknown.
        """
        accepted_names = ["RF", "ET", "GBRT", "DUMMY"]
        if not (name in accepted_names):
            raise ValueError(
                f"Unknown surrogate model {name}, please choose among {accepted_names}."
            )
        if name == "RF":
            surrogate = deephyper.skopt.learning.RandomForestRegressor(
                n_jobs=n_jobs, random_state=random_state
            )
        elif name == "ET":
            surrogate = deephyper.skopt.learning.ExtraTreesRegressor(
                n_jobs=n_jobs, random_state=random_state
            )
        elif name == "GBRT":
            surrogate = deephyper.skopt.learning.GradientBoostingQuantileRegressor(
                n_jobs=n_jobs, random_state=random_state
            )
        else:  # for DUMMY and GP: pass the name through to skopt
            surrogate = name
        return surrogate
    def _get_random_batch(self, size: int) -> list:
        # Ask the optimizer for `size` points of the joint space and convert
        # each one into a problem configuration.
        batch = []
        # NOTE(review): len(batch) is always 0 here, so n_points == size;
        # kept as-is for fidelity.
        n_points = max(0, size - len(batch))
        if n_points > 0:
            points = self._opt.ask(n_points=n_points)
            for point in points:
                point_as_dict = self._problem.gen_config(
                    point[self.hp_size :], point[: self.hp_size]
                )
                batch.append(point_as_dict)
        return batch
def replace_nan(x):
    """Replace every occurrence of the string ``"nan"`` in ``x`` by ``np.nan``.

    Used to sanitize flat configuration vectors before feeding them to the
    skopt optimizer.

    :meta private:
    """
    sanitized = []
    for item in x:
        sanitized.append(np.nan if item == "nan" else item)
    return sanitized
| 9,952 | 39.295547 | 284 | py |
deephyper | deephyper-master/deephyper/search/nas/_regevo.py | import collections
from deephyper.search.nas._base import NeuralArchitectureSearch
class RegularizedEvolution(NeuralArchitectureSearch):
    """`Regularized evolution <https://arxiv.org/abs/1802.01548>`_ neural architecture search. This search is only compatible with a ``NaProblem`` that has fixed hyperparameters.
    Args:
        problem (NaProblem): Neural architecture search problem describing the search space to explore.
        evaluator (Evaluator): An ``Evaluator`` instance responsible of distributing the tasks.
        random_state (int, optional): Random seed. Defaults to None.
        log_dir (str, optional): Log directory where search's results are saved. Defaults to ".".
        verbose (int, optional): Indicate the verbosity level of the search. Defaults to 0.
        population_size (int, optional): the number of individuals to keep in the population. Defaults to 100.
        sample_size (int, optional): the number of individuals that should participate in each tournament. Defaults to 10.
    """
    def __init__(
        self,
        problem,
        evaluator,
        random_state: int = None,
        log_dir: str = ".",
        verbose: int = 0,
        population_size: int = 100,
        sample_size: int = 10,
        **kwargs
    ):
        super().__init__(problem, evaluator, random_state, log_dir, verbose)
        # Only reject a non-empty hyperparameter space when instantiated
        # directly: subclasses (e.g. AgEBO) handle hyperparameters themselves.
        if (
            type(self) is RegularizedEvolution
            and len(self._problem._hp_space._space) > 0
        ):
            raise ValueError(
                "An hyperparameter space was defined for this problem use 'AgEBO' instead!"
            )
        # Setup
        self.pb_dict = self._problem.space
        # Discrete bounds of each variable node of the architecture space.
        self.space_list = self._problem.build_search_space().choices()
        self._population_size = int(population_size)
        self._sample_size = int(sample_size)
        # Aging population: appending past maxlen drops the oldest member.
        self._population = collections.deque(maxlen=self._population_size)
    def _saved_keys(self, job):
        # Only the architecture sequence is persisted by dump_evals.
        res = {"arch_seq": str(job.config["arch_seq"])}
        return res
    def _search(self, max_evals, timeout):
        if len(self._problem._hp_space._space) > 0:
            raise ValueError(
                "An hyperparameter space was defined for this problem but the current search is not compatible with joint hyperparameter and neural architecture search. Constant values should be defined for hyperparameters."
            )
        num_evals_done = 0
        # Filling available nodes at start
        batch = self._gen_random_batch(size=self._evaluator.num_workers)
        self._evaluator.submit(batch)
        # Main loop (max_evals < 0 means no budget limit)
        while max_evals < 0 or num_evals_done < max_evals:
            # Collecting finished evaluations
            new_results = self._evaluator.gather("BATCH", 1)
            num_received = len(new_results)
            if num_received > 0:
                self._population.extend(new_results)
                self._evaluator.dump_evals(
                    saved_keys=self._saved_keys, log_dir=self._log_dir
                )
                num_evals_done += num_received
                if num_evals_done >= max_evals:
                    break
                # If the population is big enough evolve the population
                if len(self._population) == self._population_size:
                    children_batch = []
                    # For each new parent/result we create a child from it
                    for _ in range(num_received):
                        # select_sample: tournament of `sample_size` members
                        indexes = self._random_state.choice(
                            self._population_size, self._sample_size, replace=False
                        )
                        sample = [self._population[i] for i in indexes]
                        # select_parent: best objective within the tournament
                        parent = self._select_parent(sample)
                        # copy_mutate_parent
                        child = self._copy_mutate_arch(parent)
                        # add child to batch
                        children_batch.append(child)
                    # submit_childs
                    self._evaluator.submit(children_batch)
                else:  # If the population is too small keep increasing it
                    self._evaluator.submit(self._gen_random_batch(size=num_received))
    def _select_parent(self, sample: list) -> list:
        # Tournament selection: the (config, objective) pair with the highest
        # objective wins; return its architecture sequence.
        cfg, _ = max(sample, key=lambda x: x[1])
        return cfg["arch_seq"]
    def _gen_random_batch(self, size: int) -> list:
        # Build `size` configurations with uniformly random architectures.
        batch = []
        for _ in range(size):
            cfg = self.pb_dict.copy()
            cfg["arch_seq"] = self._random_search_space()
            batch.append(cfg)
        return batch
    def _random_search_space(self) -> list:
        # One uniform draw in [0, b] for every variable node of the space.
        return [self._random_state.choice(b + 1) for (_, b) in self.space_list]
    def _copy_mutate_arch(self, parent_arch: list) -> dict:
        """Copy the parent architecture, mutate one position and wrap it in a configuration.
        # ! Time performance is critical because called sequentialy
        Args:
            parent_arch (list(int)): embedding of the parent's architecture.
        Returns:
            dict: configuration holding the mutated ``arch_seq`` of the child.
        """
        i = self._random_state.choice(len(parent_arch))
        child_arch = parent_arch[:]
        range_upper_bound = self.space_list[i][1]
        elements = [j for j in range(range_upper_bound + 1) if j != child_arch[i]]
        # The mutation has to create a different search_space!
        sample = self._random_state.choice(elements, 1)[0]
        child_arch[i] = sample
        cfg = self.pb_dict.copy()
        cfg["arch_seq"] = child_arch
        return cfg
| 5,556 | 37.86014 | 224 | py |
deephyper | deephyper-master/deephyper/search/nas/_base.py | from deephyper.search._search import Search
class NeuralArchitectureSearch(Search):
    """Common base for neural architecture search algorithms.

    Propagates the search settings (log directory, verbosity, seed) into the
    problem's space and seeds the hyperparameter search space.
    """

    def __init__(
        self, problem, evaluator, random_state=None, log_dir=".", verbose=0, **kwargs
    ):
        super().__init__(problem, evaluator, random_state, log_dir, verbose)

        seed = self._random_state.get_state()[1][0]
        for key, value in (
            ("log_dir", self._log_dir),
            ("verbose", self._verbose),
            ("seed", seed),
        ):
            self._problem._space[key] = value
        # Seed the hyperparameter search space with the same seed.
        self._problem._hp_space._space.seed(seed)

    def _add_default_keys(self, config: dict) -> dict:
        """Inject the default search keys into ``config`` (in place) and return it."""
        # NOTE(review): ``self._seed`` is not set in this module -- presumably
        # assigned by the ``Search`` base class; confirm upstream.
        config["log_dir"] = self._log_dir
        config["seed"] = self._seed
        config["verbose"] = self._verbose
        return config
| 771 | 34.090909 | 85 | py |
deephyper | deephyper-master/deephyper/search/nas/_random.py | from deephyper.search.nas._base import NeuralArchitectureSearch
class Random(NeuralArchitectureSearch):
    """Random neural architecture search. This search algorithm is compatible with a ``NaProblem`` defining fixed or variable hyperparameters.

    Args:
        problem (NaProblem): Neural architecture search problem describing the search space to explore.
        evaluator (Evaluator): An ``Evaluator`` instance responsible of distributing the tasks.
        random_state (int or RandomState, optional): Random seed. Defaults to None.
        log_dir (str, optional): Log directory where search's results are saved. Defaults to ".".
        verbose (int, optional): Indicate the verbosity level of the search. Defaults to 0.
    """

    def __init__(
        self,
        problem,
        evaluator,
        random_state: int = None,
        log_dir: str = ".",
        verbose: int = 0,
        **kwargs
    ):
        super().__init__(problem, evaluator, random_state, log_dir, verbose)
        # Discrete upper bounds of the neural architecture search space.
        self._space_list = self._problem.build_search_space().choices()

    def _saved_keys(self, job):
        # Columns persisted by Evaluator.dump_evals: the architecture sequence
        # plus every hyperparameter value of the finished job.
        keys = {"arch_seq": str(job.config["arch_seq"])}
        for name in self._problem._hp_space._space.get_hyperparameter_names():
            # "loss" lives at the top level of the config, every other
            # hyperparameter under the "hyperparameters" sub-dictionary.
            if name == "loss":
                keys["loss"] = job.config["loss"]
            else:
                keys[name] = job.config["hyperparameters"][name]
        return keys

    def _search(self, max_evals, timeout):
        num_evals_done = 0

        # Fill every available worker with a random configuration.
        self._evaluator.submit(
            self._gen_random_batch(size=self._evaluator.num_workers)
        )

        # A negative ``max_evals`` means "run until stopped externally".
        while max_evals < 0 or num_evals_done < max_evals:
            results = self._evaluator.gather("BATCH", 1)
            num_received = len(results)
            num_evals_done += num_received

            if num_received > 0:
                self._evaluator.dump_evals(
                    saved_keys=self._saved_keys, log_dir=self._log_dir
                )
            # Refill the freed workers unless the budget is exhausted.
            if max_evals < 0 or num_evals_done < max_evals:
                self._evaluator.submit(self._gen_random_batch(size=num_received))

    def _gen_random_batch(self, size: int) -> list:
        # Sample hyperparameter values; ConfigSpace returns a single
        # configuration (not a list) when ``size == 1``.
        samples = self._problem._hp_space._space.sample_configuration(size)
        if size == 1:
            samples = [samples]

        batch = []
        for sample in samples:
            hp_values = list(dict(sample).values())
            config = self._problem.gen_config(self._gen_random_arch(), hp_values)
            batch.append(self._add_default_keys(config))
        return batch

    def _gen_random_arch(self) -> list:
        # One uniform draw in [0, upper] for every variable node.
        return [self._random_state.choice(upper + 1) for (_, upper) in self._space_list]
| 3,028 | 34.635294 | 142 | py |
deephyper | deephyper-master/deephyper/search/nas/__init__.py | """Neural architecture search algorithms.
"""
from deephyper.search.nas._base import NeuralArchitectureSearch
from deephyper.search.nas._regevo import RegularizedEvolution
from deephyper.search.nas._agebo import AgEBO
from deephyper.search.nas._ambsmixed import AMBSMixed
from deephyper.search.nas._random import Random
from deephyper.search.nas._regevomixed import RegularizedEvolutionMixed
# Public API of the ``deephyper.search.nas`` sub-package.
__all__ = [
    "AgEBO",
    "AMBSMixed",
    "NeuralArchitectureSearch",
    "Random",
    "RegularizedEvolution",
    "RegularizedEvolutionMixed",
]
| 544 | 29.277778 | 71 | py |
deephyper | deephyper-master/deephyper/search/nas/_regevomixed.py | import ConfigSpace as CS
from deephyper.problem import HpProblem
from deephyper.search.nas._regevo import RegularizedEvolution
class RegularizedEvolutionMixed(RegularizedEvolution):
"""Extention of the `Regularized evolution <https://arxiv.org/abs/1802.01548>`_ neural architecture search to the case of joint hyperparameter and neural architecture search.
Args:
problem (NaProblem): Neural architecture search problem describing the search space to explore.
evaluator (Evaluator): An ``Evaluator`` instance responsible of distributing the tasks.
random_state (int, optional): Random seed. Defaults to None.
log_dir (str, optional): Log directory where search's results are saved. Defaults to ".".
verbose (int, optional): Indicate the verbosity level of the search. Defaults to 0.
population_size (int, optional): the number of individuals to keep in the population. Defaults to 100.
sample_size (int, optional): the number of individuals that should participate in each tournament. Defaults to 10.
"""
def __init__(
self,
problem,
evaluator,
random_state: int = None,
log_dir: str = ".",
verbose: int = 0,
population_size: int = 100,
sample_size: int = 10,
**kwargs,
):
super().__init__(
problem,
evaluator,
random_state,
log_dir,
verbose,
population_size,
sample_size,
)
# Setup
na_search_space = self._problem.build_search_space()
self.hp_space = self._problem._hp_space # !hyperparameters
self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
self.na_space = HpProblem()
self.na_space._space.seed(self._random_state.get_state()[1][0])
for i, (low, high) in enumerate(na_search_space.choices()):
self.na_space.add_hyperparameter((low, high), name=f"vnode_{i:05d}")
self._space = CS.ConfigurationSpace(seed=self._random_state.get_state()[1][0])
self._space.add_configuration_space(
prefix="1", configuration_space=self.hp_space.space
)
self._space.add_configuration_space(
prefix="2", configuration_space=self.na_space.space
)
self._space_size = len(self._space.get_hyperparameter_names())
def _saved_keys(self, job):
res = {"arch_seq": str(job.config["arch_seq"])}
hp_names = self._problem._hp_space._space.get_hyperparameter_names()
for hp_name in hp_names:
if hp_name == "loss":
res["loss"] = job.config["loss"]
else:
res[hp_name] = job.config["hyperparameters"][hp_name]
return res
def _search(self, max_evals, timeout):
num_evals_done = 0
# Filling available nodes at start
self._evaluator.submit(self._gen_random_batch(size=self._evaluator.num_workers))
# Main loop
while max_evals < 0 or num_evals_done < max_evals:
# Collecting finished evaluations
new_results = list(self._evaluator.gather("BATCH", size=1))
num_received = len(new_results)
if num_received > 0:
self._population.extend(new_results)
self._evaluator.dump_evals(
saved_keys=self._saved_keys, log_dir=self._log_dir
)
num_evals_done += num_received
if num_evals_done >= max_evals:
break
# If the population is big enough evolve the population
if len(self._population) == self._population_size:
children_batch = []
# For each new parent/result we create a child from it
for _ in range(num_received):
# select_sample
indexes = self._random_state.choice(
self._population_size, self._sample_size, replace=False
)
sample = [self._population[i] for i in indexes]
# select_parent
parent = self._select_parent(sample)
# copy_mutate_parent
child = self._copy_mutate_arch(parent)
# add child to batch
children_batch.append(child)
# submit_childs
self._evaluator.submit(children_batch)
else: # If the population is too small keep increasing it
new_batch = self._gen_random_batch(size=num_received)
self._evaluator.submit(new_batch)
def _select_parent(self, sample: list) -> dict:
    """Return the configuration of the fittest ``(config, objective)`` pair."""
    best_cfg, _ = max(sample, key=lambda pair: pair[1])
    return best_cfg
def _gen_random_batch(self, size: int) -> list:
    """Draw ``size`` random configurations from the joint HP/architecture space."""

    def draw_column(hp, count):
        # One independent sample of this hyperparameter per batch element.
        return [hp.sample(self._space.random) for _ in range(count)]

    columns = (draw_column(hp, size) for hp in self._space.get_hyperparameters())
    batch = []
    for row in zip(*columns):
        row = list(row)
        # First ``hp_size`` entries are hyperparameters, the rest encodes
        # the architecture.
        cfg = self._problem.gen_config(row[self.hp_size :], row[: self.hp_size])
        batch.append(cfg)
    return batch
def _copy_mutate_arch(self, parent_cfg: dict) -> dict:
    """Create a child configuration by resampling one variable of the parent.

    # ! Time performance is critical because called sequentially

    Args:
        parent_cfg (dict): the parent configuration, containing its
            hyperparameter values and its ``"arch_seq"`` architecture
            embedding.

    Returns:
        dict: the child configuration, identical to the parent except for
        exactly one uniformly chosen variable that was resampled.
    """
    # Flat vector: hyperparameter values first, then the architecture encoding.
    hp_x = self._problem.extract_hp_values(parent_cfg)
    x = hp_x + parent_cfg["arch_seq"]
    # Mutate a single uniformly chosen dimension of the joint space.
    i = self._random_state.choice(self._space_size)
    hp = self._space.get_hyperparameters()[i]
    x[i] = hp.sample(self._space.random)
    child_cfg = self._problem.gen_config(x[self.hp_size :], x[: self.hp_size])
    return child_cfg
| 6,069 | 35.347305 | 178 | py |
deephyper | deephyper-master/deephyper/sklearn/__init__.py | """Sub-package providing tools for automl.
"""
| 47 | 15 | 42 | py |
deephyper | deephyper-master/deephyper/sklearn/classifier/_autosklearn1.py | """
This module provides ``problem_autosklearn1`` and ``run_autosklearn`` for classification tasks.
"""
import warnings
from inspect import signature
import ConfigSpace as cs
from deephyper.problem import HpProblem
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.svm import SVC
from xgboost import XGBClassifier
def minmaxstdscaler() -> Pipeline:
    """Build a two-step preprocessor: min-max scaling then standardization.

    Returns:
        Pipeline: ``[MinMaxScaler, StandardScaler]`` applied in that order.
    """
    steps = [
        ("minmaxscaler", MinMaxScaler()),
        ("stdscaler", StandardScaler()),
    ]
    return Pipeline(steps)
# Joint hyperparameter search space for the AutoML classification problem.
problem_autosklearn1 = HpProblem()

# Top-level choice: which classifier family to train.
classifier = problem_autosklearn1.add_hyperparameter(
    name="classifier",
    value=[
        "RandomForest",
        "Logistic",
        "AdaBoost",
        "KNeighbors",
        "MLP",
        "SVC",
        "XGBoost",
    ],
)

# n_estimators: only active for RandomForest and AdaBoost.
n_estimators = problem_autosklearn1.add_hyperparameter(
    name="n_estimators", value=(1, 2000, "log-uniform")
)
cond_n_estimators = cs.OrConjunction(
    cs.EqualsCondition(n_estimators, classifier, "RandomForest"),
    cs.EqualsCondition(n_estimators, classifier, "AdaBoost"),
)
problem_autosklearn1.add_condition(cond_n_estimators)

# max_depth: only active for RandomForest.
max_depth = problem_autosklearn1.add_hyperparameter(
    name="max_depth", value=(2, 100, "log-uniform")
)
cond_max_depth = cs.EqualsCondition(max_depth, classifier, "RandomForest")
problem_autosklearn1.add_condition(cond_max_depth)

# n_neighbors: only active for KNeighbors.
n_neighbors = problem_autosklearn1.add_hyperparameter(
    name="n_neighbors", value=(1, 100)
)
cond_n_neighbors = cs.EqualsCondition(n_neighbors, classifier, "KNeighbors")
problem_autosklearn1.add_condition(cond_n_neighbors)

# alpha: only active for MLP.
alpha = problem_autosklearn1.add_hyperparameter(
    name="alpha", value=(1e-5, 10.0, "log-uniform")
)
cond_alpha = cs.EqualsCondition(alpha, classifier, "MLP")
problem_autosklearn1.add_condition(cond_alpha)

# C: only active for Logistic and SVC.
C = problem_autosklearn1.add_hyperparameter(name="C", value=(1e-5, 10.0, "log-uniform"))
cond_C = cs.OrConjunction(
    cs.EqualsCondition(C, classifier, "Logistic"),
    cs.EqualsCondition(C, classifier, "SVC"),
)
problem_autosklearn1.add_condition(cond_C)

# kernel: only active for SVC.
kernel = problem_autosklearn1.add_hyperparameter(
    name="kernel", value=["linear", "poly", "rbf", "sigmoid"]
)
cond_kernel = cs.EqualsCondition(kernel, classifier, "SVC")
problem_autosklearn1.add_condition(cond_kernel)

# gamma: only active when the SVC kernel is rbf, poly, or sigmoid.
gamma = problem_autosklearn1.add_hyperparameter(
    name="gamma", value=(1e-5, 10.0, "log-uniform")
)
cond_gamma = cs.OrConjunction(
    cs.EqualsCondition(gamma, kernel, "rbf"),
    cs.EqualsCondition(gamma, kernel, "poly"),
    cs.EqualsCondition(gamma, kernel, "sigmoid"),
)
problem_autosklearn1.add_condition(cond_gamma)

# Mapping from the "classifier" hyperparameter value to the estimator class.
CLASSIFIERS = {
    "RandomForest": RandomForestClassifier,
    "Logistic": LogisticRegression,
    "AdaBoost": AdaBoostClassifier,
    "KNeighbors": KNeighborsClassifier,
    "MLP": MLPClassifier,
    "SVC": SVC,
    "XGBoost": XGBClassifier,
}
def run_autosklearn1(config: dict, load_data: callable) -> float:
    """Run function which can be used for AutoML classification.

    It has to be used with the ``deephyper.sklearn.classifier.problem_autosklearn1`` problem definition which corresponds to:

    .. code-block::

        Configuration space object:
            Hyperparameters:
                C, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                alpha, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                classifier, Type: Categorical, Choices: {RandomForest, Logistic, AdaBoost, KNeighbors, MLP, SVC, XGBoost}, Default: RandomForest
                gamma, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                kernel, Type: Categorical, Choices: {linear, poly, rbf, sigmoid}, Default: linear
                max_depth, Type: UniformInteger, Range: [2, 100], Default: 14, on log-scale
                n_estimators, Type: UniformInteger, Range: [1, 2000], Default: 45, on log-scale
                n_neighbors, Type: UniformInteger, Range: [1, 100], Default: 50
            Conditions:
                (C | classifier == 'Logistic' || C | classifier == 'SVC')
                (gamma | kernel == 'rbf' || gamma | kernel == 'poly' || gamma | kernel == 'sigmoid')
                (n_estimators | classifier == 'RandomForest' || n_estimators | classifier == 'AdaBoost')
                alpha | classifier == 'MLP'
                kernel | classifier == 'SVC'
                max_depth | classifier == 'RandomForest'
                n_neighbors | classifier == 'KNeighbors'

    Args:
        config (dict): an hyperparameter configuration ``dict`` corresponding to the ``deephyper.sklearn.classifier.problem_autosklearn1``. Mutated in place to fill in ``random_state`` and ``n_jobs`` defaults.
        load_data (callable): a function returning data as Numpy arrays ``(X, y)``.

    Returns:
        float: the accuracy on the validation set, or ``-1.0`` when building/fitting the model failed.
    """
    config["random_state"] = config.get("random_state", 42)
    config["n_jobs"] = config.get("n_jobs", 1)

    X, y = load_data()
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=config["random_state"]
    )

    preproc = minmaxstdscaler()
    X_train = preproc.fit_transform(X_train)
    X_test = preproc.transform(X_test)

    clf_class = CLASSIFIERS[config["classifier"]]

    # Keep only the parameters accepted by the selected classifier.
    sig = signature(clf_class)
    clf_allowed_params = list(sig.parameters.keys())
    clf_params = {
        k: v
        for k, v in config.items()
        if k in clf_allowed_params and v not in ("nan", "NA")
    }

    # Manage the failure value explicitly rather than crashing the search.
    # NOTE: was a bare ``except:``, which also swallowed KeyboardInterrupt
    # and SystemExit; ``Exception`` keeps the intended best-effort behavior.
    try:
        clf = clf_class(**clf_params)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            clf.fit(X_train, y_train)
    except Exception:
        return -1.0

    y_pred = clf.predict(X_test)
    return accuracy_score(y_test, y_pred)
if __name__ == "__main__":
    # Print the search-space definition when the module is executed directly.
    print(problem_autosklearn1)
| 6,739 | 30.495327 | 144 | py |
deephyper | deephyper-master/deephyper/sklearn/classifier/__init__.py | from deephyper.sklearn.classifier._autosklearn1 import (
problem_autosklearn1,
run_autosklearn1,
)
__all__ = ["problem_autosklearn1", "run_autosklearn1"]
__doc__ = """
AutoML searches are executed with the ``deephyper.search.hps.CBO`` algorithm only. We provide ready to go problems, and run functions for you to use it easily.
"""
| 342 | 30.181818 | 159 | py |
deephyper | deephyper-master/deephyper/sklearn/regressor/_autosklearn1.py | """
This module provides ``problem_autosklearn1`` and ``run_autosklearn`` for regression tasks.
"""
import warnings
from inspect import signature
import ConfigSpace as cs
from deephyper.problem import HpProblem
from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.svm import SVR
from xgboost import XGBRegressor
def minmaxstdscaler() -> Pipeline:
    """Build a two-step preprocessor: min-max scaling then standardization.

    Returns:
        Pipeline: ``[MinMaxScaler, StandardScaler]`` applied in that order.
    """
    steps = [
        ("minmaxscaler", MinMaxScaler()),
        ("stdscaler", StandardScaler()),
    ]
    return Pipeline(steps)
# Mapping from the "regressor" hyperparameter value to the estimator class.
REGRESSORS = {
    "RandomForest": RandomForestRegressor,
    "Linear": LinearRegression,
    "AdaBoost": AdaBoostRegressor,
    "KNeighbors": KNeighborsRegressor,
    "MLP": MLPRegressor,
    "SVR": SVR,
    "XGBoost": XGBRegressor,
}

# Joint hyperparameter search space for the AutoML regression problem.
problem_autosklearn1 = HpProblem()

# Top-level choice: which regressor family to train.
regressor = problem_autosklearn1.add_hyperparameter(
    name="regressor",
    value=["RandomForest", "Linear", "AdaBoost", "KNeighbors", "MLP", "SVR", "XGBoost"],
)

# n_estimators: only active for RandomForest and AdaBoost.
n_estimators = problem_autosklearn1.add_hyperparameter(
    name="n_estimators", value=(1, 2000, "log-uniform")
)
cond_n_estimators = cs.OrConjunction(
    cs.EqualsCondition(n_estimators, regressor, "RandomForest"),
    cs.EqualsCondition(n_estimators, regressor, "AdaBoost"),
)
problem_autosklearn1.add_condition(cond_n_estimators)

# max_depth: only active for RandomForest.
max_depth = problem_autosklearn1.add_hyperparameter(
    name="max_depth", value=(2, 100, "log-uniform")
)
cond_max_depth = cs.EqualsCondition(max_depth, regressor, "RandomForest")
problem_autosklearn1.add_condition(cond_max_depth)

# n_neighbors: only active for KNeighbors.
n_neighbors = problem_autosklearn1.add_hyperparameter(
    name="n_neighbors", value=(1, 100)
)
cond_n_neighbors = cs.EqualsCondition(n_neighbors, regressor, "KNeighbors")
problem_autosklearn1.add_condition(cond_n_neighbors)

# alpha: only active for MLP.
alpha = problem_autosklearn1.add_hyperparameter(
    name="alpha", value=(1e-5, 10.0, "log-uniform")
)
cond_alpha = cs.EqualsCondition(alpha, regressor, "MLP")
problem_autosklearn1.add_condition(cond_alpha)

# C: only active for SVR.
C = problem_autosklearn1.add_hyperparameter(name="C", value=(1e-5, 10.0, "log-uniform"))
cond_C = cs.EqualsCondition(C, regressor, "SVR")
problem_autosklearn1.add_condition(cond_C)

# kernel: only active for SVR.
kernel = problem_autosklearn1.add_hyperparameter(
    name="kernel", value=["linear", "poly", "rbf", "sigmoid"]
)
cond_kernel = cs.EqualsCondition(kernel, regressor, "SVR")
problem_autosklearn1.add_condition(cond_kernel)

# gamma: only active when the SVR kernel is rbf, poly, or sigmoid.
gamma = problem_autosklearn1.add_hyperparameter(
    name="gamma", value=(1e-5, 10.0, "log-uniform")
)
cond_gamma = cs.OrConjunction(
    cs.EqualsCondition(gamma, kernel, "rbf"),
    cs.EqualsCondition(gamma, kernel, "poly"),
    cs.EqualsCondition(gamma, kernel, "sigmoid"),
)
problem_autosklearn1.add_condition(cond_gamma)
def run_autosklearn1(config: dict, load_data: callable) -> float:
    """Run function which can be used for AutoML regression.

    It has to be used with the ``deephyper.sklearn.regressor.problem_autosklearn1`` problem definition which corresponds to:

    .. code-block::

        Configuration space object:
            Hyperparameters:
                C, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                alpha, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                gamma, Type: UniformFloat, Range: [1e-05, 10.0], Default: 0.01, on log-scale
                kernel, Type: Categorical, Choices: {linear, poly, rbf, sigmoid}, Default: linear
                max_depth, Type: UniformInteger, Range: [2, 100], Default: 14, on log-scale
                n_estimators, Type: UniformInteger, Range: [1, 2000], Default: 45, on log-scale
                n_neighbors, Type: UniformInteger, Range: [1, 100], Default: 50
                regressor, Type: Categorical, Choices: {RandomForest, Linear, AdaBoost, KNeighbors, MLP, SVR, XGBoost}, Default: RandomForest
            Conditions:
                (gamma | kernel == 'rbf' || gamma | kernel == 'poly' || gamma | kernel == 'sigmoid')
                (n_estimators | regressor == 'RandomForest' || n_estimators | regressor == 'AdaBoost')
                C | regressor == 'SVR'
                alpha | regressor == 'MLP'
                kernel | regressor == 'SVR'
                max_depth | regressor == 'RandomForest'
                n_neighbors | regressor == 'KNeighbors'

    Args:
        config (dict): an hyperparameter configuration ``dict`` corresponding to the ``deephyper.sklearn.regressor.problem_autosklearn1``. Mutated in place to fill in ``random_state`` and ``n_jobs`` defaults.
        load_data (callable): a function returning data as Numpy arrays ``(X, y)``.

    Returns:
        float: the :math:`R^2` on the validation set, or ``-1.0`` when building/fitting the model failed.
    """
    config["random_state"] = config.get("random_state", 42)
    config["n_jobs"] = config.get("n_jobs", 1)

    X, y = load_data()
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=config["random_state"]
    )

    preproc = minmaxstdscaler()
    X_train = preproc.fit_transform(X_train)
    X_test = preproc.transform(X_test)

    clf_class = REGRESSORS[config["regressor"]]

    # Keep only the parameters accepted by the selected regressor.
    sig = signature(clf_class)
    clf_allowed_params = list(sig.parameters.keys())
    clf_params = {
        k: v
        for k, v in config.items()
        if k in clf_allowed_params and v not in ("nan", "NA")
    }

    # Manage the failure value explicitly rather than crashing the search.
    # NOTE: was a bare ``except:``, which also swallowed KeyboardInterrupt
    # and SystemExit; ``Exception`` keeps the intended best-effort behavior.
    try:
        clf = clf_class(**clf_params)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            clf.fit(X_train, y_train)
    except Exception:
        return -1.0

    y_pred = clf.predict(X_test)
    return r2_score(y_test, y_pred)
if __name__ == "__main__":
    # Print the search-space definition when the module is executed directly.
    print(problem_autosklearn1)
| 6,472 | 30.8867 | 141 | py |
deephyper | deephyper-master/deephyper/sklearn/regressor/__init__.py | from deephyper.sklearn.regressor._autosklearn1 import (
problem_autosklearn1,
run_autosklearn1,
)
__all__ = ["problem_autosklearn1", "run_autosklearn1"]
__doc__ = """
AutoML searches are executed with the ``deephyper.search.hps.CBO`` algorithm only. We provide ready to go problems, and run functions for you to use it easily.
"""
| 341 | 30.090909 | 159 | py |
deephyper | deephyper-master/deephyper/ensemble/_bagging_ensemble.py | import os
import traceback
import tensorflow as tf
import numpy as np
import ray
from deephyper.nas.metrics import selectMetric
from deephyper.ensemble import BaseEnsemble
from deephyper.nas.run._util import set_memory_growth_for_visible_gpus
def mse(y_true, y_pred):
    """Element-wise squared error between targets and predictions."""
    error = y_true - y_pred
    return tf.square(error)
@ray.remote(num_cpus=1)
def model_predict(model_path, X, batch_size=32, verbose=0):
    """Perform an inference of the model located at ``model_path``.

    :meta private:

    Args:
        model_path (str): Path to the ``h5`` file to load to perform the inference.
        X (array): array of input data for which we perform the inference.
        batch_size (int, optional): Batch size used to perform the inference. Defaults to 32.
        verbose (int, optional): Verbose option. Defaults to 0.

    Returns:
        array: The prediction based on the provided input data, or ``None``
        when the model could not be loaded.
    """
    # GPU Configuration if available
    set_memory_growth_for_visible_gpus(True)
    tf.keras.backend.clear_session()
    model_file = model_path.split("/")[-1]

    try:
        if verbose:
            print(f"Loading model {model_file}", flush=True)
        # compile=False: the model is only used for inference.
        model = tf.keras.models.load_model(model_path, compile=False)
    except Exception:
        # Loading failures are tolerated: the caller drops ``None`` results.
        if verbose:
            print(f"Could not load model {model_file}", flush=True)
            traceback.print_exc()
        model = None

    if model:
        y = model.predict(X, batch_size=batch_size)
    else:
        y = None

    return y
class BaggingEnsemble(BaseEnsemble):
    """Ensemble based on uniform averaging of the predictions of each member.

    :meta private:

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        batch_size (int, optional): Batch size used batchify the inference of loaded models. Defaults to 32.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk"]``. Default to ``topk``.
        mode (str, optional): Value in ``["regression", "classification"]``. Default to ``"regression"``.
    """

    def __init__(
        self,
        model_dir,
        loss=mse,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        batch_size=32,
        selection="topk",
        mode="regression",
    ):
        super().__init__(
            model_dir,
            loss,
            size,
            verbose,
            ray_address,
            num_cpus,
            num_gpus,
            batch_size,
        )
        assert selection in ["topk"]
        self.selection = selection
        assert mode in ["regression", "classification"]
        self.mode = mode

    def __repr__(self) -> str:
        out = super().__repr__()
        out += f"Mode: {self.mode}\n"
        out += f"Selection: {self.selection}\n"
        return out

    def _model_path(self, model_file):
        # Members are stored flat inside ``self.model_dir``.
        return os.path.join(self.model_dir, model_file)

    def _predict_with_models(self, model_files, X):
        """Run every model in ``model_files`` on ``X`` in parallel via Ray.

        Models that failed to load (``None`` results) are dropped.
        """
        X_id = ray.put(X)
        y_pred = ray.get(
            [
                model_predict.options(
                    num_cpus=self.num_cpus, num_gpus=self.num_gpus
                ).remote(self._model_path(f), X_id, self.batch_size, self.verbose)
                for f in model_files
            ]
        )
        return np.array([arr for arr in y_pred if arr is not None])

    def fit(self, X, y):
        """Fit the current algorithm to the provided data.

        Args:
            X (array): The input data.
            y (array): The output data.

        Returns:
            BaseEnsemble: The current fitted instance.
        """
        model_files = self._list_files_in_model_dir()
        y_pred = self._predict_with_models(model_files, X)

        members_indexes = topk(self.loss, y_true=y, y_pred=y_pred, k=self.size)
        self.members_files = [model_files[i] for i in members_indexes]

        return self

    def predict(self, X) -> np.ndarray:
        """Execute an inference of the ensemble for the provided data.

        Args:
            X (array): An array of input data.

        Returns:
            array: The prediction.
        """
        # BUG FIX: the previous inline ``model_path`` helper here was missing
        # its ``return`` statement, so every member path evaluated to ``None``.
        y_pred = self._predict_with_models(self.members_files, X)
        return aggregate_predictions(y_pred, regression=(self.mode == "regression"))

    def evaluate(self, X, y, metrics=None):
        """Compute metrics based on the provided data.

        Args:
            X (array): An array of input data.
            y (array): An array of true output data.
            metrics (callable, optional): A metric. Defaults to None.

        Returns:
            dict: metric name to value, always including ``"loss"``.
        """
        scores = {}

        y_pred = self.predict(X)

        scores["loss"] = tf.reduce_mean(self.loss(y, y_pred)).numpy()
        if metrics:
            for metric_name in metrics:
                scores[metric_name] = apply_metric(metric_name, y, y_pred)

        return scores
class BaggingEnsembleRegressor(BaggingEnsemble):
    """Ensemble for regression based on uniform averaging of the predictions of each member.

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk"]``. Default to ``topk``.
        batch_size (int, optional): Batch size used batchify the inference of loaded models. Defaults to 32.
    """

    def __init__(
        self,
        model_dir,
        loss=mse,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        selection="topk",
        batch_size=32,
    ):
        # BUG FIX: arguments are now passed by keyword. The previous
        # positional call sent ``selection`` into the parent's ``batch_size``
        # slot, so ``batch_size`` ended up being the string "topk".
        super().__init__(
            model_dir,
            loss,
            size,
            verbose,
            ray_address,
            num_cpus,
            num_gpus,
            batch_size=batch_size,
            selection=selection,
            mode="regression",
        )
class BaggingEnsembleClassifier(BaggingEnsemble):
    """Ensemble for classification based on uniform averaging of the predictions of each member.

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk"]``. Default to ``topk``.
        batch_size (int, optional): Batch size used batchify the inference of loaded models. Defaults to 32.
    """

    def __init__(
        self,
        model_dir,
        loss=mse,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        selection="topk",
        batch_size=32,
    ):
        # BUG FIX: arguments are now passed by keyword. The previous
        # positional call sent ``selection`` into the parent's ``batch_size``
        # slot, so ``batch_size`` ended up being the string "topk".
        super().__init__(
            model_dir,
            loss,
            size,
            verbose,
            ray_address,
            num_cpus,
            num_gpus,
            batch_size=batch_size,
            selection=selection,
            mode="classification",
        )
def apply_metric(metric_name, y_true, y_pred) -> float:
    """Perform the computation of provided metric.

    :meta private:

    Args:
        metric_name (str|callable): If ``str`` then it needs to be a metric available in ``deephyper.nas.metrics``.
        y_true (array): Array of true predictions.
        y_pred (array): Array of predicted predictions

    Returns:
        float: a scalar value of the computed metric.
    """
    metric_func = selectMetric(metric_name)
    true_tensor = tf.convert_to_tensor(y_true, dtype=np.float32)
    pred_tensor = tf.convert_to_tensor(y_pred, dtype=np.float32)
    return tf.reduce_mean(metric_func(true_tensor, pred_tensor)).numpy()
def aggregate_predictions(y_pred, regression=True):
    """Build an ensemble prediction from the members' predictions.

    :meta private:

    Args:
        y_pred (np.array): Predictions array of shape (n_models, n_samples, n_outputs).
        regression (bool): Boolean (True) if it is a regression (False) if it is a classification.

    Return:
        The mean prediction in the case of regression, and the argmax of the
        summed member outputs (axis 1) in the case of classification.
    """
    num_members = np.shape(y_pred)[0]
    summed = np.sum(y_pred, axis=0)
    if regression:
        return summed / num_members
    # Classification: pick the class with the largest summed score.
    return np.argmax(summed, axis=1)
def topk(loss_func, y_true, y_pred, k=2) -> list:
    """Select the Top-k models to be part of the ensemble. A model can appear only once in the ensemble for this strategy.

    :meta private:

    Args:
        loss_func (callable): loss function.
        y_true (array): Array of true predictions.
        y_pred (array): Array of predicted predictions
        k (int, optional): Number of models composing the ensemble. Defaults to 2.

    Returns:
        list: a list of model indexes composing the ensembles.
    """
    # Mean loss per member: shape (n_models,) after averaging over axis 1.
    member_losses = tf.reduce_mean(loss_func(y_true, y_pred), axis=1).numpy()
    ranked = np.argsort(member_losses, axis=0)
    return ranked[:k].reshape(-1).tolist()
| 11,171 | 32.752266 | 178 | py |
deephyper | deephyper-master/deephyper/ensemble/_base_ensemble.py | import abc
import json
import os
import ray
class BaseEnsemble(abc.ABC):
    """Base class for ensembles, every new ensemble algorithms needs to extend this class.

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        batch_size (int, optional): Batch size used batchify the inference of loaded models. Defaults to 32.
    """

    def __init__(
        self,
        model_dir,
        loss,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        batch_size=32,
    ):
        self.model_dir = os.path.abspath(model_dir)
        self.loss = loss
        self.members_files = []
        self.size = size
        self.verbose = verbose
        self.ray_address = ray_address
        self.num_cpus = num_cpus
        self.num_gpus = num_gpus
        self.batch_size = batch_size

        # Connect to (or start) a Ray cluster once per process.
        if not (ray.is_initialized()):
            ray.init(address=self.ray_address)

    def __repr__(self) -> str:
        out = ""
        out += f"Model Dir: {self.model_dir}\n"
        out += f"Members files: {self.members_files}\n"
        out += f"Ensemble size: {len(self.members_files)}/{self.size}\n"
        return out

    def _list_files_in_model_dir(self):
        # Keep only ".h5" checkpoints. FIX: ``f[-2:] == "h5"`` also matched
        # file names merely ending in "h5" without the dot.
        return [f for f in os.listdir(self.model_dir) if f.endswith(".h5")]

    @abc.abstractmethod
    def fit(self, X, y):
        """Fit the current algorithm to the provided data.

        Args:
            X (array): The input data.
            y (array): The output data.

        Returns:
            BaseEnsemble: The current fitted instance.
        """

    @abc.abstractmethod
    def predict(self, X):
        """Execute an inference of the ensemble for the provided data.

        Args:
            X (array): An array of input data.

        Returns:
            array: The prediction.
        """

    @abc.abstractmethod
    def evaluate(self, X, y, metrics=None):
        """Compute metrics based on the provided data.

        Args:
            X (array): An array of input data.
            y (array): An array of true output data.
            metrics (callable, optional): A metric. Defaults to None.
        """

    def load_members_files(self, file: str = "ensemble.json") -> None:
        """Load the members composing an ensemble.

        Args:
            file (str, optional): Path of JSON file containing the ensemble members. All members needs to be accessible in ``model_dir``. Defaults to "ensemble.json".
        """
        with open(file, "r") as f:
            self.members_files = json.load(f)

    def save_members_files(self, file: str = "ensemble.json") -> None:
        """Save the list of file names of the members of the ensemble in a JSON file.

        Args:
            file (str, optional): Path JSON file where the file names are saved. Defaults to "ensemble.json".
        """
        with open(file, "w") as f:
            json.dump(self.members_files, f)

    def load(self, file: str = "ensemble.json") -> None:
        """Load an ensemble from a save.

        Args:
            file (str, optional): Path to the save of the ensemble. Defaults to "ensemble.json".
        """
        self.load_members_files(file)

    def save(self, file: str = "ensemble.json") -> None:
        """Save an ensemble.

        Args:
            file (str, optional): Path to the save of the ensemble. Defaults to "ensemble.json".

        BUG FIX: the default used to be ``None`` which made ``save()``
        crash in ``open(None, "w")``; it now matches ``save_members_files``.
        """
        self.save_members_files(file)
| 3,990 | 31.713115 | 178 | py |
deephyper | deephyper-master/deephyper/ensemble/__init__.py | """The ``ensemble`` module provides a way to build ensembles of checkpointed deep neural networks from ``tensorflow.keras``, with ``.h5`` format, to regularize and boost predictive performance as well as estimate better uncertainties.
"""
from deephyper.ensemble._base_ensemble import BaseEnsemble
from deephyper.ensemble._bagging_ensemble import (
BaggingEnsembleRegressor,
BaggingEnsembleClassifier,
)
from deephyper.ensemble._uq_bagging_ensemble import (
UQBaggingEnsembleRegressor,
UQBaggingEnsembleClassifier,
)
__all__ = [
"BaseEnsemble",
"BaggingEnsembleRegressor",
"BaggingEnsembleClassifier",
"UQBaggingEnsembleRegressor",
"UQBaggingEnsembleClassifier",
]
| 702 | 34.15 | 234 | py |
deephyper | deephyper-master/deephyper/ensemble/_uq_bagging_ensemble.py | import os
import traceback
import numpy as np
import ray
import tensorflow as tf
import tensorflow_probability as tfp
from deephyper.ensemble import BaseEnsemble
from deephyper.nas.metrics import selectMetric
from deephyper.nas.run._util import set_memory_growth_for_visible_gpus
from deephyper.core.exceptions import DeephyperRuntimeError
from pandas import DataFrame
def nll(y, rv_y):
    """Negative log-likelihood of ``y`` under the predicted distribution ``rv_y``."""
    log_likelihood = rv_y.log_prob(y)
    return -log_likelihood
# Shared loss object; ``Reduction.NONE`` keeps per-sample values so callers
# can aggregate them as needed.
cce_obj = tf.keras.losses.CategoricalCrossentropy(
    reduction=tf.keras.losses.Reduction.NONE
)


def cce(y_true, y_pred):
    """Categorical cross-entropy loss.

    ``y_true`` is broadcast to the shape of ``y_pred`` before scoring.
    """
    return cce_obj(tf.broadcast_to(y_true, y_pred.shape), y_pred)
@ray.remote(num_cpus=1)
def model_predict(model_path, X, batch_size=32, verbose=0):
    """Perform an inference of the model located at ``model_path``.

    :meta private:

    Args:
        model_path (str): Path to the ``h5`` file to load to perform the inference.
        X (array): array of input data for which we perform the inference.
        batch_size (int, optional): Batch size used to perform the inference. Defaults to 32.
        verbose (int, optional): Verbose option. Defaults to 0.

    Returns:
        array: The prediction based on the provided input data. For
        probabilistic models the distribution's ``loc`` and ``scale`` are
        concatenated on the last axis; ``None`` is returned when the model
        cannot be loaded.
    """
    # Imported inside the Ray task so each worker initializes its own
    # TensorFlow runtime.
    import tensorflow as tf
    import tensorflow_probability as tfp

    # GPU Configuration if available
    set_memory_growth_for_visible_gpus(True)
    tf.keras.backend.clear_session()
    model_file = model_path.split("/")[-1]

    try:
        if verbose:
            print(f"Loading model {model_file}", end="\n", flush=True)
        model = tf.keras.models.load_model(model_path, compile=False)
    except Exception:
        # Loading failures are tolerated: the caller drops ``None`` results.
        if verbose:
            print(f"Could not load model {model_file}", flush=True)
            traceback.print_exc()
        model = None

    if model is None:
        return None

    # dataset
    if type(X) is list:
        # Multi-input model: feed each array under an "input_i" key.
        dataset = tf.data.Dataset.from_tensor_slices(
            {f"input_{i}": Xi for i, Xi in enumerate(X)}
        )
    else:
        dataset = tf.data.Dataset.from_tensor_slices(X)
    dataset = dataset.batch(batch_size)

    def batch_predict(dataset, convert_func=lambda x: x):
        # Manual batched forward pass; ``convert_func`` maps the raw model
        # output (possibly a distribution) to a numpy-compatible array.
        y_list = []
        for batch in dataset:
            y = model(batch, training=False)
            y_list.append(convert_func(y))
        y = np.concatenate(y_list, axis=0)
        return y

    y_dist = model(
        next(iter(dataset)), training=False
    )  # just to test the type of the output
    if isinstance(y_dist, tfp.distributions.Distribution):
        if hasattr(y_dist, "loc") and hasattr(y_dist, "scale"):

            def convert_func(y_dist):
                # Stack mean and standard deviation on the last axis.
                return np.concatenate([y_dist.loc, y_dist.scale], axis=-1)

            y = batch_predict(dataset, convert_func)
        else:
            raise DeephyperRuntimeError(
                "Distribution doesn't have 'loc' or 'scale' attributes!"
            )
    else:
        y = model.predict(X, batch_size=batch_size)

    return y
class UQBaggingEnsemble(BaseEnsemble):
    """Ensemble with uncertainty quantification based on uniform averaging of the predictions of each member.

    :meta private:

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        batch_size (int, optional): Batch size used batchify the inference of loaded models. Defaults to 32.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk", "caruana"]``. Default to ``topk``.
        mode (str, optional): Value in ``["regression", "classification"]``. Default to ``"regression"``.
    """

    def __init__(
        self,
        model_dir,
        loss=nll,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        batch_size=32,
        selection="topk",
        mode="regression",
    ):
        super().__init__(
            model_dir,
            loss,
            size,
            verbose,
            ray_address,
            num_cpus,
            num_gpus,
            batch_size,
        )
        assert selection in ["topk", "caruana"]
        self.selection = selection
        assert mode in ["regression", "classification"]
        self.mode = mode

    def __repr__(self) -> str:
        out = super().__repr__()
        out += f"Mode: {self.mode}\n"
        out += f"Selection: {self.selection}\n"
        return out

    def _select_members(self, loss_func, y_true, y_pred, k=2, verbose=0):
        """Dispatch to the member-selection strategy configured at init."""
        if self.selection == "topk":
            func = topk
        elif self.selection == "caruana":
            func = greedy_caruana
        else:
            raise NotImplementedError
        return func(loss_func, y_true, y_pred, k, verbose)

    def fit(self, X, y):
        """Select the ensemble members among the checkpointed models.

        Args:
            X (array): The input data.
            y (array): The output data.

        Returns:
            UQBaggingEnsemble: The current fitted instance.
        """
        X_id = ray.put(X)

        model_files = self._list_files_in_model_dir()

        def model_path(f):
            return os.path.join(self.model_dir, f)

        y_pred = ray.get(
            [
                model_predict.options(
                    num_cpus=self.num_cpus, num_gpus=self.num_gpus
                ).remote(model_path(f), X_id, self.batch_size, self.verbose)
                for f in model_files
            ]
        )
        # Drop models that could not be loaded (``None`` results).
        y_pred = np.array([arr for arr in y_pred if arr is not None])

        self._members_indexes = self._select_members(
            self.loss, y_true=y, y_pred=y_pred, k=self.size
        )
        self.members_files = [model_files[i] for i in self._members_indexes]

        # FIX: return the fitted instance, consistent with the documented
        # ``BaseEnsemble.fit`` contract (and with ``BaggingEnsemble.fit``).
        return self

    def predict(self, X) -> np.ndarray:
        """Execute an inference of the ensemble for the provided data.

        Args:
            X (array): An array of input data.

        Returns:
            array: The aggregated prediction of the selected members.
        """
        # make predictions
        X_id = ray.put(X)

        def model_path(f):
            return os.path.join(self.model_dir, f)

        y_pred = ray.get(
            [
                model_predict.options(
                    num_cpus=self.num_cpus, num_gpus=self.num_gpus
                ).remote(model_path(f), X_id, self.batch_size, self.verbose)
                for f in self.members_files
            ]
        )
        y_pred = np.array([arr for arr in y_pred if arr is not None])

        y = aggregate_predictions(y_pred, regression=(self.mode == "regression"))

        return y

    def evaluate(self, X, y, metrics=None, scaler_y=None):
        """Compute metrics based on the provided data.

        Args:
            X (array): An array of input data.
            y (array): An array of true output data.
            metrics (list|dict, optional): Metrics to compute in addition to the loss. Defaults to None.
            scaler_y (callable, optional): Applied to both predictions and targets before scoring. Defaults to None.

        Returns:
            dict: metric name to value, always including ``"loss"``.
        """
        scores = {}

        y_pred = self.predict(X)

        if scaler_y:
            y_pred = scaler_y(y_pred)
            y = scaler_y(y)

        scores["loss"] = tf.reduce_mean(self.loss(y, y_pred)).numpy()

        if metrics:
            if type(metrics) is list:
                for metric in metrics:
                    if callable(metric):
                        metric_name = metric.__name__
                    else:
                        metric_name = metric
                    scores[metric_name] = apply_metric(metric, y, y_pred)
            elif type(metrics) is dict:
                for metric_name, metric in metrics.items():
                    scores[metric_name] = apply_metric(metric, y, y_pred)
            else:
                raise ValueError("Metrics should be of type list or dict.")

        return scores
class UQBaggingEnsembleRegressor(UQBaggingEnsemble):
    """Ensemble with uncertainty quantification for regression based on uniform averaging of the predictions of each members.

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        batch_size (int, optional): Batch size used batchify the inference of loaded models. Defaults to 32.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk", "caruana"]``. Default to ``topk``.
    """
    def __init__(
        self,
        model_dir,
        loss=nll,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        batch_size=32,
        selection="topk",
    ):
        super().__init__(
            model_dir,
            loss,
            size,
            verbose,
            ray_address,
            num_cpus,
            num_gpus,
            batch_size,
            selection,
            mode="regression",
        )
    def predict_var_decomposition(self, X):
        r"""Execute an inference of the ensemble for the provided data with uncertainty quantification estimates. The **aleatoric uncertainty** corresponds to the expected value of learned variance of each model composing the ensemble :math:`\mathbf{E}[\sigma_\theta^2(\mathbf{x})]`. The **epistemic uncertainty** corresponds to the variance of learned mean estimates of each model composing the ensemble :math:`\mathbf{V}[\mu_\theta(\mathbf{x})]`.

        Args:
            X (array): An array of input data.

        Returns:
            y, u1, u2: where ``y`` is the mixture distribution, ``u1`` is the aleatoric component of the variance of ``y`` and ``u2`` is the epistemic component of the variance of ``y``.
        """
        # make predictions
        X_id = ray.put(X)
        def model_path(f):
            return os.path.join(self.model_dir, f)
        y_pred = ray.get(
            [
                model_predict.options(
                    num_cpus=self.num_cpus, num_gpus=self.num_gpus
                ).remote(model_path(f), X_id, self.batch_size, self.verbose)
                for f in self.members_files
            ]
        )
        y_pred = np.array([arr for arr in y_pred if arr is not None])
        y = aggregate_predictions(y_pred, regression=(self.mode == "regression"))
        # variance decomposition
        # The last axis stacks member outputs: first half means, second half stds.
        mid = np.shape(y_pred)[-1] // 2
        selection = [slice(0, s) for s in np.shape(y_pred)]
        selection_loc = selection[:]
        selection_std = selection[:]
        selection_loc[-1] = slice(0, mid)
        selection_std[-1] = slice(mid, np.shape(y_pred)[-1])
        loc = y_pred[tuple(selection_loc)]
        scale = y_pred[tuple(selection_std)]
        # Aleatoric = E[sigma^2] over members; epistemic = Var[mu] over members.
        aleatoric_unc = np.mean(np.square(scale), axis=0)
        epistemic_unc = np.square(np.std(loc, axis=0))
        # dist, aleatoric uq, epistemic uq
        return y, aleatoric_unc, epistemic_unc
class UQBaggingEnsembleClassifier(UQBaggingEnsemble):
    """Ensemble with uncertainty quantification for classification based on uniform averaging of the predictions of each members.

    Args:
        model_dir (str): Path to directory containing saved Keras models in .h5 format.
        loss (callable): a callable taking (y_true, y_pred) as input.
        size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
        verbose (bool, optional): Verbose mode. Defaults to True.
        ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
        num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
        num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
        batch_size (int, optional): Batch size used batchify the inference of loaded models. Defaults to 32.
        selection (str, optional): Selection strategy to build the ensemble. Value in ``["topk", "caruana"]``. Default to ``topk``.
    """
    def __init__(
        self,
        model_dir,
        loss=cce,
        size=5,
        verbose=True,
        ray_address="",
        num_cpus=1,
        num_gpus=None,
        batch_size=32,
        selection="topk",
    ):
        # Same machinery as the base class, with categorical cross-entropy as
        # the default loss and probability averaging for aggregation.
        super().__init__(
            model_dir,
            loss,
            size,
            verbose,
            ray_address,
            num_cpus,
            num_gpus,
            batch_size,
            selection,
            mode="classification",
        )
def apply_metric(metric_name, y_true, y_pred) -> float:
    """Perform the computation of provided metric.

    :meta private:

    Args:
        metric_name (str|callable): If ``str`` then it needs to be a metric available in ``deephyper.nas.metrics``.
        y_true (array): Array of true predictions.
        y_pred (array): Array of predicted predictions

    Returns:
        float: a scalar value of the computed metric.
    """
    def _as_tensor(arr):
        # Numpy inputs are promoted to float32 tensors; everything else
        # (tensors, distributions) is passed through untouched.
        if type(arr) is np.ndarray:
            return tf.convert_to_tensor(arr, dtype=np.float32)
        return arr

    metric_func = selectMetric(metric_name)
    metric = metric_func(_as_tensor(y_true), _as_tensor(y_pred))
    # Collapse non-scalar metric tensors to their mean before extraction.
    if tf.size(metric) >= 1:
        metric = tf.reduce_mean(metric)
    return metric.numpy()
def aggregate_predictions(y_pred, regression=True):
    """Build an ensemble from predictions.

    :meta private:

    Args:
        y_pred (np.array): Predictions array of shape (n_models, n_samples, n_outputs).
        regression (bool): Boolean (True) if it is a regression (False) if it is a classification.

    Return:
        A TFP Normal Distribution in the case of regression and a np.array with average probabilities
        in the case of classification.
    """
    if regression:
        # The last axis stacks member outputs: first half means, second half stds.
        shape = np.shape(y_pred)
        mid = shape[-1] // 2
        loc_sel = [slice(0, s) for s in shape]
        std_sel = [slice(0, s) for s in shape]
        loc_sel[-1] = slice(0, mid)
        std_sel[-1] = slice(mid, shape[-1])
        loc = y_pred[tuple(loc_sel)]
        scale = y_pred[tuple(std_sel)]
        # Mixture of Gaussians collapsed to a single Normal:
        # mean = E[mu], var = E[mu^2 + sigma^2] - E[mu]^2.
        mean_loc = np.mean(loc, axis=0)
        mean_scale = np.sqrt(
            np.mean(np.square(loc) + np.square(scale), axis=0) - np.square(mean_loc)
        )
        return tfp.distributions.Normal(loc=mean_loc, scale=mean_scale)
    # Classification: uniform average of the member probabilities.
    return np.mean(y_pred[:, :, :], axis=0)
def topk(loss_func, y_true, y_pred, k=2, verbose=0):
    """Select the top-k models to be part of the ensemble. A model can appear only once in the ensemble for this strategy.

    :meta private:
    """
    true_dim = np.shape(y_true)[-1]
    pred_dim = np.shape(y_pred)[-1]
    # Regression predictions stack means and stds along the last axis.
    if true_dim * 2 == pred_dim:
        y_pred = tfp.distributions.Normal(
            loc=y_pred[:, :, :true_dim], scale=y_pred[:, :, true_dim:]
        )
    # One average loss per model: loss has shape (n_models, n_outputs),
    # reduced here over the output axis.
    losses = tf.reduce_mean(loss_func(y_true, y_pred), axis=1).numpy()
    if verbose:
        print(f"Top-{k} losses: {losses.reshape(-1)[:k]}")
    # Indexes of the k lowest-loss models, best first.
    return np.argsort(losses, axis=0)[:k].reshape(-1).tolist()
def greedy_caruana(loss_func, y_true, y_pred, k=2, verbose=0):
    """Greedy (Caruana-style) ensemble selection: start from the single best
    model and repeatedly add the model whose inclusion most reduces the
    aggregated ensemble loss. Selection is *with replacement*: the same model
    index can be appended multiple times, and the loop stops once ``k``
    unique models are selected or no candidate improves the loss.

    :meta private:
    """
    # Regression is detected when the last axis stacks means and stds.
    regression = np.shape(y_true)[-1] * 2 == np.shape(y_pred)[-1]
    n_models = np.shape(y_pred)[0]
    if regression: # regression
        mid = np.shape(y_true)[-1]
        selection = [slice(0, s) for s in np.shape(y_pred)]
        selection_loc = selection[:]
        selection_std = selection[:]
        selection_loc[-1] = slice(0, mid)
        selection_std[-1] = slice(mid, np.shape(y_pred)[-1])
        y_pred_ = tfp.distributions.Normal(
            loc=y_pred[tuple(selection_loc)],
            scale=y_pred[tuple(selection_std)],
        )
    else:
        y_pred_ = y_pred
    # Per-model loss averaged over all samples/outputs: shape (n_models,).
    losses = tf.reduce_mean(
        tf.reshape(loss_func(y_true, y_pred_), [n_models, -1]), axis=1
    ).numpy()
    assert n_models == np.shape(losses)[0]
    # Seed the ensemble with the single best model (NaN-safe argmin).
    i_min = np.nanargmin(losses)
    loss_min = losses[i_min]
    ensemble_members = [i_min]
    if verbose:
        print(f"Loss: {loss_min:.3f} - Ensemble: {ensemble_members}")
    def loss(y_true, y_pred):
        # Scalar loss of an aggregated (ensemble) prediction.
        return tf.reduce_mean(loss_func(y_true, y_pred)).numpy()
    while len(np.unique(ensemble_members)) < k:
        # Evaluate the ensemble extended by each candidate model in turn.
        losses = [
            loss(
                y_true,
                aggregate_predictions(
                    y_pred[ensemble_members + [i]], regression=regression
                ),
            )
            for i in range(n_models) # iterate over all models
        ]
        i_min_ = np.nanargmin(losses)
        loss_min_ = losses[i_min_]
        if loss_min_ < loss_min:
            if (
                len(np.unique(ensemble_members)) == 1 and ensemble_members[0] == i_min_
            ): # numerical errors...
                return ensemble_members
            loss_min = loss_min_
            ensemble_members.append(i_min_)
            if verbose:
                print(f"Loss: {loss_min:.3f} - Ensemble: {ensemble_members}")
        else:
            # No candidate improves the current ensemble: stop early.
            return ensemble_members
    return ensemble_members
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
    """Normalize block-design data into a melted DataFrame.

    Accepts a wide DataFrame (columns = groups, index = blocks), an already
    melted DataFrame, or a 2D array-like (rows = blocks, columns = groups),
    and returns it in long format with canonical column names.

    Args:
        a: input data (DataFrame or 2D array-like).
        y_col: value column name (required when ``melted`` is True).
        group_col: group column name (required when ``melted`` is True).
        block_col: block column name (required when ``melted`` is True).
        melted (bool): whether ``a`` is already in long/melted format.

    Returns:
        tuple: ``(x, y_col, group_col, block_col)`` where ``x`` is the melted
        DataFrame and the returned names are always the canonical
        ``("y", "groups", "blocks")`` matching ``x``'s columns.

    Raises:
        ValueError: if ``melted`` is True and any column name is missing.
    """
    # TODO: refactor conversion of block data to DataFrame
    if melted and not all(i is not None for i in (block_col, group_col, y_col)):
        raise ValueError(
            "`block_col`, `group_col`, `y_col` should be explicitly specified if using melted data"
        )
    def _melt_wide(frame):
        # Wide layout: columns are the groups, the index is the block id.
        frame.columns.name = "groups"
        frame.index.name = "blocks"
        return frame.reset_index().melt(
            id_vars="blocks", var_name="groups", value_name="y"
        )
    if isinstance(a, DataFrame) and not melted:
        x = _melt_wide(a.copy(deep=True))
    elif isinstance(a, DataFrame) and melted:
        x = DataFrame.from_dict(
            {"groups": a[group_col], "blocks": a[block_col], "y": a[y_col]}
        )
    else:
        x = np.array(a)
        x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
        if not melted:
            x = _melt_wide(x)
        else:
            x.rename(
                columns={group_col: "groups", block_col: "blocks", y_col: "y"},
                inplace=True,
            )
    # Fix: always return the canonical names. Previously, for a melted
    # DataFrame input, the caller-supplied names were returned even though
    # the frame's columns had been renamed to groups/blocks/y, so indexing
    # the result with the returned names could fail.
    return x, "y", "groups", "blocks"
| 19,600 | 34.703097 | 449 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/main.py | import torch
import utility
import data
import model
import loss
from option import args
from trainer import Trainer
torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)
def main():
    """Entry point: build model (plus data/loss for training) and run.

    NOTE(review): in the video branch, ``global model`` rebinds the imported
    ``model`` *module* name to the instantiated ``model.Model`` object —
    fragile but intentional here; the training branch avoids this by using
    the local ``_model`` instead.
    """
    global model
    if args.data_test == ['video']:
        # Video demo mode: run inference over a video file instead of datasets.
        from videotester import VideoTester
        model = model.Model(args,checkpoint)
        print('total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        if checkpoint.ok:
            loader = data.Data(args)
            _model = model.Model(args, checkpoint)
            print('total params:%.2fM' % (sum(p.numel() for p in _model.parameters())/1000000.0))
            # No loss needed when only testing.
            _loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, _model, _loss, checkpoint)
            # Alternate train/test epochs until the trainer signals completion.
            while not t.terminate():
                t.train()
                t.test()
            checkpoint.done()
if __name__ == '__main__':
main()
| 1,026 | 27.527778 | 97 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/utility.py | import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
    """Stopwatch that accumulates elapsed time across tic/hold cycles."""

    def __init__(self):
        # Accumulated (held) seconds; the clock starts immediately.
        self.acc = 0
        self.tic()

    def tic(self):
        """Restart the reference time point."""
        self.t0 = time.time()

    def toc(self, restart=False):
        """Return seconds elapsed since the last tic, optionally restarting."""
        elapsed = time.time() - self.t0
        if restart:
            self.t0 = time.time()
        return elapsed

    def hold(self):
        """Add the current lap time to the accumulator."""
        self.acc += self.toc()

    def release(self):
        """Return the accumulated time and clear the accumulator."""
        total = self.acc
        self.acc = 0
        return total

    def reset(self):
        """Clear the accumulator without touching the reference point."""
        self.acc = 0
class checkpoint():
    """Experiment bookkeeping: directory layout, text/PSNR logs, plots and
    asynchronous saving of result images via worker processes."""
    def __init__(self, args):
        self.args = args
        self.ok = True
        # PSNR history; grows to shape (epochs, n_test_datasets, n_scales).
        self.log = torch.Tensor()
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
        if not args.load:
            # Fresh run: default the save directory name to the timestamp.
            if not args.save:
                args.save = now
            self.dir = os.path.join('..', 'experiment', args.save)
        else:
            # Resume: reload the PSNR log if the experiment directory exists.
            self.dir = os.path.join('..', 'experiment', args.load)
            if os.path.exists(self.dir):
                self.log = torch.load(self.get_path('psnr_log.pt'))
                print('Continue from epoch {}...'.format(len(self.log)))
            else:
                args.load = ''
        if args.reset:
            # Wipe any previous experiment with the same name.
            os.system('rm -rf ' + self.dir)
            args.load = ''
        os.makedirs(self.dir, exist_ok=True)
        os.makedirs(self.get_path('model'), exist_ok=True)
        for d in args.data_test:
            os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
        # Append to existing logs when resuming, otherwise start fresh files.
        open_type = 'a' if os.path.exists(self.get_path('log.txt'))else 'w'
        self.log_file = open(self.get_path('log.txt'), open_type)
        with open(self.get_path('config.txt'), open_type) as f:
            f.write(now + '\n\n')
            for arg in vars(args):
                f.write('{}: {}\n'.format(arg, getattr(args, arg)))
            f.write('\n')
        # Number of background processes used to write result images.
        self.n_processes = 8
    def get_path(self, *subdir):
        """Join ``subdir`` components under the experiment directory."""
        return os.path.join(self.dir, *subdir)
    def save(self, trainer, epoch, is_best=False):
        """Persist model, loss state, optimizer, plots and the PSNR log."""
        trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
        trainer.loss.save(self.dir)
        trainer.loss.plot_loss(self.dir, epoch)
        self.plot_psnr(epoch)
        trainer.optimizer.save(self.dir)
        torch.save(self.log, self.get_path('psnr_log.pt'))
    def add_log(self, log):
        """Append a new row (one epoch) to the PSNR log."""
        self.log = torch.cat([self.log, log])
    def write_log(self, log, refresh=False):
        """Print ``log`` and append it to log.txt; reopen the file if asked."""
        print(log)
        self.log_file.write(log + '\n')
        if refresh:
            self.log_file.close()
            self.log_file = open(self.get_path('log.txt'), 'a')
    def done(self):
        """Close the text log file."""
        self.log_file.close()
    def plot_psnr(self, epoch):
        """Save one PSNR-vs-epoch PDF per test dataset (one curve per scale)."""
        axis = np.linspace(1, epoch, epoch)
        for idx_data, d in enumerate(self.args.data_test):
            label = 'SR on {}'.format(d)
            fig = plt.figure()
            plt.title(label)
            for idx_scale, scale in enumerate(self.args.scale):
                plt.plot(
                    axis,
                    self.log[:, idx_data, idx_scale].numpy(),
                    label='Scale {}'.format(scale)
                )
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('PSNR')
            plt.grid(True)
            plt.savefig(self.get_path('test_{}.pdf'.format(d)))
            plt.close(fig)
    def begin_background(self):
        """Start worker processes that write queued result images to disk."""
        self.queue = Queue()
        def bg_target(queue):
            # Drain the queue until the (None, None) sentinel arrives.
            while True:
                if not queue.empty():
                    filename, tensor = queue.get()
                    if filename is None: break
                    imageio.imwrite(filename, tensor.numpy())
        self.process = [
            Process(target=bg_target, args=(self.queue,)) \
            for _ in range(self.n_processes)
        ]
        for p in self.process: p.start()
    def end_background(self):
        """Send one sentinel per worker, wait for the queue to drain, join."""
        for _ in range(self.n_processes): self.queue.put((None, None))
        while not self.queue.empty(): time.sleep(1)
        for p in self.process: p.join()
    def save_results(self, dataset, filename, save_list, scale):
        """Queue the tensors in ``save_list`` for asynchronous PNG export."""
        if self.args.save_results:
            filename = self.get_path(
                'results-{}'.format(dataset.dataset.name),
                '{}_x{}_'.format(filename, scale)
            )
            # Suffixes: demosaiced output, low-quality input, high-quality target.
            postfix = ('DM', 'LQ', 'HQ')
            for v, p in zip(save_list, postfix):
                # Scale from rgb_range to 8-bit and move channels last (HWC).
                normalized = v[0].mul(255 / self.args.rgb_range)
                tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
                self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))
def quantize(img, rgb_range):
    """Snap ``img`` onto the 8-bit grid: scale to [0, 255], clamp, round,
    then scale back to the original ``rgb_range``."""
    scale = 255 / rgb_range
    quantized = img.mul(scale).clamp(0, 255).round()
    return quantized.div(scale)
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
    """Return the PSNR (dB) between ``sr`` and ``hr``, normalized by ``rgb_range``."""
    # A single-element hr is a placeholder target (no ground truth): skip.
    if hr.nelement() == 1: return 0
    diff = (sr - hr) / rgb_range
    if dataset and dataset.dataset.benchmark:
        shave = scale
        # NOTE(review): the grayscale conversion only triggers for more than
        # 5 channels here (upstream EDSR uses `> 1`) — confirm this is the
        # intended behavior for the demosaicing task.
        if diff.size(1) > 5:
            # ITU-R BT.601 luma coefficients (scaled by 256).
            gray_coeffs = [65.738, 129.057, 25.064]
            convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
            diff = diff.mul(convert).sum(dim=1)
    else:
        shave = scale + 6
    # NOTE(review): `shave` is computed but never used — `[..., :, :]` keeps
    # the full image, whereas upstream EDSR crops `shave` border pixels.
    # Confirm border cropping is deliberately disabled here.
    valid = diff[..., :, :]
    mse = valid.pow(2).mean()
    return -10 * math.log10(mse)
def make_optimizer(args, target):
    """Build an optimizer over ``target``'s trainable parameters with an
    attached MultiStepLR scheduler and save/load helpers.

    Args:
        args: parsed options (optimizer type, lr, decay milestones, etc.).
        target: model whose ``requires_grad`` parameters are optimized.

    Returns:
        CustomOptimizer: an optimizer subclass exposing ``schedule()``,
        ``save()``, ``load()``, ``get_lr()`` and ``get_last_epoch()``.
    """
    # optimizer
    trainable = filter(lambda x: x.requires_grad, target.parameters())
    kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
    if args.optimizer == 'SGD':
        optimizer_class = optim.SGD
        kwargs_optimizer['momentum'] = args.momentum
    elif args.optimizer == 'ADAM':
        optimizer_class = optim.Adam
        kwargs_optimizer['betas'] = args.betas
        kwargs_optimizer['eps'] = args.epsilon
    elif args.optimizer == 'RMSprop':
        optimizer_class = optim.RMSprop
        kwargs_optimizer['eps'] = args.epsilon
    # scheduler
    # Decay milestones come as a '-'-separated string, e.g. '200-400-600-800'.
    milestones = list(map(lambda x: int(x), args.decay.split('-')))
    kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
    scheduler_class = lrs.MultiStepLR
    class CustomOptimizer(optimizer_class):
        """Chosen optimizer class augmented with scheduler plumbing."""
        def __init__(self, *args, **kwargs):
            super(CustomOptimizer, self).__init__(*args, **kwargs)
        def _register_scheduler(self, scheduler_class, **kwargs):
            self.scheduler = scheduler_class(self, **kwargs)
        def save(self, save_dir):
            torch.save(self.state_dict(), self.get_dir(save_dir))
        def load(self, load_dir, epoch=1):
            self.load_state_dict(torch.load(self.get_dir(load_dir)))
            if epoch > 1:
                # Replay the scheduler to restore the learning rate at `epoch`.
                for _ in range(epoch): self.scheduler.step()
        def get_dir(self, dir_path):
            return os.path.join(dir_path, 'optimizer.pt')
        def schedule(self):
            self.scheduler.step()
        def get_lr(self):
            return self.scheduler.get_lr()[0]
        def get_last_epoch(self):
            return self.scheduler.last_epoch
    optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
    optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
    return optimizer
| 7,458 | 30.340336 | 77 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/dataloader.py | import threading
import random
import torch
import torch.multiprocessing as multiprocessing
from torch.utils.data import DataLoader
from torch.utils.data import SequentialSampler
from torch.utils.data import RandomSampler
from torch.utils.data import BatchSampler
from torch.utils.data import _utils
from torch.utils.data.dataloader import _DataLoaderIter
from torch.utils.data._utils import collate
from torch.utils.data._utils import signal_handling
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data._utils import ExceptionWrapper
from torch.utils.data._utils import IS_WINDOWS
from torch.utils.data._utils.worker import ManagerWatchdog
from torch._six import queue
def _ms_loop(dataset, index_queue, data_queue, done_event, collate_fn, scale, seed, init_fn, worker_id):
    """Multi-scale worker loop used by ``MSDataLoader``.

    Mirrors torch's default worker loop, but during multi-scale training it
    picks a random scale index per batch, sets it on the dataset *before*
    collating, and appends the chosen index to the collated batch.

    Args:
        dataset: dataset exposing ``set_scale`` and ``train``.
        index_queue: queue of ``(batch_idx, sample_indices)`` work items.
        data_queue: queue receiving ``(batch_idx, samples)`` results.
        done_event: event signalling shutdown.
        collate_fn: merges a list of samples into a batch.
        scale: list of available scales.
        seed: per-worker RNG seed.
        init_fn: optional per-worker initialization callback.
        worker_id: index of this worker.
    """
    import sys  # fix: `sys` is not imported at module level but sys.exc_info() is used below

    try:
        collate._use_shared_memory = True
        signal_handling._set_worker_signal_handlers()
        torch.set_num_threads(1)
        random.seed(seed)
        torch.manual_seed(seed)
        data_queue.cancel_join_thread()
        if init_fn is not None:
            init_fn(worker_id)
        watchdog = ManagerWatchdog()
        while watchdog.is_alive():
            try:
                r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
            except queue.Empty:
                continue
            if r is None:
                # None is the shutdown sentinel, only sent after done_event is set.
                assert done_event.is_set()
                return
            elif done_event.is_set():
                # Shutting down: drain any remaining work items without processing.
                continue
            idx, batch_indices = r
            try:
                idx_scale = 0
                if len(scale) > 1 and dataset.train:
                    # Pick a random scale per batch during multi-scale training.
                    idx_scale = random.randrange(0, len(scale))
                    dataset.set_scale(idx_scale)
                samples = collate_fn([dataset[i] for i in batch_indices])
                samples.append(idx_scale)
            except Exception:
                # Forward the worker-side exception to the main process.
                # (Previously raised NameError here because `sys` was never
                # imported, masking the real exception.)
                data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
            else:
                data_queue.put((idx, samples))
                del samples
    except KeyboardInterrupt:
        # The main process handles SIGINT; workers exit quietly.
        pass
class _MSDataLoaderIter(_DataLoaderIter):
    """Iterator behind ``MSDataLoader``.

    Re-implements ``_DataLoaderIter.__init__`` (instead of calling
    ``super().__init__``) so that worker processes run ``_ms_loop`` — which
    tags each batch with a scale index — rather than torch's default worker
    loop.

    NOTE(review): duplicated from torch internals; must stay in sync with the
    specific torch version this code targets.
    """
    def __init__(self, loader):
        self.dataset = loader.dataset
        self.scale = loader.scale
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory and torch.cuda.is_available()
        self.timeout = loader.timeout
        self.sample_iter = iter(self.batch_sampler)
        # NOTE(review): this value is overwritten below when num_workers > 0,
        # so it only matters for the single-process path — confirm intended.
        base_seed = torch.LongTensor(1).random_().item()
        if self.num_workers > 0:
            self.worker_init_fn = loader.worker_init_fn
            self.worker_queue_idx = 0
            self.worker_result_queue = multiprocessing.Queue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            # Buffers batches that arrive out of order.
            self.reorder_dict = {}
            self.done_event = multiprocessing.Event()
            base_seed = torch.LongTensor(1).random_()[0]
            self.index_queues = []
            self.workers = []
            for i in range(self.num_workers):
                index_queue = multiprocessing.Queue()
                index_queue.cancel_join_thread()
                # Each worker runs the multi-scale loop with its own seed.
                w = multiprocessing.Process(
                    target=_ms_loop,
                    args=(
                        self.dataset,
                        index_queue,
                        self.worker_result_queue,
                        self.done_event,
                        self.collate_fn,
                        self.scale,
                        base_seed + i,
                        self.worker_init_fn,
                        i
                    )
                )
                w.daemon = True
                w.start()
                self.index_queues.append(index_queue)
                self.workers.append(w)
            if self.pin_memory:
                # Extra thread moves batches into pinned (page-locked) memory.
                self.data_queue = queue.Queue()
                pin_memory_thread = threading.Thread(
                    target=_utils.pin_memory._pin_memory_loop,
                    args=(
                        self.worker_result_queue,
                        self.data_queue,
                        torch.cuda.current_device(),
                        self.done_event
                    )
                )
                pin_memory_thread.daemon = True
                pin_memory_thread.start()
                self.pin_memory_thread = pin_memory_thread
            else:
                self.data_queue = self.worker_result_queue
            _utils.signal_handling._set_worker_pids(
                id(self), tuple(w.pid for w in self.workers)
            )
            _utils.signal_handling._set_SIGCHLD_handler()
            self.worker_pids_set = True
            # Prefetch: keep two batches in flight per worker.
            for _ in range(2 * self.num_workers):
                self._put_indices()
class MSDataLoader(DataLoader):
    """DataLoader whose batches carry a (possibly random) scale index.

    Args:
        cfg: parsed options; ``cfg.n_threads`` sets the worker count and
            ``cfg.scale`` the list of scales.
    """
    def __init__(self, cfg, *args, **kwargs):
        super(MSDataLoader, self).__init__(
            *args, **kwargs, num_workers=cfg.n_threads
        )
        self.scale = cfg.scale
    def __iter__(self):
        # Use the multi-scale iterator instead of torch's default one.
        return _MSDataLoaderIter(self)
| 5,259 | 32.081761 | 104 | py |
def set_template(args):
    """Overwrite parsed options in place according to ``args.template``.

    Each preset is matched independently by substring, so several presets
    may apply to a single template string.
    """
    if 'jpeg' in args.template:
        args.data_train = 'DIV2K_jpeg'
        args.data_test = 'DIV2K_jpeg'
        args.epochs = 200
        args.decay = '100'
    if 'EDSR_paper' in args.template:
        args.model = 'EDSR'
        args.n_resblocks = 32
        args.n_feats = 256
        args.res_scale = 0.1
    if 'MDSR' in args.template:
        args.model = 'MDSR'
        args.patch_size = 48
        args.epochs = 650
    if 'DDBPN' in args.template:
        args.model = 'DDBPN'
        args.patch_size = 128
        args.scale = '4'
        args.data_test = 'Set5'
        args.batch_size = 20
        args.epochs = 1000
        args.decay = '500'
        args.gamma = 0.1
        args.weight_decay = 1e-4
        args.loss = '1*MSE'
    if 'GAN' in args.template:
        args.epochs = 200
        args.lr = 5e-5
        args.decay = '150'
    if 'RCAN' in args.template:
        args.model = 'RCAN'
        args.n_resgroups = 10
        args.n_resblocks = 20
        args.n_feats = 64
        args.chop = True
    if 'VDSR' in args.template:
        args.model = 'VDSR'
        args.n_resblocks = 20
        args.n_feats = 64
        args.patch_size = 41
        args.lr = 1e-1
| 1,312 | 23.314815 | 45 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/option.py | import argparse
import template
parser = argparse.ArgumentParser(description='EDSR and MDSR')
parser.add_argument('--debug', action='store_true',
help='Enables debug mode')
parser.add_argument('--template', default='.',
help='You can set various templates in option.py')
# Hardware specifications
parser.add_argument('--n_threads', type=int, default=18,
help='number of threads for data loading')
parser.add_argument('--cpu', action='store_true',
help='use cpu only')
parser.add_argument('--n_GPUs', type=int, default=1,
help='number of GPUs')
parser.add_argument('--seed', type=int, default=1,
help='random seed')
# Data specifications
parser.add_argument('--dir_data', type=str, default='../../',help='dataset directory')
parser.add_argument('--data_train', type=str, default='DIV2K',
help='train dataset name')
parser.add_argument('--data_test', type=str, default='DIV2K',
help='test dataset name')
parser.add_argument('--data_range', type=str, default='1-800/801-805',
help='train/test data range')
parser.add_argument('--ext', type=str, default='sep',
help='dataset file extension')
parser.add_argument('--scale', type=str, default='4',
help='super resolution scale')
parser.add_argument('--patch_size', type=int, default=192,
help='output patch size')
parser.add_argument('--rgb_range', type=int, default=1,
help='maximum value of RGB')
parser.add_argument('--n_colors', type=int, default=3,
help='number of color channels to use')
parser.add_argument('--chop', action='store_true',
help='enable memory-efficient forward')
parser.add_argument('--no_augment', action='store_true',
help='do not use data augmentation')
# Model specifications
parser.add_argument('--model', default='EDSR',
help='model name')
parser.add_argument('--act', type=str, default='relu',
help='activation function')
parser.add_argument('--pre_train', type=str, default='.',
help='pre-trained model directory')
parser.add_argument('--extend', type=str, default='.',
help='pre-trained model directory')
parser.add_argument('--n_resblocks', type=int, default=16,
help='number of residual blocks')
parser.add_argument('--n_feats', type=int, default=64,
help='number of feature maps')
parser.add_argument('--res_scale', type=float, default=1,
help='residual scaling')
parser.add_argument('--shift_mean', default=True,
help='subtract pixel mean from the input')
parser.add_argument('--dilation', action='store_true',
help='use dilated convolution')
parser.add_argument('--precision', type=str, default='single',
choices=('single', 'half'),
help='FP precision for test (single | half)')
# Option for Residual dense network (RDN)
parser.add_argument('--G0', type=int, default=64,
help='default number of filters. (Use in RDN)')
parser.add_argument('--RDNkSize', type=int, default=3,
help='default kernel size. (Use in RDN)')
parser.add_argument('--RDNconfig', type=str, default='B',
help='parameters config of RDN. (Use in RDN)')
parser.add_argument('--depth', type=int, default=12,
help='number of residual groups')
# Option for Residual channel attention network (RCAN)
parser.add_argument('--n_resgroups', type=int, default=10,
help='number of residual groups')
parser.add_argument('--reduction', type=int, default=16,
help='number of feature maps reduction')
# Training specifications
parser.add_argument('--reset', action='store_true',
help='reset the training')
parser.add_argument('--test_every', type=int, default=1000,
help='do test per every N batches')
parser.add_argument('--epochs', type=int, default=1000,
help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=16,
help='input batch size for training')
parser.add_argument('--split_batch', type=int, default=1,
help='split the batch into smaller chunks')
parser.add_argument('--self_ensemble', action='store_true',
help='use self-ensemble method for test')
parser.add_argument('--test_only', action='store_true',
help='set this option to test the model')
parser.add_argument('--gan_k', type=int, default=1,
help='k value for adversarial loss')
# Optimization specifications
parser.add_argument('--lr', type=float, default=1e-4,
help='learning rate')
parser.add_argument('--decay', type=str, default='200-400-600-800',
help='learning rate decay type')
parser.add_argument('--gamma', type=float, default=0.5,
help='learning rate decay factor for step decay')
parser.add_argument('--optimizer', default='ADAM',
choices=('SGD', 'ADAM', 'RMSprop'),
help='optimizer to use (SGD | ADAM | RMSprop)')
parser.add_argument('--momentum', type=float, default=0.9,
help='SGD momentum')
parser.add_argument('--betas', type=tuple, default=(0.9, 0.999),
help='ADAM beta')
parser.add_argument('--epsilon', type=float, default=1e-8,
help='ADAM epsilon for numerical stability')
parser.add_argument('--weight_decay', type=float, default=0,
help='weight decay')
parser.add_argument('--gclip', type=float, default=0,
help='gradient clipping threshold (0 = no clipping)')
# Loss specifications
parser.add_argument('--loss', type=str, default='1*L1',
help='loss function configuration')
parser.add_argument('--skip_threshold', type=float, default='1e8',
help='skipping batch that has large error')
# Log specifications
parser.add_argument('--save', type=str, default='test',
help='file name to save')
parser.add_argument('--load', type=str, default='',
help='file name to load')
parser.add_argument('--resume', type=int, default=0,
help='resume from specific checkpoint')
parser.add_argument('--save_models', action='store_true',
help='save all intermediate models')
parser.add_argument('--print_every', type=int, default=100,
help='how many batches to wait before logging training status')
parser.add_argument('--save_results', action='store_true',
help='save output results')
parser.add_argument('--save_gt', action='store_true',
help='save low-resolution and high-resolution images together')
args = parser.parse_args()
template.set_template(args)
# '+'-separated option strings become lists (e.g. '2+3+4' -> [2, 3, 4]).
args.scale = list(map(lambda x: int(x), args.scale.split('+')))
args.data_train = args.data_train.split('+')
args.data_test = args.data_test.split('+')
if args.epochs == 0:
    # 0 is replaced by a practically unbounded epoch count.
    args.epochs = 1e8
for arg in vars(args):
    # Normalize string booleans ('True'/'False') into real bools.
    if vars(args)[arg] == 'True':
        vars(args)[arg] = True
    elif vars(args)[arg] == 'False':
        vars(args)[arg] = False
| 7,464 | 45.36646 | 86 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/__init__.py | 0 | 0 | 0 | py | |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/videotester.py | import os
import math
import utility
from data import common
import torch
import cv2
from tqdm import tqdm
class VideoTester():
    """Runs super-resolution inference over a video file, frame by frame,
    writing the upscaled result as an .avi into the experiment directory."""
    def __init__(self, args, my_model, ckp):
        # args.dir_demo is the input video path; its basename (without
        # extension) names the output file.
        self.args = args
        self.scale = args.scale
        self.ckp = ckp
        self.model = my_model
        self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))
    def test(self):
        """Upscale every frame of the demo video for each configured scale."""
        torch.set_grad_enabled(False)
        self.ckp.write_log('\nEvaluation on video:')
        self.model.eval()
        timer_test = utility.timer()
        for idx_scale, scale in enumerate(self.scale):
            vidcap = cv2.VideoCapture(self.args.dir_demo)
            total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
            # Output keeps the source FPS with width/height multiplied by scale.
            vidwri = cv2.VideoWriter(
                self.ckp.get_path('{}_x{}.avi'.format(self.filename, scale)),
                cv2.VideoWriter_fourcc(*'XVID'),
                vidcap.get(cv2.CAP_PROP_FPS),
                (
                    int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                )
            )
            tqdm_test = tqdm(range(total_frames), ncols=80)
            for _ in tqdm_test:
                success, lr = vidcap.read()
                if not success: break
                # Convert the frame to a model-ready tensor batch of size 1.
                lr, = common.set_channel(lr, n_channels=self.args.n_colors)
                lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
                lr, = self.prepare(lr.unsqueeze(0))
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)
                # Back to 8-bit HWC layout for the video writer.
                normalized = sr * 255 / self.args.rgb_range
                ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
                vidwri.write(ndarr)
            vidcap.release()
            vidwri.release()
        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )
        torch.set_grad_enabled(True)
    def prepare(self, *args):
        """Move tensors to the target device, optionally in half precision."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')
        def _prepare(tensor):
            if self.args.precision == 'half': tensor = tensor.half()
            return tensor.to(device)
        return [_prepare(a) for a in args]
| 2,280 | 30.246575 | 77 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/trainer.py | import os
import math
from decimal import Decimal
import utility
import torch
import torch.nn.utils as utils
from tqdm import tqdm
class Trainer():
    def __init__(self, args, loader, my_model, my_loss, ckp):
        """Wire up training state: loaders, model, loss, optimizer, checkpoint.

        Args:
            args: parsed options.
            loader: object exposing ``loader_train`` and ``loader_test``.
            my_model: model to train/evaluate.
            my_loss: loss module (may be None when only testing).
            ckp: checkpoint object for logging and saving.
        """
        self.args = args
        self.scale = args.scale
        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)
        if self.args.load != '':
            # Resume optimizer/scheduler at the epoch recorded in the PSNR log.
            self.optimizer.load(ckp.dir, epoch=len(ckp.log))
        # Last recorded loss; initialized high so the first epoch never skips.
        self.error_last = 1e8
def train(self):
self.loss.step()
epoch = self.optimizer.get_last_epoch() + 1
lr = self.optimizer.get_lr()
self.ckp.write_log(
'[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
)
self.loss.start_log()
self.model.train()
timer_data, timer_model = utility.timer(), utility.timer()
# TEMP
self.loader_train.dataset.set_scale(0)
for batch, (lr, hr, _,) in enumerate(self.loader_train):
lr, hr = self.prepare(lr, hr)
timer_data.hold()
timer_model.tic()
self.optimizer.zero_grad()
sr = self.model(lr, 0)
loss = self.loss(sr, hr)
loss.backward()
if self.args.gclip > 0:
utils.clip_grad_value_(
self.model.parameters(),
self.args.gclip
)
self.optimizer.step()
timer_model.hold()
if (batch + 1) % self.args.print_every == 0:
self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
(batch + 1) * self.args.batch_size,
len(self.loader_train.dataset),
self.loss.display_loss(batch),
timer_model.release(),
timer_data.release()))
timer_data.tic()
self.loss.end_log(len(self.loader_train))
self.error_last = self.loss.log[-1, -1]
self.optimizer.schedule()
def test(self):
torch.set_grad_enabled(False)
epoch = self.optimizer.get_last_epoch()
self.ckp.write_log('\nEvaluation:')
self.ckp.add_log(
torch.zeros(1, len(self.loader_test), len(self.scale))
)
self.model.eval()
timer_test = utility.timer()
if self.args.save_results: self.ckp.begin_background()
for idx_data, d in enumerate(self.loader_test):
for idx_scale, scale in enumerate(self.scale):
d.dataset.set_scale(idx_scale)
for lr, hr, filename in tqdm(d, ncols=80):
lr, hr = self.prepare(lr, hr)
sr = self.model(lr, idx_scale)
sr = utility.quantize(sr, self.args.rgb_range)
save_list = [sr]
self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
sr, hr, scale, self.args.rgb_range, dataset=d
)
if self.args.save_gt:
save_list.extend([lr, hr])
if self.args.save_results:
self.ckp.save_results(d, filename[0], save_list, scale)
self.ckp.log[-1, idx_data, idx_scale] /= len(d)
best = self.ckp.log.max(0)
self.ckp.write_log(
'[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
d.dataset.name,
scale,
self.ckp.log[-1, idx_data, idx_scale],
best[0][idx_data, idx_scale],
best[1][idx_data, idx_scale] + 1
)
)
self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
self.ckp.write_log('Saving...')
if self.args.save_results:
self.ckp.end_background()
if not self.args.test_only:
self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))
self.ckp.write_log(
'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
)
torch.set_grad_enabled(True)
def prepare(self, *args):
device = torch.device('cpu' if self.args.cpu else 'cuda')
def _prepare(tensor):
if self.args.precision == 'half': tensor = tensor.half()
return tensor.to(device)
return [_prepare(a) for a in args]
def terminate(self):
if self.args.test_only:
self.test()
return True
else:
epoch = self.optimizer.get_last_epoch() + 1
return epoch >= self.args.epochs
| 4,820 | 31.795918 | 79 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/loss/adversarial.py | import utility
from types import SimpleNamespace
from model import common
from loss import discriminator
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Adversarial(nn.Module):
    """Adversarial loss wrapper: owns the discriminator and its optimizer.

    ``forward`` first updates the discriminator ``gan_k`` times on the
    detached fake, then returns the generator-side loss for the caller to
    backpropagate. Supports GAN, WGAN, WGAN_GP and RGAN variants.
    """
    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k
        self.dis = discriminator.Discriminator(args)
        if gan_type == 'WGAN_GP':
            # see https://arxiv.org/pdf/1704.00028.pdf pp.4
            optim_dict = {
                'optimizer': 'ADAM',
                'betas': (0, 0.9),
                'epsilon': 1e-8,
                'lr': 1e-5,
                'weight_decay': args.weight_decay,
                'decay': args.decay,
                'gamma': args.gamma
            }
            optim_args = SimpleNamespace(**optim_dict)
        else:
            optim_args = args
        self.optimizer = utility.make_optimizer(optim_args, self.dis)
    def forward(self, fake, real):
        """Update D on (fake.detach(), real); return the generator loss."""
        # updating discriminator...
        self.loss = 0
        fake_detach = fake.detach() # do not backpropagate through G
        for _ in range(self.gan_k):
            self.optimizer.zero_grad()
            # d: B x 1 tensor
            d_fake = self.dis(fake_detach)
            d_real = self.dis(real)
            retain_graph = False
            if self.gan_type == 'GAN':
                loss_d = self.bce(d_real, d_fake)
            elif self.gan_type.find('WGAN') >= 0:
                loss_d = (d_fake - d_real).mean()
                if self.gan_type.find('GP') >= 0:
                    # Gradient penalty on random interpolates of real/fake.
                    epsilon = torch.rand_like(fake).view(-1, 1, 1, 1)
                    hat = fake_detach.mul(1 - epsilon) + real.mul(epsilon)
                    hat.requires_grad = True
                    d_hat = self.dis(hat)
                    gradients = torch.autograd.grad(
                        outputs=d_hat.sum(), inputs=hat,
                        retain_graph=True, create_graph=True, only_inputs=True
                    )[0]
                    gradients = gradients.view(gradients.size(0), -1)
                    gradient_norm = gradients.norm(2, dim=1)
                    gradient_penalty = 10 * gradient_norm.sub(1).pow(2).mean()
                    loss_d += gradient_penalty
            # from ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks
            elif self.gan_type == 'RGAN':
                better_real = d_real - d_fake.mean(dim=0, keepdim=True)
                better_fake = d_fake - d_real.mean(dim=0, keepdim=True)
                loss_d = self.bce(better_real, better_fake)
                retain_graph = True
            # Discriminator update
            self.loss += loss_d.item()
            loss_d.backward(retain_graph=retain_graph)
            self.optimizer.step()
            if self.gan_type == 'WGAN':
                # Vanilla WGAN enforces the Lipschitz constraint via clipping.
                for p in self.dis.parameters():
                    p.data.clamp_(-1, 1)
        self.loss /= self.gan_k
        # updating generator...
        d_fake_bp = self.dis(fake)      # for backpropagation, use fake as it is
        if self.gan_type == 'GAN':
            label_real = torch.ones_like(d_fake_bp)
            loss_g = F.binary_cross_entropy_with_logits(d_fake_bp, label_real)
        elif self.gan_type.find('WGAN') >= 0:
            loss_g = -d_fake_bp.mean()
        elif self.gan_type == 'RGAN':
            better_real = d_real - d_fake_bp.mean(dim=0, keepdim=True)
            better_fake = d_fake_bp - d_real.mean(dim=0, keepdim=True)
            loss_g = self.bce(better_fake, better_real)
        # Generator loss
        return loss_g
    def state_dict(self, *args, **kwargs):
        """Merge discriminator and optimizer state into one dict."""
        state_discriminator = self.dis.state_dict(*args, **kwargs)
        state_optimizer = self.optimizer.state_dict()
        return dict(**state_discriminator, **state_optimizer)
    def bce(self, real, fake):
        """Standard GAN BCE on logits: real -> 1, fake -> 0."""
        label_real = torch.ones_like(real)
        label_fake = torch.zeros_like(fake)
        bce_real = F.binary_cross_entropy_with_logits(real, label_real)
        bce_fake = F.binary_cross_entropy_with_logits(fake, label_fake)
        bce_loss = bce_real + bce_fake
        return bce_loss
# Some references
# https://github.com/kuc2477/pytorch-wgan-gp/blob/master/model.py
# OR
# https://github.com/caogang/wgan-gp/blob/master/gan_cifar10.py
| 4,393 | 37.884956 | 84 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/loss/discriminator.py | from model import common
import torch.nn as nn
class Discriminator(nn.Module):
    """Patch discriminator: a stack of conv-BN-LeakyReLU blocks plus an MLP head.

    The output is a raw score per image (logits); it is NOT normalized.
    """

    def __init__(self, args):
        super(Discriminator, self).__init__()

        def conv_bn_lrelu(c_in, c_out, stride=1):
            # 3x3 conv -> BatchNorm -> LeakyReLU(0.2)
            return nn.Sequential(
                nn.Conv2d(
                    c_in,
                    c_out,
                    3,
                    padding=1,
                    stride=stride,
                    bias=False
                ),
                nn.BatchNorm2d(c_out),
                nn.LeakyReLU(negative_slope=0.2, inplace=True)
            )

        depth = 7
        c_in = args.n_colors
        c_out = 64
        layers = [conv_bn_lrelu(c_in, c_out)]
        for step in range(depth):
            c_in = c_out
            if step % 2 == 1:
                # Odd steps widen the channels without downsampling.
                layers.append(conv_bn_lrelu(c_in, 2 * c_out, stride=1))
                c_out *= 2
            else:
                # Even steps halve the spatial resolution.
                layers.append(conv_bn_lrelu(c_in, c_out, stride=2))
        self.features = nn.Sequential(*layers)

        # Spatial size after the (depth + 1) // 2 stride-2 convolutions.
        spatial = args.patch_size // (2 ** ((depth + 1) // 2))
        self.classifier = nn.Sequential(
            nn.Linear(c_out * spatial ** 2, 1024),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Linear(1024, 1)
        )

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
| 1,595 | 27.5 | 79 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/loss/vgg.py | from model import common
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
class VGG(nn.Module):
    """Perceptual loss: MSE between frozen VGG19 features of SR and HR images.

    ``conv_index`` selects the feature layer ('22' -> relu2_2, '54' -> relu5_4).
    """
    def __init__(self, conv_index, rgb_range=1):
        super(VGG, self).__init__()
        vgg_features = models.vgg19(pretrained=True).features
        modules = [m for m in vgg_features]
        if conv_index.find('22') >= 0:
            self.vgg = nn.Sequential(*modules[:8])
        elif conv_index.find('54') >= 0:
            self.vgg = nn.Sequential(*modules[:35])
        # Normalize inputs with ImageNet statistics scaled by rgb_range.
        vgg_mean = (0.485, 0.456, 0.406)
        vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range)
        self.sub_mean = common.MeanShift(rgb_range, vgg_mean, vgg_std)
        # The feature extractor stays frozen.
        for p in self.parameters():
            p.requires_grad = False
    def forward(self, sr, hr):
        """Return MSE between VGG features of ``sr`` and (detached) ``hr``."""
        def _forward(x):
            x = self.sub_mean(x)
            x = self.vgg(x)
            return x
        vgg_sr = _forward(sr)
        with torch.no_grad():
            vgg_hr = _forward(hr.detach())
        loss = F.mse_loss(vgg_sr, vgg_hr)
        return loss
| 1,106 | 28.918919 | 75 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/loss/__init__.py | import os
from importlib import import_module
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Loss(nn.modules.loss._Loss):
    """Weighted sum of losses parsed from ``args.loss`` ('w1*L1+w2*VGG22+...').

    Keeps a per-epoch log tensor (one column per component, plus 'DIS' for
    GAN discriminators and 'Total' when there are several components) and
    can plot/save/restore it.
    """
    def __init__(self, args, ckp):
        super(Loss, self).__init__()
        print('Preparing loss function:')
        self.n_GPUs = args.n_GPUs
        self.loss = []
        self.loss_module = nn.ModuleList()
        for loss in args.loss.split('+'):
            weight, loss_type = loss.split('*')
            if loss_type == 'MSE':
                loss_function = nn.MSELoss()
            elif loss_type == 'L1':
                loss_function = nn.L1Loss()
            elif loss_type.find('VGG') >= 0:
                module = import_module('loss.vgg')
                loss_function = getattr(module, 'VGG')(
                    loss_type[3:],
                    rgb_range=args.rgb_range
                )
            elif loss_type.find('GAN') >= 0:
                module = import_module('loss.adversarial')
                loss_function = getattr(module, 'Adversarial')(
                    args,
                    loss_type
                )
            self.loss.append({
                'type': loss_type,
                'weight': float(weight),
                'function': loss_function}
            )
            # GAN losses also log the discriminator loss as a 'DIS' column.
            if loss_type.find('GAN') >= 0:
                self.loss.append({'type': 'DIS', 'weight': 1, 'function': None})
        if len(self.loss) > 1:
            self.loss.append({'type': 'Total', 'weight': 0, 'function': None})
        for l in self.loss:
            if l['function'] is not None:
                print('{:.3f} * {}'.format(l['weight'], l['type']))
                self.loss_module.append(l['function'])
        self.log = torch.Tensor()
        device = torch.device('cpu' if args.cpu else 'cuda')
        self.loss_module.to(device)
        if args.precision == 'half': self.loss_module.half()
        if not args.cpu and args.n_GPUs > 1:
            self.loss_module = nn.DataParallel(
                self.loss_module, range(args.n_GPUs)
            )
        if args.load != '': self.load(ckp.dir, cpu=args.cpu)
    def forward(self, sr, hr):
        """Return the weighted loss sum; accumulates per-component logs."""
        losses = []
        for i, l in enumerate(self.loss):
            if l['function'] is not None:
                loss = l['function'](sr, hr)
                effective_loss = l['weight'] * loss
                losses.append(effective_loss)
                self.log[-1, i] += effective_loss.item()
            elif l['type'] == 'DIS':
                # Discriminator loss is only logged, not part of the sum.
                self.log[-1, i] += self.loss[i - 1]['function'].loss
        loss_sum = sum(losses)
        if len(self.loss) > 1:
            self.log[-1, -1] += loss_sum.item()
        return loss_sum
    def step(self):
        """Advance LR schedulers of sub-losses that own one (e.g. GAN optimizers)."""
        for l in self.get_loss_module():
            if hasattr(l, 'scheduler'):
                l.scheduler.step()
    def start_log(self):
        """Append a fresh row of zeros to the log for a new epoch."""
        self.log = torch.cat((self.log, torch.zeros(1, len(self.loss))))
    def end_log(self, n_batches):
        """Convert the last row's running sums into per-batch means."""
        self.log[-1].div_(n_batches)
    def display_loss(self, batch):
        """Format running per-component averages after ``batch + 1`` batches."""
        n_samples = batch + 1
        log = []
        for l, c in zip(self.loss, self.log[-1]):
            log.append('[{}: {:.4f}]'.format(l['type'], c / n_samples))
        return ''.join(log)
    def plot_loss(self, apath, epoch):
        """Save one loss-vs-epoch PDF per component under ``apath``."""
        axis = np.linspace(1, epoch, epoch)
        for i, l in enumerate(self.loss):
            label = '{} Loss'.format(l['type'])
            fig = plt.figure()
            plt.title(label)
            plt.plot(axis, self.log[:, i].numpy(), label=label)
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.grid(True)
            plt.savefig(os.path.join(apath, 'loss_{}.pdf'.format(l['type'])))
            plt.close(fig)
    def get_loss_module(self):
        """Unwrap DataParallel when several GPUs are used."""
        if self.n_GPUs == 1:
            return self.loss_module
        else:
            return self.loss_module.module
    def save(self, apath):
        """Persist loss-module state and the log tensor under ``apath``."""
        torch.save(self.state_dict(), os.path.join(apath, 'loss.pt'))
        torch.save(self.log, os.path.join(apath, 'loss_log.pt'))
    def load(self, apath, cpu=False):
        """Restore state and log; fast-forward schedulers by the log length."""
        if cpu:
            kwargs = {'map_location': lambda storage, loc: storage}
        else:
            kwargs = {}
        self.load_state_dict(torch.load(
            os.path.join(apath, 'loss.pt'),
            **kwargs
        ))
        self.log = torch.load(os.path.join(apath, 'loss_log.pt'))
        for l in self.get_loss_module():
            if hasattr(l, 'scheduler'):
                for _ in range(len(self.log)): l.scheduler.step()
| 4,659 | 31.361111 | 80 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/utils/tools.py | import os
import torch
import numpy as np
from PIL import Image
import torch.nn.functional as F
def normalize(x):
    """Map a tensor from [0, 1] to [-1, 1] in place and return it."""
    # x <- 2*x - 1, using in-place ops to avoid extra allocations.
    x.mul_(2)
    return x.add_(-1)
def same_padding(images, ksizes, strides, rates):
    """Zero-pad NCHW `images` like TensorFlow 'SAME' for the given window.

    `ksizes`, `strides`, `rates` are (row, col) pairs; the pad amount is
    split as evenly as possible, with the extra pixel on the bottom/right.
    """
    assert len(images.size()) == 4
    _, _, rows, cols = images.size()
    # Output size of a 'SAME' convolution: ceil(input / stride).
    out_rows = (rows + strides[0] - 1) // strides[0]
    out_cols = (cols + strides[1] - 1) // strides[1]
    # Effective kernel extent once dilation (rates) is applied.
    eff_row = (ksizes[0] - 1) * rates[0] + 1
    eff_col = (ksizes[1] - 1) * rates[1] + 1
    pad_rows = max(0, (out_rows - 1) * strides[0] + eff_row - rows)
    pad_cols = max(0, (out_cols - 1) * strides[1] + eff_col - cols)
    top = pad_rows // 2
    left = pad_cols // 2
    paddings = (left, pad_cols - left, top, pad_rows - top)
    return torch.nn.ZeroPad2d(paddings)(images)
def extract_image_patches(images, ksizes, strides, rates, padding='same'):
    """
    Extract sliding patches from NCHW images, flattened into the channel dim.

    :param images: [batch, channels, in_rows, in_cols] 4-D tensor
    :param ksizes: [ksize_rows, ksize_cols] window size per spatial dim
    :param strides: [stride_rows, stride_cols]
    :param rates: [dilation_rows, dilation_cols]
    :param padding: 'same' (zero-pad to keep all positions) or 'valid'
    :return: tensor of shape [N, C*k*k, L] where L is the number of patches
    """
    assert len(images.size()) == 4
    assert padding in ['same', 'valid']
    if padding == 'same':
        images = same_padding(images, ksizes, strides, rates)
    elif padding == 'valid':
        pass
    else:
        raise NotImplementedError('Unsupported padding type: {}.\
                Only "same" or "valid" are supported.'.format(padding))
    unfold = torch.nn.Unfold(
        kernel_size=ksizes,
        dilation=rates,
        padding=0,
        stride=strides
    )
    return unfold(images)
def reduce_mean(x, axis=None, keepdim=False):
    """Mean of `x` over `axis` (all dims when None; int or iterable accepted).

    Fix: the original tested ``if not axis``, which wrongly treated
    ``axis=0`` (a falsy int) as "reduce every dimension" and crashed on any
    bare int; we compare against None and wrap a lone int in a list.
    """
    if axis is None:
        axis = range(len(x.shape))
    elif isinstance(axis, int):
        axis = [axis]
    # Reduce from the highest dim down so remaining indices stay valid.
    for i in sorted(axis, reverse=True):
        x = torch.mean(x, dim=i, keepdim=keepdim)
    return x
def reduce_std(x, axis=None, keepdim=False):
    """Standard deviation of `x` over `axis` (all dims when None).

    Accepts a single int or an iterable of ints. Fix: the original tested
    ``if not axis``, so ``axis=0`` (falsy) reduced everything and a bare int
    crashed in ``sorted``; we compare against None and wrap lone ints.
    Uses torch's default (unbiased) std.
    """
    if axis is None:
        axis = range(len(x.shape))
    elif isinstance(axis, int):
        axis = [axis]
    # Reduce from the highest dim down so remaining indices stay valid.
    for i in sorted(axis, reverse=True):
        x = torch.std(x, dim=i, keepdim=keepdim)
    return x
def reduce_sum(x, axis=None, keepdim=False):
    """Sum of `x` over `axis` (all dims when None; int or iterable accepted).

    Fix: the original tested ``if not axis``, which wrongly treated
    ``axis=0`` (a falsy int) as "reduce every dimension" and crashed on any
    bare int; we compare against None and wrap a lone int in a list.
    """
    if axis is None:
        axis = range(len(x.shape))
    elif isinstance(axis, int):
        axis = [axis]
    # Reduce from the highest dim down so remaining indices stay valid.
    for i in sorted(axis, reverse=True):
        x = torch.sum(x, dim=i, keepdim=keepdim)
    return x
| 2,777 | 32.878049 | 79 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/utils/__init__.py | 0 | 0 | 0 | py | |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/div2kjpeg.py | import os
from data import srdata
from data import div2k
class DIV2KJPEG(div2k.DIV2K):
    """DIV2K variant whose LR inputs are JPEG-compressed at quality Q.

    The quality factor is parsed from the dataset name, e.g. 'DIV2K-Q75'.
    """

    def __init__(self, args, name='', train=True, benchmark=False):
        # Name pattern is 'DIV2K-Q<quality>'.
        self.q_factor = int(name.replace('DIV2K-Q', ''))
        super(DIV2KJPEG, self).__init__(
            args, name=name, train=train, benchmark=benchmark
        )

    def _set_filesystem(self, dir_data):
        self.apath = os.path.join(dir_data, 'DIV2K')
        self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR')
        self.dir_lr = os.path.join(
            self.apath, 'DIV2K_Q{}'.format(self.q_factor)
        )
        if self.input_large:
            self.dir_lr += 'L'
        self.ext = ('.png', '.jpg')
| 675 | 31.190476 | 67 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/sr291.py | from data import srdata
class SR291(srdata.SRData):
    """291-image training set served through the generic SRData pipeline."""
    def __init__(self, args, name='SR291', train=True, benchmark=False):
        # train/benchmark are accepted for interface parity with the other
        # datasets; SRData's defaults govern the actual behavior here.
        super(SR291, self).__init__(args, name=name)
| 180 | 24.857143 | 72 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/benchmark.py | import os
from data import common
from data import srdata
import numpy as np
import torch
import torch.utils.data as data
class Benchmark(srdata.SRData):
    """Benchmark test sets (Set5 etc.) located under <dir_data>/benchmark/<name>."""

    def __init__(self, args, name='', train=True, benchmark=True):
        # Always constructed in benchmark mode regardless of the argument.
        super(Benchmark, self).__init__(
            args, name=name, train=train, benchmark=True
        )

    def _set_filesystem(self, dir_data):
        self.apath = os.path.join(dir_data, 'benchmark', self.name)
        self.dir_hr = os.path.join(self.apath, 'HR')
        lr_folder = 'LR_bicubicL' if self.input_large else 'LR_bicubic'
        self.dir_lr = os.path.join(self.apath, lr_folder)
        # HR files carry no extra extension; LR files are .png.
        self.ext = ('', '.png')
| 703 | 26.076923 | 67 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/video.py | import os
from data import common
import cv2
import numpy as np
import imageio
import torch
import torch.utils.data as data
class Video(data.Dataset):
    """Frame-by-frame dataset over a video file (inference only).

    Each item is the next decoded frame converted to a CHW tensor; there is
    no HR target, so -1 is returned in its place.
    """
    def __init__(self, args, name='Video', train=False, benchmark=False):
        self.args = args
        self.name = name
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False
        self.do_eval = False
        self.benchmark = benchmark

        self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))
        self.vidcap = cv2.VideoCapture(args.dir_demo)
        self.n_frames = 0
        self.total_frames = int(self.vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __getitem__(self, idx):
        """Decode the next frame; return (tensor, -1, frame_name) or None at EOF."""
        success, lr = self.vidcap.read()
        if success:
            self.n_frames += 1
            lr, = common.set_channel(lr, n_channels=self.args.n_colors)
            lr_t, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
            return lr_t, -1, '{}_{:0>5}'.format(self.filename, self.n_frames)
        else:
            # BUG FIX: the original called the bare name `vidcap`, which is
            # undefined in this scope and raised NameError at end of stream;
            # release the instance's capture handle instead.
            self.vidcap.release()
            return None

    def __len__(self):
        return self.total_frames

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
| 1,207 | 25.844444 | 77 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/srdata.py | import os
import glob
import random
import pickle
from data import common
import numpy as np
import imageio
import torch
import torch.utils.data as data
class SRData(data.Dataset):
    """Generic SR dataset: pairs HR images with per-scale LR images.

    Supports reading image files directly (``args.ext`` contains 'img') or
    pickled binaries cached under ``<apath>/bin`` ('sep'), and yields
    ``(lr, hr, filename)`` tuples.
    """
    def __init__(self, args, name='', train=True, benchmark=False):
        self.args = args
        self.name = name
        self.train = train
        self.split = 'train' if train else 'test'
        self.do_eval = True
        self.benchmark = benchmark
        # VDSR consumes pre-upscaled ("input large") LR images.
        self.input_large = (args.model == 'VDSR')
        self.scale = args.scale
        self.idx_scale = 0
        self._set_filesystem(args.dir_data)
        if args.ext.find('img') < 0:
            path_bin = os.path.join(self.apath, 'bin')
            os.makedirs(path_bin, exist_ok=True)
        list_hr, list_lr = self._scan()
        if args.ext.find('img') >= 0 or benchmark:
            self.images_hr, self.images_lr = list_hr, list_lr
        elif args.ext.find('sep') >= 0:
            # Mirror the HR/LR tree under bin/ and cache pickled arrays there.
            os.makedirs(
                self.dir_hr.replace(self.apath, path_bin),
                exist_ok=True
            )
            for s in self.scale:
                os.makedirs(
                    os.path.join(
                        self.dir_lr.replace(self.apath, path_bin),
                        'X{}'.format(s)
                    ),
                    exist_ok=True
                )
            self.images_hr, self.images_lr = [], [[] for _ in self.scale]
            for h in list_hr:
                b = h.replace(self.apath, path_bin)
                b = b.replace(self.ext[0], '.pt')
                self.images_hr.append(b)
                self._check_and_load(args.ext, h, b, verbose=True)
            for i, ll in enumerate(list_lr):
                for l in ll:
                    b = l.replace(self.apath, path_bin)
                    b = b.replace(self.ext[1], '.pt')
                    self.images_lr[i].append(b)
                    self._check_and_load(args.ext, l, b, verbose=True)
        if train:
            # Repeat the dataset so one epoch yields about
            # batch_size * test_every patches regardless of dataset size.
            n_patches = args.batch_size * args.test_every
            n_images = len(args.data_train) * len(self.images_hr)
            if n_images == 0:
                self.repeat = 0
            else:
                self.repeat = max(n_patches // n_images, 1)
    # Below functions as used to prepare images
    def _scan(self):
        """Return (hr_paths, lr_paths_per_scale) discovered on disk."""
        names_hr = sorted(
            glob.glob(os.path.join(self.dir_hr, '*' + self.ext[0]))
        )
        names_lr = [[] for _ in self.scale]
        for f in names_hr:
            filename, _ = os.path.splitext(os.path.basename(f))
            for si, s in enumerate(self.scale):
                names_lr[si].append(os.path.join(
                    self.dir_lr, 'X{}/{}{}'.format(
                        s, filename, self.ext[1]
                    )
                ))
        return names_hr, names_lr
    def _set_filesystem(self, dir_data):
        """Define apath/dir_hr/dir_lr/ext; subclasses override for their layout."""
        self.apath = os.path.join(dir_data, self.name)
        self.dir_hr = os.path.join(self.apath, 'HR')
        self.dir_lr = os.path.join(self.apath, 'LR_bicubic')
        if self.input_large: self.dir_lr += 'L'
        self.ext = ('.png', '.png')
    def _check_and_load(self, ext, img, f, verbose=True):
        """Pickle `img` into binary `f` unless it exists (or 'reset' was asked)."""
        if not os.path.isfile(f) or ext.find('reset') >= 0:
            if verbose:
                print('Making a binary: {}'.format(f))
            with open(f, 'wb') as _f:
                pickle.dump(imageio.imread(img), _f)
    def __getitem__(self, idx):
        """Return (lr_tensor, hr_tensor, filename) for index ``idx``."""
        lr, hr, filename = self._load_file(idx)
        pair = self.get_patch(lr, hr)
        pair = common.set_channel(*pair, n_channels=self.args.n_colors)
        pair_t = common.np2Tensor(*pair, rgb_range=self.args.rgb_range)
        return pair_t[0], pair_t[1], filename
    def __len__(self):
        if self.train:
            return len(self.images_hr) * self.repeat
        else:
            return len(self.images_hr)
    def _get_index(self, idx):
        """Map a (possibly repeated) index back to a concrete image index."""
        if self.train:
            return idx % len(self.images_hr)
        else:
            return idx
    def _load_file(self, idx):
        """Load the (lr, hr) pair for ``idx`` from image files or binaries."""
        idx = self._get_index(idx)
        f_hr = self.images_hr[idx]
        f_lr = self.images_lr[self.idx_scale][idx]
        filename, _ = os.path.splitext(os.path.basename(f_hr))
        if self.args.ext == 'img' or self.benchmark:
            hr = imageio.imread(f_hr)
            lr = imageio.imread(f_lr)
        elif self.args.ext.find('sep') >= 0:
            with open(f_hr, 'rb') as _f:
                hr = pickle.load(_f)
            with open(f_lr, 'rb') as _f:
                lr = pickle.load(_f)
        return lr, hr, filename
    def get_patch(self, lr, hr):
        """Random patch + augmentation when training; aligned full image otherwise."""
        scale = self.scale[self.idx_scale]
        if self.train:
            lr, hr = common.get_patch(
                lr, hr,
                patch_size=self.args.patch_size,
                scale=scale,
                multi=(len(self.scale) > 1),
                input_large=self.input_large
            )
            if not self.args.no_augment: lr, hr = common.augment(lr, hr)
        else:
            # Crop HR so its size is an exact multiple of the LR size.
            ih, iw = lr.shape[:2]
            hr = hr[0:ih * scale, 0:iw * scale]
        return lr, hr
    def set_scale(self, idx_scale):
        if not self.input_large:
            self.idx_scale = idx_scale
        else:
            # Input-large models pick a random scale index instead.
            self.idx_scale = random.randint(0, len(self.scale) - 1)
| 5,337 | 32.78481 | 73 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/demo.py | import os
from data import common
import numpy as np
import imageio
import torch
import torch.utils.data as data
class Demo(data.Dataset):
    """Dataset over loose image files in ``args.dir_demo`` (inference only).

    Yields (tensor, -1, basename) per image; -1 stands in for the missing
    HR target.
    """

    def __init__(self, args, name='Demo', train=False, benchmark=False):
        self.args = args
        self.name = name
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False
        self.benchmark = benchmark

        # Collect .png / .jp(e)g files, sorted for a stable order.
        found = (
            os.path.join(args.dir_demo, f)
            for f in os.listdir(args.dir_demo)
            if f.find('.png') >= 0 or f.find('.jp') >= 0
        )
        self.filelist = sorted(found)

    def __getitem__(self, idx):
        path = self.filelist[idx]
        filename = os.path.splitext(os.path.basename(path))[0]
        image = imageio.imread(path)
        image, = common.set_channel(image, n_channels=self.args.n_colors)
        tensor, = common.np2Tensor(image, rgb_range=self.args.rgb_range)
        return tensor, -1, filename

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
| 1,075 | 25.9 | 76 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/common.py | import random
import numpy as np
import skimage.color as sc
import torch
def get_patch(*args, patch_size=96, scale=1, multi=False, input_large=False):
    """Crop the same random patch_size x patch_size window from every array.

    In this demosaicing codebase LR and HR share one resolution (the
    effective scale is 1 on both branches of the original), so the input
    and target windows coincide. ``scale``/``multi``/``input_large`` are
    kept for interface compatibility with the SR variants of this helper.
    """
    ih, iw = args[0].shape[:2]
    # Input and target patch sizes coincide here.
    ip = tp = patch_size
    ix = random.randrange(0, iw - ip + 1)
    iy = random.randrange(0, ih - ip + 1)
    tx, ty = ix, iy
    return [
        args[0][iy:iy + ip, ix:ix + ip, :],
        *[a[ty:ty + tp, tx:tx + tp, :] for a in args[1:]]
    ]
def set_channel(*args, n_channels=3):
    """Coerce each HWC image to exactly ``n_channels`` channels.

    Grayscale HxW arrays are promoted to HxWx1 first; RGB -> 1 channel uses
    the YCbCr luma, 1 -> 3 channels replicates the single channel.
    """
    def _convert(img):
        if img.ndim == 2:
            # Promote grayscale HxW to HxWx1.
            img = np.expand_dims(img, axis=2)
        c = img.shape[2]
        if n_channels == 1 and c == 3:
            # RGB -> Y (luma) channel only.
            img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2)
        elif n_channels == 3 and c == 1:
            # Replicate the single channel three times.
            img = np.concatenate([img] * n_channels, 2)
        return img
    return [_convert(a) for a in args]
def np2Tensor(*args, rgb_range=255):
    """Convert HWC numpy images (0..255 convention) to CHW float tensors scaled to rgb_range."""
    def _convert(img):
        # HWC -> CHW, contiguous for torch.from_numpy.
        chw = np.ascontiguousarray(img.transpose((2, 0, 1)))
        tensor = torch.from_numpy(chw).float()
        # Rescale from the 0..255 convention to the configured rgb_range.
        tensor.mul_(rgb_range / 255)
        return tensor
    return [_convert(a) for a in args]
def augment(*args, hflip=True, rot=True):
    """Apply one random flip/rotation combination to every image.

    Each transform (horizontal flip, vertical flip, 90-degree transpose) is
    drawn once with probability 0.5 and applied identically to all inputs.
    """
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5

    def _apply(img):
        if do_hflip:
            img = img[:, ::-1, :]
        if do_vflip:
            img = img[::-1, :, :]
        if do_rot90:
            img = img.transpose(1, 0, 2)
        return img

    return [_apply(a) for a in args]
| 1,770 | 23.260274 | 77 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/__init__.py | from importlib import import_module
#from dataloader import MSDataLoader
from torch.utils.data import dataloader
from torch.utils.data import ConcatDataset
# This is a simple wrapper function for ConcatDataset
class MyConcatDataset(ConcatDataset):
    """ConcatDataset that forwards the train flag and scale selection to members."""

    def __init__(self, datasets):
        super(MyConcatDataset, self).__init__(datasets)
        # All member datasets share the same split; take the flag from the first.
        self.train = datasets[0].train

    def set_scale(self, idx_scale):
        # Propagate the scale index to members that support it.
        for ds in self.datasets:
            if hasattr(ds, 'set_scale'):
                ds.set_scale(idx_scale)
class Data:
    """Builds the train loader (concatenation of args.data_train) and test loaders."""
    def __init__(self, args):
        self.loader_train = None
        if not args.test_only:
            datasets = []
            for d in args.data_train:
                # 'DIV2K-Q<q>' names map to the DIV2KJPEG module.
                module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module_name.lower())
                datasets.append(getattr(m, module_name)(args, name=d))
            self.loader_train = dataloader.DataLoader(
                MyConcatDataset(datasets),
                batch_size=args.batch_size,
                shuffle=True,
                pin_memory=not args.cpu,
                num_workers=args.n_threads,
            )
        self.loader_test = []
        for d in args.data_test:
            if d in ['CBSD68','Kodak24','McM','Set5', 'Set14', 'B100', 'Urban100']:
                # Standard benchmark sets all share the Benchmark loader class.
                m = import_module('data.benchmark')
                testset = getattr(m, 'Benchmark')(args, train=False, name=d)
            else:
                module_name = d if d.find('DIV2K-Q') < 0 else 'DIV2KJPEG'
                m = import_module('data.' + module_name.lower())
                testset = getattr(m, module_name)(args, train=False, name=d)
            # Test loaders run one full image at a time.
            self.loader_test.append(
                dataloader.DataLoader(
                    testset,
                    batch_size=1,
                    shuffle=False,
                    pin_memory=not args.cpu,
                    num_workers=args.n_threads,
                )
            )
| 1,974 | 36.264151 | 83 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/data/div2k.py | import os
from data import srdata
class DIV2K(srdata.SRData):
    """DIV2K training set; ``args.data_range`` ('b-e' or 'b-e/b-e') selects image indices."""

    def __init__(self, args, name='DIV2K', train=True, benchmark=False):
        ranges = [r.split('-') for r in args.data_range.split('/')]
        if train:
            span = ranges[0]
        elif args.test_only and len(ranges) == 1:
            # A single range in test-only mode serves as the test split too.
            span = ranges[0]
        else:
            span = ranges[1]
        self.begin, self.end = [int(v) for v in span]
        super(DIV2K, self).__init__(
            args, name=name, train=train, benchmark=benchmark
        )

    def _scan(self):
        # Restrict the scanned file lists to the configured index range.
        names_hr, names_lr = super(DIV2K, self)._scan()
        names_hr = names_hr[self.begin - 1:self.end]
        names_lr = [group[self.begin - 1:self.end] for group in names_lr]
        return names_hr, names_lr

    def _set_filesystem(self, dir_data):
        super(DIV2K, self)._set_filesystem(dir_data)
        self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR')
        self.dir_lr = os.path.join(self.apath, 'DIV2K_train_LR_bicubic')
        if self.input_large:
            self.dir_lr += 'L'
| 1,134 | 33.393939 | 72 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/rcan.py | ## ECCV-2018-Image Super-Resolution Using Very Deep Residual Channel Attention Networks
## https://arxiv.org/abs/1807.02758
from model import common
import torch.nn as nn
def make_model(args, parent=False):
    """Factory entry point used by the framework's model loader."""
    return RCAN(args)
## Channel Attention (CA) Layer
class CALayer(nn.Module):
    """Channel attention: squeeze (global avg pool) then excite (1x1 bottleneck + sigmoid)."""

    def __init__(self, channel, reduction=16):
        super(CALayer, self).__init__()
        # Squeeze: collapse each feature map to a single descriptor.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Excite: bottleneck MLP (as 1x1 convs) producing per-channel gates.
        self.conv_du = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
            nn.Sigmoid()
        )

    def forward(self, x):
        gate = self.conv_du(self.avg_pool(x))
        return x * gate
## Residual Channel Attention Block (RCAB)
class RCAB(nn.Module):
    """Residual Channel Attention Block: conv-act-conv + channel attention, with identity skip."""

    def __init__(
        self, conv, n_feat, kernel_size, reduction,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(RCAB, self).__init__()
        layers = []
        for i in range(2):
            layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if i == 0:
                # Activation only between the two convolutions.
                layers.append(act)
        layers.append(CALayer(n_feat, reduction))
        self.body = nn.Sequential(*layers)
        # Kept for interface parity; the residual is not scaled here.
        self.res_scale = res_scale

    def forward(self, x):
        # Identity skip around the attention-gated conv stack.
        return x + self.body(x)
## Residual Group (RG)
class ResidualGroup(nn.Module):
    """Residual Group: n_resblocks RCABs plus a trailing conv, wrapped in a long skip."""

    def __init__(self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks):
        super(ResidualGroup, self).__init__()
        blocks = [
            RCAB(
                conv, n_feat, kernel_size, reduction,
                bias=True, bn=False, act=nn.ReLU(True), res_scale=1)
            for _ in range(n_resblocks)
        ]
        blocks.append(conv(n_feat, n_feat, kernel_size))
        self.body = nn.Sequential(*blocks)

    def forward(self, x):
        return x + self.body(x)
## Residual Channel Attention Network (RCAN)
class RCAN(nn.Module):
    """Residual Channel Attention Network (Zhang et al., ECCV 2018).

    head conv -> n_resgroups ResidualGroups with a long skip -> upsampling
    tail, with DIV2K mean subtraction/addition around the network.
    """
    def __init__(self, args, conv=common.default_conv):
        super(RCAN, self).__init__()
        n_resgroups = args.n_resgroups
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        reduction = args.reduction
        scale = args.scale[0]
        act = nn.ReLU(True)
        # RGB mean for DIV2K
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        # define head module
        modules_head = [conv(args.n_colors, n_feats, kernel_size)]
        # define body module
        modules_body = [
            ResidualGroup(
                conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks) \
            for _ in range(n_resgroups)]
        modules_body.append(conv(n_feats, n_feats, kernel_size))
        # define tail module
        modules_tail = [
            common.Upsampler(conv, scale, n_feats, act=False),
            conv(n_feats, args.n_colors, kernel_size)]
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = nn.Sequential(*modules_head)
        self.body = nn.Sequential(*modules_body)
        self.tail = nn.Sequential(*modules_tail)
    def forward(self, x):
        """head -> residual body (long skip) -> upsampling tail."""
        x = self.sub_mean(x)
        x = self.head(x)
        res = self.body(x)
        res += x
        x = self.tail(res)
        x = self.add_mean(x)
        return x
    def load_state_dict(self, state_dict, strict=False):
        """Copy matching parameters; tolerate a mismatched upsampler tail.

        Allows loading a checkpoint trained for a different scale: tail
        mismatches are reported and skipped instead of raising.
        """
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name in own_state:
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    if name.find('tail') >= 0:
                        print('Replace pre-trained upsampler to new one...')
                    else:
                        raise RuntimeError('While copying the parameter named {}, '
                                           'whose dimensions in the model are {} and '
                                           'whose dimensions in the checkpoint are {}.'
                                           .format(name, own_state[name].size(), param.size()))
            elif strict:
                if name.find('tail') == -1:
                    raise KeyError('unexpected key "{}" in state_dict'
                                   .format(name))
        if strict:
            missing = set(own_state.keys()) - set(state_dict.keys())
            if len(missing) > 0:
                raise KeyError('missing keys in state_dict: "{}"'.format(missing))
| 5,178 | 34.717241 | 116 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/ddbpn.py | # Deep Back-Projection Networks For Super-Resolution
# https://arxiv.org/abs/1803.02735
from model import common
import torch
import torch.nn as nn
def make_model(args, parent=False):
    """Factory entry point used by the framework's model loader."""
    return DDBPN(args)
def projection_conv(in_channels, out_channels, scale, up=True):
    """Return the (transposed) convolution used by DBPN projection units.

    Kernel size / stride / padding follow the DBPN settings for scales
    2, 4 and 8. Up-projections use ConvTranspose2d, down-projections Conv2d.
    """
    spec = {
        2: (6, 2, 2),
        4: (8, 4, 2),
        8: (12, 8, 2),
    }
    kernel_size, stride, padding = spec[scale]
    conv_f = nn.ConvTranspose2d if up else nn.Conv2d
    return conv_f(
        in_channels, out_channels, kernel_size,
        stride=stride, padding=padding
    )
class DenseProjection(nn.Module):
    """DBPN dense projection unit (up- or down-projection with error feedback)."""

    def __init__(self, in_channels, nr, scale, up=True, bottleneck=True):
        super(DenseProjection, self).__init__()
        if bottleneck:
            # 1x1 conv squeezes the densely-concatenated input back to nr.
            self.bottleneck = nn.Sequential(
                nn.Conv2d(in_channels, nr, 1),
                nn.PReLU(nr)
            )
            inter_channels = nr
        else:
            self.bottleneck = None
            inter_channels = in_channels

        # Project, back-project, then project the back-projection error.
        self.conv_1 = nn.Sequential(
            projection_conv(inter_channels, nr, scale, up),
            nn.PReLU(nr)
        )
        self.conv_2 = nn.Sequential(
            projection_conv(nr, inter_channels, scale, not up),
            nn.PReLU(inter_channels)
        )
        self.conv_3 = nn.Sequential(
            projection_conv(inter_channels, nr, scale, up),
            nn.PReLU(nr)
        )

    def forward(self, x):
        if self.bottleneck is not None:
            x = self.bottleneck(x)
        projected = self.conv_1(x)
        back = self.conv_2(projected)
        residual = back - x
        return projected + self.conv_3(residual)
class DDBPN(nn.Module):
    """Dense Deep Back-Projection Network (D-DBPN, CVPR 2018).

    Alternating up-/down-projection units with dense connections; all
    intermediate high-resolution features are concatenated for the final
    reconstruction.  args.scale[0] must be in {2, 4, 8} (see
    projection_conv).
    """
    def __init__(self, args):
        super(DDBPN, self).__init__()
        scale = args.scale[0]

        # n0: width of the initial features, nr: width of each projection.
        n0 = 128
        nr = 32
        self.depth = 6

        # DIV2K RGB statistics used to center the input range.
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

        # Initial feature extraction: 3x3 conv to n0, 1x1 squeeze to nr.
        initial = [
            nn.Conv2d(args.n_colors, n0, 3, padding=1),
            nn.PReLU(n0),
            nn.Conv2d(n0, nr, 1),
            nn.PReLU(nr)
        ]
        self.initial = nn.Sequential(*initial)

        self.upmodules = nn.ModuleList()
        self.downmodules = nn.ModuleList()
        channels = nr
        # Up-projections: input width grows by nr per stage due to the
        # dense concatenation; the first two stages skip the bottleneck.
        for i in range(self.depth):
            self.upmodules.append(
                DenseProjection(channels, nr, scale, True, i > 1)
            )
            if i != 0:
                channels += nr

        channels = nr
        # Down-projections (one fewer than up); no bottleneck at stage 0.
        for i in range(self.depth - 1):
            self.downmodules.append(
                DenseProjection(channels, nr, scale, False, i != 0)
            )
            channels += nr

        # Fuse all `depth` HR feature stacks into the output image.
        reconstruction = [
            nn.Conv2d(self.depth * nr, args.n_colors, 3, padding=1)
        ]
        self.reconstruction = nn.Sequential(*reconstruction)

        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.initial(x)

        # h_list: HR features from up-projections,
        # l_list: LR features from down-projections (densely concatenated).
        h_list = []
        l_list = []
        for i in range(self.depth - 1):
            if i == 0:
                l = x
            else:
                l = torch.cat(l_list, dim=1)
            h_list.append(self.upmodules[i](l))
            l_list.append(self.downmodules[i](torch.cat(h_list, dim=1)))

        h_list.append(self.upmodules[-1](torch.cat(l_list, dim=1)))
        out = self.reconstruction(torch.cat(h_list, dim=1))
        out = self.add_mean(out)

        return out
| 3,629 | 26.5 | 78 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/rdn.py | # Residual Dense Network for Image Super-Resolution
# https://arxiv.org/abs/1802.08797
from model import common
import torch
import torch.nn as nn
def make_model(args, parent=False):
    """Framework factory hook: build an RDN from the parsed options."""
    model = RDN(args)
    return model
class RDB_Conv(nn.Module):
    """Conv+ReLU whose output is concatenated onto its input.

    This is the dense-growth unit of an RDB: the channel count grows by
    `growRate` every time one of these is applied.
    """

    def __init__(self, inChannels, growRate, kSize=3):
        super(RDB_Conv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(inChannels, growRate, kSize,
                      padding=(kSize - 1) // 2, stride=1),
            nn.ReLU()
        )

    def forward(self, x):
        # Dense connectivity: stack the new feature maps after the input.
        return torch.cat((x, self.conv(x)), 1)
class RDB(nn.Module):
    """Residual Dense Block: C densely connected convs + local fusion.

    The dense stack grows from growRate0 to growRate0 + C*growRate
    channels; a 1x1 Local Feature Fusion conv squeezes it back, and a
    local residual connection adds the block input.
    """

    def __init__(self, growRate0, growRate, nConvLayers, kSize=3):
        super(RDB, self).__init__()
        self.convs = nn.Sequential(*[
            RDB_Conv(growRate0 + c * growRate, growRate)
            for c in range(nConvLayers)
        ])

        # Local Feature Fusion: 1x1 conv back to growRate0 channels.
        self.LFF = nn.Conv2d(growRate0 + nConvLayers * growRate, growRate0,
                             1, padding=0, stride=1)

    def forward(self, x):
        # Local residual learning around the dense stack.
        return self.LFF(self.convs(x)) + x
class RDN(nn.Module):
    """Residual Dense Network (RDN, CVPR 2018).

    Shallow feature extraction -> D residual dense blocks -> global
    feature fusion -> pixel-shuffle upsampling.  args.RDNconfig 'A'/'B'
    selects (D blocks, C convs per block, G growth channels); the scale
    must be 2, 3 or 4.
    """
    def __init__(self, args):
        super(RDN, self).__init__()
        r = args.scale[0]
        G0 = args.G0
        kSize = args.RDNkSize

        # number of RDB blocks, conv layers per block, growth channels
        self.D, C, G = {
            'A': (20, 6, 32),
            'B': (16, 8, 64),
        }[args.RDNconfig]

        # Shallow feature extraction net
        self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)
        self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)

        # Residual dense blocks and dense feature fusion
        self.RDBs = nn.ModuleList()
        for i in range(self.D):
            self.RDBs.append(
                RDB(growRate0 = G0, growRate = G, nConvLayers = C)
            )

        # Global Feature Fusion: 1x1 fuse of all D block outputs, then 3x3.
        self.GFF = nn.Sequential(*[
            nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1),
            nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)
        ])

        # Up-sampling net
        if r == 2 or r == 3:
            self.UPNet = nn.Sequential(*[
                nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(r),
                nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
            ])
        elif r == 4:
            # x4 is realized as two successive x2 pixel-shuffle stages.
            self.UPNet = nn.Sequential(*[
                nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
            ])
        else:
            raise ValueError("scale must be 2 or 3 or 4.")

    def forward(self, x):
        f__1 = self.SFENet1(x)
        x = self.SFENet2(f__1)

        # Collect every RDB output for global feature fusion.
        RDBs_out = []
        for i in range(self.D):
            x = self.RDBs[i](x)
            RDBs_out.append(x)

        x = self.GFF(torch.cat(RDBs_out,1))
        x += f__1  # global residual from the first shallow features

        return self.UPNet(x)
| 3,202 | 29.216981 | 90 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/mdsr.py | from model import common
import torch.nn as nn
def make_model(args, parent=False):
    """Framework factory hook: build an MDSR from the parsed options."""
    model = MDSR(args)
    return model
class MDSR(nn.Module):
    """Multi-scale EDSR: shared residual trunk with per-scale branches.

    One pre-processing branch and one upsampler exist per entry of
    args.scale; set_scale() selects which branch forward() uses.
    """
    def __init__(self, args, conv=common.default_conv):
        super(MDSR, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        # Index into args.scale selecting the active branch (see set_scale).
        self.scale_idx = 0

        act = nn.ReLU(True)

        # DIV2K RGB statistics used to center the input range.
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)

        m_head = [conv(args.n_colors, n_feats, kernel_size)]

        # One two-resblock (5x5) pre-processing branch per training scale.
        self.pre_process = nn.ModuleList([
            nn.Sequential(
                common.ResBlock(conv, n_feats, 5, act=act),
                common.ResBlock(conv, n_feats, 5, act=act)
            ) for _ in args.scale
        ])

        # Shared residual trunk.
        m_body = [
            common.ResBlock(
                conv, n_feats, kernel_size, act=act
            ) for _ in range(n_resblocks)
        ]
        m_body.append(conv(n_feats, n_feats, kernel_size))

        # One pixel-shuffle upsampler per scale.
        self.upsample = nn.ModuleList([
            common.Upsampler(
                conv, s, n_feats, act=False
            ) for s in args.scale
        ])

        m_tail = [conv(n_feats, args.n_colors, kernel_size)]

        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)

        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        x = self.pre_process[self.scale_idx](x)

        res = self.body(x)
        res += x  # global residual connection around the trunk

        x = self.upsample[self.scale_idx](res)
        x = self.tail(x)
        x = self.add_mean(x)

        return x

    def set_scale(self, scale_idx):
        # Called by the Model wrapper to pick the branch for each batch.
        self.scale_idx = scale_idx
| 1,837 | 25.637681 | 78 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/common.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def default_conv(in_channels, out_channels, kernel_size, stride=1, bias=True):
    """'Same'-padded 2-D convolution used throughout the model zoo.

    Padding kernel_size // 2 keeps spatial size unchanged at stride 1.
    """
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=padding, stride=stride, bias=bias)
class MeanShift(nn.Conv2d):
    """Frozen 1x1 conv that subtracts (sign=-1) or adds (sign=+1) the
    dataset RGB mean, scaled to `rgb_range`, with optional per-channel std.
    """

    def __init__(
        self, rgb_range,
        rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        mean = torch.Tensor(rgb_mean)
        # Identity kernel divided by std implements per-channel scaling.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
        self.bias.data = sign * rgb_range * mean / std
        # The shift is fixed — never trained.
        for p in self.parameters():
            p.requires_grad = False
class BasicBlock(nn.Sequential):
    """conv -> [BatchNorm2d] -> [activation] packaged as a Sequential."""

    # NOTE(review): the default `act=nn.PReLU()` is evaluated once at class
    # definition time, so every BasicBlock built with the default shares the
    # same PReLU module (and its learnable slope) — confirm this is intended.
    def __init__(
        self, conv, in_channels, out_channels, kernel_size, stride=1, bias=True,
        bn=False, act=nn.PReLU()):
        layers = [conv(in_channels, out_channels, kernel_size, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
class ResBlock(nn.Module):
    """Residual block: conv -> [bn] -> act -> conv -> [bn], scaled skip add.

    forward(x) = x + res_scale * body(x).
    """

    def __init__(
        self, conv, n_feats, kernel_size,
        bias=True, bn=False, act=nn.PReLU(), res_scale=1):
        super(ResBlock, self).__init__()
        # Two conv stages; the activation sits only after the first one.
        layers = [conv(n_feats, n_feats, kernel_size, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(n_feats))
        layers.append(act)
        layers.append(conv(n_feats, n_feats, kernel_size, bias=bias))
        if bn:
            layers.append(nn.BatchNorm2d(n_feats))

        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        return x + self.body(x).mul(self.res_scale)
class Upsampler(nn.Sequential):
    """Sub-pixel (PixelShuffle) upsampling stack.

    Supports power-of-two scales (log2(scale) conv + shuffle stages) and
    scale 3 (one x3 stage).  `act` may be False, 'relu' or 'prelu'; `bn`
    optionally inserts BatchNorm2d after each shuffle.

    Raises NotImplementedError for any other scale.
    """

    def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
        m = []
        if (scale & (scale - 1)) == 0:    # Is scale = 2^n?
            for _ in range(int(math.log(scale, 2))):
                # BUGFIX: `bias` used to be passed positionally and landed
                # in the `stride` parameter of this file's
                # default_conv(in, out, k, stride=1, bias=True); with
                # bias=False that produced a stride-0 convolution.  Passing
                # by keyword is identical for the existing bias=True call
                # sites and fixes bias=False.
                m.append(conv(n_feats, 4 * n_feats, 3, bias=bias))
                m.append(nn.PixelShuffle(2))
                if bn:
                    m.append(nn.BatchNorm2d(n_feats))
                if act == 'relu':
                    m.append(nn.ReLU(True))
                elif act == 'prelu':
                    m.append(nn.PReLU(n_feats))

        elif scale == 3:
            m.append(conv(n_feats, 9 * n_feats, 3, bias=bias))
            m.append(nn.PixelShuffle(3))
            if bn:
                m.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                m.append(nn.ReLU(True))
            elif act == 'prelu':
                m.append(nn.PReLU(n_feats))
        else:
            raise NotImplementedError

        super(Upsampler, self).__init__(*m)
| 2,799 | 30.460674 | 80 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/__init__.py | import os
from importlib import import_module
import torch
import torch.nn as nn
from torch.autograd import Variable
class Model(nn.Module):
    """Wrapper that builds the requested network and adds inference helpers.

    Responsibilities: dynamic construction from args.model, device and
    precision placement, optional DataParallel, checkpoint save/load, and
    test-time tricks (patch-wise chopping and x8 geometric self-ensemble).
    """
    def __init__(self, args, ckp):
        super(Model, self).__init__()
        print('Making model...')

        self.scale = args.scale
        self.idx_scale = 0
        self.self_ensemble = args.self_ensemble
        self.chop = args.chop
        self.precision = args.precision
        self.cpu = args.cpu
        self.device = torch.device('cpu' if args.cpu else 'cuda')
        self.n_GPUs = args.n_GPUs
        self.save_models = args.save_models

        # Import model/<name>.py and let its make_model() build the net.
        module = import_module('model.' + args.model.lower())
        self.model = module.make_model(args).to(self.device)
        if args.precision == 'half': self.model.half()

        if not args.cpu and args.n_GPUs > 1:
            self.model = nn.DataParallel(self.model, range(args.n_GPUs))

        self.load(
            ckp.dir,
            pre_train=args.pre_train,
            resume=args.resume,
            cpu=args.cpu
        )
        print(self.model, file=ckp.log_file)

    def forward(self, x, idx_scale):
        """Run the wrapped model, choosing scale and test-time strategy."""
        self.idx_scale = idx_scale
        target = self.get_model()
        if hasattr(target, 'set_scale'):
            # Multi-scale models (e.g. MDSR) switch branches per scale.
            target.set_scale(idx_scale)

        if self.self_ensemble and not self.training:
            if self.chop:
                forward_function = self.forward_chop
            else:
                forward_function = self.model.forward

            return self.forward_x8(x, forward_function)
        elif self.chop and not self.training:
            return self.forward_chop(x)
        else:
            return self.model(x)

    def get_model(self):
        """Return the raw module (unwraps DataParallel when n_GPUs > 1)."""
        if self.n_GPUs == 1:
            return self.model
        else:
            return self.model.module

    def state_dict(self, **kwargs):
        # Always expose the unwrapped module's weights.
        target = self.get_model()
        return target.state_dict(**kwargs)

    def save(self, apath, epoch, is_best=False):
        """Save model_latest.pt, plus model_best.pt / model_<epoch>.pt."""
        target = self.get_model()
        torch.save(
            target.state_dict(),
            os.path.join(apath, 'model_latest.pt')
        )
        if is_best:
            torch.save(
                target.state_dict(),
                os.path.join(apath, 'model_best.pt')
            )

        if self.save_models:
            torch.save(
                target.state_dict(),
                os.path.join(apath, 'model_{}.pt'.format(epoch))
            )

    def load(self, apath, pre_train='.', resume=-1, cpu=False):
        """Restore weights: resume==-1 latest, 0 pre-trained, else epoch N."""
        if cpu:
            # Remap CUDA tensors in the checkpoint onto the CPU.
            kwargs = {'map_location': lambda storage, loc: storage}
        else:
            kwargs = {}

        if resume == -1:
            self.get_model().load_state_dict(
                torch.load(
                    os.path.join(apath, 'model_latest.pt'),
                    **kwargs
                ),
                strict=False
            )
        elif resume == 0:
            if pre_train != '.':
                print('Loading model from {}'.format(pre_train))
                self.get_model().load_state_dict(
                    torch.load(pre_train, **kwargs),
                    strict=False
                )
        else:
            self.get_model().load_state_dict(
                torch.load(
                    os.path.join(apath, 'model', 'model_{}.pt'.format(resume)),
                    **kwargs
                ),
                strict=False
            )

    def forward_chop(self, x, shave=10, min_size=6400):
        """Memory-saving inference: process 4 overlapping quadrants
        (recursively, until each is below min_size) and stitch them back.
        """
        scale = self.scale[self.idx_scale]
        # NOTE(review): scale is immediately overridden to 1, so the output
        # has the same spatial size as the input (demosaicking/denoising
        # variant of this codebase) — confirm before reusing for SR.
        scale = 1
        n_GPUs = min(self.n_GPUs, 4)
        b, c, h, w = x.size()
        h_half, w_half = h // 2, w // 2
        # Extend each quadrant by `shave` pixels to avoid border artifacts.
        h_size, w_size = h_half + shave, w_half + shave
        lr_list = [
            x[:, :, 0:h_size, 0:w_size],
            x[:, :, 0:h_size, (w - w_size):w],
            x[:, :, (h - h_size):h, 0:w_size],
            x[:, :, (h - h_size):h, (w - w_size):w]]

        if w_size * h_size < min_size:
            # Small enough: batch the quadrants through the model directly.
            sr_list = []
            for i in range(0, 4, n_GPUs):
                lr_batch = torch.cat(lr_list[i:(i + n_GPUs)], dim=0)
                sr_batch = self.model(lr_batch)
                sr_list.extend(sr_batch.chunk(n_GPUs, dim=0))
        else:
            # Still too large: recurse on each quadrant.
            sr_list = [
                self.forward_chop(patch, shave=shave, min_size=min_size) \
                for patch in lr_list
            ]

        h, w = scale * h, scale * w
        h_half, w_half = scale * h_half, scale * w_half
        h_size, w_size = scale * h_size, scale * w_size
        shave *= scale

        # Paste the four results back, discarding the shaved overlap.
        output = x.new(b, c, h, w)
        output[:, :, 0:h_half, 0:w_half] \
            = sr_list[0][:, :, 0:h_half, 0:w_half]
        output[:, :, 0:h_half, w_half:w] \
            = sr_list[1][:, :, 0:h_half, (w_size - w + w_half):w_size]
        output[:, :, h_half:h, 0:w_half] \
            = sr_list[2][:, :, (h_size - h + h_half):h_size, 0:w_half]
        output[:, :, h_half:h, w_half:w] \
            = sr_list[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size]

        return output

    def forward_x8(self, x, forward_function):
        """Geometric self-ensemble: average predictions over the 8
        flip/transpose variants of the input (round-trips via numpy).
        """
        def _transform(v, op):
            if self.precision != 'single': v = v.float()

            v2np = v.data.cpu().numpy()
            if op == 'v':
                tfnp = v2np[:, :, :, ::-1].copy()
            elif op == 'h':
                tfnp = v2np[:, :, ::-1, :].copy()
            elif op == 't':
                tfnp = v2np.transpose((0, 1, 3, 2)).copy()

            ret = torch.Tensor(tfnp).to(self.device)
            if self.precision == 'half': ret = ret.half()

            return ret

        # Build all 8 dihedral variants by chaining v/h/t transforms.
        lr_list = [x]
        for tf in 'v', 'h', 't':
            lr_list.extend([_transform(t, tf) for t in lr_list])

        sr_list = [forward_function(aug) for aug in lr_list]
        # Undo each variant's transforms (they are involutions).
        for i in range(len(sr_list)):
            if i > 3:
                sr_list[i] = _transform(sr_list[i], 't')
            if i % 4 > 1:
                sr_list[i] = _transform(sr_list[i], 'h')
            if (i % 4) % 2 == 1:
                sr_list[i] = _transform(sr_list[i], 'v')

        output_cat = torch.cat(sr_list, dim=0)
        output = output_cat.mean(dim=0, keepdim=True)

        return output
| 6,200 | 31.465969 | 90 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/panet.py | from model import common
from model import attention
import torch.nn as nn
def make_model(args, parent=False):
    """Framework factory hook: build a PANET from the parsed options."""
    model = PANET(args)
    return model
class PANET(nn.Module):
    """EDSR-style residual trunk with a Pyramid Attention block mid-way.

    This demosaicking variant keeps input/output at the same resolution
    (the upsampler tail is commented out) and skips the mean shift.
    """
    def __init__(self, args, conv=common.default_conv):
        super(PANET, self).__init__()

        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        scale = args.scale[0]

        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        msa = attention.PyramidAttention()
        # define head module
        m_head = [conv(args.n_colors, n_feats, kernel_size)]

        # define body module: half the resblocks, attention, other half.
        # NOTE(review): nn.PReLU() is passed positionally into ResBlock's
        # `bias` slot (signature conv, n_feats, kernel_size, bias, bn, act,
        # res_scale), so it merely acts as a truthy bias flag and the block
        # falls back to the shared default PReLU activation — confirm.
        m_body = [
            common.ResBlock(
                conv, n_feats, kernel_size, nn.PReLU(), res_scale=args.res_scale
            ) for _ in range(n_resblocks//2)
        ]
        m_body.append(msa)
        for i in range(n_resblocks//2):
            m_body.append(common.ResBlock(conv,n_feats,kernel_size,nn.PReLU(),res_scale=args.res_scale))
        m_body.append(conv(n_feats, n_feats, kernel_size))

        # define tail module
        #m_tail = [
        #    common.Upsampler(conv, scale, n_feats, act=False),
        #    conv(n_feats, args.n_colors, kernel_size)
        #]
        m_tail = [
            conv(n_feats, args.n_colors, kernel_size)
        ]

        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = nn.Sequential(*m_head)
        self.body = nn.Sequential(*m_body)
        self.tail = nn.Sequential(*m_tail)

    def forward(self, x):
        #x = self.sub_mean(x)
        x = self.head(x)

        res = self.body(x)
        res += x  # global residual connection

        x = self.tail(res)
        #x = self.add_mean(x)

        return x

    def load_state_dict(self, state_dict, strict=True):
        """Copy weights, tolerating tail mismatches (see model zoo docs)."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name in own_state:
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    if name.find('tail') == -1:
                        raise RuntimeError('While copying the parameter named {}, '
                                           'whose dimensions in the model are {} and '
                                           'whose dimensions in the checkpoint are {}.'
                                           .format(name, own_state[name].size(), param.size()))
            elif strict:
                if name.find('tail') == -1:
                    raise KeyError('unexpected key "{}" in state_dict'
                                   .format(name))
| 2,779 | 32.493976 | 104 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/attention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torchvision import utils as vutils
from model import common
from utils.tools import extract_image_patches,\
reduce_mean, reduce_sum, same_padding
class PyramidAttention(nn.Module):
    """Cross-scale non-local attention over a pyramid of rescaled features.

    For each pyramid level (scale factors 1.0, 0.9, ... 1-(level-1)/10),
    patches are extracted and used both as matching filters (after channel
    reduction) and as reconstruction filters; softmax-weighted matches from
    all levels are pasted back at full resolution and added to the input
    (scaled residual connection).
    """
    def __init__(self, level=5, res_scale=1, channel=64, reduction=2, ksize=3, stride=1, softmax_scale=10, average=True, conv=common.default_conv):
        super(PyramidAttention, self).__init__()
        self.ksize = ksize
        self.stride = stride
        self.res_scale = res_scale
        self.softmax_scale = softmax_scale
        # Pyramid scale factors: 1.0, 0.9, ..., 1 - (level-1)/10.
        self.scale = [1-i/10 for i in range(level)]
        self.average = average
        # Small constant guarding the patch-norm division in forward().
        escape_NaN = torch.FloatTensor([1e-4])
        self.register_buffer('escape_NaN', escape_NaN)
        # theta: reduced-channel embedding of the full-resolution query.
        self.conv_match_L_base = common.BasicBlock(conv,channel,channel//reduction, 1, bn=False, act=nn.PReLU())
        # g: reduced-channel embedding of the pyramid (key) features.
        self.conv_match = common.BasicBlock(conv,channel, channel//reduction, 1, bn=False, act=nn.PReLU())
        # f: full-channel features used for patch reconstruction.
        self.conv_assembly = common.BasicBlock(conv,channel, channel,1,bn=False, act=nn.PReLU())

    def forward(self, input):
        res = input
        #theta: query embedding of the full-resolution input
        match_base = self.conv_match_L_base(input)
        shape_base = list(res.size())
        input_groups = torch.split(match_base,1,dim=0)
        # patch size for matching
        kernel = self.ksize
        # raw_w is for reconstruction
        raw_w = []
        # w is for matching
        w = []
        #build feature pyramid: per level, collect matching + paste patches
        for i in range(len(self.scale)):
            ref = input
            if self.scale[i]!=1:
                ref = F.interpolate(input, scale_factor=self.scale[i], mode='bicubic')
            #feature transformation function f (reconstruction features)
            base = self.conv_assembly(ref)
            shape_input = base.shape
            #sampling: unfold into k x k patches
            raw_w_i = extract_image_patches(base, ksizes=[kernel, kernel],
                                      strides=[self.stride,self.stride],
                                      rates=[1, 1],
                                      padding='same') # [N, C*k*k, L]
            raw_w_i = raw_w_i.view(shape_input[0], shape_input[1], kernel, kernel, -1)
            raw_w_i = raw_w_i.permute(0, 4, 1, 2, 3) # raw_shape: [N, L, C, k, k]
            raw_w_i_groups = torch.split(raw_w_i, 1, dim=0)
            raw_w.append(raw_w_i_groups)

            #feature transformation function g (matching features)
            ref_i = self.conv_match(ref)
            shape_ref = ref_i.shape
            #sampling
            w_i = extract_image_patches(ref_i, ksizes=[self.ksize, self.ksize],
                                  strides=[self.stride, self.stride],
                                  rates=[1, 1],
                                  padding='same')
            w_i = w_i.view(shape_ref[0], shape_ref[1], self.ksize, self.ksize, -1)
            w_i = w_i.permute(0, 4, 1, 2, 3) # w shape: [N, L, C, k, k]
            w_i_groups = torch.split(w_i, 1, dim=0)
            w.append(w_i_groups)

        y = []
        # Process the batch one sample at a time.
        for idx, xi in enumerate(input_groups):
            #group in a filter: all pyramid levels' patches as conv filters
            wi = torch.cat([w[i][idx][0] for i in range(len(self.scale))],dim=0) # [L, C, k, k]
            #normalize each patch-filter to unit L2 norm (guarded by escape_NaN)
            max_wi = torch.max(torch.sqrt(reduce_sum(torch.pow(wi, 2),
                                                     axis=[1, 2, 3],
                                                     keepdim=True)),
                               self.escape_NaN)
            wi_normed = wi/ max_wi
            #matching: correlation of the query with every patch
            xi = same_padding(xi, [self.ksize, self.ksize], [1, 1], [1, 1]) # xi: 1*c*H*W
            yi = F.conv2d(xi, wi_normed, stride=1) # [1, L, H, W] L = shape_ref[2]*shape_ref[3]
            yi = yi.view(1,wi.shape[0], shape_base[2], shape_base[3]) # (B=1, C=32*32, H=32, W=32)
            # softmax matching score across all candidate patches
            yi = F.softmax(yi*self.softmax_scale, dim=1)
            if self.average == False:
                # hard attention: keep only the best-matching patch
                yi = (yi == yi.max(dim=1,keepdim=True)[0]).float()
            # deconv for patch pasting: paste reconstruction patches back
            raw_wi = torch.cat([raw_w[i][idx][0] for i in range(len(self.scale))],dim=0)
            yi = F.conv_transpose2d(yi, raw_wi, stride=self.stride,padding=1)/4.
            y.append(yi)
        y = torch.cat(y, dim=0)+res*self.res_scale # back to the mini-batch
        return y
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/vdsr.py | from model import common
import torch.nn as nn
import torch.nn.init as init
# Pretrained-weight download URLs keyed by 'r{n_resblocks}f{n_feats}'.
# The only entry (r20f64) is empty: no pretrained VDSR download is set up.
url = {
    'r20f64': ''
}
def make_model(args, parent=False):
    """Framework factory hook: build a VDSR from the parsed options."""
    model = VDSR(args)
    return model
class VDSR(nn.Module):
    """VDSR: a deep stack of conv+ReLU layers with one global residual."""

    def __init__(self, args, conv=common.default_conv):
        super(VDSR, self).__init__()

        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        # NOTE(review): raises KeyError for configurations without a url
        # entry (only 'r20f64' exists) — confirm intended.
        self.url = url['r{}f{}'.format(n_resblocks, n_feats)]
        self.sub_mean = common.MeanShift(args.rgb_range)
        self.add_mean = common.MeanShift(args.rgb_range, sign=1)

        def basic_block(in_channels, out_channels, act):
            # Plain conv (+ optional activation), no batch norm.
            return common.BasicBlock(
                conv, in_channels, out_channels, kernel_size,
                bias=True, bn=False, act=act
            )

        # Input conv, (n_resblocks - 2) hidden convs, linear output conv.
        body = [basic_block(args.n_colors, n_feats, nn.ReLU(True))]
        body.extend(
            basic_block(n_feats, n_feats, nn.ReLU(True))
            for _ in range(n_resblocks - 2)
        )
        body.append(basic_block(n_feats, args.n_colors, None))
        self.body = nn.Sequential(*body)

    def forward(self, x):
        shifted = self.sub_mean(x)
        res = self.body(shifted)
        res += shifted  # global residual: network learns the detail only
        return self.add_mean(res)
| 1,275 | 26.148936 | 73 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/utils/tools.py | import os
import torch
import numpy as np
from PIL import Image
import torch.nn.functional as F
def normalize(x):
    """Rescale *x* from [0, 1] to [-1, 1] **in place** and return it."""
    x.mul_(2)
    return x.add_(-1)
def same_padding(images, ksizes, strides, rates):
    """Zero-pad an NCHW tensor so a sliding window covers it TF-'SAME' style.

    Computes the padding needed for ceil(size / stride) output positions
    with the dilated kernel, splitting it so the extra pixel (for odd
    totals) lands on the bottom/right.
    """
    assert len(images.size()) == 4
    _, _, rows, cols = images.size()
    # Output size of a 'same' convolution: ceil(input / stride).
    out_rows = (rows + strides[0] - 1) // strides[0]
    out_cols = (cols + strides[1] - 1) // strides[1]
    # Effective (dilated) kernel extent.
    eff_k_row = (ksizes[0] - 1) * rates[0] + 1
    eff_k_col = (ksizes[1] - 1) * rates[1] + 1
    pad_rows = max(0, (out_rows - 1) * strides[0] + eff_k_row - rows)
    pad_cols = max(0, (out_cols - 1) * strides[1] + eff_k_col - cols)
    top = pad_rows // 2
    left = pad_cols // 2
    paddings = (left, pad_cols - left, top, pad_rows - top)
    return torch.nn.ZeroPad2d(paddings)(images)
def extract_image_patches(images, ksizes, strides, rates, padding='same'):
    """Unfold *images* (NCHW) into sliding patches.

    :param images: 4-D tensor [batch, channels, rows, cols]
    :param ksizes: [ksize_rows, ksize_cols] window size
    :param strides: [stride_rows, stride_cols]
    :param rates: [dilation_rows, dilation_cols]
    :param padding: 'same' (TF-style zero pad) or 'valid' (no pad)
    :return: tensor [N, C*k*k, L] where L is the number of windows
    """
    assert len(images.size()) == 4
    assert padding in ['same', 'valid']

    if padding == 'same':
        images = same_padding(images, ksizes, strides, rates)
    elif padding == 'valid':
        pass
    else:
        # Unreachable while the assert above is active; kept for safety
        # when asserts are stripped (python -O).
        raise NotImplementedError('Unsupported padding type: {}.\
                Only "same" or "valid" are supported.'.format(padding))

    unfold = torch.nn.Unfold(kernel_size=ksizes,
                             dilation=rates,
                             padding=0,
                             stride=strides)
    return unfold(images)  # [N, C*k*k, L]
def reduce_mean(x, axis=None, keepdim=False):
    """Mean of *x* over *axis* (int or iterable of ints; None = all dims).

    Dims are reduced from highest to lowest so the remaining indices stay
    valid during the loop.
    """
    if axis is None:
        axis = range(len(x.shape))
    elif isinstance(axis, int):
        # BUGFIX: the old `if not axis` test treated axis=0 (and axis=[])
        # as "reduce every dim" and crashed sorted() on a bare int; a
        # single int now reduces just that dim.
        axis = [axis]
    for i in sorted(axis, reverse=True):
        x = torch.mean(x, dim=i, keepdim=keepdim)
    return x
def reduce_std(x, axis=None, keepdim=False):
    """Standard deviation of *x* over *axis* (int or iterable; None = all).

    Dims are reduced from highest to lowest so the remaining indices stay
    valid during the loop.
    """
    if axis is None:
        axis = range(len(x.shape))
    elif isinstance(axis, int):
        # BUGFIX: the old `if not axis` test treated axis=0 (and axis=[])
        # as "reduce every dim" and crashed sorted() on a bare int; a
        # single int now reduces just that dim.
        axis = [axis]
    for i in sorted(axis, reverse=True):
        x = torch.std(x, dim=i, keepdim=keepdim)
    return x
def reduce_sum(x, axis=None, keepdim=False):
    """Sum of *x* over *axis* (int or iterable of ints; None = all dims).

    Dims are reduced from highest to lowest so the remaining indices stay
    valid during the loop.
    """
    if axis is None:
        axis = range(len(x.shape))
    elif isinstance(axis, int):
        # BUGFIX: the old `if not axis` test treated axis=0 (and axis=[])
        # as "reduce every dim" and crashed sorted() on a bare int; a
        # single int now reduces just that dim.
        axis = [axis]
    for i in sorted(axis, reverse=True):
        x = torch.sum(x, dim=i, keepdim=keepdim)
    return x
| 2,777 | 32.878049 | 79 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/Demosaic/code/model/utils/__init__.py | 0 | 0 | 0 | py | |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/main.py | import torch
import utility
import data
import model
import loss
from option import args
from trainer import Trainer
# Seed PyTorch once at import time so runs with the same --seed reproduce.
torch.manual_seed(args.seed)
# Shared experiment bookkeeping (log dirs, PSNR history, config dump).
checkpoint = utility.checkpoint(args)
def main():
    """Entry point: dispatch to the video tester or the train/test loop."""
    global model
    if args.data_test == ['video']:
        # Video demo path: run the model frame-by-frame, no training.
        from videotester import VideoTester
        # NOTE(review): rebinding the global `model` name here shadows the
        # imported `model` module for the rest of the process — confirm.
        model = model.Model(args, checkpoint)
        print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
        t = VideoTester(args, model, checkpoint)
        t.test()
    else:
        # Standard image path: alternate train/test until termination
        # (t.terminate() also handles the --test_only case).
        if checkpoint.ok:
            loader = data.Data(args)
            _model = model.Model(args, checkpoint)
            print('Total params: %.2fM' % (sum(p.numel() for p in _model.parameters())/1000000.0))
            _loss = loss.Loss(args, checkpoint) if not args.test_only else None
            t = Trainer(args, loader, _model, _loss, checkpoint)
            while not t.terminate():
                t.train()
                t.test()

            checkpoint.done()
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 1,028 | 27.583333 | 98 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/utility.py | import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
    """Stopwatch that can accumulate elapsed spans across hold() calls."""

    def __init__(self):
        self.acc = 0
        self.tic()

    def tic(self):
        """Start (or restart) the current span."""
        self.t0 = time.time()

    def toc(self, restart=False):
        """Return seconds since the last tic(); optionally restart."""
        elapsed = time.time() - self.t0
        if restart:
            self.t0 = time.time()
        return elapsed

    def hold(self):
        """Add the current span to the accumulator."""
        self.acc += self.toc()

    def release(self):
        """Return the accumulated time and clear the accumulator."""
        total, self.acc = self.acc, 0
        return total

    def reset(self):
        """Clear the accumulator without touching the running span."""
        self.acc = 0
class checkpoint():
    """Experiment bookkeeping: directories, logs, PSNR history, result dump.

    Creates ../experiment/<save>/ (or resumes <load>), keeps log.txt and
    config.txt, accumulates a [epoch, dataset, scale] PSNR tensor, and
    writes result images through a small pool of background processes.
    """
    def __init__(self, args):
        self.args = args
        self.ok = True
        self.log = torch.Tensor()  # PSNR history, grown by add_log()
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')

        if not args.load:
            if not args.save:
                args.save = now  # default experiment name: timestamp
            self.dir = os.path.join('..', 'experiment', args.save)
        else:
            self.dir = os.path.join('..', 'experiment', args.load)
            if os.path.exists(self.dir):
                # Resuming: restore the PSNR log to continue the curves.
                self.log = torch.load(self.get_path('psnr_log.pt'))
                print('Continue from epoch {}...'.format(len(self.log)))
            else:
                args.load = ''

        if args.reset:
            # NOTE(review): shells out to `rm -rf` on a path built from
            # user-supplied args, with no quoting or validation.
            os.system('rm -rf ' + self.dir)
            args.load = ''

        os.makedirs(self.dir, exist_ok=True)
        os.makedirs(self.get_path('model'), exist_ok=True)
        for d in args.data_test:
            os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)

        open_type = 'a' if os.path.exists(self.get_path('log.txt'))else 'w'
        self.log_file = open(self.get_path('log.txt'), open_type)
        with open(self.get_path('config.txt'), open_type) as f:
            f.write(now + '\n\n')
            for arg in vars(args):
                f.write('{}: {}\n'.format(arg, getattr(args, arg)))
            f.write('\n')

        self.n_processes = 8  # background image-writer processes

    def get_path(self, *subdir):
        """Join *subdir* under the experiment directory."""
        return os.path.join(self.dir, *subdir)

    def save(self, trainer, epoch, is_best=False):
        """Persist model, loss, optimizer state and refresh all plots."""
        trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
        trainer.loss.save(self.dir)
        trainer.loss.plot_loss(self.dir, epoch)

        self.plot_psnr(epoch)
        trainer.optimizer.save(self.dir)
        torch.save(self.log, self.get_path('psnr_log.pt'))

    def add_log(self, log):
        # Append one epoch's PSNR row to the history tensor.
        self.log = torch.cat([self.log, log])

    def write_log(self, log, refresh=False):
        """Print *log* and append it to log.txt (reopen when refresh)."""
        print(log)
        self.log_file.write(log + '\n')
        if refresh:
            self.log_file.close()
            self.log_file = open(self.get_path('log.txt'), 'a')

    def done(self):
        self.log_file.close()

    def plot_psnr(self, epoch):
        """Save one PSNR-vs-epoch PDF per test dataset."""
        axis = np.linspace(1, epoch, epoch)
        for idx_data, d in enumerate(self.args.data_test):
            label = 'SR on {}'.format(d)
            fig = plt.figure()
            plt.title(label)
            for idx_scale, scale in enumerate(self.args.scale):
                plt.plot(
                    axis,
                    self.log[:, idx_data, idx_scale].numpy(),
                    label='Scale {}'.format(scale)
                )
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('PSNR')
            plt.grid(True)
            plt.savefig(self.get_path('test_{}.pdf'.format(d)))
            plt.close(fig)

    def begin_background(self):
        """Start the background processes that write result images."""
        self.queue = Queue()

        def bg_target(queue):
            # Drain the queue until a (None, None) poison pill arrives.
            while True:
                if not queue.empty():
                    filename, tensor = queue.get()
                    if filename is None: break
                    imageio.imwrite(filename, tensor.numpy())

        self.process = [
            Process(target=bg_target, args=(self.queue,)) \
            for _ in range(self.n_processes)
        ]

        for p in self.process: p.start()

    def end_background(self):
        """Send one poison pill per worker, drain the queue and join."""
        for _ in range(self.n_processes): self.queue.put((None, None))
        while not self.queue.empty(): time.sleep(1)
        for p in self.process: p.join()

    def save_results(self, dataset, filename, save_list, scale):
        """Queue SR/LR/HR tensors for asynchronous PNG writing."""
        if self.args.save_results:
            filename = self.get_path(
                'results-{}'.format(dataset.dataset.name),
                '{}_x{}_'.format(filename, scale)
            )

            postfix = ('SR', 'LR', 'HR')
            for v, p in zip(save_list, postfix):
                # Rescale to 8-bit HWC layout before handing to a writer.
                normalized = v[0].mul(255 / self.args.rgb_range)
                tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
                self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))
def quantize(img, rgb_range):
    """Snap *img* to valid 8-bit levels while keeping the [0, rgb_range] scale."""
    scale = 255 / rgb_range
    quantized = img.mul(scale).clamp(0, 255).round()
    return quantized.div(scale)
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
    """PSNR (dB) between *sr* and *hr*, shaving a border before the MSE.

    Benchmark datasets use a `scale`-pixel shave and measure on luminance;
    otherwise `scale + 6` pixels are shaved and RGB is used directly.
    Returns 0 when *hr* is a placeholder single-element tensor.
    """
    if hr.nelement() == 1:
        return 0

    err = (sr - hr) / rgb_range
    if dataset and dataset.dataset.benchmark:
        shave = scale
        if err.size(1) > 1:
            # Collapse RGB to luminance with the standard BT.601 weights.
            luma_weights = err.new_tensor([65.738, 129.057, 25.064])
            err = err.mul(luma_weights.view(1, 3, 1, 1) / 256).sum(dim=1)
    else:
        shave = scale + 6

    cropped = err[..., shave:-shave, shave:-shave]
    mse = cropped.pow(2).mean()
    return -10 * math.log10(mse)
def make_optimizer(args, target):
    '''
    Build the optimizer (SGD / ADAM / RMSprop from args.optimizer) together
    with a MultiStepLR scheduler, wrapped in a CustomOptimizer that adds
    save/load and scheduling helpers.
    '''
    # optimizer: only parameters that still require gradients
    trainable = filter(lambda x: x.requires_grad, target.parameters())
    kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}

    # NOTE(review): an unrecognized args.optimizer string falls through all
    # branches and leaves optimizer_class unbound (NameError below).
    if args.optimizer == 'SGD':
        optimizer_class = optim.SGD
        kwargs_optimizer['momentum'] = args.momentum
    elif args.optimizer == 'ADAM':
        optimizer_class = optim.Adam
        kwargs_optimizer['betas'] = args.betas
        kwargs_optimizer['eps'] = args.epsilon
    elif args.optimizer == 'RMSprop':
        optimizer_class = optim.RMSprop
        kwargs_optimizer['eps'] = args.epsilon

    # scheduler: decay milestones parsed from e.g. '200-400-600'
    milestones = list(map(lambda x: int(x), args.decay.split('-')))
    kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
    scheduler_class = lrs.MultiStepLR

    class CustomOptimizer(optimizer_class):
        """Chosen optimizer subclass carrying its own scheduler + I/O."""
        def __init__(self, *args, **kwargs):
            super(CustomOptimizer, self).__init__(*args, **kwargs)

        def _register_scheduler(self, scheduler_class, **kwargs):
            self.scheduler = scheduler_class(self, **kwargs)

        def save(self, save_dir):
            torch.save(self.state_dict(), self.get_dir(save_dir))

        def load(self, load_dir, epoch=1):
            """Restore state; fast-forward the scheduler when resuming.

            NOTE(review): this steps the scheduler `epoch` times for
            epoch > 1 — verify against the trainer's epoch numbering
            (possible off-by-one).
            """
            self.load_state_dict(torch.load(self.get_dir(load_dir)))
            if epoch > 1:
                for _ in range(epoch): self.scheduler.step()

        def get_dir(self, dir_path):
            return os.path.join(dir_path, 'optimizer.pt')

        def schedule(self):
            self.scheduler.step()

        def get_lr(self):
            # First param group's current learning rate.
            return self.scheduler.get_lr()[0]

        def get_last_epoch(self):
            return self.scheduler.last_epoch

    optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
    optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
    return optimizer
| 7,480 | 30.432773 | 77 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/dataloader.py | import threading
import random
import torch
import torch.multiprocessing as multiprocessing
from torch.utils.data import DataLoader
from torch.utils.data import SequentialSampler
from torch.utils.data import RandomSampler
from torch.utils.data import BatchSampler
from torch.utils.data import _utils
from torch.utils.data.dataloader import _DataLoaderIter
from torch.utils.data._utils import collate
from torch.utils.data._utils import signal_handling
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data._utils import ExceptionWrapper
from torch.utils.data._utils import IS_WINDOWS
from torch.utils.data._utils.worker import ManagerWatchdog
from torch._six import queue
def _ms_loop(dataset, index_queue, data_queue, done_event, collate_fn, scale, seed, init_fn, worker_id):
    """Worker-process loop that collates batches and tags them with a scale.

    Mirrors torch's internal worker loop, but additionally picks a random
    scale index per batch during training (multi-scale SR) and appends it
    to the collated sample list.
    """
    # BUGFIX: `sys` was never imported at module level, so the
    # error-forwarding branch below raised NameError instead of shipping
    # the real exception back to the main process.
    import sys
    try:
        collate._use_shared_memory = True
        signal_handling._set_worker_signal_handlers()

        # Keep each worker single-threaded and deterministically seeded.
        torch.set_num_threads(1)
        random.seed(seed)
        torch.manual_seed(seed)

        data_queue.cancel_join_thread()

        if init_fn is not None:
            init_fn(worker_id)

        watchdog = ManagerWatchdog()

        while watchdog.is_alive():
            try:
                r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
            except queue.Empty:
                continue

            if r is None:
                # The main process asked us to shut down.
                assert done_event.is_set()
                return
            elif done_event.is_set():
                # Shutdown was requested; drain remaining work items.
                continue

            idx, batch_indices = r
            try:
                idx_scale = 0
                if len(scale) > 1 and dataset.train:
                    # Train-time multi-scale: one random scale per batch.
                    idx_scale = random.randrange(0, len(scale))
                    dataset.set_scale(idx_scale)

                samples = collate_fn([dataset[i] for i in batch_indices])
                samples.append(idx_scale)
            except Exception:
                data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
            else:
                data_queue.put((idx, samples))
                del samples

    except KeyboardInterrupt:
        pass
class _MSDataLoaderIter(_DataLoaderIter):
    """DataLoader iterator that spawns _ms_loop workers (multi-scale).

    Re-implements _DataLoaderIter.__init__ so the worker processes run
    this package's _ms_loop instead of torch's default worker, passing the
    scale list through to each worker.
    """
    def __init__(self, loader):
        self.dataset = loader.dataset
        self.scale = loader.scale
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = loader.pin_memory and torch.cuda.is_available()
        self.timeout = loader.timeout

        self.sample_iter = iter(self.batch_sampler)

        base_seed = torch.LongTensor(1).random_().item()

        if self.num_workers > 0:
            self.worker_init_fn = loader.worker_init_fn
            self.worker_queue_idx = 0
            self.worker_result_queue = multiprocessing.Queue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}
            self.done_event = multiprocessing.Event()

            # NOTE(review): base_seed is recomputed here (as a 0-dim tensor
            # element), overwriting the .item() value above — confirm which
            # one is intended.
            base_seed = torch.LongTensor(1).random_()[0]

            self.index_queues = []
            self.workers = []
            # One index queue + one _ms_loop process per worker, each with
            # a distinct derived seed.
            for i in range(self.num_workers):
                index_queue = multiprocessing.Queue()
                index_queue.cancel_join_thread()
                w = multiprocessing.Process(
                    target=_ms_loop,
                    args=(
                        self.dataset,
                        index_queue,
                        self.worker_result_queue,
                        self.done_event,
                        self.collate_fn,
                        self.scale,
                        base_seed + i,
                        self.worker_init_fn,
                        i
                    )
                )
                w.daemon = True
                w.start()
                self.index_queues.append(index_queue)
                self.workers.append(w)

            if self.pin_memory:
                # Extra thread moves finished batches into pinned memory.
                self.data_queue = queue.Queue()
                pin_memory_thread = threading.Thread(
                    target=_utils.pin_memory._pin_memory_loop,
                    args=(
                        self.worker_result_queue,
                        self.data_queue,
                        torch.cuda.current_device(),
                        self.done_event
                    )
                )
                pin_memory_thread.daemon = True
                pin_memory_thread.start()
                self.pin_memory_thread = pin_memory_thread
            else:
                self.data_queue = self.worker_result_queue

            # Register worker pids so torch can reap them on SIGCHLD.
            _utils.signal_handling._set_worker_pids(
                id(self), tuple(w.pid for w in self.workers)
            )
            _utils.signal_handling._set_SIGCHLD_handler()
            self.worker_pids_set = True

            # Prefetch: keep two batches in flight per worker.
            for _ in range(2 * self.num_workers):
                self._put_indices()
class MSDataLoader(DataLoader):
    """DataLoader whose iterator hands the configured scale list to workers.

    ``cfg`` must provide ``n_threads`` (worker count, forwarded as
    ``num_workers``) and ``scale`` (list of SR scale factors); every other
    argument is passed through to ``torch.utils.data.DataLoader`` unchanged.
    """

    def __init__(self, cfg, *args, **kwargs):
        super().__init__(*args, num_workers=cfg.n_threads, **kwargs)
        self.scale = cfg.scale

    def __iter__(self):
        # Use the multi-scale-aware iterator instead of the stock one.
        return _MSDataLoaderIter(self)
| 5,259 | 32.081761 | 104 | py |
def set_template(args):
    """Overwrite parsed options in place based on the --template string.

    Several templates may apply at once: each check is an independent
    substring match against ``args.template``.
    """
    tmpl = args.template
    if 'jpeg' in tmpl:
        args.data_train = 'DIV2K_jpeg'
        args.data_test = 'DIV2K_jpeg'
        args.epochs = 200
        args.decay = '100'

    if 'EDSR_paper' in tmpl:
        args.model = 'EDSR'
        args.n_resblocks = 32
        args.n_feats = 256
        args.res_scale = 0.1

    if 'MDSR' in tmpl:
        args.model = 'MDSR'
        args.patch_size = 48
        args.epochs = 650

    if 'DDBPN' in tmpl:
        args.model = 'DDBPN'
        args.patch_size = 128
        args.scale = '4'

        args.data_test = 'Set5'

        args.batch_size = 20
        args.epochs = 1000
        args.decay = '500'
        args.gamma = 0.1
        args.weight_decay = 1e-4

        args.loss = '1*MSE'

    if 'GAN' in tmpl:
        args.epochs = 200
        args.lr = 5e-5
        args.decay = '150'

    if 'RCAN' in tmpl:
        args.model = 'RCAN'
        args.n_resgroups = 10
        args.n_resblocks = 20
        args.n_feats = 64
        args.chop = True

    if 'VDSR' in tmpl:
        args.model = 'VDSR'
        args.n_resblocks = 20
        args.n_feats = 64
        args.patch_size = 41
        args.lr = 1e-1
| 1,312 | 23.314815 | 45 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/option.py | import argparse
import template
# EDSR/MDSR command-line options.  Parsing happens at import time, so
# importing this module consumes sys.argv.
parser = argparse.ArgumentParser(description='EDSR and MDSR')

parser.add_argument('--debug', action='store_true',
                    help='Enables debug mode')
parser.add_argument('--template', default='.',
                    help='You can set various templates in option.py')

# Hardware specifications
parser.add_argument('--n_threads', type=int, default=18,
                    help='number of threads for data loading')
parser.add_argument('--cpu', action='store_true',
                    help='use cpu only')
parser.add_argument('--n_GPUs', type=int, default=1,
                    help='number of GPUs')
parser.add_argument('--seed', type=int, default=1,
                    help='random seed')
parser.add_argument('--local_rank',type=int, default=0)

# Data specifications
parser.add_argument('--dir_data', type=str, default='../../../',
                    help='dataset directory')
parser.add_argument('--dir_demo', type=str, default='../Demo',
                    help='demo image directory')
parser.add_argument('--data_train', type=str, default='DIV2K',
                    help='train dataset name')
parser.add_argument('--data_test', type=str, default='DIV2K',
                    help='test dataset name')
parser.add_argument('--data_range', type=str, default='1-800/801-810',
                    help='train/test data range')
parser.add_argument('--ext', type=str, default='sep',
                    help='dataset file extension')
parser.add_argument('--scale', type=str, default='4',
                    help='super resolution scale')
parser.add_argument('--patch_size', type=int, default=192,
                    help='output patch size')
parser.add_argument('--rgb_range', type=int, default=255,
                    help='maximum value of RGB')
parser.add_argument('--n_colors', type=int, default=3,
                    help='number of color channels to use')
parser.add_argument('--chop', action='store_true',
                    help='enable memory-efficient forward')
parser.add_argument('--no_augment', action='store_true',
                    help='do not use data augmentation')

# Model specifications
parser.add_argument('--model', default='EDSR',
                    help='model name')
parser.add_argument('--act', type=str, default='relu',
                    help='activation function')
parser.add_argument('--pre_train', type=str, default='.',
                    help='pre-trained model directory')
parser.add_argument('--extend', type=str, default='.',
                    help='pre-trained model directory')
parser.add_argument('--n_resblocks', type=int, default=20,
                    help='number of residual blocks')
parser.add_argument('--n_feats', type=int, default=64,
                    help='number of feature maps')
parser.add_argument('--res_scale', type=float, default=1,
                    help='residual scaling')
parser.add_argument('--shift_mean', default=True,
                    help='subtract pixel mean from the input')
parser.add_argument('--dilation', action='store_true',
                    help='use dilated convolution')
parser.add_argument('--precision', type=str, default='single',
                    choices=('single', 'half'),
                    help='FP precision for test (single | half)')

# Option for Residual dense network (RDN)
parser.add_argument('--G0', type=int, default=64,
                    help='default number of filters. (Use in RDN)')
parser.add_argument('--RDNkSize', type=int, default=3,
                    help='default kernel size. (Use in RDN)')
parser.add_argument('--RDNconfig', type=str, default='B',
                    help='parameters config of RDN. (Use in RDN)')
parser.add_argument('--depth', type=int, default=12,
                    help='number of residual groups')

# Option for Residual channel attention network (RCAN)
parser.add_argument('--n_resgroups', type=int, default=10,
                    help='number of residual groups')
parser.add_argument('--reduction', type=int, default=16,
                    help='number of feature maps reduction')

# Training specifications
parser.add_argument('--reset', action='store_true',
                    help='reset the training')
parser.add_argument('--test_every', type=int, default=1000,
                    help='do test per every N batches')
parser.add_argument('--epochs', type=int, default=1000,
                    help='number of epochs to train')
parser.add_argument('--batch_size', type=int, default=16,
                    help='input batch size for training')
parser.add_argument('--split_batch', type=int, default=1,
                    help='split the batch into smaller chunks')
parser.add_argument('--self_ensemble', action='store_true',
                    help='use self-ensemble method for test')
parser.add_argument('--test_only', action='store_true',
                    help='set this option to test the model')
parser.add_argument('--gan_k', type=int, default=1,
                    help='k value for adversarial loss')

# Optimization specifications
parser.add_argument('--lr', type=float, default=1e-4,
                    help='learning rate')
parser.add_argument('--decay', type=str, default='150',
                    help='learning rate decay type')
parser.add_argument('--gamma', type=float, default=0.5,
                    help='learning rate decay factor for step decay')
parser.add_argument('--optimizer', default='ADAM',
                    choices=('SGD', 'ADAM', 'RMSprop'),
                    help='optimizer to use (SGD | ADAM | RMSprop)')
parser.add_argument('--momentum', type=float, default=0.9,
                    help='SGD momentum')
# NOTE(review): type=tuple turns a command-line string into a tuple of
# single characters; only the default (0.9, 0.999) behaves as intended.
# Confirm whether --betas is ever meant to be set from the CLI.
parser.add_argument('--betas', type=tuple, default=(0.9, 0.999),
                    help='ADAM beta')
parser.add_argument('--epsilon', type=float, default=1e-8,
                    help='ADAM epsilon for numerical stability')
parser.add_argument('--weight_decay', type=float, default=0,
                    help='weight decay')
parser.add_argument('--gclip', type=float, default=0,
                    help='gradient clipping threshold (0 = no clipping)')

# Loss specifications
parser.add_argument('--loss', type=str, default='1*L1',
                    help='loss function configuration')
# The string default '1e8' is coerced through type=float by argparse
# (argparse applies `type` to string defaults).
parser.add_argument('--skip_threshold', type=float, default='1e8',
                    help='skipping batch that has large error')

# Log specifications
parser.add_argument('--save', type=str, default='test',
                    help='file name to save')
parser.add_argument('--load', type=str, default='',
                    help='file name to load')
parser.add_argument('--resume', type=int, default=0,
                    help='resume from specific checkpoint')
parser.add_argument('--save_models', action='store_true',
                    help='save all intermediate models')
parser.add_argument('--print_every', type=int, default=100,
                    help='how many batches to wait before logging training status')
parser.add_argument('--save_results', action='store_true',
                    help='save output results')
parser.add_argument('--save_gt', action='store_true',
                    help='save low-resolution and high-resolution images together')

args = parser.parse_args()
template.set_template(args)

# '2+3+4' -> [2, 3, 4]; datasets are '+'-separated lists as well.
args.scale = list(map(lambda x: int(x), args.scale.split('+')))
args.data_train = args.data_train.split('+')
args.data_test = args.data_test.split('+')

if args.epochs == 0:
    args.epochs = 1e8

# Coerce string 'True'/'False' values (e.g. injected by templates) to
# real booleans.
for arg in vars(args):
    if vars(args)[arg] == 'True':
        vars(args)[arg] = True
    elif vars(args)[arg] == 'False':
        vars(args)[arg] = False
| 7,645 | 45.621951 | 83 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/__init__.py | 0 | 0 | 0 | py | |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/videotester.py | import os
import math
import utility
from data import common
import torch
import cv2
from tqdm import tqdm
class VideoTester():
    """Runs a trained SR model frame-by-frame over a video file.

    Reads ``args.dir_demo`` with OpenCV, upscales every frame for each
    scale in ``args.scale`` and writes one XVID-encoded ``.avi`` per scale.
    """

    def __init__(self, args, my_model, ckp):
        self.args = args
        self.scale = args.scale
        self.ckp = ckp           # checkpoint/logging helper
        self.model = my_model

        # Output files are named after the input video (extension dropped).
        self.filename, _ = os.path.splitext(os.path.basename(args.dir_demo))

    def test(self):
        """Upscale the whole video once per configured scale."""
        torch.set_grad_enabled(False)   # inference only

        self.ckp.write_log('\nEvaluation on video:')
        self.model.eval()

        timer_test = utility.timer()
        for idx_scale, scale in enumerate(self.scale):
            vidcap = cv2.VideoCapture(self.args.dir_demo)
            total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
            # Writer keeps the source FPS; frame size is scaled up.
            vidwri = cv2.VideoWriter(
                self.ckp.get_path('{}_x{}.avi'.format(self.filename, scale)),
                cv2.VideoWriter_fourcc(*'XVID'),
                vidcap.get(cv2.CAP_PROP_FPS),
                (
                    int(scale * vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(scale * vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                )
            )

            tqdm_test = tqdm(range(total_frames), ncols=80)
            for _ in tqdm_test:
                success, lr = vidcap.read()
                if not success: break

                lr, = common.set_channel(lr, n_channels=self.args.n_colors)
                lr, = common.np2Tensor(lr, rgb_range=self.args.rgb_range)
                lr, = self.prepare(lr.unsqueeze(0))   # add batch dimension
                sr = self.model(lr, idx_scale)
                sr = utility.quantize(sr, self.args.rgb_range).squeeze(0)

                # Convert back to an 8-bit HWC numpy frame for OpenCV.
                normalized = sr * 255 / self.args.rgb_range
                ndarr = normalized.byte().permute(1, 2, 0).cpu().numpy()
                vidwri.write(ndarr)

            vidcap.release()
            vidwri.release()

        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )
        torch.set_grad_enabled(True)    # restore autograd for callers

    def prepare(self, *args):
        """Move tensors to the target device (half precision if configured)."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')
        def _prepare(tensor):
            if self.args.precision == 'half': tensor = tensor.half()
            return tensor.to(device)

        return [_prepare(a) for a in args]
| 2,280 | 30.246575 | 77 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/trainer.py | import os
import math
from decimal import Decimal
import utility
import torch
import torch.nn.utils as utils
from tqdm import tqdm
class Trainer():
    """Drives training and evaluation of a single SR model.

    Wraps the data loaders, model, loss and optimizer; ``train`` and
    ``test`` are invoked once per epoch by the caller until ``terminate``
    reports completion.
    """

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale
        self.ckp = ckp                      # checkpoint/logging helper
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)

        # Resuming: restore optimizer state up to the last logged epoch.
        if self.args.load != '':
            self.optimizer.load(ckp.dir, epoch=len(ckp.log))

        self.error_last = 1e8

    def train(self):
        """Run one training epoch over loader_train."""
        self.loss.step()
        epoch = self.optimizer.get_last_epoch() + 1
        lr = self.optimizer.get_lr()

        self.ckp.write_log(
            '[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr))
        )
        self.loss.start_log()
        self.model.train()

        timer_data, timer_model = utility.timer(), utility.timer()
        # TEMP
        # Train on the first scale only (multi-scale switching disabled).
        self.loader_train.dataset.set_scale(0)
        for batch, (lr, hr, _,) in enumerate(self.loader_train):
            lr, hr = self.prepare(lr, hr)
            timer_data.hold()
            timer_model.tic()

            self.optimizer.zero_grad()
            sr = self.model(lr, 0)
            loss = self.loss(sr, hr)
            loss.backward()
            if self.args.gclip > 0:
                # Optional gradient value clipping for stability.
                utils.clip_grad_value_(
                    self.model.parameters(),
                    self.args.gclip
                )
            self.optimizer.step()

            timer_model.hold()

            if (batch + 1) % self.args.print_every == 0:
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(
                    (batch + 1) * self.args.batch_size,
                    len(self.loader_train.dataset),
                    self.loss.display_loss(batch),
                    timer_model.release(),
                    timer_data.release()))

            timer_data.tic()

        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[-1, -1]
        self.optimizer.schedule()

    def test(self):
        """Evaluate on every test set and scale; log PSNR and save outputs."""
        torch.set_grad_enabled(False)

        epoch = self.optimizer.get_last_epoch()
        self.ckp.write_log('\nEvaluation:')
        # One PSNR slot per (dataset, scale) pair for this epoch.
        self.ckp.add_log(
            torch.zeros(1, len(self.loader_test), len(self.scale))
        )
        self.model.eval()

        timer_test = utility.timer()
        if self.args.save_results: self.ckp.begin_background()
        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.scale):
                d.dataset.set_scale(idx_scale)
                for lr, hr, filename in tqdm(d, ncols=80):
                    lr, hr = self.prepare(lr, hr)
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)

                    save_list = [sr]
                    self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d
                    )
                    if self.args.save_gt:
                        save_list.extend([lr, hr])

                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)

                # Running sum over images -> mean PSNR for this pair.
                self.ckp.log[-1, idx_data, idx_scale] /= len(d)
                best = self.ckp.log.max(0)
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1  # log row 0 == epoch 1
                    )
                )

        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')

        if self.args.save_results:
            self.ckp.end_background()

        if not self.args.test_only:
            # is_best keys off the first (dataset, scale) pair only.
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

        self.ckp.write_log(
            'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )

        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move tensors to the target device (half precision if configured)."""
        device = torch.device('cpu' if self.args.cpu else 'cuda')
        def _prepare(tensor):
            if self.args.precision == 'half': tensor = tensor.half()
            return tensor.to(device)

        return [_prepare(a) for a in args]

    def terminate(self):
        """Return True when the run should stop (test-only or epochs done)."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = self.optimizer.get_last_epoch() + 1
            return epoch >= self.args.epochs
| 4,820 | 31.795918 | 79 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/loss/adversarial.py | import utility
from types import SimpleNamespace
from model import common
from loss import discriminator
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Adversarial(nn.Module):
    """Adversarial loss that owns, trains and queries its discriminator.

    Each forward pass performs ``gan_k`` discriminator update steps on
    detached generator output, then returns the generator-side loss.
    Supported ``gan_type`` values: 'GAN', 'WGAN', 'WGAN_GP', 'RGAN'.
    """

    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k      # discriminator updates per call
        self.dis = discriminator.Discriminator(args)
        if gan_type == 'WGAN_GP':
            # see https://arxiv.org/pdf/1704.00028.pdf pp.4
            optim_dict = {
                'optimizer': 'ADAM',
                'betas': (0, 0.9),
                'epsilon': 1e-8,
                'lr': 1e-5,
                'weight_decay': args.weight_decay,
                'decay': args.decay,
                'gamma': args.gamma
            }
            optim_args = SimpleNamespace(**optim_dict)
        else:
            optim_args = args

        # Separate optimizer over the discriminator parameters only.
        self.optimizer = utility.make_optimizer(optim_args, self.dis)

    def forward(self, fake, real):
        """Update the discriminator ``gan_k`` times, return generator loss."""
        # updating discriminator...
        self.loss = 0
        fake_detach = fake.detach()     # do not backpropagate through G
        for _ in range(self.gan_k):
            self.optimizer.zero_grad()
            # d: B x 1 tensor
            d_fake = self.dis(fake_detach)
            d_real = self.dis(real)
            retain_graph = False
            if self.gan_type == 'GAN':
                loss_d = self.bce(d_real, d_fake)
            elif self.gan_type.find('WGAN') >= 0:
                loss_d = (d_fake - d_real).mean()
                if self.gan_type.find('GP') >= 0:
                    # Gradient penalty at random real/fake interpolates.
                    # NOTE(review): rand_like(fake) has B*C*H*W elements, so
                    # view(-1, 1, 1, 1) does not yield one epsilon per sample;
                    # broadcasting against fake_detach looks like it fails for
                    # batch size > 1 — confirm (torch.rand(B, 1, 1, 1) is the
                    # usual form).
                    epsilon = torch.rand_like(fake).view(-1, 1, 1, 1)
                    hat = fake_detach.mul(1 - epsilon) + real.mul(epsilon)
                    hat.requires_grad = True
                    d_hat = self.dis(hat)
                    gradients = torch.autograd.grad(
                        outputs=d_hat.sum(), inputs=hat,
                        retain_graph=True, create_graph=True, only_inputs=True
                    )[0]
                    gradients = gradients.view(gradients.size(0), -1)
                    gradient_norm = gradients.norm(2, dim=1)
                    gradient_penalty = 10 * gradient_norm.sub(1).pow(2).mean()
                    loss_d += gradient_penalty

            # from ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks
            elif self.gan_type == 'RGAN':
                better_real = d_real - d_fake.mean(dim=0, keepdim=True)
                better_fake = d_fake - d_real.mean(dim=0, keepdim=True)
                loss_d = self.bce(better_real, better_fake)
                # RGAN reuses d_real in the generator loss below, so the
                # graph must survive this backward pass.
                retain_graph = True

            # Discriminator update
            self.loss += loss_d.item()
            loss_d.backward(retain_graph=retain_graph)
            self.optimizer.step()

            if self.gan_type == 'WGAN':
                # Weight clipping enforces the Lipschitz constraint.
                for p in self.dis.parameters():
                    p.data.clamp_(-1, 1)

        self.loss /= self.gan_k

        # updating generator...
        d_fake_bp = self.dis(fake)      # for backpropagation, use fake as it is
        if self.gan_type == 'GAN':
            label_real = torch.ones_like(d_fake_bp)
            loss_g = F.binary_cross_entropy_with_logits(d_fake_bp, label_real)
        elif self.gan_type.find('WGAN') >= 0:
            loss_g = -d_fake_bp.mean()
        elif self.gan_type == 'RGAN':
            better_real = d_real - d_fake_bp.mean(dim=0, keepdim=True)
            better_fake = d_fake_bp - d_real.mean(dim=0, keepdim=True)
            loss_g = self.bce(better_fake, better_real)

        # Generator loss
        return loss_g

    def state_dict(self, *args, **kwargs):
        """Merge discriminator and optimizer state into one flat dict."""
        state_discriminator = self.dis.state_dict(*args, **kwargs)
        state_optimizer = self.optimizer.state_dict()

        return dict(**state_discriminator, **state_optimizer)

    def bce(self, real, fake):
        """BCE-with-logits where ``real`` targets 1 and ``fake`` targets 0."""
        label_real = torch.ones_like(real)
        label_fake = torch.zeros_like(fake)
        bce_real = F.binary_cross_entropy_with_logits(real, label_real)
        bce_fake = F.binary_cross_entropy_with_logits(fake, label_fake)
        bce_loss = bce_real + bce_fake
        return bce_loss
# Some references
# https://github.com/kuc2477/pytorch-wgan-gp/blob/master/model.py
# OR
# https://github.com/caogang/wgan-gp/blob/master/gan_cifar10.py
| 4,393 | 37.884956 | 84 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/loss/discriminator.py | from model import common
import torch.nn as nn
class Discriminator(nn.Module):
    """Patch discriminator used by the adversarial losses.

    A stack of strided conv/BN/LeakyReLU blocks followed by a two-layer
    linear classifier.  Output is a raw score (logit) per image — it is
    not normalized.
    """

    def __init__(self, args):
        super().__init__()

        in_ch = args.n_colors
        out_ch = 64
        depth = 7

        def conv_block(cin, cout, stride=1):
            # conv -> batch-norm -> leaky ReLU, 3x3 kernel, no bias.
            return nn.Sequential(
                nn.Conv2d(cin, cout, 3, padding=1, stride=stride, bias=False),
                nn.BatchNorm2d(cout),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
            )

        layers = [conv_block(in_ch, out_ch)]
        for i in range(depth):
            in_ch = out_ch
            # Odd steps keep resolution and double channels;
            # even steps halve the spatial resolution.
            if i % 2 == 1:
                stride = 1
                out_ch *= 2
            else:
                stride = 2
            layers.append(conv_block(in_ch, out_ch, stride=stride))
        self.features = nn.Sequential(*layers)

        # (depth + 1) // 2 stride-2 blocks shrink patch_size by that power of 2.
        feat_size = args.patch_size // (2 ** ((depth + 1) // 2))
        self.classifier = nn.Sequential(
            nn.Linear(out_ch * feat_size ** 2, 1024),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Linear(1024, 1),
        )

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
| 1,595 | 27.5 | 79 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/loss/vgg.py | from model import common
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
class VGG(nn.Module):
    """Perceptual loss on frozen VGG19 features.

    ``conv_index`` selects the feature tap: '22' uses the first 8 layers,
    '54' the first 35.  Inputs are shifted by the ImageNet mean/std
    (scaled to ``rgb_range``) before being fed to VGG.
    """

    def __init__(self, conv_index, rgb_range=1):
        super().__init__()
        layers = [m for m in models.vgg19(pretrained=True).features]
        if '22' in conv_index:
            self.vgg = nn.Sequential(*layers[:8])
        elif '54' in conv_index:
            self.vgg = nn.Sequential(*layers[:35])

        imagenet_mean = (0.485, 0.456, 0.406)
        imagenet_std = tuple(s * rgb_range for s in (0.229, 0.224, 0.225))
        self.sub_mean = common.MeanShift(rgb_range, imagenet_mean, imagenet_std)
        # The feature extractor is fixed; only the generator learns.
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, sr, hr):
        """Return MSE between VGG features of ``sr`` and (detached) ``hr``."""
        def _features(x):
            return self.vgg(self.sub_mean(x))

        vgg_sr = _features(sr)
        with torch.no_grad():
            vgg_hr = _features(hr.detach())

        return F.mse_loss(vgg_sr, vgg_hr)
| 1,106 | 28.918919 | 75 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/loss/__init__.py | import os
from importlib import import_module
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Loss(nn.modules.loss._Loss):
    """Weighted sum of loss terms parsed from ``args.loss``.

    ``args.loss`` looks like '1*L1+0.5*VGG54': '+'-separated 'weight*TYPE'
    terms.  Per-term running sums/averages are kept in ``self.log``
    (one row per epoch, one column per term, plus a 'Total' column when
    there is more than one term).
    """

    def __init__(self, args, ckp):
        super(Loss, self).__init__()
        print('Preparing loss function:')

        self.n_GPUs = args.n_GPUs
        self.loss = []
        self.loss_module = nn.ModuleList()
        for loss in args.loss.split('+'):
            weight, loss_type = loss.split('*')
            if loss_type == 'MSE':
                loss_function = nn.MSELoss()
            elif loss_type == 'L1':
                loss_function = nn.L1Loss()
            elif loss_type.find('VGG') >= 0:
                module = import_module('loss.vgg')
                loss_function = getattr(module, 'VGG')(
                    loss_type[3:],
                    rgb_range=args.rgb_range
                )
            elif loss_type.find('GAN') >= 0:
                module = import_module('loss.adversarial')
                loss_function = getattr(module, 'Adversarial')(
                    args,
                    loss_type
                )
            # NOTE(review): an unrecognized loss_type falls through with
            # loss_function unbound and raises NameError below — confirm
            # whether explicit validation is wanted.

            self.loss.append({
                'type': loss_type,
                'weight': float(weight),
                'function': loss_function}
            )
            if loss_type.find('GAN') >= 0:
                # Extra bookkeeping slot for the discriminator loss.
                self.loss.append({'type': 'DIS', 'weight': 1, 'function': None})

        if len(self.loss) > 1:
            self.loss.append({'type': 'Total', 'weight': 0, 'function': None})

        for l in self.loss:
            if l['function'] is not None:
                print('{:.3f} * {}'.format(l['weight'], l['type']))
                self.loss_module.append(l['function'])

        self.log = torch.Tensor()

        device = torch.device('cpu' if args.cpu else 'cuda')
        self.loss_module.to(device)
        if args.precision == 'half': self.loss_module.half()
        if not args.cpu and args.n_GPUs > 1:
            self.loss_module = nn.DataParallel(self.loss_module,range(args.n_GPUs))

        if args.load != '': self.load(ckp.dir, cpu=args.cpu)

    def forward(self, sr, hr):
        """Return the weighted total loss and accumulate per-term logs."""
        losses = []
        for i, l in enumerate(self.loss):
            if l['function'] is not None:
                loss = l['function'](sr, hr)
                effective_loss = l['weight'] * loss
                losses.append(effective_loss)
                self.log[-1, i] += effective_loss.item()
            elif l['type'] == 'DIS':
                # Discriminator loss is computed inside the preceding
                # adversarial term's forward pass.
                self.log[-1, i] += self.loss[i - 1]['function'].loss

        loss_sum = sum(losses)
        if len(self.loss) > 1:
            self.log[-1, -1] += loss_sum.item()

        return loss_sum

    def step(self):
        """Advance LR schedulers owned by sub-losses (e.g. adversarial)."""
        for l in self.get_loss_module():
            if hasattr(l, 'scheduler'):
                l.scheduler.step()

    def start_log(self):
        # Append a fresh all-zero row for the new epoch.
        self.log = torch.cat((self.log, torch.zeros(1, len(self.loss))))

    def end_log(self, n_batches):
        # Convert the current row's running sums into per-batch averages.
        self.log[-1].div_(n_batches)

    def display_loss(self, batch):
        """Format running per-term averages after ``batch`` batches."""
        n_samples = batch + 1
        log = []
        for l, c in zip(self.loss, self.log[-1]):
            log.append('[{}: {:.4f}]'.format(l['type'], c / n_samples))

        return ''.join(log)

    def plot_loss(self, apath, epoch):
        """Save one loss-vs-epoch PDF per term under ``apath``."""
        axis = np.linspace(1, epoch, epoch)
        for i, l in enumerate(self.loss):
            label = '{} Loss'.format(l['type'])
            fig = plt.figure()
            plt.title(label)
            plt.plot(axis, self.log[:, i].numpy(), label=label)
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('Loss')
            plt.grid(True)
            plt.savefig(os.path.join(apath, 'loss_{}.pdf'.format(l['type'])))
            plt.close(fig)

    def get_loss_module(self):
        """Unwrap DataParallel when multiple GPUs are in use."""
        if self.n_GPUs == 1:
            return self.loss_module
        else:
            return self.loss_module.module

    def save(self, apath):
        torch.save(self.state_dict(), os.path.join(apath, 'loss.pt'))
        torch.save(self.log, os.path.join(apath, 'loss_log.pt'))

    def load(self, apath, cpu=False):
        """Restore loss state and replay scheduler steps for resumed epochs."""
        if cpu:
            kwargs = {'map_location': lambda storage, loc: storage}
        else:
            kwargs = {}

        self.load_state_dict(torch.load(
            os.path.join(apath, 'loss.pt'),
            **kwargs
        ))
        self.log = torch.load(os.path.join(apath, 'loss_log.pt'))
        for l in self.get_loss_module():
            if hasattr(l, 'scheduler'):
                for _ in range(len(self.log)): l.scheduler.step()
| 4,628 | 31.598592 | 83 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/loss/__loss__.py | 0 | 0 | 0 | py | |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/utils/tools.py | import os
import torch
import numpy as np
from PIL import Image
import torch.nn.functional as F
def normalize(x):
    """Map values from [0, 1] to [-1, 1] in place and return the tensor."""
    x.mul_(2)
    x.add_(-1)
    return x
def same_padding(images, ksizes, strides, rates):
    """Zero-pad an NCHW batch so a sliding window covers every pixel.

    Mirrors TensorFlow 'SAME' padding: the output grid has
    ceil(size / stride) positions per spatial dim, and any odd padding
    goes to the bottom/right.
    """
    assert len(images.size()) == 4
    _, _, rows, cols = images.size()
    out_rows = (rows + strides[0] - 1) // strides[0]
    out_cols = (cols + strides[1] - 1) // strides[1]
    # Effective kernel extent including dilation.
    eff_k_row = (ksizes[0] - 1) * rates[0] + 1
    eff_k_col = (ksizes[1] - 1) * rates[1] + 1
    pad_rows = max(0, (out_rows - 1) * strides[0] + eff_k_row - rows)
    pad_cols = max(0, (out_cols - 1) * strides[1] + eff_k_col - cols)
    # Split the padding; the extra pixel (if any) lands bottom/right.
    top = pad_rows // 2
    left = pad_cols // 2
    bottom = pad_rows - top
    right = pad_cols - left
    return torch.nn.ZeroPad2d((left, right, top, bottom))(images)
def extract_image_patches(images, ksizes, strides, rates, padding='same'):
    """
    Extract sliding-window patches from images and put them in the C
    output dimension (mirrors TensorFlow's extract_patches).

    :param images: [batch, channels, in_rows, in_cols]. A 4-D Tensor.
    :param ksizes: [ksize_rows, ksize_cols]. The size of the sliding window
      for each dimension of images
    :param strides: [stride_rows, stride_cols]
    :param rates: [dilation_rows, dilation_cols]
    :param padding: 'same' (zero-pad so every position is covered) or 'valid'
    :return: A Tensor of shape [N, C*k*k, L], L = number of window positions
    :raises NotImplementedError: for an unsupported ``padding`` value
    """
    assert len(images.size()) == 4
    # The original guarded padding with an assert, which made the raise
    # below unreachable (and disappeared under ``python -O``); the explicit
    # raise is now the single validation path.
    if padding == 'same':
        images = same_padding(images, ksizes, strides, rates)
    elif padding == 'valid':
        pass
    else:
        raise NotImplementedError(
            'Unsupported padding type: {}. '
            'Only "same" or "valid" are supported.'.format(padding)
        )

    unfold = torch.nn.Unfold(
        kernel_size=ksizes,
        dilation=rates,
        padding=0,
        stride=strides
    )
    patches = unfold(images)
    return patches  # [N, C*k*k, L], L is the total number of such blocks
def reduce_mean(x, axis=None, keepdim=False):
    """Mean of ``x`` over the given axes (all axes when ``axis`` is None).

    ``axis`` may be an int or an iterable of ints.  Dims are reduced from
    highest to lowest so the remaining indices stay valid.
    (Fixes the previous ``if not axis`` check, which silently reduced over
    every dimension when ``axis=0`` was passed.)
    """
    if axis is None:
        axis = range(len(x.shape))
    elif isinstance(axis, int):
        axis = (axis,)
    for dim in sorted(axis, reverse=True):
        x = torch.mean(x, dim=dim, keepdim=keepdim)
    return x
def reduce_std(x, axis=None, keepdim=False):
    """Standard deviation of ``x`` over the given axes (all when None).

    ``axis`` may be an int or an iterable of ints.  Note: multi-axis
    reduction is sequential (std of stds), matching the original behavior,
    not a single flattened std.
    (Fixes the previous ``if not axis`` check, which silently reduced over
    every dimension when ``axis=0`` was passed.)
    """
    if axis is None:
        axis = range(len(x.shape))
    elif isinstance(axis, int):
        axis = (axis,)
    for dim in sorted(axis, reverse=True):
        x = torch.std(x, dim=dim, keepdim=keepdim)
    return x
def reduce_sum(x, axis=None, keepdim=False):
    """Sum of ``x`` over the given axes (all axes when ``axis`` is None).

    ``axis`` may be an int or an iterable of ints.  Dims are reduced from
    highest to lowest so the remaining indices stay valid.
    (Fixes the previous ``if not axis`` check, which silently reduced over
    every dimension when ``axis=0`` was passed.)
    """
    if axis is None:
        axis = range(len(x.shape))
    elif isinstance(axis, int):
        axis = (axis,)
    for dim in sorted(axis, reverse=True):
        x = torch.sum(x, dim=dim, keepdim=keepdim)
    return x
| 2,777 | 32.878049 | 79 | py |
Pyramid-Attention-Networks | Pyramid-Attention-Networks-master/SR/code/utils/__init__.py | 0 | 0 | 0 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.