repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
rllab | rllab-master/rllab/optimizers/lbfgs_optimizer.py | from rllab.misc.ext import compile_function, lazydict, flatten_tensor_variables
from rllab.core.serializable import Serializable
import theano
import scipy.optimize
import time
class LbfgsOptimizer(Serializable):
    """
    Performs unconstrained optimization via L-BFGS.
    """

    def __init__(self, max_opt_itr=20, callback=None):
        """
        :param max_opt_itr: maximum number of L-BFGS iterations per call to optimize().
        :param callback: optional callable invoked after every optimizer iteration with
            a dict(loss=..., params=..., itr=..., elapsed=...).
        """
        Serializable.quick_init(self, locals())
        self._max_opt_itr = max_opt_itr
        self._opt_fun = None
        self._target = None
        self._callback = callback

    def update_opt(self, loss, target, inputs, extra_inputs=None, gradients=None, *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :param gradients: symbolic expressions for the gradients of trainable parameters of the target. By default
        this will be computed by calling theano.grad
        :return: No return value.
        """
        self._target = target

        def get_opt_output(gradients):
            # Default to theano-derived gradients when none are supplied.
            if gradients is None:
                gradients = theano.grad(loss, target.get_params(trainable=True))
            flat_grad = flatten_tensor_variables(gradients)
            # fmin_l_bfgs_b expects float64 loss and gradient values.
            return [loss.astype('float64'), flat_grad.astype('float64')]

        if extra_inputs is None:
            extra_inputs = list()
        # Lazily compiled theano functions; compiled on first access.
        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: compile_function(
                inputs=inputs + extra_inputs,
                outputs=get_opt_output(gradients),
            )
        )

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the compiled loss at the given (numeric) inputs."""
        if extra_inputs is None:
            extra_inputs = list()
        return self._opt_fun["f_loss"](*(list(inputs) + list(extra_inputs)))

    def optimize(self, inputs, extra_inputs=None):
        """Run L-BFGS on the target's trainable parameters for the given inputs."""
        f_opt = self._opt_fun["f_opt"]

        if extra_inputs is None:
            extra_inputs = list()

        def f_opt_wrapper(flat_params):
            self._target.set_param_values(flat_params, trainable=True)
            # BUG FIX: f_opt was compiled over inputs + extra_inputs, so the
            # extra inputs must be forwarded here as well (they were dropped
            # before, which broke optimization whenever extra_inputs was used).
            return f_opt(*(list(inputs) + list(extra_inputs)))

        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback(params):
                # Convert to lists so tuple/list argument mixtures concatenate safely.
                loss = self._opt_fun["f_loss"](*(list(inputs) + list(extra_inputs)))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=params,
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        scipy.optimize.fmin_l_bfgs_b(
            func=f_opt_wrapper, x0=self._target.get_param_values(trainable=True),
            maxiter=self._max_opt_itr, callback=opt_callback,
        )
| 3,102 | 34.666667 | 114 | py |
rllab | rllab-master/rllab/plotter/__init__.py | from .plotter import *
| 23 | 11 | 22 | py |
rllab | rllab-master/rllab/plotter/plotter.py | import atexit
from queue import Empty
from multiprocessing import Process, Queue
from rllab.sampler.utils import rollout
import numpy as np
__all__ = [
'init_worker',
'init_plot',
'update_plot'
]
process = None
queue = None
def _worker_start():
    """Plotter-process main loop.

    Drains the module-level ``queue`` (set up by ``init_worker``), keeping only
    the most recent message of each type, then acts on it:
    'stop' terminates, 'update' installs a new (env, policy) pair, 'demo'
    loads parameter values and renders one rollout. With no new message it
    keeps replaying the last demo, if one was received.
    """
    env = None
    policy = None
    max_length = None
    try:
        while True:
            msgs = {}
            # Only fetch the last message of each type
            while True:
                try:
                    msg = queue.get_nowait()
                    msgs[msg[0]] = msg[1:]
                except Empty:
                    break
            if 'stop' in msgs:
                break
            elif 'update' in msgs:
                env, policy = msgs['update']
                # env.start_viewer()
            elif 'demo' in msgs:
                param_values, max_length = msgs['demo']
                policy.set_param_values(param_values)
                rollout(env, policy, max_path_length=max_length, animated=True, speedup=5)
            else:
                # No new message: replay the most recent demo, if any.
                if max_length:
                    rollout(env, policy, max_path_length=max_length, animated=True, speedup=5)
    except KeyboardInterrupt:
        pass
def _shutdown_worker():
    """Request the plotter process to stop and wait for it to exit."""
    if not process:
        return
    queue.put(['stop'])
    queue.close()
    process.join()
def init_worker():
    """Spawn the background plotting process and register shutdown at exit."""
    global process, queue
    queue = Queue()
    process = Process(target=_worker_start)
    process.start()
    atexit.register(_shutdown_worker)
def init_plot(env, policy):
    """Send the environment/policy pair to the plotting worker."""
    message = ['update', env, policy]
    queue.put(message)
def update_plot(policy, max_length=np.inf):
    """Queue a demo rollout using the policy's current parameter values."""
    params = policy.get_param_values()
    queue.put(['demo', params, max_length])
| 1,664 | 23.485294 | 94 | py |
rllab | rllab-master/rllab/viskit/core.py | import csv
from rllab.misc import ext
import os
import numpy as np
import base64
import pickle
import json
import itertools
# import ipywidgets
# import IPython.display
# import plotly.offline as po
# import plotly.graph_objs as go
import pdb
def unique(l):
    """Return the distinct elements of *l* (order is not preserved)."""
    distinct = set(l)
    return list(distinct)
def flatten(l):
    """Concatenate one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    out = []
    for sublist in l:
        out.extend(sublist)
    return out
def load_progress(progress_csv_path):
    """
    Load a training progress.csv into a dict of column name -> float ndarray.

    Cells that cannot be parsed as floats (including empty/missing values)
    are recorded as 0. so one malformed row does not abort the whole load.

    :param progress_csv_path: path to the CSV file.
    :return: dict mapping each column name to a numpy array of floats.
    """
    print("Reading %s" % progress_csv_path)
    entries = dict()
    with open(progress_csv_path, 'r') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            for k, v in row.items():
                if k not in entries:
                    entries[k] = []
                try:
                    entries[k].append(float(v))
                except (TypeError, ValueError):
                    # Narrowed from a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit; only conversion failures
                    # should fall back to 0.
                    entries[k].append(0.)
    entries = dict([(k, np.array(v)) for k, v in entries.items()])
    return entries
def to_json(stub_object):
    """Recursively convert rllab stub objects into JSON-friendly structures.

    Non-stub values are returned unchanged.
    """
    from rllab.misc.instrument import StubObject
    from rllab.misc.instrument import StubAttr
    if isinstance(stub_object, StubObject):
        assert len(stub_object.args) == 0
        data = {k: to_json(v) for k, v in stub_object.kwargs.items()}
        data["_name"] = stub_object.proxy_class.__module__ + \
            "." + stub_object.proxy_class.__name__
        return data
    if isinstance(stub_object, StubAttr):
        return dict(
            obj=to_json(stub_object.obj),
            attr=to_json(stub_object.attr_name),
        )
    return stub_object
def flatten_dict(d):
    """
    Flatten a nested dict into a single level, joining key paths with ".".

    e.g. {"a": {"b": 1}, "c": 2} -> {"a.b": 1, "c": 2}

    :param d: possibly-nested dict.
    :return: flat dict with dotted keys.
    """
    flat_params = dict()
    for k, v in d.items():
        if isinstance(v, dict):
            # BUG FIX: the original flattened `v` twice (once into a temporary
            # and again while iterating); a single recursive call suffices.
            for subk, subv in flatten_dict(v).items():
                flat_params[k + "." + subk] = subv
        else:
            flat_params[k] = v
    return flat_params
def load_params(params_json_path):
    """Read an experiment's JSON parameter file.

    Drops the bulky "args_data" payload and, when "exp_name" is absent,
    falls back to the name of the enclosing directory.
    """
    with open(params_json_path, 'r') as f:
        data = json.loads(f.read())
    data.pop("args_data", None)
    if "exp_name" not in data:
        # Second-to-last path component is the experiment directory.
        data["exp_name"] = params_json_path.split("/")[-2]
    return data
def lookup(d, keys):
    """Follow a dotted key path (or list of keys) into nested mappings.

    Returns None as soon as the path dead-ends (missing key or a node
    without item access).
    """
    if not isinstance(keys, list):
        keys = keys.split(".")
    node = d
    for key in keys:
        if not hasattr(node, "__getitem__"):
            return None
        if key not in node:
            return None
        node = node[key]
    return node
def load_exps_data(exp_folder_paths, disable_variant=False):
    """Walk experiment folders and load (progress, params) for every run found.

    Directories missing the expected files are skipped with a printed message.
    Prefers variant.json over params.json unless disable_variant is set.
    """
    candidate_dirs = []
    for folder in exp_folder_paths:
        candidate_dirs.extend(walked[0] for walked in os.walk(folder))
    exps_data = []
    for exp_path in candidate_dirs:
        try:
            params_json_path = os.path.join(exp_path, "params.json")
            variant_json_path = os.path.join(exp_path, "variant.json")
            progress = load_progress(os.path.join(exp_path, "progress.csv"))
            if disable_variant:
                params = load_params(params_json_path)
            else:
                try:
                    params = load_params(variant_json_path)
                except IOError:
                    params = load_params(params_json_path)
            exps_data.append(ext.AttrDict(
                progress=progress, params=params, flat_params=flatten_dict(params)))
        except IOError as e:
            print(e)
    return exps_data
def smart_repr(x):
    """repr() variant producing eval-able strings for tuples and callables."""
    if isinstance(x, tuple):
        if not x:
            return "tuple()"
        if len(x) == 1:
            return "(%s,)" % smart_repr(x[0])
        return "(" + ",".join(smart_repr(item) for item in x) + ")"
    if hasattr(x, "__call__"):
        # Callables are rendered as a pydoc lookup so eval() can restore them.
        return "__import__('pydoc').locate('%s')" % (x.__module__ + "." + x.__name__)
    return repr(x)
def extract_distinct_params(exps_data, excluded_params=('exp_name', 'seed', 'log_dir'), l=1):
    """
    Identify hyperparameters whose values differ across experiments.

    :param exps_data: list of records, each carrying a `flat_params` dict.
    :param excluded_params: parameter-name prefixes to drop from the result.
    :param l: only parameters with strictly more than `l` distinct values are kept.
    :return: list of (param_name, [distinct values]) pairs.
    """
    # all_pairs = unique(flatten([d.flat_params.items() for d in exps_data]))
    # if logger:
    #     logger("(Excluding {excluded})".format(excluded=', '.join(excluded_params)))
    # def cmp(x,y):
    #     if x < y:
    #         return -1
    #     elif x > y:
    #         return 1
    #     else:
    #         return 0
    try:
        # Round-trip each (key, value) pair through smart_repr and eval so
        # unhashable values can be de-duplicated via their string form.
        stringified_pairs = sorted(
            map(
                eval,
                unique(
                    flatten(
                        [
                            list(
                                map(
                                    smart_repr,
                                    list(d.flat_params.items())
                                )
                            )
                            for d in exps_data
                        ]
                    )
                )
            ),
            # Sort key maps None to 0. so pairs containing None still compare.
            key=lambda x: (
                tuple(0. if it is None else it for it in x),
            )
        )
    except Exception as e:
        # Drop into the debugger on unexpected data (e.g. uncomparable types).
        print(e)
        import ipdb; ipdb.set_trace()
    # Group the sorted (key, value) pairs by key and collect their values.
    proposals = [(k, [x[1] for x in v])
                 for k, v in itertools.groupby(stringified_pairs, lambda x: x[0])]
    # Keep params with more than `l` values whose name does not start with an
    # excluded prefix.
    filtered = [(k, v) for (k, v) in proposals if len(v) > l and all(
        [k.find(excluded_param) != 0 for excluded_param in excluded_params])]
    return filtered
class Selector(object):
    """Immutable query object for filtering a list of experiment records.

    Each `where`/`custom_filter` call returns a *new* Selector with the extra
    predicate attached; `extract`/`iextract` apply all predicates at once.
    """

    def __init__(self, exps_data, filters=None, custom_filters=None):
        self._exps_data = exps_data
        self._filters = tuple() if filters is None else tuple(filters)
        self._custom_filters = [] if custom_filters is None else custom_filters

    def where(self, k, v):
        """New Selector additionally requiring flat_params[k] == v (compared as strings)."""
        return Selector(self._exps_data, self._filters + ((k, v),), self._custom_filters)

    def custom_filter(self, filter):
        """New Selector with an arbitrary per-experiment predicate added."""
        return Selector(self._exps_data, self._filters, self._custom_filters + [filter])

    def _check_exp(self, exp):
        # A (k, v) filter passes when the experiment lacks the key entirely,
        # or its stringified value matches.
        for k, v in self._filters:
            if k in exp.flat_params and str(exp.flat_params.get(k, None)) != str(v):
                return False
        return all(f(exp) for f in self._custom_filters)

    def extract(self):
        """Materialize the matching experiments as a list."""
        return [exp for exp in self._exps_data if self._check_exp(exp)]

    def iextract(self):
        """Lazy variant of extract()."""
        return filter(self._check_exp, self._exps_data)
# Taken from plot.ly
# Default qualitative palette; plotting code cycles through it by index.
color_defaults = [
    '#1f77b4',  # muted blue
    '#ff7f0e',  # safety orange
    '#2ca02c',  # cooked asparagus green
    '#d62728',  # brick red
    '#9467bd',  # muted purple
    '#8c564b',  # chestnut brown
    '#e377c2',  # raspberry yogurt pink
    '#7f7f7f',  # middle gray
    '#bcbd22',  # curry yellow-green
    '#17becf'  # blue-teal
]
def hex_to_rgb(hex, opacity=1.0):
    """Convert a '#rrggbb' (or 'rrggbb') hex color to a CSS 'rgba(...)' string."""
    if hex[0] == '#':
        hex = hex[1:]
    assert (len(hex) == 6)
    red = int(hex[0:2], 16)
    green = int(hex[2:4], 16)
    blue = int(hex[4:6], 16)
    return "rgba({0},{1},{2},{3})".format(red, green, blue, opacity)
# class VisApp(object):
#
#
# def __init__(self, exp_folder_path):
# self._logs = []
# self._plot_sequence = []
# self._exps_data = None
# self._distinct_params = None
# self._exp_filter = None
# self._plottable_keys = None
# self._plot_key = None
# self._init_data(exp_folder_path)
# self.redraw()
#
# def _init_data(self, exp_folder_path):
# self.log("Loading data...")
# self._exps_data = load_exps_data(exp_folder_path)
# self.log("Loaded {nexp} experiments".format(nexp=len(self._exps_data)))
# self._distinct_params = extract_distinct_params(self._exps_data, logger=self.log)
# assert len(self._distinct_params) == 1
# self._exp_filter = self._distinct_params[0]
# self.log("******************************************")
# self.log("Found {nvary} varying parameter{plural}".format(nvary=len(self._distinct_params), plural="" if len(
# self._distinct_params) == 1 else "s"))
# for k, v in self._distinct_params:
# self.log(k, ':', ", ".join(map(str, v)))
# self.log("******************************************")
# self._plottable_keys = self._exps_data[0].progress.keys()
# assert len(self._plottable_keys) > 0
# if 'AverageReturn' in self._plottable_keys:
# self._plot_key = 'AverageReturn'
# else:
# self._plot_key = self._plottable_keys[0]
#
# def log(self, *args, **kwargs):
# self._logs.append((args, kwargs))
#
# def _display_dropdown(self, attr_name, options):
# def f(**kwargs):
# self.__dict__[attr_name] = kwargs[attr_name]
# IPython.display.display(ipywidgets.interactive(f, **{attr_name: options}))
#
# def redraw(self):
# # print out all the logs
# for args, kwargs in self._logs:
# print(*args, **kwargs)
#
# self._display_dropdown("_plot_key", self._plottable_keys)
#
# k, vs = self._exp_filter
# selector = Selector(self._exps_data)
# to_plot = []
# for v in vs:
# filtered_data = selector.where(k, v).extract()
# returns = [exp.progress[self._plot_key] for exp in filtered_data]
# sizes = map(len, returns)
# max_size = max(sizes)
# for exp, retlen in zip(filtered_data, sizes):
# if retlen < max_size:
# self.log("Excluding {exp_name} since the trajectory is shorter: {thislen} vs. {maxlen}".format(
# exp_name=exp.params["exp_name"], thislen=retlen, maxlen=max_size))
# returns = [ret for ret in returns if len(ret) == max_size]
# mean_returns = np.mean(returns, axis=0)
# std_returns = np.std(returns, axis=0)
# self._plot_sequence.append((''))
# to_plot.append(ext.AttrDict(means=mean_returns, stds=std_returns, legend=str(v)))
# make_plot(to_plot)
| 10,040 | 32.47 | 119 | py |
rllab | rllab-master/rllab/viskit/__init__.py | __author__ = 'dementrock'
| 26 | 12.5 | 25 | py |
rllab | rllab-master/rllab/viskit/frontend.py |
import sys
sys.path.append('.')
import matplotlib
import os
matplotlib.use('Agg')
import flask # import Flask, render_template, send_from_directory
from rllab.misc.ext import flatten
from rllab.viskit import core
from rllab.misc import ext
import sys
import argparse
import json
import numpy as np
# import threading, webbrowser
import plotly.offline as po
import plotly.graph_objs as go
def sliding_mean(data_array, window=5):
    """
    Smooth a 1-D sequence with a boundary-clipped sliding-window average.

    At index i the mean is taken over indices
    [max(i - window + 1, 0), min(i + window + 1, len)), so the output has the
    same length as the input.

    :param data_array: 1-D sequence of numbers.
    :param window: half-width parameter of the averaging window.
    :return: np.ndarray of smoothed values.
    """
    data_array = np.array(data_array)
    new_list = []
    for i in range(len(data_array)):
        lo = max(i - window + 1, 0)
        hi = min(i + window + 1, len(data_array))
        # The window indices are contiguous, so a slice mean replaces the
        # original O(window) Python accumulation loop.
        new_list.append(data_array[lo:hi].mean())
    return np.array(new_list)
import itertools
app = flask.Flask(__name__, static_url_path='/static')
exps_data = None
plottable_keys = None
distinct_params = None
@app.route('/js/<path:path>')
def send_js(path):
    """Serve static JavaScript assets from the local ./js directory."""
    return flask.send_from_directory('js', path)
@app.route('/css/<path:path>')
def send_css(path):
    """Serve static CSS assets from the local ./css directory."""
    return flask.send_from_directory('css', path)
def make_plot(plot_list, use_median=False, plot_width=None, plot_height=None, title=None):
    """
    Render a list of curve descriptors into a plotly HTML div.

    Each entry of plot_list provides either (means, stds) or
    (percentile25/50/75) arrays plus a `legend` label; an optional `footnote`
    on the first entry appends a footnote section below the figure.

    :param use_median: plot median with quartile band instead of mean +- std.
    :return: HTML string (plotly div, plotly.js not embedded).
    """
    data = []
    p25, p50, p75 = [], [], []
    for idx, plt in enumerate(plot_list):
        color = core.color_defaults[idx % len(core.color_defaults)]
        if use_median:
            # Per-curve averages, printed below for quick numeric comparison.
            p25.append(np.mean(plt.percentile25))
            p50.append(np.mean(plt.percentile50))
            p75.append(np.mean(plt.percentile75))
            x = list(range(len(plt.percentile50)))
            y = list(plt.percentile50)
            y_upper = list(plt.percentile75)
            y_lower = list(plt.percentile25)
        else:
            x = list(range(len(plt.means)))
            y = list(plt.means)
            y_upper = list(plt.means + plt.stds)
            y_lower = list(plt.means - plt.stds)
        # Shaded uncertainty band: upper curve followed by the reversed lower
        # curve forms a closed polygon.
        data.append(go.Scatter(
            x=x + x[::-1],
            y=y_upper + y_lower[::-1],
            fill='tozerox',
            fillcolor=core.hex_to_rgb(color, 0.2),
            line=go.Line(color='transparent'),
            showlegend=False,
            legendgroup=plt.legend,
            hoverinfo='none'
        ))
        # Central curve.
        data.append(go.Scatter(
            x=x,
            y=y,
            name=plt.legend,
            legendgroup=plt.legend,
            line=dict(color=core.hex_to_rgb(color)),
        ))
    # Print the collected per-curve quartile means as bracketed lists.
    p25str = '['
    p50str = '['
    p75str = '['
    for p25e, p50e, p75e in zip(p25, p50, p75):
        p25str += (str(p25e) + ',')
        p50str += (str(p50e) + ',')
        p75str += (str(p75e) + ',')
    p25str += ']'
    p50str += ']'
    p75str += ']'
    print(p25str)
    print(p50str)
    print(p75str)
    layout = go.Layout(
        legend=dict(
            x=1,
            y=1,
            # xanchor="left",
            # yanchor="bottom",
        ),
        width=plot_width,
        height=plot_height,
        title=title,
    )
    fig = go.Figure(data=data, layout=layout)
    fig_div = po.plot(fig, output_type='div', include_plotlyjs=False)
    if "footnote" in plot_list[0]:
        footnote = "<br />".join([
            r"<span><b>%s</b></span>: <span>%s</span>" % (plt.legend, plt.footnote)
            for plt in plot_list
        ])
        return r"%s<div>%s</div>" % (fig_div, footnote)
    else:
        return fig_div
def make_plot_eps(plot_list, use_median=False, counter=0):
    """
    Render the curves to a matplotlib figure saved as tmp<counter>.pdf.

    NOTE(review): this contains paper-specific hard-coded legend renames,
    per-index legend overrides and per-counter axis limits — presumably
    one-off figure tweaks for a publication; confirm before reusing.
    """
    import matplotlib.pyplot as _plt
    f, ax = _plt.subplots(figsize=(8, 5))
    for idx, plt in enumerate(plot_list):
        color = core.color_defaults[idx % len(core.color_defaults)]
        if use_median:
            x = list(range(len(plt.percentile50)))
            y = list(plt.percentile50)
            y_upper = list(plt.percentile75)
            y_lower = list(plt.percentile25)
        else:
            x = list(range(len(plt.means)))
            y = list(plt.means)
            y_upper = list(plt.means + plt.stds)
            y_lower = list(plt.means - plt.stds)
        # Shorten fully-qualified algorithm class names for the legend.
        plt.legend = plt.legend.replace('rllab.algos.trpo.TRPO', 'TRPO')
        plt.legend = plt.legend.replace('rllab.algos.vpg.VPG', 'REINFORCE')
        plt.legend = plt.legend.replace('rllab.algos.erwr.ERWR', 'ERWR')
        plt.legend = plt.legend.replace('sandbox.rein.algos.trpo_vime.TRPO', 'TRPO+VIME')
        plt.legend = plt.legend.replace('sandbox.rein.algos.vpg_vime.VPG', 'REINFORCE+VIME')
        plt.legend = plt.legend.replace('sandbox.rein.algos.erwr_vime.ERWR', 'ERWR+VIME')
        plt.legend = plt.legend.replace('0.0001', '1e-4')
        # plt.legend = plt.legend.replace('0.001', 'TRPO+VIME')
        # plt.legend = plt.legend.replace('0', 'TRPO')
        # plt.legend = plt.legend.replace('0.005', 'TRPO+L2')
        # Hard-coded legend labels by curve index.
        if idx == 0:
            plt.legend = 'TRPO (0.0)'
        if idx == 1:
            plt.legend = 'TRPO+VIME (103.7)'
        if idx == 2:
            plt.legend = 'TRPO+L2 (0.0)'
        ax.fill_between(
            x, y_lower, y_upper, interpolate=True, facecolor=color, linewidth=0.0, alpha=0.3)
        # Third curve drawn dashed.
        if idx == 2:
            ax.plot(x, y, color=color, label=plt.legend, linewidth=2.0, linestyle="--")
        else:
            ax.plot(x, y, color=color, label=plt.legend, linewidth=2.0)
    ax.grid(True)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # Per-figure axis limits and legend placement selected by `counter`.
    if counter == 1:
        # ax.set_xlim([0, 120])
        ax.set_ylim([-3, 60])
        # ax.set_xlim([0, 80])
        loc = 'upper left'
    elif counter == 2:
        ax.set_ylim([-0.04, 0.4])
        # ax.set_ylim([-0.1, 0.4])
        ax.set_xlim([0, 2000])
        loc = 'upper left'
    elif counter == 3:
        # ax.set_xlim([0, 1000])
        loc = 'lower right'
    elif counter == 4:
        # ax.set_xlim([0, 800])
        # ax.set_ylim([0, 2])
        loc = 'lower right'
    leg = ax.legend(loc=loc, prop={'size': 12}, ncol=1)
    for legobj in leg.legendHandles:
        legobj.set_linewidth(5.0)

    def y_fmt(x, y):
        # Tick formatter rendering thousands as e.g. "5K" (currently unused).
        return str(int(np.round(x / 1000.0))) + 'K'
    import matplotlib.ticker as tick
    # ax.xaxis.set_major_formatter(tick.FuncFormatter(y_fmt))
    _plt.savefig('tmp' + str(counter) + '.pdf', bbox_inches='tight')
def summary_name(exp, selector=None):
    """Legend label for an experiment; currently just its exp_name.

    An earlier version built the label from the non-filtered varying
    parameters (hence the unused `selector` argument); that logic is
    intentionally disabled.
    """
    return exp.params["exp_name"]
def check_nan(exp):
    """True when no progress series of the experiment contains NaNs."""
    for series in list(exp.progress.values()):
        if np.any(np.isnan(series)):
            return False
    return True
def get_plot_instruction(plot_key, split_key=None, group_key=None, filters=None, use_median=False,
                         only_show_best=False, only_show_best_final=False, gen_eps=False,
                         only_show_best_sofar=False, clip_plot_value=None, plot_width=None,
                         plot_height=None, filter_nan=False, smooth_curve=False, custom_filter=None,
                         legend_post_processor=None, normalize_error=False, custom_series_splitter=None):
    """
    Build the HTML plot div(s) for `plot_key`, optionally split into separate
    figures by `split_key` and grouped into curves by `group_key`.

    Reads the module-level `exps_data` and `distinct_params` caches that
    reload_data() populates.

    :param plot_key: progress column to plot.
    :param split_key: parameter splitting experiments into separate figures.
    :param group_key: parameter grouping curves within a figure; defaults to
        grouping by experiment name.
    :param filters: dict of param -> value pre-filtering the experiments.
    :param use_median: plot median + quartile band instead of mean +- std.
    :param only_show_best / only_show_best_final / only_show_best_sofar:
        within each group, keep only the best hyperparameter combination
        (ranked by mean curve, final value, or best value so far).
    :param gen_eps: also emit .pdf figures via make_plot_eps instead of divs.
    :param clip_plot_value: clip curves to [-clip, clip] when given.
    :param plot_width: figure width in pixels.
    :param plot_height: figure height in pixels.
    :param filter_nan: drop experiments whose progress contains NaNs.
    :param smooth_curve: smooth curves with sliding_mean.
    :param custom_filter: optional predicate applied to each experiment.
    :param legend_post_processor: optional callable rewriting legend labels.
    :param normalize_error: divide stds by sqrt(#non-NaN runs) (standard error).
    :param custom_series_splitter: optional callable mapping experiment ->
        series label, overriding group_key.
    :return: HTML string with one plotly div per split.
    """
    print(plot_key, split_key, group_key, filters)
    if filter_nan:
        nonnan_exps_data = list(filter(check_nan, exps_data))
        selector = core.Selector(nonnan_exps_data)
    else:
        selector = core.Selector(exps_data)
    if legend_post_processor is None:
        legend_post_processor = lambda x: x
    if filters is None:
        filters = dict()
    for k, v in filters.items():
        selector = selector.where(k, str(v))
    if custom_filter is not None:
        selector = selector.custom_filter(custom_filter)
    # print selector._filters
    if split_key is not None:
        vs = [vs for k, vs in distinct_params if k == split_key][0]
        split_selectors = [selector.where(split_key, v) for v in vs]
        split_legends = list(map(str, vs))
    else:
        split_selectors = [selector]
        split_legends = ["Plot"]
    plots = []
    counter = 1
    for split_selector, split_legend in zip(split_selectors, split_legends):
        if custom_series_splitter is not None:
            # Partition experiments by the user-supplied series label.
            exps = split_selector.extract()
            splitted_dict = dict()
            for exp in exps:
                key = custom_series_splitter(exp)
                if key not in splitted_dict:
                    splitted_dict[key] = list()
                splitted_dict[key].append(exp)
            splitted = list(splitted_dict.items())
            group_selectors = [core.Selector(list(x[1])) for x in splitted]
            group_legends = [x[0] for x in splitted]
        else:
            # BUG FIX: was `group_key is not "exp_name"` — identity comparison
            # with a string literal is implementation-dependent (SyntaxWarning
            # on modern Pythons); equality is what is meant.
            if group_key and group_key != "exp_name":
                vs = [vs for k, vs in distinct_params if k == group_key][0]
                group_selectors = [split_selector.where(group_key, v) for v in vs]
                group_legends = [str(x) for x in vs]
            else:
                group_key = "exp_name"
                vs = sorted([x.params["exp_name"] for x in split_selector.extract()])
                group_selectors = [split_selector.where(group_key, v) for v in vs]
                group_legends = [summary_name(x.extract()[0], split_selector)
                                 for x in group_selectors]
        to_plot = []
        for group_selector, group_legend in zip(group_selectors, group_legends):
            filtered_data = group_selector.extract()
            if len(filtered_data) > 0:
                if only_show_best or only_show_best_final or only_show_best_sofar:
                    # Group by seed and sort.
                    # -----------------------
                    filtered_params = core.extract_distinct_params(filtered_data, l=0)
                    filtered_params2 = [p[1] for p in filtered_params]
                    filtered_params_k = [p[0] for p in filtered_params]
                    product_space = list(itertools.product(*filtered_params2))
                    data_best_regret = None
                    best_regret = -np.inf
                    kv_string_best_regret = None
                    for idx, params in enumerate(product_space):
                        selector = core.Selector(exps_data)
                        for k, v in zip(filtered_params_k, params):
                            selector = selector.where(k, str(v))
                        data = selector.extract()
                        if len(data) > 0:
                            progresses = [
                                exp.progress.get(plot_key, np.array([np.nan])) for exp in data
                            ]
                            sizes = list(map(len, progresses))
                            max_size = max(sizes)
                            # Right-pad shorter runs with NaN so arrays stack.
                            progresses = [
                                np.concatenate([ps, np.ones(max_size - len(ps)) * np.nan]) for ps in progresses]
                            if only_show_best_final:
                                progresses = np.asarray(progresses)[:, -1]
                            if only_show_best_sofar:
                                progresses = np.max(np.asarray(progresses), axis=1)
                            if use_median:
                                medians = np.nanmedian(progresses, axis=0)
                                regret = np.mean(medians)
                            else:
                                means = np.nanmean(progresses, axis=0)
                                regret = np.mean(means)
                            distinct_params_k = [p[0] for p in distinct_params]
                            distinct_params_v = [
                                v for k, v in zip(filtered_params_k, params) if k in distinct_params_k]
                            distinct_params_kv = [
                                (k, v) for k, v in zip(distinct_params_k, distinct_params_v)]
                            distinct_params_kv_string = str(
                                distinct_params_kv).replace('), ', ')\t')
                            print(
                                '{}\t{}\t{}'.format(regret, len(progresses), distinct_params_kv_string))
                            if regret > best_regret:
                                best_regret = regret
                                best_progress = progresses
                                data_best_regret = data
                                kv_string_best_regret = distinct_params_kv_string
                    print(group_selector._filters)
                    print('best regret: {}'.format(best_regret))
                    # -----------------------
                    if best_regret != -np.inf:
                        progresses = [
                            exp.progress.get(plot_key, np.array([np.nan])) for exp in data_best_regret]
                        sizes = list(map(len, progresses))
                        max_size = max(sizes)
                        progresses = [
                            np.concatenate([ps, np.ones(max_size - len(ps)) * np.nan]) for ps in progresses]
                        legend = '{} (mu: {:.3f}, std: {:.5f})'.format(
                            group_legend, best_regret, np.std(best_progress))
                        window_size = np.maximum(
                            int(np.round(max_size / float(1000))), 1)
                        if use_median:
                            percentile25 = np.nanpercentile(progresses, q=25, axis=0)
                            percentile50 = np.nanpercentile(progresses, q=50, axis=0)
                            percentile75 = np.nanpercentile(progresses, q=75, axis=0)
                            if smooth_curve:
                                percentile25 = sliding_mean(percentile25, window=window_size)
                                percentile50 = sliding_mean(percentile50, window=window_size)
                                percentile75 = sliding_mean(percentile75, window=window_size)
                            if clip_plot_value is not None:
                                percentile25 = np.clip(percentile25, -clip_plot_value, clip_plot_value)
                                percentile50 = np.clip(percentile50, -clip_plot_value, clip_plot_value)
                                percentile75 = np.clip(percentile75, -clip_plot_value, clip_plot_value)
                            to_plot.append(
                                ext.AttrDict(percentile25=percentile25, percentile50=percentile50,
                                             percentile75=percentile75,
                                             legend=legend_post_processor(legend)))
                        else:
                            means = np.nanmean(progresses, axis=0)
                            stds = np.nanstd(progresses, axis=0)
                            if normalize_error:  # and len(progresses) > 0:
                                stds /= np.sqrt(np.sum((1. - np.isnan(progresses)), axis=0))
                            if smooth_curve:
                                means = sliding_mean(means, window=window_size)
                                stds = sliding_mean(stds, window=window_size)
                            if clip_plot_value is not None:
                                means = np.clip(means, -clip_plot_value, clip_plot_value)
                                stds = np.clip(stds, -clip_plot_value, clip_plot_value)
                            to_plot.append(
                                ext.AttrDict(means=means, stds=stds,
                                             legend=legend_post_processor(legend)))
                        if len(to_plot) > 0 and len(data) > 0:
                            to_plot[-1]["footnote"] = "%s; e.g. %s" % (
                                kv_string_best_regret, data[0].params.get("exp_name", "NA"))
                        else:
                            to_plot[-1]["footnote"] = ""
                else:
                    progresses = [
                        exp.progress.get(plot_key, np.array([np.nan])) for exp in filtered_data]
                    sizes = list(map(len, progresses))
                    # more intelligent:
                    max_size = max(sizes)
                    progresses = [
                        np.concatenate([ps, np.ones(max_size - len(ps)) * np.nan]) for ps in progresses]
                    window_size = np.maximum(int(np.round(max_size / float(1000))), 1)
                    if use_median:
                        percentile25 = np.nanpercentile(progresses, q=25, axis=0)
                        percentile50 = np.nanpercentile(progresses, q=50, axis=0)
                        percentile75 = np.nanpercentile(progresses, q=75, axis=0)
                        if smooth_curve:
                            percentile25 = sliding_mean(percentile25, window=window_size)
                            percentile50 = sliding_mean(percentile50, window=window_size)
                            percentile75 = sliding_mean(percentile75, window=window_size)
                        if clip_plot_value is not None:
                            percentile25 = np.clip(percentile25, -clip_plot_value, clip_plot_value)
                            percentile50 = np.clip(percentile50, -clip_plot_value, clip_plot_value)
                            percentile75 = np.clip(percentile75, -clip_plot_value, clip_plot_value)
                        to_plot.append(
                            ext.AttrDict(percentile25=percentile25, percentile50=percentile50,
                                         percentile75=percentile75,
                                         legend=legend_post_processor(group_legend)))
                    else:
                        means = np.nanmean(progresses, axis=0)
                        stds = np.nanstd(progresses, axis=0)
                        if smooth_curve:
                            means = sliding_mean(means, window=window_size)
                            stds = sliding_mean(stds, window=window_size)
                        if clip_plot_value is not None:
                            means = np.clip(means, -clip_plot_value, clip_plot_value)
                            stds = np.clip(stds, -clip_plot_value, clip_plot_value)
                        to_plot.append(
                            ext.AttrDict(means=means, stds=stds,
                                         legend=legend_post_processor(group_legend)))
        if len(to_plot) > 0 and not gen_eps:
            fig_title = "%s: %s" % (split_key, split_legend)
            # plots.append("<h3>%s</h3>" % fig_title)
            plots.append(make_plot(
                to_plot,
                use_median=use_median, title=fig_title,
                plot_width=plot_width, plot_height=plot_height
            ))
        if gen_eps:
            make_plot_eps(to_plot, use_median=use_median, counter=counter)
            counter += 1
    return "\n".join(plots)
def parse_float_arg(args, key):
    """
    Fetch request argument `key` as a float, or None when absent or unparsable.

    :param args: mapping with a .get method (e.g. flask request.args).
    :param key: argument name.
    :return: float value or None.
    """
    x = args.get(key, "")
    try:
        return float(x)
    except (TypeError, ValueError):
        # Narrowed from `except Exception`: only conversion failures mean
        # "no value"; anything else should propagate.
        return None
@app.route("/plot_div")
def plot_div():
    """Flask endpoint: parse query-string options and return the plot HTML."""
    # reload_data()
    args = flask.request.args
    plot_key = args.get("plot_key")
    split_key = args.get("split_key", "")
    group_key = args.get("group_key", "")
    filters_json = args.get("filters", "{}")
    filters = json.loads(filters_json)
    # Empty strings from the form mean "not set".
    if len(split_key) == 0:
        split_key = None
    if len(group_key) == 0:
        group_key = None
    # group_key = distinct_params[0][0]
    # print split_key
    # exp_filter = distinct_params[0]
    # Boolean flags arrive as the literal string 'True'.
    use_median = args.get("use_median", "") == 'True'
    gen_eps = args.get("eps", "") == 'True'
    only_show_best = args.get("only_show_best", "") == 'True'
    only_show_best_final = args.get("only_show_best_final", "") == 'True'
    only_show_best_sofar = args.get("only_show_best_sofar", "") == 'True'
    normalize_error = args.get("normalize_error", "") == 'True'
    filter_nan = args.get("filter_nan", "") == 'True'
    smooth_curve = args.get("smooth_curve", "") == 'True'
    clip_plot_value = parse_float_arg(args, "clip_plot_value")
    plot_width = parse_float_arg(args, "plot_width")
    plot_height = parse_float_arg(args, "plot_height")
    custom_filter = args.get("custom_filter", None)
    custom_series_splitter = args.get("custom_series_splitter", None)
    # User-supplied lambda strings are evaluated via safer_eval (builtins
    # stripped; still not a real sandbox).
    if custom_filter is not None and len(custom_filter.strip()) > 0:
        custom_filter = safer_eval(custom_filter)
    else:
        custom_filter = None
    legend_post_processor = args.get("legend_post_processor", None)
    if legend_post_processor is not None and len(legend_post_processor.strip()) > 0:
        legend_post_processor = safer_eval(legend_post_processor)
    else:
        legend_post_processor = None
    if custom_series_splitter is not None and len(custom_series_splitter.strip()) > 0:
        custom_series_splitter = safer_eval(custom_series_splitter)
    else:
        custom_series_splitter = None
    plot_div = get_plot_instruction(plot_key=plot_key, split_key=split_key, filter_nan=filter_nan,
                                    group_key=group_key, filters=filters, use_median=use_median, gen_eps=gen_eps,
                                    only_show_best=only_show_best, only_show_best_final=only_show_best_final,
                                    only_show_best_sofar=only_show_best_sofar,
                                    clip_plot_value=clip_plot_value, plot_width=plot_width, plot_height=plot_height,
                                    smooth_curve=smooth_curve, custom_filter=custom_filter,
                                    legend_post_processor=legend_post_processor, normalize_error=normalize_error,
                                    custom_series_splitter=custom_series_splitter)
    # print plot_div
    return plot_div
def safer_eval(some_string):
    """
    Not full-proof, but taking advice from:
    https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html
    """
    looks_suspicious = ("__" in some_string) or ("import" in some_string)
    if looks_suspicious:
        raise Exception("string to eval looks suspicious")
    # Evaluate with builtins stripped out of the global namespace.
    sandbox_globals = {'__builtins__': {}}
    return eval(some_string, sandbox_globals)
@app.route("/")
def index():
    """Flask endpoint: render the main page with a default plot.

    Picks 'AverageReturn' as the default plot key when available and the
    first varying hyperparameter as the default group key.
    """
    # exp_folder_path = "data/s3/experiments/ppo-atari-3"
    # _load_data(exp_folder_path)
    # exp_json = json.dumps(exp_data)
    if "AverageReturn" in plottable_keys:
        plot_key = "AverageReturn"
    elif len(plottable_keys) > 0:
        plot_key = plottable_keys[0]
    else:
        plot_key = None
    if len(distinct_params) > 0:
        group_key = distinct_params[0][0]
    else:
        group_key = None
    plot_div = get_plot_instruction(
        plot_key=plot_key, split_key=None, group_key=group_key)
    return flask.render_template(
        "main.html",
        plot_div=plot_div,
        plot_key=plot_key,
        group_key=group_key,
        plottable_keys=plottable_keys,
        distinct_param_keys=[str(k) for k, v in distinct_params],
        distinct_params=dict([(str(k), list(map(str, v)))
                              for k, v in distinct_params]),
    )
def reload_data():
    """Re-scan the experiment folders and refresh the module-level caches.

    Populates `exps_data` (loaded runs), `plottable_keys` (union of progress
    columns) and `distinct_params` (varying hyperparameters). Reads the
    CLI arguments from the module-level `args` set in __main__.
    """
    global exps_data
    global plottable_keys
    global distinct_params
    exps_data = core.load_exps_data(args.data_paths,args.disable_variant)
    plottable_keys = list(
        set(flatten(list(exp.progress.keys()) for exp in exps_data)))
    plottable_keys = sorted([k for k in plottable_keys if k is not None])
    distinct_params = sorted(core.extract_distinct_params(exps_data))
if __name__ == "__main__":
    # CLI: viskit frontend server. Either pass experiment folders directly,
    # or --prefix to select all sibling folders sharing a name prefix.
    parser = argparse.ArgumentParser()
    parser.add_argument("data_paths", type=str, nargs='*')
    parser.add_argument("--prefix",type=str,nargs='?',default="???")
    parser.add_argument("--debug", action="store_true", default=False)
    parser.add_argument("--port", type=int, default=5000)
    parser.add_argument("--disable-variant",default=False,action='store_true')
    args = parser.parse_args(sys.argv[1:])

    # load all folders following a prefix
    if args.prefix != "???":
        args.data_paths = []
        dirname = os.path.dirname(args.prefix)
        subdirprefix = os.path.basename(args.prefix)
        for subdirname in os.listdir(dirname):
            path = os.path.join(dirname,subdirname)
            if os.path.isdir(path) and (subdirprefix in subdirname):
                args.data_paths.append(path)
    print("Importing data from {path}...".format(path=args.data_paths))
    reload_data()
    # port = 5000
    # url = "http://0.0.0.0:{0}".format(port)
    print("Done! View http://localhost:%d in your browser" % args.port)
    app.run(host='0.0.0.0', port=args.port, debug=args.debug)
| 25,940 | 43.648881 | 131 | py |
rllab | rllab-master/contrib/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/contrib/alexbeloi/is_sampler.py | from rllab.algos.batch_polopt import BatchSampler
from math import exp, log
from numpy import var
import random
import copy
class ISSampler(BatchSampler):
"""
Sampler which alternates between live sampling iterations using BatchSampler
and importance sampling iterations.
"""
    def __init__(
            self,
            n_backtrack = 'all',
            n_is_pretrain=0,
            init_is=0,
            skip_is_itrs=False,
            hist_variance_penalty = 0.0,
            max_is_ratio = 0,
            ess_threshold = 0,
    ):
        """
        :type algo: BatchPolopt
        :param n_backtrack: Number of past policies to update from
        :param n_is_pretrain: Number of importance sampling iterations to
        perform in beginning of training
        :param init_is: (True/False) set initial iteration (after pretrain) an
        importance sampling iteration
        :param skip_is_itrs: (True/False) do not do any importance sampling
        iterations (after pretrain)
        :param hist_variance_penalty: penalize variance of historical policy
        :param max_is_ratio: maximum allowed importance sampling ratio
        :param ess_threshold: minimum effective sample size required
        """
        self.n_backtrack = n_backtrack
        self.n_is_pretrain = n_is_pretrain
        self.skip_is_itrs = skip_is_itrs
        self.hist_variance_penalty = hist_variance_penalty
        self.max_is_ratio = max_is_ratio
        self.ess_threshold = ess_threshold

        # History of (policy distribution, paths) pairs for IS iterations.
        self._hist = []
        # Toggled each iteration: truthy means the next one uses IS.
        self._is_itr = init_is

        super(ISSampler, self).__init__(algo)
@property
def history(self):
"""
History of policies that have interacted with the environment and the
data from interaction episode(s)
"""
return self._hist
def add_history(self, policy_distribution, paths):
"""
Store policy distribution and paths in history
"""
self._hist.append((policy_distribution, paths))
def get_history_list(self, n_past = 'all'):
"""
Get list of (distribution, data) tuples from history
"""
if n_past == 'all':
return self._hist
return self._hist[-min(n_past, len(self._hist)):]
def obtain_samples(self, itr):
# Importance sampling for first self.n_is_pretrain iterations
if itr < self.n_is_pretrain:
paths = self.obtain_is_samples(itr)
return paths
# Alternate between importance sampling and live sampling
if self._is_itr and not self.skip_is_itrs:
paths = self.obtain_is_samples(itr)
else:
paths = super(ISSampler, self).obtain_samples(itr)
if not self.skip_is_itrs:
self.add_history(self.algo.policy.distribution, paths)
self._is_itr = (self._is_itr + 1) % 2
return paths
def obtain_is_samples(self, itr):
paths = []
for hist_policy_distribution, hist_paths in self.get_history_list(self.n_backtrack):
h_paths = self.sample_isweighted_paths(
policy=self.algo.policy,
hist_policy_distribution=hist_policy_distribution,
max_samples=self.algo.batch_size,
max_path_length=self.algo.max_path_length,
paths=hist_paths,
hist_variance_penalty=self.hist_variance_penalty,
max_is_ratio=self.max_is_ratio,
ess_threshold=self.ess_threshold,
)
paths.extend(h_paths)
if len(paths) > self.algo.batch_size:
paths = random.sample(paths, self.algo.batch_size)
if self.algo.whole_paths:
return paths
else:
paths_truncated = parallel_sampler.truncate_paths(paths, self.algo.batch_size)
return paths_truncated
def sample_isweighted_paths(
self,
policy,
hist_policy_distribution,
max_samples,
max_path_length=100,
paths=None,
randomize_draw=False,
hist_variance_penalty=0.0,
max_is_ratio=10,
ess_threshold=0,
):
if not paths:
return []
n_paths = len(paths)
n_samples = min(len(paths), max_samples)
if randomize_draw:
samples = random.sample(paths, n_samples)
elif paths:
if n_samples == len(paths):
samples = paths
else:
start = random.randint(0,len(paths)-n_samples)
samples = paths[start:start+n_samples]
# make duplicate of samples so we don't permanently alter historical data
samples = copy.deepcopy(samples)
if ess_threshold > 0:
is_weights = []
dist1 = policy.distribution
dist2 = hist_policy_distribution
for path in samples:
_, agent_infos = policy.get_actions(path['observations'])
hist_agent_infos = path['agent_infos']
if hist_variance_penalty > 0:
hist_agent_infos['log_std'] += log(1.0+hist_variance_penalty)
path['agent_infos'] = agent_infos
# compute importance sampling weight
loglike_p = dist1.log_likelihood(path['actions'], agent_infos)
loglike_hp = dist2.log_likelihood(path['actions'], hist_agent_infos)
is_ratio = exp(sum(loglike_p) - sum(loglike_hp))
# thresholding knobs
if max_is_ratio > 0:
is_ratio = min(is_ratio, max_is_ratio)
if ess_threshold > 0:
is_weights.append(is_ratio)
# apply importance sampling weight
path['rewards'] *= is_ratio
if ess_threshold:
if kong_ess(is_weights) < ess_threshold:
return []
return samples
def kong_ess(weights):
return len(weights)/(1+var(weights))
| 6,004 | 33.119318 | 92 | py |
rllab | rllab-master/contrib/alexbeloi/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/contrib/alexbeloi/examples/trpois_cartpole.py | from rllab.algos.trpo import TRPO
from rllab.algos.tnpg import TNPG
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from contrib.alexbeloi.is_sampler import ISSampler
"""
Example using VPG with ISSampler, iterations alternate between live and
importance sampled iterations.
"""
env = normalize(CartpoleEnv())
policy = GaussianMLPPolicy(
env_spec=env.spec,
# The neural network policy should have two hidden layers, each with 32 hidden units.
hidden_sizes=(32, 32)
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
optimizer_args = dict(
# debug_nan=True,
# reg_coeff=0.1,
# cg_iters=2
)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=4000,
max_path_length=100,
n_itr=200,
discount=0.99,
step_size=0.01,
sampler_cls=ISSampler,
sampler_args=dict(n_backtrack=1),
optimizer_args=optimizer_args
)
algo.train()
| 1,096 | 23.931818 | 89 | py |
rllab | rllab-master/contrib/alexbeloi/examples/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/contrib/alexbeloi/examples/vpgis_cartpole.py | from rllab.algos.vpg import VPG
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from contrib.alexbeloi.is_sampler import ISSampler
"""
Example using VPG with ISSampler, iterations alternate between live and
importance sampled iterations.
"""
env = normalize(CartpoleEnv())
policy = GaussianMLPPolicy(
env_spec=env.spec,
# The neural network policy should have two hidden layers, each with 32 hidden units.
hidden_sizes=(32, 32)
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = VPG(
env=env,
policy=policy,
baseline=baseline,
batch_size=4000,
max_path_length=100,
n_itr=40,
discount=0.99,
step_size=0.01,
sampler_cls=ISSampler,
sampler_args=dict(n_backtrack=1),
)
algo.train()
| 938 | 25.083333 | 89 | py |
rllab | rllab-master/contrib/rllab_hyperopt/core.py | import os
import sys
sys.path.append('.')
import threading
import time
import warnings
import multiprocessing
import importlib
from rllab import config
from rllab.misc.instrument import run_experiment_lite
import polling
from hyperopt import fmin, tpe, STATUS_OK, STATUS_FAIL
from hyperopt.mongoexp import MongoTrials
class S3SyncThread(threading.Thread):
'''
Thread to periodically sync results from S3 in the background.
Uses same dirs as ./scripts/sync_s3.py.
'''
def __init__(self, sync_interval=60):
super(S3SyncThread, self).__init__()
self.sync_interval = sync_interval
self._stop_event = threading.Event()
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.isSet()
def run(self):
remote_dir = config.AWS_S3_PATH
local_dir = os.path.join(config.LOG_DIR, "s3")
command = ("""
aws s3 sync {remote_dir} {local_dir} --exclude '*stdout.log' --exclude '*stdouterr.log' --content-type "UTF-8"
""".format(local_dir=local_dir, remote_dir=remote_dir))
while True:
fail = os.system(command)
if fail:
warnings.warn("Problem running the s3 sync command. You might want to run ./scripts/sync_s3.py manually in a shell to inspect.")
if self.stopped():
break
time.sleep(self.sync_interval)
def _launch_workers(exp_key, n_workers, host, port, result_db_name):
jobs = []
for i in range(n_workers):
p = multiprocessing.Process(target=_launch_worker, args=(exp_key,i,host, port, result_db_name))
jobs.append(p)
p.start()
time.sleep(1)
return jobs
def _launch_worker(exp_key, worker_id, host, port, result_db_name):
command = "hyperopt-mongo-worker --mongo={h}:{p}/{db} --poll-interval=10 --exp-key={key} > hyperopt_worker{id}.log 2>&1"
command = command.format(h=host, p=port, db=result_db_name, key=exp_key, id=worker_id)
fail = os.system(command)
if fail:
raise RuntimeError("Problem starting hyperopt-mongo-worker.")
def _wait_result(exp_prefix, exp_name, timeout):
"""
Poll for the sync of params.pkl (currently hardcoded) from S3, indicating that the task is done.
:param exp_prefix: str, experiment name prefix (dir where results are expected to be stored)
:param exp_name: str, experiment name. Name of dir below exp_prefix where result files of individual run are
expected to be stored
:param timeout: int, polling timeout in seconds
:return bool. False if the polling times out. True if successful.
"""
result_path = os.path.join(config.LOG_DIR, "s3", exp_prefix, exp_name, 'params.pkl')
print("Polling for results in",result_path)
try:
file_handle = polling.poll(
lambda: open(result_path),
ignore_exceptions=(IOError,),
timeout=timeout,
step=60)
file_handle.close()
except polling.TimeoutException:
return False
return True
def _launch_ec2(func, exp_prefix, exp_name, params, run_experiment_kwargs):
print("Launching task", exp_name)
kwargs = dict(
n_parallel=1,
snapshot_mode="last",
seed=params.get("seed",None),
mode="ec2"
)
kwargs.update(run_experiment_kwargs)
kwargs.update(dict(
exp_prefix=exp_prefix,
exp_name=exp_name,
variant=params,
confirm_remote=False))
run_experiment_lite(func,**kwargs)
def _get_stubs(params):
module_str = params.pop('task_module')
func_str = params.pop('task_function')
eval_module_str = params.pop('eval_module')
eval_func_str = params.pop('eval_function')
module = importlib.import_module(module_str)
func = getattr(module, func_str)
eval_module = importlib.import_module(eval_module_str)
eval_func = getattr(eval_module, eval_func_str)
return func, eval_func
task_id = 1
def objective_fun(params):
global task_id
exp_prefix = params.pop("exp_prefix")
exp_name = "{exp}_{pid}_{id}".format(exp=exp_prefix, pid=os.getpid(), id=task_id)
max_retries = params.pop('max_retries', 0) + 1
result_timeout = params.pop('result_timeout')
run_experiment_kwargs = params.pop('run_experiment_kwargs', {})
func, eval_func = _get_stubs(params)
result_success = False
while max_retries > 0:
_launch_ec2(func, exp_prefix, exp_name, params, run_experiment_kwargs)
task_id += 1; max_retries -= 1
if _wait_result(exp_prefix, exp_name, result_timeout):
result_success = True
break
elif max_retries > 0:
print("Timed out waiting for results. Retrying...")
if not result_success:
print("Reached max retries, no results. Giving up.")
return {'status':STATUS_FAIL}
print("Results in! Processing.")
result_dict = eval_func(exp_prefix, exp_name)
result_dict['status'] = STATUS_OK
result_dict['params'] = params
return result_dict
def launch_hyperopt_search(
task_method,
eval_method,
param_space,
hyperopt_experiment_key,
hyperopt_db_host="localhost",
hyperopt_db_port=1234,
hyperopt_db_name="rllab",
n_hyperopt_workers=1,
hyperopt_max_evals=100,
result_timeout=1200,
max_retries=0,
run_experiment_kwargs=None):
"""
Launch a hyperopt search using EC2.
This uses the hyperopt parallel processing functionality based on MongoDB. The MongoDB server at the specified host
and port is assumed to be already running. Downloading and running MongoDB is pretty straightforward, see
https://github.com/hyperopt/hyperopt/wiki/Parallelizing-Evaluations-During-Search-via-MongoDB for instructions.
The parameter space to be searched over is specified in param_space. See https://github.com/hyperopt/hyperopt/wiki/FMin,
section "Defining a search space" for further info. Also see the (very basic) example in contrib.rllab_hyperopt.example.main.py.
NOTE: While the argument n_hyperopt_workers specifies the number of (local) parallel hyperopt workers to start, an equal
number of EC2 instances will be started in parallel!
NOTE2: Rllab currently terminates / starts a new EC2 instance for every task. This means what you'll pay amounts to
hyperopt_max_evals * instance_hourly_rate. So you might want to be conservative with hyperopt_max_evals.
:param task_method: the stubbed method call that runs the actual task. Should take a single dict as argument, with
the params to evaluate. See e.g. contrib.rllab_hyperopt.example.task.py
:param eval_method: the stubbed method call that reads in results returned from S3 and produces a score. Should take
the exp_prefix and exp_name as arguments (this is where S3 results will be synced to). See e.g.
contrib.rllab_hyperopt.example.score.py
:param param_space: dict specifying the param space to search. See https://github.com/hyperopt/hyperopt/wiki/FMin,
section "Defining a search space" for further info
:param hyperopt_experiment_key: str, the key hyperopt will use to store results in the DB
:param hyperopt_db_host: str, optional (default "localhost"). The host where mongodb runs
:param hyperopt_db_port: int, optional (default 1234), the port where mongodb is listening for connections
:param hyperopt_db_name: str, optional (default "rllab"), the DB name where hyperopt will store results
:param n_hyperopt_workers: int, optional (default 1). The nr of parallel workers to start. NOTE: an equal number of
EC2 instances will be started in parallel.
:param hyperopt_max_evals: int, optional (defailt 100). Number of parameterset evaluations hyperopt should try.
NOTE: Rllab currently terminates / starts a new EC2 instance for every task. This means what you'll pay amounts to
hyperopt_max_evals * instance_hourly_rate. So you might want to be conservative with hyperopt_max_evals.
:param result_timeout: int, optional (default 1200). Nr of seconds to wait for results from S3 for a given task. If
results are not in within this time frame, <max_retries> new attempts will be made. A new attempt entails launching
the task again on a new EC2 instance.
:param max_retries: int, optional (default 0). Number of times to retry launching a task when results don't come in from S3
:param run_experiment_kwargs: dict, optional (default None). Further kwargs to pass to run_experiment_lite. Note that
specified values for exp_prefix, exp_name, variant, and confirm_remote will be ignored.
:return the best result as found by hyperopt.fmin
"""
exp_key = hyperopt_experiment_key
worker_args = {'exp_prefix':exp_key,
'task_module':task_method.__module__,
'task_function':task_method.__name__,
'eval_module':eval_method.__module__,
'eval_function':eval_method.__name__,
'result_timeout':result_timeout,
'max_retries':max_retries}
worker_args.update(param_space)
if run_experiment_kwargs is not None:
worker_args['run_experiment_kwargs'] = run_experiment_kwargs
trials = MongoTrials('mongo://{0}:{1:d}/{2}/jobs'.format(hyperopt_db_host, hyperopt_db_port, hyperopt_db_name),
exp_key=exp_key)
workers = _launch_workers(exp_key, n_hyperopt_workers, hyperopt_db_host, hyperopt_db_port, hyperopt_db_name)
s3sync = S3SyncThread()
s3sync.start()
print("Starting hyperopt")
best = fmin(objective_fun, worker_args, trials=trials, algo=tpe.suggest, max_evals=hyperopt_max_evals)
s3sync.stop()
s3sync.join()
for worker in workers:
worker.terminate()
return best
| 10,043 | 42.107296 | 144 | py |
rllab | rllab-master/contrib/rllab_hyperopt/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/contrib/rllab_hyperopt/example/main.py | '''
Main module to launch an example hyperopt search on EC2.
Launch this from outside the rllab main dir. Otherwise, rllab will try to ship the logfiles being written by this process,
which will fail because tar doesn't want to tar files that are being written to. Alternatively, disable the packaging of
log files by rllab, but I couldn't quickly find how to do this.
You can use Jupyter notebook visualize_hyperopt_results.ipynb to inspect results.
'''
from hyperopt import hp
from contrib.rllab_hyperopt.core import launch_hyperopt_search
# the functions to run the task and process result do not need to be in separate files. They do need to be separate from
# the main file though. Also, anything you import in the module that contains run_task needs to be on the Rllab AMI.
# Therefore, since I use pandas to process results, I have put them in separate files here.
from contrib.rllab_hyperopt.example.score import process_result
from contrib.rllab_hyperopt.example.task import run_task
# define a search space. See https://github.com/hyperopt/hyperopt/wiki/FMin, sect 2 for more detail
param_space = {'step_size': hp.uniform('step_size', 0.01, 0.1),
'seed': hp.choice('seed',[0, 1, 2])}
# just by way of example, pass a different config to run_experiment_lite
run_experiment_kwargs = dict(
n_parallel=16,
aws_config=dict(instance_type="c4.4xlarge",spot_price='0.7')
)
launch_hyperopt_search(
run_task, # the task to run
process_result, # the function that will process results and return a score
param_space, # param search space
hyperopt_experiment_key='test12', # key for hyperopt DB, and also exp_prefix for run_experiment_lite
n_hyperopt_workers=3, # nr of local workers AND nr of EC2 instances that will be started in parallel
hyperopt_max_evals=5, # nr of parameter values to eval
result_timeout=600, # wait this long for results from S3 before timing out
run_experiment_kwargs=run_experiment_kwargs) # additional kwargs to pass to run_experiment_lite | 2,177 | 57.864865 | 122 | py |
rllab | rllab-master/contrib/rllab_hyperopt/example/score.py | import os
import pandas as pd
from rllab import config
def process_result(exp_prefix, exp_name):
# Open the default rllab path for storing results
result_path = os.path.join(config.LOG_DIR, "s3", exp_prefix, exp_name, 'progress.csv')
print("Processing result from",result_path)
# This example uses pandas to easily read in results and create a simple smoothed learning curve
df = pd.read_csv(result_path)
curve = df['AverageReturn'].rolling(window=max(1,int(0.05*df.shape[0])), min_periods=1, center=True).mean().values.flatten()
max_ix = curve.argmax()
max_score = curve.max()
# The result dict can contain arbitrary values, but ALWAYS needs to have a "loss" entry.
return dict(
max_score=max_score,
max_iter=max_ix,
scores=curve, # returning the curve allows you to plot best, worst etc curve later
loss=-max_score
) | 911 | 38.652174 | 128 | py |
rllab | rllab-master/contrib/rllab_hyperopt/example/task.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.box2d.cartpole_env import CartpoleEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
def run_task(v):
env = normalize(CartpoleEnv())
policy = GaussianMLPPolicy(
env_spec=env.spec,
# The neural network policy should have two hidden layers, each with 32 hidden units.
hidden_sizes=(32, 32)
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=4000,
max_path_length=100,
n_itr=40,
discount=0.99,
step_size=v["step_size"],
# Uncomment both lines (this and the plot parameter below) to enable plotting
# plot=True,
)
algo.train() | 918 | 29.633333 | 93 | py |
rllab | rllab-master/contrib/rllab_hyperopt/example/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/contrib/bichengcao/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/contrib/bichengcao/examples/trpo_gym_MountainCar-v0.py | # This doesn't work. After 150 iterations still didn't learn anything.
from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.policies.categorical_mlp_policy import CategoricalMLPPolicy
def run_task(*_):
env = normalize(GymEnv("MountainCar-v0"))
policy = CategoricalMLPPolicy(
env_spec=env.spec,
hidden_sizes=(32, 32)
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=4000,
max_path_length=env.horizon,
n_itr=150,
discount=0.99,
step_size=0.1,
plot=True,
)
algo.train()
run_experiment_lite(
run_task,
n_parallel=1,
snapshot_mode="last",
plot=True,
)
| 965 | 22.560976 | 73 | py |
rllab | rllab-master/contrib/bichengcao/examples/trpo_gym_CartPole-v0.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.policies.categorical_mlp_policy import CategoricalMLPPolicy
def run_task(*_):
env = normalize(GymEnv("CartPole-v0"))
policy = CategoricalMLPPolicy(
env_spec=env.spec,
hidden_sizes=(32, 32)
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=4000,
max_path_length=env.horizon,
n_itr=50,
discount=0.99,
step_size=0.01,
plot=True,
)
algo.train()
run_experiment_lite(
run_task,
n_parallel=1,
snapshot_mode="last",
plot=True,
)
| 890 | 21.846154 | 73 | py |
rllab | rllab-master/contrib/bichengcao/examples/trpo_gym_CartPole-v1.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.policies.categorical_mlp_policy import CategoricalMLPPolicy
def run_task(*_):
env = normalize(GymEnv("CartPole-v1"))
policy = CategoricalMLPPolicy(
env_spec=env.spec,
hidden_sizes=(32, 32)
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=4000,
max_path_length=env.horizon,
n_itr=50,
discount=0.99,
step_size=0.01,
plot=True,
)
algo.train()
run_experiment_lite(
run_task,
n_parallel=1,
snapshot_mode="last",
plot=True,
)
| 890 | 21.846154 | 73 | py |
rllab | rllab-master/contrib/bichengcao/examples/trpo_gym_Acrobot-v1.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.policies.categorical_mlp_policy import CategoricalMLPPolicy
def run_task(*_):
env = normalize(GymEnv("Acrobot-v1"))
policy = CategoricalMLPPolicy(
env_spec=env.spec,
hidden_sizes=(32, 32)
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=4000,
max_path_length=env.horizon,
n_itr=50,
discount=0.99,
step_size=0.01,
plot=True,
)
algo.train()
run_experiment_lite(
run_task,
n_parallel=1,
snapshot_mode="last",
plot=True,
)
| 889 | 21.820513 | 73 | py |
rllab | rllab-master/contrib/bichengcao/examples/trpo_gym_Pendulum-v0.py | from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
def run_task(*_):
env = normalize(GymEnv("Pendulum-v0"))
policy = GaussianMLPPolicy(
env_spec=env.spec,
hidden_sizes=(32, 32)
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=4000,
max_path_length=env.horizon,
n_itr=50,
discount=0.99,
step_size=0.01,
plot=True,
)
algo.train()
run_experiment_lite(
run_task,
n_parallel=1,
snapshot_mode="last",
plot=True,
)
| 881 | 21.615385 | 73 | py |
rllab | rllab-master/contrib/bichengcao/examples/__init__.py | 0 | 0 | 0 | py | |
complex | complex-master/wn18_run.py | #import scipy.io
import efe
from efe.exp_generators import *
import efe.tools as tools
if __name__ =="__main__":
#Load data, ensure that data is at path: 'path'/'name'/[train|valid|test].txt
wn18exp = build_data(name = 'wn18',path = tools.cur_path + '/datasets/')
#SGD hyper-parameters:
params = Parameters(learning_rate = 0.5,
max_iter = 1000,
batch_size = int(len(wn18exp.train.values) / 100), #Make 100 batches
neg_ratio = 1,
valid_scores_every = 50,
learning_rate_policy = 'adagrad',
contiguous_sampling = False )
#Here each model is identified by its name, i.e. the string of its class name in models.py
#Parameters given here are the best ones for each model, validated from the grid-search described in the paper
all_params = { "Complex_Logistic_Model" : params } ; emb_size = 150; lmbda =0.03;
#all_params = { "DistMult_Logistic_Model" : params } ; emb_size = 200; lmbda =0.003; params.learning_rate = 1.0
#all_params = { "CP_Logistic_Model" : params } ; emb_size = 100; lmbda =0.1;
#all_params = { "Rescal_Logistic_Model" : params } ; emb_size = 50; lmbda =0.1
#all_params = { "TransE_L2_Model" : params } ; emb_size = 200; lmbda = 0.5 ; params.learning_rate=0.01
tools.logger.info( "Learning rate: " + str(params.learning_rate))
tools.logger.info( "Max iter: " + str(params.max_iter))
tools.logger.info( "Generated negatives ratio: " + str(params.neg_ratio))
tools.logger.info( "Batch size: " + str(params.batch_size))
#Then call a local grid search, here only with one value of rank and regularization
wn18exp.grid_search_on_all_models(all_params, embedding_size_grid = [emb_size], lmbda_grid = [lmbda], nb_runs = 1)
#Print best averaged metrics:
wn18exp.print_best_MRR_and_hits()
#Print best averaged metrics per relation:
wn18exp.print_best_MRR_and_hits_per_rel()
#Save ComplEx embeddings (last trained model, not best on grid search if multiple embedding sizes and lambdas)
#e1 = wn18exp.models["Complex_Logistic_Model"][0].e1.get_value(borrow=True)
#e2 = wn18exp.models["Complex_Logistic_Model"][0].e2.get_value(borrow=True)
#r1 = wn18exp.models["Complex_Logistic_Model"][0].r1.get_value(borrow=True)
#r2 = wn18exp.models["Complex_Logistic_Model"][0].r2.get_value(borrow=True)
#scipy.io.savemat('complex_embeddings.mat', \
# {'entities_real' : e1, 'relations_real' : r1, 'entities_imag' : e2, 'relations_imag' : r2 })
| 2,420 | 42.232143 | 115 | py |
complex | complex-master/fb15k_run.py | #import scipy.io
import efe
from efe.exp_generators import *
import efe.tools as tools
if __name__ =="__main__":
#Load data, ensure that data is at path: 'path'/'name'/[train|valid|test].txt
fb15kexp = build_data(name = 'fb15k',path = tools.cur_path + '/datasets/')
#SGD hyper-parameters:
params = Parameters(learning_rate = 0.5,
max_iter = 1000,
batch_size = int(len(fb15kexp.train.values) / 100), #Make 100 batches
neg_ratio = 10,
valid_scores_every = 50,
learning_rate_policy = 'adagrad',
contiguous_sampling = False )
#Here each model is identified by its name, i.e. the string of its class name in models.py
#Parameters given here are the best ones for each model, validated from the grid-search described in the paper
all_params = { "Complex_Logistic_Model" : params } ; emb_size = 200; lmbda =0.01
#all_params = { "DistMult_Logistic_Model" : params } ; emb_size = 200; lmbda =0.01
#all_params = { "CP_Logistic_Model" : params } ; emb_size = 150; lmbda =0.03
#all_params = { "Rescal_Logistic_Model" : params } ; emb_size = 150; lmbda =0.3
#all_params = { "TransE_L1_Model" : params } ; emb_size = 100; lmbda =2.0 ; params.neg_ratio=1; params.learning_rate=0.01
tools.logger.info( "Learning rate: " + str(params.learning_rate))
tools.logger.info( "Max iter: " + str(params.max_iter))
tools.logger.info( "Generated negatives ratio: " + str(params.neg_ratio))
tools.logger.info( "Batch size: " + str(params.batch_size))
#Then call a local grid search, here only with one value of rank and regularization
fb15kexp.grid_search_on_all_models(all_params, embedding_size_grid = [emb_size], lmbda_grid = [lmbda], nb_runs = 1)
#Print best averaged metrics:
fb15kexp.print_best_MRR_and_hits()
#Save ComplEx embeddings (last trained model, not best on grid search if multiple embedding sizes and lambdas)
#e1 = fb15kexp.models["Complex_Logistic_Model"][0].e1.get_value(borrow=True)
#e2 = fb15kexp.models["Complex_Logistic_Model"][0].e2.get_value(borrow=True)
#r1 = fb15kexp.models["Complex_Logistic_Model"][0].r1.get_value(borrow=True)
#r2 = fb15kexp.models["Complex_Logistic_Model"][0].r2.get_value(borrow=True)
#scipy.io.savemat('complex_embeddings.mat', \
# {'entities_real' : e1, 'relations_real' : r1, 'entities_imag' : e2, 'relations_imag' : r2 })
| 2,332 | 42.203704 | 122 | py |
complex | complex-master/efe/experiment.py | import uuid
import time
import subprocess
import numpy as np
from .tools import *
from .evaluation import *
from . import models
class Experiment(object):
def __init__(self, name, train, valid, test, positives_only = False, compute_ranking_scores = False, entities_dict = None, relations_dict =None) :
"""
An experiment is defined by its train and test set, which are two Triplets_set objects.
"""
self.name = name
self.train = train
self.valid = valid
self.test = test
self.train_tensor = None
self.train_mask = None
self.positives_only = positives_only
self.entities_dict = entities_dict
self.relations_dict = relations_dict
if valid is not None:
self.n_entities = len(np.unique(np.concatenate((train.indexes[:,0], train.indexes[:,2], valid.indexes[:,0], valid.indexes[:,2], test.indexes[:,0], test.indexes[:,2]))))
self.n_relations = len(np.unique(np.concatenate((train.indexes[:,1], valid.indexes[:,1], test.indexes[:,1]))))
else:
self.n_entities = len(np.unique(np.concatenate((train.indexes[:,0], train.indexes[:,2], test.indexes[:,0], test.indexes[:,2]))))
self.n_relations = len(np.unique(np.concatenate((train.indexes[:,1], test.indexes[:,1]))))
logger.info("Nb entities: " + str(self.n_entities))
logger.info( "Nb relations: " + str(self.n_relations))
logger.info( "Nb obs triples: " + str(train.indexes.shape[0]))
self.scorer = Scorer(train, valid, test, compute_ranking_scores)
#The trained models are stored indexed by name
self.models = {}
#The test Results are stored indexed by model name
self.valid_results = CV_Results()
self.results = CV_Results()
def grid_search_on_all_models(self, params, embedding_size_grid = [1,2,3,4,5,6,7,8,9,10], lmbda_grid = [0.1], nb_runs = 10):
"""
Here params is a dictionnary of Parameters, indexed by the names of each model, that
must match with the model class names
"""
#Clear previous results:
self.results = CV_Results()
self.valid_results = CV_Results()
for model_s in params:
logger.info("Starting grid search on: " + model_s)
#Getting train and test function using model string id:
cur_params = params[model_s]
for embedding_size in embedding_size_grid:
for lmbda in lmbda_grid:
cur_params.embedding_size = embedding_size
cur_params.lmbda = lmbda
for run in range(nb_runs):
self.run_model(model_s,cur_params)
self.test_model(model_s)
logger.info("Grid search finished")
def run_model(self,model_s,params):
"""
Generic training for any model, model_s is the class name of the model class defined in module models
"""
#Reuse ancient model if already exist:
if model_s in self.models:
model = self.models[model_s][0]
else: #Else construct it:
model = vars(models)[model_s]()
self.models[model_s] = (model, params)
#Pass a copy of the params object, for TransE handling of neg_ratio > 1
model.fit(self.train, self.valid, Parameters(**vars(params)), self.n_entities, self.n_relations, self.n_entities, self.scorer)
def test_model(self, model_s):
"""
Generic testing for any model, model_s is the class name of the model class defined in module models
"""
model, params = self.models[model_s]
if self.valid is not None:
res = self.scorer.compute_scores(model, model_s, params, self.valid)
self.valid_results.add_res(res, model_s, params.embedding_size, params.lmbda, model.nb_params)
res = self.scorer.compute_scores(model, model_s, params, self.test)
self.results.add_res(res, model_s, params.embedding_size, params.lmbda, model.nb_params)
def print_best_MRR_and_hits(self):
"""
Print best results on validation set, and corresponding scores (with same hyper params) on test set
"""
logger.info( "Validation metrics:")
metrics = self.valid_results.print_MRR_and_hits()
logger.info( "Corresponding Test metrics:")
for model_s, (best_rank, best_lambda, _,_,_,_,_) in metrics.items():
self.results.print_MRR_and_hits_given_params(model_s, best_rank, best_lambda)
def print_best_MRR_and_hits_per_rel(self):
"""
Print best results on validation set, and corresponding scores (with same hyper params) on test set
"""
logger.info( "Validation metrics:")
metrics = self.valid_results.print_MRR_and_hits()
logger.info( "Corresponding per relation Test metrics:" )
for rel_name, rel_idx in self.relations_dict.items():
logger.info( rel_name )
this_rel_row_idxs = self.test.indexes[:,1] == rel_idx
this_rel_test_indexes = self.test.indexes[ this_rel_row_idxs ,:]
this_rel_test_values = self.test.values[ this_rel_row_idxs ]
this_rel_set = Triplets_set(this_rel_test_indexes,this_rel_test_values)
for model_s, (best_rank, best_lambda, _,_,_,_,_) in metrics.items():
rel_cv_results = self.results.extract_sub_scores( this_rel_row_idxs)
rel_cv_results.print_MRR_and_hits_given_params(model_s, best_rank, best_lambda)
| 4,954 | 33.172414 | 171 | py |
complex | complex-master/efe/tools.py | import sys,os
import logging
import numpy as np
import colorsys
#Current path of this module (note: realpath of basename resolves relative to
#the current working directory — TODO confirm this is the intended behavior)
cur_path = os.path.dirname(os.path.realpath( os.path.basename(__file__)))
#Logging: one package-wide logger writing tab-separated records to stderr.
logger = logging.getLogger("EFE")
logger.setLevel(logging.DEBUG)
# Don't forward records to the root logger (avoids duplicate output).
logger.propagate = False
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s\t(%(name)s)\t[%(levelname)s]\t%(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
#Plotting colors generation
def _get_colors(num_colors):
colors=[]
for i in np.arange(0., 360., 360. / num_colors):
hue = i/360.
lightness = (50 + np.random.rand() * 10)/100.
saturation = (90 + np.random.rand() * 10)/100.
colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))
return colors
#colors = ['g','c','r','b','m','k','y',"orange",'indigo','salmon','crimson','hotpink','saddlebrown','lightgreen','yellowgreen','peru','gray','darkred']
colors = _get_colors(25)
#Normal random tensor generation
def randn(*args):
    """Sample a standard-normal array of the given shape, cast to float32."""
    sample = np.random.randn(*args)
    return sample.astype('f')
#Projection onto the L2 sphere (TransE embeddings)
def L2_proj(M):
    """
    Normalize every row of M to unit L2 norm, in place, and return M.
    Used to enforce the TransE unit-norm constraint on embeddings.
    """
    M /= np.linalg.norm(M, axis=1)[:, None]
    return M
#Tool classes:
class Parameters(object):
    """
    Bag of training hyper-parameters shared by all models.

    lmbda: L2 regularization strength (reused as the margin for TransE).
    embedding_size: rank of the embeddings/factorization.
    batch_size: minibatch size for SGD.
    max_iter: maximum number of parameter updates.
    learning_rate: step size for gradient-based optimizers.
    neg_ratio: number of negative triples generated per positive one.
    contiguous_sampling: sequential (True) vs random (False) batch sampling.
    valid_scores_every: validation-scoring period, for early stopping.
    learning_rate_policy: optimizer name, see downhill documentation.
    """
    def __init__(self, lmbda = 0.1, embedding_size = 100, batch_size =100, max_iter = 1000, learning_rate = 0.1, neg_ratio = 0, contiguous_sampling = False,
            valid_scores_every = 50, learning_rate_policy = 'adagrad'):
        # Store every constructor argument verbatim as an attribute.
        self.lmbda = lmbda
        self.embedding_size = embedding_size
        self.batch_size = batch_size
        self.max_iter = max_iter
        self.learning_rate = learning_rate
        self.neg_ratio = neg_ratio
        self.contiguous_sampling = contiguous_sampling
        self.valid_scores_every = valid_scores_every
        self.learning_rate_policy = learning_rate_policy
class Triplets_set(object):
    """
    Container for a set of labeled triples: `indexes` is an (n, 3) int64
    array of (subject, relation, object) ids and `values` the matching
    n-vector of float32 labels (1 or -1).
    """
    def __init__(self, indexes, values):
        # Explicit casts guarantee the dtypes expected by the theano losses
        # defined through downhill.
        self.indexes = indexes.astype(np.int64)
        self.values = values.astype(np.float32)
| 2,662 | 31.084337 | 153 | py |
complex | complex-master/efe/batching.py | from .tools import *
class Batch_Loader(object):
    """
    Callable producing one training minibatch per call, optionally augmented
    with negatives obtained by corrupting the subject or object of positives.

    Each call returns [values, subject_idxs, relation_idxs, object_idxs].
    """
    def __init__(self, train_triples, n_entities, batch_size=100, neg_ratio = 0.0, contiguous_sampling = False):
        self.train_triples = train_triples
        self.batch_size = batch_size
        self.n_entities = n_entities
        self.contiguous_sampling = contiguous_sampling
        self.neg_ratio = int(neg_ratio)
        # Cursor into the training set, used only in contiguous mode.
        self.idx = 0
        # Pre-allocated buffers reused across calls: room for one positive
        # batch plus neg_ratio corrupted copies of it.
        self.new_triples_indexes = np.empty((self.batch_size * (self.neg_ratio + 1) , 3), dtype=np.int64)
        self.new_triples_values = np.empty((self.batch_size * (self.neg_ratio + 1 )), dtype=np.float32)
    def __call__(self):
        if self.contiguous_sampling:
            # Sequential scan over the training set, wrapping around at the end.
            if self.idx >= len(self.train_triples.values):
                self.idx = 0
            b = self.idx
            e = self.idx + self.batch_size
            this_batch_size = len(self.train_triples.values[b:e]) #Manage shorter batches (last ones)
            self.new_triples_indexes[:this_batch_size,:] = self.train_triples.indexes[b:e]
            self.new_triples_values[:this_batch_size] = self.train_triples.values[b:e]
            self.idx += this_batch_size
            last_idx = this_batch_size
        else:
            # Uniform sampling with replacement.
            idxs = np.random.randint(0,len(self.train_triples.values),self.batch_size)
            self.new_triples_indexes[:self.batch_size,:] = self.train_triples.indexes[idxs,:]
            self.new_triples_values[:self.batch_size] = self.train_triples.values[idxs]
            last_idx = self.batch_size
        if self.neg_ratio > 0:
            #Pre-sample everything, faster
            rdm_entities = np.random.randint(0, self.n_entities, last_idx * self.neg_ratio)
            # True -> corrupt the subject, False -> corrupt the object.
            rdm_choices = np.random.random(last_idx * self.neg_ratio) < 0.5
            #Pre-copy the positive batch neg_ratio times into the tail of the buffers
            self.new_triples_indexes[last_idx:(last_idx*(self.neg_ratio+1)),:] = np.tile(self.new_triples_indexes[:last_idx,:],(self.neg_ratio,1))
            self.new_triples_values[last_idx:(last_idx*(self.neg_ratio+1))] = np.tile(self.new_triples_values[:last_idx], self.neg_ratio)
            for i in range(last_idx):
                for j in range(self.neg_ratio):
                    cur_idx = i* self.neg_ratio + j
                    #Sample a random subject or object
                    if rdm_choices[cur_idx]:
                        self.new_triples_indexes[last_idx + cur_idx,0] = rdm_entities[cur_idx]
                    else:
                        self.new_triples_indexes[last_idx + cur_idx,2] = rdm_entities[cur_idx]
                    # Corrupted triples are labeled negative.
                    self.new_triples_values[last_idx + cur_idx] = -1
            # After the loops cur_idx == last_idx * neg_ratio - 1, so this
            # sets last_idx to last_idx * (neg_ratio + 1): the full batch size.
            last_idx += cur_idx + 1
        train = [self.new_triples_values[:last_idx], self.new_triples_indexes[:last_idx,0], self.new_triples_indexes[:last_idx,1], self.new_triples_indexes[:last_idx,2]]
        return train
class TransE_Batch_Loader(Batch_Loader):
    """
    Batch loader for TransE: same sampling as Batch_Loader, but the target
    values are dropped (the margin loss only needs indexes) and the entity
    embeddings are re-projected onto the unit L2 sphere before every batch.
    """
    def __init__(self, model, train_triples, n_entities, batch_size=100, neg_ratio = 0.0, contiguous_sampling = False):
        super(TransE_Batch_Loader, self).__init__(train_triples, n_entities, batch_size, neg_ratio, contiguous_sampling)
        self.model = model
    def __call__(self):
        batch = super(TransE_Batch_Loader, self).__call__()
        # Hacky trick: enforce the TransE unit-norm constraint on entity
        # embeddings before each update.
        entity_embeddings = self.model.e.get_value(borrow = True)
        self.model.e.set_value(L2_proj(entity_embeddings), borrow = True)
        # Drop the ys vector (index 0).
        return batch[1:]
| 3,033 | 35.119048 | 163 | py |
complex | complex-master/efe/models.py | """
Define all model classes following the definition of Abstract_Model.
"""
import downhill
import theano
import theano.tensor as TT
# All models use single-precision floats.
data_type = 'float32'
#Single precision:
theano.config.floatX = data_type
theano.config.mode = 'FAST_RUN' # 'Mode', 'ProfileMode'(deprecated), 'DebugMode', 'FAST_RUN', 'FAST_COMPILE'
# Verbose error reports when a theano graph fails to evaluate.
theano.config.exception_verbosity = 'high'
from .tools import *
from .batching import *
from .evaluation import *
class Abstract_Model(object):
    """
    Base class of all link-prediction models.

    Child classes implement get_init_params() (initial values of the shared
    parameters) and define_loss() (symbolic prediction, loss and
    regularization expressions). Everything else -- parameter allocation,
    downhill-based SGD with early stopping, and prediction -- lives here.
    """
    def __init__(self):
        self.name = self.__class__.__name__
        #Symbolic expressions for the prediction function (and compiled one too), the loss, the regularization, and the loss to
        #optimize (loss + lmbda * regul)
        #To be defined by the child classes:
        self.pred_func = None
        self.pred_func_compiled = None
        self.loss_func = None  # NOTE(review): children set self.loss instead; this attribute looks unused — confirm
        self.regul_func = None
        self.loss_to_opt = None
        #Symbolic variables for training values
        self.ys = TT.vector('ys')
        self.rows = TT.lvector('rows')
        self.cols = TT.lvector('cols')
        self.tubes = TT.lvector('tubes')
        #Current values for which the loss is currently compiled
        #3 dimensions:
        self.n = 0 #Number of subject entities
        self.m = 0 #Number of relations
        self.l = 0 #Number of object entities
        #and rank:
        self.k = 0
        #and corresponding number of parameters (i.e. n*k + m*k + l*k for CP_Model)
        self.nb_params = 0
    def set_dims(self,train_triples, hparams):
        # Infer tensor dimensions from the largest index seen in training.
        self.n = max(train_triples.indexes[:,0]) +1
        self.m = max(train_triples.indexes[:,1]) +1
        self.l = max(train_triples.indexes[:,2]) +1
        self.k = hparams.embedding_size
    def get_pred_symb_vars(self):
        """
        Returns the default pred symbolic input variables. Made to be overriden by child classes if need of additional input variables
        """
        pred_inputs=[self.rows, self.cols, self.tubes]
        return pred_inputs
    def get_pred_args(self,test_idxs):
        """
        Returns the default pred arguments (columns of the test index matrix). Made to be overriden by child classes if need of additional input variables
        """
        return [test_idxs[:,0], test_idxs[:,1], test_idxs[:,2]]
    def get_loss_args_and_symb_vars(self, train_triples, valid_triples, hparams):
        """
        Returns the default loss parameters and corresponding symbolic variables. Made to be overriden by child classes if need of additional input variables
        """
        train = Batch_Loader(train_triples, n_entities = max(self.n,self.l), batch_size = hparams.batch_size, neg_ratio = hparams.neg_ratio, contiguous_sampling = hparams.contiguous_sampling )
        inputs=[self.ys, self.rows, self.cols, self.tubes]
        if valid_triples is not None:
            valid = Batch_Loader(valid_triples, n_entities = max(self.n,self.l), batch_size = hparams.batch_size, neg_ratio = hparams.neg_ratio, contiguous_sampling = hparams.contiguous_sampling )
            #valid = [valid_triples.values[:], valid_triples.indexes[:,0], valid_triples.indexes[:,1], valid_triples.indexes[:,2]]
        else:
            valid = None
        return train, inputs, valid
    def allocate_params(self):
        """Allocate model parameters as theano shared variables."""
        nb_params=0
        #Call child class getter of initial values of model parameters:
        params = self.get_init_params()
        #And allocate them as theano shared variables
        for name, val in params.items():
            setattr(self, name, theano.shared(val, name = name) )
            nb_params += val.size
        self.nb_params = nb_params
    def reinit_params(self):
        """Reset already-allocated shared variables to fresh initial values."""
        nb_params=0
        #Call child class getter of initial values of model parameters:
        params = self.get_init_params()
        #And set their values
        for name, val in params.items():
            getattr(self, name).set_value(val, borrow = True)
            nb_params += val.size
        self.nb_params = nb_params
    def setup_params_for_train(self,train_triples, valid_triples, hparams, redefine_loss = False):
        """
        Calls the define_loss function that gives the model loss to minimize,
        (re)allocating parameters and compiling the prediction function as needed.
        """
        #Check if need to redefine the loss function or not:
        if redefine_loss or self.loss_to_opt is None:
            #Initialize parameters (child class overriding):
            self.allocate_params()
            #Defining the model (child class overriding):
            self.define_loss()
            #Compile the prediction functions:
            self.pred_func_compiled = theano.function(self.get_pred_symb_vars(), self.pred_func)
        else:
            #Just reinit the params
            self.reinit_params()
        #Combine loss and regularization to get what downhill will optimize:
        #Changing the scalar value lmbda in the function actually doesn't make theano to recompile everything, it's as fast as not changing anything
        self.loss_to_opt = self.loss + hparams.lmbda * self.regul_func
    def fit(self, train_triples, valid_triples, hparams, n=0,m=0,l=0, scorer = None):
        """
        Train with SGD (downhill), periodically scoring on the validation set
        and early-stopping on filtered MRR (or AP when ranking is disabled).
        """
        #Set input_dimensions:
        if n == 0: #No given dimensions, can be useful for transparent prediction of entities/rels not seen in train
            self.set_dims(train_triples, hparams)
        else:
            self.n, self.m, self.l, self.k = n, m, l, hparams.embedding_size
        #Define the downhill loss corresponding to the input dimensions
        self.setup_params_for_train(train_triples, valid_triples, hparams)
        #get the loss inputs:
        train_vals, train_symbs, valid_vals = self.get_loss_args_and_symb_vars(train_triples, valid_triples, hparams)
        opt = downhill.build(hparams.learning_rate_policy, loss=self.loss_to_opt, inputs=train_symbs, monitor_gradients=True)
        train_vals = downhill.Dataset(train_vals, name = 'train')
        #Main SGD loop
        it = 0
        best_valid_mrr = -1
        best_valid_ap = -1
        for tm, vm in opt.iterate(train_vals, None,
                max_updates=hparams.max_iter,
                validate_every=9999999, #I take care of the validation, with validation metrics instead of loss
                patience=9999999, #Number of tolerated improvements of validation loss that are inferior to min_improvement
                max_gradient_norm=1, # Prevent gradient explosion!
                learning_rate=hparams.learning_rate):
            if it % hparams.valid_scores_every == 0 and scorer is not None:
                if valid_triples is not None:
                    logger.info("Validation metrics:")
                    res = scorer.compute_scores(self, self.name, hparams, valid_triples)
                    cv_res = CV_Results()
                    cv_res.add_res(res, self.name, hparams.embedding_size, hparams.lmbda, self.nb_params)
                    if scorer.compute_ranking_scores:
                        # metrics[name] = (rank, lambda, mrr, raw_mrr, h@1, h@3, h@10)
                        metrics = cv_res.print_MRR_and_hits()
                        #Early stopping on filtered MRR
                        if best_valid_mrr >= metrics[self.name][2]:
                            logger.info("Validation filtered MRR decreased, stopping here.")
                            break
                        else:
                            best_valid_mrr = metrics[self.name][2]
                    else:
                        logger.info("Validation AP: " + str(res.ap))
                        #Early stopping on Average Precision
                        if best_valid_ap >= res.ap:
                            logger.info("Validation AP decreased, stopping here.")
                            break
                        else:
                            best_valid_ap = res.ap
            it += 1
            if it >= hparams.max_iter: #Avoid downhill resetting the parameters when max_iter is reached
                break
    def predict(self, test_idxs):
        """
        test_idxs is a 2D numpy array of size [a,3], containing the indexes of the test triples.
        Returns a vector of size a.
        """
        return self.pred_func_compiled( *self.get_pred_args(test_idxs) )
    ################ Abstract Functions: ('must' be defined by child classes)
    def get_init_params(self,n,m,l,k):
        """
        Abstract. Returns a dict of the initial values of shared variables, indexed by the class attribute name (string).
        """
        pass
    def define_loss(self):
        """
        Abstract. Define the loss of the model in the child class for a given input (theano shared variables
        must have the correct dimensions at loss definition). Implies an initialization of the parameters
        (shared variables), and the initialization of three functions (prediction, loss and regularization)
        as symbolic theano expressions: self.pred_func, self.loss_func and self.regul_func (mandatory).
        """
        pass
#################################################################################################
# Definition of models (child classes):
#################################################################################################
class CP_Model(Abstract_Model):
    """
    Canonical Polyadic (CP/PARAFAC) decomposition with squared loss: separate
    embedding tables for subjects (u), relations (v) and objects (w).
    """
    def __init__(self):
        super(CP_Model, self).__init__()
        self.name = self.__class__.__name__
        # Embedding matrices, allocated lazily at training time:
        self.u = None
        self.v = None
        self.w = None
    def get_init_params(self):
        n_ent = max(self.n, self.l)
        return {
            'u': randn(n_ent, self.k),
            'v': randn(self.m, self.k),
            'w': randn(n_ent, self.k),
        }
    def define_loss(self):
        # Batched embedding lookups for the current minibatch.
        u_s = self.u[self.rows,:]
        v_r = self.v[self.cols,:]
        w_o = self.w[self.tubes,:]
        # Trilinear score <u_s, v_r, w_o>.
        self.pred_func = TT.sum(u_s * v_r * w_o, 1)
        self.loss = TT.sqr(self.ys - self.pred_func).mean()
        self.regul_func = TT.sqr(u_s).mean() + TT.sqr(v_r).mean() + TT.sqr(w_o).mean()
class CP_Logistic_Model(CP_Model):
    """
    Canonical Polyadic decomposition trained with the logistic loss
    (sigmoid predictions, softplus negative log-likelihood).
    """
    def __init__(self):
        super(CP_Logistic_Model, self).__init__()
        self.name = self.__class__.__name__
    def define_loss(self):
        u_s = self.u[self.rows,:]
        v_r = self.v[self.cols,:]
        w_o = self.w[self.tubes,:]
        # Shared trilinear score, fed to both the prediction and the loss.
        score = TT.sum(u_s * v_r * w_o, 1)
        self.pred_func = TT.nnet.sigmoid(score)
        self.loss = TT.nnet.softplus(- self.ys * score).mean()
        self.regul_func = TT.sqr(u_s).mean() + TT.sqr(v_r).mean() + TT.sqr(w_o).mean()
class DistMult_Model(Abstract_Model):
    """
    DistMult: bilinear model with a diagonal relation matrix. Subjects and
    objects share a single entity embedding table `e`.
    """
    def __init__(self):
        super(DistMult_Model, self).__init__()
        self.name = self.__class__.__name__
        self.e = None
        self.r = None
    def get_init_params(self):
        return { 'e' : randn(max(self.n,self.l),self.k),
                 'r' : randn(self.m,self.k)}
    def define_loss(self):
        e_s = self.e[self.rows,:]
        r_b = self.r[self.cols,:]
        e_o = self.e[self.tubes,:]
        self.pred_func = TT.sum(e_s * r_b * e_o, 1)
        self.loss = TT.sqr(self.ys - self.pred_func).mean()
        self.regul_func = TT.sqr(e_s).mean() + TT.sqr(r_b).mean() + TT.sqr(e_o).mean()
class DistMult_Logistic_Model(DistMult_Model):
    """
    DistMult trained with the logistic loss (sigmoid predictions,
    softplus negative log-likelihood).
    """
    def __init__(self):
        super(DistMult_Logistic_Model, self).__init__()
        self.name = self.__class__.__name__
    def define_loss(self):
        e_s = self.e[self.rows,:]
        r_b = self.r[self.cols,:]
        e_o = self.e[self.tubes,:]
        # Shared bilinear score, fed to both the prediction and the loss.
        score = TT.sum(e_s * r_b * e_o, 1)
        self.pred_func = TT.nnet.sigmoid(score)
        self.loss = TT.nnet.softplus(- self.ys * score).mean()
        self.regul_func = TT.sqr(e_s).mean() + TT.sqr(r_b).mean() + TT.sqr(e_o).mean()
class TransE_L2_Model(Abstract_Model):
    """
    TransE model with L2 dissimilarity, trained with the max margin ranking loss
    (performs better than maximum likelihood, only for TransE interestingly).
    """
    def __init__(self):
        super(TransE_L2_Model, self).__init__()
        self.name = self.__class__.__name__
        self.e = None
        self.r = None
        #Need to split each batch into positive/negative halves in define_loss:
        self.batch_size = None
        self.neg_ratio = None
    def get_init_params(self):
        params = { 'e' : randn(max(self.n,self.l),self.k), #np.random.uniform(-6.0/np.sqrt(self.k), 6.0/np.sqrt(self.k), (max(self.n,self.l),self.k)).astype('f')
            'r' : L2_proj(randn(self.m,self.k)), #L2_proj(np.random.uniform(-6.0/np.sqrt(self.k), 6.0/np.sqrt(self.k), (self.m,self.k))).astype('f')
            }
        return params
    def get_loss_args_and_symb_vars(self, train_triples, valid_triples, hparams):
        """
        Override, regularization (L2 projection of entity embeddings) is made
        in the batch loader (hacky). Note that `ys` is not an input here.
        """
        train = TransE_Batch_Loader(self, train_triples, n_entities = max(self.n,self.l), batch_size = hparams.batch_size,
                neg_ratio = hparams.neg_ratio, contiguous_sampling = hparams.contiguous_sampling)
        inputs=[self.rows, self.cols, self.tubes]
        if valid_triples is not None:
            valid = Batch_Loader(valid_triples, n_entities = max(self.n,self.l), batch_size = hparams.batch_size,
                    neg_ratio = hparams.neg_ratio, contiguous_sampling = hparams.contiguous_sampling)
        else:
            valid = None
        return train, inputs, valid
    def setup_params_for_train(self, train_triples, valid_triples, hparams, redefine_loss = False):
        # NOTE: mutates hparams (max_iter / neg_ratio) when neg_ratio > 1,
        # because the loss assumes exactly one negative per positive.
        self.batch_size = hparams.batch_size
        if hparams.neg_ratio > 1:
            hparams.max_iter *= hparams.neg_ratio
            hparams.neg_ratio = 1
            logger.warning("Using neg_ratio > 1 with TransE is like making more iterations, max_iter *= neg_ratio, max_iter is now " + str(hparams.max_iter))
        self.margin = hparams.lmbda #Use for cross-validation since there is no need of regularization
        super(TransE_L2_Model,self).setup_params_for_train(train_triples, valid_triples, hparams, redefine_loss)
    def define_loss(self):
        # Score: negative L2 distance between (subject + relation) and object.
        self.pred_func = - TT.sqrt(TT.sum(TT.sqr(self.e[self.rows,:] + self.r[self.cols,:] - self.e[self.tubes,:]),1))
        # First half of each batch holds positives, second half their
        # corrupted counterparts: hinge on the margin between the two distances.
        self.loss = TT.maximum( 0, self.margin + TT.sqrt(TT.sum(TT.sqr(self.e[self.rows[:self.batch_size],:] + self.r[self.cols[:self.batch_size],:] - self.e[self.tubes[:self.batch_size],:]),1) ) \
                - TT.sqrt(TT.sum(TT.sqr(self.e[self.rows[self.batch_size:],:] + self.r[self.cols[self.batch_size:],:] - self.e[self.tubes[self.batch_size:],:]),1)) ).mean()
        #Maximum likelihood loss, performs worse.
        #self.loss = TT.nnet.softplus( self.ys * TT.sqrt(TT.sum(TT.sqr(self.e[self.rows,:] + self.r[self.cols,:] - self.e[self.tubes,:]),1)) ).mean()
        self.regul_func = 0 #Entities embeddings are forced to have unit norm, as stated in TransE paper
class TransE_L1_Model(TransE_L2_Model):
    """
    TransE model with L1 dissimilarity and max margin ranking loss. Training
    setup (margin handling, neg_ratio normalization, batch loading with L2
    projection of entity embeddings) is inherited unchanged from
    TransE_L2_Model; the previous setup_params_for_train override was a pure
    pass-through to super() and has been removed as dead code.
    """
    def __init__(self):
        super(TransE_L1_Model, self).__init__()
        self.name = self.__class__.__name__
    def define_loss(self):
        # Score: negative L1 distance between (subject + relation) and object.
        self.pred_func = - TT.sum(TT.abs_(self.e[self.rows,:] + self.r[self.cols,:] - self.e[self.tubes,:]),1)
        # First half of each batch holds positives, second half their
        # corrupted counterparts: hinge on the margin between the two distances.
        self.loss = TT.maximum( 0, self.margin + TT.sum(TT.abs_(self.e[self.rows[:self.batch_size],:] + self.r[self.cols[:self.batch_size],:] - self.e[self.tubes[:self.batch_size],:]),1) \
                - TT.sum(TT.abs_(self.e[self.rows[self.batch_size:],:] + self.r[self.cols[self.batch_size:],:] - self.e[self.tubes[self.batch_size:],:]),1) ).mean()
        self.regul_func = 0
class Complex_Model(Abstract_Model):
    """
    ComplEx: factorization with complex-valued embeddings, squared loss.
    (e1, e2) are the real/imaginary parts of the entity embeddings and
    (r1, r2) those of the relation embeddings.
    """
    def __init__(self):
        super(Complex_Model, self).__init__()
        self.name = self.__class__.__name__
        # Real and imaginary embedding tables, allocated at training time:
        self.e1 = None
        self.e2 = None
        self.r1 = None
        self.r2 = None
    def get_init_params(self):
        n_ent = max(self.n, self.l)
        return {
            'e1': randn(n_ent, self.k),
            'e2': randn(n_ent, self.k),
            'r1': randn(self.m, self.k),
            'r2': randn(self.m, self.k),
        }
    def define_loss(self):
        e1_s, e2_s = self.e1[self.rows,:], self.e2[self.rows,:]
        r1_b, r2_b = self.r1[self.cols,:], self.r2[self.cols,:]
        e1_o, e2_o = self.e1[self.tubes,:], self.e2[self.tubes,:]
        # Real part of the trilinear Hermitian product <e_s, r, conj(e_o)>.
        self.pred_func = TT.sum(e1_s * r1_b * e1_o, 1) \
                + TT.sum(e2_s * r1_b * e2_o, 1) \
                + TT.sum(e1_s * r2_b * e2_o, 1) \
                - TT.sum(e2_s * r2_b * e1_o, 1)
        self.loss = TT.sqr(self.ys - self.pred_func).mean()
        self.regul_func = TT.sqr(e1_s).mean() + TT.sqr(e2_s).mean() \
                + TT.sqr(e1_o).mean() + TT.sqr(e2_o).mean() \
                + TT.sqr(r1_b).mean() + TT.sqr(r2_b).mean()
class Complex_Logistic_Model(Complex_Model):
    """
    ComplEx trained with the logistic loss (sigmoid predictions,
    softplus negative log-likelihood).
    """
    def __init__(self):
        super(Complex_Logistic_Model, self).__init__()
        self.name = self.__class__.__name__
    def define_loss(self):
        e1_s, e2_s = self.e1[self.rows,:], self.e2[self.rows,:]
        r1_b, r2_b = self.r1[self.cols,:], self.r2[self.cols,:]
        e1_o, e2_o = self.e1[self.tubes,:], self.e2[self.tubes,:]
        # Real part of the trilinear Hermitian product, shared between the
        # prediction and the loss.
        score = TT.sum(e1_s * r1_b * e1_o, 1) \
                + TT.sum(e2_s * r1_b * e2_o, 1) \
                + TT.sum(e1_s * r2_b * e2_o, 1) \
                - TT.sum(e2_s * r2_b * e1_o, 1)
        self.pred_func = TT.nnet.sigmoid(score)
        self.loss = TT.nnet.softplus(- self.ys * score).mean()
        self.regul_func = TT.sqr(e1_s).mean() + TT.sqr(e2_s).mean() \
                + TT.sqr(e1_o).mean() + TT.sqr(e2_o).mean() \
                + TT.sqr(r1_b).mean() + TT.sqr(r2_b).mean()
class Rescal_Model(Abstract_Model):
    """
    Rescal model: one dense k x k matrix per relation; the score is the
    bilinear form e_s^T R_rel e_o, trained with squared loss.
    """
    def __init__(self):
        super(Rescal_Model, self).__init__()
        self.name = self.__class__.__name__
        self.r = None
        self.e = None
    def get_init_params(self):
        params = { 'r' : randn(self.m, self.k, self.k),
            'e' : randn(max(self.n,self.l),self.k) }
        return params
    def define_loss(self):
        # Batched e_s^T R e_o: contract the subject embedding with the
        # relation matrix, then dot with the object embedding.
        self.pred_func = TT.sum( TT.sum(self.e[self.rows,:,None] * self.r[self.cols,:,:], 1) * self.e[self.tubes,:], 1)
        self.loss = TT.sqr(self.ys - self.pred_func).mean()
        self.regul_func = TT.sqr(self.e[self.rows,:]).mean() \
            + TT.sqr(self.r[self.cols,:,:]).mean() \
            + TT.sqr(self.e[self.tubes,:]).mean()
class Rescal_Logistic_Model(Rescal_Model):
    """
    Rescal trained with the logistic loss (sigmoid predictions,
    softplus negative log-likelihood).
    """
    def __init__(self):
        super(Rescal_Logistic_Model, self).__init__()
        self.name = self.__class__.__name__
    def define_loss(self):
        # Batched bilinear score e_s^T R e_o, shared between prediction and loss.
        score = TT.sum( TT.sum(self.e[self.rows,:,None] * self.r[self.cols,:,:], 1) * self.e[self.tubes,:], 1)
        self.pred_func = TT.nnet.sigmoid(score)
        self.loss = TT.nnet.softplus(- self.ys * score).mean()
        self.regul_func = TT.sqr(self.e[self.rows,:]).mean() \
            + TT.sqr(self.r[self.cols,:,:]).mean() \
            + TT.sqr(self.e[self.tubes,:]).mean()
| 18,636 | 30.481419 | 191 | py |
complex | complex-master/efe/__init__.py | 0 | 0 | 0 | py | |
complex | complex-master/efe/evaluation.py | import operator
import sklearn
import sklearn.metrics
from .tools import *
class Result(object):
    """
    Stores the outcome of one test run: raw predictions, average precision
    (ap), precision/recall curve, and -- when ranks are provided -- filtered
    (mrr) and raw (raw_mrr) mean reciprocal ranks.
    """
    def __init__(self, preds, true_vals, ranks, raw_ranks):
        self.preds = preds
        self.ranks = ranks
        self.true_vals = true_vals
        self.raw_ranks = raw_ranks
        #Test if not all the prediction are the same, sometimes happens with overfitting,
        #and leads scikit-learn to output incorrect average precision (i.e ap=1)
        if not (preds == preds[0]).all() :
            #Due to the use of np.isclose in sklearn.metrics.ranking._binary_clf_curve (called by following metrics function),
            #I have to rescale the predictions if they are too small:
            preds_rescaled = preds
            diffs = np.diff(np.sort(preds))
            # Smallest non-zero gap between consecutive sorted predictions.
            min_diff = min(abs(diffs[np.nonzero(diffs)]))
            if min_diff < 1e-8 : #Default value of absolute tolerance of np.isclose
                preds_rescaled = (preds * ( 1e-7 / min_diff )).astype('d')
            self.ap = sklearn.metrics.average_precision_score(true_vals,preds_rescaled)
            self.precision, self.recall, self.thresholds = sklearn.metrics.precision_recall_curve(true_vals,preds_rescaled)
        else:
            # Degenerate fallback: all scores equal, report the positive rate
            # as both AP and precision (scalar stand-ins for the usual arrays).
            logger.warning("All prediction scores are equal, probable overfitting, replacing scores by random scores")
            self.ap = (true_vals == 1).sum() / float(len(true_vals))
            self.thresholds = preds[0]
            self.precision = (true_vals == 1).sum() / float(len(true_vals))
            self.recall = 0.5
        # -1 marks "ranking metrics not computed".
        self.mrr =-1
        self.raw_mrr =-1
        if ranks is not None:
            self.mrr = np.mean(1.0 / ranks)
            self.raw_mrr = np.mean(1.0 / raw_ranks)
class CV_Results(object):
    """
    Class that stores predictions and scores (Result objects) indexed by
    model name, embedding_size (rank) and lmbda, plus parameter counts for
    plotting against model size.
    """
    def __init__(self):
        self.res = {}  # res[model_s][embedding_size][lmbda] -> list of Result
        self.nb_params_used = {} #Indexed by model_s and embedding sizes, in order to plot with respect to the number of parameters of the model
    def add_res(self, res, model_s, embedding_size, lmbda, nb_params):
        # Lazily create the nested levels model -> rank -> lambda -> [Result].
        if model_s not in self.res:
            self.res[model_s] = {}
        if embedding_size not in self.res[model_s]:
            self.res[model_s][embedding_size] = {}
        if lmbda not in self.res[model_s][embedding_size]:
            self.res[model_s][embedding_size][lmbda] = []
        self.res[model_s][embedding_size][lmbda].append( res )
        if model_s not in self.nb_params_used:
            self.nb_params_used[model_s] = {}
        self.nb_params_used[model_s][embedding_size] = nb_params
    def extract_sub_scores(self, idxs):
        """
        Returns a new CV_Results object with scores only at the given indexes
        (boolean mask or index array over the test triples).
        """
        new_cv_res = CV_Results()
        for j, (model_s, cur_res) in enumerate(self.res.items()):
            for i,(k, lmbdas) in enumerate(cur_res.items()):
                for lmbda, res_list in lmbdas.items():
                    for res in res_list:
                        if res.ranks is not None:
                            #Concat idxs on ranks as subject and object ranks are concatenated in a twice larger array
                            res = Result(res.preds[idxs], res.true_vals[idxs], res.ranks[np.concatenate((idxs,idxs))], res.raw_ranks[np.concatenate((idxs,idxs))])
                        else:
                            res = Result(res.preds[idxs], res.true_vals[idxs], None, None)
                        new_cv_res.add_res(res, model_s, k, lmbda, self.nb_params_used[model_s][k])
        return new_cv_res
    def _get_best_mean_ap(self, model_s, embedding_size):
        """
        Averaging runs for each regularization value, and picking the best AP.
        Returns the (mean, std) pair with the highest mean.
        """
        lmbdas = self.res[model_s][embedding_size]
        mean_aps = []
        var_aps = []
        for lmbda_aps in lmbdas.values():
            mean_aps.append( np.mean( [ result.ap for result in lmbda_aps] ) )
            var_aps.append( np.std( [ result.ap for result in lmbda_aps] ) )
        cur_aps_moments = zip(mean_aps, var_aps)
        return max(cur_aps_moments, key = operator.itemgetter(0)) #max by mean
    def print_MRR_and_hits_given_params(self, model_s, rank, lmbda):
        """
        Log and return (mrr, raw_mrr, hits@1, hits@3, hits@10) averaged over
        the runs stored for the given model and hyper-parameters.
        """
        mrr = np.mean( [ res.mrr for res in self.res[model_s][rank][lmbda] ] )
        raw_mrr = np.mean( [ res.raw_mrr for res in self.res[model_s][rank][lmbda] ] )
        ranks_list = [ res.ranks for res in self.res[model_s][rank][lmbda]]
        # 1e-10 avoids exact zeros in downstream ratio computations.
        hits_at1 = np.mean( [ (np.sum(ranks <= 1) + 1e-10) / float(len(ranks)) for ranks in ranks_list] )
        hits_at3 = np.mean( [ (np.sum(ranks <= 3) + 1e-10) / float(len(ranks)) for ranks in ranks_list] )
        hits_at10= np.mean( [ (np.sum(ranks <= 10) + 1e-10) / float(len(ranks)) for ranks in ranks_list] )
        logger.info("%s\t%0.3f\t%0.3f\t%0.3f\t%0.3f\t%0.3f\t%i\t%f" %(model_s, mrr, raw_mrr, hits_at1, hits_at3, hits_at10, rank, lmbda))
        return ( mrr, raw_mrr, hits_at1, hits_at3, hits_at10)
    def print_MRR_and_hits(self):
        """
        For each model, pick the (rank, lambda) with the best mean filtered
        MRR, log its metrics, and return a dict:
        metrics[model_s] = (rank, lambda, mrr, raw_mrr, hits@1, hits@3, hits@10).
        """
        metrics = {}
        logger.info("Model\t\t\tMRR\tRMRR\tH@1\tH@3\tH@10\trank\tlmbda")
        for j, (model_s, cur_res) in enumerate(self.res.items()):
            best_mrr = -1.0
            for i,(k, lmbdas) in enumerate(cur_res.items()):
                mrrs = []
                for lmbda, res_list in lmbdas.items():
                    mrrs.append( (lmbda, np.mean( [ result.mrr for result in res_list] ), np.mean( [ result.raw_mrr for result in res_list] ) ) )
                # Best lambda for this rank, by mean filtered MRR.
                lmbda_mrr = max(mrrs, key = operator.itemgetter(1))
                mrr = lmbda_mrr[1]
                if mrr > best_mrr:
                    best_mrr = mrr
                    best_raw_mrr = lmbda_mrr[2]
                    best_lambda = lmbda_mrr[0]
                    best_rank = k
            metrics[model_s] = (best_rank, best_lambda) + self.print_MRR_and_hits_given_params(model_s, best_rank, best_lambda)
        return metrics
class Scorer(object):
    """
    Computes evaluation scores (predictions + optional filtered/raw ranking
    metrics) for a trained model on a triples set. When ranking is enabled,
    dictionaries of all known triples (train + valid + test) are built so
    that true triples can be filtered out of the rankings.
    """
    def __init__(self, train, valid, test, compute_ranking_scores = False,):
        self.compute_ranking_scores = compute_ranking_scores
        # (subject, relation) -> list of known objects, and
        # (relation, object) -> list of known subjects, for filtering.
        self.known_obj_triples = {}
        self.known_sub_triples = {}
        if self.compute_ranking_scores:
            self.update_known_triples_dicts(train.indexes)
            self.update_known_triples_dicts(test.indexes)
            if valid is not None:
                self.update_known_triples_dicts(valid.indexes)
    def update_known_triples_dicts(self,triples):
        """Register every (i, j, k) triple in both filtering dictionaries."""
        for i,j,k in triples:
            if (i,j) not in self.known_obj_triples:
                self.known_obj_triples[(i,j)] = [k]
            elif k not in self.known_obj_triples[(i,j)]:
                self.known_obj_triples[(i,j)].append(k)
            if (j,k) not in self.known_sub_triples:
                self.known_sub_triples[(j,k)] = [i]
            elif i not in self.known_sub_triples[(j,k)]:
                self.known_sub_triples[(j,k)].append(i)
    def compute_scores(self, model, model_s, params, eval_set):
        """
        Score `eval_set` with `model` and return a Result. If ranking is
        enabled, also compute raw and filtered subject/object ranks; object
        ranks fill ranks[:nb_test], subject ranks ranks[nb_test:].
        """
        preds = model.predict(eval_set.indexes)
        ranks = None
        raw_ranks = None
        if self.compute_ranking_scores:
            #Then we compute the rank of each test:
            nb_test = len( eval_set.values)
            ranks = np.empty( 2 * nb_test)
            raw_ranks = np.empty(2 * nb_test)
            if model_s.startswith("DistMult") or model_s.startswith("Complex") or model_s.startswith("CP") or model_s.startswith("TransE") or model_s.startswith("Rescal"):
                #Fast super-ugly filtered metrics computation for Complex, DistMult, RESCAL and TransE
                #Each pair of closures scores one test triple against every
                #candidate object (eval_o) or subject (eval_s) with numpy only.
                #They rely on Python's late binding: the weight arrays
                #(u/v/w, e/r, e1/e2/r1/r2) are assigned below, after the defs.
                logger.info("Fast MRRs")
                def cp_eval_o(i,j):
                    return (u[i,:] * v[j,:]).dot(w.T)
                def cp_eval_s(j,k):
                    return u.dot(v[j,:] * w[k,:])
                def distmult_eval_o(i,j):
                    return (e[i,:] * r[j,:]).dot(e.T)
                def distmult_eval_s(j,k):
                    return e.dot(r[j,:] * e[k,:])
                def complex_eval_o(i,j):
                    return (e1[i,:] * r1[j,:]).dot(e1.T) + (e2[i,:] * r1[j,:]).dot(e2.T) + (e1[i,:] * r2[j,:]).dot(e2.T) - (e2[i,:] * r2[j,:]).dot(e1.T)
                def complex_eval_s(j,k):
                    return e1.dot(r1[j,:] * e1[k,:]) + e2.dot(r1[j,:] * e2[k,:]) + e1.dot(r2[j,:] * e2[k,:]) - e2.dot(r2[j,:] * e1[k,:])
                def transe_l2_eval_o(i,j):
                    return - np.sum(np.square((e[i,:] + r[j,:]) - e ),1)
                def transe_l2_eval_s(j,k):
                    return - np.sum(np.square(e + (r[j,:] - e[k,:]) ),1)
                def transe_l1_eval_o(i,j):
                    return - np.sum(np.abs((e[i,:] + r[j,:]) - e ),1)
                def transe_l1_eval_s(j,k):
                    return - np.sum(np.abs(e + (r[j,:] - e[k,:]) ),1)
                def rescal_eval_o(i,j):
                    return (e[i,:].dot(r[j,:,:])).dot(e.T)
                def rescal_eval_s(j,k):
                    return e.dot(r[j,:,:].dot(e[k,:]))
                #Select the scoring closures and pull the trained weights out
                #of the theano shared variables:
                if model_s.startswith("DistMult"):
                    e = model.e.get_value(borrow=True)
                    r = model.r.get_value(borrow=True)
                    eval_o = distmult_eval_o
                    eval_s = distmult_eval_s
                elif model_s.startswith("CP"):
                    u = model.u.get_value(borrow=True)
                    v = model.v.get_value(borrow=True)
                    w = model.w.get_value(borrow=True)
                    eval_o = cp_eval_o
                    eval_s = cp_eval_s
                elif model_s.startswith("Complex"):
                    e1 = model.e1.get_value(borrow=True)
                    r1 = model.r1.get_value(borrow=True)
                    e2 = model.e2.get_value(borrow=True)
                    r2 = model.r2.get_value(borrow=True)
                    eval_o = complex_eval_o
                    eval_s = complex_eval_s
                elif model_s == "TransE_L2_Model":
                    e = model.e.get_value(borrow=True)
                    r = model.r.get_value(borrow=True)
                    eval_o = transe_l2_eval_o
                    eval_s = transe_l2_eval_s
                elif model_s == "TransE_L1_Model":
                    e = model.e.get_value(borrow=True)
                    r = model.r.get_value(borrow=True)
                    eval_o = transe_l1_eval_o
                    eval_s = transe_l1_eval_s
                elif model_s.startswith("Rescal"):
                    e = model.e.get_value(borrow=True)
                    r = model.r.get_value(borrow=True)
                    eval_o = rescal_eval_o
                    eval_s = rescal_eval_s
            else:
                #Generic version to compute ranks given any model:
                #(reuses two index buffers and calls model.predict per triple)
                logger.info("Slow MRRs")
                n_ent = max(model.n,model.l)
                idx_obj_mat = np.empty((n_ent,3), dtype=np.int64)
                idx_sub_mat = np.empty((n_ent,3), dtype=np.int64)
                idx_obj_mat[:,2] = np.arange(n_ent)
                idx_sub_mat[:,0] = np.arange(n_ent)
                def generic_eval_o(i,j):
                    idx_obj_mat[:,:2] = np.tile((i,j),(n_ent,1))
                    return model.predict(idx_obj_mat)
                def generic_eval_s(j,k):
                    idx_sub_mat[:,1:] = np.tile((j,k),(n_ent,1))
                    return model.predict(idx_sub_mat)
                eval_o = generic_eval_o
                eval_s = generic_eval_s
            for a,(i,j,k) in enumerate(eval_set.indexes[:nb_test,:]):
                #Computing objects ranks
                res_obj = eval_o(i,j)
                raw_ranks[a] = 1 + np.sum( res_obj > res_obj[k] )
                # Filtered rank: don't count other known-true objects above k.
                ranks[a] = raw_ranks[a] - np.sum( res_obj[self.known_obj_triples[(i,j)]] > res_obj[k] )
                #Computing subjects ranks
                res_sub = eval_s(j,k)
                raw_ranks[nb_test + a] = 1 + np.sum( res_sub > res_sub[i] )
                ranks[nb_test + a] = raw_ranks[nb_test + a] - np.sum( res_sub[self.known_sub_triples[(j,k)]] > res_sub[i] )
        return Result(preds, eval_set.values, ranks, raw_ranks)
| 10,160 | 32.314754 | 162 | py |
complex | complex-master/efe/exp_generators.py | import scipy
import scipy.io
import random
from .experiment import *
def parse_line(filename, line, i):
    """Parse one tab-separated triple line into (subject, object, relation, value).

    ``filename`` and ``i`` are part of the parser-callback interface used by
    load_triples_from_txt and are unused here; the truth value is always 1
    (positives-only datasets).
    """
    fields = line.strip().split("\t")
    subject, relation, obj = fields[0], fields[1], fields[2]
    return subject, obj, relation, 1
def load_triples_from_txt(filenames, entities_indexes = None, relations_indexes = None, add_sameas_rel = False, parse_line = parse_line):
    """
    Take a list of file names and build the corresponding dictionary of triples.

    Args:
        filenames: list of paths to tab-separated triple files.
        entities_indexes: optional existing {entity: id} mapping, extended in place.
        relations_indexes: optional existing {relation: id} mapping, extended in place.
        add_sameas_rel: if True, append a dense "sameAs_" relation with value 1 on
            the diagonal and -1 elsewhere.
        parse_line: callback (filename, line, i) -> (sub, obj, rel, val).

    Returns:
        (data, entities_indexes, relations_indexes) where data maps
        (sub_id, rel_id, obj_id) -> value.
    """
    if entities_indexes is None:
        entities_indexes = dict()
    if relations_indexes is None:
        relations_indexes = dict()
    # Next free ids. max(..., default=-1) keeps this correct when a caller
    # passes an *empty* dict (the old max(values()) + 1 raised ValueError).
    next_ent = max(entities_indexes.values(), default=-1) + 1
    next_rel = max(relations_indexes.values(), default=-1) + 1

    data = dict()
    for filename in filenames:
        with open(filename) as f:
            # Iterate lazily instead of materialising the file with readlines().
            for i, line in enumerate(f):
                sub, obj, rel, val = parse_line(filename, line, i)
                # Membership is tested on the index dicts directly; the old
                # parallel `entities`/`relations` sets were redundant copies.
                if sub not in entities_indexes:
                    entities_indexes[sub] = next_ent
                    next_ent += 1
                sub_ind = entities_indexes[sub]
                if obj not in entities_indexes:
                    entities_indexes[obj] = next_ent
                    next_ent += 1
                obj_ind = entities_indexes[obj]
                if rel not in relations_indexes:
                    relations_indexes[rel] = next_rel
                    next_rel += 1
                rel_ind = relations_indexes[rel]
                data[(sub_ind, rel_ind, obj_ind)] = val

    if add_sameas_rel:
        # Dense identity relation: +1 on the diagonal, -1 off-diagonal.
        rel_ind = next_rel
        next_rel += 1
        relations_indexes["sameAs_"] = rel_ind
        for sub in entities_indexes:
            for obj in entities_indexes:
                value = 1 if sub == obj else -1
                data[(entities_indexes[sub], rel_ind, entities_indexes[obj])] = value

    return data, entities_indexes, relations_indexes
def build_data(name, path = '/home/ttrouill/dbfactor/projects/relational_bench/datasets/'):
    """Load the train/valid/test splits of dataset *name* and wrap them in an Experiment.

    The entity/relation index dicts are threaded through the three splits so
    that all splits share one consistent id space.
    """
    folder = path + '/' + name + '/'

    entities_indexes = None
    relations_indexes = None
    split_triples = {}
    for split in ('train', 'valid', 'test'):
        split_triples[split], entities_indexes, relations_indexes = load_triples_from_txt(
            [folder + split + '.txt'],
            entities_indexes = entities_indexes,
            relations_indexes = relations_indexes,
            add_sameas_rel = False,
            parse_line = parse_line)

    def _to_set(triples):
        # Pack the {triple: value} dict into a Triplets_set of index/value arrays.
        return Triplets_set(np.array(list(triples.keys())), np.array(list(triples.values())))

    train = _to_set(split_triples['train'])
    valid = _to_set(split_triples['valid'])
    test = _to_set(split_triples['test'])

    return Experiment(name,train, valid, test, positives_only = True, compute_ranking_scores = True, entities_dict = entities_indexes, relations_dict = relations_indexes)
def load_mat_file(name, path, matname, load_zeros = False, prop_valid_set = .1, prop_test_set=0):
    """Load a (sparse) relational matrix from a .mat file and split it into
    train/valid/test Triplets_sets.

    Args:
        name: file name of the .mat file (appended to *path*).
        path: directory containing the file.
        matname: key of the matrix inside the .mat dictionary.
        load_zeros: only the False branch (non-zero entries) is implemented.
        prop_valid_set: fraction of triples used for validation.
        prop_test_set: fraction of triples used for testing.

    Returns:
        An Experiment wrapping the shuffled split (only for sparse input with
        load_zeros=False; other combinations return None, as before).
    """
    x = scipy.io.loadmat(path + name)[matname]
    # NOTE(review): `sp` is presumably scipy.sparse brought in by a star
    # import elsewhere in this module — confirm.
    if sp.issparse(x):
        if not load_zeros:
            idxs = x.nonzero()
            # Build an (nnz, 3) array of (subject, relation=0, object) rows.
            # The old np.array(zip(...)) was a Python-2 leftover: under
            # Python 3 it wraps the zip *iterator* in a 0-d object array,
            # breaking shuffle/indexing. column_stack materialises it.
            indexes = np.column_stack((idxs[0], np.zeros_like(idxs[0]), idxs[1]))
            np.random.shuffle(indexes)

            nb = indexes.shape[0]
            # Split boundaries: [0, i_valid) train, [i_valid, i_test) valid, rest test.
            i_valid = int(nb - nb*prop_valid_set - nb * prop_test_set)
            i_test = i_valid + int( nb*prop_valid_set)

            train = Triplets_set(indexes[:i_valid,:], np.ones(i_valid))
            valid = Triplets_set(indexes[i_valid:i_test,:], np.ones(i_test - i_valid))
            test = Triplets_set(indexes[i_test:,:], np.ones(nb - i_test))

            return Experiment(name,train, valid, test, positives_only = True, compute_ranking_scores = True)
| 3,978 | 24.837662 | 167 | py |
wakenet | wakenet-master/Code/turbine_scaling.py | from neuralWake import *
from superposition import *
from synth_and_train import *
from optimisation import *
import synth_and_train as dat
# Entry script: either visualise a sample of the training wake dataset, or
# benchmark FLORIS against the neural surrogate as the number of
# superimposed turbines grows. `train_net`, `D`, `device` and `compare`
# come from the star imports above — presumably neuralWake config /
# superposition; confirm against those modules.
if train_net == 1:
    # Plot wake dataset sample
    dat.Create(plots=True)
else:
    # ------------ Computational time vs Superimposed turbines scaling ------------ #
    iterations = 3               # runs averaged per farm size
    mm = 4
    max_turbines = 6*mm          # largest farm tested
    saveas = "scaling"+str(max_turbines)+" "+device
    # Base 6-turbine layout: two staggered columns of three turbines.
    xs = [
        0,
        0,
        0,
        7 * D,
        7 * D,
        7 * D
    ]
    ys = [
        0 * D,
        2 * D,
        4 * D,
        1 * D,
        3 * D,
        5 * D
    ]
    cnt = 2
    # Extend the layout a column-pair (6 turbines) at a time until
    # max_turbines coordinates exist; y positions repeat the base pattern.
    for i in range(int(max_turbines/6+.5)-1):
        xs += [7*cnt*D, 7*cnt*D, 7*cnt*D] + [7*(cnt+1)*D, 7*(cnt+1)*D, 7*(cnt+1)*D]
        ys += ys[:6]
        cnt+=1
    xs = np.array(xs)
    ys = np.array(ys)
    yws = np.zeros(xs.size)      # zero yaw for every turbine
    floris_time_plot = np.zeros(max_turbines)
    neural_time_plot = np.zeros(max_turbines)
    # Time FLORIS and the surrogate on the first i+1 turbines, averaged
    # over `iterations` repetitions.
    for i in range(max_turbines):
        print("No. of turbines:", i)
        for _ in range(iterations):
            floris_time, neural_time = compare(
                yws=yws[: i + 1],
                ws=7,
                ti=0.05,
                xs=xs[: i + 1],
                ys=ys[: i + 1],
                print_times=False,
                timings=True,
            )
            floris_time_plot[i] += floris_time
            neural_time_plot[i] += neural_time
        floris_time_plot[i] /= iterations
        neural_time_plot[i] /= iterations
    # Log-log scaling plot: FLORIS (dashed navy) vs wakeNet (crimson).
    fig, ax = plt.subplots(1)
    # plt.plot(np.arange(1, max_turbines+1), floris_time_plot/100, color='navy', linestyle='--')
    plt.plot(
        np.arange(1, max_turbines + 1), floris_time_plot, color="navy", linestyle="--", label='FLORIS'
    )
    plt.plot(
        np.arange(1, max_turbines + 1), neural_time_plot, color="crimson", label='wakeNet'
    )
    plt.xscale("log")
    plt.yscale("log")
    plt.tick_params(axis="x", direction="in")
    plt.tick_params(axis="y", direction="in")
    plt.legend()
    plt.show()
    fig.savefig("figures/"+str(saveas), dpi=1200)
| 2,171 | 22.608696 | 102 | py |
wakenet | wakenet-master/Code/example_main.py | from neuralWake import *
from optimisation import *
import synth_and_train as st
def florisPw(u_stream, tis, xs, ys, yws):
    """
    Evaluate the given farm configuration with FLORIS and return its total
    power output in MW, rounded to two decimals.

    Args:
        u_stream: free-stream wind speed.
        tis: ambient turbulence intensity.
        xs, ys: turbine coordinates.
        yws: turbine yaw angles (degrees).
    """
    # Curled-wake model is opt-in via the module-level `curl` flag.
    if curl:
        fi.floris.farm.set_wake_model("curl")
    # Push inflow conditions and layout into the shared FLORIS interface,
    # one setting at a time, then resolve the wake field.
    fi.reinitialize_flow_field(wind_speed=u_stream)
    fi.reinitialize_flow_field(turbulence_intensity=tis)
    fi.reinitialize_flow_field(layout_array=[xs, ys])
    fi.calculate_wake(yaw_angles=yws)
    total_watts = fi.get_farm_power()
    megawatts = total_watts / 1e6
    return round(megawatts, 2)
def main():
    """Train the wakeNet surrogate or run one of the demo/optimisation cases.

    Behaviour is driven by the ``train_net`` flag from the imported config:
    when True the dataset is synthesised and the network is trained; otherwise
    pre-trained weights are loaded and the user is prompted for a test case.
    Only cases 1-3 are handled here despite the "(1-4)" prompt.
    """
    if train_net == True:
        # Start training timer
        t0 = time.time()
        # Create the dataset
        X_train, X_val, X_test, y_train, y_val, y_test = st.create()
        # Set neural model
        model = wakeNet().to(device)
        # Feed domain points to train the model
        print("Training...")
        vloss_plot, tloss_plot, v_plot, t_plot = \
            st.training(X_train, X_val, X_test, y_train, y_val, y_test, model, plot_curves=1, saveas='tcurv')
        # End training timer
        t1 = time.time()
        print("Training took: ", int(t1 - t0), " seconds")
    else:
        # Set neural model and load pre-trained weights for inference only.
        model = wakeNet().to(device)
        model.load_state_dict(torch.load(weights_path, map_location=device))
        model.eval().to(device)
    # Sets test case value
    test = int(input("Please enter the test case number (1-4): "))
    if test == 1:
        # Single and multiple wake comparisons (FLORIS vs surrogate).
        # Single
        xs = np.array([D])
        ys = np.array([D])
        yws = [-30]
        compare(
            plots=True,
            yws=yws,
            ws=11,
            ti=0.05,
            xs=xs,
            ys=ys,
            print_times=True,
            single=False,
        )
        # Multiple 1
        xs = np.array([1*D, 1*D, 1*D,
                       4.5*D, 4.5*D, 4.5*D,
                       8*D, 8*D, 8*D])
        ys = np.array([1*D, 3*D, 5*D,
                       2*D, 4*D, 6*D,
                       1*D, 3*D, 5*D])
        # NOTE(review): 10 yaw angles are supplied for 9 turbines —
        # presumably the surplus entry is ignored downstream; confirm
        # against compare().
        yws = [30, -30, 30, -30, 30, -30, 30, -30, 30, -30]
        compare(
            plots=True,
            yws=yws,
            ws=11,
            ti=0.05,
            xs=xs,
            ys=ys,
            print_times=True,
            single=False,
        )
    if test == 2:
        # Case A (yaw) M3: 6-turbine grid, yaw optimisation in both solvers.
        xs = np.array([1*D, 1*D, 8*D, 8*D, 15*D, 15*D])
        ys = np.array([1*D, 7*D, 1*D, 7*D, 1*D, 7*D])
        florisOptimiser(ws=11, ti=0.05, layout_x=xs, layout_y=ys, plots=True)
        neuralOptimiser(ws=11, ti=0.05, xs=xs, ys=ys, plots=True, floris_gain=True)
        # Yaw power heatmaps
        heatmap(xs, ys, res=3, farm_opt=False)
        # Case B (yaw) M2: 15-turbine staggered grid.
        xs = np.array([1*D, 1*D, 1*D, 4.5*D, 4.5*D,
                       8*D, 8*D, 8*D, 11.5*D, 11.5*D,
                       15*D, 15*D, 15*D, 18.5*D, 18.5*D])
        ys = np.array([1*D, 5*D, 9*D, 3*D, 7*D,
                       1*D, 5*D, 9*D, 3*D, 7*D,
                       1*D, 5*D, 9*D, 3*D, 7*D])
        florisOptimiser(ws=11, ti=0.05, layout_x=xs, layout_y=ys, plots=False)
        neuralOptimiser(ws=11, ti=0.05, xs=xs, ys=ys, plots=False, floris_gain=True)
        # Yaw power heatmaps
        heatmap(xs, ys, res=3, farm_opt=False)
    if test == 3:
        # Case C (Layout): layout optimisation mode in both solvers.
        # 6-turb
        xs = np.array([1*D, 1*D, 8*D, 8*D, 15*D, 15*D])
        ys = np.array([1*D, 5*D, 1*D, 5*D, 1*D, 5*D])
        # # 15-turb
        # xs = np.array([1*D, 1*D, 1*D, 4.5*D, 4.5*D,
        #                8*D, 8*D, 8*D, 11.5*D, 11.5*D,
        #                15*D, 15*D, 15*D, 18.5*D, 18.5*D])
        # ys = np.array([1*D, 5*D, 9*D, 3*D, 7*D,
        #                1*D, 5*D, 9*D, 3*D, 7*D,
        #                1*D, 5*D, 9*D, 3*D, 7*D])
        neuralOptimiser(ws=11.0, ti=0.05, xs=xs, ys=ys, plots=True, floris_gain=True, mode='farm')
        florisOptimiser(ws=11.0, ti=0.05, layout_x=xs, layout_y=ys, plots=True, mode='farm')
        # Layout power heatmaps
        heatmap(xs, ys, res=10, farm_opt=True)
        heatmap(xs, ys, res=3, farm_opt=True)
# Standard script entry point.
if __name__=="__main__":
    main()
| 4,399 | 30.884058 | 105 | py |
wakenet | wakenet-master/Code/packages.py | # Package list
import os
import time
import json
import random
import warnings
import numpy as np
import scipy.stats as stats
from matplotlib import rc
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
import torch
import torch.nn as nn
import torch.optim as optim
from torch import nn, optim
from torch.utils.data import TensorDataset, DataLoader
import dask
from dask.distributed import Client, progress
from PIL import Image
from scipy import interp
from scipy import ndimage
from scipy import interpolate
from scipy.optimize import minimize
import floris.tools as wfct
import floris.tools.visualization as vis
from floris.tools.optimization.scipy.yaw import YawOptimization
from floris.tools.optimization.scipy.layout import LayoutOptimization
import logging
# Silence FLORIS's verbose INFO logging; warnings and errors still surface.
logging.getLogger("floris").setLevel(logging.WARNING)
| 840 | 21.72973 | 69 | py |
wakenet | wakenet-master/Code/superposition.py | from re import S
from neuralWake import *
from torch import cpu
from CNNWake.FCC_model import *
warnings.filterwarnings("ignore")
# Synth value
# Inference mode: load the pre-trained wakeNet surrogate once at import time.
if train_net == 0:
    # Load model
    model = wakeNet().to(device)
    model.load_state_dict(torch.load(weights_path, map_location=device))
    model.eval().to(device)
# Use CNNWake module to calculate local ti values
# Initialise network to local turbulent intensities
# NOTE(review): dump lost indentation — these constants and the two model
# loads below are assumed to sit at module level (not inside the
# train_net guard); confirm against the original file.
nr_input_values = 42  # Number of input values
nr_neurons_ti = 200  # Number of neurons in every layer
nr_neurons = 300  # Number of neurons in every layer
nr_output = 1  # Number of outputs from model
# Use CNNWake module to calculate local power and ti values
if local_ti == True:
    TI_model = FCNN(nr_input_values, nr_neurons_ti, nr_output).to(device)
    # Load trained model and set it to evaluation mode
    TI_model.load_model("CNNWake/FCNN_TI.pt", device=device)
    TI_model.eval()
if local_pw == True:
    pw_model = FCNN(nr_input_values, nr_neurons, nr_output).to(device)
    # Load trained model and set it to evaluation mode
    pw_model.load_model("CNNWake/power_model.pt", device=device)
    pw_model.eval()
def florisPw(u_stream, tis, xs, ys, yws):
    """
    Evaluate the given farm configuration with FLORIS and return its total
    power output in watts, rounded to two decimals.

    Args:
        u_stream: free-stream wind speed.
        tis: ambient turbulence intensity.
        xs, ys: turbine coordinates.
        yws: turbine yaw angles (degrees).
    """
    # Opt into the curled-wake model when the module-level flag is set.
    if curl:
        fi.floris.farm.set_wake_model("curl")
    # Configure inflow and layout on the shared FLORIS interface.
    fi.reinitialize_flow_field(wind_speed=u_stream)
    fi.reinitialize_flow_field(turbulence_intensity=tis)
    fi.reinitialize_flow_field(layout_array=[xs, ys])
    # Resolve the wake field for the requested yaws and read back the power.
    fi.calculate_wake(yaw_angles=yws)
    return round(fi.get_farm_power(), 2)
def superposition(
    inpt1,
    inpt2,
    u_stream,
    tis,
    cp=None,
    wind_speed=None,
    farm_opt=False,
    plots=False,
    power_opt=True,
    print_times=False,
    timings=False,
    floris_gain=False,
    x0=np.zeros(1),
    single=False,
    saveas=None,
):
    """
    Calls the neural model to produce neural wakes and superimposes them on the
    computational domain in order to calculate the total farm power output in MW.

    Args:
        inpt1: Yaw angles (deg) when farm_opt is False, otherwise the flattened
            layout vector [xs..., ys...] (scipy's minimize passes the optimised
            quantity as the first argument).
        inpt2: Counterpart of inpt1 (layout when optimising yaws, yaws when
            optimising the layout).
        u_stream (float) Free stream velocity.
        tis (float) Ambient turbulence intensity.
        cp (numpy array of floats) Cp values of turbine Cp-wind speed curve.
        wind_speed (numpy array of floats) Wind speed values of turbine Cp-wind speed curve.
        farm_opt (boolean, optional) True when inpt1 is the layout vector.
        plots (boolean, optional) If True, Plots superimposed wakes.
        power_opt (boolean, optional) If True, computes and returns farm power.
        print_times (boolean, optional) If True, prints timings.
        timings (boolean, optional) Returns model timings.
        floris_gain (boolean, optional) If True, calculates and returns gained power output
            with the optimised results of the DNN but using Floris for comparison.
        x0 (numpy array, optional) Defined with size > 1 for farm optimisations for storing
            initial turbine coordinates.
        single (boolean, optional) Force single-wake (free-stream inflow) treatment.
        saveas (str or None, optional) If given, figures are saved to figures/<saveas>.
    Returns:
        floris_time (float) Time required for Floris computation.
        neural_time (float) Time required for a forward solution of the DNN.
        or
        -power_tot (float) Total (negative) farm power output produced by the DNN,
            based on the input turbine yaws and positions.
        floris_power_opt (float) Total farm power output produced by Floris in MW,
            based on the input turine yaws and positions.
    """
    # Per-turbine power predictions when the CNNWake power model is active
    pw_ar = []
    # Scales final domain
    xscale = 0.7
    if curl == True:
        fi.floris.farm.set_wake_model("curl")
    # Select first and second argument based on the optimisiation mode.
    # Scipy's "minimise" prefers the parameter of optimisaion to be first.
    if farm_opt == True:
        layout = inpt1
        yws = inpt2
    else:
        layout = inpt2
        yws = inpt1
    # Save initial positions. x0 defined only for farm optimisation.
    if x0.size > 1:
        xs0 = x0[: int(layout.size / 2 + 0.25)]
        ys0 = x0[int(layout.size / 2 + 0.25) :]
        xs0_arg = xs0.argsort()
        xs0 = xs0[xs0_arg]
        ys0 = ys0[xs0_arg]
    # Split layout vector in x and y coordinates
    layout = np.array(layout)
    xs = layout[: int(layout.size / 2 + 0.25)]
    ys = layout[int(layout.size / 2 + 0.25) :]
    # Sort x, y and yaws based on x coordinates to superimpose
    # the turbines from left to right (downstream direction).
    xs_arg = xs.argsort()
    xs = xs[xs_arg]
    ys = ys[xs_arg]
    yws = np.array(yws)
    yws = yws[xs_arg]
    # Initialisations
    n = xs.size  # Turbine number
    clean = np.zeros(n)  # clean[p] == 1 -> turbine p sees undisturbed inflow
    if n == 1: single = True
    hbs = 90  # Hub height
    inlet_speed = u_stream  # Speed at inlet
    # Domain dimensions
    x_domain = x_bounds[1] - x_bounds[0]
    y_domain = y_bounds[1] - y_bounds[0]
    # Hub speeds and Yaws' initialization
    hub_speeds = np.zeros(n)
    hub_speeds_power = np.zeros(n)
    hub_speeds_mean = np.zeros(n)
    # Define dx, dy (grid spacing of a single-wake tile)
    dx = np.abs(x_domain / dimx)
    dy = np.abs(y_domain / dimy)
    # Domain dimensions
    length = np.max(np.abs(xs)) + x_domain
    domain_cols = int(length / dx + .5)
    height = 2 * np.max(np.abs(ys)) + y_domain
    domain_rows = int(height / dy + .5)
    # Domain shape initialization (free stream everywhere)
    domain = np.ones((domain_rows, domain_cols)) * inlet_speed
    neural_old = np.ones((dimy, dimx)) * inlet_speed
    # Calculate the position of the first wake in the domain.
    rows1 = int(domain_rows / 2 - ys[0] / dy - dimy / 2 + .5)
    rows2 = int(domain_rows / 2 - ys[0] / dy + dimy / 2 + .5)
    cols1 = int(xs[0] / dx + .5)
    cols2 = int(xs[0] / dx + .5) + dimx
    # Start DNN timer
    t0 = time.time()
    for p in range(n):
        # Define start and finish rows of the current turbine's hub
        hub_start = int((rows2 + rows1) / 2 - D / dy / 2 + .5)
        hub_finish = int((rows2 + rows1) / 2 + D / dy / 2 + .5)
        hub_tot = hub_finish - hub_start
        if np.all(domain[hub_start:hub_finish, cols1] == u_stream):
            clean[p] = 1
        # Method A (mean). Calculate the mean speed on the hub.
        inlet_speed_mean = np.mean(domain[hub_start:hub_finish, cols1])
        # Method B (rings). Numerically integrate over the rotor swept area surface.
        # This gives a better approximation to the 3D domain calculations of Floris.
        inlet_speed = 0
        inlet_speed_pw = 0
        area = np.pi * D * D / 4
        for i in range(int(hub_tot / 2)):
            # Stop calculation if the profile == u_stream
            if clean[p] == 1:
                break
            # Find mean ring speed assuming symmetric flow with respect to the tower.
            mean_hub_speed = np.mean([domain[hub_start + i, cols1], domain[hub_finish - i, cols1]])
            # # Calculate total rotor area.
            # area += 2 * np.pi * int((hub_tot/2-i)*dy) * dy
            # Calculate inlet speed of current turbine based on the current state of the domain.
            inlet_speed += (mean_hub_speed * 2 * np.pi * (int(hub_tot / 2) - i) * dy * dy)
            if local_pw != True:
                # Calculate speed^3 (kinetic energy) term that will go in the calculation of power.
                area_int = 2 * np.pi * (int(hub_tot / 2) - i) * dy * dy
                inlet_speed_pw += (mean_hub_speed * mean_hub_speed * mean_hub_speed * area_int)
        # Divide speeds by total calculated area
        inlet_speed /= area
        inlet_speed_pw /= area
        inlet_speed_pw = (inlet_speed_pw) ** (1 / 3)
        # Profile == u_stream or Single wake condition
        if clean[p] == 1 or single == True:
            inlet_speed = u_stream
            inlet_speed_pw = u_stream
        # Limit the minimum speed at the minimum training speed of the DNN.
        if inlet_speed < ws_range[0]:
            inlet_speed = ws_range[0]
        # Use CNNWake module to calculate local ti values for each turbine
        ti_ar = np.ones(2)*tis
        if local_ti == True or local_pw == True:
            # Sample the velocity profile 50 m upstream of the rotor plane.
            speeds_50m = domain[hub_start:hub_finish, cols1 - int(50 / dx + .5)]  # ***
            sss = speeds_50m.size
            # 40-point smoothed (3-sample moving average) profile for the FCNN input.
            ult = np.array([((speeds_50m[i - 1] + speeds_50m[i] + speeds_50m[i + 1])/3)
                            for i in np.linspace(1, sss-2, 40, dtype=int)])
            yaw_angle = yws[p]
            turbulent_int = tis
            ult /= 15
            # The array conists of 40 wind speeds values, the yaw angle and inflow TI
            # change the two last values of the array to yaw angle and inflow TI b4 passing to NN
            ult = np.append(ult, yaw_angle / 35)
            ult = np.append(ult, turbulent_int)
            if local_ti == True and clean[p] != 1:# and curl != 1:
                ti_norm = 0.3
                ti2 = (TI_model((torch.tensor(ult).float().to(device))).detach().cpu().numpy() * ti_norm)
                if ti2 < turbulent_int * 0.7:
                    ti2 = turbulent_int * 1.5
                # clip ti values to max and min trained
                ti_ar[1] = np.clip(ti2, 0.01, 0.25).item(0)
                ti_ar[0] = tis
            if local_pw == True:
                # pw_norm is the power normalisation constant used during FCNN training.
                pw_norm = 4996386
                pw = (pw_model((torch.tensor(ult).float().to(device))).detach().cpu().numpy() * pw_norm)
                pw_ar.append(pw)
        # Get the DNN result
        # print(u_stream, inlet_speed, ti_ar, yws[p], hbs)
        neural = model.compareContour(
            u_stream, inlet_speed, ti_ar, yws[p], hbs, model, result_plots=False
        )
        # Save the inlet speed terms
        hub_speeds[p] = inlet_speed
        hub_speeds_mean[p] = inlet_speed_mean
        hub_speeds_power[p] = inlet_speed_pw
        # Apply SOS for after the first turbine is placed in the domain
        if p != 0 and p != (xs.size):
            # Apply the SOS (sum-of-squares) deficit superposition model
            def1 = np.square(1 - neural / hub_speeds[p])
            def2 = np.square(1 - neural_old / u_stream)
            neural = (1 - np.sqrt(def1 + def2)) * u_stream
        # Apply denoise filter (mainly for plotting purposes)
        if denoise > 1:
            neural[:, 1:] = ndimage.median_filter(
                neural[:, 1:], denoise
            )  # denoise filter
        # Place the DNN output inside the domain
        domain[rows1:rows2, cols1:cols2] = neural
        # Calculate the rows and columns of the next wake inside the domain
        if p != (xs.size - 1):
            p2 = p + 1
            rows1 = int(domain_rows / 2 - ys[p2] / dy - dimy / 2 + .5)
            rows2 = int(domain_rows / 2 - ys[p2] / dy + dimy / 2 + .5)
            cols1 = int(xs[p2] / dx + .5)
            cols2 = int(xs[p2] / dx + .5) + dimx
            # Store an old image of the domain to be used in the next superposition
            neural_old = np.copy(domain[rows1:rows2, cols1:cols2])
    # End DNN timer
    t1 = time.time()
    # Print DNN time
    neural_time = t1 - t0
    neural_time_rnd = round(t1 - t0, 2)
    if print_times == True:
        print("Total Neural time: ", neural_time_rnd)
    # 2 Modes: Plot contours and/or Return calculation timings.
    if plots == True or timings == True:
        # Start FLORIS timer
        t0 = time.time()
        # Initialise FLORIS
        if curl == True:
            fi.floris.farm.set_wake_model("curl")
        fi.reinitialize_flow_field(wind_speed=u_stream)
        fi.reinitialize_flow_field(turbulence_intensity=tis)
        if single != True:
            # FLORIS y axis is mirrored relative to the local domain convention.
            fi.reinitialize_flow_field(layout_array=[xs, -ys])
        # Get FLORIS power
        if timings == False:
            fi.calculate_wake(yaw_angles=yaw_ini)
            floris_power_0 = fi.get_farm_power()
            fi.calculate_wake(yaw_angles=yws)
            floris_power_opt = fi.get_farm_power()
        if plots == True:
            nocut=0
        else:
            nocut=1
        if nocut != 1:
            # Extract the horizontal FLORIS flow plane matching the DNN domain.
            if single == True:
                cut_plane = fi.get_hor_plane(height=hbs,
                                             x_bounds=x_bounds,
                                             y_bounds=y_bounds,
                                             x_resolution=dimx,
                                             y_resolution=dimy)
            else:
                cut_plane = fi.get_hor_plane(height=hbs,
                                             x_bounds=(0, length+0.5*dx),
                                             y_bounds=(-height/2, height/2),
                                             x_resolution=domain_cols,
                                             y_resolution=domain_rows)
            u_mesh0 = cut_plane.df.u.values.reshape(
                cut_plane.resolution[1], cut_plane.resolution[0]
            )
            if single == True:
                u_mesh = np.ones((domain_rows, domain_cols)) * inlet_speed
                u_mesh[rows1:rows2, cols1:cols2] = u_mesh0
            else:
                u_mesh = u_mesh0
        # End FLORIS timer
        t1 = time.time()
        floris_time = t1 - t0
        floris_time_rnd = round(t1 - t0, 2)
        if print_times == True:
            print("Total Floris time: ", floris_time_rnd)
        if timings == True:
            return floris_time, neural_time
        # Define plot length and height
        fx = cut_plane.df.x1.values
        new_len = np.max(fx) - np.min(fx)
        new_height1 = np.min(ys) - 2 * D
        new_height2 = np.max(ys) + 2 * D
        row_start = int(domain.shape[0] / 2 - np.max(ys) / dy - 2 * D / dy + .0)
        row_finish = int(domain.shape[0] / 2 - np.min(ys) / dy + 2 * D / dy + .5)
        # Keep the FLORIS and DNN domains to be plotted
        u_mesh = u_mesh[row_start:row_finish, :]
        domain_final = domain[row_start:row_finish, :]
        # domain_final = domain
        # Keep min and max velocities of FLORIS domain
        vmin = np.min(u_mesh)
        vmax = np.max(u_mesh)
        if u_mesh.shape != domain_final.shape:
            print("Error: unequal domain shapes!")
        # Set figure properties
        fig, axs = plt.subplots(3, sharex=False)
        cmap = "coolwarm"
        # ----- FLORIS wake plots ----- #
        if contours_on == True:
            X, Y = np.meshgrid(
                np.linspace(0, new_len, u_mesh.shape[1]),
                np.linspace(new_height2, new_height1, u_mesh.shape[0]),
            )
            contours = axs[0].contour(X, Y, u_mesh, 4, alpha=0.5, linewidths=0.5, colors="white")
            axs[0].clabel(contours, inline=False, fontsize=1)
        im1 = axs[0].imshow(
            u_mesh[:, :int(xscale*u_mesh.shape[1])],
            vmin=vmin+1.25,
            vmax=vmax,
            cmap=cmap,
            extent=[0, new_len*xscale, new_height1, new_height2],
        )
        fig.colorbar(im1, ax=axs[0])
        axs[0].tick_params(axis="x", direction="in")
        axs[0].tick_params(axis="y", direction="in", length=0)
        # ----- DNN wake plots ----- #
        if contours_on == True:
            X, Y = np.meshgrid(
                np.linspace(0, new_len, domain_final.shape[1]),
                np.linspace(new_height2, new_height1, domain_final.shape[0]),
            )
            contours = axs[1].contour(X, Y, domain_final, 1, colors="white")
            axs[1].clabel(contours, inline=True, fontsize=8)
        im2 = axs[1].imshow(
            domain_final[:, :int(xscale*domain_final.shape[1])],
            vmin=vmin+1.25,
            vmax=vmax,
            cmap=cmap,
            extent=[0, new_len*xscale, new_height1, new_height2],
        )
        fig.colorbar(im2, ax=axs[1])
        axs[1].tick_params(axis="x", direction="in")
        axs[1].tick_params(axis="y", direction="in", length=0)
        # ----- ERROR (%) plots ----- #
        max_val = np.max(u_mesh)
        im3 = axs[2].imshow(
            (np.abs(u_mesh - domain_final) / max_val * 100)[:, :int(xscale*domain_final.shape[1])],
            cmap=cmap,
            extent=[0, new_len*xscale, new_height1, new_height2],
            vmax=20,
        )
        axs[2].tick_params(axis="x", direction="in")
        axs[2].tick_params(axis="y", direction="in", length=0)
        plt.colorbar(im3, ax=axs[2])
        if saveas != None:
            fig.savefig("figures/"+str(saveas), dpi=1200)
        else:
            plt.show()
        # Report the mean absolute error as a percentage of the FLORIS maximum.
        absdifsum = np.sum(np.abs(u_mesh - domain_final))
        error = round(1 / (dimx * dimy) * absdifsum / max_val * 100, 2)
        print("Abs mean error (%): ", error)
        # ----- Y-Transect plots ----- #
        mindx = np.min(xs)/dx+0.5
        mindx = int(mindx)
        tlist = mindx + np.array([3*D/dx, 6.5*D/dx, 10*D/dx]).astype(int)
        transects = tlist.size  # defines the number of transects
        fig, axs = plt.subplots(1, transects, sharey=False)
        cnt = 0
        for indx in tlist:
            yy1 = u_mesh[:, indx]  # FLORIS transect
            yy2 = domain_final[:, indx]  # CNN transect
            axs[cnt].plot(
                np.flip(yy1, axis=0),
                np.arange(u_mesh.shape[0]),
                color="navy",
                linestyle="--",
            )
            axs[cnt].plot(
                np.flip(yy2, axis=0), np.arange(u_mesh.shape[0]), color="crimson"
            )
            axs[cnt].title.set_text(str(int(indx * dx)))
            cnt += 1
        if saveas != None:
            fig.savefig("figures/"+str(saveas)+"yt", dpi=1200)
        else:
            plt.show()
    if power_opt == True:
        # Calculation of total farm power
        if local_pw == True:
            power_tot = pw_ar
        else:
            rho = 1.225  # air density
            hub_speeds_old = np.copy(hub_speeds_power)
            # Interpolate cp values
            cp_interp = np.interp(hub_speeds_old, wind_speed, cp)
            # Multiply by cos(theta) term
            # Default exponent of cos term is 1.0.
            # An exponent of .78 was found to perform best.
            cp_interp *= np.cos(np.pi / 180 * (-yws)) ** (1.3)
            # Calculate powers using the kinetic energy term
            power_tot = 0.5 * rho * cp_interp * hub_speeds_power**3 * area
        # Sum of all turbine power outputs
        power_tot = np.sum(power_tot)
        if floris_gain == True:
            # Calculate power gain as provided by FLORIS (for final assessment of optimisation).
            # Initialise FLORIS for initial configuraiton
            if curl == True:
                fi.floris.farm.set_wake_model("curl")
            fi.reinitialize_flow_field(wind_speed=u_stream)
            fi.reinitialize_flow_field(turbulence_intensity=tis)
            if x0.size > 1:
                fi.reinitialize_flow_field(layout_array=[xs0, ys0])
            else:
                fi.reinitialize_flow_field(layout_array=[xs, ys])
            fi.calculate_wake(yaw_angles=yaw_ini)
            # Get initial FLORIS power
            floris_power_0 = fi.get_farm_power()
            floris_power_opt = florisPw(u_stream, tis, xs, ys, yws)
            floris_power_gain = round(
                (floris_power_opt - floris_power_0) / floris_power_0 * 100, 2
            )
            if plots == True:
                print("----------------FLORIS for Neural--------------------")
                print("Floris Initial Power", round(floris_power_0 / 1e6, 2), "MW")
                print("Floris Optimal power", round(floris_power_opt / 1e6, 2), "MW")
                print("Floris Power Gain (%)", floris_power_gain)
                print("-----------------------------------------------------")
            return -power_tot, floris_power_opt/1e6, floris_power_0/1e6
        else:
            # Calculate power gain as provided by the DNN (used in optimisation steps).
            return -power_tot
| 19,926 | 37.469112 | 101 | py |
wakenet | wakenet-master/Code/optimisation.py | from superposition import *
import floris
def florisOptimiser(
    ws,
    ti,
    layout_x,
    layout_y,
    min_yaw=-30,
    max_yaw=30,
    resx=dimx,
    resy=dimy,
    plots=False,
    mode="yaw",
    results=True
):
    """
    Calls the Floris optimiser to optimise the yaws or the layout of a turbine farm.

    Args:
        ws (float) Downstream wind speed.
        ti (float) Downstream turbulence intesity.
        layout_x (numpy array of floats) Turbine x coordinates.
        layout_y (numpy array of floats) Turbine y coordinates.
        min_yaw (float) Minimum yaw of optimisation in degrees.
        max_yaw (float) Maximum yaw of optimisation in degrees.
        resx (float, optional) Horizontal resolution.
        resy (float, optional) Vertical resolution.
        plots (boolean, optional) If True, plots initial and optimised configuration.
        mode ('yaw' or 'farm', optional) Yaw or layout optimisation.
        results (boolean, optional) If True, prints a summary of the optimisation.

    Returns:
        power_opt (float) Floris optimised farm power output in MW.
        floris_time (float) Floris optimisation time in seconds.
        power_initial (float) Floris initial farm power output in MW.
    """
    print()
    print()
    print("In FLORIS Optimiser...")
    # Instantiate the FLORIS object from the packaged input file
    file_dir = os.path.dirname(os.path.abspath(__file__))
    fi = wfct.floris_interface.FlorisInterface(os.path.join(file_dir, file_path))
    # Initialise FLORIS wakefield
    if curl == True:
        fi.floris.farm.set_wake_model("curl")
    fi.reinitialize_flow_field(wind_speed=ws)
    fi.reinitialize_flow_field(turbulence_intensity=ti)
    fi.reinitialize_flow_field(layout_array=(layout_x, layout_y))
    fi.calculate_wake()
    power_initial = fi.get_farm_power()  # get initial power
    if mode == "yaw":
        # Start timer
        t0 = time.time()
        if plots == True:
            # Initialize the horizontal cut of the baseline flow field
            hor_plane = fi.get_hor_plane(height=hh, x_resolution=resx, y_resolution=resy)
            fig, ax = plt.subplots()
            wfct.visualization.visualize_cut_plane(hor_plane, ax=ax)
            ax.set_title("Baseline Case")
        # SLSQP options forwarded to scipy through FLORIS
        opt_options = {
            "maxiter": 100,
            "disp": True,
            "iprint": 2,
            "ftol": 1e-7,
            "eps": 0.1,
        }
        # Instantiate the Optimization object
        yaw_opt = YawOptimization(
            fi,
            minimum_yaw_angle=min_yaw,
            maximum_yaw_angle=max_yaw,
            opt_options=opt_options,
        )
        # Perform optimization
        yaw_angles = yaw_opt.optimize()
        # Assign yaw angles to turbines and calculate wake
        fi.calculate_wake(yaw_angles=yaw_angles)
        power_opt = fi.get_farm_power()
        # End timer
        t1 = time.time()
        if results == True:
            print("==========================================")
            print("Inital Power = ", round(power_initial / 1e6, 2))
            print("Optimized Power = ", round(power_opt / 1e6, 2))
            print("Total Power Gain = %.1f%%" % (100 * (power_opt - power_initial) / power_initial))
            print("Floris Yaws: ", yaw_angles)
            print("==========================================")
        if plots == True:
            # Initialize the horizontal cut of the optimised flow field
            hor_plane = fi.get_hor_plane(height=hh, x_resolution=resx, y_resolution=resy)
            # Plot and show
            fig, ax = plt.subplots()
            wfct.visualization.visualize_cut_plane(hor_plane, ax=ax)
            ax.set_title("Optimal Wake Steering")
            plt.show()
        floris_time = round(t1 - t0, 2)
        print("FLORIS TIME:", floris_time)
        return power_opt / 1e6, floris_time, power_initial / 1e6
    elif mode == "farm":
        # Define turbine layout
        layout_x = list(layout_x)
        layout_y = list(layout_y)
        fi.reinitialize_flow_field(layout_array=(layout_x, layout_y))
        if plots == True:
            hor_plane = fi.get_hor_plane(height=hh,)
            fig, ax = plt.subplots()
            wfct.visualization.visualize_cut_plane(hor_plane, ax=ax)
            plt.show()
        t0 = time.time()
        # Rectangular optimisation boundary (corners, in metres)
        boundaries = [
            [0, 0],
            [0, opt_ybound * D],
            [opt_xbound * D, opt_ybound * D],
            [opt_xbound * D, 0],
        ]
        # Generate random wind rose data
        # (single wind direction and wind speed for this study)
        wd = np.array([270.0]*1)
        ws = np.ones(len(wd))*ws
        freq = np.abs(np.sort(np.random.randn(len(wd))))
        freq = freq / freq.sum()
        # Set optimization options
        opt_options = {"maxiter": 50, "disp": True, "iprint": 2, "ftol": 1e-8}
        # Compute initial AEP for optimization normalization
        AEP_initial = fi.get_farm_AEP(wd, ws, freq)
        # Instantiate the layout otpimization object
        layout_opt = LayoutOptimization(
            fi=fi,
            boundaries=boundaries,
            wd=wd,
            ws=ws,
            freq=freq,
            AEP_initial=AEP_initial,
            opt_options=opt_options,
        )
        # Perform layout optimization. On failure, fall back to the initial
        # layout and log the failing (ws, ti) pair. The previous bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit; catching
        # Exception keeps the best-effort behaviour without doing that.
        try:
            layout_results = layout_opt.optimize()
        except Exception:
            layout_results = [layout_x, layout_y]
            with open("opt_exeptions.txt", "a") as exc_log:
                exc_log.write(str(ws) + ' ' + str(ti) + " \n")
        # Calculate new AEP results
        fi.reinitialize_flow_field(layout_array=(layout_results[0], layout_results[1]))
        AEP_optimized = fi.get_farm_AEP(wd, ws, freq)
        power_opt = fi.get_farm_power()
        if results == True:
            print("=====================================================")
            print("Total AEP Gain = %.1f%%" % (100.0 * (AEP_optimized - AEP_initial) / AEP_initial))
            print("Floris Initial Power", round(power_initial / 1e6, 2))
            print("Floris Optimal Power", round(power_opt / 1e6, 2))
            print("Total Power Gain (%)", round((power_opt - power_initial)/power_initial * 100, 2))
            print("Floris Layout: ", layout_results)
            print("=====================================================")
        t1 = time.time()
        floris_time = round(t1 - t0, 2)
        print("FLORIS TIME:", floris_time)
        if plots == True:
            # Plot the new layout vs the old layout
            layout_opt.plot_layout_opt_results()
            plt.show()
            hor_plane = fi.get_hor_plane(height=hh,)
            fig, ax = plt.subplots()
            wfct.visualization.visualize_cut_plane(hor_plane, ax=ax)
            plt.show()
        return power_opt / 1e6, floris_time, power_initial / 1e6
def _norm(val, x1, x2):
return (val - x1) / (x2 - x1)
def _space_constraint(x_in):
    """SLSQP inequality constraint for layout optimisation.

    Non-negative iff the closest pair of turbines is at least 2 rotor
    diameters apart. *x_in* is the flattened layout vector [xs..., ys...].
    """
    n = int(x_in.size / 2.0 + 0.25)
    xcoords = np.array(x_in[:n])
    ycoords = np.array(x_in[n:])
    # All ordered-pair distances (i != j); duplicates do not affect the min.
    gaps = []
    for i in range(n):
        for j in range(n):
            if i == j:
                continue
            sep = np.sqrt((xcoords[i] - xcoords[j]) ** 2 + (ycoords[i] - ycoords[j]) ** 2)
            gaps.append(sep)
    gaps = np.array(gaps)
    # Normalise boundaries (from Floris)
    lo = 0.0
    hi = (opt_xbound + 1.0) * D
    closest = _norm(np.min(gaps), lo, hi)
    floor = _norm(2.0 * D, lo, hi)
    return closest - floor
def neuralOptimiser(
    ws,
    ti,
    xs,
    ys,
    min_yaw=-30,
    max_yaw=30,
    plots=False,
    plots_ini=False,
    floris_gain=False,
    mode="yaw",
    results=True
):
    """
    Calls the neural optimiser to calculate the optimal yaws ('yaw' mode)
    or turbine positions ('farm' mode) of a turbine farm.
    Args:
        ws (float) Downstream wind speed.
        ti (float) Downstream turbulence intensity.
        xs (numpy array of floats) Turbine x coordinates.
        ys (numpy array of floats) Turbine y coordinates.
        min_yaw (float) Minimum yaw of optimisation in degrees.
        max_yaw (float) Maximum yaw of optimisation in degrees.
        plots (boolean, optional) If True, plots initial and optimised configuration.
        plots_ini (boolean, optional) If True, plots the initial configuration
            before optimisation ('yaw' mode only).
        floris_gain (boolean, optional) If True, calculates and returns gained power
            output with the optimised results of the DNN but using Floris for comparison.
        mode ('yaw' or 'farm') Specifies which optimisation mode is to be run.
        results (boolean, optional) If True, prints a summary of the results.
    Returns:
        floris_power_opt (float) Total farm power output produced by Floris in MW,
        based on the input turbine yaws and positions produced by the neural optimiser.
        neural_time (float) Neural Network optimisation time in seconds.
        floris_power_0 (float) Initial (unoptimised) Floris farm power in MW.
    """
    print()
    print()
    print("In NEURAL Optimiser...")
    layout = np.concatenate((xs, ys), axis=0)
    if mode == "yaw":
        # Calculate initial power
        power_ini = -superposition(
            np.zeros(xs.size),
            layout,
            u_stream=ws,
            tis=ti,
            cp=cp,
            wind_speed=wind_speed,
            plots=plots_ini,
            power_opt=True,
        )
        # Optimiser options
        opt_options = {
            "maxiter": 100,
            "disp": True,
            "ftol": 1e-7,
            "eps": 0.1,
        }
        # Set initial yaws
        x0 = (yaw_ini,) * xs.size
        # Set min-max yaw constraints
        bnds = ((min_yaw, max_yaw),) * xs.size
        # Optimise and time
        t0 = time.time()
        res = minimize(
            superposition,
            x0,
            args=(layout, ws, ti, cp, wind_speed),
            method="SLSQP",
            bounds=bnds,
            options=opt_options,
        )
        t1 = time.time()
        neural_time = round(t1 - t0, 2)
        optimal, floris_power_opt, floris_power_0 = superposition(
            res.x,
            layout,
            u_stream=ws,
            tis=ti,
            cp=cp,
            wind_speed=wind_speed,
            plots=plots,
            power_opt=True,
            floris_gain=floris_gain,
        )
        if results == True:
            print("-----------------------------------------------------")
            print("Floris Initial Power", round(floris_power_0, 2), "MW")
            print("Floris Optimal Power", round(floris_power_opt, 2), "MW")
            print("Floris Power Gain (%)", round((np.abs(floris_power_opt) - floris_power_0)/floris_power_0 * 100, 2))
            print("Neural Yaws:", np.round(res.x, 2))
            print("-----------------------------------------------------")
            print("NEURAL TIME:", neural_time)
        return floris_power_opt, neural_time, floris_power_0
    elif mode == "farm":
        farm_opt = True
        x0 = np.copy(layout)  # Save initial layout positions
        # Minimum turbine spacing (2*D) is enforced inside
        # _space_constraint itself, so it is passed directly as the
        # SLSQP inequality constraint. (A previous dead `tmp1` dict that
        # called _space_constraint with an extra argument was removed:
        # it was unused and the call would have raised TypeError.)
        con = {'type': 'ineq', 'fun': _space_constraint}
        # Optimiser options
        opt_options = {"maxiter": 100, "disp": True, "iprint": 2, "ftol": 1e-8, "eps": 5}
        # Set initial yaws
        yws = (yaw_ini,) * xs.size
        # Set min-max boundary constraints (+1D to match FLORIS bnds)
        bnds = ((0, opt_xbound*D),)*xs.size + ((0, opt_ybound*D),)*xs.size
        # Optimise and time
        t0 = time.time()
        res = minimize(
            superposition,
            layout,
            args=(yws, ws, ti, cp, wind_speed, farm_opt),
            method="SLSQP",
            bounds=bnds,
            options=opt_options,
            constraints=con,
        )
        t1 = time.time()
        neural_time = round(t1 - t0, 2)
        optimal, floris_power_opt, floris_power_0 = superposition(
            res.x,
            yws,
            u_stream=ws,
            tis=ti,
            cp=cp,
            wind_speed=wind_speed,
            farm_opt=farm_opt,
            plots=plots,
            power_opt=True,
            floris_gain=floris_gain,
            x0=x0,
        )
        if results == True:
            print("-----------------------------------------------------")
            print("Floris Initial Power", round(floris_power_0, 2), "MW")
            print("Floris Optimal Power", round(floris_power_opt, 2), "MW")
            print("Floris Power Gain (%)", round((np.abs(floris_power_opt) - floris_power_0)/floris_power_0 * 100, 2))
            print("Neural Layout:", np.round(res.x, 2))
            print("-----------------------------------------------------")
            print("NEURAL TIME:", neural_time)
        return floris_power_opt, neural_time, floris_power_0
def compare(
    yws,
    ws,
    ti,
    xs,
    ys,
    plots=False,
    print_times=True,
    timings=False,
    power_opt=True,
    single=False,
    saveas=None,
):
    """
    Compare a wind-farm evaluation by the Neural Network against Floris.
    Args:
        yws (numpy array of floats) Yaws of each turbine.
        ws (float) Free stream velocity.
        ti (floats) Turbulence intensity.
        xs (numpy array of floats) Turbine x coordinates.
        ys (numpy array of floats) Turbine y coordinates.
        print_times (boolean, optional) If True, prints timings.
        single (boolean, optional) If True, calculates single turbine.
    """
    # Pull the turbine power curve out of the FLORIS input file
    with open(file_path) as handle:
        data = json.load(handle)
    table = data["turbine"]["properties"]["power_thrust_table"]
    cp = np.array(table["power"])
    wind_speed = np.array(table["wind_speed"])
    layout = np.concatenate((xs, ys), axis=0)
    return superposition(
        yws,
        layout,
        u_stream=ws,
        tis=ti,
        cp=cp,
        wind_speed=wind_speed,
        plots=plots,
        power_opt=power_opt,
        print_times=print_times,
        timings=timings,
        floris_gain=True,
        single=single,
        saveas=saveas,
    )
def heatmap(xs, ys, res=10, farm_opt=False, saveas=None):
    """
    Assess the performance of the DNN vs FLORIS on
    parametric optimiser calls for a wide range of
    inlet speed and turbulence intensity for a
    specific array configuration.
    Args:
        xs (numpy array of floats) Turbine x coordinates.
        ys (numpy array of floats) Turbine y coordinates.
        res (int, optional) Resolution of heatmap.
        farm_opt (boolean, optional) Calls either farm or yaw optimisers.
        saveas (string, optional) Base filename; four suffixed figures
            are saved under figures/ when given, otherwise plots are shown.
    """
    # Wind speeds and turbulence intensities examined
    x_ws = np.linspace(ws_range[0], ws_range[1], res)
    y_ti = np.linspace(ti_range[0], ti_range[1], res)
    # Special-cased grid for res == 3 (hand-picked sample points)
    if res == 3:
        x_ws = np.array([11, 12.33333333, 13.66666667])
        y_ti = np.array([0.01, 0.03111111, 0.05222222])
    # Initialisation of power and timing heatmaps
    # g0: initial power; g1/t1: FLORIS optimised power/time; g2/t2: DNN
    g0 = np.zeros((res, res))
    g1 = np.zeros((res, res))
    g2 = np.zeros((res, res))
    t1 = np.zeros((res, res))
    t2 = np.zeros((res, res))
    only_ddn = False  # debug switch: skip the FLORIS optimiser runs when True
    cnt = 0
    # Begin parametric runs
    for k1 in range(res):
        for k2 in range(res):
            # Print progress
            print()
            print('OPTIMISATION PROGRESS:', int(cnt/res/res*100), "%", "COMPLETE.")
            print()
            cnt+=1
            if farm_opt == True:
                if only_ddn == False:
                    g1[k1, k2], t1[k1, k2], g0[k1, k2] = florisOptimiser(
                        ws=x_ws[k1],
                        ti=y_ti[k2],
                        layout_x=xs,
                        layout_y=ys,
                        mode="farm"
                    )
                g2[k1, k2], t2[k1, k2], g0[k1, k2] = neuralOptimiser(
                    ws=x_ws[k1],
                    ti=y_ti[k2],
                    xs=xs,
                    ys=ys,
                    floris_gain=True,
                    mode="farm",
                )
            else:
                if only_ddn == False:
                    g1[k1, k2], t1[k1, k2], g0[k1, k2] = florisOptimiser(
                        ws=x_ws[k1],
                        ti=y_ti[k2],
                        layout_x=xs,
                        layout_y=ys
                    )
                g2[k1, k2], t2[k1, k2], g0[k1, k2]= neuralOptimiser(
                    ws=x_ws[k1],
                    ti=y_ti[k2],
                    xs=xs, ys=ys,
                    floris_gain=True
                )
    # Derive the four output filenames from the base name (if any)
    if saveas != None:
        save1 = saveas+"floris_opt"; save2=saveas+"ddn_opt"; save3=saveas+"floris_t"; save4=saveas+"ddn_t"
    else:
        save1, save2, save3, save4 = saveas, saveas, saveas, saveas
    # Calculate FLORIS power gain in MW
    sample = g1 - g0
    mv = makeHeatmap(
        np.transpose(np.flip(sample, 1)), x_ws, y_ti, title="Floris optimisation", saveas=save1
    )
    if only_ddn == True:
        # mval = None
        mval = 2.1  # hard-coded colour cap used when FLORIS runs are skipped
    else:
        mval = mv
    # Calculate DNN power gain in MW
    sample = g2 - g0
    makeHeatmap(
        np.transpose(np.flip(sample, 1)), x_ws, y_ti, mval, title="Neural optimisation", saveas=save2
    )
    # Calculate FLORIS average time
    sample = t1
    print("Average FLORIS time:", np.round(np.mean(t1), 2))
    mv = makeHeatmap(np.transpose(np.flip(sample, 1)), x_ws, y_ti, title="Floris time", saveas=save3
    )
    if only_ddn == True:
        # mval = None
        mval = 1700  # hard-coded time cap used when FLORIS runs are skipped
    else:
        mval = mv
    # Calculate DNN average time
    sample = t2
    print("Average DNN time:", np.round(np.mean(t2), 2))
    makeHeatmap(np.transpose(np.flip(sample, 1)), x_ws, y_ti, mval, title="Neural time", saveas=save4
    )
def makeHeatmap(bitmap, x_ws, y_ti, vmax=None, title=None, saveas=None):
    """
    Render a heatmap of parametric optimisation results.
    Args:
        bitmap (2D numpy array of floats) Calculated powers.
        x_ws (1D numpy array of floats) Wind speeds.
        y_ti (1D numpy array of floats) Turbulence intensities.
        vmax (float, optional) Max value cap of plot.
        title (string) Plot title.
        saveas (string, optional) If given, figure is saved under figures/.
    Returns:
        (float) Colour-scale maximum actually used.
    """
    # Axis extents come straight from the sampled ranges
    x_lo, x_hi = np.min(x_ws), np.max(x_ws)
    y_lo, y_hi = np.min(y_ti), np.max(y_ti)
    # Symmetric colour range around zero unless a cap is supplied
    maxval = vmax if vmax else np.max(np.abs([bitmap.min(), bitmap.max()]))
    if title in ("Floris time", "Neural time"):
        # Timings are non-negative: reversed colormap, floor at zero
        cmap, vmin = "RdYlGn_r", 0
    else:
        cmap, vmin = "RdYlGn", -maxval
    # Plot heatmap based on bitmap produced by the "Assess" function.
    plt.figure()
    plt.imshow(
        bitmap,
        cmap=cmap,
        interpolation="nearest",
        vmin=vmin,
        vmax=maxval,
        extent=[x_lo, x_hi, y_lo, y_hi],
        aspect=(x_hi - x_lo) / (y_hi - y_lo),
    )
    plt.title(title, fontname="serif")
    plt.xlabel("Free stream velocity (m/s)", fontname="serif")
    plt.ylabel("Turbulence intensity", fontname="serif")
    plt.colorbar()
    if saveas != None:
        plt.savefig("figures/"+str(saveas), dpi=1200)
    else:
        plt.show()
    return maxval
def yawVsPowerContour(yws, ws, ti, xs, ys, res=30, saveas=None):
    """
    Plot 2 turbine wind farm yaw-power contour.

    Sweeps the yaw of the last two turbines over a res x res grid
    and plots the resulting power surfaces from the Neural Network
    and from FLORIS as 3D contours.

    Args:
        yws (list of floats) Initial yaws; only its length (2 or 3) is
            used to decide how many turbines are swept — the values are
            overwritten inside the loop.
        ws (float) Free stream velocity.
        ti (float) Turbulence intensity.
        xs, ys (numpy arrays of floats) Turbine coordinates.
        res (int, optional) Grid resolution per yaw axis.
        saveas (string, optional) Base filename for saved figures.
    """
    from mpl_toolkits import mplot3d
    x = np.linspace(0, res, res)
    y = np.linspace(0, res, res)
    X, Y = np.meshgrid(x, y)
    powerNeural = np.zeros((res, res))
    powerFloris = np.zeros((res, res))
    cnt = 0
    for i in range(res):
        for j in range(res):
            # NOTE(review): `yws` is reassigned here, so after the first
            # iteration len(yws) stays fixed — the branch taken depends
            # only on the caller's original length.
            if len(yws) == 2:
                yws = [i, j]
            elif len(yws) == 3:
                yws = [0, i, j]
            r = compare(
                yws,
                ws,
                ti,
                xs,
                ys,
                print_times=False,
                timings=False,
                power_opt=True,
                single=False,
            )
            # presumably r = (neural power, floris power, ...); verify
            # against superposition's return signature
            powerNeural[i, j], powerFloris[i, j] = -r[0] / 1e6, r[1]
    fig = plt.figure(1)
    ax = plt.axes(projection="3d")
    ax.contour3D(X, Y, powerNeural, 50, cmap="viridis")
    # ax.plot_surface(X, Y, powerNeural, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
    ax.set_xlabel("Yaw b")
    ax.set_ylabel("Yaw a")
    ax.set_zlabel("Power (MW)")
    ax.set_title("Neural")
    if saveas != None:
        fig.savefig("figures/"+str(saveas)+"yvpd", dpi=1200)
    else:
        plt.show()
    fig = plt.figure(2)
    ax = plt.axes(projection="3d")
    ax.contour3D(X, Y, powerFloris, 50, cmap="viridis")
    ax.set_xlabel("Yaw b")
    ax.set_ylabel("Yaw a")
    ax.set_zlabel("Power (MW)")
    ax.set_title("FLORIS")
    if saveas != None:
        fig.savefig("figures/"+str(saveas)+"yvpf", dpi=1200)
    else:
        plt.show()
| 20,767 | 29.541176 | 118 | py |
wakenet | wakenet-master/Code/synth_and_train.py | from neuralWake import *
def set_seed(seed):
    """
    Seed every RNG (python, numpy, torch CPU and all GPUs) and disable
    the non-deterministic cudnn paths so runs are reproducible.
    Args:
        seed (int) Seed value applied to all generators.
    Returns:
        (boolean) Always True.
    """
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # cudnn's auto-tuner selects convolution algorithms
    # non-deterministically; switch it off entirely
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False
    return True
def normalise(x, mode, print_output=False):
    """
    Normalise input data.
    Args:
        x (numpy float array) Data to be normalised.
        mode (values: 1, 2, 3) Modes of normalisation,
            1: Min-Max [0, 1], 2: Z-Score (applied in place),
            3: Min-Max [-1, 1].
        print_output (boolean, optional) Prints normalised data and
            waits for keyboard confirmation.
    Returns:
        x (numpy float array) Normalised data. Mode 2 mutates and
        returns the caller's array; modes 1 and 3 return a new array.
    """
    if mode == 1:
        # Rescale onto [0, 1]
        lo, hi = np.min(x), np.max(x)
        x = np.true_divide(x - lo, hi - lo)
        if print_output == True:
            print("Normalised speeds:", x)
            input("Press enter to continue...")
    elif mode == 2:
        # Z-score; note the in-place ops mutate the caller's array
        x -= np.mean(x)
        x /= np.std(x)
        if print_output == True:
            print("Normalised speeds:", x)
            input("Press enter to continue...")
    elif mode == 3:
        # Rescale onto [-1, 1]
        lo, hi = np.min(x), np.max(x)
        x = (np.true_divide(x - lo, hi - lo) - 0.5) * 2
        if print_output == True:
            print("Normalised speeds:", x)
            input("enter")
    return x
def create(plots=False):
    """
    Generates synthetic wake deficit data.

    Draws random inlet conditions (speed, TI, yaw, hub height), runs
    FLORIS for each sample (or loads a pre-saved dataset for the curl
    model), and packs the wake velocity slices into training arrays.

    Args:
        plots (boolean, optional) Plots indicative sample.
    Returns:
        X_train, X_val, X_test (torch float tensors) Training, validation
        and test sets input inlet conditions (normalised).
        y_train, y_val, y_test (torch float tensors) Training, validation
        and test sets output wake deficits as calculated by Floris.
    """
    # Random Dataset: inlet speed / TI pairs from the weather model
    speeds, ti = wakeNet.tiVsVel(data_size)
    np.random.seed(51)
    yw = (np.random.rand(data_size) - 0.5) * (
        yw_range[1] - yw_range[0]
    )  # hub yaw angles
    np.random.seed(256)
    hbs = (
        np.random.rand(data_size) * (hb_range[1] - hb_range[0]) + hb_range[0]
    )  # height slice
    print("Max inlet speed:", round(np.max(speeds), 2), "m/s")
    # Output buffer: one (out_piece x rows) slab per sample
    speeds_out = np.zeros((data_size, out_piece, rows))
    u_rs = np.zeros((out_piece, rows))
    sample_plots = []
    cnt = 1
    sample_size = 9  # must be perfect square for sample plots
    if save_data == True:
        # Interactive guard before overwriting the on-disk dataset
        print("Are you sure you want to create new dataset? (y/n)")
        if input() == "y":
            os.system("mkdir " + "wake_dataset")
            np.save("wake_dataset/inlets.npy", np.stack((speeds, ti, yw), axis = 1))
    elif curl == True:
        # Curl model: reuse previously saved inlet conditions
        inlets = np.load("wake_dataset/inlets.npy")
        speeds, ti, yw = inlets[:data_size, 0], inlets[:data_size, 1], inlets[:data_size, 2]
    for i in range(data_size):
        if curl == True:
            fi.floris.farm.set_wake_model("curl")
        if make_data == True:
            if i == 0:
                print("Synthesizing data...")
            if i % 100 == 0:
                print("Synthesised", int(i / data_size * 100), "%", "of wakes.")
            # Reinitialise FLORIS with however many inlet conditions
            # this configuration uses (1..4)
            if inputs == 1:
                fi.reinitialize_flow_field(wind_speed=speeds[i])
                fi.calculate_wake()
            if inputs == 2:
                fi.reinitialize_flow_field(wind_speed=speeds[i])
                fi.reinitialize_flow_field(turbulence_intensity=ti[i])
                fi.calculate_wake()
            if inputs == 3:
                fi.reinitialize_flow_field(wind_speed=speeds[i])
                fi.reinitialize_flow_field(turbulence_intensity=ti[i])
                fi.calculate_wake(yaw_angles=yw[i])
            if inputs == 4:
                # 4-input mode also varies the horizontal slice height
                fi.reinitialize_flow_field(wind_speed=speeds[i])
                fi.reinitialize_flow_field(turbulence_intensity=ti[i])
                fi.change_turbine([0], {"yaw_angle": yw[i]})
                cut_plane = fi.get_hor_plane(
                    height=hbs[i],
                    x_resolution=dimx,
                    y_resolution=dimy,
                    x_bounds=x_bounds,
                    y_bounds=y_bounds,
                )
            else:
                cut_plane = fi.get_hor_plane(
                    height=hh,
                    x_resolution=dimx,
                    y_resolution=dimy,
                    x_bounds=x_bounds,
                    y_bounds=y_bounds,
                )
            u_mesh = cut_plane.df.u.values.reshape(
                cut_plane.resolution[1], cut_plane.resolution[0]
            )
            if save_data == True:
                # Save velocities as numpy array
                np.save("wake_dataset/" + "wake" + str(i), u_mesh)
                continue
        if save_data == False and curl == True:
            if i == 0:
                print("Loading data...")
            if i % 100 == 0:
                print("Loaded ", int(i / data_size * 100), "%", "of wakes.")
            # Read back into different array "r"
            u_mesh = np.load("wake_dataset/" + "wake" + str(i) + ".npy")
        if row_major == 0:
            u_mesh = u_mesh.T
        u_mesh = u_mesh.flatten()
        # Re-slice the flat wake into (out_piece x rows) columns
        for kapa in range(rows):
            u_rs[:, kapa] = u_mesh[kapa * out_piece : (kapa + 1) * out_piece]
        if cubes == 1:
            # Repack the slab into dim1 x dim2 tiles ("cubes" layout)
            jj = 0
            ii = 0
            alpha = np.zeros((dim1 * dim2, int(u_rs.size / (out_piece))))
            for k in range(int(u_rs.size / (dim1 * dim2))):
                alpha[:, k] = u_rs[ii : ii + dim1, jj : jj + dim2].flatten("C")
                jj += dim2
                if jj >= u_rs.shape[1]:
                    jj = 0
                    ii += dim1
            speeds_out[i] = alpha
        else:
            speeds_out[i] = u_rs
        # Store synthesized data for plotting
        if plots == True:
            sample_plots.append(cut_plane)
        # Plot synthesized data (batches of sample_size)
        if plots == True and np.mod(i + 1, sample_size) == 0:
            fig, axarr = plt.subplots(
                int(np.sqrt(sample_size)),
                int(np.sqrt(sample_size)),
                sharex=True,
                sharey=True,
                figsize=(12, 5),
            )
            axarr = axarr.flatten()
            minspeed = np.min(speeds[(cnt - 1) * sample_size : cnt * sample_size])
            maxspeed = np.max(speeds[(cnt - 1) * sample_size : cnt * sample_size])
            for ii in range(sample_size):
                ax = axarr[ii]
                # Subplot title: (speed, TI, yaw) of the sample
                title = (
                    "("
                    + str(np.round(speeds[(cnt - 1) * sample_size + ii], 1))
                    + ", "
                    + str(np.round(ti[(cnt - 1) * sample_size + ii], 2))
                    + ", "
                    + str(np.round(yw[(cnt - 1) * sample_size + ii], 1))
                    + ")"
                )
                hor_plane = sample_plots[ii]
                wfct.visualization.visualize_cut_plane(
                    hor_plane, ax=ax, minSpeed=minspeed, maxSpeed=maxspeed
                )
                ax.set_title(title)
                ax.set_yticklabels(ax.get_yticks().astype(int))
                ax.set_xticklabels(ax.get_xticks().astype(int))
            plt.show()
            sample_plots = []
            cnt += 1
    # Normalisation: map each inlet quantity onto roughly [-1.5, 1.5]
    speeds = ((speeds - ws_range[0]) / (ws_range[1] - ws_range[0]) - 0.5) * 3
    ti = ((ti - ti_range[0]) / (ti_range[1] - ti_range[0]) - 0.5) * 3
    yw = ((yw - yw_range[0]) / (yw_range[1] - yw_range[0]) - 0.5) * 3
    hbs = ((hbs - hb_range[0]) / (hb_range[1] - hb_range[0]) - 0.5) * 3
    # Make X and y
    X_input = np.zeros((data_size, inputs))
    if inputs == 1:
        X_input[:, 0] = speeds
    if inputs == 2:
        X_input[:, 0] = speeds
        X_input[:, 1] = ti
    if inputs == 3:
        X_input[:, 0] = speeds
        X_input[:, 1] = ti
        X_input[:, 2] = yw
    if inputs == 4:
        X_input[:, 0] = speeds
        X_input[:, 1] = ti
        X_input[:, 2] = yw
        X_input[:, 3] = hbs
    X = torch.tensor(X_input, dtype=torch.float)
    y = torch.tensor(speeds_out, dtype=torch.float)
    X = X.view(data_size, -1)
    print("X shape:", X.shape)
    print("y shape:", y.shape)
    # Train, Validation, Test slices
    c1 = int(data_size * (train_slice))
    c2 = int(data_size * (train_slice + val_slice))
    c3 = int(data_size * (train_slice + val_slice + test_slice))
    X_train = X[:c1]
    y_train = y[:c1]
    X_val = X[c1:c2]
    y_val = y[c1:c2]
    X_test = X[c2:c3]
    y_test = y[c2:c3]
    return X_train, X_val, X_test, y_train, y_val, y_test
def dif_central(u, dx, eflag=0):
    """
    Central-difference x-derivative of a batch of 2D fields.

    Args:
        u (torch tensor, shape (batch, H, W)) Fields to differentiate.
        dx (float) Grid spacing along the last axis.
        eflag (int, optional) When -1, zeros the first 10 columns and
            shows debug plots of the field and its derivative.
    Returns:
        u_x (torch tensor, same shape as u) Derivative along the last
        axis: second-order central differences in the interior,
        one-sided stencils at the edges.

    NOTE(review): the right-edge stencil is (u[-2] - u[-1]) / dx, i.e.
    the *negative* of a backward difference — kept as in the original;
    confirm whether the sign is intentional.
    """
    # Derive the width from the input itself (was the global `dimx`),
    # so the function works for any field width.
    width = u.shape[2]
    u_x = torch.ones_like(u)
    # Interior: central difference, vectorised over the batch axis
    # (replaces the original per-sample Python loop; identical results).
    for jj in range(1, width - 1):
        u_x[:, :, jj] = (u[:, :, jj + 1] - u[:, :, jj - 1]) / (2 * dx)
    # One-sided stencils at the boundaries
    u_x[:, :, 0] = (u[:, :, 1] - u[:, :, 0]) / dx
    u_x[:, :, -1] = (u[:, :, -2] - u[:, :, -1]) / dx
    if eflag == -1:
        u_x[:, :, :10] = 0
        plt.figure(1)
        plt.imshow(u[0].detach().cpu().numpy())
        plt.figure(2)
        plt.imshow(u_x[0].detach().cpu().numpy())
        plt.show()
    return u_x
def training(X_train, X_val, X_test, y_train, y_val, y_test, model, plot_curves=0,
    multiplots=False, data_size=data_size, batch_size=batch_size, saveas=None):
    """
    Trains the neural model.

    Runs the epoch loop over the training split, evaluates on the
    validation split each epoch, checkpoints the best-validation-loss
    weights to `weights_path`, and optionally plots loss/accuracy curves.

    Args:
        X_train, X_val, X_test (torch tensors) Input splits (X_test is
            currently unused here).
        y_train, y_val, y_test (torch tensors) Target splits; only the
            last column (`[:, :, -1]`) is used as the target.
        model (torch model) The wakeNet model to train.
        plot_curves (int, optional) If 1, shows loss/accuracy curves.
        multiplots (boolean, optional) Suppresses plt.show() when True.
        data_size, batch_size (ints, optional) Dataset/batch sizes.
        saveas (string, optional) If given, curve figure is saved.
    Returns:
        v_loss_plot, t_loss_plot, v_plot, t_plot (lists of floats)
        Per-epoch validation/training loss and accuracy histories; the
        last entries are replaced by the best values seen.
    """
    if batch_size > X_train.shape[0]:
        print('Error: batch_size must be <', X_train.shape[0])
        exit()
    # Define validation and test batch sizes
    val_batch_size = y_val.size()[0]
    train_split = TensorDataset(X_train, y_train[:, :, -1])
    validation_split = TensorDataset(X_val, y_val[:, :, -1])
    train_loader = DataLoader(
        train_split, batch_size=batch_size, shuffle=True, num_workers=workers, drop_last=True
    )
    validation_loader = DataLoader(
        validation_split, batch_size=val_batch_size, shuffle=True, num_workers=workers, drop_last=True
    )
    # Seed, optimiser and criterion
    set_seed(42)
    # Only the three explicit linear layers are optimised (not the
    # `model.fc` list copies built in __init__)
    params = list(model.fc1.parameters()) + \
             list(model.fc2.parameters()) + \
             list(model.fc3.parameters())
    # Optimizers
    if opt_method == "SGD":
        optimizer = optim.SGD(params, lr=lr, momentum=momentum)
    elif opt_method == "Rprop":
        optimizer = optim.Rprop(params, lr=lr, etas=(0.5, 1.2), step_sizes=(1e-06, 50))
    elif opt_method == "Adam":
        optimizer = optim.Adam(params, lr=lr)
    # Loss criterions
    # NOTE(review): `size_average` is deprecated in modern torch;
    # equivalent is reduction='mean'
    criterion = nn.MSELoss(size_average=1)
    criterion = criterion.to(device)
    # Initialise plots
    t_plot = []; v_plot = []
    t_loss_plot = []; v_loss_plot = []
    lossmin = 1e16; valmax = 0.5
    # Model Training
    for i_epoch in range(epochs):
        print("Epoch:", i_epoch, "/", epochs)
        t_loss = 0; t_lossc1 = 0; t_lossc1_ = 0; t_lossc2 = 0; t_acc = 0
        v_loss = 0; v_acc = 0; v_lossc1_ = 0; v_min = 0;
        model.train().to(device)
        eflag = i_epoch
        for X, y in train_loader:
            # Get yt_pred
            X, y = X.to(device), y.to(device)
            yt_pred = model(X)
            c1 = criterion(yt_pred, y)
            yy = yt_pred.detach().cpu().numpy()
            yy_ = y.detach().cpu().numpy()
            # c2 is a placeholder for an (currently disabled) extra loss term
            c2 = torch.tensor(0)
            # Losses
            train_loss = c1 + c2
            t_loss += train_loss.item()
            # Relative error term used for the accuracy metric
            tterm = torch.abs(y - yt_pred)/torch.max(y)
            t_acc += torch.sum(torch.pow(tterm, 2)).detach().cpu().numpy()
            t_lossc1 += c1.item()
            t_lossc1_ += torch.sum(torch.pow(y - yt_pred, 2)).detach().cpu().numpy()
            t_lossc2 += c2.item()
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()
            eflag = 0
        # Training results (averaged per batch / per output element)
        t_loss = t_loss/(train_slice*data_size/batch_size)
        t_lossc1 = t_lossc1/(train_slice*data_size/batch_size)
        t_lossc1_ /= train_slice*data_size*out_piece
        t_lossc2 = t_lossc2/(train_slice*data_size/batch_size)
        t_acc /= train_slice*data_size*out_piece
        t_acc = 1 - np.sqrt(t_acc)
        model.eval().to(device)
        for X, y in validation_loader:
            with torch.no_grad():
                val_batch = y.shape[0]
                X, y = X.to(device), y.to(device)
                y_pred = model(X)
                c1 = criterion(y_pred, y)
                c2 = torch.tensor(0)
                val_loss = c1 + c2
                v_loss += val_loss.item()
                vvterm = torch.abs(y - y_pred)/torch.max(y)
                v_acc += torch.sum(torch.pow(vvterm, 2)).detach().cpu().numpy()
                v_min += torch.min(1 - torch.abs(y - y_pred)).detach().cpu().numpy()
                v_lossc1_ += torch.sum(torch.pow(y - y_pred, 2)).detach().cpu().numpy()
        # # Validation results
        v_loss = v_loss/(val_batch_size/val_batch)
        v_lossc1_ /= val_batch_size*out_piece
        v_acc /= val_batch_size*out_piece
        v_acc = 1 - np.sqrt(v_acc)
        v_min /= val_batch_size*out_piece
        # Append to plots
        t_plot.append(t_acc); v_plot.append(v_acc)
        t_loss_plot.append(t_loss); v_loss_plot.append(v_loss)
        # Checkpoint on best validation loss
        if v_loss < lossmin:  # and i_epoch > epochs*0.8:
            lossmin = v_loss
            # Save model weights
            torch.save(model.state_dict(), weights_path)
            print("Saved weights with", v_loss, "loss")
        if v_acc > valmax:  # and i_epoch > epochs*0.8:
            valmax = v_acc
        # Mean sum squared loss
        print(
            "t_acc: " + str(round(t_acc, 4)) + " v_acc: " + str(round(v_acc, 4))
            + " t_loss: " + str(round(t_loss, 2)) + " v_loss: " + str(round(v_loss, 2))
            + " t_lossc1: " + str(round(t_lossc1, 2)) + " t_lossc2: " + str(round(t_lossc2, 2))
        )
    # ------------- Loss and Accuracy Plots -------------#
    if plot_curves == 1 or saveas != None:
        fig, axs = plt.subplots(1, 2)
        del fig
        axs[0].plot(np.arange(epochs), t_loss_plot, color="navy", linestyle="--")
        axs[0].plot(np.arange(epochs), v_loss_plot, color="crimson")
        axs[1].plot(np.arange(epochs), t_plot, color="navy", linestyle="--")
        axs[1].plot(np.arange(epochs), v_plot, color="crimson")
        axs[1].set_ylim(0.5, 1)
        print("Validation loss:", lossmin)
        print("Validation accuracy:", valmax)
        axs[0].tick_params(axis="x", direction="in")
        axs[0].tick_params(axis="y", direction="in")
        axs[0].set_aspect(aspect=1.0 / axs[0].get_data_ratio())
        axs[1].tick_params(axis="x", direction="in")
        axs[1].tick_params(axis="y", direction="in")
        axs[1].set_aspect(aspect=1.0 / axs[1].get_data_ratio())
        if saveas != None:
            plt.savefig("figures/"+str(saveas), dpi=1200)
        elif multiplots == False:
            plt.show()
    # Replace last values with best values
    v_loss_plot[-1] = lossmin
    v_plot[-1] = valmax
    return v_loss_plot, t_loss_plot, v_plot, t_plot
| 15,305 | 31.916129 | 102 | py |
wakenet | wakenet-master/Code/__init__.py | # __init__.py | 13 | 13 | 13 | py |
wakenet | wakenet-master/Code/neuralWake.py | from packages import *
from initialisations import *
class wakeNet(nn.Module):
    """
    Fully-connected network mapping scalar inlet conditions (wind speed,
    turbulence intensity, yaw, hub height) to a single-turbine wake
    velocity-deficit field.
    """
    def __init__(self, inputs=3, hidden_neurons=[100, 200]):
        """
        Build the network layers.

        Args:
            inputs (int, optional) Number of scalar inlet conditions fed
                to the network.
            hidden_neurons (list of ints, optional) Sizes of the hidden
                layers; the output layer size is the global `out_piece`.
                NOTE(review): mutable default argument; it is only read
                here, but confirm no caller mutates it.

        NOTE(review): the `self.fc`/`self.fcb` lists are built but
        `forward` only uses the separately-created fc1/fc2/fc3 and
        fcb1/fcb2 — the lists appear to be vestigial.
        """
        super(wakeNet, self).__init__()
        # Parameters
        self.inputSize = inputs
        self.outputSize = out_piece
        self.hidden_neurons = hidden_neurons
        self.layers = len(self.hidden_neurons) + 1
        iSize = [self.inputSize] + self.hidden_neurons + [self.outputSize]
        # Initialisation of linear layers
        self.fc = []
        # Append layers
        for psi in range(self.layers):
            self.fc.append(nn.Linear(iSize[psi], iSize[psi+1], bias=True).to(device))
        self.fc1 = nn.Linear(iSize[0], iSize[1], bias=True).to(device)
        self.fc2 = nn.Linear(iSize[1], iSize[2], bias=True).to(device)
        self.fc3 = nn.Linear(iSize[2], iSize[3], bias=True).to(device)
        # Initialisation of batchnorm layers
        self.fcb = []
        # Append layers
        for psi in range(self.layers-1):
            self.fcb.append(nn.BatchNorm1d(iSize[psi+1], affine=False).to(device))
        self.fcb1 = nn.BatchNorm1d(iSize[1], affine=False).to(device)
        self.fcb2 = nn.BatchNorm1d(iSize[2], affine=False).to(device)
        # Dropout
        self.drop = nn.Dropout(0.2).to(device)  # 20% probability
        # Activation functions
        self.act = nn.Tanh().to(device)
        self.act2 = self.purelin
    def tansig(self, s):
        # Hyperbolic-tangent sigmoid (MATLAB-style "tansig")
        return 2 / (1 + torch.exp(-2 * s)) - 1
    def purelin(self, s):
        # Identity (linear) activation for the output layer
        return s
    @staticmethod
    def tiVsVel(n, weather=weather, plots=False):
        """Make ti vs speeds distribution.

        Returns n (wind speed, turbulence intensity) samples: uniform
        when `weather` is False, otherwise a truncated-normal speed
        distribution with a speed-dependent TI curve plus jitter.
        """
        if plots == True:
            # Visual comparison of the uniform vs weather distributions
            np.random.seed(89)
            xs0 = (np.random.rand(data_size) * (ws_range[1] - ws_range[0]) + ws_range[0])  # ws
            np.random.seed(42)
            ys0 = (np.random.rand(data_size) * (ti_range[1] - ti_range[0]) + ti_range[0])  # ti
            lower, upper = ws_range[0], ws_range[1]
            s = 1e-9
            mu, sigma = 3, 8
            xx = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
            xs = xx.rvs(n)
            yy = 2 ** (1 / (xs + s) / 6) - 0.9
            rs = []
            for _ in range(n):
                rs.append(-0.01 + random.random() * 0.02)
            ys = 2 ** (1 / (xs + s) / 6) - 0.9 + rs * (1 + 60 * (yy - 0.1))
            plt.scatter(xs, ys, s=0.5)
            plt.show()
            exit()
        if weather == False:
            # Uniform, independent ws / ti samples
            np.random.seed(89)
            xs = (np.random.rand(data_size) * (ws_range[1] - ws_range[0]) + ws_range[0])  # ws
            np.random.seed(42)
            ys = (np.random.rand(data_size) * (ti_range[1] - ti_range[0]) + ti_range[0])  # ti
        else:
            # Weather-like joint distribution: truncated-normal speeds,
            # TI decaying with speed plus small random jitter
            lower, upper = ws_range[0], ws_range[1]
            s = 1e-9
            mu, sigma = 3, 8
            xx = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
            xs = xx.rvs(n)
            yy = 2 ** (1 / (xs + s) / 6) - 0.9
            rs = []
            for _ in range(n):
                rs.append(-0.01 + random.random() * 0.02)
            ys = 2 ** (1 / (xs + s) / 6) - 0.9 + rs * (1 + 60 * (yy - 0.1))
        return xs, ys
    def forward(self, X):
        """
        Performs a forward step during training.
        Args:
            X (torch float array) Inputs of training step.
        Returns:
            out (torch float array) Turbine wake output.
        """
        X = X.to(device)
        # During inference (train_net == 0) a single sample arrives
        # unbatched; reshape so batchnorm sees a batch dimension
        if train_net == 0:
            X = X.view(1, -1)
        X = self.fc1(X)
        X = self.act(X)
        X = self.fcb1(X)
        if train_net == 0:
            X = X.view(1, -1)
        X = self.fc2(X)
        X = self.act(X)
        X = self.fcb2(X)
        out = self.act2(self.fc3(X))
        return out
    def saveWeights(self, model):
        # Serialise the whole model object (not just state_dict) to "NN"
        torch.save(model, "NN")
    def compareContour(
        self,
        u_stream,
        ws,
        ti_ar,
        yw,
        hb,
        model,
        result_plots=result_plots,
        timings=False,
    ):
        """
        Compare the neural wake against the FLORIS analytical wake for
        one set of inlet conditions.
        Args:
            u_stream (float) Farm free-stream velocity used to fill the
                background of the returned wake field.
            ws (float) Wind speed at this turbine.
            ti_ar (sequence of 2 floats) Turbulence intensities:
                ti_ar[0] is used for the FLORIS run, ti_ar[1] for the
                neural input.
            yw (float) Yaw angle.
            hb (float) Hub height.
            model (torch model) Passes the neural model to be used.
            timings (boolean, optional) Prints and output timings of both Neural
            and Analytical calculations.
        Returns:
            gauss_time, neural_time, error (floats) Analytical, Neural timings and
            absolute mean error between the Analytical and Neural wake deficits.
            or
            final (2D numpy float array) Wake profile with u_stream background velocity.

        NOTE(review): when both `timings` and `result_plots` are False,
        `u_mesh`/`gauss_time` are never assigned yet are referenced
        below (vmin/vmax) — this path looks like it would raise
        NameError; confirm intended usage.
        """
        tmp = yw
        yw = np.zeros(1)
        yw[0] = tmp
        hb = np.array(hb)
        ti = ti_ar[0]
        if timings == True or result_plots == True:
            t0 = time.time()
            # Set Floris parameters
            if curl == True:
                fi.floris.farm.set_wake_model("curl")  # curl model
            if inputs == 1:
                fi.reinitialize_flow_field(wind_speed=ws)
                fi.calculate_wake()
            if inputs == 2:
                fi.reinitialize_flow_field(wind_speed=ws)
                fi.reinitialize_flow_field(turbulence_intensity=ti)
                fi.calculate_wake()
            if inputs == 3:
                fi.reinitialize_flow_field(wind_speed=ws)
                fi.reinitialize_flow_field(turbulence_intensity=ti)
                fi.calculate_wake(yaw_angles=yw)
            if inputs == 4:
                fi.reinitialize_flow_field(wind_speed=ws)
                fi.reinitialize_flow_field(turbulence_intensity=ti)
                fi.change_turbine([0], {"yaw_angle": yw})
                cut_plane = fi.get_hor_plane(
                    height=hb,
                    x_resolution=dimx,
                    y_resolution=dimy,
                    x_bounds=x_bounds,
                    y_bounds=y_bounds,
                )
            else:
                cut_plane = fi.get_hor_plane(
                    height=hh,
                    x_resolution=dimx,
                    y_resolution=dimy,
                    x_bounds=x_bounds,
                    y_bounds=y_bounds,
                )
            u_mesh = cut_plane.df.u.values.reshape(
                dimy, dimx
            )
        t1 = time.time()
        # Get analytical model timing
        gauss_time = t1 - t0
        # Keep min value for plotting
        vmin = np.min(u_mesh)
        vmax = np.max(u_mesh)
        # Initialise model for evaluation
        model.eval().to(device)
        t0 = time.time()
        # Initialise neural output vector
        neural = np.zeros(dimx * dimy)
        ti = ti_ar[1]
        # Normalisation (same mapping used when the training set was built)
        speed_norm = ((ws - ws_range[0]) / (ws_range[1] - ws_range[0]) - 0.5) * 3
        ti_norm = ((ti - ti_range[0]) / (ti_range[1] - ti_range[0]) - 0.5) * 3
        yw_norm = ((yw - yw_range[0]) / (yw_range[1] - yw_range[0]) - 0.5) * 3
        hbs_norm = ((hb - hb_range[0]) / (hb_range[1] - hb_range[0]) - 0.5) * 3
        # Make input tensor
        if inputs == 1:
            inpt = torch.tensor(([speed_norm]), dtype=torch.float)
        elif inputs == 2:
            inpt = torch.tensor(([speed_norm, ti_norm]), dtype=torch.float)
        elif inputs == 3:
            inpt = torch.tensor(([speed_norm, ti_norm, yw_norm]), dtype=torch.float)
        elif inputs == 4:
            inpt = torch.tensor(
                ([speed_norm, ti_norm, yw_norm, hbs_norm]), dtype=torch.float
            )
        model.eval().to(device)
        neural = model(inpt).detach().cpu().numpy()
        # Apply Filter to replace backround with u_stream (helps with scattering)
        if fltr < 1.0:
            neural[neural > ws * fltr] = ws
        if cubes == 1:
            # Compose 2D velocity deficit made of blocks
            dd = dim1 * dim2
            jj = 0
            ii = 0
            alpha = np.zeros((dimy, dimx))
            for k in range(int(dimx * dimy / (dim1 * dim2))):
                alpha[ii : ii + dim1, jj : jj + dim2] = np.reshape(
                    neural[k * dd : k * dd + dd], (dim1, dim2)
                )
                jj += dim2
                if jj >= dimx:
                    jj = 0
                    ii += dim1
            neural = alpha.T
        else:
            if row_major == 0:
                # Compose 2D velocity deficit column-wise
                neural = np.reshape(neural, (dimx, dimy)).T
            else:
                # Compose 2D velocity deficit row-wise
                neural = np.reshape(neural, (dimy, dimx))
        t1 = time.time()
        # Get neural timing
        neural_time = t1 - t0
        # ----------------- Plot wake deficit results -----------------#
        if timings == True or result_plots == True:
            if result_plots == True:
                cmap = "coolwarm"
                fig, axs = plt.subplots(2)
                fig.suptitle("Velocities(m/s): Analytical (top), Neural (bot)")
                im1 = axs[0].imshow(
                    u_mesh,
                    vmin=vmin,
                    vmax=vmax,
                    cmap=cmap,
                    extent=[x_bounds[0], x_bounds[1], y_bounds[0], y_bounds[1]],
                )
                fig.colorbar(im1, ax=axs[0])
                im2 = axs[1].imshow(
                    neural,
                    vmin=vmin,
                    vmax=vmax,
                    interpolation=None,
                    cmap=cmap,
                    extent=[x_bounds[0], x_bounds[1], y_bounds[0], y_bounds[1]],
                )
                fig.colorbar(im2, ax=axs[1])
                plt.show()
            max_val = np.max(u_mesh)
            if timings == True:
                # Mean absolute error as % of the peak analytical speed
                absdifsum = np.sum(np.abs(u_mesh - neural))
                error = round(1 / (dimx * dimy) * absdifsum / max_val * 100, 2)
                if result_plots == True:
                    print("Abs mean error (%): ", error)
            if result_plots == True:
                plt.imshow(
                    (np.abs(u_mesh - neural) / max_val * 100),
                    vmax=20,
                    extent=[x_bounds[0], x_bounds[1], y_bounds[0], y_bounds[1]],
                    cmap=cmap,
                )
                plt.colorbar()
                plt.title("Abs difference")
                plt.show()
                # ----- Y-Transect plots ----- #
                dx = 6.048
                # Transects at 3D, 6.5D and 10D downstream
                tlist = np.array([3*D/dx, 6.5*D/dx, 10*D/dx]).astype(int)
                transects = tlist.size  # defines the number of transects
                fig, axs = plt.subplots(1, transects, sharey=False)
                cnt = 0
                for indx in tlist:
                    yy1 = u_mesh[:, indx]  # FLORIS transect
                    yy2 = neural[:, indx]  # CNN transect
                    axs[cnt].plot(
                        np.flip(yy1, axis=0),
                        np.arange(u_mesh.shape[0]),
                        color="navy",
                        linestyle="--",
                    )
                    axs[cnt].plot(
                        np.flip(yy2, axis=0), np.arange(u_mesh.shape[0]), color="crimson"
                    )
                    axs[cnt].title.set_text(str(int(indx * dx)))
                    cnt += 1
                plt.show()
        final = np.copy(neural)
        # Replace current turbine inlet speed (ws) with farm u_stream (for superimposed wakes)
        final[final == ws] = u_stream
        if timings == True:
            return gauss_time, neural_time, error
        else:
            return final
| 12,814 | 32.372396 | 97 | py |
wakenet | wakenet-master/Code/initialisations.py | import numpy as np
from packages import json
from packages import torch
import floris.tools as wfct
from floris.tools import static_class as sc
# Initialisation of variables #
# =================================================================================================#
# Open JSON file (change based on the wake model)
neural_info = open(
"example_inputs/inputs_gauss.json",
)
# returns JSON object as a dictionary
data = json.load(neural_info)
# Close JSON file
neural_info.close()
# Turbine parameters
hh = data["turbine"]["cut_plane"] # hub height
file_path = data["turbine"]["file_path"]
# Data creation parameters
train_net = data["data"]["train_net"]
make_data = data["data"]["make_data"]
save_data = data["data"]["save_data"]
local_ti = data["data"]["local_ti"]
local_pw = data["data"]["local_pw"]
curl = data["data"]["curl"]
weather = data["data"]["weather"]
row_major = data["data"]["row_major"]
x_bounds = data["data"]["x_bounds"]
y_bounds = data["data"]["y_bounds"]
data_size = data["data"]["data_size"]
batch_size = data["data"]["batch_size"]
dimx = data["data"]["dimx"]
dimy = data["data"]["dimy"]
dim1 = data["data"]["dim1"]
dim2 = data["data"]["dim2"]
cubes = data["data"]["cubes"]
norm = data["data"]["norm"]
inputs = data["data"]["inputs"]
plot_curves = data["data"]["plot_curves"]
result_plots = data["data"]["result_plots"]
full_domain = data["data"]["full_domain"]
defo = data["data"]["defo"]
# Data range
ws_range = data["data_range"]["ws_range"]
ti_range = data["data_range"]["ti_range"]
yw_range = data["data_range"]["yw_range"]
hb_range = data["data_range"]["hb_range"]
# Training hyperparameters
# device = data["training"]["device"]
if train_net == True:
device = "cuda"
else:
device = "cpu"
parallel = data["training"]["parallel"]
para_workers = data["training"]["para_workers"]
seed = data["training"]["seed"]
epochs = data["training"]["epochs"]
lr = data["training"]["lr"]
momentum = data["training"]["momentum"]
test_batch_size = data["training"]["test_batch_size"]
weight_decay = data["training"]["weight_decay"]
workers = data["training"]["workers"]
train_slice = data["training"]["train_slice"]
val_slice = data["training"]["val_slice"]
test_slice = data["training"]["test_slice"]
opt_method = data["training"]["opt_method"]
# Results parameters
weights_path = data["results"]["weights_path"]
fltr = data["results"]["fltr"]
denoise = data["results"]["denoise"]
contours_on = data["results"]["contours_on"]
# Optimisation boundaries
opt_xbound = data["optimisation"]["opt_xbound"]
opt_ybound = data["optimisation"]["opt_ybound"]
yaw_ini = data["optimisation"]["yaw_ini"]
# Opening turbine JSON file
f = open(
file_path,
)
# returns JSON object as a dictionary
data2 = json.load(f)
f.close()
# Set GPU if Available
if device == "cuda":
if torch.cuda.device_count() > 0 and torch.cuda.is_available():
print("Cuda installed! Running on GPU!")
device = "cuda"
else:
device = "cpu"
print("No GPU available! Running on CPU.")
# Get turbine cp curve
cp = np.array(data2["turbine"]["properties"]["power_thrust_table"]["power"])
wind_speed = np.array(
data2["turbine"]["properties"]["power_thrust_table"]["wind_speed"]
)
# Read turbine json
sc.x_bounds = x_bounds
sc.y_bounds = y_bounds
fi = wfct.floris_interface.FlorisInterface(file_path)
D = fi.floris.farm.turbines[0].rotor_diameter # turbine rotor diameter
D = float(D)
# Define the size of the partition. if full_domain==Flase, defaults at row or column size.
if full_domain == True:
out_piece = dimx * dimy
elif cubes == 0:
out_piece = dim1 * dim2
else:
if row_major == True:
out_piece = dimy
else:
out_piece = dimx
# Calculates ref_point
# (the list of all points of the domain that the DNN is going to be trained on).
rows = int(dimx * dimy / out_piece)
ref_point_x = np.linspace(0, dimy - 1, dimy)
ref_point_y = np.linspace(0, dimx - 1, dimx)
ref_point = np.zeros((dimx * dimy, 2))
k = 0
for i in range(dimy):
for j in range(dimx):
ref_point[k, 0] = ref_point_x[i]
ref_point[k, 1] = ref_point_y[j]
k += 1
ref_point = ref_point.astype(np.int)
# Wake boundaries definition
if defo == 1:
x_bounds = None
y_bounds = None
else:
x_bounds = (x_bounds[0], x_bounds[1])
y_bounds = (y_bounds[0], y_bounds[1])
| 4,417 | 28.065789 | 100 | py |
wakenet | wakenet-master/Code/CNNWake/FCC_model.py | import torch
import torch.nn as nn
import numpy as np
import random
import floris.tools as wfct
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader
from torch.optim import lr_scheduler
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
class FCNN(nn.Module):
    """
    The class is the Neural Network that can predict the power output of a
    wind turbine and the turbulent intensity (TI) at the turbine. The same
    network architecture is used for both TI and power prediction which
    simplifies the code. The network uses the pytorch framework and uses fully
    connected layers. The methods of this class include the training of
    the network, testing of the accuracy and generation of training data.
    The networks can be fine tuned via transfer learning if a specific park
    layout is known, this will strongly improve the accuracy.
    """

    def __init__(self, in_size, nr_neurons, out_size=1):
        """
        init method that generates the network architecture using pytorch.
        The number of input variables can be changed in case more flow data is
        available in the line segment upstream the turbine.
        The nr_neurons defines the size of the given network. The output size
        is set to 1 because the network only predicts either the power or TI.
        In theory it should be able to do both but the error was too high,
        therefore two networks are used.

        Args:
            in_size (int): Nr. of inputs, usually 42, 40 for wind speed
                and the global ti and yaw angle of the turbine
            nr_neurons (int): Nr. of neurons used in the layers, more
                neurons means that the network will have more parameters
            out_size (int): Nr. of outputs in the last layer,
                set to one if the NN only predicts a single value.
        """
        super(FCNN, self).__init__()
        # This defines the model architecture
        self.disc = nn.Sequential(
            # The linear layer is the fully connected layer
            torch.nn.Linear(in_size, nr_neurons),
            # LeakyReLU activation function after every fully
            # connected layer
            torch.nn.LeakyReLU(negative_slope=0.01),
            torch.nn.Linear(nr_neurons, nr_neurons),
            torch.nn.LeakyReLU(negative_slope=0.01),
            torch.nn.Linear(nr_neurons, nr_neurons),
            torch.nn.LeakyReLU(negative_slope=0.01),
            torch.nn.Linear(nr_neurons, out_size),
        )

    def forward(self, x):
        """
        Functions defines a forward pass though the network. Can be used for
        a single input or a batch of inputs

        Args:
            x (torch.tensor): input tensor, to be passed through the network

        Returns:
            flow_fields (torch.tensor): Output of network
        """
        # Use the architecture defined above for a forward pass
        return self.disc(x)

    def initialize_weights(self):
        """
        Initialize weights using a xavier uniform distribution which has
        helped training.
        Loop over all modules, if a module is a linear layer then
        initialize its weights.

        BUG FIX: the original implementation tested ``type(self) ==
        nn.Linear`` which is never true for an FCNN instance, so no layer
        was ever initialized. Iterating over ``self.modules()`` performs
        the loop the docstring always described.

        For more information about xavier initialization please read:
        Understanding the difficulty of training deep feedforward neural
        networks.
        X. Glorot, und Y. Bengio. AISTATS , Volume 9 JMLR Proceedings,
        249-256, 2010
        """
        # for every layer in model
        for module in self.modules():
            if isinstance(module, nn.Linear):
                # initialize weights using a xavier distribution
                # (xavier_uniform_ is the in-place, non-deprecated API)
                torch.nn.init.xavier_uniform_(module.weight)
                # initialize bias with 0.0001
                module.bias.data.fill_(0.0001)

    @staticmethod
    def power_ti_from_FLORIS(x_position, y_position, yawn_angles,
                             wind_velocity, turbulent_int,
                             type='ti', nr_varabiles=40,
                             florisjason_path='.'):
        """
        This function uses FLORIS to create the dataset to train the FCNN.
        The wind speed along a line just upstream every wind turbine and
        the corresponding TI or power output will be returned as numpy arrays.

        Args:
            x_position (list or numpy array): 1d array of the x postions of
                the wind turbines in m.
            y_position (list or numpy array): 1d array of the y postions of
                the wind turbines in m.
            yawn_angles (list or numpy array): 1d array of the yaw angle of
                every wind turbine in degree, from -30° to 30°
            wind_velocity (float): Free stream wind velocity in m/s,
                from 3 m/s to 12 m/s
            turbulent_int (float): Turbulent intensity in percent,
                from 1.5% to 25%
            type (str): Type of data that is returned, if set to power,
                the power generated by every turbine is returned. If set to
                anything else, the func will return the TI
            nr_varabiles (int): Nr of points along the line upstream the
                turbine to take u values from. More points means that more wind
                speeds are sampled from upstream the turbine. 40 was a good value
            florisjason_path (string): Location of the FLORIS jason file

        Returns:
            U_list (2d np.array): array of size len(x_position) x 1 x
                nr_varabiles + 2 where all the wind speeds upstream every
                turbine are stored
            ti_power_list (np.array): array of size len(x_position) x 1
                where either all power or TI values of the turbines are stored
        """
        # define the x and y length of a single cell in the array
        # This is set by the standard value used in FLORIS wakes
        dx = 18.4049079755
        dy = 2.45398773006
        # Set the maximum length of the array to be 3000m and 400m
        # more than the maximum x and y position of the turbines
        x_max = np.max(x_position) + 3005
        y_max = np.max(y_position) + 400
        # Number of cells in x and y needed to create a 2d array of
        # the maximum size
        Nx = int(x_max / dx)
        Ny = int(y_max / dy)
        # Init FLORIS from the jason file
        wind_farm = wfct.floris_interface.FlorisInterface("FLORIS_input"
                                                          "_gauss.json")
        # Set the x and y postions of the wind turbines
        wind_farm.reinitialize_flow_field(layout_array=[x_position,
                                                        y_position])
        # Set the yaw angle of every turbine
        for _ in range(0, len(x_position)):
            wind_farm.change_turbine([_], {'yaw_angle': yawn_angles[_],
                                           "blade_pitch": 0.0})
        # Set inlet wind speed and TI
        wind_farm.reinitialize_flow_field(wind_speed=wind_velocity,
                                          turbulence_intensity=turbulent_int)
        # Calculate wind field
        wind_farm.calculate_wake()
        # Extract 2d slice from 3d domain at hub height
        # This slice needs to have the same number of cells in x and y
        # and same physical dimensions
        cut_plane = wind_farm.get_hor_plane(
            height=90, x_resolution=Nx, y_resolution=Ny, x_bounds=[0, x_max],
            y_bounds=[0, y_max]).df.u.values.reshape(Ny, Nx)
        # Calculate power generated by every turbine
        power = wind_farm.get_turbine_power()
        # Calculate local TI at every turbine
        ti = wind_farm.get_turbine_ti()
        # Initialize list to store all the u values
        # Number of turbines x 1 x number of values used + 2
        U_list = np.zeros((len(x_position), 1, nr_varabiles + 2))
        # Initialise list to store TI or power values
        ti_power_list = np.zeros((len(x_position), 1))
        # From the flow field generated by FLORIS, extract the wind speeds
        # from a line 60 meter upstream the turbines
        for i in range(len(x_position)):
            # determine the x and y cells that the turbine center is at
            turbine_cell = [int((x_position[i]) / dx),
                            int((y_position[i] - 200) / dy)]
            # extract wind speeds along the rotor, 60 meters upstream
            u_upstream_hub = cut_plane[
                turbine_cell[1] + 45: turbine_cell[1] + 110,
                turbine_cell[0] - 3]
            # Do a running average, this is done because CNNwake has slight
            # variations in the u predictions, also normalise the u values
            u_average = [((u_upstream_hub[i - 1] +
                           u_upstream_hub[i] +
                           u_upstream_hub[i + 1]) / 3) / 12 for i in
                         np.linspace(1, 63, nr_varabiles, dtype=int)]
            # append yaw which is normalised and ti
            u_average = np.append(u_average, yawn_angles[i] / 30)
            u_input_fcnn = np.append(u_average, turbulent_int)
            U_list[i] = u_input_fcnn
            # If type required is power then use power else
            # use TI
            if type == 'power':
                ti_power_list[i] = power[i]
            else:
                ti_power_list[i] = ti[i]
        # round values to 2 places
        return np.round(U_list, 2), np.round(ti_power_list, 2)

    @staticmethod
    def create_ti_power_dataset(size, u_range, ti_range, yaw_range,
                                nr_varabiles=40, type='power',
                                floris_path='.'):
        """
        This function will create a training or test set to train the power
        or turbulent intensity (TI) prediction networks. The function will
        use FLORIS to create the flowfield around 4 example wind parks
        and saves the wind speed just upstream the wind rotor of every turbine
        and the corresponding TI or power output. The wind speeds are along a
        line which spans the entire diameter of the turbine blades and along
        this line nr_varibles of points are sampled and the wind farm TI and
        yaw angle of the corresponding turbine is added.
        This allows the network to predict the power output of every turbine
        under different inflow conditions or TI at every turbine.
        Four different wind parks examples are used to generate the data,
        this does not cover all possible flow fields
        but delivers a good initial guess for the network.
        The corresponding TI or power values are normalised by the maximum
        value of the array, this will make all values to be between
        0 and 1 which helps training.

        Args:
            size (int): Nr of example flows generated and saved for training.
            u_range (list): Bound of u values [u_min, u_max] used
            ti_range (list): Bound of TI values [TI_min, TI_max] used
            yaw_range (list): Bound of yaw angles [yaw_min, yaw_max] used
            nr_varabiles (int, optional): Nr. of values sampled along line.
                Defaults to 40.
            type (str, optional): If set to power, the power will be saved,
                if set to anything else the TI at every turbine will be saved.
                Defaults to 'power'.
            floris_path (str, optional): Path to FLORIS jason file.

        Returns:
            x [torch tensor]: Tensor of size size*6 x 1 x nr_varabiles+2 where
                all the flow data along line is stored. This will be the input
                to the FCNN
            y [torch tensor]: Tensor of size chuck_size*6 x 1 where all the
                TI or power data for every turbine is stored, this is what the
                FCNN is trained to predict
        """
        # 4 wind parks are used to generate data,
        # every wind park generates 1/4 of the dataset
        chuck_size = int(size/4)
        # initialize empty numpy array to store 2d arrays
        # and corresponding u, ti and yawn values
        y = np.zeros((chuck_size * 4 * 6, 1, nr_varabiles + 2))
        x = np.zeros((chuck_size * 6 * 4, 1))
        # index to add the wind fields in the right postion
        index = [i for i in range(0, size * 6, 6)]
        # create train examples
        print("generate FLORIS data")
        # WIND PARK 1
        for _ in range(0, chuck_size):
            # sample u, ti and yaw from uniform distro
            u_list = round(random.uniform(u_range[0], u_range[1]), 2)
            ti_list = round(random.uniform(ti_range[0], ti_range[1]), 2)
            yawlist = [round(random.uniform(yaw_range[0], yaw_range[1]), 2) for _ in range(0, 6)]
            # get the wind speeds along line and corresponding TI or power
            # from FLORIS for the wind park
            u_list_hub, floris_power_ti = FCNN.power_ti_from_FLORIS(
                [100, 300, 1000, 1300, 2000, 2300],
                [300, 500, 300, 500, 300, 500],
                yawlist, u_list, ti_list, type, nr_varabiles,
                florisjason_path=floris_path)
            # add u and power/TI in correct postion
            y[index[_]: index[_ + 1], :, :] = u_list_hub
            x[index[_]: index[_ + 1], :] = floris_power_ti
        # WIND PARK 2
        for _ in range(chuck_size, chuck_size * 2):
            u_list = round(random.uniform(u_range[0], u_range[1]), 2)
            ti_list = round(random.uniform(ti_range[0], ti_range[1]), 2)
            yawlist = [round(random.uniform(yaw_range[0], yaw_range[1]), 2) for _ in range(0, 6)]
            u_list_hub, floris_power_ti = FCNN.power_ti_from_FLORIS(
                [100, 600, 1000, 1300, 2000, 2900],
                [300, 300, 300, 300, 300, 500],
                yawlist, u_list, ti_list, type, nr_varabiles)
            y[index[_]: index[_ + 1], :, :] = u_list_hub
            x[index[_]: index[_ + 1], :] = floris_power_ti
        # WIND PARK 3
        for _ in range(chuck_size * 2, chuck_size * 3):
            u_list = round(random.uniform(u_range[0], u_range[1]), 2)
            ti_list = round(random.uniform(ti_range[0], ti_range[1]), 2)
            yawlist = [round(random.uniform(yaw_range[0], yaw_range[1]), 2) for _ in range(0, 6)]
            u_list_hub, floris_power_ti = FCNN.power_ti_from_FLORIS(
                [100, 100, 800, 1600, 1600, 2600],
                [300, 500, 400, 300, 500, 400],
                yawlist, u_list, ti_list, type, nr_varabiles)
            y[index[_]: index[_ + 1], :, :] = u_list_hub
            x[index[_]: index[_ + 1], :] = floris_power_ti
        # WIND PARK 4
        # NOTE: this loop deliberately stops one park short; the final
        # (never-filled) 6 rows are trimmed off below with [0:-6].
        for _ in range(chuck_size * 3, chuck_size * 4 - 1):
            u_list = round(random.uniform(u_range[0], u_range[1]), 2)
            ti_list = round(random.uniform(ti_range[0], ti_range[1]), 2)
            yawlist = [round(random.uniform(yaw_range[0], yaw_range[1]), 2) for _ in range(0, 6)]
            u_list_hub, floris_power_ti = FCNN.power_ti_from_FLORIS(
                [100, 300, 500, 1000, 1300, 1600],
                [300, 500, 300, 300, 500, 400],
                yawlist, u_list, ti_list, type, nr_varabiles)
            y[index[_]: index[_ + 1], :, :] = u_list_hub
            x[index[_]: index[_ + 1], :] = floris_power_ti
        # transform into a pytorch tensor, dropping the unfilled tail
        x = torch.tensor(x[0:-6], dtype=torch.float)
        y = torch.tensor(y[0:-6], dtype=torch.float)
        print(f"Normalisation used: {torch.max(x)}")
        # Normalise the power/TI by maximum value so that they are
        # between 0-1
        x = x / torch.max(x)
        return y, x

    def epoch_training(self, criterion, optimizer, dataloader, device):
        """
        Trains the model for one epoch on data provided by dataloader. The
        model will be updated after each batch and the function will return
        the train loss of the last batch

        Args:
            criterion (torch.nn.criterion): Loss function used to
                train model
            optimizer (torch.optim.Optimizer): Optimizer used for
                gradient descent
            dataloader (torch.utils.data.DataLoader): Dataloader for dataset
            device (str): Device on which model and data is stored,
                cpu or cuda

        Returns:
            training loss (float): Loss value of training set defined
                by criterion
        """
        # For all data in dataloader
        for power_ti, input_u in dataloader:
            # one batch at a time, get network prediction
            output = self(input_u.to(device))
            # compute loss
            train_loss = criterion(output.squeeze(), power_ti[:, 0].to(device))
            self.zero_grad()       # Zero the gradients
            train_loss.backward()  # Calc gradients
            optimizer.step()       # Do parameter update
        return train_loss.item()

    def learn_wind_park(self, x_postion, y_position, size, eval_size,
                        nr_varabiles=40, type='power',
                        device='cpu', nr_epochs=50,
                        batch_size=100, lr=0.003):
        """
        EXPERIMENTAL FUNCTION; DOES NOT WORK YET, DO NOT USE!!!
        This function is supposed to fine tune an already trained TI/Power
        model on a specific wind park. This should further reduce the error
        in predicting power or local TI. However, it currently increases the
        error so there is something wrong. DO NOT USE!!!!

        Args:
            x_postion (list or numpy array): 1d array of the x positions of
                the wind turbines in m.
            y_position (list or numpy array): 1d array of the y positions of
                the wind turbines in m.
            size (int): Size of training set
            eval_size (int): Size of test set
            nr_varabiles (int): Nr of points along the line upstream the
                turbine to take u values from.
            type (str): If set to power, train on power, else on TI
            device (torch.device): Device to run the training on, cuda or cpu
            nr_epochs (int): Nr. of training epochs
            batch_size (int): Training batch size
            lr (float): Model learning rate

        Returns:
            [Bool]: True if training was successful
        """
        nr_values = int(((size + eval_size)*len(x_postion)))
        # initialize empty numpy array to store 2d arrays and
        # corresponding u, ti and yawn values
        y = np.zeros((nr_values, 1, nr_varabiles + 2))
        x = np.zeros((nr_values, 1))
        print(nr_values)
        print(len(x_postion))
        print(int(nr_values/len(x_postion)))
        index = [i for i in range(0, nr_values * 2, len(x_postion))]
        # create train examples of the specified wind farm using FLORIS
        print("generate FLORIS data")
        for _ in range(0, int(nr_values/len(x_postion))):
            u_list = round(random.uniform(3, 12), 2)
            ti_list = round(random.uniform(0.015, 0.25), 2)
            yawlist = [round(random.uniform(-30, 30), 2)
                       for _ in range(0, len(x_postion))]
            u_list_hub, floris_power_ti = FCNN.power_ti_from_FLORIS(
                x_postion, y_position, yawlist, u_list, ti_list, type,
                nr_varabiles)
            y[index[_]: index[_ + 1], :, :] = u_list_hub
            x[index[_]: index[_ + 1], :] = floris_power_ti
        x = torch.tensor(x, dtype=torch.float)
        y = torch.tensor(y, dtype=torch.float)
        print(f"Normalisation used: {torch.max(x)}")
        x = x / torch.max(x)
        x_train = x[0:size * len(x_postion)]
        y_train = y[0:size * len(x_postion)]
        x_eval = x[-eval_size*len(x_postion):]
        y_eval = y[-eval_size*len(x_postion):]
        print(x_eval.size(), x_train.size())
        dataset = TensorDataset(x_train, y_train.float())
        # generate dataloader for training
        dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
        optimizer = optim.Adam(self.parameters(), lr=lr)
        scheduler_gen = lr_scheduler.ReduceLROnPlateau(optimizer, 'min',
                                                       factor=0.6, patience=4,
                                                       verbose=True)
        # use L2 norm as criterion
        criterion = nn.MSELoss()
        # init list to store error
        error_list = []
        # Train model on data
        for _ in range(nr_epochs):  # train model
            self.train()  # set model to training mode
            loss = self.epoch_training(criterion, optimizer,
                                       dataloader, device)
            self.eval()   # set model to evaluation
            # evaluation on validation set
            val_error = self.error(y_eval, x_eval, device)
            # if error has not decreased over the past 4 epochs
            # decrease the lr by a factor of 0.6
            scheduler_gen.step(val_error)
            error_list.append(val_error)
            print(f" Epoch: {_:.0f}, Training loss: {loss:.4f},"
                  f" Validation error: {val_error:.2f}")
        # plot the val error over the epochs
        plt.plot(range(nr_epochs), error_list)
        plt.show()
        return True

    def error(self, x_eval, y_eval, device='cpu'):
        """
        Function to calculate the error between the network's
        predictions and the actual output. The x and y values
        need to be generated using the create_ti_power_dataset
        function. The error will be the mean percentage difference
        between all values predicted by the network and the actual
        values

        Args:
            x_eval (torch tensor): Tensor of all flow, ti and yaw values
                for different turbines, this is the model input.
            y_eval (torch tensor): Tensor of all TI or power outputs as
                calculated by floris for the corresponding flow field in x
            device (str, optional): Device where the model is stored on.
                Defaults to 'cpu'.

        Returns:
            error (float): percentage error
        """
        error_list = []
        # Do forward pass of the x data
        model_predict = self.forward(x_eval.to(device))
        for n in range(0, len(y_eval)):
            # sometimes the power prediction is zero, this will give
            # an error of inf due to divide by zero in step below.
            # Therefore filter out very small power here
            if abs(y_eval.detach().cpu().numpy()[n]) < 0.01:
                continue
            else:
                # calculate error
                power_error = abs(y_eval.detach().cpu().numpy()[n] -
                                  model_predict[n].detach().cpu().numpy()) / (
                                      y_eval.detach().cpu().numpy()[n] + 1e-8)
                error_list.append(power_error * 100)
        return np.mean(error_list)

    def load_model(self, path='.', device='cpu'):
        """
        Function to load model from a pt file into this class.

        Args:
            path (str): path to saved model.
            device (torch.device): Device to load onto, cpu or cuda
        """
        # Load a previously trained model
        self.load_state_dict(torch.load(path, map_location=device))

    def save_model(self, name='generator.pt'):
        """
        Function to save current model parameters so that it can
        be used again later. Needs to be saved as a .pt file

        Args:
            name (str): name of .pt file from which to load model
        """
        torch.save(self.state_dict(), name)
| 24,254 | 42.467742 | 97 | py |
wakenet | wakenet-master/Code/CNNWake/visualise.py | import torch
import matplotlib.pyplot as plt
import numpy as np
import time
import floris.tools as wfct
from .superposition import super_position
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
def visualize_turbine(plane, domain_size, nr_points, title="", ax=None):
    """
    Plot the flow field around a single turbine onto a given axis.

    Args:
        plane (2d numpy array): Flow field around the turbine
        domain_size (list or numpy array): Domain limits as
            [x_min, x_max, y_min, y_max]
        nr_points (list or numpy array): Nr. of points in x and y
        title (str, optional): Title of the graph. Defaults to "".
        ax (matplotlib axis, optional): Axis the plot is drawn on.

    Returns:
        ax.pcolormesh: Image of the flow field
    """
    # Build the plotting grid from the domain bounds and resolution
    grid_x, grid_y = np.meshgrid(
        np.linspace(domain_size[0], domain_size[1], nr_points[0]),
        np.linspace(domain_size[2], domain_size[3], nr_points[1]),
    )
    # Draw the cut-through as a colour mesh
    mesh = ax.pcolormesh(grid_x, grid_y, plane,
                         shading='auto', cmap="coolwarm")
    ax.set_title(title)
    # Keep x and y scales identical so the wake is not distorted
    ax.set_aspect("equal")
    return mesh
def visualize_farm(
        plane, nr_points, size_x, size_y, title="", ax=None, vmax=False):
    """
    Plot the flow field of a whole wind farm onto a given axis.

    Args:
        plane (2d numpy array): Flow field of the wind farm
        nr_points (list or np array): Nr of points in x and y
        size_x (int): Size of domain in x direction (km)
        size_y (int): Size of domain in y direction (km)
        title (str, optional): Title of the plot. Defaults to "".
        ax (matplotlib axis, optional): Axis the plot is drawn on.
        vmax (bool or float, optional): Upper colour limit; when left
            as False the maximum of the plane is used.

    Returns:
        ax.pcolormesh: Image of the flow field around the wind farm
    """
    # Grid spans from the origin to the physical domain size
    grid_x, grid_y = np.meshgrid(
        np.linspace(0, size_x, nr_points[0]),
        np.linspace(0, size_y, nr_points[1]),
    )
    # Fall back to the field maximum when no colour cap was supplied
    color_cap = np.max(plane) if vmax is False else vmax
    # Draw the cut-through as a colour mesh
    mesh = ax.pcolormesh(grid_x, grid_y, plane, shading='auto',
                         cmap="coolwarm", vmax=color_cap)
    ax.set_title(title)
    # Keep x and y scales identical so the wakes are not distorted
    ax.set_aspect("equal")
    return mesh
def Compare_CNN_FLORIS(
        x_position, y_position, yawn_angles, wind_velocity, turbulent_int,
        CNN_generator, Power_model, TI_model, device,
        florisjason_path='', plot=False):
    """
    Generates the wind field around a wind park using the neural networks.
    The individual wakes of the turbines are calculated using the CNN and
    superimposed onto the wind farm flow field using a super-position model.
    The energy produced by the turbines are calcuated using another fully
    connected network from the flow data just upstream the turbine.
    The functions generates the same wind park flow field using FLORIS so that
    the two solutions can be compared when plot = True is set.

    Args:
        x_position (list): 1d array of x locations of the wind turbines in m.
        y_position (list): 1d array of y locations of the wind turbines in m.
        yawn_angles (list): 1d array of yaw angles of every wind turbine.
        wind_velocity (float): Free stream wind velocity in m/s.
        turbulent_int (float): Turbulent intensity in percent.
        CNN_generator (nn.Module): trained CNN that maps (u, ti, yaw) to a
            2d wake field for a single turbine.
        Power_model (nn.Module): trained FCNN predicting normalised power.
        TI_model (nn.Module): trained FCNN predicting normalised local TI.
        device (torch.device): Device to store and run the neural network on,
            cpu or cuda
        florisjason_path (string): Location of the FLORIS jason file
        plot (bool, optional): If True, the FLORIS and CNN solution will
            be plotted and compared.

    Returns:
        numpy array: Final 2d array of flow field around the wind park.
    """
    # Define the x and y length of a single cell in the array
    # This is set by the standard value used in FLORIS wakes
    dx = 18.4049079755
    dy = 2.45398773006
    # Set the maximum length of the array to be 3000m and 400m
    # more than the maximum x and y position of the wind park
    # If a larger physical domain was used change adapt the values
    x_max = np.max(x_position) + 3000
    y_max = np.max(y_position) + 300
    # Number of cells in x and y needed to create a 2d array of
    # that is x_max x y_max using dx, dy values
    Nx = int(x_max / dx)
    Ny = int(y_max / dy)
    # Initialise a 2d array of the wind park with the
    # inlet wind speed
    farm_array = np.ones((Ny, Nx)) * wind_velocity
    # set up FLORIS model from the jason config and apply the same
    # layout/yaw/inflow conditions as used for the CNN prediction
    floris_model = wfct.floris_interface.FlorisInterface(
        florisjason_path + "FLORIS_input_gauss.json")
    floris_model.reinitialize_flow_field(
        layout_array=[x_position, np.array(y_position)])
    for _ in range(0, len(x_position)):
        floris_model.change_turbine([_], {'yaw_angle': yawn_angles[_],
                                          "blade_pitch": 0.0})
    floris_model.reinitialize_flow_field(wind_speed=wind_velocity,
                                         turbulence_intensity=turbulent_int)
    start_t = time.time()
    # Calcuate using FLORIS and extract 2d flow field
    floris_model.calculate_wake()
    print(f"Time taken for FLORIS to generate"
          f" wind park: {time.time() - start_t:.3f}")
    floris_plane = floris_model.get_hor_plane(
        height=90, x_resolution=Nx, y_resolution=Ny, x_bounds=[0, x_max],
        y_bounds=[0, y_max]).df.u.values.reshape(Ny, Nx)
    # Reference power and local TI per turbine from FLORIS
    floris_power = floris_model.get_turbine_power()
    floris_ti = floris_model.get_turbine_ti()
    # print(floris_power, floris_ti)
    power_CNN = []
    ti_CNN = []
    t = time.time()
    # no_grad: inference only, no autograd bookkeeping needed
    with torch.no_grad():
        # Do CNNwake cautions
        # NOTE(review): turbines are processed in list order; for the
        # superposition to see upstream wakes, x_position should be
        # ordered upstream-to-downstream — confirm with callers.
        for i in range(len(x_position)):
            # determine the x and y cells that the turbine center is at
            turbine_cell = [int((x_position[i]) / dx),
                            int((y_position[i] - 200) / dy)]
            t1 = time.time()  # NOTE(review): t1 is never used
            # extract wind speeds along the rotor, 60 meters upstream
            u_upstream_hub = farm_array[
                turbine_cell[1] + 45: turbine_cell[1] + 110,
                turbine_cell[0] - 3]
            # Do an running average, this is done because CNNwake has slight
            # variations in the u predictions, also normalise the u values
            # (divide by 12, presumably the max wind speed seen in training)
            u_power = [
                ((u_upstream_hub[i - 1] + u_upstream_hub[i] +
                  u_upstream_hub[i + 1]) / 3) / 12 for
                i in np.linspace(5, 55, 40, dtype=int)]
            u_power = np.append(u_power, yawn_angles[i] / 30)
            u_power = np.append(u_power, turbulent_int)
            # The local TI does not change from inlet TI if the turbine
            # is not covered by a wake, therefore check if if all values
            # in u_list_hub are the same -> means no wake coverage
            # Local TI also depends on yaw, if yaw is less than 12° and
            # turbine is not in wake -> use inlet TI for local TI
            if np.allclose(u_power[0], u_power[0:-3],
                           rtol=1e-02, atol=1e-02) and abs(u_power[-2]) < 0.4:
                # print("Turbine in free stream, set ti to normal")
                ti = turbulent_int
            else:
                # de-normalise TI prediction; 0.30000001192092896 is
                # presumably the float32 max-TI normalisation constant
                # from training — confirm against the training pipeline
                ti = TI_model((torch.tensor(u_power).float().to(device))).detach().cpu().numpy() * 0.30000001192092896
            # regulate TI to ensure it is not to different from free stream
            if ti < turbulent_int * 0.7:
                # print(f"TI REGULATED 1 AT {i}")
                ti = turbulent_int * 1.5
            # clip ti values to max and min trained
            ti = np.clip(ti, 0.015, 0.25).item(0)
            ti_CNN.append(ti)
            u_power[-1] = ti
            # de-normalise power; 4834506 is presumably the max power (W)
            # used for normalisation during training — confirm
            energy = Power_model(torch.tensor(u_power).float().to(device)).detach().cpu().numpy() * 4834506
            power_CNN.append(energy[0])
            hub_speed = np.round(np.mean(u_upstream_hub), 2)
            turbine_condition = [[hub_speed, ti, yawn_angles[i]]]
            turbine_field = CNN_generator(torch.tensor(turbine_condition).float().to(device))
            # Use CNN to calculate wake of individual trubine
            # Since CNN output is normalised,
            # mutiply by 12 and create a numpy array
            turbine_field = turbine_field[0][0].detach().cpu().numpy() * 12
            # Place wake of indivual turbine in the farm_array
            farm_array = super_position(
                farm_array, turbine_field, turbine_cell, hub_speed,
                wind_velocity, sp_model="SOS")
    # print information
    print(f"Time taken for CNNwake to generate wind park: {time.time() - t:.3f}")
    print(f"CNNwake power prediction error: "
          f"{100 * np.mean(abs(np.array(floris_power) - np.array(power_CNN)) / np.array(floris_power)):.2f} %")
    print(f"CNNwake TI prediction error: {100 * np.mean(abs(np.array(floris_ti) - np.array(ti_CNN)) / np.array(floris_ti)):.2f} %")
    print(f"APWP error: {100 * np.mean(abs(floris_plane - farm_array) / np.max(floris_plane)):.2f}")
    if plot:
        plt.rcParams.update({'font.size': 16})
        # Plot wake fields of both wind farms and error field
        fig, axarr = plt.subplots(3, 1, sharex=True, figsize=(20, 49))
        im1 = visualize_farm(farm_array, nr_points=[Nx, Ny], size_x=x_max,
                             size_y=y_max, title="CNNwake", ax=axarr[0])
        im2 = visualize_farm(floris_plane, nr_points=[Nx, Ny], size_x=x_max,
                             size_y=y_max, title="FLORIS", ax=axarr[1])
        im3 = visualize_farm(
            (100 * abs(floris_plane - farm_array) / np.max(floris_plane)),
            nr_points=[Nx, Ny], size_x=x_max, size_y=y_max,
            title="Pixel wise percentage error ", ax=axarr[2], vmax=20)
        col1 = fig.colorbar(im1, ax=axarr[0])
        col1.set_label('m/s', labelpad=15, y=1.06, rotation=0)
        col2 = fig.colorbar(im2, ax=axarr[1])
        col2.set_label('m/s', labelpad=15, y=1.06, rotation=0)
        col3 = fig.colorbar(im3, ax=axarr[2])
        col3.set_label('%', labelpad=11, y=0.9, rotation=0)
        axarr[2].set_xlabel('m', fontsize=15)
        axarr[0].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
        axarr[1].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
        axarr[2].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
        # Plot TI and Power of every turbine for FLORIS adn CNNNwake
        fig, axarr = plt.subplots(2, figsize=(9, 9))
        axarr[0].plot(range(1, len(x_position) + 1),
                      np.array(power_CNN)/1.e06, 'o--', label="CNNwake")
        axarr[0].plot(range(1, len(x_position) + 1),
                      np.array(floris_power)/1.e06, 'o--', label="FLORIS")
        axarr[1].plot(range(1, len(x_position) + 1),
                      np.array(ti_CNN), 'o--', label="CNNwake")
        axarr[1].plot(range(1, len(x_position) + 1),
                      floris_ti, 'o--', label="FLORIS")
        axarr[0].set_ylabel('Power output [MW]', fontsize=15)
        axarr[1].set_ylabel('Local TI [%]', fontsize=15)
        axarr[1].set_xlabel('Turbine Nr.', rotation=0, fontsize=15)
        axarr[1].legend()
        axarr[0].legend()
        plt.show()
    return farm_array, floris_plane
if __name__ == '__main__':
    # Demo script: load the three trained networks and compare a single
    # wind-farm prediction against FLORIS. Requires the trained_models/
    # directory and the FLORIS jason config to be present.
    # To run individual CNNWake files, the imports are not allowed to be
    # relative. Instead of: from .superposition import super_position
    # it needs to be: from superposition import super_position, for all CNNWake imports
    # also import all NNs
    from CNN_model import Generator
    from FCC_model import FCNN
    from superposition import super_position
    # Set up/load all NNs
    # NOTE(review): both branches of this conditional yield "cpu", so the
    # demo always runs on CPU even when CUDA is available — confirm intent.
    device = torch.device("cpu" if torch.cuda.is_available() else "cpu")
    CNN_generator = Generator(3, 30).to(device)
    CNN_generator.load_model('./trained_models/CNN_FLOW.pt', device=device)
    # NOTE(review): .to() without arguments is a no-op.
    CNN_generator = CNN_generator.to()
    CNN_generator.eval()
    # the first forward pass is super slow so do it outside loop and use the
    # output for a simple assert test
    example_out = CNN_generator(torch.tensor([[4, 0.1, 20]]).float().to(device))
    assert example_out.size() == torch.Size([1, 1, 163, 163])
    Power_model = FCNN(42, 300, 1).to(device)
    Power_model.load_state_dict(torch.load('./trained_models/FCNN_POWER.pt', map_location=device))
    Power_model.eval()
    # the first forward pass is super slow so do it outside loop and use the
    # output for a simple assert test
    energy = Power_model(torch.tensor([i for i in range(0, 42)]).float().to(device))
    assert energy.size() == torch.Size([1])
    TI_model = FCNN(42, 300, 1).to(device)
    TI_model.load_state_dict(torch.load('./trained_models/FCNN_TI.pt', map_location=device))
    TI_model.eval()
    # the first forward pass is super slow so do it outside loop and use the
    # output for a simple assert test
    TI = TI_model(torch.tensor([i for i in range(0, 42)]).float().to(device))
    assert TI.size() == torch.Size([1])
    # Compare a single wind farm, this will show the wake, energy and local TI
    # for every turbine and compare it to FLORIS
    # NOTE(review): 7 yaw angles are passed for 6 turbines; the trailing
    # extra entry is ignored — confirm and tidy upstream.
    farm, a = Compare_CNN_FLORIS([100, 100, 700, 700, 1200, 1200],
                                 [300, 800, 1300, 550, 1050, 300],
                                 [0, 0, 0, 0, 0, 0, 0], 11.6, 0.06,
                                 CNN_generator, Power_model,
                                 TI_model, device, plot=True)
| 13,979 | 42.6875 | 131 | py |
wakenet | wakenet-master/Code/CNNWake/train_CNN.py | import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader
from torch.optim import lr_scheduler
from .CNN_model import Generator
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
def train_CNN_model(
        nr_filters, nr_epochs, learing_rate, batch_size,
        train_size, val_size, image_size, device, u_range,
        ti_range, yaw_range, model_name, nr_workers=0, floris_path="."):
    """
    Train a freshly initialised wake CNN on newly generated FLORIS data.

    A new Generator is built, a training and a validation set are produced
    with FLORIS, and the network is trained for ``nr_epochs`` epochs. The
    validation error of every epoch is plotted at the end and the trained
    network is written to ``model_name``.

    Args:
        nr_filters (int): Nr. of filters used for the conv layers
        nr_epochs (int): Nr. of training epochs
        learing_rate (float): Model learning rate
        batch_size (int): Training batch size
        train_size (int): Size of the generated training set
        val_size (int): Size of the generated validation set
        image_size (int): Size of the data set images, needs to match the
            model output size; for the current model this is 163 x 163
        device (torch.device): Device to run the training on, cuda or cpu
        u_range (list): Bound of u values [u_min, u_max] used
        ti_range (list): Bound of TI values [TI_min, TI_max] used
        yaw_range (list): Bound of yaw angles [yaw_min, yaw_max] used
        model_name (str): File name of the saved model (needs to be .pt)
        nr_workers (int, optional): Nr. of workers to load data. Defaults to 0.
        floris_path (str, optional): Path to the FLORIS json file.

    Returns:
        net (Generator): Trained model
        loss (float): Training loss of the last epoch
        val_error (float): Percentage error on the validation set
    """
    # The network currently conditions on u, ti and yaw only; adjust this
    # value if more input variables are added.
    nr_inputs = 3
    net = Generator(nr_inputs, nr_filters).to(device)
    # Generate the training and validation data with FLORIS
    x_train, y_train = net.create_floris_dataset(
        size=train_size, image_size=image_size, u_range=u_range,
        ti_range=ti_range, yaw_range=yaw_range, floris_init_path=floris_path,
        curl=False)
    x_eval, y_eval = net.create_floris_dataset(
        size=val_size, image_size=image_size, u_range=u_range,
        ti_range=ti_range, yaw_range=yaw_range, floris_init_path=floris_path,
        curl=False)
    # Wrap the training data into a shuffling dataloader
    train_set = TensorDataset(y_train.unsqueeze(1), x_train.float())
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                              num_workers=nr_workers)
    # Fresh weight initialisation before training starts
    net.initialize_weights()
    # Adam optimiser plus a plateau scheduler that cuts the lr by 0.6
    # whenever the validation error has stalled for 4 epochs
    optimizer = optim.Adam(net.parameters(), lr=learing_rate)
    plateau_sched = lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min', factor=0.6, patience=4, verbose=True)
    criterion = nn.MSELoss()  # L2 norm as training loss
    val_history = []
    for epoch in range(nr_epochs):
        net.train()  # training mode
        loss = net.epoch_training(criterion, optimizer, train_loader, device)
        net.eval()  # evaluation mode for the validation pass
        val_error = net.error(x_eval, y_eval,
                              device, image_size=image_size,
                              normalisation=12)
        plateau_sched.step(val_error)
        val_history.append(val_error)
        print(f" Epoch: {epoch:.0f},"
              f" Training loss: {loss:.4f},"
              f" Validation error: {val_error:.2f}")
    print("Finished training")
    # Persist the trained network
    net.save_model(model_name)
    # Show how the validation error evolved over the epochs
    plt.plot(range(nr_epochs), val_history)
    plt.show()
    return net, loss, val_error
if __name__ == '__main__':
    # To run individual CNNWake files, the imports are not allowed to be
    # relative. Instead of: from .CNN_model import Generator
    # it needs to be: from CNN_model import Generator for all CNNWake imports
    # Pick the GPU when one is available, otherwise fall back to the CPU
    run_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Train a new wake CNN with the given hyper-parameters
    train_CNN_model(nr_filters=16,
                    nr_epochs=25,
                    learing_rate=0.003,
                    batch_size=50,
                    train_size=200,
                    val_size=30,
                    image_size=163,
                    device=run_device,
                    u_range=[3, 12],
                    ti_range=[0.015, 0.25],
                    yaw_range=[-30, 30],
                    model_name='generator.pt')
| 5,251 | 38.19403 | 78 | py |
wakenet | wakenet-master/Code/CNNWake/train_FCNN.py | import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader
from torch.optim import lr_scheduler
from FCC_model import FCNN
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
def train_FCNN_model(
        nr_neurons, input_size, nr_epochs, learing_rate, batch_size,
        train_size, val_size, u_range, ti_range, yaw_range, model_name,
        type='power', device='cpu', nr_workers=0, floris_path="."):
    """
    Train a fresh fully connected network on newly generated FLORIS data.

    A new FCNN is created and trained for ``nr_epochs`` epochs on a
    FLORIS-generated dataset of either turbine power or local TI values
    (selected via ``type``). The validation error of every epoch is
    plotted at the end and the trained network is saved to ``model_name``.

    Args:
        nr_neurons (int): Nr. of neurons in the hidden layers
        input_size (int): Nr. of velocity samples per training example;
            yaw and TI are appended internally, so the network itself has
            input_size + 2 inputs
        nr_epochs (int): Nr. of training epochs
        learing_rate (float): Model learning rate
        batch_size (int): Training batch size
        train_size (int): Size of the generated training set
        val_size (int): Size of the generated validation set
        u_range (list): Bound of u values [u_min, u_max] used
        ti_range (list): Bound of TI values [TI_min, TI_max] used
        yaw_range (list): Bound of yaw angles [yaw_min, yaw_max] used
        model_name (str): File name of the saved model (needs to be .pt)
        type (str, optional): Dataset type passed through to
            create_ti_power_dataset. Defaults to 'power'.
        device (torch.device): Device to run the training on, cuda or cpu
        nr_workers (int, optional): Nr. of workers to load data. Defaults to 0.
        floris_path (str, optional): Path to the FLORIS json file.

    Returns:
        fcnn (FCNN): Trained model
        loss (float): Training loss of the last epoch
        val_error (float): Percentage error on the validation set
    """
    # Two extra network inputs: the yaw angle and the turbulence intensity
    nr_model_inputs = input_size + 2
    fcnn = FCNN(nr_model_inputs, nr_neurons, 1).to(device)
    # Generate the training and validation data with FLORIS
    x_train, y_train = fcnn.create_ti_power_dataset(
        size=train_size, u_range=u_range, ti_range=ti_range,
        yaw_range=yaw_range, nr_varabiles=input_size, type=type,
        floris_path=floris_path)
    x_eval, y_eval = fcnn.create_ti_power_dataset(
        size=val_size, u_range=u_range, ti_range=ti_range,
        yaw_range=yaw_range, nr_varabiles=input_size, type=type,
        floris_path=floris_path)
    # Wrap the training data into a shuffling dataloader
    train_set = TensorDataset(y_train, x_train.float())
    train_loader = DataLoader(
        train_set, batch_size=batch_size, shuffle=True,
        num_workers=nr_workers)
    # Fresh weight initialisation before training starts
    fcnn.initialize_weights()
    # Adam optimiser plus a plateau scheduler that cuts the lr by 0.6
    # whenever the validation error has stalled for 4 epochs
    optimizer = optim.Adam(fcnn.parameters(), lr=learing_rate)
    plateau_sched = lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min', factor=0.6, patience=4, verbose=True)
    criterion = nn.MSELoss()  # L2 norm as training loss
    val_history = []
    for epoch in range(nr_epochs):
        fcnn.train()  # training mode
        loss = fcnn.epoch_training(criterion, optimizer, train_loader, device)
        fcnn.eval()  # evaluation mode for the validation pass
        val_error = fcnn.error(x_eval, y_eval, device)
        plateau_sched.step(val_error)
        val_history.append(val_error)
        print(f" Epoch: {epoch:.0f},"
              f" Training loss: {loss:.4f},"
              f" Validation error: {val_error:.2f}")
    # Persist the trained network
    fcnn.save_model(model_name)
    # Show how the validation error evolved over the epochs
    plt.plot(range(nr_epochs), val_history)
    plt.show()
    return fcnn, loss, val_error
if __name__ == '__main__':
    # To run indivual CNNWake files, the imports are not allowed to be
    # relative. Instead of: from .FCC_model import FCNN
    # it needs to be: from FCC_model import FCNN, for all CNNWake imports
    # Set device used for training.
    # BUG FIX: the original expression read "cpu" in both branches
    # ("cpu" if cuda else "cpu") and could never select the GPU; use cuda
    # when it is available, as train_CNN.py already does.
    devices = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Train a FCNN to predict power
    train_FCNN_model(
        nr_neurons=20, input_size=20, nr_epochs=150, learing_rate=0.003,
        batch_size=30, train_size=50, val_size=40, u_range=[3, 12],
        ti_range=[0.015, 0.25], yaw_range=[-30, 30],
        model_name='power_model.pt', type='power', device=devices)
| 5,082 | 38.1 | 79 | py |
wakenet | wakenet-master/Code/CNNWake/superposition.py | import torch
from torch.backends import cudnn
import matplotlib.pyplot as plt
import numpy as np
import time
import floris.tools as wfct
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
def super_position(farm_array, turbine_postion_unused=None, *args, **kwargs):
    raise NotImplementedError  # placeholder removed below
def CNNWake_farm_power(
        yawn_angles, x_position, y_position, wind_velocity, turbulent_int,
        CNN_generator, Power_model, TI_model, device,
        ti_normalisation=0.30000001, power_normalisation=4834506):
    """
    Calculate the power output of a wind farm using the neural networks.

    The generated power is returned as a negative number so the function
    can be fed directly into a minimiser. The wake of every turbine is
    predicted by the CNN and merged into the farm flow field with the SOS
    super-position model. The power and local turbulence intensity of
    each turbine are predicted by two fully connected networks from the
    flow just upstream of the rotor.

    Please ensure that the x positions are in ascending order and every
    turbine is placed at least 300 above 0 in the y direction. This is
    done to ensure that no wake is lost at the edge of the domain.

    Args:
        yawn_angles (list): 1d array of the yaw angle of every wind
            turbine in degree, from -30° to 30°
        x_position (list): 1d array of the x postions of the wind
            turbines in meters.
        y_position (list): 1d array of the y postions of the wind
            turbines in meters.
        wind_velocity (float): Free stream wind velocity in m/s,
            from 3 m/s to 12 m/s
        turbulent_int (float): Turbulent intensity, from 0.015 to 0.25
        CNN_generator (Generator): CNN to predict the wake of a single
            turbine, ensure it is trained and set to validation mode
        Power_model (Generator): FCNN to predict the power generated
            by a turbine, ensure it is trained and set to validation mode
        TI_model (Generator): FCNN to predict the local TI of a
            turbine, ensure it is trained and set to validation mode
        device (torch.device): Device to store and run the neural network
            on, either cpu or cuda
        ti_normalisation (float): Normalisation of the TI training set;
            the raw FCNN output is rescaled by this value
        power_normalisation (float): Normalisation of the power training
            set; the raw FCNN output is rescaled by this value

    Returns:
        power (float): negative total power output of the farm
    """
    # Physical size of one grid cell in meters. These values match the
    # resolution of the FLORIS wakes the networks were trained on.
    dx = 18.4049079755
    dy = 2.45398773006
    # Domain extends 3000 m beyond the last turbine in x and 300 m in y
    # so no wake is clipped at the domain edge. If a larger physical
    # domain is used, adapt these values.
    x_max = np.max(x_position) + 3000
    y_max = np.max(y_position) + 300
    # Number of cells in x and y needed to cover an x_max by y_max
    # domain at the dx, dy resolution
    Nx = int(x_max/dx)
    Ny = int(y_max/dy)
    # Undisturbed farm field: free-stream velocity everywhere.
    # Note numpy layout: rows are y, columns are x.
    farm_array = np.ones((Ny, Nx)) * wind_velocity
    # Work with yaw angles rounded to two decimals
    yawn_angles = np.round(yawn_angles, 2)
    # Per-turbine power and local-TI predictions collected here
    power_CNN = []
    ti_CNN = []
    with torch.no_grad():  # inference only, no gradients needed
        # Turbines are processed in ascending x order, so every upstream
        # wake is already merged into farm_array before a downstream
        # turbine samples its inflow.
        for i in range(len(x_position)):
            # Cell indices of the turbine in the global array. The 200 m
            # y offset presumably centres the 163x163 wake image on the
            # rotor -- assumption from the training-data layout, verify
            # against the dataset generation code.
            turbine_cell = [int((x_position[i])/dx),
                            int((y_position[i] - 200)/dy)]
            # Velocity profile across the rotor, 3 cells upstream of the
            # turbine; rows +45..+110 span the rotor in the wake image.
            u_upstream_hub = farm_array[
                turbine_cell[1] + 45: turbine_cell[1] + 110, turbine_cell[0] - 3]
            # Three-point running average of 40 samples along the rotor,
            # normalised by 12 (max trained wind speed in m/s). The
            # averaging smooths small oscillations in the CNN-predicted
            # velocities. (Comprehension variable i shadows the loop
            # variable but has its own scope in Python 3.)
            u_list_hub = [
                ((u_upstream_hub[i-1] + u_upstream_hub[i] +
                  u_upstream_hub[i+1])/3)/12 for i in np.linspace(
                    5, len(u_upstream_hub)-5, 40, dtype=int)]
            # Append the normalised yaw angle and the inlet TI to form
            # the 42-value input vector of the FCNNs
            u_list_hub = np.append(u_list_hub, yawn_angles[i]/30)
            u_list_hub = np.append(u_list_hub, turbulent_int)
            # The local TI does not change from the inlet TI when the
            # turbine is not covered by a wake (all sampled u values are
            # nearly equal) and the yaw is small: |yaw/30| < 0.4, i.e.
            # below 12 degrees.
            if np.allclose(
                    u_list_hub[0], u_list_hub[0:-3], rtol=1e-02, atol=1e-02)\
                    and abs(u_list_hub[-2]) < 0.4:
                ti = turbulent_int
            # If the turbine is in a wake or the yaw angle is larger,
            # use the FCNN to find the local TI
            else:
                # FCNN forward pass; the output is rescaled by the
                # training-set normalisation
                ti = TI_model((torch.tensor(u_list_hub).float().to(device))).detach().cpu().numpy() * ti_normalisation
                # Guard against implausibly low predictions relative to
                # the free-stream TI
                if ti < turbulent_int*0.7:
                    ti = turbulent_int * 1.5
                # Clip to the TI range the network was trained on
                ti = np.clip(ti, 0.015, 0.25).item(0)
            ti_CNN.append(ti)  # Save ti value
            # Replace the inlet TI with the local TI before the power
            # prediction
            u_list_hub[-1] = ti
            # FCNN power prediction, rescaled by the training-set
            # normalisation
            turbine_energy = Power_model(torch.tensor(u_list_hub).float().to(device)).detach().cpu().numpy() * power_normalisation
            power_CNN.append(turbine_energy)  # Save power
            # Mean upstream speed serves as the hub-height inflow speed
            hub_speed = np.round(np.mean(u_upstream_hub), 2)
            # CNN input: a batch of one [u, ti, yaw] condition
            turbine_condition = [[hub_speed, ti, yawn_angles[i]]]
            # Use the CNN to calculate the wake of this turbine
            turbine_field = CNN_generator(torch.tensor(turbine_condition).float().to(device))
            # The CNN output is normalised by 12 m/s; undo that and drop
            # to a numpy array
            turbine_field = turbine_field[0][0].detach().cpu().numpy() * 12
            # Merge this turbine's wake into the farm field using the
            # root-sum-of-squares super-position model
            farm_array = super_position(
                farm_array, turbine_field, turbine_cell,
                hub_speed, wind_velocity, sp_model="SOS")
    # Negative total power, ready for use with scipy minimisers
    return -sum(power_CNN).item(0)
def FLORIS_farm_power(
        yawn_angles, x_position, y_position, wind_velocity,
        turbulent_int, floris_park):
    """
    Evaluate the power output of a wind farm using FLORIS only.

    The farm is defined by the turbine x/y coordinates and the yaw angle
    of every turbine. The total power is returned as a negative value so
    the function can be fed straight into a minimiser.

    Args:
        yawn_angles (list): Yaw angle of every turbine in the wind park
        x_position (list): All x locations of the turbines
        y_position (list): All y locations of the turbines
        wind_velocity (float): Inlet wind speed
        turbulent_int (float): Inlet turbulent intensity
        floris_park (floris.tools.FlorisInterface): Floris interface
            initialised from a json input file

    Returns:
        power (float): negative power generated by wind park
    """
    # Work with yaw angles rounded to two decimals
    yawn_angles = np.round(yawn_angles, 2)
    # Push the turbine layout into the FLORIS model
    layout = [x_position, np.array(y_position)]
    floris_park.reinitialize_flow_field(layout_array=layout)
    # Apply the rounded yaw angle (and zero blade pitch) to each turbine
    for idx in range(len(x_position)):
        settings = {'yaw_angle': yawn_angles[idx], "blade_pitch": 0.0}
        floris_park.change_turbine([idx], settings)
    # Set the inflow conditions and solve for the wake field
    floris_park.reinitialize_flow_field(
        wind_speed=wind_velocity, turbulence_intensity=turbulent_int)
    floris_park.calculate_wake()
    # Sum the per-turbine power and negate it for the optimisation
    return -sum(floris_park.get_turbine_power())
| 12,291 | 43.375451 | 130 | py |
wakenet | wakenet-master/Code/CNNWake/optimisation.py | from scipy.optimize import minimize
import numpy as np
import torch
import time
import floris.tools as wfct
from .superposition import CNNWake_farm_power, FLORIS_farm_power
from .CNN_model import Generator
from .FCC_model import FCNN
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
def CNNwake_wake_steering(x_position, y_position, initial_yaw, wind_velocity,
                          turbulent_int, CNN_generator, Power_model, TI_model,
                          device, bounds, tolerance):
    """
    Optimise the yaw angles of a wind farm using CNNwake.

    Runs an SLSQP search (scipy.optimize.minimize) over the yaw angles,
    minimising the negative farm power returned by CNNWake_farm_power for
    the given inlet wind speed and TI.

    Please ensure that the x positions are in ascending order and every
    turbine is placed at least 300 above 0 in the y direction. This is
    done to ensure that no wake is lost at the edge of the domain.

    Args:
        x_position (list or numpy array): 1d array of the x postions of
            the wind turbines in m.
        y_position (list or numpy array): 1d array of the y postions of
            the wind turbines in m.
        initial_yaw (list or numpy array): 1d array of inital yaw angle
            of every wind turbine in degree, set to 0
        wind_velocity (float): Free stream wind velocity in m/s,
            ensure NNs are trained on this wind speed
        turbulent_int (float): Turbulent intensity,
            ensure NNs are trained on this TI
        CNN_generator (Generator): CNN to predict the wake of a single
            turbine, ensure it is trained
        Power_model (Generator): FCNN to predict the power generated
            by a turbine, ensure it is trained
        TI_model (Generator): FCNN to predict the local TI of a
            turbine, ensure it is trained
        device (torch.device): Device to store and run the neural network
            on, either cpu or cuda
        bounds (list): Yaw angle bounds for optimisation [min_yaw, max_yaw]
        tolerance (float): Relative solver tolerance

    Returns:
        opt_yaw.x (np.array): Optimal yaw angle
        opt_yaw.fun (float): Optimal power output
        time_taken (float): Time taken for optimisation
    """
    # The networks are only evaluated, never trained, during optimisation
    CNN_generator.eval()
    Power_model.eval()
    TI_model.eval()
    # Sanity checks: exactly one x, y and yaw value per turbine
    assert len(x_position) == len(y_position)
    assert len(y_position) == len(initial_yaw)
    # The x positions must be in ascending order; if this assert fails
    # ensure that x goes from smallest to largest.
    # BUG FIX: the original check used np.any(), which passes as soon as
    # a single consecutive pair is increasing; np.all() enforces the
    # documented ordering for every pair.
    if len(x_position) > 1:
        assert np.all(np.diff(np.array(x_position)) > 0)
    # Smoke-test all three networks so a wrongly sized model fails early
    assert CNN_generator(torch.tensor([[
        4, 0.1, 20]]).float().to(device)).size() == \
        torch.Size([1, 1, 163, 163])
    assert TI_model(torch.tensor([
        i for i in range(0, 42)]).float().to(device)).size() == \
        torch.Size([1])
    assert Power_model(torch.tensor([
        i for i in range(0, 42)]).float().to(device)).size() == \
        torch.Size([1])
    # One (min, max) yaw bound tuple per turbine for the optimizer
    bounds_list = [(bounds[0], bounds[1]) for _ in range(0, len(x_position))]
    init_t = time.time()  # start timer
    # SLSQP search over the yaw angles; CNNWake_farm_power returns the
    # negative farm power, so minimising it maximises the power output.
    # Ensure that all arguments are given in the correct order.
    opt_yaw = minimize(
        CNNWake_farm_power, initial_yaw,
        args=(x_position, y_position, wind_velocity, turbulent_int,
              CNN_generator, Power_model, TI_model, device), method='SLSQP',
        bounds=bounds_list, options={'ftol': tolerance, 'eps': 0.1,
                                     'disp': False})
    # find time taken for optimisation
    time_taken = time.time() - init_t
    return np.round(opt_yaw.x, 2), abs(opt_yaw.fun), time_taken
def FLORIS_wake_steering(x_position, y_position, initial_yaw, wind_velocity,
                         turbulent_int, bounds, tolerance, floris_path='./'):
    """
    Optimise the yaw angles of a wind farm using FLORIS.

    Runs an SLSQP search (scipy.optimize.minimize) over the yaw angles,
    minimising the negative farm power returned by FLORIS_farm_power for
    the given inlet wind speed and TI.

    Please ensure that the x positions are in ascending order and every
    turbine is placed at least 300 above 0 in the y direction. This is
    done to ensure that no wake is lost at the edge of the domain.

    Args:
        x_position (list or numpy array): 1d array of the x postions of
            the wind turbines in m.
        y_position (list or numpy array): 1d array of the y postions of
            the wind turbines in m.
        initial_yaw (list or numpy array): 1d array of inital yaw angle
            of every wind turbine in degree, set to 0
        wind_velocity (float): Free stream wind velocity in m/s
        turbulent_int (float): Turbulent intensity
        bounds (list): Yaw angle bounds for optimisation [min, max]
        tolerance (float): Relative solver tolerance
        floris_path (str): Path to FLORIS json file

    Returns:
        floris_opti.x (np.array): Optimal yaw angle
        floris_opti.fun (float): Optimal power output
        time_taken (float): Time taken for optimisation

    Raises:
        FileNotFoundError: If FLORIS_input_gauss.json is not found at
            floris_path.
    """
    # Sanity checks: exactly one x, y and yaw value per turbine
    assert len(x_position) == len(y_position)
    assert len(y_position) == len(initial_yaw)
    # One (min, max) yaw bound tuple per turbine for the optimizer
    bounds = [(bounds[0], bounds[1]) for _ in range(0, len(x_position))]
    # Load the FLORIS interface from the json input file.
    # BUG FIX: the original code only printed a warning on a missing file
    # and then carried on with an invalid interface (an int 0), which
    # crashed later inside the optimiser with a confusing AttributeError.
    # Print the hint, then re-raise so the caller sees the real problem
    # immediately. (Also fixed the "jason"/"lcoation" typos in the
    # message.)
    try:
        floris_park = wfct.floris_interface.FlorisInterface(
            floris_path + "FLORIS_input_gauss.json")
    except FileNotFoundError:
        print('No FLORIS_input_gauss.json file found at this location, '
              'please specify the path to this file')
        raise
    init_t = time.time()  # Start timer
    # SLSQP search over the yaw angles; FLORIS_farm_power returns the
    # negative farm power, so minimising it maximises the power output.
    # Ensure that all arguments are given in the correct order.
    floris_opti = minimize(
        FLORIS_farm_power, initial_yaw,
        args=(x_position, y_position, wind_velocity,
              turbulent_int, floris_park),
        method='SLSQP', bounds=bounds,
        options={'ftol': tolerance, 'eps': 0.1,
                 'disp': False})
    time_taken = time.time() - init_t
    return np.round(floris_opti.x, 2), abs(floris_opti.fun), time_taken
if __name__ == '__main__':
    # To run individual CNNWake files, the imports are not allowed to be
    # relative. Instead of: from .CNN_model import Generator
    # it needs to be: from CNN_model import Generator for all CNNWake imports
    # select device to run model on
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Load and set up all NNs
    CNN_generator = Generator(3, 30).to(device)
    CNN_generator.load_model('./trained_models/CNN_FLOW.pt', device=device)
    Power_model = FCNN(42, 300, 1).to(device)
    Power_model.load_state_dict(torch.load('./trained_models/FCNN_POWER.pt',
                                           map_location=device))
    TI_model = FCNN(42, 300, 1).to(device)
    TI_model.load_state_dict(torch.load('./trained_models/FCNN_TI.pt',
                                        map_location=device))
    # Use optimisation to find best yaw angle
    yaw1, power1, timing1 = CNNwake_wake_steering(
        [100, 100, 1000, 1000],
        [300, 800, 300, 800],
        [0, 0, 0, 0], 10.6, 0.09, CNN_generator, Power_model, TI_model,
        device, [-30, 30], 1e-07)
    # BUG FIX: corrected the "abgle" typo in the user-facing output below
    print(f"CNNwake optimized yaw angle: {yaw1}")
    # Find FLORIS best yaw angle
    yaw, power, timing = FLORIS_wake_steering(
        [100, 100, 1000, 1000],
        [300, 800, 300, 800],
        [0, 0, 0, 0], 10.6, 0.09, [-30, 30], 1e-07)
    print(f"FLORIS optimized yaw angle: {yaw}")
| 8,797 | 42.554455 | 79 | py |
wakenet | wakenet-master/Code/CNNWake/__init__.py | 0 | 0 | 0 | py | |
wakenet | wakenet-master/Code/CNNWake/CNN_model.py | import torch
import torch.nn as nn
import numpy as np
import random
import floris.tools as wfct
__author__ = "Jens Bauer"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["Jens Bauer"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "jens.bauer20@imperial.ac.uk"
__status__ = "Development"
class Generator(nn.Module):
"""
The class is the Neural Network that generates the flow field around a
wind turbine. The network uses the pytorch framwork and uses fully
connected and transpose convolutional layers.
The methods of this class include the training of the network,
testing of the accuracy and generaton of the training data.
"""
def __init__(self, nr_input_var, nr_filter):
"""
init method that generates the network architecture using pytroch's
ConvTranspose2d and Sequential layers. The number of input varibles
and size of the given network can be changed. The output size will not
change and it set at 163 x 163 pixels.
Args:
nr_input_var (int): Nr. of inputs, usually 3 for u, ti and yaw
nr_filter (int): Nr. filters used in deconv layers, more filters
means that the network will have more parameters
"""
super(Generator, self).__init__()
# linear layer
self.FC_Layer = nn.Sequential(nn.Linear(in_features=nr_input_var,
out_features=9))
# Deconvolutional layer
self.net = nn.Sequential(
self.layer(1, nr_filter * 16, 4, 2, 1),
self.layer(nr_filter * 16, nr_filter * 8, 4, 1, 1),
self.layer(nr_filter * 8, nr_filter * 8, 4, 2, 1),
self.layer(nr_filter * 8, nr_filter * 4, 4, 2, 1),
self.layer(nr_filter * 4, nr_filter * 4, 3, 2, 1),
nn.ConvTranspose2d(nr_filter * 4, 1, kernel_size=3,
stride=3, padding=1),
)
def layer(self, in_filters, out_filters, kernel_size, stride, padding):
"""
One layer of the CNN which consits of ConvTranspose2d,
a batchnorm and LRelu activation function.
Function is used to define one layer of the network
Args:
in_filters (int): Nr. of filters in the previous layer
out_filters (int): Nr. of output filters
kernel_size (int): Size of the ConvTranspose2d layer
stride (int): Stride of the ConvTranspose2d layer
padding (int): Padding used in this layer
Returns:
nn.Sequential: Pytroch Sequential container that defines one layer
"""
# One layer of the network uses:
# Deconvolutional layer, then batch norm and leakyrelu
# activation function
single_layer = nn.Sequential(nn.ConvTranspose2d(in_filters,
out_filters,
kernel_size,
stride,
padding,
bias=False, ),
nn.BatchNorm2d(out_filters),
nn.LeakyReLU(0.2), )
return single_layer
def initialize_weights(self):
"""
Initilize weights using a normal distribution with mean = 0,std2 = 0.02
which has helped training. Loop over all modules, if module is
convolutional layer or batchNorm then initialize weights.
Args:
model (torch model): Neural network model defined using Pytorch
"""
# for ever layer in model
for m in self.modules():
# check if it deconvolutional ot batch nrom layer
if isinstance(m, (nn.Conv2d, nn.BatchNorm2d)):
# initialize weights using a normal distribution
nn.init.normal_(m.weight.data, 0.0, 0.02)
def forward(self, x):
"""
Functions defines a forward pass though the network. Can be used for
a single input or a batch of inputs
Args:
x (torch.tensor): input tensor, to be passed through the network
Returns:
flow_fields (torch.tensor): Output of network
"""
# first the fully connected layer takes in the input, and outputs
# 9 neurons which are reshaped into a 3x3 array
x = self.FC_Layer(x).view(len(x), -1, 3, 3)
# the Conv layers take in the 3x3 array and output a 163x163 array
return self.net(x)
    @staticmethod
    def create_floris_dataset(
            size, image_size, u_range, ti_range, yaw_range,
            floris_init_path=".", curl=False):
        """
        Function to generate the dataset needed for training using FLORIS.
        The flowfield around a turbine is generated for a large range of wind
        speeds, turbulent intensities and yaw angles. The 2d array and
        correspoding init conditions are saved for training. The data is
        generated using a Gaussian wake model, please see:
        https://doi.org/10.1016/j.renene.2014.01.002.
        For more information about FLORIS see: https://github.com/NREL/floris.
        Function can be used to generate training, validation and test sets.
        Args:
            size (int): Size of the dataset
            image_size (int): Size of the flow field outputs that
                              are generated, this depends on the
                              Neural network used, should be 163.
            u_range (list): Bound of u values [u_min, u_max] used
            ti_range (list): Bound of TI values [TI_min, TI_max] used
            yaw_range (list): Bound of yaw angles [yaw_min, yaw_max] used
            floris_init_path (str, optional): Path to the FLORIS json file.
                                              Defaults to ".".
            curl (bool, optional): If curl model should be used please set
                                   to True, see this for more information:
                                   https://doi.org/10.5194/wes-4-127-2019.
                                   Defaults to False.
        Returns:
            x (torch.tensor): Tensor of size (size, 3) holding the flow
                conditions (u, ti, yaw) of the corresponding flow field
                in the y tensor.
            y (torch.tensor): Tensor of size (size, image_size, image_size)
                with all the generated flow fields. The flow fields are
                normalised (divided by 12) to help training.
        """
        # sample u, ti and yaw angle from a uniform distribution,
        # rounded to 2 decimals
        u_list = [round(random.uniform(u_range[0], u_range[1]), 2) for
                  i in range(0, size)]
        ti_list = [round(random.uniform(ti_range[0], ti_range[1]), 2) for
                   i in range(0, size)]
        yawn_list = [round(random.uniform(yaw_range[0], yaw_range[1]), 2) for
                     i in range(0, size)]
        # initialize FLORIS model using the json file (Gaussian or curl wake)
        if curl is False:
            floris_turbine = wfct.floris_interface.FlorisInterface(
                floris_init_path + "/FLORIS_input_gauss.json")
        else:
            floris_turbine = wfct.floris_interface.FlorisInterface(
                floris_init_path + "/FLORIS_input_curl.json")
        # initialize empty numpy arrays to store the 2d flow fields and
        # the corresponding u, ti and yaw values
        y = np.zeros((size, image_size, image_size))
        x = np.zeros((size, 3))
        # create train examples; '_' is (unconventionally) the loop index
        print("generate FLORIS data")
        for _ in range(0, size):
            # progress print every 500 samples
            if _ % 500 == 0:
                print(f"{_}/{size}")
            # set wind speed, ti and yaw angle for FLORIS model
            floris_turbine.reinitialize_flow_field(
                wind_speed=u_list[_],
                turbulence_intensity=ti_list[_])
            floris_turbine.change_turbine([0], {'yaw_angle': yawn_list[_]})
            # calculate the wakefield
            floris_turbine.calculate_wake()
            # extract horizontal plane at hub height (90 m, presumably
            # metres -- TODO confirm against the FLORIS json)
            cut_plane = floris_turbine.get_hor_plane(
                height=90,
                x_resolution=image_size,
                y_resolution=image_size,
                x_bounds=[0, 3000],
                y_bounds=[-200, 200]).df.u.values.reshape(image_size,
                                                          image_size)
            # save the wind speed values of the plane at hub height and
            # the corresponding turbine stats
            y[_] = cut_plane
            x[_] = u_list[_], ti_list[_], yawn_list[_]
        # turn numpy arrays into pytorch tensors
        x = torch.tensor(x, dtype=torch.float)
        # The wind speeds are normalised by dividing by 12,
        # i.e. every value will be between 0-1 which helps training
        y = torch.tensor(y, dtype=torch.float)/12
        return x, y
    def error(self, x_eval, y_eval, device, image_size=163, normalisation=12):
        r"""
        Calculate the average pixel wise percentage error of the model on
        a evaluation set. The error function is:
        error = 1/set_size *\sum_{n=0}^{set_size}(1/image_size**2 *
        \sum_{i=0}^{image_size**2}(100*abs(FLORIS_{n,i} - GAN_{n,i})/
        max(FLORIS_{n,i})))
        For a detailed explanation of this function please see the report in
        the ACSE9 repo.

        Args:
            x_eval (torch.tensor): flow conditions of the evaluation set.
            y_eval (torch.tensor): reference flow fields (FLORIS output).
            device (str): device the model runs on, cpu or cuda.
            image_size (int, optional): side length of the flow-field arrays.
            normalisation (int, optional): NOTE(review) -- this argument is
                never used in the body; the error is instead normalised
                per-sample by max(y_eval[n]). Confirm whether it should be.

        Returns:
            float: mean pixel-wise percentage error over the set.
        """
        error_list = []
        # Use model to predict the wakes for the given conditions in x
        model_predict = self.forward(x_eval.to(device))
        for n in range(0, len(x_eval)):
            # Calculate the mean error between CNNwake output and FLORIS
            # for a given flow field using the function given above
            pixel_error = np.sum(abs(
                y_eval.detach().cpu().numpy()[n] -
                model_predict.squeeze(1)[n].detach().cpu().numpy()) /
                (torch.max(y_eval.detach()[n]).cpu().numpy()))
            # divide by number of pixels in array for an mean value
            pixel_error /= image_size * image_size
            # convert to a percentage
            error_list.append(pixel_error * 100)
        # return mean error over all evaluation samples
        return np.mean(error_list)
def evaluate_model(self, set_size, u_range, ti_range, yaw_range,
image_size=163, device='cpu', normalisation=12,
florisjason_path="."):
"""
Function to calculate a average pixel wise percentage error
of the model using the error function. This functions generates
a test set and evaluates the model on this unseen data to provide
a test error.
Args:
set_size (int, optional): Nr. of samples to be used for testing.
u_range (list): Bound of u values [u_min, u_max] used
ti_range (list): Bound of TI values [TI_min, TI_max] used
yaw_range (list): Bound of yaw angles [yaw_min, yaw_max] used
image_size (int, optional): Size of the flow field.
Defaults to 163.
device (str): Device to store and run the neural network on,
either cpu or cuda.
normalisation (int, optional): The CNN output is between
0 and 1 due to the normalisation used, therefore it needs to
be renormalised. Defaults to 12.
florisjason_path (str, optional): Location of the FLORIS jason
file. Defaults to ".".
Returns:
error (float): Error of model on test set
"""
# Create a dataset to test the model on
x_eval, y_eval = self.create_floris_dataset(
set_size, image_size, u_range=u_range, ti_range=ti_range,
yaw_range=yaw_range, floris_init_path=florisjason_path)
# Use generated data set to calculate the error of CNNwake when
# compared with the FLORIS output
test_error = self.error(x_eval, y_eval, device,
image_size, normalisation=12)
return test_error
def epoch_training(self, criterion, optimizer, dataloader, device):
"""
Trains the model for one epoch data provided by dataloader. The model
will be updated after each batch and the function will return the
train loss of the last batch
Args:
criterion (torch.nn.criterion): Loss function used to train model
optimizer (torch.optim.Optimizer): Optimizer for gradient descent
dataloader (torch.utils.DataLoader): Dataloader to store dataset
device (str): Device on which model/data is stored, cpu or cuda
Returns:
training loss (float): Loss of training set defined by criterion
"""
# For all training data in epoch
for real_images, label in dataloader:
# move data to device
real_images = real_images.to(device)
label = label.to(device)
# images need to be in correct shape: batch_size x 1 x 1 x 3
# compute reconstructions of flow-field using the CNN
outputs = self.forward(label)
# compute training reconstruction loss using the
# loss function set
train_loss = criterion(outputs, real_images)
optimizer.zero_grad() # Zero gradients of previous step
train_loss.backward() # compute accumulated gradients
optimizer.step() # Do optimizer step
# return training loss
return train_loss.item()
def load_model(self, path='.', device='cpu'):
"""
Function to load model from a pt file into this class.
Args:
path (str): path to saved model.
device (torch.device): Device to load onto, cpu or cuda
"""
# load the pretrained model
self.load_state_dict(torch.load(path, map_location=device))
def save_model(self, name='generator.pt'):
"""
Function to save current model paramters so that it can
be used again later. Needs to be saved with as .pt file
Args:
name (str): name of .pt file from which to load model
"""
# Save current model parameters
torch.save(self.state_dict(), name)
| 14,425 | 42.583082 | 79 | py |
wakenet | wakenet-master/Code/CNNWake/in/__init__.py | from .CNN_model import Generator
from .FCC_model import FCNN
from .superposition import super_position, FLORIS_farm_power, CNNWake_farm_power
from .train_FCNN import train_FCNN_model
from .train_CNN import train_CNN_model
from .visualise import Compare_CNN_FLORIS, visualize_farm
from .optimisation import FLORIS_wake_steering, CNNwake_wake_steering | 349 | 49 | 80 | py |
Enhancement-Coded-Speech | Enhancement-Coded-Speech-master/CepsDomCNN_Train.py | #####################################################################################
# Training the CNN for cepstral domain approach III.
# Input:
# 1- Training input: Train_inputSet_g711.mat
# 2- Training target: Train_targetSet_g711.mat
# 3- Validation input: Validation_inputSet_g711.mat
# 4- Validation target: Validation_targetSet_g711.mat
# Output:
# 1- Trained CNN weights: cnn_weights_ceps_g711_best_example.h5
#####################################################################################
""" import os
import tensorflow as tf
from keras.engine.topology import Layer
from keras.models import Model
from keras.layers import Input, Add, Multiply, Average, Activation, LeakyReLU
from keras.layers.convolutional import Conv1D, MaxPooling1D, UpSampling1D, AveragePooling1D
from keras import backend as K
import keras.optimizers as optimizers
import numpy as np
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger, LearningRateScheduler
import keras.callbacks as cbs
from numpy import random
import scipy.io as sio
from sklearn import preprocessing
import math
import time """
import os
import time
import numpy as np
from numpy import random
import scipy.io as sio
import h5py as h5
import scipy.io.wavfile as swave
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras.models import Model, load_model, save_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Input, Add, Activation, LeakyReLU, Conv1D, MaxPooling1D, UpSampling1D
from tensorflow.keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping, ModelCheckpoint, LearningRateScheduler
def snr(y_true, y_pred):
    """
    SNR is Signal to Noise Ratio:
    10 * log10( sum(y_true^2) / sum((y_pred - y_true)^2) )
    """
    signal_power = tf.math.reduce_sum(tf.math.square(y_true))
    noise_power = tf.math.reduce_sum(tf.math.square(y_pred - y_true))
    # tf has no log10, so use log(x)/log(10)
    return 10.0 * tf.math.log(signal_power / noise_power) / tf.math.log(10.0)
#####################################################################################
# 0. Setup
#####################################################################################
# Settings and CNN topology parameters
# NOTE(review): 'codec' is not referenced again below; the data and weight
# file names hard-code "g711" instead -- confirm this is intentional.
codec = "g711"
fram_length = 32  # length of one CNN input frame (network input size)
n1 = 22 # F=22 in paper (filters in the outer Conv1D stages)
n2 = 44 # filters in the middle Conv1D stages
n3 = 22 # filters in the innermost Conv1D stage
N_cnn = 6 # N=6 in paper (kernel size of every Conv1D)
# Training parameters
nb_epochs = 2
batch_size = 16
learning_rate = 5e-4
#####################################################################################
# 1. load data
#####################################################################################
print('> Loading data... ')
# Load Input Data (degraded/coded speech features)
mat_input = "./data/Train_inputSet_g711.mat"
mat_input = os.path.normcase(mat_input)
x_train_noisy = sio.loadmat(mat_input)
x_train_noisy = x_train_noisy['inputSetNorm']
x_train_noisy = np.array(x_train_noisy)
# Load Input Data for Validation
mat_input_vali = "./data/Validation_inputSet_g711.mat"
mat_input_vali = os.path.normcase(mat_input_vali)
x_train_noisy_vali = sio.loadmat(mat_input_vali)
x_train_noisy_vali = x_train_noisy_vali['inputSetNorm']
x_train_noisy_vali = np.array(x_train_noisy_vali)
# Load Target Data (clean reference)
mat_target = "./data/Train_targetSet_g711.mat"
mat_target = os.path.normcase(mat_target)
x_train = sio.loadmat(mat_target)
x_train = x_train['targetSet']
x_train = np.array(x_train)
# Load Target Data for Validation
mat_target_vali = "./data/Validation_targetSet_g711.mat"
mat_target_vali = os.path.normcase(mat_target_vali)
x_train_vali = sio.loadmat(mat_target_vali)
x_train_vali = x_train_vali['targetSet']
x_train_vali = np.array(x_train_vali)
# Randomization of Training Pairs.
# 'random' here is numpy.random (see 'from numpy import random' above),
# so this seed also controls np.random.shuffle below.
random.seed(1024)
# Stack input and target side by side so shuffling keeps pairs aligned,
# then split them again at column fram_length.
train = np.column_stack((x_train_noisy, x_train))
np.random.shuffle(train)
x_train_noisy = train[:, :fram_length]
x_train = train[:, fram_length:]
# Reshape of Training Pairs and validation Pairs to (samples, frame, 1)
# as expected by Conv1D
x_train_noisy = np.reshape(x_train_noisy, (x_train_noisy.shape[0], x_train_noisy.shape[1], 1))
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_train_noisy_vali = np.reshape(x_train_noisy_vali, (x_train_noisy_vali.shape[0], x_train_noisy_vali.shape[1], 1))
x_train_vali = np.reshape(x_train_vali, (x_train_vali.shape[0], x_train_vali.shape[1], 1))
print('> Data Loaded. Compiling...')
#####################################################################################
# 2. define model
#####################################################################################
# Encoder-decoder Conv1D network with additive skip connections
# (c1 -> c1_2 and c2 -> c2_2 via Add()).
input_vec = Input(shape=(fram_length, 1))
# Encoder stage 1: two conv layers, then downsample by 2
c1 = Conv1D(n1, N_cnn, padding='same')(input_vec)
c1 = LeakyReLU(0.2)(c1)
c1 = Conv1D(n1, N_cnn, padding='same')(c1)
c1 = LeakyReLU(0.2)(c1)
x = MaxPooling1D(2)(c1)
# Encoder stage 2: two conv layers, then downsample by 2
c2 = Conv1D(n2, N_cnn, padding='same')(x)
c2 = LeakyReLU(0.2)(c2)
c2 = Conv1D(n2, N_cnn, padding='same')(c2)
c2 = LeakyReLU(0.2)(c2)
x = MaxPooling1D(2)(c2)
# Bottleneck
c3 = Conv1D(n3, N_cnn, padding='same')(x)
c3 = LeakyReLU(0.2)(c3)
x = UpSampling1D(2)(c3)
# Decoder stage 2 with skip connection from c2
c2_2 = Conv1D(n2, N_cnn, padding='same')(x)
c2_2 = LeakyReLU(0.2)(c2_2)
c2_2 = Conv1D(n2, N_cnn, padding='same')(c2_2)
c2_2 = LeakyReLU(0.2)(c2_2)
m1 = Add()([c2, c2_2])
m1 = UpSampling1D(2)(m1)
# Decoder stage 1 with skip connection from c1
c1_2 = Conv1D(n1, N_cnn, padding='same')(m1)
c1_2 = LeakyReLU(0.2)(c1_2)
c1_2 = Conv1D(n1, N_cnn, padding='same')(c1_2)
c1_2 = LeakyReLU(0.2)(c1_2)
m2 = Add()([c1, c1_2])
# Linear output layer mapping back to a single channel
decoded = Conv1D(1, N_cnn, padding='same', activation='linear')(m2)
model = Model(input_vec, decoded)
model.summary()
# NOTE(review): 'lr=' is deprecated in newer tf.keras in favour of
# 'learning_rate=' -- confirm against the installed TF version.
adam = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(optimizer=adam, loss='mse', metrics=[snr])
#####################################################################################
# 3. Fit the model
#####################################################################################
# Stop criteria: stop after 16 epochs without val_loss improvement
stop_str = EarlyStopping(monitor='val_loss', patience=16, verbose=1, mode='min')
# Reduce learning rate by half after 2 stagnant epochs.
# NOTE(review): 'epsilon=' was renamed 'min_delta=' in modern tf.keras --
# confirm against the installed TF version.
reduce_LR = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=1, mode='min', epsilon=0.0001, cooldown=0, min_lr=0)
# Save only best weights (by val_loss).
# NOTE(review): 'period=' is deprecated in modern tf.keras ('save_freq=').
best_weights = "./data/cnn_weights_ceps_g711_example.h5"
best_weights = os.path.normcase(best_weights)
model_save = ModelCheckpoint(best_weights, monitor='val_loss', save_best_only=True, mode='min', save_weights_only=True, period=1)
start = time.time()
print("> Training model " + "using Batch-size: " + str(batch_size) + ", Learning_rate: " + str(learning_rate) + "...")
hist = model.fit(x=x_train_noisy, y=x_train, epochs=nb_epochs, batch_size=batch_size, shuffle=True, initial_epoch=0,
                 callbacks=[reduce_LR, stop_str, model_save],
                 validation_data=(x_train_noisy_vali, x_train_vali)
                 )
print("> Saving Completed, Time : ", time.time() - start)
print('> +++++++++++++++++++++++++++++++++++++++++++++++++++++ ')
| 6,716 | 35.112903 | 151 | py |
Enhancement-Coded-Speech | Enhancement-Coded-Speech-master/CepsDomCNN_Test.py | #####################################################################################
# Use the trained CNN for cepstral domain approach III.
# Input:
# 1- CNN input: type_3_cnn_input_ceps.mat
# 2- Trained CNN weights: cnn_weights_ceps_g711_best.h5
# Output:
# 1- CNN output: type_3_cnn_output_ceps.mat
#####################################################################################
""" import os
from keras.models import Model
from keras.engine.topology import Layer
from keras.layers import Input, Add, Multiply, Average, Activation, LeakyReLU
from keras.layers import merge, Input, Dense, Flatten, BatchNormalization, Activation, LeakyReLU
from keras.layers.convolutional import Conv1D, MaxPooling1D, UpSampling1D, AveragePooling1D
from keras import backend as K
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, CSVLogger, TensorBoard, LearningRateScheduler
import keras.optimizers as optimizers
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import h5py as h5
import scipy.io.wavfile as swave
from sklearn import preprocessing
from weightnorm import AdamWithWeightnorm
from tensorflow.python.framework import ops
import math
import time """
import os
import numpy as np
import scipy.io as sio
import h5py as h5
import scipy.io.wavfile as swave
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras.models import Model, load_model, save_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Input, Add, Activation, LeakyReLU, Conv1D, MaxPooling1D, UpSampling1D
from tensorflow.keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping, ModelCheckpoint, LearningRateScheduler
def snr(y_true, y_pred):
    """
    SNR is Signal to Noise Ratio:
    10 * log10( sum(y_true^2) / sum((y_pred - y_true)^2) ).
    Used as a metric when loading the trained model.
    """
    return 10.0 * tf.math.log((tf.math.reduce_sum(tf.math.square(y_true))) / (tf.math.reduce_sum(tf.math.square(y_pred - y_true)))) / tf.math.log(10.0)
#####################################################################################
# 0. Setup
#####################################################################################
# Settings and CNN topology parameters.
# These must match the training script exactly, otherwise load_weights
# below will fail or silently mis-map layers.
fram_length = 32
n1 = 22 # F=22 in paper
n2 = 44 # filters in the middle Conv1D stages
n3 = 22 # filters in the innermost Conv1D stage
N_cnn = 6 # N=6 in paper
#####################################################################################
# 2. define model
#####################################################################################
# Same encoder-decoder with additive skip connections as in training
input_vec = Input(shape=(fram_length, 1))
c1 = Conv1D(n1, N_cnn, padding='same')(input_vec)
c1 = LeakyReLU(0.2)(c1)
c1 = Conv1D(n1, N_cnn, padding='same')(c1)
c1 = LeakyReLU(0.2)(c1)
x = MaxPooling1D(2)(c1)
c2 = Conv1D(n2, N_cnn, padding='same')(x)
c2 = LeakyReLU(0.2)(c2)
c2 = Conv1D(n2, N_cnn, padding='same')(c2)
c2 = LeakyReLU(0.2)(c2)
x = MaxPooling1D(2)(c2)
c3 = Conv1D(n3, N_cnn, padding='same')(x)
c3 = LeakyReLU(0.2)(c3)
x = UpSampling1D(2)(c3)
c2_2 = Conv1D(n2, N_cnn, padding='same')(x)
c2_2 = LeakyReLU(0.2)(c2_2)
c2_2 = Conv1D(n2, N_cnn, padding='same')(c2_2)
c2_2 = LeakyReLU(0.2)(c2_2)
m1 = Add()([c2, c2_2])
m1 = UpSampling1D(2)(m1)
c1_2 = Conv1D(n1, N_cnn, padding='same')(m1)
c1_2 = LeakyReLU(0.2)(c1_2)
c1_2 = Conv1D(n1, N_cnn, padding='same')(c1_2)
c1_2 = LeakyReLU(0.2)(c1_2)
m2 = Add()([c1, c1_2])
decoded = Conv1D(1, N_cnn, padding='same', activation='linear')(m2)
model = Model(input_vec, decoded)
model.summary()
# Restore the best weights produced by the training script
model.load_weights("./data/cnn_weights_ceps_g711_best.h5")
#####################################################################################
# 4. Test
#####################################################################################
print('> Loading Test data ... ')
mat_input = "./data/type_3_cnn_input_ceps.mat"
mat_input = os.path.normcase(mat_input)
x_test_noisy = sio.loadmat(mat_input)
x_test_noisy = x_test_noisy['inputTestNorm']
x_test_noisy = np.array(x_test_noisy)
# Reshape to (samples, frame, 1) as expected by Conv1D
x_test_noisy = np.reshape(x_test_noisy,(x_test_noisy.shape[0], x_test_noisy.shape[1], 1))
predicted = model.predict(x_test_noisy)
# Save predictions for the MATLAB post-processing stage
preOutput = "./data/type_3_cnn_output_ceps.mat"
preOutput = os.path.normcase(preOutput)
sio.savemat(preOutput, {'predictions': predicted})
| 4,173 | 31.866142 | 151 | py |
kl_sample | kl_sample-master/plot_data.py | import os, sys, fnmatch
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from astropy.io import fits
import kl_sample.io as io
import kl_sample.reshape as rsh
import kl_sample.settings as set
# Location of the Fourier-space data products
data_path = os.path.expanduser("~")+'/data_project/kl_sample/data/data_fourier.fits'
# Read data: multipoles, EE power spectra, noise and simulations.
# NOTE(review): 'ell' is read but never used below -- confirm it is
# intended for later plotting.
ell = io.read_from_fits(data_path, 'ELL')
cl_EE = io.read_from_fits(data_path, 'CL_EE')
noise_EE = io.read_from_fits(data_path, 'CL_EE_NOISE')
sims_EE = io.read_from_fits(data_path, 'CL_SIM_EE')
# Clean data from noise (subtract the noise estimate)
cl_EE = rsh.clean_cl(cl_EE, noise_EE)
sims_EE = rsh.clean_cl(sims_EE, noise_EE)
# Per-field covariance from simulations, then combine fields
cov_pf = rsh.get_covmat_cl(sims_EE)
cl_EE = rsh.unify_fields_cl(cl_EE, cov_pf)
noise_EE = rsh.unify_fields_cl(noise_EE, cov_pf)
sims_EE = rsh.unify_fields_cl(sims_EE, cov_pf)
print(cl_EE.shape, sims_EE.shape, cov_pf.shape)
| 860 | 26.774194 | 84 | py |
kl_sample | kl_sample-master/kl_sample.py | """
kl_sample: a code that sample cosmological parameters using
lensing data (for now CFHTlens). It performs a KL transform
to compress data. There are three main modules:
a - prep_real: prepare data in real space and store them
inside the repository. Once they are there it is no
longer needed to rerun it;
b - prep_fourier: prepare data in fourier space and store
them inside the repository. Once they are there it is
no longer needed to rerun it;
c - run: given the data (either in real or fourier space)
and some additional input parameter do the actual run.
Implemented samplers: emcee or single_point
"""
import sys
from kl_sample.io import argument_parser
# -----------------MAIN-CALL-----------------------------------------
if __name__ == '__main__':
    # Call the parser
    args = argument_parser()
    # Redirect the run to the correct module. Each module is imported
    # lazily so a mode only pays the import cost of its own dependencies.
    # NOTE(review): an unrecognized mode falls through all branches and
    # exits silently with status 0 -- confirm the parser restricts
    # args.mode to these choices.
    if args.mode == 'prep_real':
        from kl_sample.prep_real import prep_real
        sys.exit(prep_real(args))
    if args.mode == 'prep_fourier':
        from kl_sample.prep_fourier import prep_fourier
        sys.exit(prep_fourier(args))
    if args.mode == 'plots':
        from kl_sample.plots import plots
        sys.exit(plots(args))
    if args.mode == 'run':
        from kl_sample.run import run
        sys.exit(run(args))
    if args.mode == 'get_kl':
        from kl_sample.get_kl import get_kl
        sys.exit(get_kl(args))
| 1,429 | 33.878049 | 69 | py |
kl_sample | kl_sample-master/plot_triangles_2pt.py | import numpy as np
from astropy.io import fits
import kl_sample.io as io
# Print the HDU structure of the real- and Fourier-space data files
io.print_info_fits('/users/groups/damongebellini/kl_sample/data/data_real.fits')
io.print_info_fits('/users/groups/damongebellini/kl_sample/data/data_fourier.fits')
# The block below is commented-out (and syntactically incomplete)
# prototype code kept as a string literal; it is never executed.
'''
d_r=fits.open("/users/groups/damongebellini/kl_sample/data/data_real.fits")
d_f=fits.open("/users/groups/damongebellini/kl_sample/data/data_fourier.fits")
def unwrap_cells(ell :
ells=d_f[1].data
cells=d_f[2].data
nells=d_f[2].data
print(ells.shape,cells.shape,nells.shape)
'''
| 529 | 24.238095 | 83 | py |
kl_sample | kl_sample-master/kl_sample/likelihood.py | """
This module contains all the relevant functions
used to compute the likelihood.
Functions:
- how_many_sims(data, settings)
- select_sims(data, settings)
- compute_kl(cosmo, data, settings)
- apply_kl(kl_t, corr, settings)
- compute_inv_covmat(data, settings)
- lnprior(var, full, mask)
- lnlike(var, full, mask, data, settings)
- lnprob(var, full, mask, data, settings)
- get_random(pars, squeeze)
"""
import numpy as np
import random
import sys
import kl_sample.cosmo as cosmo_tools
import kl_sample.checks as checks
import kl_sample.reshape as rsh
import kl_sample.settings as set
# ------------------- Simulations --------------------------------------------#
def how_many_sims(n_sims, n_sims_tot, n_data, n_data_tot):
    """ Compute how many simulations will be used.
    Args:
        n_sims: requested amount -- 'all', 'auto', or an integer-like value.
        n_sims_tot: total number of available simulations.
        n_data: number of data points actually used.
        n_data_tot: total number of data points.
    Returns:
        int: the number of simulations that will be used.
    """
    # 'all': simply use every available simulation
    if n_sims == 'all':
        return n_sims_tot
    # 'auto': choose the count that keeps the covariance debiasing
    # ratio consistent with the reduced data vector size
    if n_sims == 'auto':
        ratio = (n_sims_tot-n_data_tot-2.)/(n_sims_tot-1.)
        return int(round((2.+n_data-ratio)/(1.-ratio)))
    # otherwise an explicit number was requested
    return int(n_sims)
def select_sims(data, settings):
    """ Select a random subset of the stored simulations.
    Args:
        data: dictionary containing the data stored.
        settings: dictionary with all the settings used.
    Returns:
        array of simulations that will be used.
    """
    # How many simulations to draw, and how many are available
    n_use = settings['n_sims']
    n_tot = settings['n_sims_tot']
    # Draw n_use distinct simulation indices without replacement
    picked = random.sample(range(n_tot), n_use)
    # Keep only the selected simulations (second axis indexes sims)
    return data['corr_sim'][:, picked]
# ------------------- KL related ---------------------------------------------#
def compute_kl(params, pz, noise, ell_min=2, ell_max=2000, scale_dep=False,
               bp=None):
    """ Compute the KL transform.
    Args:
        params: cosmological parameters.
        pz: photo-z
        noise: estimated noise, one matrix per multipole.
        ell_min, ell_max: minimum, maximum ell.
        scale_dep: kl with scale dependence or not.
        bp: bandpowers used to bin the ell-dependent transform
            (only used when scale_dep is True).
    Returns:
        array with the KL transform that will be used.
    """
    # Compute theory Cl's (S = signal)
    cosmo_ccl = cosmo_tools.get_cosmo_ccl(params)
    S = cosmo_tools.get_cls_ccl(params, cosmo_ccl, pz, ell_max)
    S = S[ell_min:ell_max+1]
    # Cholesky decomposition of noise (N=LL^+)
    N = noise[ell_min:ell_max+1]
    L = np.linalg.cholesky(N)
    # 'set' is the kl_sample.settings module (it shadows the builtin)
    if set.PINV:
        inv_L = np.linalg.pinv(L)
    else:
        inv_L = np.linalg.inv(L)
    # Whitened matrix M = L^-1 (S+N) L^-T, one per multipole, for which
    # we want to calculate eigenvalues and eigenvectors
    M = np.array([np.dot(inv_L[x], S[x]+N[x]) for x in range(len(S))])
    M = np.array([np.dot(M[x], inv_L[x].T) for x in range(len(S))])
    # Calculate eigenvalues and eigenvectors
    eigval, eigvec = np.linalg.eigh(M)
    # Re-order eigenvalues and eigenvectors: argsort is ascending and the
    # [::-1] reverses it, i.e. sorted from LARGEST eigenvalue first
    new_ord = np.array([np.argsort(eigval[x])[::-1] for x in range(len(S))])
    eigval = np.array([eigval[x][new_ord[x]] for x in range(len(S))])
    eigvec = np.array([eigvec[x][:, new_ord[x]] for x in range(len(S))])
    # Calculate transformation matrix (E) from eigenvectors and L^-1
    E = np.array([np.dot(eigvec[x].T, inv_L[x]) for x in range(len(S))])
    # Fix the sign ambiguity of each eigenvector using the sign of its
    # first element relative to the ell=2 transform
    signs = np.array([[np.sign(E[ell][x][0]/E[2][x][0])
                       for x in range(len(S[0]))] for ell in range(len(S))])
    E = np.array([(E[x].T*signs[x]).T for x in range(len(S))])
    # Test if the transformation matrix gives the correct new Cl's
    checks.kl_consistent(E, S, N, L, eigval, 1.e-12)
    # Return either the scale dependent or independent KL transform
    if scale_dep:
        return rsh.bin_cl(E, bp)
    else:
        # ell-independent transform: average E over ell with (2*ell+1)
        # weights, starting at ell=2
        E_avg = np.zeros((len(E[0]), len(E[0])))
        den = np.array([(2.*x+1) for x in range(2, len(E))]).sum()
        for n in range(len(E[0])):
            for m in range(len(E[0])):
                num = np.array([(2.*x+1)*E[:, n][:, m][x]
                                for x in range(2, len(E))]).sum()
                E_avg[n][m] = num/den
        return E_avg
def apply_kl(kl_t, corr, settings):
    """ Apply the KL transform to the correlation function
    and reduce number of dimensions.
    Args:
        kl_t: KL transform (ell-dependent when settings['kl_scale_dep']).
        corr: correlation function
        settings: dictionary with 'kl_scale_dep', 'n_kl' and 'method'.
    Returns:
        KL transformed correlation function, truncated to the first
        n_kl modes (diagonal only when method is 'kl_diag').
    """
    kl_t_T = np.moveaxis(kl_t, [-1], [-2])
    # Apply KL transform from the left: E . corr
    corr_kl = np.dot(kl_t, corr)
    if settings['kl_scale_dep']:
        # Keep only matching-ell products of the ell-dependent transform
        corr_kl = np.diagonal(corr_kl, axis1=0, axis2=-2)
        corr_kl = np.moveaxis(corr_kl, [-1], [-2])
    # Apply KL transform from the right: ... . E^T
    corr_kl = np.dot(corr_kl, kl_t_T)
    if settings['kl_scale_dep']:
        # Again select matching-ell terms and restore the axis order
        corr_kl = np.diagonal(corr_kl, axis1=-3, axis2=-2)
        corr_kl = np.moveaxis(corr_kl, [-1], [-2])
        corr_kl = np.moveaxis(corr_kl, [0], [-2])
    # Reduce dimensions of the array: keep the first n_kl KL modes
    n_kl = settings['n_kl']
    corr_kl = np.moveaxis(corr_kl, [-2, -1], [0, 1])
    corr_kl = corr_kl[:n_kl, :n_kl]
    corr_kl = np.moveaxis(corr_kl, [0, 1], [-2, -1])
    # For the diagonal method keep only the auto-mode elements
    if settings['method'] == 'kl_diag':
        corr_kl = np.diagonal(corr_kl, axis1=-2, axis2=-1)
    return corr_kl
# ------------------- Covmat -------------------------------------------------#
def compute_inv_covmat(data, settings):
    """ Compute inverse covariance matrix.
    The per-field covariances (estimated from simulations) are combined
    with area weights A_CFHTlens/A_sims, masked, and debiased.
    Args:
        data: dictionary containing the data stored
        settings: dictionary with all the settings used
    Returns:
        array with the inverse covariance matrix.
    """
    # Local variables
    n_fields = settings['n_fields']
    n_sims = settings['n_sims']
    n_data = data['corr_sim'].shape[2]
    A_c = set.A_CFHTlens
    A_s = set.A_sims
    corr = data['corr_sim']
    # Compute inverse covariance matrix: per-field covariance from the
    # simulations, inverted and summed with area weights
    cov = np.empty((n_fields, n_data, n_data))
    inv_cov_tot = np.zeros((n_data, n_data))
    for nf in range(n_fields):
        cov[nf] = np.cov(corr[nf].T)
        if set.PINV:
            inv_cov_tot = inv_cov_tot + A_c[nf]/A_s[nf]*np.linalg.pinv(cov[nf])
        else:
            inv_cov_tot = inv_cov_tot + A_c[nf]/A_s[nf]*np.linalg.inv(cov[nf])
    # Invert back to a covariance and apply the (theta, ell) mask to
    # both rows and columns
    if set.PINV:
        cov_tot = np.linalg.pinv(inv_cov_tot)
    else:
        cov_tot = np.linalg.inv(inv_cov_tot)
    cov_tot = rsh.mask_xipm(cov_tot, data['mask_theta_ell'], settings)
    cov_tot = rsh.mask_xipm(cov_tot.T, data['mask_theta_ell'], settings)
    # Add overall normalization.
    # (n_sims-n_data-2)/(n_sims-1) debiases the inverse of a covariance
    # estimated from simulations (Hartlap-style factor -- TODO confirm
    # the intended reference).
    n_data_mask = cov_tot.shape[0]
    factor = (n_sims-n_data_mask-2.)/(n_sims-1.)
    if set.PINV:
        inv_cov_tot = factor*np.linalg.pinv(cov_tot)
    else:
        inv_cov_tot = factor*np.linalg.inv(cov_tot)
    return inv_cov_tot
# ------------------- Likelihood ---------------------------------------------#
def lnprior(var, full, mask):
    """
    Function containing the log-prior.

    Each row of ``full[mask]`` is (left, centre, right): rows with
    left != right are flat priors on [left, right]; rows with
    left == right are Gaussian priors with mean ``centre`` and standard
    deviation ``left``.

    Returns 0 per in-bounds flat parameter, -(x-mu)^2/(2 sigma^2) per
    Gaussian parameter, and -inf if any flat parameter is out of bounds.
    """
    # Split parameters into flat-prior and Gaussian-prior groups
    var_uni = var[full[mask][:, 0] != full[mask][:, 2]]
    var_gauss = var[full[mask][:, 0] == full[mask][:, 2]]
    uni = full[mask][full[mask][:, 0] != full[mask][:, 2]]
    gauss = full[mask][full[mask][:, 0] == full[mask][:, 2]]
    # All flat-prior parameters must lie within their bounds
    is_in = (uni[:, 0] <= var_uni).all()
    is_in = is_in*(var_uni <= uni[:, 2]).all()
    if is_in:
        lp = (var_gauss-gauss[:, 1])**2./2./gauss[:, 0]**2.
        # Bug fix: a Gaussian log-prior must be NEGATIVE. The previous code
        # returned +lp.sum(), which rewarded deviation from the prior mean.
        return -lp.sum()
    return -np.inf
def lnlike(var, full, mask, data, settings, save_loc=None):
    """
    Function containing the log-likelihood: a Gaussian chi-square between
    the observed data vector and the theory prediction, using the
    precomputed inverse covariance. If save_loc is given, the theory,
    observation and covariance are also written to text files there.
    """
    # Get theory, guarded by a SIGALRM timeout so a stuck Boltzmann/CCL
    # call cannot hang the sampler.
    # NOTE(review): SIGALRM only works on Unix main threads -- confirm
    # the sampler never calls this from a worker thread.
    import signal
    tmout = 600
    def handler(signum, frame):
        raise Exception()
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(tmout)
    try:
        th = cosmo_tools.get_theory(var, full, mask, data, settings)
    except Exception:
        # Treat a timed-out (or failed) theory call as zero likelihood
        print('Theory timeout with pars = ' + str(var))
        sys.stdout.flush()
        return -np.inf
    # except:
    #     print('Theory failure with pars = ' + str(var))
    #     sys.stdout.flush()
    #     return -np.inf
    # Theory succeeded: cancel the pending alarm
    signal.alarm(0)
    obs = data['corr_obs']
    icov = data['inv_cov_mat']
    if save_loc:
        # Dump diagnostics: bin centres, theory/observed Cl's, covariance
        z = np.array([(x[0]+x[1])/2. for x in set.Z_BINS])
        ell = np.array([(x[0]+x[1])/2. for x in set.BANDPOWERS[set.MASK_ELL]])
        np.savetxt(save_loc + 'z.txt', z)
        np.savetxt(save_loc + 'ell.txt', ell)
        np.savetxt(save_loc + 'cell_th.txt', th)
        np.savetxt(save_loc + 'cell_obs.txt', obs)
        np.savetxt(save_loc + 'cov_mat.txt', np.linalg.pinv(icov))
    # Get chi2 = (d-t)^T C^-1 (d-t)
    chi2 = (obs-th).dot(icov).dot(obs-th)
    return -chi2/2.
def lnprob(var, full, mask, data, settings):
    """
    Function containing the posterior: log-prior plus log-likelihood,
    short-circuiting to -inf when the prior already excludes the point.
    """
    prior = lnprior(var, full, mask)
    if np.isfinite(prior):
        # Only evaluate the (expensive) likelihood inside the prior volume
        return prior + lnlike(var, full, mask, data, settings)
    return -np.inf
def get_random(pars, squeeze):
    """
    Get random initial points.

    One value is drawn per parameter around its central value pars[i][1].
    The step is uniform in [-1/squeeze, 1/squeeze] and, when at least one
    bound (pars[i][0] or pars[i][2]) is finite, it is also scaled by the
    distance to the nearest bound so the point stays inside the interval.
    Missing bounds (None) count as +/- infinity.
    """
    rnd_pars = np.array([])
    for par in pars:
        # Replace missing bounds with infinities
        left = -np.inf if par[0] is None else par[0]
        right = np.inf if par[2] is None else par[2]
        # One uniform draw per parameter, scaled by 1/squeeze
        step = 2.*(np.random.rand()-.5)/squeeze
        if left == -np.inf and right == np.inf:
            rnd = par[1] + step
        else:
            rnd = par[1] + step*min(right-par[1], par[1]-left)
        rnd_pars = np.append(rnd_pars, rnd)
    return rnd_pars
| 9,782 | 27.438953 | 79 | py |
kl_sample | kl_sample-master/kl_sample/prep_fourier.py | """
This module contains the pipeline to prepare data in
fourier space for run. It should be used only once. Then
the data will be stored in the repository.
The only mandatory argument to run this module is the path
of an input folder. It should contain:
- cat_full.fits: full catalogue in fits format
- mask_arcsec_N.fits.gz: (N=1,..,4) 1 arcsec resolution masks for each field in
compressed fits format
- mask_url.txt: list of urls from which to download the public masks. They
will be used to remove the bad fields from mask_arcsec_N.fits.gz
- cls_bf.fits: theory cls used to calculate the simulations. TODO_EB: use ccl
or other theory to get them.
"""
import os
import sys
import re
import numpy as np
import urllib
import time
import gzip
import shutil
import pymaster as nmt
from astropy.io import fits
from astropy import wcs
import kl_sample.settings as set
import kl_sample.io as io
import kl_sample.prep_fourier_tools as tools
import matplotlib # TODO_EB
matplotlib.use('Agg')
import matplotlib.pyplot as plt # noqa: E402
def prep_fourier(args):
    """ Prepare data in fourier space.

    Args:
        args: the arguments read by the parser.

    Returns:
        saves to fits files file the output.

    """

    # ----------- Define local variables -------------------------------------#

    # Global variable collecting warnings
    warning = False
    # List of CFHTlens fields
    fields = set.FIELDS_CFHTLENS
    # List of redshift bins
    z_bins = set.Z_BINS
    # Size pixels masks in arcsecs (it has to be an integer number)
    size_pix = set.SIZE_PIX
    # Range of pixels used to average the multiplicative correction
    n_avg_m = set.N_AVG_M
    # Number of simulations used to calculate the covariance matrix
    n_sims_cov = set.N_SIMS_COV
    # Number of simulations used to calculate the noise
    n_sims_noise = set.N_SIMS_NOISE
    # Bandpowers to calculate Cl's
    bandpowers = set.BANDPOWERS
    # Do not decouple Cells with mcm
    keep_cells_coupled = set.KEEP_CELLS_COUPLED

    # ----------- Initialize paths -------------------------------------------#

    # Define absolute paths
    path = tools.get_io_paths(args, fields)

    # Determine which modules have to be run
    is_run, warning = tools.is_run_and_check(args, fields, path, n_sims_cov)

    def _timed(label, func, *fargs):
        """Run func(*fargs), print the elapsed time, return its warning.

        Factors out the start/stop/divmod/print boilerplate that was
        previously duplicated for every module.
        """
        start = time.time()
        warn = func(*fargs)
        end = time.time()
        hrs, rem = divmod(end-start, 3600)
        mins, secs = divmod(rem, 60)
        print('Run {} module in {:0>2} Hours {:0>2} Minutes {:05.2f}'
              ' Seconds!'.format(label, int(hrs), int(mins), secs))
        sys.stdout.flush()
        return warn

    # ----------- Run modules ------------------------------------------------#

    if is_run['mask']:
        warning = _timed('MASK', run_mask, path, fields, z_bins, size_pix,
                         args.remove_files, args.want_plots) or warning

    if is_run['mult']:
        warning = _timed('MULT_CORR', run_mult, path, fields, z_bins,
                         n_avg_m, args.want_plots) or warning

    if is_run['pz']:
        warning = _timed('PHOTO_Z', run_pz, path, fields, z_bins, size_pix,
                         args.want_plots) or warning

    if is_run['cat']:
        warning = _timed('CATALOGUE', run_cat, path, fields, z_bins) or warning

    if is_run['map']:
        warning = _timed('MAP', run_map, path, fields, z_bins,
                         args.want_plots) or warning

    if is_run['cl']:
        warning = _timed('CL', run_cl, path, fields, z_bins,
                         bandpowers, n_sims_noise, keep_cells_coupled,
                         args.want_plots) or warning

    if is_run['cat_sims']:
        warning = _timed('CAT_SIMS', run_cat_sims, path, fields, z_bins,
                         n_sims_cov) or warning

    if is_run['cl_sims']:
        warning = _timed('CL_SIMS', run_cl_sims, path, fields, z_bins,
                         bandpowers, keep_cells_coupled) or warning

    # Collect everything in a single file
    warning = _timed('FINAL', run_final, path, fields, z_bins,
                     bandpowers) or warning

    if warning:
        print('Done! However something went unexpectedly! '
              'Check your warnings!')
        sys.stdout.flush()
    else:
        print('Success!!')
        sys.stdout.flush()

    return
# ------------------- Function to calculate the mask -------------------------#
def run_mask(path, fields, z_bins, size_pix, remove_files, want_plots):
    """
    Build the degraded pixel masks for each CFHTLenS field.

    For every field this writes to path['mask_<field>']:
      - MASK_NOW_<field>: fraction of good 1-arcsec pixels inside each
        degraded pixel of side size_pix arcsec, with the bad fields
        (listed in path['mask_url']) and their galaxies zeroed out;
      - MASK_<field>_Z<n>: one mask per redshift bin, weighting each
        pixel by the summed lensfit weights of the galaxies it contains.

    Args:
        path: dict of absolute I/O paths (see tools.get_io_paths).
        fields: list of CFHTLenS field names.
        z_bins: list of (z_min, z_max) redshift bins.
        size_pix: degraded pixel size in arcsec (integer).
        remove_files: if True, delete downloaded bad-field masks after use.
        want_plots: if True, save pdf plots of the masks.

    Returns:
        True on any fatal input problem (missing key/header), otherwise
        the accumulated warning flag from the fits writes.
    """
    print('Running MASK module')
    sys.stdout.flush()
    warning = False
    # Read galaxy catalogue
    tabname = 'data'
    fname = path['cat_full']
    try:
        cat = io.read_from_fits(fname, tabname)
    except KeyError:
        print('WARNING: No key {} in {}. Skipping '
              'calculation!'.format(tabname, fname))
        sys.stdout.flush()
        return True
    # Check that the table has the correct columns
    table_keys = ['ALPHA_J2000', 'DELTA_J2000', 'id']
    for key in table_keys:
        if key not in cat.columns.names:
            print('WARNING: No key {} in table of {}. Skipping '
                  'calculation!'.format(key, fname))
            sys.stdout.flush()
            return True
    # First loop: scan over the fields and generate new maps
    for f in fields:
        # Check existence of files and in case skip
        # (mnow/mzs are only evaluated when the file exists, thanks to
        # the short-circuit of 'and' below)
        file_exists = os.path.exists(path['mask_'+f])
        if file_exists is True:
            keys = io.get_keys_from_fits(path['mask_'+f])
            mnow = 'MASK_NOW_'+f in keys
            mzs = all(['MASK_{}_Z{}'.format(f, x+1) in keys
                       for x in range(len(z_bins))])
        if file_exists is True and mnow is True and mzs is True:
            print('----> Skipping MASK calculation for field {}. Output file '
                  'already there!'.format(f))
            sys.stdout.flush()
            continue
        print('Calculating mask for field {}:'.format(f))
        sys.stdout.flush()
        # Remove old output file to avoid confusion
        try:
            os.remove(path['mask_'+f])
        except FileNotFoundError:
            pass
        # Read urls where to find coordinates of the bad fields.
        # A url is kept only if it belongs to this field (cond1) and is
        # not matched by any of the known good-fit patterns (cond2).
        urls = []
        with open(path['mask_url'], 'r') as fn:
            for line in fn:
                cond1 = re.match('.+'+f+'.+finalmask_mosaic.fits', line)
                cond2 = not(np.array([re.match('.+'+x+'.+', line)
                                      for x in set.good_fit_patterns])).any()
                if cond1 and cond2:
                    urls.append(line.rstrip())
        # Read mask data
        imname = 'primary'
        fname = path['mask_sec_'+f]
        try:
            mask_sec = io.read_from_fits(fname, imname).astype(np.uint16)
        except KeyError:
            print('WARNING: No key {} in {}. Skipping '
                  'calculation!'.format(imname, fname))
            sys.stdout.flush()
            return True
        # Read mask header and check necessary keys
        try:
            hd_sec = io.read_header_from_fits(fname, imname)
        except KeyError:
            print('WARNING: No header in {}. Skipping '
                  'calculation!'.format(fname))
            sys.stdout.flush()
            return True
        for key in ['CRPIX1', 'CRPIX2', 'CD1_1', 'CD2_2', 'CRVAL1',
                    'CRVAL2', 'CTYPE1', 'CTYPE2']:
            if key not in list(hd_sec.keys()):
                print('WARNING: No key {} in {}. Skipping '
                      'calculation!'.format(key, fname))
                sys.stdout.flush()
                return True
        # Convert mask to boolean (1 = good pixel, 0 = masked)
        mask_sec = 1 - np.array(mask_sec, dtype=bool).astype(np.int8)
        # Determine how many pixels should be grouped together
        # in the degraded mask (CD1_1 is in deg/pix, size_pix in arcsec)
        dim_ratio = int(np.round(abs(size_pix/(hd_sec['CD1_1']*60.**2))))
        cond1 = abs(dim_ratio/abs(size_pix/(hd_sec['CD1_1']*60.**2))-1) > 1e-6
        cond2 = abs(dim_ratio/abs(size_pix/(hd_sec['CD2_2']*60.**2))-1) > 1e-6
        if cond1 or cond2:
            print('WARNING: Invalid pixel dimensions. Skipping calculation!')
            sys.stdout.flush()
            return True
        # Calculate how many pixels should be added to the original mask
        # so that its shape is an exact multiple of dim_ratio
        div1, mod1 = np.divmod(mask_sec.shape[0], dim_ratio)
        div2, mod2 = np.divmod(mask_sec.shape[1], dim_ratio)
        if mod1 == 0:
            x1 = div1
        else:
            x1 = div1 + 1
        if mod2 == 0:
            x2 = div2
        else:
            x2 = div2 + 1
        start1 = int(np.round((x1*dim_ratio - mask_sec.shape[0])/2.))
        start2 = int(np.round((x2*dim_ratio - mask_sec.shape[1])/2.))
        end1 = start1 + mask_sec.shape[0]
        end2 = start2 + mask_sec.shape[1]
        # Add borders to the mask (padding is zero, i.e. masked)
        mask_ext = np.zeros((x1*dim_ratio, x2*dim_ratio), dtype=np.int8)
        mask_ext[start1:end1, start2:end2] = mask_sec
        # Calculate new mask: each degraded pixel is the average of its
        # dim_ratio x dim_ratio block (the good-pixel fraction)
        mask = np.zeros((x1, x2))
        for count1 in range(x1):
            for count2 in range(x2):
                s1 = count1*dim_ratio
                s2 = count2*dim_ratio
                new_pix = \
                    mask_ext[s1:s1+dim_ratio, s2:s2+dim_ratio].astype(float)
                mask[count1, count2] = np.average(new_pix)
        # Create header for the degraded mask
        # NOTE(review): CRPIX1 is shifted by start1 and CRPIX2 by start2;
        # this assumes axis1/axis2 of the fits map line up with the
        # numpy shape[0]/shape[1] padding — confirm against the inputs.
        w = wcs.WCS(naxis=2)
        w.wcs.crpix = np.array([start1+hd_sec['CRPIX1'],
                                start2+hd_sec['CRPIX2']])/dim_ratio
        w.wcs.cdelt = np.array([hd_sec['CD1_1'], hd_sec['CD2_2']])*dim_ratio
        w.wcs.crval = np.array([hd_sec['CRVAL1'], hd_sec['CRVAL2']])
        w.wcs.ctype = [hd_sec['CTYPE1'], hd_sec['CTYPE2']]
        hd = w.to_header()
        # Print message
        print('----> Degraded mask for {}. Now I will remove the bad '
              'fields!'.format(f))
        sys.stdout.flush()
        # Remove bad fields from mask: download each public bad-field
        # mask, map its flagged pixels to the degraded grid and zero them
        imname = 'primary'
        for nurl, url in enumerate(urls):
            badname = os.path.join(path['badfields'], os.path.split(url)[1])
            # Get the file if it is not there
            if not(os.path.exists(badname) or os.path.exists(badname+'.gz')):
                urllib.request.urlretrieve(url, badname)
            # Compress file (keep only the .gz copy on disk)
            if os.path.exists(badname):
                with open(badname, 'rb') as f_in:
                    with gzip.open(badname+'.gz', 'wb') as f_out:
                        shutil.copyfileobj(f_in, f_out)
                if os.path.exists(badname+'.gz'):
                    try:
                        os.remove(badname)
                    except FileNotFoundError:
                        pass
            badname = badname + '.gz'
            # Read the mask
            mask_bad = io.read_from_fits(badname, imname).astype(np.int16)
            hd_bad = io.read_header_from_fits(badname, imname)
            w_bad = wcs.WCS(hd_bad)
            # Find pixels inside the field, processing the rows in
            # n_arrs horizontal stripes to limit peak memory
            n_arrs = 7  # This is just a trick to make the problem tractable
            div = np.divide(mask_bad.shape[0], n_arrs)
            starts = np.array([x*div for x in range(n_arrs+1)], dtype=np.int32)
            for start, end in np.vstack((starts[:-1], starts[1:])).T:
                # Pixels with value < 8192 are considered flagged here
                pos_bad = (start, 0) + np.stack(np.where(
                    mask_bad[start:end] < 8192), axis=-1).astype(np.int32)
                pos_bad = np.flip(pos_bad, axis=1)  # Invert the columns
                pos_bad = w_bad.wcs_pix2world(pos_bad, 0).astype(np.float32)
                pos_bad = np.around(
                    w.wcs_world2pix(pos_bad, 0)).astype(np.int32)
                pos_bad = np.flip(pos_bad, axis=1)  # Invert the columns
                pos_bad = np.unique(pos_bad, axis=0)
                mask[pos_bad[:, 0], pos_bad[:, 1]] = 0
            # Print message
            print('----> Removed bad field {} from {} mask! ({}/{})'
                  ''.format(os.path.split(url)[1], f, nurl+1, len(urls)))
            sys.stdout.flush()
            # Remove file to save space
            if remove_files:
                os.remove(badname)
        # Remove bad galaxies from mask: zero every pixel containing a
        # galaxy whose id prefix belongs to a bad field
        bad_fields = [x.split('_')[0].split('/')[-1] for x in urls]
        filter = np.array([x[:6] in bad_fields for x in cat['id']])
        pos = np.vstack(
            (cat['ALPHA_J2000'][filter], cat['DELTA_J2000'][filter])).T
        # Calculate Pixel position of each galaxy
        pos = w.wcs_world2pix(pos, 0).astype(int)
        pos = np.flip(pos, axis=1)  # Need to invert the columns
        # Pixels where at least one galaxy has been found
        pos = np.unique(pos, axis=0)
        mask[pos[:, 0], pos[:, 1]] = 0
        print('----> Removed galaxies in bad fields for {}!'.format(f))
        sys.stdout.flush()
        # Save to file the mask
        name = 'MASK_NOW_{}'.format(f)
        warning = io.write_to_fits(
            path['mask_'+f], mask, name, header=hd, type='image') or warning
        # Generate plots
        if want_plots:
            plt.imshow(mask, interpolation='nearest')
            plt.colorbar()
            plt.savefig('{}/mask_now_{}.pdf'.format(path['plots'], f))
            plt.close()
        # Second loop: calculate mask of weights (per redshift bin);
        # the geometry mask is binarized before being applied
        mask = mask.astype(bool).astype(np.int8)
        for n_z_bin, z_bin in enumerate(z_bins):
            # Create an empty array for the weight mask
            weights_mask = np.zeros(mask.shape)
            # Filter galaxies
            sel = set.filter_galaxies(cat, z_bin[0], z_bin[1], field=f)
            gals = cat[sel]
            # Get World position of each galaxy
            pos = np.vstack((gals['ALPHA_J2000'], gals['DELTA_J2000'])).T
            # Calculate Pixel position of each galaxy
            pos = w.wcs_world2pix(pos, 0).astype(int)
            pos = np.flip(pos, axis=1)  # Need to invert the columns
            # Pixels where at least one galaxy has been found
            pos_unique = np.unique(pos, axis=0)
            # Scan over the populated pixels
            for count, pix in enumerate(pos_unique):
                # Select galaxies in range of pixels
                sel = pos[:, 0] == pix[0]
                sel = (pos[:, 1] == pix[1])*sel
                weight = gals[sel]['weight']
                weights_mask[tuple(pix)] = np.sum(weight)
            # Get final mask
            weights_mask = mask*weights_mask
            # Save to file the mask
            name = 'MASK_{}_Z{}'.format(f, n_z_bin+1)
            warning = io.write_to_fits(
                path['mask_'+f], weights_mask, name,
                header=hd, type='image') or warning
            print('----> Created mask for field {} and '
                  'bin {}'.format(f, n_z_bin+1))
            sys.stdout.flush()
            # Generate plots
            if want_plots:
                plt.imshow(weights_mask, interpolation='nearest')
                plt.colorbar()
                plt.savefig('{}/mask_{}_z{}.pdf'
                            ''.format(path['plots'], f, n_z_bin+1))
                plt.close()
        io.print_info_fits(path['mask_'+f])
    return warning
# ------------------- Function to calculate the multiplicative correction ----#
def run_mult(path, fields, z_bins, n_avg_m, want_plots):
    """
    Build the multiplicative-correction maps for each field and bin.

    For every populated pixel, the correction is the lensfit-weighted
    average of the 'm' column over a window of (2*n_avg_m+1)^2 pixels
    centred on the pixel (clipped at the map borders). One image
    MULT_CORR_<field>_Z<n> per redshift bin is written to path['m_<field>'].

    Args:
        path: dict of absolute I/O paths.
        fields: list of CFHTLenS field names.
        z_bins: list of (z_min, z_max) redshift bins.
        n_avg_m: half-width (in pixels) of the averaging window.
        want_plots: if True, save pdf plots of the correction maps.

    Returns:
        True on any fatal input problem, otherwise the accumulated
        warning flag from the fits writes.
    """
    print('Running MULT_CORR module')
    sys.stdout.flush()
    warning = False
    # Read galaxy catalogue
    tabname = 'data'
    fname = path['cat_full']
    try:
        cat = io.read_from_fits(fname, tabname)
    except KeyError:
        print('WARNING: No key {} in {}. Skipping '
              'calculation!'.format(tabname, fname))
        sys.stdout.flush()
        return True
    # Check that the table has the correct columns
    table_keys = ['ALPHA_J2000', 'DELTA_J2000', 'm', 'weight', 'id', 'Z_B',
                  'MASK', 'star_flag']
    for key in table_keys:
        if key not in cat.columns.names:
            print('WARNING: No key {} in table of {}. Skipping '
                  'calculation!'.format(key, fname))
            sys.stdout.flush()
            return True
    # First loop: scan over the fields
    for f in fields:
        # Check existence of files and in case skip
        # (mzs is only evaluated when the file exists, thanks to the
        # short-circuit of 'and' below)
        file_exists = os.path.exists(path['m_'+f])
        if file_exists is True:
            keys = io.get_keys_from_fits(path['m_'+f])
            mzs = all(['MULT_CORR_{}_Z{}'.format(f, x+1) in keys
                       for x in range(len(z_bins))])
        if file_exists is True and mzs is True:
            print('----> Skipping MULT_CORR calculation for field {}. Output '
                  'file already there!'.format(f))
            sys.stdout.flush()
            continue
        # Remove old output file to avoid confusion
        try:
            os.remove(path['m_'+f])
        except FileNotFoundError:
            pass
        # Second loop: divide galaxies in redshift bins
        for n_z_bin, z_bin in enumerate(z_bins):
            print('Calculating multiplicative correction for field'
                  ' {} and bin {}:'.format(f, n_z_bin+1))
            sys.stdout.flush()
            # Read mask and create WCS object
            imname = 'MASK_{}_Z{}'.format(f, n_z_bin+1)
            fname = path['mask_'+f]
            try:
                mask = io.read_from_fits(fname, imname)
                hd = io.read_header_from_fits(fname, imname)
            except KeyError:
                print('WARNING: No key {} in {}. Skipping '
                      'calculation!'.format(imname, fname))
                sys.stdout.flush()
                return True
            # Create a new WCS object
            w = wcs.WCS(hd)
            # Create an empty array for the multiplicative correction
            mult_corr = np.zeros(mask.shape)
            # Filter galaxies
            filter = set.filter_galaxies(cat, z_bin[0], z_bin[1], field=f)
            gals = cat[filter]
            # Get World position of each galaxy
            pos = np.vstack((gals['ALPHA_J2000'], gals['DELTA_J2000'])).T
            # Calculate Pixel position of each galaxy
            pos = w.wcs_world2pix(pos, 0).astype(int)
            pos = np.flip(pos, axis=1)  # Need to invert the columns
            # Pixels where at least one galaxy has been found
            pos_unique = np.unique(pos, axis=0)
            # Scan over the populated pixels
            for count, pix in enumerate(pos_unique):
                # Calculate range of pixels to average; near the borders
                # the window is shifted inwards so it always spans
                # (2*n_avg_m + 1) pixels per axis
                if pix[0] - n_avg_m < 0:
                    s1 = 0
                elif pix[0] + n_avg_m >= mult_corr.shape[0]:
                    s1 = mult_corr.shape[0]-(2*n_avg_m + 1)
                else:
                    s1 = pix[0] - n_avg_m
                if pix[1] - n_avg_m < 0:
                    s2 = 0
                elif pix[1] + n_avg_m >= mult_corr.shape[1]:
                    s2 = mult_corr.shape[1] - (2*n_avg_m + 1)
                else:
                    s2 = pix[1] - n_avg_m
                # Select galaxies in range of pixels
                sel = pos[:, 0] >= s1
                sel = (pos[:, 0] < s1 + 2*n_avg_m + 1)*sel
                sel = (pos[:, 1] >= s2)*sel
                sel = (pos[:, 1] < s2 + 2*n_avg_m + 1)*sel
                m = gals[sel]['m']
                weight = gals[sel]['weight']
                mult_corr[tuple(pix)] = np.average(m, weights=weight)
                # Print message every some step
                if (count + 1) % 1e3 == 0:
                    print('----> Done {0:5.1%} of the pixels ({1:d})'
                          ''.format(float(count+1)/len(pos_unique),
                                    len(pos_unique)))
                    sys.stdout.flush()
            # Save to file the map
            name = 'MULT_CORR_{}_Z{}'.format(f, n_z_bin+1)
            warning = io.write_to_fits(path['m_'+f], mult_corr, name,
                                       header=hd, type='image') or warning
            # Generate plots
            if want_plots:
                plt.imshow(mult_corr, interpolation='nearest')
                plt.colorbar()
                plt.savefig('{}/mult_corr_{}_z{}.pdf'
                            ''.format(path['plots'], f, n_z_bin+1))
                plt.close()
        io.print_info_fits(path['m_' + f])
    return warning
# ------------------- Function to calculate the photo_z ----------------------#
def run_pz(path, fields, z_bins, size_pix, want_plots):
    """
    Compute photo-z distributions and noise properties per redshift bin.

    For each bin (globally and per field) this computes the effective
    area, the effective galaxy number density (n_eff), the shape noise
    (sigma_g) and the weight-averaged photo-z distribution, and stores
    everything in path['photo_z'].

    Args:
        path: dict of absolute I/O paths.
        fields: list of CFHTLenS field names.
        z_bins: list of (z_min, z_max) redshift bins.
        size_pix: degraded pixel size in arcsec (used for the area).
        want_plots: if True, save pdf plots of the distributions.

    Returns:
        True on any fatal input problem, otherwise the accumulated
        warning flag from the fits writes.
    """
    print('Running PHOTO_Z module')
    sys.stdout.flush()
    warning = False
    # Remove old output file to avoid confusion
    try:
        os.remove(path['photo_z'])
    except FileNotFoundError:
        pass
    # Read mask
    mask = {}
    w = {}
    for f in fields:
        # Read mask and create WCS object
        imname = 'MASK_NOW_{}'.format(f)
        fname = path['mask_'+f]
        try:
            mask[f] = io.read_from_fits(fname, imname)
            hd = io.read_header_from_fits(fname, imname)
        except KeyError:
            print('WARNING: No key {} in {}. Skipping '
                  'calculation!'.format(imname, fname))
            sys.stdout.flush()
            return True
        # Create a new WCS object
        w[f] = wcs.WCS(hd)
    # Read galaxy catalogue
    tabname = 'data'
    imname = 'pz_full'
    fname = path['cat_full']
    try:
        cat = io.read_from_fits(fname, tabname)
    except KeyError:
        print('WARNING: No key {} in {}. Skipping '
              'calculation!'.format(tabname, fname))
        sys.stdout.flush()
        return True
    try:
        pz_full = io.read_from_fits(fname, imname)
    except KeyError:
        print('WARNING: No key {} in {}. Skipping '
              'calculation!'.format(imname, fname))
        sys.stdout.flush()
        return True
    # Check that the table has the correct columns
    table_keys = ['ALPHA_J2000', 'DELTA_J2000', 'm', 'weight', 'id', 'Z_B',
                  'MASK', 'star_flag']
    for key in table_keys:
        if key not in cat.columns.names:
            print('WARNING: No key {} in table of {}. Skipping '
                  'calculation!'.format(key, fname))
            sys.stdout.flush()
            return True
    # Read multiplicative corrections
    m = {}
    for f in fields:
        m[f] = {}
        fname = path['m_'+f]
        for n_z_bin, z_bin in enumerate(z_bins):
            imname = 'MULT_CORR_{}_Z{}'.format(f, n_z_bin+1)
            try:
                m[f][n_z_bin] = io.read_from_fits(fname, imname)
            except KeyError:
                print('WARNING: No key {} in {}. Skipping '
                      'calculation!'.format(imname, fname))
                sys.stdout.flush()
                return True
    # Create filters for each bin and field, switching off galaxies
    # that fall in masked pixels
    filter = {}
    for f in fields:
        filter[f] = {}
        for n_z_bin, z_bin in enumerate(z_bins):
            filt = set.filter_galaxies(cat, z_bin[0], z_bin[1], field=f)
            pix = np.transpose(
                [cat[filt]['ALPHA_J2000'], cat[filt]['DELTA_J2000']])
            pix = w[f].wcs_world2pix(pix, 0).astype(int)
            masked = np.where(
                np.array([mask[f][iy, ix] for ix, iy in pix]) <= 0)[0]
            # BUGFIX: 'filt[filt][masked] = False' assigned into the
            # temporary copy created by boolean fancy indexing, leaving
            # 'filt' unchanged (masked galaxies were never removed).
            # Map the positions of the masked galaxies back to indices
            # of the full catalogue and switch them off there.
            true_idx = np.flatnonzero(filt)
            filt[true_idx[masked]] = False
            filter[f][n_z_bin] = filt
    # Print progress message
    print('----> Created filters!')
    sys.stdout.flush()
    # Correct ellipticities with the multiplicative correction of the
    # pixel each galaxy falls in (m_corr stays 0 outside all filters)
    m_corr = np.zeros(len(cat))
    for f in fields:
        for n_z_bin, z_bin in enumerate(z_bins):
            filt = filter[f][n_z_bin]
            pix = np.transpose(
                [cat[filt]['ALPHA_J2000'], cat[filt]['DELTA_J2000']])
            pix = w[f].wcs_world2pix(pix, 0).astype(int)
            m_corr[filt] = np.array([m[f][n_z_bin][iy, ix] for ix, iy in pix])
    cat['e1'] = cat['e1']/(1+m_corr)
    cat['e2'] = (cat['e2']-cat['c2'])/(1+m_corr)
    # Print progress message
    print('----> Corrected ellipticities!')
    sys.stdout.flush()

    # Useful functions (area, n_eff, sigma_g)
    def get_area(fields, mask=mask, size_pix=size_pix):
        """Total unmasked area in arcmin^2 over the given fields."""
        area = 0.
        for f in fields:
            area += mask[f].sum()*(size_pix/60.)**2.
        return area

    def get_n_eff(cat, area):
        """Effective number density: (sum w)^2 / (sum w^2) / area."""
        wsum2 = (cat['weight'].sum())**2
        w2sum = (cat['weight']**2).sum()
        return wsum2/w2sum/area

    def get_sigma_g(cat):
        """Weighted rms ellipticity per component."""
        w2 = cat['weight']**2
        sg = np.dot(w2, (cat['e1']**2. + cat['e2']**2.)/2.)/w2.sum()
        return sg**0.5

    # Initialize quantities (row 0 of photo_z holds the z sampling)
    n_eff = np.zeros(len(z_bins))
    area = np.zeros(len(z_bins))
    sigma_g = np.zeros(len(z_bins))
    photo_z = np.zeros((len(z_bins)+1, len(pz_full[0])))
    photo_z[0] = (np.arange(len(pz_full[0]))+1./2.)*set.dZ_CFHTlens
    n_eff_f = np.zeros((len(fields), len(z_bins)))
    area_f = np.zeros((len(fields), len(z_bins)))
    sigma_g_f = np.zeros((len(fields), len(z_bins)))
    photo_z_f = np.zeros((len(fields), len(z_bins) + 1, len(pz_full[0])))
    for count in range(len(fields)):
        photo_z_f[count, 0] = \
            (np.arange(len(pz_full[0])) + 1./2.)*set.dZ_CFHTlens
    # First loop: scan over redshift bins
    for n_z_bin, z_bin in enumerate(z_bins):
        # Merge filters (logical OR over the fields)
        sel = np.zeros(len(cat), dtype=bool)
        for f in fields:
            sel += filter[f][n_z_bin]
        # Filter galaxies
        gals = cat[sel]
        pz_z = pz_full[sel]
        # Get n_eff
        area[n_z_bin] = get_area(fields)
        n_eff[n_z_bin] = get_n_eff(gals, area[n_z_bin])
        # Get sigma_g
        sigma_g[n_z_bin] = get_sigma_g(gals)
        # Get photo_z
        photo_z[n_z_bin+1] = np.average(pz_z, weights=gals['weight'], axis=0)
        # Second loop: scan over fields
        for count, f in enumerate(fields):
            # Filter galaxies
            gals = cat[filter[f][n_z_bin]]
            pz_z = pz_full[filter[f][n_z_bin]]
            # Get n_eff
            area_f[count, n_z_bin] = get_area([f])
            n_eff_f[count, n_z_bin] = get_n_eff(gals, area_f[count, n_z_bin])
            # Get sigma_g
            sigma_g_f[count, n_z_bin] = get_sigma_g(gals)
            # Get photo_z
            photo_z_f[count, n_z_bin+1] = \
                np.average(pz_z, weights=gals['weight'], axis=0)
        # Print progress message
        print('----> Completed bin {}'.format(n_z_bin+1))
        sys.stdout.flush()
    # Save to file the results
    warning = io.write_to_fits(path['photo_z'], photo_z, 'PHOTO_Z',
                               type='image') or warning
    warning = io.write_to_fits(path['photo_z'], n_eff, 'N_EFF',
                               type='image') or warning
    warning = io.write_to_fits(path['photo_z'], sigma_g, 'SIGMA_G',
                               type='image') or warning
    warning = io.write_to_fits(path['photo_z'], photo_z_f, 'PHOTO_Z_PF',
                               type='image') or warning
    warning = io.write_to_fits(path['photo_z'], n_eff_f, 'N_EFF_PF',
                               type='image') or warning
    warning = io.write_to_fits(path['photo_z'], sigma_g_f, 'SIGMA_G_PF',
                               type='image') or warning
    warning = io.write_to_fits(path['photo_z'], area, 'AREA',
                               type='image') or warning
    warning = io.write_to_fits(path['photo_z'], area_f, 'AREA_PF',
                               type='image') or warning
    # Generate plots
    if want_plots:
        x = photo_z[0]
        for count in range(1, len(photo_z)):
            y = photo_z[count]
            plt.plot(x, y, label='Bin ' + str(count))
        plt.xlim(0., 2.)
        plt.xlabel('$z$', fontsize=14)
        plt.ylabel('Probability distribution', fontsize=14)
        plt.title('Photo-z')
        plt.legend(loc="upper right", frameon=False, fontsize=9,
                   labelspacing=0.01)
        plt.savefig('{}/photo_z.pdf'.format(path['plots']))
        plt.close()
        for n_f, f in enumerate(fields):
            x = photo_z_f[n_f, 0]
            for count in range(1, photo_z_f.shape[1]):
                y = photo_z_f[n_f, count]
                plt.plot(x, y, label='Bin ' + str(count))
            plt.xlim(0., 2.)
            plt.xlabel('$z$', fontsize=14)
            plt.ylabel('Probability distribution', fontsize=14)
            plt.title('Photo-z {}'.format(f))
            plt.legend(loc="upper right", frameon=False, fontsize=9,
                       labelspacing=0.01)
            plt.savefig('{}/photo_z_{}.pdf'.format(path['plots'], f))
            plt.close()
    io.print_info_fits(path['photo_z'])
    return warning
# ------------------- Function to calculate the clean catalogue --------------#
def run_cat(path, fields, z_bins):
    """
    Build the per-field, per-bin clean shear catalogues.

    Galaxies are selected per field and redshift bin, their
    ellipticities are corrected with the local multiplicative
    correction map, and the result is written as one table
    CAT_<field>_Z<n> per bin to path['cat_<field>'].

    Args:
        path: dict of absolute I/O paths.
        fields: list of CFHTLenS field names.
        z_bins: list of (z_min, z_max) redshift bins.

    Returns:
        True on any fatal input problem, otherwise the accumulated
        warning flag from the fits writes.
    """
    print('Running CATALOGUE module')
    sys.stdout.flush()
    warning = False
    # Read galaxy catalogue
    tabname = 'data'
    fname = path['cat_full']
    try:
        cat = io.read_from_fits(fname, tabname)
    except KeyError:
        print('WARNING: No key {} in {}. Skipping '
              'calculation!'.format(tabname, fname))
        sys.stdout.flush()
        return True
    # Check that the table has the correct columns
    table_keys = ['ALPHA_J2000', 'DELTA_J2000', 'e1', 'e2', 'c2', 'weight',
                  'id', 'Z_B', 'MASK', 'star_flag']
    for key in table_keys:
        if key not in cat.columns.names:
            print('WARNING: No key {} in table of {}. Skipping '
                  'calculation!'.format(key, fname))
            sys.stdout.flush()
            return True
    # First loop: scan over the fields
    for f in fields:
        # Check existence of files and in case skip
        file_exists = os.path.exists(path['cat_'+f])
        if file_exists is True:
            keys = io.get_keys_from_fits(path['cat_'+f])
            mzs = all(['CAT_{}_Z{}'.format(f, x+1) in keys
                       for x in range(len(z_bins))])
        if file_exists is True and mzs is True:
            print('----> Skipping CATALOGUE calculation for field {}. Output '
                  'file already there!'.format(f))
            sys.stdout.flush()
            continue
        # Remove old output file to avoid confusion
        try:
            os.remove(path['cat_'+f])
        except FileNotFoundError:
            pass
        # Second loop: divide galaxies in redshift bins
        for n_z_bin, z_bin in enumerate(z_bins):
            print('Calculating catalogue for field {} and bin {}:'
                  ''.format(f, n_z_bin+1))
            sys.stdout.flush()
            # Read multiplicative corrections
            fname = path['m_'+f]
            imname = 'MULT_CORR_{}_Z{}'.format(f, n_z_bin+1)
            try:
                m = io.read_from_fits(fname, imname)
                hd = io.read_header_from_fits(fname, imname)
            except KeyError:
                print('WARNING: No key {} in {}. Skipping '
                      'calculation!'.format(imname, fname))
                sys.stdout.flush()
                return True
            # Create a new WCS object
            w = wcs.WCS(hd)
            # Filter galaxies
            filter = set.filter_galaxies(cat, z_bin[0], z_bin[1], field=f)
            gals = cat[filter]
            # Look up the multiplicative correction of the pixel each
            # galaxy falls in. Vectorized: the previous implementation
            # made one Python-level wcs_world2pix call per galaxy,
            # which was extremely slow on large catalogues.
            if len(gals) > 0:
                pos = np.vstack((gals['ALPHA_J2000'], gals['DELTA_J2000'])).T
                pix = w.wcs_world2pix(pos, 0).astype(int)
                pix = np.flip(pix, axis=1)  # Need to invert the columns
                m_corr = m[pix[:, 0], pix[:, 1]]
            else:
                m_corr = np.zeros(0)
            # Ellipticities
            gals['e1'] = gals['e1']/(1+m_corr)
            gals['e2'] = (gals['e2']-gals['c2'])/(1+m_corr)
            # Create Table and save it
            table_keys = ['ALPHA_J2000', 'DELTA_J2000', 'e1', 'e2', 'weight']
            columns = []
            for key in table_keys:
                columns.append(
                    fits.Column(name=key, array=gals[key], format='E'))
            name = 'CAT_{}_Z{}'.format(f, n_z_bin+1)
            gals = fits.BinTableHDU.from_columns(columns, name=name)
            warning = io.write_to_fits(path['cat_'+f], gals, name,
                                       type='table') or warning
    io.print_info_fits(path['cat_'+f])
    return warning
# ------------------- Function to calculate the map --------------------------#
def run_map(path, fields, z_bins, want_plots):
    """
    Build the shear maps (G1, G2 components) for each field and bin.

    Reads the per-bin weight masks and clean catalogues and writes the
    images MAP_<field>_Z<n>_G1 and MAP_<field>_Z<n>_G2 to
    path['map_<field>'].

    Args:
        path: dict of absolute I/O paths.
        fields: list of CFHTLenS field names.
        z_bins: list of (z_min, z_max) redshift bins.
        want_plots: if True, save pdf plots of the maps.

    Returns:
        True on any fatal input problem, otherwise the accumulated
        warning flag from the fits writes.
    """
    print('Running MAP module')
    sys.stdout.flush()
    warning = False
    # First loop: scan over the fields
    for f in fields:
        # Check existence of files and in case skip
        file_exists = os.path.exists(path['map_'+f])
        if file_exists is True:
            keys = io.get_keys_from_fits(path['map_'+f])
            # BUGFIX: the extensions written below are named 'MAP_*',
            # but this check previously looked for 'CAT_*' keys, so the
            # skip branch could never trigger and the maps were always
            # recomputed.
            mzs1 = all(['MAP_{}_Z{}_G1'.format(f, x+1) in keys
                        for x in range(len(z_bins))])
            mzs2 = all(['MAP_{}_Z{}_G2'.format(f, x+1) in keys
                        for x in range(len(z_bins))])
        if file_exists is True and mzs1 is True and mzs2 is True:
            print('----> Skipping MAP calculation for field {}. Output '
                  'file already there!'.format(f))
            sys.stdout.flush()
            continue
        # Remove old output file to avoid confusion
        try:
            os.remove(path['map_'+f])
        except FileNotFoundError:
            pass
        # Second loop: divide galaxies in redshift bins
        for n_z_bin, z_bin in enumerate(z_bins):
            print('Calculating map for field {} and bin {}:'
                  ''.format(f, n_z_bin+1))
            sys.stdout.flush()
            # Read mask and create WCS object
            imname = 'MASK_{}_Z{}'.format(f, n_z_bin+1)
            fname = path['mask_'+f]
            try:
                mask = io.read_from_fits(fname, imname)
                hd = io.read_header_from_fits(fname, imname)
            except KeyError:
                print('WARNING: No key {} in {}. Skipping '
                      'calculation!'.format(imname, fname))
                sys.stdout.flush()
                return True
            # Create a new WCS object
            w = wcs.WCS(hd)
            # Read galaxy catalogue
            tabname = 'CAT_{}_Z{}'.format(f, n_z_bin+1)
            fname = path['cat_'+f]
            try:
                cat = io.read_from_fits(fname, tabname)
            except KeyError:
                print('WARNING: No key {} in {}. Skipping '
                      'calculation!'.format(tabname, fname))
                sys.stdout.flush()
                return True
            # Check that the table has the correct columns
            table_keys = ['ALPHA_J2000', 'DELTA_J2000', 'e1', 'e2', 'weight']
            for key in table_keys:
                if key not in cat.columns.names:
                    print('WARNING: No key {} in table of {}. Skipping '
                          'calculation!'.format(key, fname))
                    sys.stdout.flush()
                    return True
            # Get map (two components: G1, G2)
            map, _ = tools.get_map(w, mask, cat)
            # Save to file the map
            name = 'MAP_{}_Z{}_G1'.format(f, n_z_bin+1)
            warning = io.write_to_fits(path['map_'+f], map[0], name, header=hd,
                                       type='image') or warning
            name = 'MAP_{}_Z{}_G2'.format(f, n_z_bin+1)
            warning = io.write_to_fits(path['map_'+f], map[1], name, header=hd,
                                       type='image') or warning
            # Generate plots
            if want_plots:
                plt.imshow(map[0], interpolation='nearest')
                plt.colorbar()
                plt.savefig('{}/map_{}_z{}_g1.pdf'
                            ''.format(path['plots'], f, n_z_bin+1))
                plt.close()
                plt.imshow(map[1], interpolation='nearest')
                plt.colorbar()
                plt.savefig('{}/map_{}_z{}_g2.pdf'
                            ''.format(path['plots'], f, n_z_bin+1))
                plt.close()
        io.print_info_fits(path['map_'+f])
    return warning
# ------------------- Function to calculate the cl ---------------------------#
def run_cl(path, fields, z_bins, bp, n_sims_noise, coupled_cell, want_plots):
    """
    Compute the shear power spectra and their noise bias per field.

    For each field this measures the Cl's of the shear maps with
    NaMaster (via tools.get_cl), estimates the noise spectrum as the
    mean over n_sims_noise realizations with randomly rotated
    ellipticities, and writes ELL_<field>, CL_<field> and
    CL_NOISE_<field> to path['cl_<field>'].

    Args:
        path: dict of absolute I/O paths.
        fields: list of CFHTLenS field names.
        z_bins: list of (z_min, z_max) redshift bins.
        bp: bandpowers used to bin the Cl's.
        n_sims_noise: number of random-rotation noise realizations.
        coupled_cell: if True, do not decouple the Cl's with the mcm.
        want_plots: if True, save pdf plots of the spectra.

    Returns:
        True on any fatal input problem, otherwise the accumulated
        warning flag from the fits writes.
    """
    print('Running CL module')
    sys.stdout.flush()
    warning = False
    # First loop: scan over the fields
    for f in fields:
        # Check existence of files and in case skip
        file_exists = os.path.exists(path['cl_'+f])
        if file_exists is True:
            keys = io.get_keys_from_fits(path['cl_'+f])
            mzs = all([x in keys for x in ['ELL_'+f, 'CL_'+f, 'CL_NOISE_'+f]])
        if file_exists is True and mzs is True:
            print('----> Skipping CL calculation for field {}. Output '
                  'file already there!'.format(f))
            sys.stdout.flush()
            continue
        print('Calculating cl for field {}:'.format(f))
        sys.stdout.flush()
        # Remove old output file to avoid confusion
        try:
            os.remove(path['cl_'+f])
        except FileNotFoundError:
            pass
        # Read mask
        fname = path['mask_'+f]
        t = 'MASK_{}_Z1'.format(f)
        try:
            # Header is the same for each bin
            hd = io.read_header_from_fits(fname, t)
        except KeyError:
            print('WARNING: No key {} in {}. Skipping '
                  'calculation!'.format(t, fname))
            sys.stdout.flush()
            return True
        w = wcs.WCS(hd)
        mask = np.zeros((len(z_bins), hd['NAXIS2'], hd['NAXIS1']))
        for n_z_bin, z_bin in enumerate(z_bins):
            t = 'MASK_{}_Z{}'.format(f, n_z_bin+1)
            try:
                mask[n_z_bin] = io.read_from_fits(fname, t)
            except KeyError:
                print('WARNING: No key {} in {}. Skipping '
                      'calculation!'.format(t, fname))
                sys.stdout.flush()
                return True
        # Read galaxy catalogue
        fname = path['cat_'+f]
        cat = {}
        for n_z_bin, z_bin in enumerate(z_bins):
            t = 'CAT_{}_Z{}'.format(f, n_z_bin+1)
            try:
                cat[n_z_bin] = io.read_from_fits(fname, t)
            except KeyError:
                print('WARNING: No key {} in {}. Skipping '
                      'calculation!'.format(t, fname))
                sys.stdout.flush()
                return True
            # Check that the table has the correct columns
            table_keys = ['ALPHA_J2000', 'DELTA_J2000', 'e1', 'e2', 'weight']
            for key in table_keys:
                if key not in cat[n_z_bin].columns.names:
                    print('WARNING: No key {} in table of {}. Skipping '
                          'calculation!'.format(key, fname))
                    sys.stdout.flush()
                    return True
        # Get maps
        map = np.zeros((len(z_bins), 2, hd['NAXIS2'], hd['NAXIS1']))
        pos = {}
        for n_z_bin, z_bin in enumerate(z_bins):
            map[n_z_bin], pos[n_z_bin] = \
                tools.get_map(w, mask[n_z_bin], cat[n_z_bin])
        # Get Cl's (axes reordered to shear component first)
        map = np.transpose(map, axes=(1, 0, 2, 3))
        ell, cl, mcm_paths = \
            tools.get_cl(f, bp, hd, mask, map, coupled_cell,
                         tmp_path=path['mcm'])
        # Save to file the map
        warning = io.write_to_fits(path['cl_'+f], ell, 'ELL_{}'.format(f),
                                   type='image') or warning
        warning = io.write_to_fits(path['cl_'+f], cl, 'CL_{}'.format(f),
                                   type='image') or warning
        # Get noise: average Cl of maps built from randomly rotated
        # ellipticities (pure shape noise)
        noise_sims = np.zeros((n_sims_noise,) + cl.shape)
        for ns in range(n_sims_noise):
            map = np.zeros((len(z_bins), 2, hd['NAXIS2'], hd['NAXIS1']))
            # Generate random ellipticities
            for n_z_bin, z_bin in enumerate(z_bins):
                cat_sim = cat[n_z_bin].copy()
                n_gals = len(cat_sim)
                phi = 2*np.pi*np.random.rand(n_gals)
                cos = np.cos(2*phi)
                sin = np.sin(2*phi)
                # BUGFIX: the rotation must use the ORIGINAL (e1, e2)
                # pair for both components. Previously cat_sim['e1']
                # was overwritten first and the already-rotated e1 was
                # used to compute e2 (cf. the correct pattern in
                # run_cat_sims).
                e1 = cat_sim['e1'].copy()
                e2 = cat_sim['e2'].copy()
                cat_sim['e1'] = e1*cos - e2*sin
                cat_sim['e2'] = e1*sin + e2*cos
                # Get map
                map[n_z_bin], _ = tools.get_map(w, mask[n_z_bin], cat_sim,
                                                pos_in=pos[n_z_bin])
            # Print message every some step
            if (ns+1) % 1e2 == 0:
                print('----> Done {0:5.1%} of the noise Cls ({1:d})'
                      ''.format(float(ns+1)/n_sims_noise, n_sims_noise))
                sys.stdout.flush()
            # Get Cl's
            map = np.transpose(map, axes=(1, 0, 2, 3))
            _, noise_sims[ns], _ = \
                tools.get_cl(f, bp, hd, mask, map, coupled_cell,
                             tmp_path=path['mcm'])
        # Get mean shape noise
        noise = np.mean(noise_sims, axis=0)
        # Save to file the map
        name = 'CL_NOISE_{}'.format(f)
        warning = io.write_to_fits(path['cl_'+f], noise, name,
                                   type='image') or warning
        # Generate plots
        if want_plots:
            factor = 1.
            x = ell
            for nb1 in range(len(z_bins)):
                for nb2 in range(nb1, len(z_bins)):
                    ax = plt.gca()
                    for ng1 in range(2):
                        for ng2 in range(ng1, 2):
                            y = factor*(cl[ng1, ng2, :, nb1, nb2]
                                        - noise[ng1, ng2, :, nb1, nb2])
                            color = next(ax._get_lines.prop_cycler)['color']
                            plt.plot(x, y, 'o', label='$C_l^{{{}{}}}$'
                                     ''.format(ng1+1, ng2+1), color=color)
                            plt.plot(x, -y, '*', color=color)
                    plt.title('Z = {}{}'.format(nb1+1, nb2+1))
                    plt.xscale('log')
                    plt.yscale('log')
                    plt.xlabel('$\\ell$')
                    plt.ylabel('$C_\\ell$')
                    plt.legend(loc='best')
                    plt.savefig('{}/cl_{}_z{}{}.pdf'
                                ''.format(path['plots'], f, nb1+1, nb2+1))
                    plt.close()
        io.print_info_fits(path['cl_'+f])
    return warning
# ------------------- Function to calculate the cat_sims ---------------------#
def run_cat_sims(path, fields, z_bins, n_sims_cov):
print('Running CAT_SIMS module')
sys.stdout.flush()
warning = False
# Define local variables
join = os.path.join
n_bins = len(z_bins)
sign_e2 = -1
noise_factor = 1
seed_ini = 1000
# Define main function
def get_gaussian_sim(ww, masks, cl_mat, catals, ipixs, seed, fname, field):
n_bins = len(catals)
if (n_bins != len(ipixs)) or (n_bins != len(cl_mat)/2):
print("SHIT")
exit(1)
nmaps = 2*n_bins
clarr = []
sparr = 2*np.ones(n_bins, dtype=int)
for i1 in range(nmaps):
for i2 in range(i1, nmaps):
clarr.append(cl_mat[i1, i2])
clarr = np.array(clarr)
ny, nx = masks[0].shape
lx = np.fabs(np.radians(nx*ww.wcs.cdelt[0]))
ly = np.fabs(np.radians(ny*ww.wcs.cdelt[1]))
maps = nmt.synfast_flat(nx, ny, lx, ly, clarr, sparr,
seed=seed).reshape([n_bins, 2, ny, nx])
cats = []
cats.append(fits.PrimaryHDU())
for i_c, c in enumerate(catals):
# Get randomly rotated ellipticities
phi = 2*np.pi*np.random.rand(len(c))
cs = np.cos(2*phi)
sn = np.sin(2*phi)
e1n = c['e1']*cs - c['e2']*sn
e2n = c['e1']*sn + c['e2']*cs
# Add signal and noise
e1 = maps[i_c, 0][ipixs[i_c][1], ipixs[i_c][0]] + noise_factor*e1n
e2 = sign_e2*(
maps[i_c, 1][ipixs[i_c][1], ipixs[i_c][0]] + noise_factor*e2n)
cats.append(fits.BinTableHDU.from_columns([
fits.Column(name='ALPHA_J2000', format='E',
array=c['ALPHA_J2000']),
fits.Column(name='DELTA_J2000', format='E',
array=c['DELTA_J2000']),
fits.Column(name='e1', format='E', array=e1),
fits.Column(name='e2', format='E', array=e2),
fits.Column(name='weight', format='E', array=c['weight'])],
name='CAT_{}_Z{}'.format(f, i_c+1)))
hdul = fits.HDUList(cats)
hdul.writeto(fname, overwrite=True)
return
# Create reference theory Cl
# TODO_EB: for now I am just importing the preexisting ones
fname = join(path['input'], 'cls_bf.fits')
ells = io.read_from_fits(fname, 'ELL')
cl_ee = io.read_from_fits(fname, 'CLS_BF')
cl_matrix = np.zeros([2*n_bins, 2*n_bins, len(ells)])
cl0 = np.zeros(len(ells))
for i in range(n_bins):
for j in range(n_bins):
cl_matrix[2*i, 2*j, :] = cl_ee[:, i, j] # EE
cl_matrix[2*i, 2*j+1, :] = cl0 # EB
cl_matrix[2*i+1, 2*j, :] = cl0 # BE
cl_matrix[2*i+1, 2*j+1, :] = cl0 # BB
# Scan over the fields
for f in fields:
print('Calculating gaussian simulations for field {}:'.format(f))
sys.stdout.flush()
# Remove old output file to avoid confusion
for ns in range(n_sims_cov):
try:
os.remove(
join(path['cat_sims'], 'sim_{}_cat_{}.fits'.format(ns, f)))
except FileNotFoundError:
pass
# Read masks
imnames = ['MASK_{}_Z{}'.format(f, x+1) for x in range(n_bins)]
fname = path['mask_'+f]
try:
masks = [io.read_from_fits(fname, x) for x in imnames]
hds = [io.read_header_from_fits(fname, x) for x in imnames]
except KeyError:
print('WARNING: At least one of these keys {} is missing {}. '
'Skipping calculation!'.format(imnames, fname))
sys.stdout.flush()
return True
# Create a new WCS object
w = wcs.WCS(hds[0])
# Read catalogues
tabnames = ['CAT_{}_Z{}'.format(f, x+1) for x in range(n_bins)]
fname = path['cat_'+f]
try:
cats = [io.read_from_fits(fname, x) for x in tabnames]
except KeyError:
print('WARNING: At least one of these keys {} is missing in {}. '
'Skipping calculation!'.format(tabnames, fname))
sys.stdout.flush()
return True
# Check that the table has the correct columns
table_keys = ['ALPHA_J2000', 'DELTA_J2000', 'e1', 'e2', 'weight']
for cat in cats:
for key in table_keys:
if key not in cat.columns.names:
print('WARNING: No key {} in table of {}. Skipping '
'calculation!'.format(key, fname))
sys.stdout.flush()
return True
# Transform to pixel positions
ipixs = [np.transpose((w.wcs_world2pix(
np.transpose([c['ALPHA_J2000'], c['DELTA_J2000']]),
0, ra_dec_order=True)).astype(int)) for c in cats]
# Generate simulations for each field
io.path_exists_or_create(path['cat_sims'])
for i in range(n_sims_cov):
seed = seed_ini + i
fname = \
join(path['cat_sims'], 'sim_{}_cat_{}.fits'.format(seed, f))
get_gaussian_sim(w, masks, cl_matrix, cats, ipixs, seed, fname, f)
# Print message every some step
if (i+1) % 1e2 == 0:
print('----> Calculated {0:5.1%} of the simulation catalogues '
'({1:d}) for field {2}'.format(float(i+1)/n_sims_cov,
n_sims_cov, f))
sys.stdout.flush()
return warning
# ------------------- Function to calculate the cl_sims ----------------------#
def run_cl_sims(path, fields, z_bins, bp, coupled_cell):
print('Running CL_SIMS module')
sys.stdout.flush()
warning = False
join = os.path.join
# First loop: scan over the fields
for nf, f in enumerate(fields):
# Check existence of files and in case skip
file_exists = os.path.exists(path['cl_sims_'+f])
if file_exists is True:
keys = io.get_keys_from_fits(path['cl_sims_'+f])
mzs = all([x in keys for x in ['ELL_'+f, 'CL_SIM_'+f]])
if file_exists is True and mzs is True:
print('----> Skipping CL_SIMS calculation for field {}. Output '
'file already there!'.format(f))
sys.stdout.flush()
continue
print('Calculating Cls from simulations for field {}:'.format(f))
sys.stdout.flush()
# Remove old output file to avoid confusion
try:
os.remove(path['cl_sims_'+f])
except FileNotFoundError:
pass
# List files present in the sims directory
list_sims = [join(path['cat_sims'], x) for x
in os.listdir(path['cat_sims'])
if re.match('.+{}.fits'.format(f), x)]
list_sims = sorted(list_sims)
# Initialize array with Cl's
ns = len(list_sims)
nl = len(bp)
nb = len(z_bins)
nm = 2
cl = np.zeros((ns, nm, nm, nl, nb, nb))
# Read mask
fname = path['mask_'+f]
t = 'MASK_{}_Z1'.format(f)
try:
# Header is the same for each bin
hd = io.read_header_from_fits(fname, t)
except KeyError:
print('WARNING: No key {} in {}. Skipping calculation!'
''.format(t, fname))
sys.stdout.flush()
return True
w = wcs.WCS(hd)
mask = np.zeros((len(z_bins), hd['NAXIS2'], hd['NAXIS1']))
for n_z_bin, z_bin in enumerate(z_bins):
t = 'MASK_{}_Z{}'.format(f, n_z_bin+1)
try:
mask[n_z_bin] = io.read_from_fits(fname, t)
except KeyError:
print('WARNING: No key {} in {}. Skipping calculation!'
''.format(t, fname))
sys.stdout.flush()
return True
# Calculate Cl's for each simulation
for ns, fname in enumerate(list_sims):
# Read galaxy catalogue
cat = {}
for n_z_bin, z_bin in enumerate(z_bins):
t = 'CAT_{}_Z{}'.format(f, n_z_bin+1)
try:
cat[n_z_bin] = io.read_from_fits(fname, t)
except KeyError:
print('WARNING: No key {} in {}. Skipping calculation!'
''.format(t, fname))
sys.stdout.flush()
return True
# Check that the table has the correct columns
table_keys = ['ALPHA_J2000', 'DELTA_J2000', 'e1', 'e2',
'weight']
for key in table_keys:
if key not in cat[n_z_bin].columns.names:
print('WARNING: No key {} in {}. Skipping calculation!'
''.format(key, fname))
sys.stdout.flush()
return True
# Get maps
map = np.zeros((len(z_bins), 2, hd['NAXIS2'], hd['NAXIS1']))
pos = {}
for n_z_bin, z_bin in enumerate(z_bins):
map[n_z_bin], pos[n_z_bin] = \
tools.get_map(w, mask[n_z_bin], cat[n_z_bin])
# Get Cl's
map = np.transpose(map, axes=(1, 0, 2, 3))
ell, cl[ns], mcm_paths = \
tools.get_cl(f, bp, hd, mask, map, coupled_cell,
tmp_path=path['mcm'])
# Print message every some step
if (ns+1) % 1e2 == 0:
print('----> Done {0:5.1%} of the simulations ({1:d})'
''.format(float(ns+1)/len(list_sims), len(list_sims)))
sys.stdout.flush()
# Save to file the map
name = 'ELL_{}'.format(f)
warning = io.write_to_fits(path['cl_sims_'+f], ell, name,
type='image') or warning
name = 'CL_SIM_{}'.format(f)
warning = io.write_to_fits(path['cl_sims_'+f], cl, name,
type='image') or warning
io.print_info_fits(path['cl_sims_'+f])
return warning
# ------------------- Function to unify all ----------------------------------#
def run_final(path, fields, z_bins, bp):
print('Running Final module')
sys.stdout.flush()
warning = False
# Remove old output file to avoid confusion
try:
os.remove(path['final'])
except FileNotFoundError:
pass
# Read cl and cl noise
cl = []
cl_n = []
for nf, f in enumerate(fields):
fname = path['cl_'+f]
imname = 'ELL_{}'.format(f)
try:
ell = io.read_from_fits(fname, imname)
except KeyError:
print('WARNING: No key {} in {}. Skipping calculation!'
''.format(imname, fname))
sys.stdout.flush()
return True
imname = 'CL_{}'.format(f)
try:
cl.append(io.read_from_fits(fname, imname))
except KeyError:
print('WARNING: No key {} in {}. Skipping calculation!'
''.format(imname, fname))
sys.stdout.flush()
return True
imname = 'CL_NOISE_{}'.format(f)
try:
cl_n.append(io.read_from_fits(fname, imname))
except KeyError:
print('WARNING: No key {} in {}. Skipping calculation!'
''.format(imname, fname))
sys.stdout.flush()
return True
# Write ells
warning = io.write_to_fits(path['final'], ell, 'ELL',
type='image') or warning
# Write cl
cl = np.array(cl)
warning = io.write_to_fits(path['final'], cl[:, 0, 0], 'CL_EE',
type='image') or warning
warning = io.write_to_fits(path['final'], cl[:, 0, 1], 'CL_EB',
type='image') or warning
warning = io.write_to_fits(path['final'], cl[:, 1, 1], 'CL_BB',
type='image') or warning
# Write cl noise
cl_n = np.array(cl_n)
warning = io.write_to_fits(path['final'], cl_n[:, 0, 0], 'CL_EE_NOISE',
type='image') or warning
warning = io.write_to_fits(path['final'], cl_n[:, 0, 1], 'CL_EB_NOISE',
type='image') or warning
warning = io.write_to_fits(path['final'], cl_n[:, 1, 1], 'CL_BB_NOISE',
type='image') or warning
# Read cl sims
cl = []
for nf, f in enumerate(fields):
fname = path['cl_sims_'+f]
imname = 'CL_SIM_{}'.format(f)
try:
cl.append(io.read_from_fits(fname, imname))
except KeyError:
print('WARNING: No key {} in {}. Skipping calculation!'
''.format(imname, fname))
sys.stdout.flush()
return True
# Write cl sims
cl = np.array(cl)
warning = io.write_to_fits(path['final'], cl[:, :, 0, 0], 'CL_SIM_EE',
type='image') or warning
warning = io.write_to_fits(path['final'], cl[:, :, 0, 1], 'CL_SIM_EB',
type='image') or warning
warning = io.write_to_fits(path['final'], cl[:, :, 1, 1], 'CL_SIM_BB',
type='image') or warning
# Read and write photo_z
fname = path['photo_z']
imname = 'PHOTO_Z'
try:
image = io.read_from_fits(fname, imname)
except KeyError:
print('WARNING: No key {} in {}. Skipping calculation!'
''.format(imname, fname))
sys.stdout.flush()
return True
warning = io.write_to_fits(path['final'], image, imname,
type='image') or warning
# Read and write n_eff
fname = path['photo_z']
imname = 'N_EFF'
try:
image = io.read_from_fits(fname, imname)
except KeyError:
print('WARNING: No key {} in {}. Skipping calculation!'
''.format(imname, fname))
sys.stdout.flush()
return True
warning = io.write_to_fits(path['final'], image, imname,
type='image') or warning
# Read and write sigma_g
fname = path['photo_z']
imname = 'SIGMA_G'
try:
image = io.read_from_fits(fname, imname)
except KeyError:
print('WARNING: No key {} in {}. Skipping calculation!'
''.format(imname, fname))
sys.stdout.flush()
return True
warning = io.write_to_fits(path['final'], image, imname,
type='image') or warning
io.print_info_fits(path['final'])
return warning
| 59,387 | 36.706667 | 79 | py |
kl_sample | kl_sample-master/kl_sample/settings.py | """
General settings: default variables.
WARNING: if you modify this file you may
have to rerun prep_real.py or prep_fourier.py.
"""
import numpy as np
# Photo-z Bins (minimum, maximum and intermediate bins)
Z_BINS = [0.15, 0.29, 0.43, 0.57, 0.70, 0.90, 1.10, 1.30]
# Z_BINS = [0.5,0.85,1.30]
Z_BINS = np.vstack((Z_BINS[:-1], Z_BINS[1:])).T
# Angles of the correlation functions
THETA_ARCMIN = [1.41, 2.79, 5.53, 11.0, 21.7, 43.0, 85.2] # in arcmin
MASK_THETA = np.array([
[True, True, True, True, True, True, False],
[False, False, False, True, True, True, True]
])
# Default settings
# Bandpowers to calculate Cl's (minimum, maximum and intermediate bins)
BANDPOWERS = [30, 80, 260, 450, 670, 1310, 2300, 5100]
MASK_ELL = np.array([False, True, True, True, True, True, False])
KEEP_CELLS_COUPLED = False
# Use pseudo-inverse instead of inverse.
PINV = False
# Theory from.
THEORY = 'CCL'
BNT = False
# # Camera settings
# # Bandpowers to calculate Cl's (minimum, maximum and intermediate bins)
# BANDPOWERS = [30, 96, 110, 130, 159, 207, 237, 259, 670, 1310]
# MASK_ELL = np.array([True, True, True, True, True, True, True, False, False])
# KEEP_CELLS_COUPLED = True
# # Use pseudo-inverse instead of inverse.
# PINV = True
# # Theory from.
# THEORY = 'Camera'
# # Template file
# CLS_TEMPLATE = 'data/Cls_template_Camera.txt'
# BNT = True
BANDPOWERS = np.vstack((BANDPOWERS[:-1], BANDPOWERS[1:])).T
# CFHTlens specifications plus area of simulations
FIELDS_CFHTLENS = ['W'+str(x+1) for x in range(4)]
dZ_CFHTlens = 0.05
A_CFHTlens = np.array([42.90, 12.10, 26.10, 13.30])*(60.**2.) # in arcmin^-2
A_sims = np.array([12.72, 10.31, 12.01, 10.38])*(60.**2.) # in arcmin^-2
# Size pixels masks in arcsecs (it has to be an integer number)
SIZE_PIX = 120
# Range of pixels used to average the multiplicative correction
N_AVG_M = 2
# Number of simulations for the covariance matrix
N_SIMS_COV = 2000
# Number of simulations used to calculate the noise
N_SIMS_NOISE = 1000
# Parameters for Intrinsic Alignment
RHO_CRIT = 2.77536627e11
C_1 = 5.e-14
L_I_OVER_L_0 = np.array([0.017, 0.069, 0.15, 0.22, 0.36, 0.49, 0.77])
# Criteria used to select the data
def filter_galaxies(data, z_min, z_max, field='all'):
sel = data['Z_B'] >= z_min
sel = (data['Z_B'] < z_max)*sel
sel = (data['MASK'] == 0)*sel
sel = (data['weight'] > 0.)*sel
sel = (data['star_flag'] == 0)*sel
sel = np.array([x[:6] in good_fit_patterns for x in data['id']])*sel
if field in FIELDS_CFHTLENS:
sel = np.array([x[:2] in field for x in data['id']])*sel
return sel
# Good fit patterns
good_fit_patterns = [
'W1m0m0', 'W1m0m3', 'W1m0m4', 'W1m0p1', 'W1m0p2', 'W1m0p3', 'W1m1m0',
'W1m1m2', 'W1m1m3', 'W1m1m4', 'W1m1p3', 'W1m2m1', 'W1m2m2', 'W1m2m3',
'W1m2p1', 'W1m2p2', 'W1m3m0', 'W1m3m2', 'W1m3m4', 'W1m3p1', 'W1m3p3',
'W1m4m0', 'W1m4m1', 'W1m4m3', 'W1m4m4', 'W1m4p1', 'W1p1m1', 'W1p1m2',
'W1p1m3', 'W1p1m4', 'W1p1p1', 'W1p1p2', 'W1p1p3', 'W1p2m0', 'W1p2m2',
'W1p2m3', 'W1p2m4', 'W1p2p1', 'W1p2p2', 'W1p2p3', 'W1p3m1', 'W1p3m2',
'W1p3m3', 'W1p3m4', 'W1p3p1', 'W1p3p2', 'W1p3p3', 'W1p4m0', 'W1p4m1',
'W1p4m2', 'W1p4m3', 'W1p4m4', 'W1p4p1', 'W1p4p2', 'W1p4p3',
'W2m0m0', 'W2m0m1', 'W2m0p1', 'W2m0p2', 'W2m1m0', 'W2m1m1', 'W2m1p1',
'W2m1p3', 'W2p1m0', 'W2p1p1', 'W2p1p2', 'W2p2m0', 'W2p2m1', 'W2p2p1',
'W2p2p2', 'W2p3m0', 'W2p3m1', 'W2p3p1', 'W2p3p3',
'W3m0m1', 'W3m0m2', 'W3m0m3', 'W3m0p2', 'W3m0p3', 'W3m1m0', 'W3m1m2',
'W3m1m3', 'W3m1p1', 'W3m1p2', 'W3m1p3', 'W3m2m1', 'W3m2m2', 'W3m2m3',
'W3m2p1', 'W3m2p2', 'W3m3m0', 'W3m3m1', 'W3m3m2', 'W3m3m3', 'W3m3p1',
'W3m3p2', 'W3p1m0', 'W3p1m1', 'W3p1m2', 'W3p1m3', 'W3p1p2', 'W3p1p3',
'W3p2m0', 'W3p2m3', 'W3p2p3', 'W3p3m1', 'W3p3m3', 'W3p3p1', 'W3p3p2',
'W3p3p3',
'W4m0m2', 'W4m0p1', 'W4m1m0', 'W4m1m1', 'W4m1m2', 'W4m1p1', 'W4m2m0',
'W4m2p1', 'W4m2p3', 'W4m3m0', 'W4m3p1', 'W4m3p2', 'W4m3p3', 'W4p1m0',
'W4p1m1', 'W4p1m2', 'W4p2m0', 'W4p2m1', 'W4p2m2'
]
# Default parameters
default_params = {
'h': [0.61, 0.61197750, 0.81],
'omega_c': [0.001, 0.11651890, 0.99],
'omega_b': [0.013, 0.03274485, 0.033],
'ln10_A_s': [2.3, 2.47363700, 5.0],
'n_s': [0.7, 1.25771300, 1.3],
'w_0': [-3.0, -1.00000000, 0.0],
'w_A': [-5.0, 0.00000000, 5.0],
'A_IA': [-6.0, 0.00000000, 6.0],
'beta_IA': [0.25, 1.13000000, 0.25],
'ell_max': 2000,
'method': 'full',
'n_kl': len(Z_BINS),
'kl_scale_dep': False,
'n_sims': 'auto',
'sampler': 'single_point',
'n_walkers': 10,
'n_steps': 2,
'space': 'real',
'data': 'data/data_real.fits',
'output': 'output/test/test.txt',
'n_threads': 2,
'add_ia': False
}
| 4,995 | 34.432624 | 79 | py |
kl_sample | kl_sample-master/kl_sample/checks.py | """
This module contains checks that needs to be performed
to ensure that the input is consistent.
Functions:
- unused_params(cosmo, settings, path)
- sanity_checks(cosmo, settings, path)
- kl_consistent(E, S, N, L, eigval, tol)
"""
import re
import sys
import numpy as np
from astropy.io import fits
import kl_sample.settings as set
# ------------------- Preliminary --------------------------------------------#
def unused_params(cosmo, settings, path):
""" Check if there are unused.
In case it prints the parameter on the screen.
Args:
cosmo: dictionary containing names, values and mask for
the cosmological parameters
settings: dictionary with all the settings used
path: dictionary containing the paths stored
Returns:
None if there are no unused parameters.
"""
# Join together all the parameters read
params = \
np.hstack((cosmo['names'], list(settings.keys()), list(path.keys())))
# Scroll all the parameter to see if there are unused ones
with open(path['params']) as fn:
for line in fn:
line = re.sub('#.+', '', line)
if '=' in line:
name, _ = line.split('=')
name = name.strip()
if name not in params:
print('Unused parameter: {}'.format(line).rstrip('\n'))
sys.stdout.flush()
return
def sanity_checks(cosmo, settings, path):
""" Perform different sanity checks on the input
parameters and data.
Args:
cosmo: dictionary containing names, values and mask for
the cosmological parameters
settings: dictionary with all the settings used
path: dictionary containing the paths stored
Returns:
None if the checks are passed. Otherwise it raises
an error.
"""
# Function to check if a string can be a natural number
def is_natural(str):
try:
if int(str) > 0:
return True
except ValueError:
return False
return False
# Checks on cosmological parameters
for n, par in enumerate(cosmo['params']):
# Check that the central value is a number
test = par[1] is not None
assert test, 'Central value for ' + cosmo['names'][n] + ' is None!'
# If left and right are the same the prior is considered Gaussian
# Check that the left bound is either None or smaller than central
test = par[0] is None or par[0] <= par[1] or par[0] == par[2]
assert test, 'Central value for {} is lower than the left bound!' \
''.format(cosmo['names'][n])
# Check that the right bound is either None or larger than central
test = par[2] is None or par[1] <= par[2] or par[0] == par[2]
assert test, 'Central value for {} is larger than the right bound!' \
''.format(cosmo['names'][n])
# Check sampler options
test = settings['sampler'] in ['emcee', 'single_point']
assert test, 'sampler not recognized! Options: emcee, single_point'
# Check space options
test = settings['space'] in ['real', 'fourier']
assert test, 'space not recognized! Options: real, fourier'
# Check method options
test = settings['method'] in ['full', 'kl_off_diag', 'kl_diag']
assert test, 'method not recognized! Options: full, kl_off_diag, kl_diag'
# Check ell_max
test = settings['ell_max'] > 100
assert test, 'Unless you are crazy, choose a larger ell_max!'
# Check n_sims
test = \
settings['n_sims'] in ['auto', 'all'] or is_natural(settings['n_sims'])
assert test, 'n_sims not recognized! Options: auto, all, positive integer'
# Checks related to the emcee sampler
if settings['sampler'] == 'emcee':
# Check that there are at least 2 varying parameters
test = len(cosmo['mask'][cosmo['mask']]) > 1
assert test, 'For emcee the minimum number of varied parameters is 2!'
# Check n_walkers
test = settings['n_walkers'] > 0
assert test, 'n_walkers should be at least 1!'
# Check n_steps
test = settings['n_steps'] > 0
assert test, 'n_steps should be at least 1!'
# Check n_threads
test = settings['n_threads'] > 0
assert test, 'n_threads should be at least 1!'
# Check data existence
with fits.open(path['data']) as hdul:
imgs = [hdul[n].name for n in range(1, len(hdul))]
# Checks common to real and fourier spaces
for name in ['PHOTO_Z']:
test = name in imgs
assert test, name + ' was not found in the data file!'
# Checks related to real space
if settings['space'] == 'real':
for name in ['XIPM_OBS', 'XIPM_SIM']:
test = name in imgs
assert test, name + ' was not found in data file!'
# Checks related to fourier space
elif settings['space'] == 'fourier':
for name in ['ELL', 'CL_EE', 'CL_EE_NOISE', 'CL_SIM_EE']:
test = name in imgs
assert test, name + ' was not found in data file!'
# Check data dimensions
n_bins = hdul['PHOTO_Z'].shape[0]-1
# Photo_z
test = len(hdul['PHOTO_Z'].shape) == 2
assert test, 'photo_z has wrong dimensions!'
if settings['space'] == 'real':
n_theta = len(set.THETA_ARCMIN)
n_fields = hdul['XIPM_SIM'].shape[0]
n_sims = hdul['XIPM_SIM'].shape[1]
# xipm_obs
test = hdul['XIPM_OBS'].shape == (2, n_theta, n_bins, n_bins)
assert test, 'xipm_obs has wrong dimensions!'
test = hdul['XIPM_SIM'].shape == \
(n_fields, n_sims, 2, n_theta, n_bins, n_bins)
assert test, 'xipm_sim has wrong dimensions!'
elif settings['space'] == 'fourier':
n_ell = hdul['ELL'].shape[0]
n_fields = hdul['CL_SIM_EE'].shape[0]
n_sims = hdul['CL_SIM_EE'].shape[1]
# cl_obs
test = hdul['CL_EE'].shape == (n_fields, n_ell, n_bins, n_bins)
assert test, 'CL_EE has wrong dimensions!'
test = hdul['CL_EE_NOISE'].shape == \
(n_fields, n_ell, n_bins, n_bins)
assert test, 'CL_EE_NOISE has wrong dimensions!'
test = hdul['CL_SIM_EE'].shape == \
(n_fields, n_sims, n_ell, n_bins, n_bins)
assert test, 'CL_SIM_EE has wrong dimensions!'
# if n_sims is natural check that it is smaller than
# the max n_sims we have
if is_natural(settings['n_sims']):
test = int(settings['n_sims']) <= n_sims and \
int(settings['n_sims']) > 1
assert test, 'n_sims has to be smaller than {} and larger than 1' \
''.format(n_sims)
# Checks related to the KL transform
if settings['method'] in ['kl_diag', 'kl_off_diag']:
# n_kl
test = settings['n_kl'] > 0 and settings['n_kl'] <= n_bins
assert test, 'n_kl should be at least 1 and at most {}'.format(n_bins)
return
# ------------------- Calculations related -----------------------------------#
def kl_consistent(E, S, N, L, eigval, tol):
""" Check if the calculated KL transform is consistent.
Args:
E, S, N, eigval: KL transform, signal, noise,
eivenvalues respectively.
tol: tolerance.
Returns:
None if the checks are passed. Otherwise it raises
a warning.
"""
# Calculate the KL transformed Cl's
angular_cl = np.array([np.diag(eigval[x]) for x in range(len(S))])
# First test
test1 = np.array([np.dot(E[x], S[x]+N[x]) for x in range(len(S))])
test1 = np.array([np.dot(test1[x], E[x].T) for x in range(len(S))])
test1 = np.array([abs(test1[x]-angular_cl[x]) for x in range(len(S))])
test1 = test1[2:].max()/abs(angular_cl[2:]).max()
# Second test
test2 = np.array([np.dot(L[x].T, E[x].T) for x in range(len(S))])
test2 = np.array([np.dot(test2[x], E[x]) for x in range(len(S))])
test2 = np.array([np.dot(test2[x], L[x]) for x in range(len(S))])
test2 = np.array([abs(test2[x] - np.identity(len(range(len(S[0])))))
for x in range(len(S))])
test2 = test2[2:].max()
# Warning message
if test1 > tol or test2 > tol:
print('WARNING: the transformation matrix does not reproduce the '
'correct Cl\'s. The relative difference is {:1.2e} and the '
'maximum accepted is {:1.2e}.'.format(max(test1, test2), tol))
sys.stdout.flush()
return
| 8,676 | 36.562771 | 79 | py |
kl_sample | kl_sample-master/kl_sample/sampler.py | """
This module contains all the samplers implemented.
Functions:
- run_emcee()
- run_single_point()
"""
import sys
import numpy as np
import emcee
import kl_sample.likelihood as lkl
import kl_sample.cosmo as cosmo_tools
# ------------------- emcee --------------------------------------------------#
def run_emcee(args, cosmo, data, settings, path):
""" Run emcee sampler.
Args:
cosmo: array containing the cosmological parameters.
data: dictionary containing data.
settings: dictionary containing settings.
path: dictionary containing paths.
Returns:
file with chains.
"""
# Local variables
full = cosmo['params']
mask = cosmo['mask']
ns = settings['n_steps']
nw = settings['n_walkers']
nt = settings['n_threads']
nd = len(mask[mask])
# Print useful stuff
print('Starting the chains!')
for key in settings.keys():
print(key + ' = ' + str(settings[key]))
sys.stdout.flush()
# Initialize sampler
sampler = emcee.EnsembleSampler(nw, nd, lkl.lnprob,
args=[full, mask, data, settings],
threads=nt)
if args.restart:
# Initial point from data
vars_0 = np.loadtxt(path['output'], unpack=True)
vars_0 = vars_0[2:2+nd]
vars_0 = vars_0[:, -nw:].T
else:
# Initial point
vars_0 = \
np.array([lkl.get_random(full[mask], 1.e3) for x in range(nw)])
# Create file
f = open(path['output'], 'w')
f.close()
for count, result in enumerate(sampler.sample(vars_0, iterations=ns,
storechain=False)):
pos = result[0]
prob = result[1]
f = open(path['output'], 'a')
for k in range(pos.shape[0]):
out = np.append(np.array([1., -prob[k]]), pos[k])
out = np.append(out, cosmo_tools.get_sigma_8(pos[k], full, mask))
f.write(' '.join(['{0:.10e}'.format(x) for x in out]) + '\n')
f.close()
print('----> Computed {0:5.1%} of the steps'
''.format(float(count+1) / ns))
sys.stdout.flush()
return
# ------------------- single_point -------------------------------------------#
def run_single_point(cosmo, data, settings):
""" Run emcee sampler.
Args:
cosmo: array containing the cosmological parameters.
data: dictionary containing data.
settings: dictionary containing settings.
path: dictionary containing paths.
Returns:
output in terminal likelihood.
"""
# Local variables
full = cosmo['params']
mask = cosmo['mask']
var = full[:, 1][mask]
post = lkl.lnprob(var, full, mask, data, settings)
sigma8 = cosmo_tools.get_sigma_8(var, full, mask)
print('Cosmological parameters:')
print('----> h = ' + '{0:2.4e}'.format(full[0, 1]))
print('----> Omega_c h^2 = ' + '{0:2.4e}'.format(full[1, 1]))
print('----> Omega_b h^2 = ' + '{0:2.4e}'.format(full[2, 1]))
print('----> ln(10^10 A_s) = ' + '{0:2.4e}'.format(full[3, 1]))
print('----> n_s = ' + '{0:2.4e}'.format(full[4, 1]))
print('----> w_0 = ' + '{0:2.4e}'.format(full[5, 1]))
print('----> w_A = ' + '{0:2.4e}'.format(full[6, 1]))
if settings['add_ia']:
print('----> A_IA = ' + '{0:2.4e}'.format(full[7, 1]))
print('----> beta_IA = ' + '{0:2.4e}'.format(full[8, 1]))
print('Derived parameters:')
print('----> sigma_8 = ' + '{0:2.4e}'.format(sigma8))
print('Likelihood:')
print('----> -ln(like) = ' + '{0:4.4f}'.format(-post))
return
| 3,731 | 29.096774 | 79 | py |
kl_sample | kl_sample-master/kl_sample/prep_fourier_tools.py | """
This module contains the tools to prepare
data in fourier space.
Functions:
- get_map(w, mask, cat)
"""
import sys
import os
import re
import numpy as np
import pymaster as nmt
import kl_sample.io as io
def get_map(w, mask, cat, pos_in=None):
""" Generate a map from a catalogue, a mask
and a WCS object.
Args:
w: WCS object.
mask: array with mask.
cat: catalogue of objects.
pos_in: to save cpu time it is possible to provide pixel positions.
Returns:
map_1, map_2: array with maps for each polarization.
pos: pixel positions.
"""
# Create arrays for the two shears
map_1 = np.zeros(mask.shape)
map_2 = np.zeros(mask.shape)
# Get World position of each galaxy
if pos_in is None:
pos = np.vstack(
(cat['ALPHA_J2000'], cat['DELTA_J2000'])).T
# Calculate Pixel position of each galaxy
pos = w.wcs_world2pix(pos, 0).astype(int)
pos = np.flip(pos, axis=1) # Need to invert the columns
else:
pos = pos_in.copy()
# Perform lex sort and get the sorted indices
sorted_idx = np.lexsort(pos.T)
sorted_pos = pos[sorted_idx, :]
# Differentiation along rows for sorted array
diff_pos = np.diff(sorted_pos, axis=0)
diff_pos = np.append([True], np.any(diff_pos != 0, 1), 0)
# Get unique sorted labels
sorted_labels = diff_pos.cumsum(0)-1
# Get labels
labels = np.zeros_like(sorted_idx)
labels[sorted_idx] = sorted_labels
# Get unique indices
unq_idx = sorted_idx[diff_pos]
# Get unique pos's and ellipticities
pos_unique = pos[unq_idx, :]
w_at_pos = np.bincount(labels, weights=cat['weight'])
g1_at_pos = np.bincount(labels, weights=cat['e1']*cat['weight'])/w_at_pos
g2_at_pos = np.bincount(labels, weights=cat['e2']*cat['weight'])/w_at_pos
# Create the maps
map_1[pos_unique[:, 0], pos_unique[:, 1]] = g1_at_pos
map_2[pos_unique[:, 0], pos_unique[:, 1]] = g2_at_pos
# empty = 1.-np.array(
# [mask[tuple(x)] for x in pos_unique]).sum()/mask.flatten().sum()
# print('----> Empty pixels: {0:5.2%}'.format(empty))
# sys.stdout.flush()
return np.array([map_1, map_2]), pos
def get_cl(field, bp, hd, mask, map, coupled_cell, tmp_path=None):
""" Generate cl's from a mask and a map.
Args:
field: field.
bp: bandpowers for ell.
hd: header with infos about the mask and maps.
mask: array with mask.
coupled_cell: do not decouple cls with mcm (but calculated and stored)
map: maps for each bin and polarization.
Returns:
cl: array with cl (E/B, ell, bins).
mcm_path: path to the mode coupling matrix.
"""
# Initialize Cls
n_bins = map.shape[1]
n_ells = len(bp)
cl = np.zeros((2, 2, n_ells, n_bins, n_bins))
# Dimensions
Nx = hd['NAXIS1']
Ny = hd['NAXIS2']
Lx = Nx*abs(hd['CDELT1'])*np.pi/180 # Mask dimension in radians
Ly = Ny*abs(hd['CDELT2'])*np.pi/180 # Mask dimension in radians
# Fields definition
fd = np.array([nmt.NmtFieldFlat(
Lx, Ly, mask[x], [map[0, x], -map[1, x]]) for x in range(n_bins)])
# Bins for flat sky fields
b = nmt.NmtBinFlat(bp[:, 0], bp[:, 1])
# Effective ells
ell = b.get_effective_ells()
# Iterate over redshift bins to compute Cl's
mcm_paths = []
for nb1 in range(n_bins):
for nb2 in range(nb1, n_bins):
# Temporary path for mode coupling matrix
if tmp_path is None:
mcm_p = os.path.expanduser('~')
else:
mcm_p = tmp_path
mcm_p = mcm_p+'/mcm_{}_Z{}{}.dat'.format(field, nb1+1, nb2+1)
mcm_paths.append(mcm_p)
# Define workspace for mode coupling matrix
wf = nmt.NmtWorkspaceFlat()
try:
wf.read_from(mcm_p)
except RuntimeError:
wf.compute_coupling_matrix(fd[nb1], fd[nb2], b)
wf.write_to(mcm_p)
print('Calculated mode coupling matrix for bins {}{}'
''.format(nb1+1, nb2+1))
sys.stdout.flush()
# Calculate Cl's
cl_c = nmt.compute_coupled_cell_flat(fd[nb1], fd[nb2], b)
if coupled_cell is True:
cl_d = cl_c
else:
cl_d = wf.decouple_cell(cl_c)
cl_d = np.reshape(cl_d, (2, 2, n_ells))
cl[:, :, :, nb1, nb2] = cl_d
cl[:, :, :, nb2, nb1] = cl_d
return ell, cl, mcm_paths
def get_io_paths(args, fields):
""" Get paths for input and output.
Args:
args: the arguments read by the parser.
fields: list of the observed fields.
Returns:
path: dictionary with all the necessary paths.
"""
# Define local variables
path = {}
create = io.path_exists_or_create
join = os.path.join
path['input'] = os.path.abspath(args.input_path)
io.path_exists_or_error(path['input'])
if args.output_path:
path['output'] = create(os.path.abspath(args.output_path))
else:
path['output'] = create(join(path['input'], 'output'))
if args.badfields_path:
path['badfields'] = create(os.path.abspath(args.badfields_path))
else:
path['badfields'] = create(join(path['input'], 'badfields'))
path['final'] = join(path['output'], 'data_fourier.fits')
if args.want_plots:
path['plots'] = create(join(path['output'], 'plots'))
path['cat_full'] = join(path['input'], 'cat_full.fits')
path['mask_url'] = join(path['input'], 'mask_url.txt')
path['photo_z'] = join(path['output'], 'photo_z.fits')
path['mcm'] = create(join(path['output'], 'mcm'))
if args.cat_sims_path:
path['cat_sims'] = create(os.path.abspath(args.cat_sims_path))
else:
path['cat_sims'] = join(path['output'], 'cat_sims')
for f in fields:
path['mask_sec_'+f] = \
join(path['input'], 'mask_arcsec_{}.fits.gz'.format(f))
path['mask_'+f] = join(path['output'], 'mask_{}.fits'.format(f))
path['m_'+f] = join(path['output'], 'mult_corr_{}.fits'.format(f))
path['cat_'+f] = join(path['output'], 'cat_{}.fits'.format(f))
path['map_'+f] = join(path['output'], 'map_{}.fits'.format(f))
path['cl_'+f] = join(path['output'], 'cl_{}.fits'.format(f))
path['cl_sims_'+f] = join(path['output'], 'cl_sims_{}.fits'.format(f))
return path
def is_run_and_check(args, fields, path, n_sims_cov):
    """ Determine which modules to run and do some preliminary check.

    Args:
        args: the arguments read by the parser.
        fields: list of the observed fields.
        path: dictionary with all the necessary paths.
        n_sims_cov: number of simulated catalogues expected per field.

    Returns:
        is_run: dictionary with the modules to be run.
        warning: true if it generated some warning.

    """
    # Local variables
    warning = False
    is_run = {}
    ex = os.path.exists

    def missing(key):
        # True if the given path does not exist
        return not ex(path[key])

    def missing_per_field(prefix):
        # True if the product 'prefix+field' is missing for any field
        return np.array([missing(prefix + f) for f in fields]).any()

    def sims_incomplete():
        # True unless the cat_sims folder exists and contains the
        # expected number of simulated catalogues for every field.
        # Raw string avoids the invalid '\.' escape sequence (W605).
        if missing('cat_sims'):
            return True
        for f in fields:
            sims = [x for x in os.listdir(path['cat_sims'])
                    if re.match(r'.+{}\.fits'.format(f), x)]
            if len(sims) != n_sims_cov:
                return True
        return False

    # Determine which modules have to be run, by checking the existence
    # of the output files and the arguments passed by the user
    is_run['mask'] = missing_per_field('mask_')
    if args.run_mask or args.run_all:
        is_run['mask'] = True
    is_run['mult'] = missing_per_field('m_')
    if args.run_mult or args.run_all:
        is_run['mult'] = True
    is_run['pz'] = missing('photo_z')
    if args.run_pz or args.run_all:
        is_run['pz'] = True
    is_run['cat'] = missing_per_field('cat_')
    if args.run_cat or args.run_all:
        is_run['cat'] = True
    is_run['map'] = missing_per_field('map_')
    if args.run_map or args.run_all:
        is_run['map'] = True
    is_run['cl'] = missing_per_field('cl_')
    if args.run_cl or args.run_all:
        is_run['cl'] = True
    is_run['cat_sims'] = sims_incomplete()
    if args.run_cat_sims or args.run_all:
        is_run['cat_sims'] = True
    is_run['cl_sims'] = missing_per_field('cl_sims_')
    if args.run_cl_sims or args.run_all:
        is_run['cl_sims'] = True
    # Check the existence of the required input files for each module,
    # disabling it (with a warning) when they are not found
    if is_run['mask']:
        if (missing('mask_url') or missing_per_field('mask_sec_')
                or missing('cat_full')):
            print(
                'WARNING: I will skip the MASK module. Input files not found!')
            # Flush added for consistency with the other modules
            sys.stdout.flush()
            is_run['mask'] = False
            warning = True
    else:
        print('I will skip the MASK module. Output files already there!')
        sys.stdout.flush()
    if is_run['mult']:
        if ((not is_run['mask'] and missing_per_field('mask_'))
                or missing('cat_full')):
            print('WARNING: I will skip the MULT_CORR module. Input file not '
                  'found!')
            sys.stdout.flush()
            is_run['mult'] = False
            warning = True
    else:
        print('I will skip the MULT_CORR module. Output files already there!')
        sys.stdout.flush()
    if is_run['pz']:
        test1 = not is_run['mask'] and missing_per_field('mask_')
        test3 = not is_run['mult'] and missing_per_field('m_')
        if test1 or missing('cat_full') or test3:
            print('WARNING: I will skip the PHOTO_Z module. Input files not '
                  'found!')
            sys.stdout.flush()
            is_run['pz'] = False
            warning = True
    else:
        print('I will skip the PHOTO_Z module. Output file already there!')
        sys.stdout.flush()
    if is_run['cat']:
        if (missing('cat_full')
                or (not is_run['mult'] and missing_per_field('m_'))):
            print('WARNING: I will skip the CATALOGUE module. Input file not '
                  'found!')
            sys.stdout.flush()
            is_run['cat'] = False
            warning = True
    else:
        print('I will skip the CATALOGUE module. Output files already there!')
        sys.stdout.flush()
    if is_run['map']:
        test1 = not is_run['mask'] and missing_per_field('mask_')
        test2 = not is_run['cat'] and missing_per_field('cat_')
        if test1 or test2:
            print('WARNING: I will skip the MAP module. Input files not '
                  'found!')
            sys.stdout.flush()
            is_run['map'] = False
            warning = True
    else:
        print('I will skip the MAP module. Output files already there!')
        sys.stdout.flush()
    if is_run['cl']:
        test1 = not is_run['mask'] and missing_per_field('mask_')
        test2 = not is_run['cat'] and missing_per_field('cat_')
        if test1 or test2:
            print('WARNING: I will skip the CL module. Input files not found!')
            sys.stdout.flush()
            is_run['cl'] = False
            warning = True
    else:
        print('I will skip the CL module. Output files already there!')
        sys.stdout.flush()
    if is_run['cat_sims']:
        test1 = not is_run['mask'] and missing_per_field('mask_')
        test2 = not is_run['cat'] and missing_per_field('cat_')
        if test1 or test2:
            print('WARNING: I will skip the CAT_SIMS module. Input files not '
                  'found!')
            sys.stdout.flush()
            is_run['cat_sims'] = False
            warning = True
    else:
        print('I will skip the CAT_SIMS module. Output files already there!')
        sys.stdout.flush()
    if is_run['cl_sims']:
        test1 = not is_run['mask'] and missing_per_field('mask_')
        test2 = not is_run['cl'] and missing_per_field('cl_')
        test3 = not is_run['cat_sims'] and sims_incomplete()
        if test1 or test2 or test3:
            print('WARNING: I will skip the CL_SIMS module. Input files not '
                  'found!')
            sys.stdout.flush()
            is_run['cl_sims'] = False
            warning = True
    else:
        print('I will skip the CL_SIMS module. Output files already there!')
        sys.stdout.flush()
    return is_run, warning
| 13,806 | 35.720745 | 79 | py |
kl_sample | kl_sample-master/kl_sample/plots_b.py | import os
import sys
import numpy as np
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import kl_sample.io as io
import kl_sample.reshape as rsh
import kl_sample.settings as set
import kl_sample.cosmo as cosmo_tools
def plots(args):
    """ Generate plots for the papers.

    NOTE(review): this function currently contains debugging leftovers.
    It loads a best-fit Cl file from a hard-coded user path, dumps the
    intermediate spectra to 'cls_all.npz' and then calls exit(1), so all
    the plotting code below that point is unreachable. Confirm whether
    the debug block should be removed before relying on this module.

    Args:
        args: the arguments read by the parser.

    Returns:
        saves the noise and Cl plots (pdf) to the output folder.

    """
    fields = set.FIELDS_CFHTLENS
    # Define absolute paths
    path = {}
    path['params'] = io.path_exists_or_error(args.params_file)
    path['mcm'] = io.path_exists_or_error(io.read_param(args.params_file, 'mcm', type='path'))+'/'
    path['fourier'] = io.path_exists_or_error('{}/data/data_fourier.fits'.format(sys.path[0]))
    path['real'] = io.path_exists_or_error('{}/data/data_real.fits'.format(sys.path[0]))
    path['output'] = io.path_exists_or_create(os.path.abspath(args.output_path))+'/'
    # Read data: measured spectra, noise, simulations and photo-z's
    ell = io.read_from_fits(path['fourier'], 'ELL')
    cl_EE = io.read_from_fits(path['fourier'], 'CL_EE')
    cl_BB = io.read_from_fits(path['fourier'], 'CL_BB')
    noise_EE = io.read_from_fits(path['fourier'], 'CL_EE_NOISE')
    noise_BB = io.read_from_fits(path['fourier'], 'CL_BB_NOISE')
    sims_EE = io.read_from_fits(path['fourier'], 'CL_SIM_EE')
    sims_BB = io.read_from_fits(path['fourier'], 'CL_SIM_BB')
    pz = io.read_from_fits(path['fourier'], 'PHOTO_Z')
    n_eff = io.read_from_fits(path['fourier'], 'N_EFF')
    sigma_g = io.read_from_fits(path['fourier'], 'SIGMA_G')
    pz_r = io.read_from_fits(path['real'], 'PHOTO_Z')
    n_eff_r = io.read_from_fits(path['real'], 'N_EFF')
    sigma_g_r = io.read_from_fits(path['real'], 'SIGMA_G')
    # Clean data from noise (subtract the noise bias)
    cl_EE = rsh.clean_cl(cl_EE, noise_EE)
    sims_EE = rsh.clean_cl(sims_EE, noise_EE)
    cl_BB = rsh.clean_cl(cl_BB, noise_BB)
    sims_BB = rsh.clean_cl(sims_BB, noise_BB)
    # Create array with cosmo parameters (central values only)
    params_name = ['h', 'omega_c', 'omega_b', 'ln10_A_s', 'n_s']
    params_val = io.read_cosmo_array(path['params'], params_name)[:,1]
    # Get theory Cl's
    bp = set.BANDPOWERS
    # NOTE(review): n_bins is set to the number of bandpowers here —
    # confirm it actually matches the number of tomographic bins
    n_bins = len(bp)
    cosmo = cosmo_tools.get_cosmo_ccl(params_val)
    th_cl = cosmo_tools.get_cls_ccl(params_val, cosmo, pz, bp[-1,-1])
    # NOTE(review): debug block — local import and hard-coded user path
    from astropy.io import fits
    d=fits.open("/users/groups/damongebellini/data/preliminary/7bins/cls_bf.fits")
    cl_ee=d[2].data[:len(th_cl)]
    # th_cl = rsh.bin_cl(th_cl, bp)
    tot_ell = np.arange(bp[-1,-1]+1)
    # Couple/decouple both the CCL theory and the loaded best-fit Cl's
    # with the mode-coupling matrices
    th_cl,th_cl_BB = rsh.couple_decouple_cl(tot_ell, th_cl, path['mcm'], len(fields), n_bins, len(bp),
                                            return_BB=True)
    th_clb,th_cl_BBb = rsh.couple_decouple_cl(tot_ell, cl_ee, path['mcm'], len(fields), n_bins, len(bp),
                                              return_BB=True)
    # Per-field covariances from the simulations, used as weights to
    # combine the fields
    cov_pf = rsh.get_covmat_cl(sims_EE)
    cov_pf_BB = rsh.get_covmat_cl(sims_BB)
    th_cl = rsh.unify_fields_cl(th_cl, cov_pf)
    th_cl_BB = rsh.unify_fields_cl(th_cl_BB, cov_pf_BB)
    th_clb = rsh.unify_fields_cl(th_clb, cov_pf)
    th_cl_BBb = rsh.unify_fields_cl(th_cl_BBb, cov_pf_BB)
    # Unify fields
    cl_EE = rsh.unify_fields_cl(cl_EE, cov_pf)
    noise_EE = rsh.unify_fields_cl(noise_EE, cov_pf)
    sims_EE = rsh.unify_fields_cl(sims_EE, cov_pf)
    cl_BB = rsh.unify_fields_cl(cl_BB, cov_pf_BB)
    noise_BB = rsh.unify_fields_cl(noise_BB, cov_pf_BB)
    sims_BB = rsh.unify_fields_cl(sims_BB, cov_pf_BB)
    # Average simulations
    sims_EE_avg = np.average(sims_EE, axis=0)
    sims_BB_avg = np.average(sims_BB, axis=0)
    # Calculate covmat of the field-combined spectra
    covmat_EE = rsh.get_covmat_cl(sims_EE)
    covmat_BB = rsh.get_covmat_cl(sims_BB)
    # NOTE(review): debug dump + early exit; everything below is dead code
    np.savez("cls_all",dd_EE=cl_EE,dd_BB=cl_BB,
             tt_EE=th_cl,tt_BB=th_cl_BB,tb_EE=th_clb,tb_BB=th_cl_BBb,
             ss_EE=sims_EE,ss_BB=sims_BB,cv_EE=covmat_EE,cv_BB=covmat_BB)
    exit(1)
    # Noise based on n_eff and sigma_g (fourier catalogue)
    n_eff = n_eff*(180.*60./np.pi)**2. # converted in steradian^-1
    noise_ns = np.array([np.diag(sigma_g**2/n_eff) for x in ell])
    # Noise based on n_eff and sigma_g (real-space catalogue)
    n_eff_r = n_eff_r*(180.*60./np.pi)**2. # converted in steradian^-1
    noise_ns_r = np.array([np.diag(sigma_g_r**2/n_eff_r) for x in ell])
    # Average noise over the multipoles covered by the bandpowers
    noise_EE_avg = rsh.debin_cl(noise_EE, bp)
    noise_EE_avg = np.average(noise_EE_avg[bp[0,0]:bp[-1,-1]],axis=0)
    noise_EE_avg = np.array([noise_EE_avg for x in ell])
    # Plot noise, one figure per bin pair
    x = ell
    for b1 in range(n_bins):
        for b2 in range(b1,n_bins):
            y1 = noise_ns[:,b1,b2]
            y2 = noise_EE_avg[:,b1,b2]
            y3 = noise_EE[:,b1,b2]
            y4 = noise_ns_r[:,b1,b2]
            plt.figure()
            plt.plot(x, y1, label = '$\\sigma_g^2/n_{eff}$')
            plt.plot(x, y2, label = 'Average')
            plt.plot(x, y3, 'o', label = 'Noise')
            plt.plot(x, y4, label = '$\\sigma_g^2/n_{eff}$ real')
            plt.xscale('log')
            plt.yscale('log')
            plt.legend(loc='best')
            plt.title('Bins {} {}'.format(b1+1,b2+1))
            plt.xlabel('$\\ell$')
            plt.ylabel('$N_\\ell^{EE}$')
            plt.savefig('{}noise_bin{}{}.pdf'.format(path['output'],b1+1,b2+1))
            plt.close()
    # Plot Cl, one figure per bin pair, with errors from the covariance
    x = ell
    for b1 in range(n_bins):
        for b2 in range(b1,n_bins):
            y1 = cl_EE[:,b1,b2]
            y2 = th_cl[:,b1,b2]
            y3 = sims_EE_avg[:,b1,b2]
            err1 = np.sqrt(np.diag(covmat_EE[:,:,b1,b1,b2,b2]))
            plt.figure()
            plt.errorbar(x, y1, yerr=err1, fmt='-o', label='data')
            plt.plot(x, y2, label='theory')
            plt.plot(x, y3, label='simulations')
            plt.xscale('log')
            plt.legend(loc='best')
            plt.title('Bins {} {}'.format(b1+1,b2+1))
            plt.xlabel('$\\ell$')
            plt.ylabel('$C_\\ell^{EE}$')
            plt.savefig('{}cl_bin{}{}.pdf'.format(path['output'],b1+1,b2+1))
            plt.close()
    return
| 6,006 | 36.310559 | 104 | py |
kl_sample | kl_sample-master/kl_sample/cosmo.py | """
Module containing all the relevant functions
to compute and manipulate cosmology.
Functions:
- get_cosmo_mask(params)
- get_cosmo_ccl(params)
- get_cls_ccl(params, cosmo, pz, ell_max)
- get_xipm_ccl(cosmo, cls, theta)
"""
import numpy as np
import pyccl as ccl
import kl_sample.reshape as rsh
import kl_sample.likelihood as lkl
import kl_sample.settings as set
# ------------------- Masks --------------------------------------------------#
def get_cosmo_mask(params):
    """ Infer from the cosmological parameters
    array which are the varying parameters.

    Args:
        params: array containing the cosmological parameters.
            Each entry holds (lower bound, central value, upper bound).

    Returns:
        mask: boolean array with varying parameters.

    """
    def _varies(bounds):
        low, val, high = bounds[0], bounds[1], bounds[2]
        # Unbounded on either side counts as varying
        if low is None or high is None:
            return True
        # A non-degenerate interval means the parameter is sampled
        return low < val or val < high

    return np.array([_varies(p) for p in params])
# ------------------- CCL related --------------------------------------------#
def get_cosmo_ccl(params):
    """ Get cosmo object.

    Args:
        params: array with cosmological parameters, ordered as
            (h, omega_c, omega_b, ln10_A_s, n_s, w_0, w_A, ...).

    Returns:
        cosmo object from CCL.

    """
    h, om_c, om_b, ln10_a_s, n_s, w_0, w_a = params[:7]
    # CCL wants density fractions, while params carries the physical
    # densities omega = Omega*h^2
    kwargs = {
        'h': h,
        'Omega_c': om_c/h**2.,
        'Omega_b': om_b/h**2.,
        'A_s': (10.**(-10.))*np.exp(ln10_a_s),
        'n_s': n_s,
        'w0': w_0,
        'wa': w_a,
        'transfer_function': 'boltzmann_class',
    }
    return ccl.Cosmology(**kwargs)
def get_cls_ccl(params, cosmo, pz, ell_max, add_ia=False):
    """ Get theory Cl's.

    Args:
        params: array with cosmological parameters.
        cosmo: cosmo object from CCL.
        pz: probability distribution for each redshift bin (first row
            is the redshift grid).
        ell_max: maximum multipole.
        add_ia: if True, include the intrinsic-alignment bias.

    Returns:
        array with Cl's, shape (ell, bin1, bin2).

    """
    z_grid = pz[0].astype(np.float64)
    dndz = pz[1:].astype(np.float64)
    n_bins = len(dndz)
    n_ells = ell_max+1
    if add_ia:
        # Intrinsic-alignment bias, scaled with the growth factor
        f_z = np.ones(len(z_grid))
        Omega_m = (params[1]+params[2])/params[0]**2.
        D_z = ccl.background.growth_factor(cosmo, 1./(1.+z_grid))
        b_z = -params[7]*set.C_1*set.RHO_CRIT*Omega_m/D_z
        b_z = np.outer(set.L_I_OVER_L_0**params[8], b_z)
        tracers = [ccl.WeakLensingTracer(cosmo,
                                         dndz=(z_grid, dndz[nb]),
                                         ia_bias=(z_grid, b_z[nb]),
                                         red_frac=(z_grid, f_z))
                   for nb in range(n_bins)]
    else:
        tracers = [ccl.WeakLensingTracer(cosmo, dndz=(z_grid, dndz[nb]))
                   for nb in range(n_bins)]
    # Cl's for every independent bin pair, symmetrized
    ells = np.arange(n_ells)
    cls = np.zeros((n_bins, n_bins, n_ells))
    for nb1 in range(n_bins):
        for nb2 in range(nb1, n_bins):
            spec = ccl.angular_cl(cosmo, tracers[nb1], tracers[nb2], ells)
            cls[nb1, nb2] = spec
            cls[nb2, nb1] = spec
    return np.transpose(cls, axes=[2, 0, 1])
def get_xipm_ccl(cosmo, cls, theta):
    """ Get theory correlation function.

    Args:
        cosmo: cosmo object from CCL.
        cls: array of cls for each pair of bins, shape (ell, bin1, bin2).
        theta: array with angles for the correlation function.

    Returns:
        correlation function, shape (pm, theta, bin1, bin2).

    """
    n_bins = cls.shape[-1]
    ells = np.arange(len(cls))
    # Index 0 is xi_plus ('L+'), index 1 is xi_minus ('L-')
    xi_th = np.zeros((2, n_bins, n_bins, len(theta)))
    for pm, ctype in enumerate(['L+', 'L-']):
        for b1 in range(n_bins):
            for b2 in range(n_bins):
                for nt, angle in enumerate(theta):
                    xi_th[pm, b1, b2, nt] = ccl.correlation(
                        cosmo, ells, cls[:, b1, b2], angle,
                        corr_type=ctype, method='FFTLog')
    # Reorder to (pm, theta, bin1, bin2)
    return np.transpose(xi_th, axes=[0, 3, 1, 2])
# ------------------- KL related ---------------------------------------------#
def get_theory(var, full, mask, data, settings):
    """ Get theory correlation function or Cl's.

    Args:
        var: array containing the varying cosmo parameters.
        full: array containing all the cosmo parameters
            (each entry holds the bounds and the central value).
        mask: array containing the mask for the cosmo parameters.
        data: dictionary with all the data used
        settings: dictionary with all the settings used

    Returns:
        array with correlation function or Cl's, flattened.

    """
    # Local variables
    pz = data['photo_z']
    theta = data['theta_ell']
    ell_max = settings['ell_max']
    bp = settings['bp_ell']
    ell = np.arange(bp[-1, -1] + 1)
    nf = settings['n_fields']
    nb = settings['n_bins']
    # Merge in a single array varying and fixed parameters
    # (fixed parameters take their central value full[i][1])
    pars = np.empty(len(mask))
    count1 = 0
    for count2 in range(len(pars)):
        if not mask[count2]:
            pars[count2] = full[count2][1]
        else:
            pars[count2] = var[count1]
            count1 = count1+1
    # Get corr
    cosmo = get_cosmo_ccl(pars)
    if set.THEORY == 'CCL':
        corr = get_cls_ccl(pars, cosmo, pz, ell_max, add_ia=settings['add_ia'])
        if settings['space'] == 'real':
            corr = get_xipm_ccl(cosmo, corr, theta)
    elif set.THEORY == 'Camera':
        # Scale a precomputed Cl template by D(z)*Omega_m*sigma_8 for
        # each tomographic bin (z taken at the bin centre)
        corr = settings['cls_template']
        Om = (pars[1] + pars[2])/pars[0]**2.
        s8 = get_sigma_8(var, full, mask)
        Oms8 = np.zeros(len(set.Z_BINS))
        for nbin, bin in enumerate(set.Z_BINS):
            z = (bin[0] + bin[1])/2.
            D = cosmo.growth_factor(1./(1. + z))
            Oms8[nbin] = D*Om*s8
        # Multiply twice the template by Oms8 array along the last two axes
        corr = mult_elementwiselastaxis(corr, Oms8)
        corr = np.moveaxis(corr, [-2], [-1])
        corr = mult_elementwiselastaxis(corr, Oms8)
    # Keep cls coupled or not
    if set.KEEP_CELLS_COUPLED:
        corr = rsh.couple_cl(ell, corr, settings['mcm'], nf, nb, len(bp))
    else:
        corr = rsh.couple_decouple_cl(ell, corr, settings['mcm'], nf, nb,
                                      len(bp))
    # Apply KL
    if settings['method'] in ['kl_off_diag', 'kl_diag']:
        corr = lkl.apply_kl(data['kl_t'], corr, settings)
    if settings['method'] == 'kl_diag':
        is_diag = True
    else:
        is_diag = False
    # Reshape corr
    if settings['space'] == 'real':
        corr = rsh.flatten_xipm(corr, settings)
        corr = rsh.mask_xipm(corr, data['mask_theta_ell'], settings)
    else:
        corr = rsh.mask_cl(corr, is_diag=is_diag)
        corr = rsh.unify_fields_cl(corr, data['cov_pf'], is_diag=is_diag,
                                   pinv=set.PINV)
    # Apply BNT if required
    if set.BNT:
        corr = apply_bnt(corr, data['bnt_mat'])
    # NOTE(review): in real space corr is already 1-D at this point, so
    # this flatten_cl call looks suspicious — confirm the real-space
    # configuration never reaches here with is_diag=False
    corr = rsh.flatten_cl(corr, is_diag=is_diag)
    return corr
def get_sigma_8(var, full, mask):
    """ Compute sigma_8 for the given parameters.

    Args:
        var: array containing the varying cosmo parameters.
        full: array containing all the cosmo parameters.
        mask: array containing the mask for the cosmo parameters.

    Returns:
        sigma_8 from CCL.

    """
    # Merge varying and fixed parameters into a single array
    pars = np.empty(len(mask))
    n_var = 0
    for idx in range(len(pars)):
        if mask[idx]:
            pars[idx] = var[n_var]
            n_var += 1
        else:
            pars[idx] = full[idx][1]
    return ccl.sigma8(get_cosmo_ccl(pars))
def mult_elementwiselastaxis(A, B):
    """ Scale A element-wise along its last axis by B.

    The outer product of A and B is reshaped to A.shape + B.shape and
    the diagonal over the last two axes is taken.
    """
    return np.diagonal(np.outer(A, B).reshape(A.shape + B.shape),
                       axis1=-2, axis2=-1)
class BNT(object):
    """ Build the BNT (nulling) transformation matrix from the
    redshift distributions of the tomographic bins.
    """

    def __init__(self, params, photo_z):
        # photo_z[0] is the redshift grid, the remaining rows are the
        # per-bin distributions
        cosmo = get_cosmo_ccl(params[:, 1])
        self.z = photo_z[0]
        self.chi = cosmo.comoving_radial_distance(1./(1.+photo_z[0]))
        self.n_i_list = photo_z[1:]
        self.nbins = len(self.n_i_list)

    def get_matrix(self):
        """ Return the (nbins x nbins) lower-triangular BNT matrix. """
        norms = []
        inv_chi_norms = []
        for nz in self.n_i_list:
            norms.append(np.trapz(nz, self.z))
            inv_chi_norms.append(np.trapz(nz / self.chi, self.z))
        bnt = np.eye(self.nbins)
        bnt[1, 0] = -1.
        for i in range(2, self.nbins):
            coeffs = np.array([[norms[i-1], norms[i-2]],
                               [inv_chi_norms[i-1], inv_chi_norms[i-2]]])
            rhs = -1. * np.array([norms[i], inv_chi_norms[i]])
            sol = np.dot(np.linalg.inv(coeffs), rhs)
            bnt[i, i-1] = sol[0]
            bnt[i, i-2] = sol[1]
        return bnt
def apply_bnt(cl, bnt):
    """ Apply the BNT matrix to both tomographic indices of cl. """
    rotated = np.dot(cl, bnt)
    rotated = np.dot(np.moveaxis(rotated, [-1], [-2]), bnt)
    return rotated
| 8,962 | 27.453968 | 79 | py |
kl_sample | kl_sample-master/kl_sample/reshape.py | """
This module contains functions to reshape and manipulate
the correlation function and power spectra.
Functions:
- mask_cl(cl)
- unify_fields_cl(cl, sims)
- position_xipm(n, n_bins, n_theta)
- unflatten_xipm(array)
- flatten_xipm(corr, settings)
- mask_xipm(array, mask, settings)
- unmask_xipm(array, mask)
"""
import os
import numpy as np
import kl_sample.settings as set
import pymaster as nmt
# ------------------- Manipulate Cl's ----------------------------------------#
def mask_cl(cl, is_diag=False):
    """ Select the multipoles flagged by set.MASK_ELL.

    The ell axis sits at -2 for diagonal (KL) spectra and at -3
    otherwise.
    """
    ell_axis = -2 if is_diag else -3
    masked = np.moveaxis(cl, [ell_axis], [0])[set.MASK_ELL]
    return np.moveaxis(masked, [0], [ell_axis])
def clean_cl(cl, noise):
    """ Subtract the noise bias from the measured Cl's.

    Accepts either a 4-dim array (single realisation) or a 5-dim array
    whose second axis runs over realisations sharing the same noise.
    """
    if cl.ndim == 4:
        return cl - noise
    if cl.ndim == 5:
        n_sims = cl.shape[1]
        cleaned = np.array([cl[:, ns] - noise for ns in range(n_sims)])
        return np.transpose(cleaned, axes=(1, 0, 2, 3, 4))
    raise ValueError('Expected Cl\'s array with dimensions 4 or 5. Found'
                     ' {}'.format(cl.ndim))
def flatten_cl(cl, is_diag=False):
    """
    Flatten the (ell, z_bin_1, z_bin_2) axes of a cl array into one.

    For each ell the upper-triangle bin pairs are piled up in row
    order (0,0), (0,1), ..., (0,nb-1), (1,1), ..., (nb-1,nb-1), and
    the ell blocks are concatenated. Leading axes (simulations,
    fields, ...) are preserved. With is_diag the input has shape
    (A, ell, z_bin) and only the last two axes are flattened.
    """
    if is_diag:
        flat = cl
    else:
        rows, cols = np.triu_indices(cl.shape[-1])
        flat = cl[..., rows, cols]
    joined = flat.shape[-2] * flat.shape[-1]
    return flat.reshape(flat.shape[:-2] + (joined,))
def unflatten_cl(cl, shape, is_diag=False):
    """
    Inverse of flatten_cl: restore shape (A, ell, z_bin_1, z_bin_2)
    (or (A, ell, z_bin) when is_diag) from a flattened cl array,
    filling both triangles symmetrically.
    """
    if is_diag:
        return cl.reshape(shape)
    rows, cols = np.triu_indices(shape[-1])
    stacked = cl.reshape(shape[:-2] + (len(rows),))
    restored = np.zeros(shape)
    # Write the triangle values to both (i, j) and (j, i)
    restored[..., rows, cols] = stacked
    restored[..., cols, rows] = stacked
    return restored
def flatten_covmat(cov, is_diag=False):
    """
    Flatten both cl-like index groups of a covariance matrix with shape
    (A, ell_c1, ell_c2, z_bin_c1_1, z_bin_c2_1, z_bin_c1_2, z_bin_c2_2)
    into a square (A, s, s) array, by applying flatten_cl twice after
    moving the second-cl axes out of the way.
    """
    if is_diag:
        shuffled = np.moveaxis(cov, [-3, -2], [-2, -3])
        offset = 2
    else:
        shuffled = np.moveaxis(cov, [-5, -4, -3, -2], [-3, -5, -2, -4])
        offset = 3
    half = flatten_cl(shuffled, is_diag)
    half = np.moveaxis(half, [-1], [-1 - offset])
    return flatten_cl(half, is_diag)
def unflatten_covmat(cov, cl_shape, is_diag=False):
    """
    Inverse of flatten_covmat: expand a square (A, s, s) covariance
    back to the full cl-index layout by applying unflatten_cl on each
    of the two flattened axes.
    """
    expanded = np.apply_along_axis(unflatten_cl, -1, cov, cl_shape, is_diag)
    expanded = np.apply_along_axis(unflatten_cl, -1 - len(cl_shape),
                                   expanded, cl_shape, is_diag)
    if is_diag:
        return np.moveaxis(expanded, [-3, -2], [-2, -3])
    return np.moveaxis(expanded, [-5, -4, -3, -2], [-4, -2, -5, -3])
def get_covmat_cl(sims, is_diag=False):
    """ Estimate the cl covariance from a stack of simulations.

    The simulations are flattened, the (biased) sample covariance is
    computed over the realisation axis, and the result is expanded
    back to the full cl-index layout.
    """
    flat = flatten_cl(sims, is_diag)
    if flat.ndim == 2:
        cov = np.cov(flat.T, bias=True)
    elif flat.ndim == 3:
        cov = np.array([np.cov(sim.T, bias=True) for sim in flat])
    else:
        raise ValueError('Input dimensions can be either 2 or 3, found {}'
                         ''.format(flat.ndim))
    n_axes = 2 if is_diag else 3
    return unflatten_covmat(cov, sims.shape[-n_axes:], is_diag)
def unify_fields_cl(cl, cov_pf, is_diag=False, pinv=False):
    """ Combine the per-field cl's with inverse-covariance weights.

    Each field is weighted by the inverse of its covariance; the
    combination is normalised by the inverse of the summed weights.
    With pinv the (pseudo-)inverse is used throughout.
    """
    flat = flatten_cl(cl, is_diag)
    cov = flatten_covmat(cov_pf, is_diag)
    inverse = np.linalg.pinv if pinv else np.linalg.inv
    inv_cov = np.array([inverse(c) for c in cov])
    tot_cov = inverse(np.sum(inv_cov, axis=0))
    weighted = np.array([np.dot(inv_cov[nf], flat[nf].T)
                         for nf in range(len(cl))])
    combined = np.dot(tot_cov, np.sum(weighted, axis=0)).T
    return unflatten_cl(combined, cl.shape[1:], is_diag=is_diag)
def debin_cl(cl, bp):
    """ Expand bandpower-averaged Cl's back to one value per multipole.

    Args:
        cl: array whose -3 axis runs over the bandpowers. Note that the
            per-band slicing uses axis 0, so cl is expected to be
            exactly 3-dimensional with the band axis first.
        bp: array of (ell_min, ell_max) bandpower edges, ell_max
            exclusive.

    Returns:
        array whose -3 axis has bp[-1, -1] entries, each bandpower
        value repeated over the multipoles it covers.

    Raises:
        ValueError: if the cl bandpower axis does not match bp.

    """
    if cl.shape[-3] != bp.shape[0]:
        raise ValueError('Bandpowers and Cl shape mismatch!')
    new_shape = list(cl.shape)
    new_shape[-3] = bp[-1, -1]
    cl_dbp = np.zeros(tuple(new_shape))
    cl_dbp = np.moveaxis(cl_dbp, [-3], [0])
    # `band` instead of `range`: the original shadowed the builtin
    for count, band in enumerate(bp):
        n_rep = band[1] - band[0]
        # Repeat the band value once per covered multipole
        cl_ext = np.repeat(cl[count], n_rep)
        cl_ext = cl_ext.reshape(cl.shape[1:] + (n_rep,))
        cl_ext = np.moveaxis(cl_ext, [-1], [0])
        cl_dbp[band[0]:band[1]] = cl_ext
    return np.moveaxis(cl_dbp, [0], [-3])
def bin_cl(cl, bp):
    """ Average Cl's into bandpowers.

    Args:
        cl: array whose -3 axis runs over multipoles, starting either
            at ell = bp[0, 0] or at ell = 0.
        bp: array of (ell_min, ell_max) bandpower edges, ell_max
            exclusive.

    Returns:
        array whose -3 axis has one entry per bandpower, each the mean
        of the multipoles it covers.

    Raises:
        ValueError: if the cl multipole axis matches neither
            convention.

    """
    if cl.shape[-3] == bp[-1, -1] - bp[0, 0]:
        ell_min = bp[0, 0]
    elif cl.shape[-3] == bp[-1, -1] + 1:
        ell_min = 0
    else:
        raise ValueError('Bandpowers and Cl shape mismatch!')
    new_shape = list(cl.shape)
    new_shape[-3] = bp.shape[0]
    cl_bp = np.zeros(tuple(new_shape))
    cl_bp = np.moveaxis(cl_bp, [-3], [0])
    # Hoisted out of the loop (loop-invariant); `band` instead of
    # `range`, which the original shadowed
    cl_re = np.moveaxis(cl, [-3], [0])
    for count, band in enumerate(bp):
        cl_bp[count] = np.average(cl_re[band[0]-ell_min:band[1]-ell_min],
                                  axis=0)
    return np.moveaxis(cl_bp, [0], [-3])
def couple_cl(ell, cl, mcm_path, n_fields, n_bins, n_bp, return_BB=False):
    """ Couple the theory Cl's with the per-field mode-coupling
    matrices (NaMaster flat-sky workspaces read from mcm_path).

    Returns the EE spectra with shape (field, bp, bin1, bin2), and the
    BB ones too when return_BB is set.
    """
    # NaMaster wants the four spin-2 components; only EE is non-zero
    spin2_cl = np.moveaxis(cl, [0], [-1])
    zeros = np.zeros(spin2_cl.shape)
    spin2_cl = np.stack((spin2_cl, zeros, zeros, zeros))
    spin2_cl = np.moveaxis(spin2_cl, [0], [-2])
    coupled_EE = np.zeros((n_fields, n_bins, n_bins, n_bp))
    coupled_BB = np.zeros((n_fields, n_bins, n_bins, n_bp))
    for b1 in range(n_bins):
        for b2 in range(b1, n_bins):
            for f in range(n_fields):
                wf = nmt.NmtWorkspaceFlat()
                wf.read_from(os.path.join(
                    mcm_path, 'mcm_W{}_Z{}{}.dat'.format(f+1, b1+1, b2+1)))
                coupled = wf.couple_cell(ell, spin2_cl[b1, b2])
                coupled_EE[f, b1, b2] = coupled[0]
                coupled_EE[f, b2, b1] = coupled[0]
                coupled_BB[f, b1, b2] = coupled[-1]
                coupled_BB[f, b2, b1] = coupled[-1]
    coupled_EE = np.moveaxis(coupled_EE, [-1], [-3])
    coupled_BB = np.moveaxis(coupled_BB, [-1], [-3])
    if return_BB:
        return coupled_EE, coupled_BB
    return coupled_EE
def couple_decouple_cl(ell, cl, mcm_path, n_fields, n_bins, n_bp,
                       return_BB=False):
    """ Couple the theory Cl's with the per-field mode-coupling
    matrices and decouple them again into bandpowers (NaMaster
    flat-sky workspaces read from mcm_path).

    Returns the EE spectra with shape (field, bp, bin1, bin2), and the
    BB ones too when return_BB is set.
    """
    # NaMaster wants the four spin-2 components; only EE is non-zero
    spin2_cl = np.moveaxis(cl, [0], [-1])
    zeros = np.zeros(spin2_cl.shape)
    spin2_cl = np.stack((spin2_cl, zeros, zeros, zeros))
    spin2_cl = np.moveaxis(spin2_cl, [0], [-2])
    decoupled_EE = np.zeros((n_fields, n_bins, n_bins, n_bp))
    decoupled_BB = np.zeros((n_fields, n_bins, n_bins, n_bp))
    for b1 in range(n_bins):
        for b2 in range(b1, n_bins):
            for f in range(n_fields):
                wf = nmt.NmtWorkspaceFlat()
                wf.read_from(os.path.join(
                    mcm_path, 'mcm_W{}_Z{}{}.dat'.format(f+1, b1+1, b2+1)))
                spec = wf.couple_cell(ell, spin2_cl[b1, b2])
                spec = wf.decouple_cell(spec)
                decoupled_EE[f, b1, b2] = spec[0]
                decoupled_EE[f, b2, b1] = spec[0]
                decoupled_BB[f, b1, b2] = spec[-1]
                decoupled_BB[f, b2, b1] = spec[-1]
    decoupled_EE = np.moveaxis(decoupled_EE, [-1], [-3])
    decoupled_BB = np.moveaxis(decoupled_BB, [-1], [-3])
    if return_BB:
        return decoupled_EE, decoupled_BB
    return decoupled_EE
# ------------------- Flatten and unflatten correlation function -------------#
def position_xipm(n, n_bins, n_theta):
    """ Given the position in the array, find the
    corresponding position in the unflattened array.

    Args:
        n: position in the flattened array.
        n_bins: number of bins.
        n_theta: number of theta_ell variables.

    Returns:
        p_pm, p_theta, p_bin_1, p_bin_2.

    """
    # Total length: 2 (xi_plus/xi_minus) * n_theta * number of
    # independent bin pairs
    if n >= n_theta * n_bins * (n_bins + 1):
        raise ValueError("The input number is larger than expected!")
    # pair_idx selects the bin pair, inner the (pm, theta) slot
    pair_idx, inner = divmod(n, 2 * n_theta)
    # First n_theta entries of a pair block are xi_plus, the rest
    # xi_minus
    p_pm, p_theta = divmod(inner, n_theta)
    # Offsets at which each first-bin block starts
    starts = np.flip(np.array([np.arange(x, n_bins + 1).sum()
                               for x in np.arange(2, n_bins + 2)]), 0)
    p_bin_1 = np.where(starts <= pair_idx)[0][-1]
    p_bin_2 = pair_idx - starts[p_bin_1] + p_bin_1
    return p_pm, p_theta, p_bin_1, p_bin_2
def unflatten_xipm(array):
    """ Unflatten the correlation function.

    Args:
        array: flattened correlation function.

    Returns:
        reshaped correlation function (pm, theta, bin1, bin2).

    """
    n_bins = len(set.Z_BINS)
    n_theta = len(set.THETA_ARCMIN)
    xipm = np.zeros((2, n_theta, n_bins, n_bins))
    for idx, value in enumerate(array):
        # Map the flat index back to (pm, theta, bin1, bin2)
        pm, th, b1, b2 = position_xipm(idx, n_bins, n_theta)
        # The bin indices are symmetric
        xipm[pm, th, b1, b2] = value
        xipm[pm, th, b2, b1] = value
    return xipm
def flatten_xipm(corr, settings):
    """ Flatten the correlation function.

    Args:
        corr: correlation function, shape (pm, theta_ell, bin1, bin2)
            for the 'full' and 'kl_off_diag' methods, (pm, theta_ell,
            bin) otherwise.
        settings: dictionary with settings.

    Returns:
        flattened correlation function.

    """
    # Local variables
    n_theta_ell = settings['n_theta_ell']
    if settings['method'] in ['kl_off_diag', 'kl_diag']:
        n_bins = settings['n_kl']
    else:
        n_bins = settings['n_bins']
    # Flatten array
    if settings['method'] in ['full', 'kl_off_diag']:
        # Integer division: with the true division inherited from the
        # Python 2 version, n_data was a float and both np.empty and
        # range raised a TypeError under Python 3
        n_data = 2*n_theta_ell*n_bins*(n_bins+1)//2
        data_f = np.empty(n_data)
        for n in range(n_data):
            p_pm, p_tl, p_b1, p_b2 = position_xipm(n, n_bins, n_theta_ell)
            data_f[n] = corr[p_pm, p_tl, p_b1, p_b2]
    else:
        n_data = 2*n_theta_ell*n_bins
        data_f = np.empty(n_data)
        for n in range(n_data):
            div, mod = np.divmod(n, 2*n_theta_ell)
            if mod < n_theta_ell:
                p_pm = 0
                p_theta = mod
            else:
                p_pm = 1
                p_theta = mod-n_theta_ell
            p_bin = div
            data_f[n] = corr[p_pm, p_theta, p_bin]
    return data_f
# ------------------- Mask and Unmask correlation function -------------------#
def mask_xipm(array, mask, settings):
    """ Convert a unmasked array into a masked one.

    Args:
        array: array with the unmasked xipm (flattened).
        mask: mask that has been used (one block per bin combination).
        settings: dictionary with settings.

    Returns:
        array with masked xipm.

    """
    if settings['method'] in ['kl_off_diag', 'kl_diag']:
        n_bins = settings['n_kl']
    else:
        n_bins = settings['n_bins']
    if settings['method'] in ['full', 'kl_off_diag']:
        # Integer division: the Python 2 style `/2` produced a float
        # repetition count, which np.tile rejects under Python 3
        mask_tot = np.tile(mask.flatten(), n_bins*(n_bins+1)//2)
    else:
        mask_tot = np.tile(mask.flatten(), n_bins)
    return array[mask_tot]
def unmask_xipm(array, mask):
    """ Convert a flatten masked array into
    an unmasked one (still flatten).

    Args:
        array: array with the masked xipm.
        mask: mask that has been used.

    Returns:
        array with unmasked xipm, with zeros at the masked positions.

    Raises:
        IOError: if the length of the input array is not a multiple of
            the number of unmasked entries per block.

    """
    # Flatten mask
    mask_f = mask.flatten()
    # Number of times the per-block mask must be repeated to cover the
    # whole array
    div, mod = np.divmod(len(array), len(mask_f[mask_f]))
    # Bug fix: the original check was inverted (`mod == 0`), rejecting
    # every valid input whose length is an exact multiple
    if mod != 0:
        raise IOError('The length of the input array is not correct!')
    mask_f = np.tile(mask_f, div)
    # Positions where to write values
    pos = np.where(mask_f)[0]
    # Scatter the masked values back to their original positions
    xipm = np.zeros(len(mask_f))
    xipm[pos] = array
    return xipm
| 14,672 | 31.973034 | 79 | py |
kl_sample | kl_sample-master/kl_sample/run.py | """
This module contains the main function run, from where
it is possible to run an MCMC (emcee), or evaluate the
likelihood at one single point (single_point).
"""
import numpy as np
import kl_sample.io as io
import kl_sample.cosmo as cosmo_tools
import kl_sample.checks as checks
import kl_sample.likelihood as lkl
import kl_sample.reshape as rsh
import kl_sample.settings as set
import kl_sample.sampler as sampler
def run(args):
    """ Run with different samplers: emcee, single_point
    Args:
        args: the arguments read by the parser.
    Returns:
        saves to file the output (emcee) or just
        print on the screen the likelihood (single_point)
    """
    # ----------- Initialize -------------------------------------------------#
    # Define absolute paths and check the existence of each required file
    path = {
        'params': io.path_exists_or_error(args.params_file),
        'data': io.read_param(args.params_file, 'data', type='path'),
        'output': io.read_param(args.params_file, 'output', type='path')
    }
    io.path_exists_or_error(path['data'])
    io.path_exists_or_create(path['output'])
    # Create array with cosmo parameters (intrinsic alignment parameters
    # are only sampled on request)
    add_ia = io.read_param(path['params'], 'add_ia', type='bool')
    cosmo = {
        'names': ['h', 'omega_c', 'omega_b', 'ln10_A_s', 'n_s', 'w_0', 'w_A']
    }
    if add_ia:
        cosmo['names'].append('A_IA')
        cosmo['names'].append('beta_IA')
    cosmo['params'] = io.read_cosmo_array(path['params'], cosmo['names'])
    cosmo['mask'] = cosmo_tools.get_cosmo_mask(cosmo['params'])
    # Read and store the remaining parameters
    settings = {
        'sampler': io.read_param(path['params'], 'sampler'),
        'space': io.read_param(path['params'], 'space'),
        'method': io.read_param(path['params'], 'method'),
        'n_sims': io.read_param(path['params'], 'n_sims'),
        'add_ia': add_ia
    }
    # Sampler settings
    if settings['sampler'] == 'emcee':
        settings['n_walkers'] = \
            io.read_param(path['params'], 'n_walkers', type='int')
        settings['n_steps'] = \
            io.read_param(path['params'], 'n_steps', type='int')
        settings['n_threads'] = \
            io.read_param(path['params'], 'n_threads', type='int')
    # KL settings
    if settings['method'] in ['kl_diag', 'kl_off_diag']:
        settings['n_kl'] = io.read_param(path['params'], 'n_kl', type='int')
        settings['kl_scale_dep'] = \
            io.read_param(path['params'], 'kl_scale_dep', type='bool')
    # is_diag is used by the fourier-space reshaping routines below
    if settings['method'] == 'kl_diag':
        is_diag = True
    else:
        is_diag = False
    # Real/Fourier space settings and mcm path
    if settings['space'] == 'real':
        settings['ell_max'] = \
            io.read_param(path['params'], 'ell_max', type='int')
    elif settings['space'] == 'fourier':
        settings['bp_ell'] = set.BANDPOWERS
        settings['ell_max'] = settings['bp_ell'][-1, -1]
        settings['mcm'] = io.read_param(args.params_file, 'mcm', type='path')
        io.path_exists_or_error(settings['mcm'])
    # Check if there are unused parameters.
    checks.unused_params(cosmo, settings, path)
    # Perform sanity checks on the parameters and data file
    checks.sanity_checks(cosmo, settings, path)
    # Read data
    data = {
        'photo_z': io.read_from_fits(path['data'], 'photo_z')
    }
    if settings['space'] == 'real':
        data['theta_ell'] = np.array(set.THETA_ARCMIN)/60.  # theta in degrees
        data['mask_theta_ell'] = set.MASK_THETA
        data['corr_obs'] = io.read_from_fits(path['data'], 'xipm_obs')
        data['corr_sim'] = io.read_from_fits(path['data'], 'xipm_sim')
    elif settings['space'] == 'fourier':
        data['theta_ell'] = io.read_from_fits(path['data'], 'ELL')
        data['mask_theta_ell'] = set.MASK_ELL
        cl_EE = io.read_from_fits(path['data'], 'CL_EE')
        noise_EE = io.read_from_fits(path['data'], 'CL_EE_NOISE')
        sims_EE = io.read_from_fits(path['data'], 'CL_SIM_EE')
        data['corr_obs'] = rsh.clean_cl(cl_EE, noise_EE)
        data['corr_sim'] = rsh.clean_cl(sims_EE, noise_EE)
    if settings['method'] in ['kl_diag', 'kl_off_diag']:
        if settings['kl_scale_dep']:
            data['kl_t'] = io.read_from_fits(path['data'], 'kl_t_ell')
        else:
            data['kl_t'] = io.read_from_fits(path['data'], 'kl_t')
    # Add some dimension to settings (n_bins, n_x_var)
    settings['n_fields'] = data['corr_sim'].shape[0]
    settings['n_sims_tot'] = data['corr_sim'].shape[1]
    settings['n_bins'] = len(data['photo_z']) - 1
    settings['n_theta_ell'] = len(data['theta_ell'])
    # Calculate number of elements in data vector.
    # BUG FIX: integer division (//) keeps the bin-pair counts integers
    # on Python 3 — a float here breaks downstream array sizing.
    settings['n_data'] = np.count_nonzero(data['mask_theta_ell'])
    settings['n_data_tot'] = \
        settings['n_data']*settings['n_bins']*(settings['n_bins']+1)//2
    if settings['method'] == 'kl_diag':
        settings['n_data'] = settings['n_data']*settings['n_kl']
    elif settings['method'] == 'kl_off_diag':
        settings['n_data'] = \
            settings['n_data']*settings['n_kl']*(settings['n_kl']+1)//2
    else:
        settings['n_data'] = \
            settings['n_data']*settings['n_bins']*(settings['n_bins']+1)//2
    # ------------------- Preliminary computations ---------------------------#
    if set.BNT:
        data['photo_z'] = io.read_from_fits(path['data'], 'photo_z')
        bnt = cosmo_tools.BNT(cosmo['params'], data['photo_z'])
        data['bnt_mat'] = bnt.get_matrix()
    # Apply KL
    if settings['method'] in ['kl_off_diag', 'kl_diag']:
        data['corr_obs'] = \
            lkl.apply_kl(data['kl_t'], data['corr_obs'], settings)
        data['corr_sim'] = \
            lkl.apply_kl(data['kl_t'], data['corr_sim'], settings)
    # Compute how many simulations have to be used
    settings['n_sims'] = \
        lkl.how_many_sims(settings['n_sims'], settings['n_sims_tot'],
                          settings['n_data'], settings['n_data_tot'])
    data['corr_sim'] = lkl.select_sims(data, settings)
    # Prepare data if real
    if settings['space'] == 'real':
        # Reshape observed correlation function
        data['corr_obs'] = rsh.flatten_xipm(data['corr_obs'], settings)
        # Reshape simulated correlation functions
        cs = np.empty((settings['n_fields'],
                       settings['n_sims'])+data['corr_obs'].shape)
        for nf in range(settings['n_fields']):
            for ns in range(settings['n_sims']):
                cs[nf][ns] = \
                    rsh.flatten_xipm(data['corr_sim'][nf][ns], settings)
        data['corr_sim'] = cs
        # Mask observed correlation function
        data['corr_obs'] = rsh.mask_xipm(data['corr_obs'],
                                         data['mask_theta_ell'], settings)
        # Compute inverse covariance matrix
        data['inv_cov_mat'] = lkl.compute_inv_covmat(data, settings)
    # Prepare data if fourier
    else:
        # Mask Cl's
        data['corr_obs'] = rsh.mask_cl(data['corr_obs'], is_diag=is_diag)
        data['corr_sim'] = rsh.mask_cl(data['corr_sim'], is_diag=is_diag)
        # Unify fields
        data['cov_pf'] = rsh.get_covmat_cl(data['corr_sim'], is_diag=is_diag)
        data['corr_obs'] = rsh.unify_fields_cl(data['corr_obs'],
                                               data['cov_pf'], is_diag=is_diag,
                                               pinv=set.PINV)
        data['corr_sim'] = rsh.unify_fields_cl(data['corr_sim'],
                                               data['cov_pf'], is_diag=is_diag,
                                               pinv=set.PINV)
        # Apply BNT if required
        if set.BNT:
            data['corr_obs'] = \
                cosmo_tools.apply_bnt(data['corr_obs'], data['bnt_mat'])
            data['corr_sim'] = \
                cosmo_tools.apply_bnt(data['corr_sim'], data['bnt_mat'])
        # Reshape observed Cl's
        data['corr_obs'] = rsh.flatten_cl(data['corr_obs'], is_diag=is_diag)
        # Calculate covmat Cl's
        cov = rsh.get_covmat_cl(data['corr_sim'], is_diag=is_diag)
        cov = rsh.flatten_covmat(cov, is_diag=is_diag)
        # Debiasing factor for the inverse covariance estimated from a
        # finite number of simulations (Hartlap-style correction)
        factor = \
            (settings['n_sims']-settings['n_data']-2.)/(settings['n_sims']-1.)
        if set.PINV:
            data['inv_cov_mat'] = factor*np.linalg.pinv(cov)
        else:
            data['inv_cov_mat'] = factor*np.linalg.inv(cov)
    # Import Camera's template
    if set.THEORY == 'Camera':
        settings['cls_template'] = \
            io.import_template_Camera(set.CLS_TEMPLATE, settings)
    # ------------------- Run ------------------------------------------------#
    if settings['sampler'] == 'emcee':
        sampler.run_emcee(args, cosmo, data, settings, path)
    elif settings['sampler'] == 'single_point':
        sampler.run_single_point(cosmo, data, settings)
    print('Success!!')
    return
| 8,964 | 39.565611 | 79 | py |
kl_sample | kl_sample-master/kl_sample/get_kl.py | """
This module calculates the KL transform given a fiducial cosmology.
"""
import numpy as np
import kl_sample.io as io
import kl_sample.likelihood as lkl
import kl_sample.reshape as rsh
import kl_sample.settings as set
def get_kl(args):
    """ Calculate the KL transform
    Args:
        args: the arguments read by the parser.
    Returns:
        saves to the data file the KL transform
    """
    # ------------------- Initialize -----------------------------------------#
    # Define absolute paths and check the existence of each required file
    path = {
        'params': io.path_exists_or_error(args.params_file),
        'data': io.read_param(args.params_file, 'data', type='path'),
    }
    io.path_exists_or_error(path['data'])
    # Create array with cosmo parameters (central values only)
    params_name = ['h', 'omega_c', 'omega_b', 'ln10_A_s', 'n_s', 'w_0', 'w_A']
    params_val = io.read_cosmo_array(path['params'], params_name)[:, 1]
    # Read photo_z
    pz = io.read_from_fits(path['data'], 'PHOTO_Z')
    # Read and build the noise. Bandpowers (ell_bp) only exist for the
    # fourier-space analysis.
    ell_bp = None
    if io.read_param(path['params'], 'space') == 'real':
        ell_min = 2
        ell_max = io.read_param(path['params'], 'ell_max', type='int')
        n_eff = io.read_from_fits(path['data'], 'n_eff')
        n_eff = n_eff*(180.*60./np.pi)**2.  # converted in stedrad^-1
        sigma_g = io.read_from_fits(path['data'], 'sigma_g')
        noise = np.array([np.diag(sigma_g**2/n_eff) for x in range(ell_max+1)])
    elif io.read_param(path['params'], 'space') == 'fourier':
        ell_bp = set.BANDPOWERS
        ell_min = ell_bp[0, 0]
        ell_max = ell_bp[-1, -1] - 1
        noise = io.read_from_fits(path['data'], 'CL_EE_NOISE')
        sim = io.read_from_fits(path['data'], 'CL_SIM_EE')
        sim = rsh.clean_cl(sim, noise)
        cov_pf = rsh.get_covmat_cl(sim)
        noise = rsh.unify_fields_cl(noise, cov_pf, pinv=set.PINV)
        noise = rsh.debin_cl(noise, ell_bp)
    # ------------------- Compute KL -----------------------------------------#
    kl_t = lkl.compute_kl(params_val, pz, noise, ell_min=ell_min,
                          ell_max=ell_max, scale_dep=False)
    io.write_to_fits(fname=path['data'], array=kl_t, name='kl_t')
    # BUG FIX: ell_bp used to be referenced unconditionally here, raising
    # NameError for the real-space analysis. The scale-dependent transform
    # needs bandpowers, so it is computed only when they are defined.
    if ell_bp is not None:
        kl_t = lkl.compute_kl(params_val, pz, noise, ell_min=ell_min,
                              ell_max=ell_max, scale_dep=True, bp=ell_bp)
        io.write_to_fits(fname=path['data'], array=kl_t, name='kl_t_ell')
    io.print_info_fits(path['data'])
    print('Success!!')
| 2,480 | 32.986301 | 79 | py |
kl_sample | kl_sample-master/kl_sample/plots.py | import os
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import kl_sample.io as io
import kl_sample.reshape as rsh
import kl_sample.settings as set
import kl_sample.cosmo as cosmo_tools
import kl_sample.likelihood as lkl
def plots(args):
    """ Generate plots for the papers.
    Args:
        args: the arguments read by the parser.
    Returns:
        saves to fits files file the output.
    """
    # NOTE(review): this routine reads like exploratory/debugging code:
    # it contains a hard-coded absolute path below and an exit(1) halfway
    # through, after which the remaining code is unreachable. Confirm
    # before relying on any of the plots produced past that point.
    fields = set.FIELDS_CFHTLENS
    # Define absolute paths
    path = {}
    path['params'] = io.path_exists_or_error(args.params_file)
    path['mcm'] = io.path_exists_or_error(io.read_param(args.params_file, 'mcm', type='path'))+'/'
    path['fourier'] = io.path_exists_or_error('{}/data/data_fourier.fits'.format(sys.path[0]))
    path['real'] = io.path_exists_or_error('{}/data/data_real.fits'.format(sys.path[0]))
    path['output'] = io.path_exists_or_create(os.path.abspath(args.output_path))+'/'
    # Settings (hard-coded for this plotting run: scale-dependent KL with
    # 7 modes, diagonal method)
    settings = {
        'kl_scale_dep' : True,
        'method' : 'kl_diag',
        'n_kl' : 7
    }
    # Read data from both the fourier- and real-space data files
    ell = io.read_from_fits(path['fourier'], 'ELL')
    cl_EE = io.read_from_fits(path['fourier'], 'CL_EE')
    cl_BB = io.read_from_fits(path['fourier'], 'CL_BB')
    noise_EE = io.read_from_fits(path['fourier'], 'CL_EE_NOISE')
    noise_BB = io.read_from_fits(path['fourier'], 'CL_BB_NOISE')
    sims_EE = io.read_from_fits(path['fourier'], 'CL_SIM_EE')
    sims_BB = io.read_from_fits(path['fourier'], 'CL_SIM_BB')
    pz = io.read_from_fits(path['fourier'], 'PHOTO_Z')
    n_eff = io.read_from_fits(path['fourier'], 'N_EFF')
    sigma_g = io.read_from_fits(path['fourier'], 'SIGMA_G')
    pz_r = io.read_from_fits(path['real'], 'PHOTO_Z')
    n_eff_r = io.read_from_fits(path['real'], 'N_EFF')
    sigma_g_r = io.read_from_fits(path['real'], 'SIGMA_G')
    kl_t = io.read_from_fits(path['fourier'], 'KL_T_ELL')
    # io.print_info_fits(path['fourier'])
    # Clean data from noise
    cl_EE = rsh.clean_cl(cl_EE, noise_EE)
    sims_EE = rsh.clean_cl(sims_EE, noise_EE)
    cl_BB = rsh.clean_cl(cl_BB, noise_BB)
    sims_BB = rsh.clean_cl(sims_BB, noise_BB)
    # Apply the KL transform and combine the fields
    cl_EE_kl = lkl.apply_kl(kl_t, cl_EE, settings)
    sims_EE_kl = lkl.apply_kl(kl_t, sims_EE, settings)
    cov_pf_kl = rsh.get_covmat_cl(sims_EE_kl,is_diag=True)
    cl_EE_kl = rsh.unify_fields_cl(cl_EE_kl, cov_pf_kl, is_diag=True)
    sims_EE_kl = rsh.unify_fields_cl(sims_EE_kl, cov_pf_kl, is_diag=True)
    # Mask observed Cl's
    # cl_EE_kl = rsh.mask_cl(cl_EE_kl, is_diag=True)
    # Calculate covmat Cl's
    # sims_EE_kl = rsh.mask_cl(sims_EE_kl, is_diag=True)
    cov_kl = rsh.get_covmat_cl(sims_EE_kl, is_diag=True)
    # NOTE(review): hard-coded debiasing factor — presumably 2000 sims and
    # 35 data points; confirm against the actual run settings.
    factor = (2000.-35.-2.)/(2000.-1.)
    cov_kl = cov_kl/factor
    # Create array with cosmo parameters (central values only)
    params_name = ['h', 'omega_c', 'omega_b', 'ln10_A_s', 'n_s', 'w_0', 'w_A']
    params_val = io.read_cosmo_array(path['params'], params_name)[:,1]
    # Get theory Cl's
    bp = set.BANDPOWERS
    n_bins = len(bp)
    cosmo = cosmo_tools.get_cosmo_ccl(params_val)
    th_cl = cosmo_tools.get_cls_ccl(params_val, cosmo, pz, bp[-1,-1])
    # NOTE(review): hard-coded absolute path to a best-fit Cl file — this
    # will only work on the machine it was written on.
    from astropy.io import fits
    d=fits.open("/users/groups/damongebellini/data/preliminary/7bins/cls_bf.fits")
    cl_ee=d[2].data[:len(th_cl)]
    # th_cl = rsh.bin_cl(th_cl, bp)
    tot_ell = np.arange(bp[-1,-1]+1)
    th_cl,th_cl_BB = rsh.couple_decouple_cl(tot_ell, th_cl, path['mcm'], len(fields), n_bins, len(bp),
        return_BB=True)
    th_clb,th_cl_BBb = rsh.couple_decouple_cl(tot_ell, cl_ee, path['mcm'], len(fields), n_bins, len(bp),
        return_BB=True)
    th_cl_kl = lkl.apply_kl(kl_t, th_cl, settings)
    th_cl_kl = rsh.unify_fields_cl(th_cl_kl, cov_pf_kl, is_diag=True)
    # Save the observed and theory KL spectra plus covariance
    io.write_to_fits(fname=path['output']+'obs.fits', array=ell, name='ell')
    io.write_to_fits(fname=path['output']+'obs.fits', array=cl_EE_kl, name='cl_EE_obs')
    io.write_to_fits(fname=path['output']+'obs.fits', array=cov_kl, name='cov_obs')
    io.write_to_fits(fname=path['output']+'obs.fits', array=th_cl_kl, name='cl_EE_th')
    # Plot the first three KL modes with error bars (positive and
    # negative values plotted separately on the log scale)
    plt.figure()
    x = ell
    for b1 in range(3):
        y1 = th_cl_kl[:,b1]
        y2 = cl_EE_kl[:,b1]
        err = np.sqrt(np.diag(cov_kl[:,:,b1,b1]))
        # plt.plot(x, y1, label = '$\\sigma_g^2/n_{eff}$')
        plt.errorbar(x, y2, yerr=err, label = 'Bin {}'.format(b1+1), fmt='o')
        plt.errorbar(x, -y2, yerr=err, label = 'Bin {}'.format(b1+1), fmt='^')
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel('$\\ell$')
    plt.ylabel('$\\hat{C}_\\ell^{EE}$')
    plt.legend(loc='best')
    plt.savefig('{}cl_kl_data.pdf'.format(path['output']))
    plt.close()
    # NOTE(review): everything below this exit(1) is dead code.
    exit(1)
    cov_pf = rsh.get_covmat_cl(sims_EE)
    cov_pf_BB = rsh.get_covmat_cl(sims_BB)
    th_cl = rsh.unify_fields_cl(th_cl, cov_pf)
    th_cl_BB = rsh.unify_fields_cl(th_cl_BB, cov_pf_BB)
    th_clb = rsh.unify_fields_cl(th_clb, cov_pf)
    th_cl_BBb = rsh.unify_fields_cl(th_cl_BBb, cov_pf_BB)
    # Unify fields
    cl_EE = rsh.unify_fields_cl(cl_EE, cov_pf)
    noise_EE = rsh.unify_fields_cl(noise_EE, cov_pf)
    sims_EE = rsh.unify_fields_cl(sims_EE, cov_pf)
    cl_BB = rsh.unify_fields_cl(cl_BB, cov_pf_BB)
    noise_BB = rsh.unify_fields_cl(noise_BB, cov_pf_BB)
    sims_BB = rsh.unify_fields_cl(sims_BB, cov_pf_BB)
    # Average simulations
    sims_EE_avg = np.average(sims_EE, axis=0)
    sims_BB_avg = np.average(sims_BB, axis=0)
    # Calculate covmat
    covmat_EE = rsh.get_covmat_cl(sims_EE)
    covmat_BB = rsh.get_covmat_cl(sims_BB)
    np.savez("cls_all",dd_EE=cl_EE,dd_BB=cl_BB,
        tt_EE=th_cl,tt_BB=th_cl_BB,tb_EE=th_clb,tb_BB=th_cl_BBb,
        ss_EE=sims_EE,ss_BB=sims_BB,cv_EE=covmat_EE,cv_BB=covmat_BB)
    # Noise based on n_eff and sigma_g
    n_eff = n_eff*(180.*60./np.pi)**2. #converted in stedrad^-1
    noise_ns = np.array([np.diag(sigma_g**2/n_eff) for x in ell])
    # Noise based on n_eff and sigma_g
    n_eff_r = n_eff_r*(180.*60./np.pi)**2. #converted in stedrad^-1
    noise_ns_r = np.array([np.diag(sigma_g_r**2/n_eff_r) for x in ell])
    # Average noise
    noise_EE_avg = rsh.debin_cl(noise_EE, bp)
    noise_EE_avg = np.average(noise_EE_avg[bp[0,0]:bp[-1,-1]],axis=0)
    noise_EE_avg = np.array([noise_EE_avg for x in ell])
    # Plot noise
    x = ell
    for b1 in range(n_bins):
        for b2 in range(b1,n_bins):
            y1 = noise_ns[:,b1,b2]
            y2 = noise_EE_avg[:,b1,b2]
            y3 = noise_EE[:,b1,b2]
            y4 = noise_ns_r[:,b1,b2]
            plt.figure()
            plt.plot(x, y1, label = '$\\sigma_g^2/n_{eff}$')
            plt.plot(x, y2, label = 'Average')
            plt.plot(x, y3, 'o', label = 'Noise')
            plt.plot(x, y4, label = '$\\sigma_g^2/n_{eff}$ real')
            plt.xscale('log')
            plt.yscale('log')
            plt.legend(loc='best')
            plt.title('Bins {} {}'.format(b1+1,b2+1))
            plt.xlabel('$\\ell$')
            plt.ylabel('$N_\\ell^{EE}$')
            plt.savefig('{}noise_bin{}{}.pdf'.format(path['output'],b1+1,b2+1))
            plt.close()
    # Plot Cl
    x = ell
    for b1 in range(n_bins):
        for b2 in range(b1,n_bins):
            y1 = cl_EE[:,b1,b2]
            y2 = th_cl[:,b1,b2]
            y3 = sims_EE_avg[:,b1,b2]
            err1 = np.sqrt(np.diag(covmat_EE[:,:,b1,b1,b2,b2]))
            plt.figure()
            plt.errorbar(x, y1, yerr=err1, fmt='-o', label='data')
            plt.plot(x, y2, label='theory')
            plt.plot(x, y3, label='simulations')
            plt.xscale('log')
            plt.legend(loc='best')
            plt.title('Bins {} {}'.format(b1+1,b2+1))
            plt.xlabel('$\\ell$')
            plt.ylabel('$C_\\ell^{EE}$')
            plt.savefig('{}cl_bin{}{}.pdf'.format(path['output'],b1+1,b2+1))
            plt.close()
    return
| 7,931 | 36.239437 | 104 | py |
kl_sample | kl_sample-master/kl_sample/__init__.py | 0 | 0 | 0 | py | |
kl_sample | kl_sample-master/kl_sample/prep_real.py | """
This module contains the main function to prepare data in
real space for run. It should be used only once. Then
the data will be stored in the repository.
"""
import os
import numpy as np
import kl_sample.settings as set
import kl_sample.io as io
import kl_sample.reshape as rsh
def prep_real(args):
    """ Prepare data in real space.
    Args:
        args: the arguments read by the parser.
    Returns:
        saves to a fits file file the output.
    """
    # Resolve all input/output paths up front, failing early when missing
    in_dir = args.input_folder
    path = {
        'data': io.path_exists_or_error(os.path.join(in_dir, 'data.fits')),
        'xipm': io.path_exists_or_error(os.path.join(in_dir, 'xipm.dat')),
        'sims': io.path_exists_or_error(os.path.join(in_dir, 'mockxipm')),
        'output': io.path_exists_or_create(
            os.path.abspath('data/data_real.fits'))
    }
    # Observed correlation function: unmask, reshape and store
    xipm_obs = np.loadtxt(path['xipm'], dtype='float64')[:, 1]
    xipm_obs = rsh.unflatten_xipm(rsh.unmask_xipm(xipm_obs, set.MASK_THETA))
    io.write_to_fits(fname=path['output'], array=xipm_obs, name='xipm_obs')
    # Simulated correlation functions: same reshaping, field by field
    xipm_flat = io.unpack_simulated_xipm(fname=path['sims'])
    n_fields, n_sims = xipm_flat.shape[0], xipm_flat.shape[1]
    xipm_sim = np.empty((n_fields, n_sims)+xipm_obs.shape)
    for nf in range(n_fields):
        for ns in range(n_sims):
            xipm_sim[nf, ns] = rsh.unflatten_xipm(xipm_flat[nf, ns])
    io.write_to_fits(fname=path['output'], array=xipm_sim, name='xipm_sim')
    # Photometric redshifts, effective number density and shape noise
    photo_z, n_eff, sigma_g = io.read_photo_z_data(path['data'])
    for name, arr in [('photo_z', photo_z), ('n_eff', n_eff),
                      ('sigma_g', sigma_g)]:
        io.write_to_fits(fname=path['output'], array=arr, name=name)
    # Summarize the content of the output file
    io.print_info_fits(fname=path['output'])
    print('Success!!')
| 2,061 | 30.723077 | 73 | py |
kl_sample | kl_sample-master/kl_sample/io.py | """
Module containing all the input/output related functions.
Functions:
- argument_parser()
- path_exists_or_error(path)
- path_exists_or_create(path)
- read_param(fname, par, type)
- read_cosmo_array(fname, pars)
- read_from_fits(fname, name)
- read_header_from_fits(fname, name)
- write_to_fits(fname, array, name)
- print_info_fits(fname)
- unpack_simulated_xipm(fname)
- read_photo_z_data(fname)
"""
import argparse
import os
import sys
import re
import numpy as np
from astropy.io import fits
import kl_sample.settings as set
# ------------------- Parser -------------------------------------------------#
def argument_parser():
    """ Call the parser to read command line arguments.
    Args:
        None.
    Returns:
        args: the arguments read by the parser
    """
    parser = argparse.ArgumentParser(
        'Sample the cosmological parameter space using lensing data.')
    # Add supbarser to select between run and prep modes.
    subparsers = parser.add_subparsers(
        dest='mode',
        help='Options are: '
        '(i) prep_real: prepare data in real space. '
        '(ii) prep_fourier: prepare data in fourier space. '
        '(iii) run: do the actual run. '
        '(iv) get_kl: calculate the kl transformation '
        'Options (i) and (ii) are usually not necessary since '
        'the data are are already stored in this repository.')
    # One sub-parser per mode; each sub-command gets its own arguments below
    run_parser = subparsers.add_parser('run')
    prep_real_parser = subparsers.add_parser('prep_real')
    prep_fourier_parser = subparsers.add_parser('prep_fourier')
    plots_parser = subparsers.add_parser('plots')
    get_kl_parser = subparsers.add_parser('get_kl')
    # Arguments for 'run'
    run_parser.add_argument('params_file', type=str, help='Parameters file')
    run_parser.add_argument(
        '--restart', '-r', help='Restart the chains from the last point '
        'of the output file (only for emcee)', action='store_true')
    # Arguments for 'prep_real'
    prep_real_parser.add_argument(
        'input_folder', type=str, help='Input folder.')
    # Arguments for 'prep_fourier' (mostly flags to re-run individual
    # preparation stages even when their outputs already exist)
    prep_fourier_parser.add_argument(
        'input_path', type=str, help='Input folder. Files that should contain:'
        ' cat_full.fits, mask_arcsec_N.fits.gz (N=1,..,4), mask_url.txt. '
        'See description in kl_sample/prep_fourier.py for more details.')
    prep_fourier_parser.add_argument(
        '--output_path', '-o', type=str, help='Output folder.')
    prep_fourier_parser.add_argument(
        '--badfields_path', '-bp', type=str, help='Folder where the bad fields'
        ' mask are stored, or where they well be downloaded.')
    prep_fourier_parser.add_argument(
        '--cat_sims_path', '-cp', type=str, help='Folder where the catalogues'
        ' of the simulations are stored, or where they well be downloaded.')
    prep_fourier_parser.add_argument(
        '--run_all', '-a', help='Run all routines even if the files are '
        'already present', action='store_true')
    prep_fourier_parser.add_argument(
        '--run_mask', '-mk', help='Run mask routine even if the files are '
        'already present', action='store_true')
    prep_fourier_parser.add_argument(
        '--run_mult', '-m', help='Run multiplicative correction routine even '
        'if the files are already present', action='store_true')
    prep_fourier_parser.add_argument(
        '--run_pz', '-pz', help='Run photo_z routine even if the files are '
        'already present', action='store_true')
    prep_fourier_parser.add_argument(
        '--run_cat', '-c', help='Run catalogue routine even if the files are '
        'already present', action='store_true')
    prep_fourier_parser.add_argument(
        '--run_map', '-mp', help='Run map routine even if the files are '
        'already present', action='store_true')
    prep_fourier_parser.add_argument(
        '--run_cl', '-cl', help='Run Cl routine even if the files are '
        'already present', action='store_true')
    prep_fourier_parser.add_argument(
        '--run_cat_sims', '-cats', help='Run Cat sims routine even if the '
        'files are already present', action='store_true')
    prep_fourier_parser.add_argument(
        '--run_cl_sims', '-cls', help='Run Cl sims routine even if the '
        'files are already present', action='store_true')
    prep_fourier_parser.add_argument(
        '--want_plots', '-p', help='Generate plots for the images',
        action='store_true')
    prep_fourier_parser.add_argument(
        '--remove_files', '-rp', help='Remove downloaded files',
        action='store_true')
    # Arguments for 'plots'
    plots_parser.add_argument(
        'output_path', type=str, help='Path to output files')
    plots_parser.add_argument(
        '--params_file', '-p', type=str, help='Path to parameter file')
    # Arguments for 'get_kl'
    get_kl_parser.add_argument('params_file', type=str, help='Parameters file')
    return parser.parse_args()
# ------------------- Check existence ----------------------------------------#
def path_exists_or_error(path):
    """ Check if a path exists, otherwise it returns error.
    Args:
        path: path to check.
    Returns:
        abspath: if the file exists it returns its absolute path
    """
    abspath = os.path.abspath(path)
    # Guard clause: fail immediately when the path is missing
    if not os.path.exists(abspath):
        raise IOError('Path {} not found!'.format(abspath))
    return abspath
def path_exists_or_create(path):
    """ Check if a path exists, otherwise it creates it.
    Args:
        path: path to check. If path contains a file
        name, it does create only the folders containing it.
    Returns:
        abspath: return the absolute path of path (or of its containing
        folder when the last component looks like a file name).
    """
    abspath = os.path.abspath(path)
    folder, name = os.path.split(abspath)
    # The last component counts as a file name when it ends with an
    # underscore or has an extension of at least 3 characters.
    # BUG FIX: the extension pattern used to be '.{3}' (exactly three
    # characters), so 4-letter extensions such as '.fits' were treated
    # as directory names and created with makedirs.
    cond1 = not bool(re.fullmatch('.+_', name, re.IGNORECASE))
    cond2 = not bool(re.fullmatch(r'.+\..{3,}', name, re.IGNORECASE))
    if cond1 and cond2:
        folder = abspath
    if not os.path.exists(folder):
        os.makedirs(folder)
    return folder
# ------------------- Read ini -----------------------------------------------#
def read_param(fname, par, type='string'):
    """ Return the value of a parameter, either from the
    input file or from the default settings.
    Args:
        fname: path of the input file.
        par: string containing the name of the parameter
        type: output type for the value of the parameter
            ('string', 'float', 'int', 'path', 'bool' or 'cosmo').
            NOTE: this parameter shadows the builtin 'type'; kept for
            backward compatibility with existing callers.
    Returns:
        value: value of the parameter par
    Raises:
        IOError: if par appears more than once in the file, or if a
            'bool' value cannot be interpreted.
    """
    # Read the file looking for the parameter
    value = None
    n_par = 0
    with open(fname) as fn:
        for line in fn:
            # Strip inline comments before parsing
            line = re.sub('#.+', '', line)
            if '=' in line:
                # NOTE(review): a line containing more than one '=' would
                # raise ValueError here (split produces >2 items) — confirm
                # the parameter files never contain such lines.
                name, _ = line.split('=')
                name = name.strip()
                if name == par:
                    n_par = n_par + 1
                    _, value = line.split('=')
                    value = value.strip()
    # If there are duplicated parameters raise an error
    if n_par > 1:
        raise IOError('Found duplicated parameter: ' + par)
    # If par was not in the file use the default value
    if value is None:
        value = set.default_params[par]
        if type == 'bool':
            # Defaults are returned as stored (presumably already bool);
            # no "default used" message is printed in this case.
            return value
        print('Default value used for ' + par + ' = ' + str(value))
        sys.stdout.flush()
    # Convert the parameter to the desired type
    if type == 'float':
        return float(value)
    elif type == 'int':
        return int(value)
    # Path type returns the absolute path
    elif type == 'path':
        return os.path.abspath(value)
    # Boolean type considers only the first letter (case insensitive)
    elif type == 'bool':
        if re.match('y.*', value, re.IGNORECASE):
            return True
        elif re.match('n.*', value, re.IGNORECASE):
            return False
        else:
            raise IOError('Boolean type for ' + par + ' not recognized!')
    # Cosmo type has to be returned as a three dimensional array
    # The check that it has been converted correctly is done in
    # read_cosmo_array.
    elif type == 'cosmo':
        try:
            # A single number means [left, central, right] all equal
            return np.array([float(value), float(value), float(value)])
        except ValueError:
            try:
                # Otherwise expect a comma-separated triple, where 'None'
                # entries are kept as Python None
                array = value.split(',')
                array = [x.strip() for x in array]
                return [None if x == 'None' else float(x) for x in array]
            except ValueError:
                # Unparseable: return the raw string for the caller to reject
                return value
    # All other types (such as strings) will be returned as strings
    else:
        return value
def read_cosmo_array(fname, pars):
    """ Read from the parameter file the cosmological
    parameters and store them in an array.
    Args:
        fname: path of the input file.
        pars: list of the cosmological parameters. Used
        to determine the order in which they are stored
    Returns:
        cosmo_params: array containing the cosmological
        parameters. Each parameter is a row as
        [left_bound, central, right_bound].
    """
    rows = []
    for par in pars:
        # Get the values of the parameter
        value = read_param(fname, par, type='cosmo')
        # A valid cosmo parameter is a 3-element sequence; raw strings
        # (returned by read_param on unparseable input) are rejected.
        if type(value) is str or len(value) != 3:
            raise IOError('Check the value of ' + par + '!')
        rows.append(value)
    return np.array(rows)
# ------------------- FITS files ---------------------------------------------#
def read_from_fits(fname, name):
    """ Open a fits file and read data from it.
    Args:
        fname: path of the data file.
        name: name of the data we want to extract.
    Returns:
        array with data for name.
    """
    hdul = fits.open(fname)
    try:
        # Data is materialized before the file handle is closed
        return hdul[name].data
    finally:
        hdul.close()
def read_header_from_fits(fname, name):
    """ Open a fits file and read header from it.
    Args:
        fname: path of the data file.
        name: name of the data we want to extract.
    Returns:
        header.
    """
    hdul = fits.open(fname)
    try:
        return hdul[name].header
    finally:
        hdul.close()
def write_to_fits(fname, array, name, type='image', header=None):
    """ Write an array to a fits file, replacing any HDU with the
    same name.
    Args:
        fname: path of the input file (created if missing).
        array: array to save ('image' type) or HDU object ('table' type).
        name: name of the image.
        type: kind of HDU to append ('image' or 'table').
        header: optional header for image HDUs.
    Returns:
        True when the data could not be saved (unrecognized type),
        False otherwise.
    """
    warning = False
    # If file does not exist, create it with an empty primary HDU
    if not os.path.exists(fname):
        hdul = fits.HDUList([fits.PrimaryHDU()])
        hdul.writeto(fname)
    # Open the file and replace any existing HDU with the same name
    with fits.open(fname, mode='update') as hdul:
        try:
            del hdul[name]  # idiomatic form of hdul.__delitem__(name)
        except KeyError:
            pass
        if type == 'image':
            hdul.append(fits.ImageHDU(array, name=name, header=header))
        elif type == 'table':
            hdul.append(array)
        else:
            print('Type '+type+' not recognized! Data not saved to file!')
            return True
    print('Appended ' + name.upper() + ' to ' + os.path.relpath(fname))
    sys.stdout.flush()
    return warning
def print_info_fits(fname):
    """ Print on screen fits file info.
    Args:
        fname: path of the input file.
    Returns:
        None
    """
    hdul = fits.open(fname)
    try:
        print(hdul.info())
        sys.stdout.flush()
    finally:
        hdul.close()
    return
def get_keys_from_fits(fname):
    """ Get keys from fits file.
    Args:
        fname: path of the data file.
    Returns:
        list of keys.
    """
    with fits.open(fname) as fn:
        names = []
        for hdu in fn:
            names.append(hdu.name)
        return names
# ------------------- On preliminary data ------------------------------------#
def unpack_simulated_xipm(fname):
    """ Unpack a tar file containing the simulated
    correlation functions and write them into
    a single array.
    Args:
        fname: path of the input file.
    Returns:
        array with correlation functions, shape
        (n_fields, n_sims, 2*n_theta*n_pairs).
    Raises:
        IOError: if the number of files is not a multiple of the
            expected number of (field, bin-pair) combinations.
    """
    # Import local variables from settings
    n_bins = len(set.Z_BINS)
    n_theta = len(set.THETA_ARCMIN)
    n_fields = len(set.A_CFHTlens)
    # Number of unique bin pairs.
    # BUG FIX: integer division is required — this value is used both as
    # a divisor of a file count and as an array dimension, and '/' yields
    # a float on Python 3 (np.zeros rejects float shapes).
    n_pairs = n_bins*(n_bins+1)//2
    # Base name of each file inside the compressed tar
    base_name = '/xipm_cfhtlens_sub2real0001_maskCLW1_blind1_z1_z1_athena.dat'
    # Calculate how many simulations were run based on the number of files
    nfiles = len(os.listdir(fname))
    n_sims, mod = np.divmod(nfiles, n_fields*n_pairs)
    if mod != 0:
        raise IOError('The number of files in ' + fname + ' is not correct!')
    # Initialize array
    xipm_sims = np.zeros((n_fields, n_sims, 2*n_theta*n_pairs))
    # Main loop: scroll over each file and import data
    for nf in range(n_fields):
        for ns in range(n_sims):
            for nb1 in range(n_bins):
                for nb2 in range(nb1, n_bins):
                    # Modify the base name to get the actual one
                    new_name = base_name.replace('maskCLW1', 'maskCLW{0:01d}'
                                                 ''.format(nf+1))
                    new_name = new_name.replace('real0001', 'real{0:04d}'
                                                ''.format(ns+1))
                    new_name = new_name.replace('z1_athena', 'z{0:01d}_athena'
                                                ''.format(nb1+1))
                    new_name = new_name.replace('blind1_z1', 'blind1_z{0:01d}'
                                                ''.format(nb2+1))
                    # For each bin pair calculate position on the final array
                    pos = np.flip(np.arange(n_bins+1), 0)[:nb1].sum()
                    pos = (pos + nb2 - nb1)*2*n_theta
                    # Read xi_plus and xi_minus and stack them
                    fn = np.loadtxt(fname+new_name)
                    xi = np.hstack((fn[:, 1], fn[:, 2]))
                    # Write imported data on final array (slice assignment
                    # replaces the old element-by-element loop)
                    xipm_sims[nf, ns, pos:pos+len(xi)] = xi
    return xipm_sims
def read_photo_z_data(fname):
    """ Read CFHTlens data and calculate photo_z,
    n_eff and sigma_g.
    Args:
        fname: path of the input file.
    Returns:
        arrays with photo_z, n_eff and sigma_g.
        photo_z has shape (n_bins+1, n_z): row 0 holds the redshift
        grid, rows 1..n_bins the weighted p(z) of each bin.
    """
    # Read from fits
    hdul = fits.open(fname, memmap=True)
    table = hdul['data'].data
    image = hdul['PZ_full'].data
    hdul.close()
    # Local variables
    z_bins = set.Z_BINS
    # Boolean selection per tomographic bin
    sel_bins = np.array([set.filter_galaxies(table, z_bins[n][0], z_bins[n][1])
                         for n in range(len(z_bins))])
    photo_z = np.zeros((len(z_bins)+1, len(image[0])))
    n_eff = np.zeros(len(z_bins))
    sigma_g = np.zeros(len(z_bins))
    # Redshift grid at the center of each p(z) histogram bin
    photo_z[0] = (np.arange(len(image[0]))+1./2.)*set.dZ_CFHTlens
    # Main loop: for each bin calculate photo_z, n_eff and sigma_g
    for n in range(len(z_bins)):
        # Useful quantities TODO: Correct ellipticities
        w_sum = table['weight'][sel_bins[n]].sum()
        w2_sum = (table['weight'][sel_bins[n]]**2.).sum()
        # NOTE(review): m is computed as the mean of 'e1' — for a
        # multiplicative shear bias one would expect a calibration column
        # (e.g. 'm') instead; confirm against the CFHTLenS catalogue docs.
        m = np.average(table['e1'][sel_bins[n]])
        e1 = table['e1'][sel_bins[n]]/(1+m)
        e2 = table['e2'][sel_bins[n]]-table['c2'][sel_bins[n]]/(1+m)
        # photo_z: weighted average of the individual p(z)
        photo_z[n+1] = np.dot(table['weight'][sel_bins[n]],
                              image[sel_bins[n]])/w_sum
        # n_eff: effective number density per unit survey area
        n_eff[n] = w_sum**2/w2_sum/set.A_CFHTlens.sum()
        # sigma_g: weighted rms ellipticity per component
        sigma_g[n] = np.dot(table['weight'][sel_bins[n]]**2.,
                            (e1**2. + e2**2.)/2.)/w2_sum
        sigma_g[n] = sigma_g[n]**0.5
        # Print progress message
        print('----> Completed bin {}/{}'.format(n+1, len(z_bins)))
        sys.stdout.flush()
    return photo_z, n_eff, sigma_g
# ------------------- Import template Camers ---------------------------------#
def import_template_Camera(path, settings):
    """ Load the Camera et al. Cl template from a text file.
    Args:
        path: path of the template file. First column: multipoles;
            remaining columns: one spectrum per unique bin pair,
            ordered as the upper triangle of the bin matrix.
        settings: dictionary with 'ell_max' and 'n_bins'.
    Returns:
        array of shape (ell_max+1, n_bins, n_bins), zero outside the
        multipole range covered by the file.
    """
    ell_max = settings['ell_max']
    nb = settings['n_bins']
    cols = np.genfromtxt(path, unpack=True)
    l_lo, l_hi = int(cols[0].min()), int(cols[0].max())
    corr = np.zeros((ell_max+1, nb, nb))
    rows_u, cols_u = np.triu_indices(nb)
    for idx in range(nb*(nb+1)//2):
        r, c = rows_u[idx], cols_u[idx]
        # Fill both symmetric entries of the bin matrix
        corr[l_lo:l_hi+1, r, c] = cols[idx+1]
        corr[l_lo:l_hi+1, c, r] = cols[idx+1]
    return corr
| 16,499 | 31.608696 | 79 | py |
kl_sample | kl_sample-master/fourier_analysis/maps2cls.py | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import pymaster as nmt
import flatmaps as fm
from optparse import OptionParser
def opt_callback(option, opt, value, parser):
    """optparse callback: store a comma-separated option value as a list."""
    parsed = value.split(',')
    setattr(parser.values, option.dest, parsed)
# Command-line interface ------------------------------------------------------
parser = OptionParser()
parser.add_option('--mask-file', dest='fname_mask', default='NONE', type=str,
                  help='Path to mask file')
parser.add_option('--maps-file', dest='fname_maps', default='NONE', type=str,
                  help='Path to maps file')
parser.add_option('--nbins', dest='nbins', default=7, type=int,
                  help='Number of bins')
parser.add_option('--ell-bins', dest='fname_ellbins', default='NONE', type=str,
                  help='Path to ell-binning file. '+
                       'Format should be: double column (l_min,l_max). One row per bandpower.')
parser.add_option('--output-file', dest='fname_out', default=None, type=str,
                  help='Output file name.')
parser.add_option('--mcm-output', dest='fname_mcm', default='NONE', type=str,
                  help='File containing the mode-coupling matrix. '+
                       'If NONE or non-existing, it will be computed. '+
                       'If not NONE and non-existing, new file will be created')
parser.add_option('--masking-threshold', dest='mask_thr', default=0.5, type=float,
                  help='Will discard all pixel with a masked fraction larger than this.')
####
# Read options
(o, args) = parser.parse_args()

# Needed below to check for an existing mode-coupling matrix file; it was
# missing from the original import block.
import os

# Read mask
fsk, mskfrac = fm.read_flat_map(o.fname_mask, i_map=0)

# Read bandpowers and create NmtBin
lini, lend = np.loadtxt(o.fname_ellbins, unpack=True)
bpws = nmt.NmtBinFlat(lini, lend)
ell_eff = bpws.get_effective_ells()

# Read maps and create NmtFields (two spin-2 components per tomographic bin)
fields = []
for i in np.arange(o.nbins):
    fskb, map1 = fm.read_flat_map(o.fname_maps, i_map=2*i+0)
    fm.compare_infos(fsk, fskb)
    fskb, map2 = fm.read_flat_map(o.fname_maps, i_map=2*i+1)
    fm.compare_infos(fsk, fskb)
    fields.append(nmt.NmtFieldFlat(np.radians(fsk.lx), np.radians(fsk.ly),
                                   mskfrac.reshape([fsk.ny, fsk.nx]),
                                   [map1.reshape([fsk.ny, fsk.nx]),
                                    map2.reshape([fsk.ny, fsk.nx])]))

# Read or compute mode-coupling matrix
wsp = nmt.NmtWorkspaceFlat()
if not os.path.isfile(o.fname_mcm):
    print("Computing mode-coupling matrix")
    wsp.compute_coupling_matrix(fields[0], fields[0], bpws)
    if o.fname_mcm != 'NONE':
        wsp.write_to(o.fname_mcm)
else:
    print("Reading mode-coupling matrix from file")
    wsp.read_from(o.fname_mcm)

# Compute coupled power spectra for every (i,j) pair of bins
cls_coup = []
# Floor division: a plain "/" yields a float under Python 3, which would make
# np.zeros below fail.
ncross = (o.nbins*(o.nbins+1))//2
ordering = np.zeros([ncross, 2], dtype=int)
i_x = 0
for i in range(o.nbins):
    for j in range(i, o.nbins):
        cls_coup.append(nmt.compute_coupled_cell_flat(fields[i], fields[j], bpws))
        ordering[i_x, :] = np.array([i, j])
        i_x += 1
cls_coup = np.array(cls_coup)
#n_cross=len(cls_coup)
#n_ell=len(ell_eff)

# Here we'd do the KL stuff. Instead, right now we just decouple the cls.
# Fixes: the original referenced an undefined name 'cl_coup', and the leading
# [0] (first decoupled spectrum) is needed so each entry is 1D for np.savetxt,
# consistent with flatmaps.FlatMapInfo.compute_power_spectrum.
cls_decoup = np.array([wsp.decouple_cell(c)[0] for c in cls_coup])

# Write output: one column of effective ells plus one column per cross-spectrum
towrite = [ell_eff]
header = '[0]-l '
for i_c, c in enumerate(cls_decoup):
    towrite.append(c)
    header += '[%d]-C(%d,%d) ' % (i_c+1, ordering[i_c, 0], ordering[i_c, 1])
np.savetxt(o.fname_out, np.transpose(np.array(towrite)), header=header)
| 3,271 | 35.764045 | 99 | py |
kl_sample | kl_sample-master/fourier_analysis/flatmaps.py | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import pymaster as nmt
from astropy.io import fits
from astropy.wcs import WCS
class FlatMapInfo(object) :
    """
    Describes the pixelization of a rectangular flat-sky map (a WCS plus the
    number of pixels along each axis) and provides coordinate transforms,
    plotting, FITS I/O, up/down-grading and power-spectrum estimation for
    maps defined on that grid. Maps are handled as flattened 1D arrays of
    length npix = nx*ny, in row-major (y, x) order.
    """
    def __init__(self,wcs,nx=None,ny=None,lx=None,ly=None) :
        """
        Creates a flat map
        wcs : WCS object containing information about reference point and resolution
        nx,ny : Number of pixels in the x/y axes. If None, lx/ly must be provided
        lx,ly : Extent of the map in the x/y axes (deg). If None, nx/ny must be provided
        """
        self.wcs=wcs.copy()
        if nx is None and lx is None :
            raise ValueError("Must provide either nx or lx")
        if ny is None and ly is None :
            raise ValueError("Must provide either ny or ly")
        if nx is None :
            self.lx=lx
            self.nx=int(self.lx/np.abs(self.wcs.wcs.cdelt[0]))+1
        else :
            self.nx=nx
            self.lx=np.fabs(nx*self.wcs.wcs.cdelt[0])
        self.dx=self.lx/self.nx
        if ny is None :
            self.ly=ly
            self.ny=int(self.ly/np.abs(self.wcs.wcs.cdelt[1]))+1
        else :
            self.ny=ny
            self.ly=np.fabs(ny*self.wcs.wcs.cdelt[1])
        self.dy=self.ly/self.ny
        self.npix=self.nx*self.ny
    def is_map_compatible(self,mp) :
        """True if the flattened map mp has exactly one value per pixel."""
        return self.npix==len(mp)
    def get_dims(self) :
        """
        Returns map dimensions as [ny,nx]
        """
        return [self.ny,self.nx]
    def get_size(self) :
        """
        Returns total number of pixels
        """
        return self.npix
    def pos2pix(self,ra,dec) :
        """
        Returns pixel indices for arrays of x and y coordinates.
        Will return -1 if (x,y) lies outside the map
        """
        ra=np.asarray(ra)
        scalar_input=False
        if ra.ndim==0 :
            ra=ra[None]  # Fixed: was "x[None]", a NameError for scalar input
            scalar_input=True
        dec=np.asarray(dec)
        if dec.ndim==0 :
            dec=dec[None]
        if len(ra)!=len(dec) :
            raise ValueError("ra and dec must have the same size!")
        ix,iy=np.transpose(self.wcs.wcs_world2pix(np.transpose(np.array([ra,dec])),0))
        ix=ix.astype(int); iy=iy.astype(int);
        ix_out=np.where(np.logical_or(ix<0,ix>=self.nx))[0]
        iy_out=np.where(np.logical_or(iy<0,iy>=self.ny))[0]
        ipix=ix+self.nx*iy
        ipix[ix_out]=-1
        ipix[iy_out]=-1
        if scalar_input :
            return np.squeeze(ipix)
        return ipix
    def pos2pix2d(self,ra,dec) :
        """
        Returns (possibly fractional) 2D pixel coordinates for arrays of RA/dec,
        together with a boolean mask flagging positions within one map size of
        the grid (and not NaN).
        """
        ra=np.asarray(ra)
        scalar_input=False
        if ra.ndim==0 :
            ra=ra[None]  # Fixed: was "x[None]", a NameError for scalar input
            scalar_input=True
        dec=np.asarray(dec)
        if dec.ndim==0 :
            dec=dec[None]
        if len(ra)!=len(dec) :
            raise ValueError("ra and dec must have the same size!")
        ix,iy=np.transpose(self.wcs.wcs_world2pix(np.transpose(np.array([ra,dec])),0))
        ix_out=np.where(np.logical_or(ix<-self.nx,ix>=2*self.nx))[0]
        iy_out=np.where(np.logical_or(iy<-self.ny,iy>=2*self.ny))[0]
        is_in=np.ones(len(ix),dtype=bool)
        is_in[ix_out]=False
        is_in[iy_out]=False
        is_in[np.isnan(ix)]=False
        is_in[np.isnan(iy)]=False
        if scalar_input :
            return np.squeeze(ix),np.squeeze(iy),np.squeeze(is_in)
        return ix,iy,is_in
    def pix2pos(self,ipix) :
        """
        Returns x,y coordinates of pixel centres for a set of pixel indices.
        """
        ipix=np.asarray(ipix)
        scalar_input=False
        if ipix.ndim==0 :
            ipix=ipix[None]
            scalar_input=True
        i_out=np.where(np.logical_or(ipix<0,ipix>=self.npix))[0]
        if len(i_out)>0 :
            print(ipix[i_out])
            raise ValueError("Pixels outside of range")
        ix=ipix%self.nx
        ioff=np.array(ipix-ix)
        # Floor division: "/" would produce floats under Python 3
        # (ioff is an exact multiple of nx, so the row index is exact)
        iy=ioff.astype(int)//(int(self.nx))
        # np.float64 instead of np.float_ (removed in NumPy 2.0)
        ix=ix.astype(np.float64)
        iy=iy.astype(np.float64)
        ra,dec=np.transpose(self.wcs.wcs_pix2world(np.transpose(np.array([ix,iy])),0))
        if scalar_input :
            return np.squeeze(ra),np.squeeze(dec)
        return ra,dec
    def get_empty_map(self) :
        """
        Returns a map full of zeros
        """
        return np.zeros(self.npix,dtype=float)
    def view_map(self,map_in,ax=None, xlabel='RA', ylabel='Dec',
                 title=None, addColorbar=True,posColorbar= False, cmap = cm.viridis,
                 colorMax= None, colorMin= None,fnameOut=None):
        """
        Plots a 2D map (passed as a flattened array)
        """
        if len(map_in)!=self.npix :
            raise ValueError("Input map doesn't have the correct size")
        # NOTE(review): cmap, colorMax, colorMin and posColorbar are currently
        # unused (the auto-scaling logic was commented out upstream); the
        # parameters are kept for API stability.
        if ax is None :
            fig=plt.figure()
            ax=fig.add_subplot(111,projection=self.wcs)
        if title is not None :
            ax.set_title(title,fontsize=15)
        image= ax.imshow(map_in.reshape([self.ny,self.nx]),
                         origin='lower', interpolation='nearest')
        if addColorbar :
            plt.colorbar(image)
        ax.set_xlabel(xlabel,fontsize=15)
        ax.set_ylabel(ylabel,fontsize=15)
        if fnameOut is not None :
            plt.savefig(fnameOut,bbox_inches='tight')
    def write_flat_map(self,filename,maps,descript=None) :
        """
        Saves a set of maps in FITS format with WCS. The first map goes into
        the primary HDU, any further maps into image HDUs; an optional
        per-map description is stored in the 'DESCR' header keyword.
        """
        if maps.ndim<1 :
            raise ValueError("Must supply at least one map")
        if maps.ndim==1 :
            maps=np.array([maps])
        if len(maps[0])!=self.npix :
            raise ValueError("Map doesn't conform to this pixelization")
        if descript is not None :
            if len(maps)==1 :
                descript=[descript]
            if len(maps)!=len(descript) :
                raise ValueError("Need one description per map")
        header=self.wcs.to_header()
        hdus=[]
        for im,m in enumerate(maps) :
            head=header.copy()
            if descript is not None :
                head['DESCR']=(descript[im],'Description')
            if im==0 :
                hdu=fits.PrimaryHDU(data=m.reshape([self.ny,self.nx]),header=head)
            else :
                hdu=fits.ImageHDU(data=m.reshape([self.ny,self.nx]),header=head)
            hdus.append(hdu)
        hdulist=fits.HDUList(hdus)
        hdulist.writeto(filename,overwrite=True)
    def compute_power_spectrum(self,map1,mask1,map2=None,mask2=None,l_bpw=None,
                               return_bpw=False,wsp=None,return_wsp=False,
                               temp1=None,temp2=None) :
        """
        Computes power spectrum between two maps.
        map1 : first map to correlate
        mask1 : mask for the first map
        map2 : second map to correlate. If None map2==map1.
        mask2 : mask for the second map. If None mask2==mask1.
        l_bpw : bandpowers on which to calculate the power spectrum. Should be an [2,N_ell] array, where
                the first and second columns list the edges of each bandpower. If None, the function will
                create bandpowers of its own taylored to the properties of your map.
        return_bpw : if True, the bandpowers will also be returned
        wsp : NmtWorkspaceFlat object to accelerate the calculation. If None, this will be precomputed.
        return_wsp : if True, the workspace will also be returned
        temp1 : if not None, set of contaminants to remove from map1
        temp2 : if not None, set of contaminants to remove from map2
        """
        same_map=False
        if map2 is None :
            map2=map1
            same_map=True
            if temp2 is None :
                # map2==map1 implies the same contaminants unless given explicitly
                temp2=temp1
        same_mask=False
        if mask2 is None :
            mask2=mask1
            same_mask=True  # Fixed: was re-set to False, so f1 was never reused
        if len(map1)!=self.npix :
            raise ValueError("Input map has the wrong size")
        if (len(map1)!=len(map2)) or (len(map1)!=len(mask1)) or (len(map1)!=len(mask2)) :
            raise ValueError("Sizes of all maps and masks don't match")
        lx_rad=self.lx*np.pi/180
        ly_rad=self.ly*np.pi/180
        if l_bpw is None :
            # Default binning: linear bandpowers between the fundamental mode
            # and the Nyquist frequency of the grid
            ell_min=max(2*np.pi/lx_rad,2*np.pi/ly_rad)
            ell_max=min(self.nx*np.pi/lx_rad,self.ny*np.pi/ly_rad)
            d_ell=2*ell_min
            n_ell=int((ell_max-ell_min)/d_ell)-1
            l_bpw=np.zeros([2,n_ell])
            l_bpw[0,:]=ell_min+np.arange(n_ell)*d_ell
            l_bpw[1,:]=l_bpw[0,:]+d_ell
            return_bpw=True
        #Generate binning scheme
        b=nmt.NmtBinFlat(l_bpw[0,:],l_bpw[1,:])
        if temp1 is not None :
            tmp1=np.array([[t.reshape([self.ny,self.nx])] for t in temp1])
        else :
            tmp1=None
        if temp2 is not None :
            tmp2=np.array([[t.reshape([self.ny,self.nx])] for t in temp2])
        else :
            tmp2=None
        #Generate fields
        f1=nmt.NmtFieldFlat(lx_rad,ly_rad,mask1.reshape([self.ny,self.nx]),
                            [map1.reshape([self.ny,self.nx])],templates=tmp1)
        # Reuse f1 only when map, mask and contaminants are all shared
        if same_map and same_mask and temp2 is temp1 :
            f2=f1
        else :
            f2=nmt.NmtFieldFlat(lx_rad,ly_rad,mask2.reshape([self.ny,self.nx]),
                                [map2.reshape([self.ny,self.nx])],templates=tmp2)
        #Compute workspace if needed
        if wsp is None :
            wsp=nmt.NmtWorkspaceFlat();
            wsp.compute_coupling_matrix(f1,f2,b)
            return_wsp=True
        #Compute power spectrum
        cl_coupled=nmt.compute_coupled_cell_flat(f1,f2,b)
        cl_uncoupled=wsp.decouple_cell(cl_coupled)[0]
        #Return
        if return_bpw and return_wsp :
            return cl_uncoupled,l_bpw,wsp
        else :
            if return_bpw :
                return cl_uncoupled,l_bpw
            elif return_wsp :
                return cl_uncoupled,wsp
            else :
                return cl_uncoupled
    def u_grade(self,mp,x_fac,y_fac=None) :
        """
        Up-grades the resolution of a map and returns the associated FlatSkyInfo object.
        mp : input map
        x_fac : the new map will be sub-divided into x_fac*nx pixels in the x direction
        y_fac : the new map will be sub-divided into y_fac*ny pixels in the y direction
                if y_fac=None, then y_fac=x_fac
        """
        if y_fac is None :
            y_fac=x_fac
        if len(mp)!=self.npix :
            raise ValueError("Input map has a wrong size")
        w=WCS(naxis=2)
        w.wcs.cdelt=[self.wcs.wcs.cdelt[0]/int(x_fac),self.wcs.wcs.cdelt[1]/int(y_fac)]
        w.wcs.crval=self.wcs.wcs.crval
        w.wcs.ctype=self.wcs.wcs.ctype
        w.wcs.crpix=[self.wcs.wcs.crpix[0]*int(x_fac),self.wcs.wcs.crpix[1]*int(y_fac)]
        fm_ug=FlatMapInfo(w,nx=self.nx*int(x_fac),ny=self.ny*int(y_fac))
        # Each original pixel value is replicated into the new sub-pixels
        mp_ug=np.repeat(np.repeat(mp.reshape([self.ny,self.nx]),int(y_fac),axis=0),
                        int(x_fac),axis=1).flatten()
        return fm_ug,mp_ug
    def d_grade(self,mp,x_fac,y_fac=None) :
        """
        Down-grades the resolution of a map and returns the associated FlatSkyInfo object.
        mp : input map
        x_fac : the new map will be sub-divided into floor(nx/x_fac) pixels in the x direction
        y_fac : the new map will be sub-divided into floor(ny/y_fac) pixels in the y direction
                if y_fac=None, then y_fac=x_fac.
        Note that if nx/ny is not a multiple of x_fac/y_fac, the remainder pixels will be lost.
        """
        if y_fac is None :
            y_fac=x_fac
        if len(mp)!=self.npix :
            raise ValueError("Input map has a wrong size")
        w=WCS(naxis=2)
        w.wcs.cdelt=[self.wcs.wcs.cdelt[0]*int(x_fac),self.wcs.wcs.cdelt[1]*int(y_fac)]
        w.wcs.crval=self.wcs.wcs.crval
        w.wcs.ctype=self.wcs.wcs.ctype
        w.wcs.crpix=[self.wcs.wcs.crpix[0]/int(x_fac),self.wcs.wcs.crpix[1]/int(y_fac)]
        # Floor division: "/" would give floats under Python 3 and break both
        # FlatMapInfo and the reshape below (remainder pixels are dropped)
        nx_new=self.nx//int(x_fac); ix_max=nx_new*int(x_fac)
        ny_new=self.ny//int(y_fac); iy_max=ny_new*int(y_fac)
        mp2d=mp.reshape([self.ny,self.nx])[:iy_max,:][:,:ix_max]
        fm_dg=FlatMapInfo(w,nx=nx_new,ny=ny_new)
        # Average each block of (y_fac x x_fac) pixels into one output pixel
        mp_dg=np.mean(np.mean(np.reshape(mp2d.flatten(),[ny_new,int(y_fac),nx_new,int(x_fac)]),axis=-1),axis=-2).flatten()
        return fm_dg,mp_dg
    @classmethod
    def from_coords(FlatMapInfo,ra_arr,dec_arr,reso,pad=None,projection='TAN') :
        """
        Generates a FlatMapInfo object that can encompass all points with coordinates
        given by ra_arr (R.A.) and dec_arr (dec.) with pixel resolution reso.
        The parameter pad should correspond to the number of pixel sizes you want
        to leave as padding around the edges of the map. If None, it will default to 20.
        The flat-sky maps will use a spherical projection given by the corresponding
        parameter. Tested values are 'TAN' (gnomonic) and 'CAR' (Plate carree).
        """
        if len(ra_arr.flatten())!=len(dec_arr.flatten()) :
            raise ValueError("ra_arr and dec_arr must have the same size")
        if pad is None :
            pad=20.
        elif pad<0 :
            raise ValueError("We need a positive padding")
        # Find median coordinates
        ramean=0.5*(np.amax(ra_arr)+np.amin(ra_arr))
        decmean=0.5*(np.amax(dec_arr)+np.amin(dec_arr))
        #Compute projection on the tangent plane
        w=WCS(naxis=2)
        w.wcs.crpix=[0,0]
        w.wcs.cdelt=[-reso,reso]
        w.wcs.crval=[ramean,decmean]
        w.wcs.ctype=['RA---'+projection,'DEC--'+projection]
        ix,iy=np.transpose(w.wcs_world2pix(np.transpose(np.array([ra_arr,dec_arr])),0))
        #Estimate map size
        nsidex=int(np.amax(ix))-int(np.amin(ix))+1+2*int(pad)
        nsidey=int(np.amax(iy))-int(np.amin(iy))+1+2*int(pad)
        #Off-set to make sure every pixel has positive coordinates
        # TODO: worry about 2pi wrapping
        offx=-np.amin(ix)+pad
        offy=-np.amin(iy)+pad
        w.wcs.crpix=[offx,offy]
        return FlatMapInfo(w,nx=nsidex,ny=nsidey)
####
def read_flat_map(filename,i_map=0,hdu=None) :
    """
    Reads a flat-sky map together with the details of its pixelization scheme,
    returned as a FlatMapInfo object.
    i_map : index of the map to read. If -1, all maps in the file are read.
    hdu : optional, an already-opened HDU to read from instead of a file.
    """
    if hdu is not None :
        # Read a single map straight from the provided HDU
        w=WCS(hdu.header)
        data=hdu.data
        ny,nx=data.shape
        flat=data.flatten()
    else :
        hdul=fits.open(filename)
        w=WCS(hdul[0].header)
        if i_map==-1 :
            # Stack every HDU into a [n_maps, npix] array
            stack=np.array([h.data for h in hdul])
            nm,ny,nx=stack.shape
            flat=stack.reshape([nm,ny*nx])
        else :
            data=hdul[i_map].data
            ny,nx=data.shape
            flat=data.flatten()
    return FlatMapInfo(w,nx=nx,ny=ny),flat
def compare_infos(fsk1,fsk2) :
    """Raise ValueError unless fsk1 and fsk2 describe the same pixelization."""
    for attr in ("nx","ny","lx","ly") :
        if getattr(fsk1,attr)!=getattr(fsk2,attr) :
            raise ValueError("Map infos are incompatible")
| 15,536 | 34.799539 | 122 | py |
ilmart | ilmart-main/src/__init__.py | 0 | 0 | 0 | py | |
ilmart | ilmart-main/src/ilmart/ilmart_distill.py | import itertools
import numpy as np
from collections import defaultdict
import lightgbm as lgbm
class IlmartDistill:
    """
    Distills a LightGBM model whose trees each split on at most two features
    into explicit per-feature (1D) and per-feature-pair (2D) lookup tables
    ("histograms"), so that a prediction becomes a sum of table lookups.

    distill_mode:
        "full"  - exact: accumulate every leaf value into the bins delimited
                  by the model's own split thresholds.
        other   - approximate: probe the model on a regular grid of n_sample
                  points per feature.
    """

    def __init__(self, model: "lgbm.Booster", distill_mode="full", n_sample=None):
        self.model = model
        self.feat_name_to_index = {feat: i for i, feat in enumerate(self.model.dump_model()['feature_names'])}
        # Per-feature range, padded by 50% on each side so any reasonable
        # sample falls inside the outermost bins.
        self.feat_min = {}
        self.feat_max = {}
        for feat_name, feat_info in self.model.dump_model()["feature_infos"].items():
            feat_index = self.feat_name_to_index[feat_name]
            feat_range = feat_info["max_value"] - feat_info["min_value"]
            self.feat_min[feat_index] = feat_info["min_value"] - feat_range * 0.5
            self.feat_max[feat_index] = feat_info["max_value"] + feat_range * 0.5
        self.n_sample = n_sample
        self.distill_mode = distill_mode
        # To be computed later
        self.hist = None
        self.splitting_values = None
        self.__create_hist_dict()

    def __compute_hist(self, tree_structure: dict, feat_used: tuple, feat_min_max=None):
        """Recursively accumulate the leaf values of one tree into self.hist."""
        if feat_min_max is None:
            feat_min_max = np.array([[self.feat_min[feat], self.feat_max[feat]] for feat in feat_used], dtype='f')
        if "leaf_index" in tree_structure:
            # Translate the leaf's (min,max) box into bin-index ranges
            limits = []
            for i, feat in enumerate(feat_used):
                start = np.nonzero(np.isclose(self.splitting_values[feat], feat_min_max[i][0]))[0][0]
                try:
                    end = np.nonzero(np.isclose(self.splitting_values[feat], feat_min_max[i][1]))[0][0]
                except IndexError:
                    # No split coincides with the upper edge: clamp to the last
                    # bin edge. (Was a blanket "except Exception", which could
                    # hide unrelated errors.)
                    end = len(self.splitting_values[feat]) - 1
                limits.append((start, end))
            selection = self.hist[feat_used]
            slicing = tuple([slice(start, end) for (start, end) in limits])
            selection[slicing] += tree_structure["leaf_value"]
            return
        split_index = feat_used.index(tree_structure["split_feature"])
        if "left_child" in tree_structure:
            new_min_max = np.copy(feat_min_max)
            new_min_max[split_index][1] = min(new_min_max[split_index][1], tree_structure["threshold"])
            self.__compute_hist(tree_structure["left_child"], feat_used, feat_min_max=new_min_max)
        if "right_child" in tree_structure:
            new_min_max = np.copy(feat_min_max)
            new_min_max[split_index][0] = max(new_min_max[split_index][0], tree_structure["threshold"])
            self.__compute_hist(tree_structure["right_child"], feat_used, feat_min_max=new_min_max)
        return

    @staticmethod
    def __splitting_values(tree_structure, splitting_values_forest, feat_used=None):
        """Collect the split thresholds of one tree into splitting_values_forest
        (a dict feature -> set of thresholds) and return the set of features used."""
        split_feat = tree_structure.get("split_feature", None)
        if split_feat is None:
            return feat_used
        if feat_used is None:
            feat_used = set()
        feat_used.add(split_feat)
        splitting_values_forest[split_feat].add(tree_structure["threshold"])
        IlmartDistill.__splitting_values(tree_structure["left_child"], splitting_values_forest, feat_used)
        IlmartDistill.__splitting_values(tree_structure["right_child"], splitting_values_forest, feat_used)
        return feat_used

    def __create_hist_dict(self):
        """Build self.splitting_values and fill self.hist for every feature set."""
        self.hist = {}
        feats_used = []
        tree_infos = self.model.dump_model()["tree_info"]
        splitting_values_set = defaultdict(set)
        self.splitting_values = {}
        # Retrieve all the splitting values
        for tree_info in tree_infos:
            tree_structure = tree_info["tree_structure"]
            feats_used.append(IlmartDistill.__splitting_values(tree_structure, splitting_values_set))
        if self.distill_mode == "full":
            # Add maximum and minimum to have the complete range
            for feat in splitting_values_set.keys():
                splitting_values_set[feat].add(self.feat_max[feat])
                splitting_values_set[feat].add(self.feat_min[feat])
            # From the set created to a numpy array with all the values and saved on the current object
            for feat, values in splitting_values_set.items():
                self.splitting_values[feat] = np.array(sorted(list(splitting_values_set[feat])))
        else:
            # Sampled mode: regular grid of n_sample bins per feature
            feat_infos = self.model.dump_model()["feature_infos"]
            for feat, infos in feat_infos.items():
                feat_i = self.feat_name_to_index[feat]
                # self.n_sample + 1 because we want exactly self.n_sample bins
                step = (self.feat_max[feat_i] - self.feat_min[feat_i]) / (self.n_sample + 1)
                self.splitting_values[feat_i] = np.arange(self.feat_min[feat_i], self.feat_max[feat_i], step)
        # Create a numpy array with shape corresponding to the feature dimension
        for feat_used in feats_used:
            feats_key = tuple(sorted(feat_used))
            if feats_key not in self.hist:
                shape = tuple([len(self.splitting_values[feat]) - 1 for feat in feats_key])
                self.hist[feats_key] = np.zeros(shape)
        # Compute hist for each tree
        if self.distill_mode == "full":
            for tree_info, feats in zip(tree_infos, feats_used):
                tree_structure = tree_info["tree_structure"]
                feats_key = tuple(sorted(feats))
                self.__compute_hist(tree_structure, feats_key)
        else:
            # Probe the model at the centre of every bin (or bin pair)
            for feats_used in self.hist.keys():
                mid_points = [(self.splitting_values[feat_used][1:] + self.splitting_values[feat_used][:-1]) / 2
                              for feat_used in feats_used]
                for coord, value in enumerate(itertools.product(*mid_points)):
                    sample = np.zeros(self.model.num_feature())
                    for i, feat_i in enumerate(feats_used):
                        sample[feat_i] = value[i]
                    sample = sample.reshape((1, -1))
                    if len(feats_used) == 1:
                        self.hist[feats_used][coord] = self.model.predict(sample)
                    else:
                        self.hist[feats_used][coord // self.n_sample, coord % self.n_sample] = self.model.predict(
                            sample)

    @staticmethod
    def __predict(row, model, interactions_limit=-1):
        """Score one sample by summing table lookups; optionally keep only the
        interactions_limit most important interaction tables."""
        res = 0
        interaction_to_exclude = []
        if interactions_limit != -1:
            inter_contrib = [(feats, value) for feats, value in model.expected_contribution().items() if len(feats) > 1]
            inter_contrib.sort(key=lambda x: x[1], reverse=True)
            interaction_to_exclude = [feats for feats, value in inter_contrib[interactions_limit:]]
        for feats_hist, hist in model.hist.items():
            if feats_hist in interaction_to_exclude:
                continue
            indices = []
            for feat in feats_hist:
                # Bin index of the sample's value, clamped to the valid range
                index_to_add = np.searchsorted(model.splitting_values[feat], row[feat])
                index_to_add -= 1
                index_to_add = max(0, index_to_add)
                index_to_add = min(len(model.splitting_values[feat]) - 2, index_to_add)
                indices.append(index_to_add)
            res += hist[tuple(indices)]
        return res

    def predict(self, X, interactions_limit=-1):
        """Score every row of X with the distilled lookup tables."""
        res = np.apply_along_axis(IlmartDistill.__predict, 1, X, self, interactions_limit=interactions_limit)
        return res

    def expected_contribution(self):
        """Mean absolute value of each table: a rough importance per feature set."""
        return {feats: np.abs(hist).mean() for feats, hist in self.hist.items()}
| 7,538 | 47.019108 | 119 | py |
ilmart | ilmart-main/src/ilmart/utils.py | from collections import defaultdict
import os
from rankeval.dataset.dataset import Dataset as RankEvalDataset
from rankeval.dataset.datasets_fetcher import load_dataset
from tqdm import tqdm
# Root directory where rankeval stores its datasets; can be overridden with
# the RANKEVAL_DATA environment variable (defaults to ~/rankeval_data).
DATA_HOME = os.environ.get('RANKEVAL_DATA', os.path.join('~', 'rankeval_data'))
DATA_HOME = os.path.expanduser(DATA_HOME)
# Local folder (under DATA_HOME) that holds each supported dataset.
DICT_NAME_FOLDER = {
    "web30k": f"{DATA_HOME}/msn30k/dataset/Fold1",
    "yahoo": f"{DATA_HOME}/yahoo/set1",
    "istella": f"{DATA_HOME}/istella-sample/dataset/sample"
}
# Name of each dataset in rankeval's downloader ("yahoo" is absent: it must
# be downloaded manually for copyright reasons, see load_datasets).
RANKEVAL_MAPPING = {
    "web30k": "msn30k",
    "istella": "istella-sample"
}
# Fold argument passed to rankeval's downloader for each dataset.
RANKEVAL_MAPPING_FOLD = {
    "web30k": 1,
    "istella": None
}
def load_datasets(datasets=None):
    """
    Load the train/vali/test splits of the requested datasets as rankeval
    Datasets, downloading them first when they are not already on disk.
    datasets : iterable of dataset names (keys of DICT_NAME_FOLDER);
               None loads every known dataset.
    """
    if datasets is None:
        to_load = dict(DICT_NAME_FOLDER)
    else:
        to_load = {name: DICT_NAME_FOLDER[name] for name in datasets}
    loaded = defaultdict(dict)
    for name, folder in tqdm(to_load.items()):
        if not os.path.isdir(folder):
            if name == "yahoo":
                raise Exception(
                    """
                    For copyright reason you have to download the Yahoo dataset separately:
                    https://webscope.sandbox.yahoo.com/catalog.php?datatype=c
                    """
                )
            # load_dataset used only to download the dataset
            load_dataset(dataset_name=RANKEVAL_MAPPING[name],
                         fold=RANKEVAL_MAPPING_FOLD[name],
                         download_if_missing=True,
                         force_download=False,
                         with_models=False)
        for split in ["train", "vali", "test"]:
            loaded[name][split] = RankEvalDataset.load(f"{folder}/{split}.txt")
    return loaded
def is_interpretable(model, verbose=True):
    """
    Check that a LightGBM model is "interpretable" in the ILMART sense:
    every tree splits on at most two features, and each feature appearing in
    a two-feature (interaction) tree also appears alone in some tree.
    Returns True/False; trees violating the two-feature limit are printed.
    """
    df = model.trees_to_dataframe()
    pairs = set()
    mains = set()
    for idx in df["tree_index"].unique():
        subset = df[df["tree_index"] == idx]
        used = [f for f in subset["split_feature"].unique() if f is not None]
        if len(used) > 2:
            print(idx)
            print(subset["split_feature"])
            return False
        if len(used) == 2:
            pairs.add(tuple(sorted(used)))
        elif len(used) == 1:
            mains.add(used[0])
    for first, second in pairs:
        if first not in mains or second not in mains:
            return False
    if verbose:
        print(f"len(all_pairs) = {len(pairs)}")
        print(f"len(singletons) = {len(mains)}")
    return True
| 2,681 | 35.243243 | 102 | py |
ilmart | ilmart-main/src/ilmart/__init__.py | from .ilmart_distill import IlmartDistill
from .ilmart import Ilmart
| 69 | 22.333333 | 41 | py |
ilmart | ilmart-main/src/ilmart/ilmart.py | import lightgbm as lgbm
import rankeval
from .ilmart_distill import IlmartDistill
from .utils import is_interpretable
class Ilmart():
    """
    Interpretable LambdaMART: a LightGBM ranker trained in two stages —
    first with one feature per tree (main effects only), then optionally
    continued with a limited set of two-feature interaction effects.
    """

    def __init__(self, verbose, feat_inter_boosting_rounds=2000, inter_rank_strategy="greedy"):
        self.verbose = verbose
        self._model_main_effects = None
        self._model_inter = None
        # Boosting rounds used by the auxiliary model that ranks interactions
        self.feat_inter_boosting_rounds = feat_inter_boosting_rounds
        self._fit = False
        # "greedy": order interactions by first appearance; anything else:
        # order by distilled expected contribution
        self.inter_rank_strategy = inter_rank_strategy
        self.inter_rank = None

    def fit(self, lgbm_params: dict,
            num_boosting_rounds: int,
            train: "rankeval.dataset.dataset.Dataset",
            vali: "rankeval.dataset.dataset.Dataset",
            num_interactions=50, early_stopping_rounds=100):
        """Train the main-effect model and, if num_interactions > 0, continue
        training with interaction effects."""
        self.fit_main_effects(lgbm_params, num_boosting_rounds, train, vali, early_stopping_rounds)
        if num_interactions > 0:
            self.fit_inter_effects(lgbm_params, num_boosting_rounds, train, vali, num_interactions,
                                   early_stopping_rounds)

    def fit_main_effects(self,
                         lgbm_params: dict,
                         num_boosting_rounds: int,
                         train: "rankeval.dataset.dataset.Dataset",
                         vali: "rankeval.dataset.dataset.Dataset",
                         early_stopping_rounds=100):
        """Train a model constrained to one feature per tree (main effects)."""
        lgbm_params = lgbm_params.copy()
        train_lgbm = lgbm.Dataset(train.X, group=train.get_query_sizes(), label=train.y, free_raw_data=False)
        vali_lgbm = lgbm.Dataset(vali.X, group=vali.get_query_sizes(), label=vali.y, free_raw_data=False)
        # One singleton constraint per feature forbids any feature mixing
        lgbm_params["interaction_constraints"] = [[i] for i in range(train.n_features)]
        early_stopping = lgbm.early_stopping(early_stopping_rounds, verbose=True)
        callbacks = [early_stopping]
        if self.verbose:
            callbacks.append(lgbm.log_evaluation(period=1, show_stdv=True))
        if self.verbose:
            print(lgbm_params)
        self._model_main_effects = lgbm.train(lgbm_params,
                                              train_lgbm,
                                              num_boost_round=num_boosting_rounds,
                                              valid_sets=[vali_lgbm],
                                              callbacks=callbacks)
        self._fit = True

    def _get_contribution_greedy(self, model_to_rank: "lgbm.Booster"):
        """Rank feature pairs by the order in which the booster first used them."""
        tree_df = model_to_rank.trees_to_dataframe()
        greedy_contrib = []
        feat_name_to_index = {feat_name: feat_index for feat_index, feat_name in
                              enumerate(model_to_rank.feature_name())}
        for tree_index in tree_df["tree_index"].unique():
            # Compute feat used for tree with index tree_index
            tree_df_per_index = tree_df[tree_df["tree_index"] == tree_index]
            feats_used = [feat_name_to_index[feat] for feat in tree_df_per_index["split_feature"].unique() if
                          feat is not None]
            feats_used = tuple(sorted(feats_used))
            if len(feats_used) < 2:
                continue
            if feats_used not in greedy_contrib:
                greedy_contrib.append(feats_used)
        return [list(pair) for pair in greedy_contrib]

    def _get_contribution_aware(self, model_to_rank: "lgbm.Booster"):
        """Rank feature pairs by their distilled expected contribution."""
        distilled = IlmartDistill(model_to_rank)
        distill_contrib = [[feats, value] for feats, value in distilled.expected_contribution().items() if
                           len(feats) > 1]
        distill_contrib.sort(key=lambda x: abs(x[1]), reverse=True)
        return [list(feats) for feats, value in distill_contrib]

    def _get_contribution(self, model_to_rank: "lgbm.Booster"):
        """Dispatch to the interaction-ranking strategy chosen at construction."""
        if self.inter_rank_strategy == "greedy":
            return self._get_contribution_greedy(model_to_rank)
        else:
            return self._get_contribution_aware(model_to_rank)

    def _rank_interactions(self, lgbm_params, train: "rankeval.dataset.dataset.Dataset"):
        """Continue boosting on a dataset restricted to the most important
        features and rank the feature interactions the booster discovers."""
        mif = [feat_i for feat_i, imp in enumerate(self._model_main_effects.feature_importance("split")) if imp > 0]
        not_mif = [feat for feat in range(train.n_features) if feat not in mif]
        # TODO do it without copying the entire dataset
        transformed_dataset = train.X.copy()
        # Zero out features the main-effects model never used
        transformed_dataset[:, not_mif] = 0
        train_lgbm = lgbm.Dataset(transformed_dataset, group=train.get_query_sizes(), label=train.y,
                                  free_raw_data=False)
        lgbm_params = lgbm_params.copy()
        # Tiny trees (3 leaves) so each tree uses at most two features
        lgbm_params["num_leaves"] = 3
        lgbm_params["learning_rate"] = 0.1
        if self.verbose:
            print(lgbm_params)
        model_to_rank = lgbm.train(lgbm_params,
                                   train_lgbm,
                                   num_boost_round=self.feat_inter_boosting_rounds,
                                   init_model=self._model_main_effects)
        return self._get_contribution(model_to_rank)

    def fit_inter_effects(self,
                          lgbm_params: dict,
                          num_boosting_rounds: int,
                          train: "rankeval.dataset.dataset.Dataset",
                          vali: "rankeval.dataset.dataset.Dataset",
                          num_interactions: int,
                          early_stopping_rounds=100,
                          force_inter_rank=False):
        """Continue training the fitted main-effects model, allowing only the
        top num_interactions ranked feature pairs to interact."""
        self._check_fit()
        lgbm_params = lgbm_params.copy()
        if "interaction_constraints" in lgbm_params:
            lgbm_params.pop("interaction_constraints")
        train_lgbm = lgbm.Dataset(train.X, group=train.get_query_sizes(), label=train.y, free_raw_data=False)
        vali_lgbm = lgbm.Dataset(vali.X, group=vali.get_query_sizes(), label=vali.y, free_raw_data=False)
        if self.inter_rank is None or force_inter_rank:
            self.inter_rank = self._rank_interactions(lgbm_params, train)
        lgbm_params["tree_interaction_constraints"] = self.inter_rank[:num_interactions]
        if self.verbose:
            print(f"tree_interaction_constraints: {lgbm_params['tree_interaction_constraints']}")
        early_stopping = lgbm.early_stopping(early_stopping_rounds, verbose=True)
        callbacks = [early_stopping]
        if self.verbose:
            callbacks.append(lgbm.log_evaluation(period=1, show_stdv=True))
        if self.verbose:
            print(lgbm_params)
        self._model_inter = lgbm.train(lgbm_params,
                                       train_lgbm,
                                       num_boost_round=num_boosting_rounds,
                                       valid_sets=[vali_lgbm],
                                       callbacks=callbacks,
                                       init_model=self._model_main_effects)

    def _check_fit(self):
        """Raise if the main-effects model has not been trained yet."""
        if self._model_main_effects is None:
            # RuntimeError (still an Exception subclass) instead of the
            # overly generic "raise Exception"
            raise RuntimeError("Model not fit yet")

    def get_model(self):
        """Return the interaction model if available, else the main-effects model."""
        if self._model_inter is not None:
            return self._model_inter
        return self._model_main_effects

    def set_model(self, model: "lgbm.Booster", inter=False):
        """Install an externally trained booster, verifying interpretability."""
        if not is_interpretable(model, verbose=False):
            # ValueError (still an Exception subclass): the argument is invalid
            raise ValueError("The model passed is not interpretable")
        if not inter:
            self._model_main_effects = model
        else:
            self._model_inter = model

    def get_distill(self):
        """Return an IlmartDistill of the best available model."""
        if self._model_inter is not None:
            return IlmartDistill(self._model_inter)
        return IlmartDistill(self._model_main_effects)
| 7,560 | 42.454023 | 116 | py |
ilmart | ilmart-main/experiments/download_files.py | import requests
from tqdm import tqdm
import math
import zipfile
import os.path
def convert_size(size_bytes: int):
    """Render a byte count as a human-readable string, e.g. 1536 -> '1.5 KB'."""
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return f"{scaled} {units[exponent]}"
def download(id: str, destination: str):
    """Stream a Google Drive file identified by *id* into *destination*.

    Skips the download when the destination file already exists. Progress is
    reported every 100 chunks.
    (The parameter name "id" shadows the builtin but is kept for interface
    compatibility with existing callers.)
    """
    if os.path.isfile(destination):
        print(f"File already downloaded in {destination}")
        print("Skipping download")
        return
    URL = "https://docs.google.com/uc?export=download"
    CHUNK_SIZE = 1024 * 1024  # 1024 * 1024 B = 1 MB
    session = requests.Session()
    # confirm=t bypasses Google Drive's "can't scan for viruses" page
    response = session.get(URL, params={'id': id, "confirm": "t"}, stream=True)
    print(f"Start download in {destination}")
    bytes_downloaded = 0
    with open(destination, "wb") as f:
        for i, chunk in tqdm(enumerate(response.iter_content(CHUNK_SIZE))):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
                # Fixed: count the bytes actually received; the original added
                # CHUNK_SIZE for every chunk, over-reporting the last (shorter)
                # chunk and keep-alive chunks.
                bytes_downloaded += len(chunk)
            if i % 100 == 0 and i != 0:
                print(f"Downloaded {convert_size(bytes_downloaded)}")
def extract(zip_file: str, dest: str):
    """Unpack the archive *zip_file* into the directory *dest*."""
    print(f"Start unzipping file in {dest}")
    archive = zipfile.ZipFile(zip_file, 'r')
    try:
        archive.extractall(dest)
    finally:
        archive.close()
def main():
    """Fetch and unpack the pre-trained models and the results archives."""
    targets = [
        ("1fjk1qtS8G6aMP9xxPfBSCo9LvZB5xFh0", 'best_models_dl.zip', "."),
        ("1JUeAXrWdPmtBPn6ulU9mp5oDFGcZH32G", 'results_dl.zip', "."),
    ]
    for file_id, archive, dest in targets:
        download(file_id, archive)
        extract(archive, dest)


if __name__ == '__main__':
    main()
| 1,857 | 27.584615 | 79 | py |
ilmart | ilmart-main/experiments/ilmart/ilmart_evaluate.py | #!/usr/bin/env python
# coding: utf-8
from pathlib import Path
import lightgbm as lgbm
import argparse
import json
import pickle
from collections import defaultdict
from ilmart.utils import load_datasets, is_interpretable
from rankeval.metrics import NDCG
def evaluate(models_dir, rankeval_datasets, path_out):
    """
    Score one model per dataset (loaded from "<models_dir>/<name>.lgbm") on
    its test split, printing the mean NDCG at cutoffs 1/5/10, and pickle the
    per-query NDCG values ({dataset: {cutoff: scores}}) to path_out.
    """
    boosters = {ds_name: lgbm.Booster(model_file=f"{models_dir}/{ds_name}.lgbm")
                for ds_name in rankeval_datasets}
    ndcg_by_dataset = defaultdict(dict)
    for ds_name, booster in boosters.items():
        print(f"Computing NCDG for {ds_name}")
        test_split = rankeval_datasets[ds_name]["test"]
        preds = booster.predict(test_split.X)
        for cutoff in [1, 5, 10]:
            metric = NDCG(cutoff=cutoff, no_relevant_results=1, implementation="exp")
            res = metric.eval(test_split, preds)
            ndcg_by_dataset[ds_name][cutoff] = res[1]
            print(f"\tCutoff {cutoff} (mean): {res[0]}")
        print(f"Is interpretable? {is_interpretable(booster)}")
    with open(path_out, "wb") as out_file:
        pickle.dump(ndcg_by_dataset, out_file)
def main():
    """Evaluate the tuned ILMART models (with and without feature
    interactions) on every dataset and save the NDCG results as pickles."""
    parser = argparse.ArgumentParser(description="Ilmart benchmark")
    parser.add_argument("--config_path",
                        type=str,
                        default="config.json",
                        help="""
    Path to the JSON file containing the configuration for the benchmark. It contains the following keys:
    - path_out: path where to save the models.
    - path_eval: path where to save the evaluation results of the model
    """)
    args = parser.parse_args()

    with open(args.config_path) as f:
        try:
            json_args = json.load(f)
            path_out = json_args["path_out"]
            path_eval = json_args["path_eval"]
        except Exception as e:
            print(f"Problems reading the configuration file {args.config_path} ")
            print(e)
            # Bug fix: the original swallowed the error and fell through to a
            # NameError on the unbound path_out/path_eval below. Fail fast.
            raise

    rankeval_datasets = load_datasets()

    Path(path_eval).mkdir(parents=True, exist_ok=True)
    evaluate(f"{path_out}/without_inter", rankeval_datasets, f"{path_eval}/ilmart.pickle")
    evaluate(f"{path_out}/with_inter", rankeval_datasets, f"{path_eval}/ilmart_i.pickle")


if __name__ == '__main__':
    main()
| 2,327 | 33.235294 | 117 | py |
ilmart | ilmart-main/experiments/ilmart/ilmart_train.py | #!/usr/bin/env python
# coding: utf-8
import typing
import argparse
import rankeval.dataset
import json
from ilmart.utils import load_datasets
from pathlib import Path
from tqdm import tqdm
from sklearn.model_selection import ParameterGrid
from ilmart import Ilmart
from rankeval.metrics import NDCG
def hyperparams_grid_search(param_grid: dict,
                            common_params: dict,
                            train_dataset: rankeval.dataset.Dataset,
                            vali_dataset: rankeval.dataset.Dataset,
                            boosting_rounds: int,
                            initial_ilmart: typing.Optional[Ilmart] = None,
                            n_interactions: typing.Optional[int] = None):
    """Grid-search hyper-parameters and return the model with the best
    validation NDCG@10.

    :param param_grid: grid passed to sklearn's ParameterGrid; each point is
        merged on top of ``common_params`` (grid values win on key clashes).
    :param common_params: LightGBM parameters shared by every grid point.
    :param train_dataset: rankeval training split.
    :param vali_dataset: rankeval validation split used for model selection.
    :param boosting_rounds: number of boosting rounds per fit.
    :param initial_ilmart: when given, interaction effects are fitted on top
        of this already-trained main-effects model; otherwise a fresh Ilmart
        is trained on main effects only.
    :param n_interactions: number of feature interactions to add (only used
        together with ``initial_ilmart``).
    :return: the best model found, or None if no grid point beat NDCG@10 = 0.
    """
    max_ndcg = 0
    best_model = None
    for params in tqdm(list(ParameterGrid(param_grid))):
        current_params = {**common_params, **params}
        if initial_ilmart is not None:
            # NOTE(review): every grid point refits the *same* Ilmart object,
            # so ``best_model`` aliases an object that later iterations keep
            # mutating — the returned model may reflect the last fit rather
            # than the best one. Confirm Ilmart.fit_inter_effects semantics.
            model = initial_ilmart
            model.feat_inter_boosting_rounds = boosting_rounds
            model.fit_inter_effects(current_params,
                                    boosting_rounds,
                                    train_dataset,
                                    vali_dataset,
                                    n_interactions)
        else:
            model = Ilmart(verbose=False, inter_rank_strategy="greedy")
            model.fit_main_effects(current_params, boosting_rounds, train_dataset, vali_dataset)
        # Model selection: mean NDCG@10 on the validation split.
        predictions = model.get_model().predict(vali_dataset.X)
        new_ndcg = NDCG(cutoff=10, no_relevant_results=1, implementation="exp").eval(vali_dataset, predictions)[0]
        if new_ndcg > max_ndcg:
            best_model = model
            max_ndcg = new_ndcg
            print(f"New max ndcg found: {max_ndcg}")
            print(f"With params: {params}")
    return best_model
def save_models(models_dir: str, models_dict: dict):
    """Persist every fitted ILMART model in *models_dict* to
    ``<models_dir>/<name>.lgbm``, creating the directory if needed."""
    Path(models_dir).mkdir(parents=True, exist_ok=True)
    for model_name, fitted in models_dict.items():
        destination = f"{models_dir}/{model_name}.lgbm"
        print(f"Saving to {destination}")
        fitted.get_model().save_model(destination)
def main():
    """Train ILMART models for every dataset: first main effects only, then
    feature interactions on top of the best main-effects model, saving the
    winners of each stage to disk."""
    parser = argparse.ArgumentParser(description="Ilmart benchmark")
    parser.add_argument("--config_path",
                        type=str,
                        default="config.json",
                        help="""
    Path to the JSON file containing the configuration for the benchmark. It contains the following keys:
    - path_out: path where to save the models.
    - common_params: common params to use during the LGBM training.
    - param_grid: the parameter grid to pass to ParameterGrid (sklearn).
    - boosting_rounds: the number of boosting rounds to do.
    - n_interactions: the number of interactions to add to the model.
    """)
    args = parser.parse_args()

    with open(args.config_path) as f:
        try:
            json_args = json.load(f)
            path_out = json_args["path_out"]
            common_params = json_args["common_params"]
            param_grid = json_args["param_grid"]
            boosting_rounds = json_args["boosting_rounds"]
            n_interactions = json_args["n_interactions"]
        except Exception as e:
            print(f"Problems reading the configuration file {args.config_path} ")
            print(e)
            # Bug fix: the original swallowed the error and fell through to a
            # NameError on the unbound configuration variables below.
            raise

    rankeval_datasets = load_datasets()

    # Stage 1: main effects only, one grid search per dataset.
    # (Banner fixed: it previously said "with interactions" for this stage.)
    print("Start computing best models without interactions")
    best_no_inter_ilmart = {}
    for name, datasets in rankeval_datasets.items():
        best_no_inter_ilmart[name] = hyperparams_grid_search(param_grid,
                                                             common_params,
                                                             datasets["train"],
                                                             datasets["vali"],
                                                             boosting_rounds)
    save_models(f"{path_out}/without_inter", best_no_inter_ilmart)

    # Stage 2: add interaction effects on top of each stage-1 model.
    print("Start computing best models with interactions")
    best_inter_ilmart = {}
    for name, ilmart_model in best_no_inter_ilmart.items():
        print(f"Find best model for {name}")
        train_ds = rankeval_datasets[name]["train"]
        vali_ds = rankeval_datasets[name]["vali"]
        best_inter_ilmart[name] = hyperparams_grid_search(param_grid, common_params, train_ds, vali_ds,
                                                          boosting_rounds, ilmart_model, n_interactions)
    save_models(f"{path_out}/with_inter", best_inter_ilmart)


if __name__ == '__main__':
    main()
| 4,762 | 42.3 | 129 | py |
ilmart | ilmart-main/experiments/ebm/ebm_train.py | #!/usr/bin/env python
# coding: utf-8
import pickle
import argparse
from ilmart.utils import load_datasets
import json
from interpret.glassbox import ExplainableBoostingRegressor
from rankeval.metrics import NDCG
from pathlib import Path
from tqdm import tqdm
def train(rankeval_datasets, outerbags, models_dir, n_inter):
    """Fit one ExplainableBoostingRegressor per (dataset, outer_bags) pair
    and pickle it to ``<models_dir>/<name>_<bag>.pickle``.

    Already-existing model files are skipped, so an interrupted run can be
    resumed without retraining.

    :param rankeval_datasets: {name: {"train": Dataset, ...}} mapping.
    :param outerbags: iterable of ``outer_bags`` values to try.
    :param models_dir: output directory (assumed to exist).
    :param n_inter: number of interaction terms passed to the EBM.
    """
    for name, datasets in tqdm(rankeval_datasets.items()):
        for bag in outerbags:
            path = f"{models_dir}/{name}_{bag}.pickle"
            if Path(path).exists():
                print(f"Model already computed: {name} - {bag} - {path}")
                print("Skipping...")
                continue
            # Relevance labels are treated as a regression target.
            ebm = ExplainableBoostingRegressor(random_state=42, interactions=n_inter, outer_bags=bag, n_jobs=40)
            ebm.fit(datasets["train"].X, datasets["train"].y)
            print(f"Writing model to: {path}")
            with open(path, "wb") as f:
                pickle.dump(ebm, f)
def train_all(rankeval_datasets, outerbags, models_dir):
    """Train EBM models for every dataset and outer-bag value, first without
    interaction terms, then with 50 interaction terms."""
    configurations = (
        ("without interactions", "without_inter", 0),
        ("with 50 interactions", "with_inter", 50),
    )
    for label, subdir, n_inter in configurations:
        print(f"Train models {label}")
        out_dir = f"{models_dir}/{subdir}"
        Path(out_dir).mkdir(parents=True, exist_ok=True)
        train(rankeval_datasets, outerbags, out_dir, n_inter)
def evaluate(vali_dataset, model):
    """Return the mean NDCG@10 of *model* on *vali_dataset*."""
    metric = NDCG(cutoff=10, no_relevant_results=1, implementation="exp")
    return metric.eval(vali_dataset, model.predict(vali_dataset.X))[0]
def find_best_model(rankeval_datasets, outerbags, models_dir, out_dir):
    """For each dataset, pick the outer-bag setting with the highest
    validation NDCG@10 and save the winning model to ``<out_dir>/<name>.pickle``."""
    winners = {}
    for ds_name, splits in rankeval_datasets.items():
        print(f"Evaluating models found for {ds_name}")
        best_score = 0
        winner = None
        for outerbag in outerbags:
            candidate_path = f"{models_dir}/{ds_name}_{outerbag}.pickle"
            with open(candidate_path, "rb") as f:
                candidate = pickle.load(f)
            score = evaluate(splits["vali"], candidate)
            print(f"NDCG for {candidate_path}: {score}")
            if score > best_score:
                print(f"Best outerbag at the moment: {outerbag}")
                best_score = score
                winner = candidate
        winners[ds_name] = winner
    Path(out_dir).mkdir(parents=True, exist_ok=True)
    for ds_name, model in winners.items():
        print(f"Saving the model to {out_dir}")
        with open(f"{out_dir}/{ds_name}.pickle", "wb") as f:
            pickle.dump(model, f)
def find_best_model_all(rankeval_datasets, outerbags, models_dir, best_models_dir):
    """Run the per-dataset best-model selection for both model families
    (without and with interaction terms)."""
    stages = (
        ("Starting searching the best model without inter", "without_inter"),
        ("Starting searching the best model with inter", "with_inter"),
    )
    for banner, subdir in stages:
        print(banner)
        find_best_model(rankeval_datasets, outerbags,
                        f"{models_dir}/{subdir}", f"{best_models_dir}/{subdir}")
def main():
    """Train EBM models for every dataset/outer-bag combination, then select
    and save the best model per dataset based on validation NDCG@10."""
    parser = argparse.ArgumentParser(description="EBM train script.")
    parser.add_argument("--config_path",
                        type=str,
                        default="config.json",
                        help="""
    Path to the JSON file containing the configuration for the benchmark. It contains the following keys:
    - models_dir: where to save all the models created.
    - best_models_dir: where to save the best models created (fine-tuned).
    - outerbags: values of outerbags to try.
    """)
    args = parser.parse_args()

    with open(args.config_path) as f:
        try:
            json_args = json.load(f)
            models_dir = json_args["models_dir"]
            print(f"{models_dir=}")
            best_models_dir = json_args["best_models_dir"]
            print(f"{best_models_dir=}")
            outerbags = json_args["outerbags"]
            print(f"{outerbags=}")
        except Exception as e:
            print(f"Problems reading the configuration file {args.config_path} ")
            print(e)
            # Bug fix: the original swallowed the error and fell through to a
            # NameError on the unbound configuration variables below.
            raise

    Path(models_dir).mkdir(parents=True, exist_ok=True)

    print("Load datasets")
    rankeval_datasets = load_datasets()

    train_all(rankeval_datasets, outerbags, models_dir)
    find_best_model_all(rankeval_datasets, outerbags, models_dir, best_models_dir)


if __name__ == '__main__':
    main()
| 4,480 | 36.974576 | 117 | py |
ilmart | ilmart-main/experiments/ebm/ebm_evaluate.py | from pathlib import Path
from rankeval.metrics import NDCG
from ilmart.utils import load_datasets
import argparse
import pickle
import json
def evaluate_and_save(models_dict, rankeval_datasets, file_out):
    """Compute NDCG@{1,5,10} on the test split for every model and pickle the
    per-query scores ({dataset: {cutoff: per-query ndcg}}) to *file_out*.

    Fix: the original re-ran ``model.predict`` once per cutoff (three full
    prediction passes per model); predictions are now computed once and
    reused, which yields identical results.
    """
    cutoffs = [1, 5, 10]
    ndcgs_ebm = {}
    for name, model in models_dict.items():
        print(f"Evaluating: {name}")
        test_split = rankeval_datasets[name]["test"]
        predictions = model.predict(test_split.X)
        ndcgs = {}
        for cutoff in cutoffs:
            ndcg = NDCG(cutoff=cutoff, no_relevant_results=1, implementation="exp")
            res = ndcg.eval(test_split, predictions)
            ndcgs[cutoff] = res[1]
            print(f"\tCutoff {cutoff} (mean): {res[0]}")
        ndcgs_ebm[name] = ndcgs
    with open(file_out, "wb") as f:
        print(f"Writing results to {file_out}")
        pickle.dump(ndcgs_ebm, f)
def main():
    """Evaluate the best EBM models (with and without interactions) on each
    dataset's test split and save the NDCG results as pickles."""
    parser = argparse.ArgumentParser(description="EBM benchmark evaluation")
    parser.add_argument("--config_path",
                        type=str,
                        default="config.json",
                        help="""
    Path to the JSON file containing the configuration for the benchmark. It contains the following keys:
    - best_models_dir: where to find the best models previously created.
    - path_eval: main path to save the NDCG results.
    """)
    args = parser.parse_args()

    with open(args.config_path) as f:
        try:
            json_args = json.load(f)
            best_models_dir = json_args["best_models_dir"]
            path_eval = json_args["path_eval"]
        except Exception as e:
            print(f"Problems reading the configuration file {args.config_path} ")
            print(e)
            # Bug fix: the original swallowed the error and fell through to a
            # NameError on the unbound configuration variables below.
            raise

    rankeval_datasets = load_datasets()

    def load_best(subdir):
        # One pickled best model per dataset: <best_models_dir>/<subdir>/<name>.pickle
        models = {}
        for name in rankeval_datasets.keys():
            with open(f"{best_models_dir}/{subdir}/{name}.pickle", "rb") as f:
                models[name] = pickle.load(f)
        return models

    best_ebm_no_inter = load_best("without_inter")
    best_ebm_inter = load_best("with_inter")

    Path(path_eval).mkdir(parents=True, exist_ok=True)
    evaluate_and_save(best_ebm_no_inter, rankeval_datasets, f"{path_eval}/ebm.pickle")
    evaluate_and_save(best_ebm_inter, rankeval_datasets, f"{path_eval}/ebm_i.pickle")


if __name__ == '__main__':
    main()
| 2,479 | 34.428571 | 118 | py |
ilmart | ilmart-main/experiments/nrgam/nrgam_evaluate.py | #!/usr/bin/env python
# coding: utf-8
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_ranking as tfr
import pickle
import argparse
from tqdm import tqdm
from rankeval.metrics.ndcg import NDCG
from collections import defaultdict
import yahoo_dataset
import numpy as np
DATASET_DICT = {
"web30k": "mslr_web/30k_fold1",
"istella": "istella/s",
"yahoo": "yahoo"
}
LOG_NORMALIZATION = {
"web30k": False,
"istella": True,
"yahoo": False
}
BATCH_SIZE = 128
NORMALIZATION_CONSTANT = 10
def ds_transform(ds, log=False):
    """Prepare a TFDS ranking split for batched evaluation.

    Pipeline: clip feature values at 1e6, add a boolean "_mask" marking real
    (non-padded) documents, pad-batch to BATCH_SIZE, split the label out of
    the feature map (padded positions get label -1), and optionally apply a
    log1p transform (shifted by NORMALIZATION_CONSTANT) to every feature.

    :param ds: tf.data.Dataset of per-query feature maps including "label".
    :param log: apply the log1p normalization (used for istella).
    :return: tf.data.Dataset of (feature_map, padded_label) batches.
    """
    # Clip extreme feature values to 1e6.
    ds = ds.map(
        lambda feature_map: {key: tf.where(value < 10 ** 6, value, 10 ** 6) for key, value in feature_map.items()})
    # "_mask" is True for every real document; padded_batch pads it with False.
    ds = ds.map(lambda feature_map: {
        "_mask": tf.ones_like(feature_map["label"], dtype=tf.bool),
        **feature_map
    })
    ds = ds.padded_batch(batch_size=BATCH_SIZE)
    # Pop the label out of the feature map; padded documents get label -1.
    ds = ds.map(lambda feature_map: (feature_map, tf.where(feature_map["_mask"], feature_map.pop("label"), -1.)))
    if log:
        # Shift before log1p (per the training script, some istella features
        # can be slightly negative), then drop the helper "_mask" entry.
        ds = ds.map(
            lambda feature_map, label: (
                {key: value + NORMALIZATION_CONSTANT for key, value in feature_map.items() if key != "_mask"}, label))
        ds = ds.map(
            lambda feature_map, label: (
                {key: tf.math.log1p(value) for key, value in feature_map.items() if key != "_mask"}, label))
    else:
        # No normalization: just drop the helper "_mask" entry.
        ds = ds.map(
            lambda feature_map, label: ({key: value for key, value in feature_map.items() if key != "_mask"}, label))
    return ds
def compute_ndcg_results(batch_results, ds_test_y, cutoffs):
    """Compute per-query NDCG at each cutoff from batched predictions.

    :param batch_results: list of per-batch prediction arrays, indexed as
        ``batch_results[batch_id][query_in_batch]``.
    :param ds_test_y: dataset yielding the padded label batches; padded
        positions carry the sentinel label -1.
    :param cutoffs: iterable of NDCG cutoffs.
    :return: {cutoff: [per-query ndcg, ...]} accumulated over all queries.
    """
    ndcg_results = defaultdict(list)
    for batch_id, batch_y_true in tqdm(enumerate(ds_test_y)):
        for query_in_batch, y_true_padded in enumerate(batch_y_true):
            y_padded = y_true_padded.numpy()
            # Bug fix: np.argmax(y == -1) returns 0 when a row contains no
            # padding at all (all-False argmax), which used to truncate a
            # full-length query to zero documents. Use the first padding
            # index, or the full length when the row has no padding.
            pad_positions = np.flatnonzero(y_padded == -1.)
            n_docs = pad_positions[0] if pad_positions.size else y_padded.shape[0]
            y_true = y_padded[:n_docs]
            y_pred = np.array(batch_results[batch_id][query_in_batch][:n_docs])
            for cutoff in cutoffs:
                ndcg = NDCG(cutoff=cutoff, no_relevant_results=1, implementation="exp")
                ndcg_results[cutoff].append(ndcg.eval_per_query(y_true, y_pred))
    return ndcg_results
def main():
    """Load the saved Neural RankGAM models, compute per-query NDCG@{1,5,10}
    on each dataset's test split, and pickle the results."""
    parser = argparse.ArgumentParser(
        description="Evaluate the accuracy of Neural Rank GAM for the three dataset: istella, web30k, yahoo")
    parser.add_argument("-base_dir", default="../best_models/nrgam", type=str,
                        help="Base path where the models are saved")
    parser.add_argument("-output_file", default="../results/ndcg/nrgam.pickle", type=str,
                        help="Path of the model to continue to train")
    args = parser.parse_args()

    base_path = args.base_dir
    # One saved Keras model directory per dataset.
    model_paths = {
        "istella": f"{base_path}/istella_model",
        "web30k": f"{base_path}/web30k_model",
        "yahoo": f"{base_path}/yahoo_model",
    }
    best_tf_models = {}
    for name, path in model_paths.items():
        best_tf_models[name] = tf.keras.models.load_model(path)

    test_datasets = {}
    for name in model_paths.keys():
        # log normalization only for istella, matching the training setup.
        test_datasets[name] = ds_transform(tfds.load(DATASET_DICT[name], split="test"), log=LOG_NORMALIZATION[name])

    ndcgs_nrgam = {}
    cutoffs = [1, 5, 10]
    for name, model in best_tf_models.items():
        # Split the pipeline into labels and features, predict per batch,
        # then score against the padded labels.
        ds_test_y = test_datasets[name].map(lambda feature_map, label: label)
        ds_test_X = test_datasets[name].map(lambda feature_map, label: feature_map)
        batch_results = [model.predict(batch_sample) for batch_sample in tqdm(ds_test_X)]
        ndcgs_nrgam[name] = compute_ndcg_results(batch_results, ds_test_y, cutoffs)

    with open(args.output_file, "wb") as f:
        pickle.dump(ndcgs_nrgam, f)


if __name__ == '__main__':
    main()
| 3,771 | 34.92381 | 118 | py |
ilmart | ilmart-main/experiments/nrgam/nrgam_train.py | import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_ranking as tfr
import argparse
import pickle
from pathlib import Path
tf.config.threading.set_inter_op_parallelism_threads(40)
tf.config.threading.set_intra_op_parallelism_threads(40)
DATSET_DICT = {
"mslr_web/30k_fold1": "web30k",
"istella/s": "istella",
"yahoo": "yahoo_dataset"
}
LOG_NORMALIZATION = {
"mslr_web/30k_fold1": False,
"istella/s": True,
"yahoo": False
}
LEARNING_RATE = 0.05
HIDDEN_LAYERS = [16, 8]
BATCH_SIZE = 128
# The minimum value in istella for 4 features (50, 134, 148, 176) could be slightly less than 0,
# and to avoid numerical issue with the log1p transformation we added a constant value to each feature.
NORMALIZATION_CONSTANT = 10
LOSS = "approx_ndcg_loss"
def ds_transform(ds, log=False):
    """Prepare a TFDS ranking split for batched training.

    Same pipeline as the copy in nrgam_evaluate.py: clip feature values at
    1e6, add a boolean "_mask" for real documents, pad-batch to BATCH_SIZE,
    split the label out (padded positions get -1), optionally log1p-normalize
    all features after shifting by NORMALIZATION_CONSTANT.

    :param ds: tf.data.Dataset of per-query feature maps including "label".
    :param log: apply the log1p normalization (used for istella/s).
    :return: tf.data.Dataset of (feature_map, padded_label) batches.
    """
    # Clip extreme feature values to 1e6.
    ds = ds.map(
        lambda feature_map: {key: tf.where(value < 10 ** 6, value, 10 ** 6) for key, value in feature_map.items()})
    # "_mask" is True for every real document; padded_batch pads it with False.
    ds = ds.map(lambda feature_map: {
        "_mask": tf.ones_like(feature_map["label"], dtype=tf.bool),
        **feature_map
    })
    ds = ds.padded_batch(batch_size=BATCH_SIZE)
    # Pop the label out of the feature map; padded documents get label -1.
    ds = ds.map(lambda feature_map: (feature_map, tf.where(feature_map["_mask"], feature_map.pop("label"), -1.)))
    if log:
        # Shift first: some istella features can be slightly below 0 (see the
        # NORMALIZATION_CONSTANT comment above), which would break log1p.
        ds = ds.map(
            lambda feature_map, label: (
                {key: value + NORMALIZATION_CONSTANT for key, value in feature_map.items() if key != "_mask"}, label))
        ds = ds.map(
            lambda feature_map, label: (
                {key: tf.math.log1p(value) for key, value in feature_map.items() if key != "_mask"}, label))
    else:
        # No normalization: just drop the helper "_mask" entry.
        ds = ds.map(
            lambda feature_map, label: ({key: value for key, value in feature_map.items() if key != "_mask"}, label))
    return ds
def init_model(feat_names, initial_model):
    """Create a fresh GAM ranking model or resume one from disk.

    :param feat_names: per-document feature names, used as numeric columns.
    :param initial_model: path of a saved Keras model to resume from, or
        None to build a new GAMRankingNetwork.
    :return: a compiled tensorflow-ranking Keras model.
    """
    if initial_model is not None:
        model = tf.keras.models.load_model(initial_model)
        print(f"Model correctly loaded from {initial_model}")
    else:
        # One scalar numeric column per feature; missing values default to 0.
        feat_cols = {name: tf.feature_column.numeric_column(name, shape=(1,), default_value=0.0)
                     for name in feat_names}
        network = tfr.keras.canned.GAMRankingNetwork(
            context_feature_columns=None,
            example_feature_columns=feat_cols,
            example_hidden_layer_dims=HIDDEN_LAYERS,
            activation=tf.nn.relu,
            use_batch_norm=True)
        loss = tfr.keras.losses.get(LOSS)
        metrics = tfr.keras.metrics.default_keras_metrics()
        optimizer = tf.keras.optimizers.Adagrad(learning_rate=LEARNING_RATE)
        model = tfr.keras.model.create_keras_model(network=network,
                                                   loss=loss,
                                                   metrics=metrics,
                                                   optimizer=optimizer,
                                                   size_feature_name=None)
    return model
def train_eval(ds_train, ds_vali, ds_test, name, initial_model, epochs=2000, patience=100, base_dir="."):
    """Train a Neural RankGAM with early stopping, then save the model, the
    training history and the test evaluation dict.

    Early stopping monitors validation NDCG@10 and restores the best weights.
    Artifacts written: ``<base_dir>/<name>_model``,
    ``<base_dir>/<name>_history.pickle``, ``<base_dir>/<name>_eval_dict.pickle``.

    :param ds_train: batched training split (from ds_transform).
    :param ds_vali: batched validation split used by early stopping.
    :param ds_test: batched test split used for the final evaluation.
    :param name: dataset short name used to derive output paths.
    :param initial_model: optional saved-model path to resume from.
    :param epochs: maximum number of training epochs.
    :param patience: early-stopping patience in epochs.
    :param base_dir: directory for the saved artifacts.
    """
    # NOTE(review): checkpoints go to the CWD, not under base_dir.
    CHECKPOINTS_FOLDER = f"{name}_checkpoints"
    # Feature names are recovered from the first batch's feature map.
    feat_names = list(list(ds_train.take(1))[0][0].keys())
    model = init_model(feat_names, initial_model)
    early_stopping_callback = tf.keras.callbacks.EarlyStopping(monitor='val_metric/ndcg_10',
                                                               patience=patience,
                                                               mode="max",
                                                               restore_best_weights=True)
    Path(CHECKPOINTS_FOLDER).mkdir(parents=True, exist_ok=True)
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=CHECKPOINTS_FOLDER)
    history = model.fit(ds_train, epochs=epochs, validation_data=ds_vali,
                        callbacks=[early_stopping_callback, checkpoint_callback])
    model.save(f"{base_dir}/{name}_model")
    with open(f"{base_dir}/{name}_history.pickle", "wb") as f:
        pickle.dump(history.history, f)
    eval_dict = model.evaluate(ds_test, return_dict=True)
    with open(f"{base_dir}/{name}_eval_dict.pickle", "wb") as f:
        pickle.dump(eval_dict, f)
def main():
    """Parse CLI args, build the train/vali/test pipelines for the chosen
    dataset and run the Neural RankGAM training."""
    parser = argparse.ArgumentParser(description="Train results of Neural Rank Gam.")
    parser.add_argument("dataset", metavar="dataset", type=str,
                        choices=["mslr_web/30k_fold1", "istella/s", "yahoo"],
                        help="""
    Dataset to be used during training.
    Possible choice to replicate the results:
    - mslr_web/30k_fold1
    - istella/s
    - yahoo
    """)
    parser.add_argument("-keep_training", default=None, type=str, help="Path of the model to continue to train")
    parser.add_argument("-base_dir", default="../best_models/nrgam", type=str,
                        help="Path of the model to continue to train")
    args = parser.parse_args()

    Path(args.base_dir).mkdir(parents=True, exist_ok=True)

    # The istella/s features get the log1p normalization; the others do not.
    ds_train = ds_transform(tfds.load(args.dataset, split="train"), log=LOG_NORMALIZATION[args.dataset])
    ds_vali = ds_transform(tfds.load(args.dataset, split="vali"), log=LOG_NORMALIZATION[args.dataset])
    ds_test = ds_transform(tfds.load(args.dataset, split="test"), log=LOG_NORMALIZATION[args.dataset])

    train_eval(ds_train, ds_vali, ds_test, DATSET_DICT[args.dataset], args.keep_training, base_dir=args.base_dir)


if __name__ == '__main__':
    main()
| 5,534 | 39.698529 | 118 | py |
ilmart | ilmart-main/experiments/nrgam/yahoo_dataset/yahoo_test.py | """yahoo dataset."""
import tensorflow_datasets as tfds
from . import yahoo
class YahooTest(tfds.testing.DatasetBuilderTestCase):
    """Tests for yahoo dataset.

    Standard tfds builder test: exercises the Yahoo builder against fake
    example files, expecting the split sizes declared in SPLITS.
    """
    # TODO(yahoo):
    DATASET_CLASS = yahoo.Yahoo
    SPLITS = {
        'train': 3,  # Number of fake train example
        'test': 1,  # Number of fake test example
    }

    # If you are calling `download/download_and_extract` with a dict, like:
    #   dl_manager.download({'some_key': 'http://a.org/out.txt', ...})
    # then the tests needs to provide the fake output paths relative to the
    # fake data directory
    # DL_EXTRACT_RESULT = {'some_key': 'output_file1.txt', ...}


if __name__ == '__main__':
    tfds.testing.test_main()
| 688 | 26.56 | 73 | py |
ilmart | ilmart-main/experiments/nrgam/yahoo_dataset/yahoo.py | """yahoo dataset."""
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow_datasets.ranking.libsvm_ranking_parser import LibSVMRankingParser
import os
"""
The dataset cannot be shared online due to license constraint, so the download phase is skipped and the data will be
loaded from the folder available in the PATH variable below.
The folder must contain three file:
- train.txt
- vali.txt
- test.txt
"""
DATA_HOME = os.environ.get('RANKEVAL_DATA', os.path.join('~', 'rankeval_data'))
DATA_HOME = os.path.expanduser(DATA_HOME)
PATH = f"{DATA_HOME}/yahoo/set1"
_DESCRIPTION = """
C14 Yahoo! Learning to Rank Challenge, version 1.0
Machine learning has been successfully applied to web search ranking and the goal of this dataset to benchmark such
machine learning algorithms. The dataset consists of features extracted from (query,url) pairs along with relevance
judgments. The queries, ulrs and features descriptions are not given, only the feature values are.
There are two datasets in this distribution: a large one and a small one. Each dataset is divided in 3 sets: training,
validation, and test. Statistics are as follows: Set 1 Set 2 Train Val Test Train Val Test
# queries 19,944 2,994 6,983 1,266 1,266 3,798 # urls 473,134 71,083 165,660 34,815 34,881 103,174 # features 519 596
Number of features in the union of the two sets: 700; in the intersection: 415. Each feature has been normalized to be
in the [0,1] range.
Each url is given a relevance judgment with respect to the query. There are 5 levels of relevance from 0
(least relevant) to 4 (most relevant).
"""
_CITATION = """
@inproceedings{chapelle_yahoo_2011,
title = {Yahoo! {Learning} to {Rank} {Challenge} {Overview}},
url = {https://proceedings.mlr.press/v14/chapelle11a.html},
language = {en},
urldate = {2022-02-10},
booktitle = {Proceedings of the {Learning} to {Rank} {Challenge}},
publisher = {PMLR},
author = {Chapelle, Olivier and Chang, Yi},
month = jan,
year = {2011},
note = {ISSN: 1938-7228},
pages = {1--24},
}
"""
_FEATURE_NAMES = {n: f"feature_{n}" for n in range(1, 700)}
_LABEL_NAME = "label"
class Yahoo(tfds.core.GeneratorBasedBuilder):
    """DatasetBuilder for yahoo dataset.

    The raw data cannot be downloaded automatically (license restrictions,
    see the module docstring); the builder reads the train/vali/test LibSVM
    files from the local PATH directory.
    """

    VERSION = tfds.core.Version('1.0.0')
    RELEASE_NOTES = {
        '1.0.0': 'Initial release.',
    }

    def _info(self) -> tfds.core.DatasetInfo:
        """Returns the dataset metadata."""
        # Every feature and the label are variable-length float64 vectors
        # (one value per document in a query's list), ZLIB-encoded.
        encoding = tfds.features.Encoding.ZLIB
        features = {
            name: tfds.features.Tensor(
                shape=(None,), dtype=tf.float64, encoding=encoding)
            for name in _FEATURE_NAMES.values()
        }
        features[_LABEL_NAME] = tfds.features.Tensor(
            shape=(None,), dtype=tf.float64, encoding=encoding)
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict(features),
            homepage='https://webscope.sandbox.yahoo.com/catalog.php?datatype=c',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Returns SplitGenerators."""
        # We do not download the dataset from the web, we just assume that is already in PATH
        splits = {
            "train": self._generate_examples(f"{PATH}/train.txt"),
            "vali": self._generate_examples(f"{PATH}/vali.txt"),
            "test": self._generate_examples(f"{PATH}/test.txt")
        }
        return splits

    def _generate_examples(self, path):
        """"Yields examples."""
        # Files are in LibSVM ranking format; presumably the tfds parser
        # groups rows per query id — confirm LibSVMRankingParser semantics.
        with tf.io.gfile.GFile(path, "r") as f:
            yield from LibSVMRankingParser(f, _FEATURE_NAMES, _LABEL_NAME)
| 3,718 | 35.460784 | 119 | py |
ilmart | ilmart-main/experiments/nrgam/yahoo_dataset/__init__.py | """yahoo dataset."""
from .yahoo import Yahoo
| 47 | 11 | 24 | py |
ilmart | ilmart-main/experiments/lmart/lmart_full.py | from pathlib import Path
import numpy as np
import lightgbm as lgbm
from tqdm import tqdm
from rankeval.metrics import NDCG
from sklearn.model_selection import ParameterGrid
from ilmart.utils import load_datasets
def fine_tuning(train_lgbm, vali_lgbm, common_params, param_grid, verbose=True):
    """Grid-search LightGBM hyper-parameters with early stopping and return
    the booster with the best validation NDCG@10.

    :param train_lgbm: lgbm.Dataset used for training.
    :param vali_lgbm: lgbm.Dataset used for early stopping / model selection.
    :param common_params: parameters shared by every grid point.
    :param param_grid: grid passed to sklearn's ParameterGrid.
    :param verbose: also log per-iteration evaluation and the final best score.
    :return: the best lgbm.Booster, or None if nothing beat NDCG@10 = 0.
    """
    param_grid_list = list(ParameterGrid(param_grid))
    best_ndcg_10 = 0
    best_model = None
    for params in tqdm(param_grid_list):
        new_params = dict(**common_params, **params)
        # record_evaluation fills this with per-iteration validation metrics.
        dict_search_res = {}
        early_stopping = lgbm.early_stopping(50, verbose=True)
        eval_result_callback = lgbm.record_evaluation(dict_search_res)
        log_eval = lgbm.log_evaluation(period=1, show_stdv=True)
        callbacks = [early_stopping, eval_result_callback]
        if verbose:
            callbacks.append(log_eval)
        new_model = lgbm.train(new_params,
                               train_lgbm,
                               num_boost_round=2000,
                               valid_sets=[vali_lgbm],
                               callbacks=callbacks)
        # Best iteration's NDCG@10 among all recorded iterations.
        last_ndcg = np.max(dict_search_res['valid_0']['ndcg@10'])
        if last_ndcg > best_ndcg_10:
            best_ndcg_10 = last_ndcg
            best_model = new_model
    if verbose:
        print(f"Best NDCG@10: {best_ndcg_10}")
    return best_model
def main():
    """Fine-tune one full (non-interpretable) LambdaMART model per dataset,
    caching the tuned models on disk, then report test NDCG@{1,5,10}."""
    # LightGBM parameters shared by every grid point.
    common_params = {
        "objective": "lambdarank",
        "min_data_in_leaf": 50,
        "min_sum_hessian_in_leaf": 0,
        "num_threads": 40,
        "force_col_wise": True,
        "verbosity": -1,
        "eval_at": 10,
        "lambdarank_truncation_level": 13,
    }
    # 4x4 grid: learning rate and number of leaves, geometrically spaced.
    leaves = list(map(int, np.geomspace(64, 512, num=4)))
    param_grid = {'learning_rate': np.geomspace(0.001, 0.1, num=4),
                  'num_leaves': leaves}

    best_models = {}
    rankeval_datasets = load_datasets()
    models_dir = "../best_models/full"
    Path(models_dir).mkdir(parents=True, exist_ok=True)
    for name, dataset_dict in rankeval_datasets.items():
        model_path = f"{models_dir}/{name}.lgbm"
        if Path(model_path).is_file():
            # Cache hit: reuse the previously tuned model instead of retraining.
            print(f"Found {model_path}, loading...")
            best_models[name] = lgbm.Booster(model_file=model_path)
        else:
            train_lgbm = lgbm.Dataset(dataset_dict["train"].X,
                                      group=dataset_dict["train"].get_query_sizes(),
                                      label=dataset_dict["train"].y)
            vali_lgbm = lgbm.Dataset(dataset_dict["vali"].X,
                                     group=dataset_dict["vali"].get_query_sizes(),
                                     label=dataset_dict["vali"].y)
            best_models[name] = fine_tuning(train_lgbm, vali_lgbm, common_params, param_grid, verbose=True)
            best_models[name].save_model(f"{models_dir}/{name}.lgbm")

    # Final report: test-set NDCG stats per dataset and cutoff.
    res_ndcg = {}
    for name, model in best_models.items():
        test_dataset = rankeval_datasets[name]["test"]
        ndcgs = {}
        for cutoff in [1, 5, 10]:
            ndcg = NDCG(cutoff=cutoff, no_relevant_results=1, implementation="exp")
            ndcg_stats = ndcg.eval(test_dataset, model.predict(test_dataset.X))
            ndcgs[cutoff] = ndcg_stats
        res_ndcg[name] = ndcgs
    print(res_ndcg)


if __name__ == '__main__':
    main()
| 3,335 | 34.489362 | 107 | py |
pytorch-darknet19 | pytorch-darknet19-master/demo/darknet19_demo.py | import numpy as np
import torch
import torchvision.datasets as dset
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from model import darknet
def main():
    """Classify the sample images in demo/samples with pretrained Darknet19
    and display each image with its predicted ImageNet class printed."""
    # Index -> human-readable ImageNet class name, one per line.
    imageNet_label = [line.strip() for line in open("demo/imagenet.shortnames.list", 'r')]
    dataset = dset.ImageFolder(root="demo/samples/",
                               transform=transforms.Compose([
                                   transforms.Resize((448, 448)),
                                   transforms.ToTensor()
                               ]))
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)

    darknet19 = darknet.Darknet19(pretrained=True)
    darknet19.eval()

    for data, _ in dataloader:
        output = darknet19.forward(data)
        # argmax over class probabilities of the single image in the batch.
        answer = int(torch.argmax(output))
        print("Class: {}({})".format(imageNet_label[answer], answer))
        # CHW -> HWC so matplotlib can render the tensor.
        plt.imshow(np.array(np.transpose(data[0], (1, 2, 0))))
        plt.show()


if __name__ == "__main__":
    main()
| 995 | 31.129032 | 90 | py |
pytorch-darknet19 | pytorch-darknet19-master/base/base_model.py | import logging
import torch.nn as nn
import numpy as np
class BaseModel(nn.Module):
    """
    Base class for all models
    """
    def __init__(self):
        super(BaseModel, self).__init__()
        # One logger per concrete model class.
        self.logger = logging.getLogger(self.__class__.__name__)

    def forward(self, *input):
        """
        Forward pass logic

        :return: Model output
        """
        raise NotImplementedError

    def _count_trainable(self):
        """Return the total number of parameters with requires_grad=True."""
        return sum(np.prod(p.size()) for p in self.parameters() if p.requires_grad)

    def summary(self):
        """
        Model summary
        """
        self.logger.info('Trainable parameters: {}'.format(self._count_trainable()))
        self.logger.info(self)

    def __str__(self):
        """
        Model prints with number of trainable parameters
        """
        return super(BaseModel, self).__str__() + '\nTrainable parameters: {}'.format(self._count_trainable())
| 1,076 | 26.615385 | 93 | py |
pytorch-darknet19 | pytorch-darknet19-master/base/__init__.py | from .base_model import *
| 27 | 8.333333 | 25 | py |
pytorch-darknet19 | pytorch-darknet19-master/model/darknet.py | from collections import OrderedDict
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from base import BaseModel
model_paths = {
'darknet19': 'https://s3.ap-northeast-2.amazonaws.com/deepbaksuvision/darknet19-deepBakSu-e1b3ec1e.pth'
}
class GlobalAvgPool2d(nn.Module):
    """Average over the full spatial extent: (N, C, H, W) -> (N, C)."""

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, x):
        batch, channels, height, width = x.data.size()
        # Pooling over the whole H x W window collapses the spatial dims.
        pooled = F.avg_pool2d(x, (height, width))
        return pooled.view(batch, channels)
class Darknet19(BaseModel):
    """Darknet19 classifier: six conv stages followed by a 1x1 conv head,
    global average pooling and softmax over 1000 ImageNet classes."""

    def __init__(self, pretrained=True):
        """Build the network; when ``pretrained`` is True, download and load
        the published weights from ``model_paths['darknet19']``."""
        super(Darknet19, self).__init__()
        # Each stage alternates 3x3 convs with channel-squeezing 1x1 convs,
        # every conv followed by BatchNorm + LeakyReLU(0.1); stages 1-5 end
        # with a stride-2 max pool.
        self.features = nn.Sequential(OrderedDict([
            # Stage 1: 3 -> 32 channels.
            ('layer1', nn.Sequential(OrderedDict([
                ('conv1_1', nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn1_1', nn.BatchNorm2d(32)),
                ('leaky1_1', nn.LeakyReLU(0.1, inplace=True)),
                ('maxpool1', nn.MaxPool2d(kernel_size=2, stride=2))
            ]))),
            # Stage 2: 32 -> 64 channels.
            ('layer2', nn.Sequential(OrderedDict([
                ('conv2_1', nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn2_1', nn.BatchNorm2d(64)),
                ('leaky2_1', nn.LeakyReLU(0.1, inplace=True)),
                ('maxpool2', nn.MaxPool2d(kernel_size=2, stride=2))
            ]))),
            # Stage 3: 64 -> 128, squeeze to 64, back to 128.
            ('layer3', nn.Sequential(OrderedDict([
                ('conv3_1', nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn3_1', nn.BatchNorm2d(128)),
                ('leaky3_1', nn.LeakyReLU(0.1, inplace=True)),
                ('conv3_2', nn.Conv2d(128, 64, kernel_size=1, stride=1, padding=0, bias=False)),
                ('bn3_2', nn.BatchNorm2d(64)),
                ('leaky3_2', nn.LeakyReLU(0.1, inplace=True)),
                ('conv3_3', nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn3_3', nn.BatchNorm2d(128)),
                ('leaky3_3', nn.LeakyReLU(0.1, inplace=True)),
                ('maxpool3', nn.MaxPool2d(kernel_size=2, stride=2))
            ]))),
            # Stage 4: 128 -> 256, squeeze to 128, back to 256.
            ('layer4', nn.Sequential(OrderedDict([
                ('conv4_1', nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn4_1', nn.BatchNorm2d(256)),
                ('leaky4_1', nn.LeakyReLU(0.1, inplace=True)),
                ('conv4_2', nn.Conv2d(256, 128, kernel_size=1, stride=1, padding=0, bias=False)),
                ('bn4_2', nn.BatchNorm2d(128)),
                ('leaky4_2', nn.LeakyReLU(0.1, inplace=True)),
                ('conv4_3', nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn4_3', nn.BatchNorm2d(256)),
                ('leaky4_3', nn.LeakyReLU(0.1, inplace=True)),
                ('maxpool4', nn.MaxPool2d(kernel_size=2, stride=2))
            ]))),
            # Stage 5: 256 -> 512 with two 1x1 squeezes.
            ('layer5', nn.Sequential(OrderedDict([
                ('conv5_1', nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn5_1', nn.BatchNorm2d(512)),
                ('leaky5_1', nn.LeakyReLU(0.1, inplace=True)),
                ('conv5_2', nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0, bias=False)),
                ('bn5_2', nn.BatchNorm2d(256)),
                ('leaky5_2', nn.LeakyReLU(0.1, inplace=True)),
                ('conv5_3', nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn5_3', nn.BatchNorm2d(512)),
                ('leaky5_3', nn.LeakyReLU(0.1, inplace=True)),
                # NOTE(review): a 1x1 conv with padding=1 (unlike conv5_2's
                # padding=0) grows the spatial map by 2 in each dimension —
                # looks unintentional, but the shipped pretrained weights
                # were trained with this topology; confirm before changing.
                ('conv5_4', nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=1, bias=False)),
                ('bn5_4', nn.BatchNorm2d(256)),
                ('leaky5_4', nn.LeakyReLU(0.1, inplace=True)),
                ('conv5_5', nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn5_5', nn.BatchNorm2d(512)),
                ('leaky5_5', nn.LeakyReLU(0.1, inplace=True)),
                ('maxpool5', nn.MaxPool2d(kernel_size=2, stride=2))
            ]))),
            # Stage 6: 512 -> 1024 with two 1x1 squeezes, no final pooling.
            ('layer6', nn.Sequential(OrderedDict([
                ('conv6_1', nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn6_1', nn.BatchNorm2d(1024)),
                ('leaky6_1', nn.LeakyReLU(0.1, inplace=True)),
                ('conv6_2', nn.Conv2d(1024, 512, kernel_size=1, stride=1, padding=0, bias=False)),
                ('bn6_2', nn.BatchNorm2d(512)),
                ('leaky6_2', nn.LeakyReLU(0.1, inplace=True)),
                ('conv6_3', nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn6_3', nn.BatchNorm2d(1024)),
                ('leaky6_3', nn.LeakyReLU(0.1, inplace=True)),
                # NOTE(review): same padding=1 on a 1x1 conv as conv5_4 above.
                ('conv6_4', nn.Conv2d(1024, 512, kernel_size=1, stride=1, padding=1, bias=False)),
                ('bn6_4', nn.BatchNorm2d(512)),
                ('leaky6_4', nn.LeakyReLU(0.1, inplace=True)),
                ('conv6_5', nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1, bias=False)),
                ('bn6_5', nn.BatchNorm2d(1024)),
                ('leaky6_5', nn.LeakyReLU(0.1, inplace=True))
            ])))
        ]))
        # Head: 1x1 conv to 1000 class maps, global average pool, softmax.
        self.classifier = nn.Sequential(OrderedDict([
            ('conv7_1', nn.Conv2d(1024, 1000, kernel_size=(1, 1), stride=(1, 1))),
            ('globalavgpool', GlobalAvgPool2d()),
            ('softmax', nn.Softmax(dim=1))
        ]))

        if pretrained:
            self.load_state_dict(model_zoo.load_url(model_paths['darknet19'], progress=True))
            print('Model is loaded')

    def forward(self, x):
        """Return per-class probabilities of shape (N, 1000)."""
        out = self.features(x)
        out = self.classifier(out)
        return out
| 5,509 | 46.094017 | 107 | py |
RBNN | RBNN-master/imagenet/main.py | import argparse
import os
import time
import logging
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import models_cifar
import models_imagenet
import numpy as np
from torch.autograd import Variable
from utils.options import args
from utils.common import *
from modules import *
from datetime import datetime
import dataset
def main():
    """Entry point: drive RBNN training/evaluation from the parsed `args`.

    Sets up the experiment directory and logging, builds the model,
    optionally evaluates/resumes from a checkpoint, builds data loaders
    (DALI or torchvision), configures the LR schedule, and runs the epoch
    loop with checkpointing. Publishes `args`, `best_prec1`, `best_prec5`
    and `conv_modules` as module globals used by `forward()`.
    """
    global args, best_prec1, best_prec5, conv_modules
    best_prec1 = 0
    best_prec5 = 0
    # Log every ~500 reference iterations of batch size 256 (scaled to the
    # actual batch size; forward() compares against i*batchsize).
    args.print_freq=int(256/args.batch_size*500)
    random.seed(args.seed)
    if args.evaluate:
        # Evaluation-only runs write their logs to a throwaway directory.
        args.results_dir = '/tmp'
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if not args.resume:
        # Fresh run: dump the full argument list for reproducibility.
        with open(os.path.join(save_path,'config.txt'), 'w') as args_file:
            args_file.write(str(datetime.now())+'\n\n')
            for args_n,args_v in args.__dict__.items():
                # Render falsy non-int values (None, '') as empty strings.
                args_v = '' if not args_v and not isinstance(args_v,int) else args_v
                args_file.write(str(args_n)+': '+str(args_v)+'\n')
        setup_logging(os.path.join(save_path, 'logger.log'))
        logging.info("saving to %s", save_path)
        logging.debug("run arguments: %s", args)
    else:
        # Resumed run: append to the existing log file.
        setup_logging(os.path.join(save_path, 'logger.log'), filemode='a')
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
    if 'cuda' in args.type:
        # args.gpus is reparsed from a comma string into a list of indices.
        args.gpus = [int(i) for i in args.gpus.split(',')]
        cudnn.benchmark = True
    else:
        args.gpus = None
    # Select output width and model namespace for the chosen dataset.
    if args.dataset=='tinyimagenet':
        num_classes=200
        model_zoo = 'models_imagenet.'
    elif args.dataset=='imagenet':
        num_classes=1000
        model_zoo = 'models_imagenet.'
    elif args.dataset=='cifar10':
        num_classes=10
        model_zoo = 'models_cifar.'
    elif args.dataset=='cifar100':
        num_classes=100
        model_zoo = 'models_cifar.'
    # NOTE(review): len(args.gpus) raises TypeError when args.gpus is None
    # (CPU mode set above) — presumably CUDA is always expected; confirm.
    if len(args.gpus)==1:
        model = eval(model_zoo+args.model)(num_classes=num_classes).cuda()
    else:
        model = nn.DataParallel(eval(model_zoo+args.model)(num_classes=num_classes))
    if not args.resume:
        logging.info("creating model %s", args.model)
        logging.info("model structure: %s", model)
        num_parameters = sum([l.nelement() for l in model.parameters()])
        logging.info("number of parameters: %d", num_parameters)
    # evaluate: load weights from an explicit checkpoint path
    if args.evaluate:
        if not os.path.isfile(args.evaluate):
            logging.error('invalid checkpoint: {}'.format(args.evaluate))
        else:
            checkpoint = torch.load(args.evaluate)
            if len(args.gpus)>1:
                # DataParallel expects keys prefixed with 'module.'
                checkpoint['state_dict'] = dataset.add_module_fromdict(checkpoint['state_dict'])
            model.load_state_dict(checkpoint['state_dict'])
            logging.info("loaded checkpoint '%s' (epoch %s)",
                         args.evaluate, checkpoint['epoch'])
    elif args.resume:
        # Resume from the checkpoint stored in the experiment directory.
        checkpoint_file = os.path.join(save_path,'checkpoint.pth.tar')
        if os.path.isdir(checkpoint_file):
            checkpoint_file = os.path.join(
                checkpoint_file, 'model_best.pth.tar')
        if os.path.isfile(checkpoint_file):
            checkpoint = torch.load(checkpoint_file)
            if len(args.gpus)>1:
                checkpoint['state_dict'] = dataset.add_module_fromdict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch'] - 1
            best_prec1 = checkpoint['best_prec1']
            best_prec5 = checkpoint['best_prec5']
            model.load_state_dict(checkpoint['state_dict'])
            logging.info("loaded checkpoint '%s' (epoch %s)",
                         checkpoint_file, checkpoint['epoch'])
        else:
            logging.error("no checkpoint found at '%s'", args.resume)
    criterion = nn.CrossEntropyLoss().cuda()
    criterion = criterion.type(args.type)
    model = model.type(args.type)
    # Evaluation-only path: build a val loader, run once, and return.
    if args.evaluate:
        if args.use_dali:
            val_loader = dataset.get_imagenet(
                type='val',
                image_dir=args.data_path,
                batch_size=args.batch_size_test,
                num_threads=args.workers,
                crop=224,
                device_id='cuda:0',
                num_gpus=1)
        else:
            val_loader = dataset.get_imagenet_torch(
                type='val',
                image_dir=args.data_path,
                batch_size=args.batch_size_test,
                num_threads=args.workers,
                device_id='cuda:0'
                )
        with torch.no_grad():
            val_loss, val_prec1, val_prec5 = validate(val_loader, model, criterion, 0)
        logging.info('\n Validation Loss {val_loss:.4f} \t'
                     'Validation Prec@1 {val_prec1:.3f} \t'
                     'Validation Prec@5 {val_prec5:.3f} \n'
                     .format(val_loss=val_loss, val_prec1=val_prec1, val_prec5=val_prec5))
        return
    # Training path: build train/val loaders for the selected dataset.
    if args.dataset=='imagenet':
        if args.use_dali:
            train_loader = dataset.get_imagenet(
                type='train',
                image_dir=args.data_path,
                batch_size=args.batch_size,
                num_threads=args.workers,
                crop=224,
                device_id='cuda:0',
                num_gpus=1)
            val_loader = dataset.get_imagenet(
                type='val',
                image_dir=args.data_path,
                batch_size=args.batch_size_test,
                num_threads=args.workers,
                crop=224,
                device_id='cuda:0',
                num_gpus=1)
        else:
            train_loader = dataset.get_imagenet_torch(
                type='train',
                image_dir=args.data_path,
                batch_size=args.batch_size,
                num_threads=args.workers,
                device_id='cuda:0',
                )
            val_loader = dataset.get_imagenet_torch(
                type='val',
                image_dir=args.data_path,
                batch_size=args.batch_size_test,
                num_threads=args.workers,
                device_id='cuda:0'
                )
    else:
        train_loader, val_loader = dataset.load_data(
            dataset=args.dataset,
            data_path=args.data_path,
            batch_size=args.batch_size,
            batch_size_test=args.batch_size_test,
            num_workers=args.workers)
    # 'initial_lr' is required so schedulers can resume from last_epoch > -1.
    optimizer = torch.optim.SGD([{'params':model.parameters(),'initial_lr':args.lr}], args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    def cosin(i,T,emin=0,emax=0.01):
        "customized cos-lr"
        return emin+(emax-emin)/2 * (1+np.cos(i*np.pi/T))
    if args.resume:
        # Restore the cosine-decayed LR at the resumed epoch (offset by the
        # warm-up epochs, which do not consume scheduler steps).
        for param_group in optimizer.param_groups:
            param_group['lr'] = cosin(args.start_epoch-args.warm_up*4, args.epochs-args.warm_up*4,0, args.lr)
    if args.lr_type == 'cos':
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs-args.warm_up*4, eta_min = 0, last_epoch=args.start_epoch-args.warm_up*4)
    elif args.lr_type == 'step':
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.lr_decay_step, gamma=0.1, last_epoch=-1)
    if not args.resume:
        logging.info("criterion: %s", criterion)
        logging.info('scheduler: %s', lr_scheduler)
    def cpt_tk(epoch):
        "compute t&k in back-propagation"
        # t grows log-linearly from Tmin to Tmax over training; k = max(1/t, 1)
        # scales the surrogate-gradient peak (see BinaryQuantize.backward).
        T_min, T_max = torch.tensor(args.Tmin).float(), torch.tensor(args.Tmax).float()
        Tmin, Tmax = torch.log10(T_min), torch.log10(T_max)
        t = torch.tensor([torch.pow(torch.tensor(10.), Tmin + (Tmax - Tmin) / args.epochs * epoch)]).float()
        k = max(1/t,torch.tensor(1.)).float()
        return t, k
    #* setup conv_modules.epoch
    # Collect every Conv2d so their epoch/k/t attributes can be driven below.
    conv_modules=[]
    for name,module in model.named_modules():
        if isinstance(module,nn.Conv2d):
            conv_modules.append(module)
    for epoch in range(args.start_epoch+1, args.epochs):
        time_start = datetime.now()
        #*warm up: linearly ramp the LR over the first 5 epochs
        if args.warm_up and epoch <5:
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr * (epoch+1) / 5
        for param_group in optimizer.param_groups:
            logging.info('lr: %s', param_group['lr'])
        #* compute t/k in back-propagation
        t,k = cpt_tk(epoch)
        for name,module in model.named_modules():
            if isinstance(module,nn.Conv2d):
                module.k = k.cuda()
                module.t = t.cuda()
        # Telling each conv the current epoch enables its rotation update.
        for module in conv_modules:
            module.epoch = epoch
        # train
        train_loss, train_prec1, train_prec5 = train(
            train_loader, model, criterion, epoch, optimizer)
        #* adjust Lr (the scheduler only steps after warm-up is done)
        if epoch >= 4 * args.warm_up:
            lr_scheduler.step()
        # evaluate (epoch = -1 disables rotation updates during validation)
        with torch.no_grad():
            for module in conv_modules:
                module.epoch = -1
            val_loss, val_prec1, val_prec5 = validate(
                val_loader, model, criterion, epoch)
        # remember best prec
        is_best = val_prec1 > best_prec1
        if is_best:
            best_prec1 = max(val_prec1, best_prec1)
            best_prec5 = max(val_prec5, best_prec5)
            best_epoch = epoch
            best_loss = val_loss
        # save model every epoch; save_checkpoint also keeps the best copy
        if epoch % 1 == 0:
            model_state_dict = model.module.state_dict() if len(args.gpus) > 1 else model.state_dict()
            model_parameters = model.module.parameters() if len(args.gpus) > 1 else model.parameters()
            save_checkpoint({
                'epoch': epoch + 1,
                'model': args.model,
                'state_dict': model_state_dict,
                'best_prec1': best_prec1,
                'best_prec5': best_prec5,
                'parameters': list(model_parameters),
            }, is_best, path=save_path)
        if args.time_estimate > 0 and epoch % args.time_estimate==0:
            time_end = datetime.now()
            cost_time,finish_time = get_time(time_end-time_start,epoch,args.epochs)
            logging.info('Time cost: '+cost_time+'\t'
                         'Time of Finish: '+finish_time)
        logging.info('\n Epoch: {0}\t'
                     'Training Loss {train_loss:.4f} \t'
                     'Training Prec@1 {train_prec1:.3f} \t'
                     'Training Prec@5 {train_prec5:.3f} \t'
                     'Validation Loss {val_loss:.4f} \t'
                     'Validation Prec@1 {val_prec1:.3f} \t'
                     'Validation Prec@5 {val_prec5:.3f} \n'
                     .format(epoch + 1, train_loss=train_loss, val_loss=val_loss,
                             train_prec1=train_prec1, val_prec1=val_prec1,
                             train_prec5=train_prec5, val_prec5=val_prec5))
    logging.info('*'*50+'DONE'+'*'*50)
    # NOTE(review): best_epoch/best_loss are only bound inside `if is_best:` —
    # this final log raises UnboundLocalError if no epoch ever improved
    # (e.g. resuming an already-converged run); confirm whether intended.
    logging.info('\n Best_Epoch: {0}\t'
                 'Best_Prec1 {prec1:.4f} \t'
                 'Best_Prec5 {prec5:.4f} \t'
                 'Best_Loss {loss:.3f} \t'
                 .format(best_epoch+1, prec1=best_prec1, prec5=best_prec5, loss=best_loss))
def forward(data_loader, model, criterion, epoch=0, training=True, optimizer=None):
    """Shared per-epoch loop used by both train() and validate().

    Iterates the loader, runs the model, records loss and top-1/top-5
    accuracy, and (when training) performs the optimizer step. Returns
    (avg_loss, avg_top1, avg_top5). Reads the module globals `args` and
    `conv_modules` set up in main().
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    if args.use_dali:
        # DALI iterator: each batch is [{'data': tensor, 'label': tensor}].
        for i, batch_data in enumerate(data_loader):
            # measure data loading time
            data_time.update(time.time() - end)
            if i==1 and training:
                # Rotation matrices are refreshed only on the first batch of
                # the epoch; disable further updates for the remaining batches.
                for module in conv_modules:
                    module.epoch=-1
            inputs = batch_data[0]['data']
            target = batch_data[0]['label'].squeeze().long()
            batchsize = args.batch_size if training else args.batch_size_test
            len_dataloader = int(np.ceil(data_loader._size/batchsize))
            if args.gpus is not None:
                inputs = inputs.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
            input_var = Variable(inputs.type(args.type))
            target_var = Variable(target)
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)
            if type(output) is list:
                # Some models return [logits, ...]; score only the first head.
                output = output[0]
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.data.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            if training:
                # compute gradient and take an optimizer step
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i*batchsize % args.print_freq == 0:
                logging.info('{phase} - Epoch: [{0}][{1}/{2}]\t'
                             'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                             'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                             'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                             'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                             'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                                 epoch, i*batchsize, data_loader._size,
                                 phase='TRAINING' if training else 'EVALUATING',
                                 batch_time=batch_time,
                                 data_time=data_time, loss=losses,
                                 top1=top1, top5=top5))
    else:
        # Standard PyTorch DataLoader yielding (inputs, target) tuples.
        for i, (inputs, target) in enumerate(data_loader):
            # measure data loading time
            data_time.update(time.time() - end)
            if i==1 and training:
                # Same first-batch-only rotation update as the DALI branch.
                for module in conv_modules:
                    module.epoch=-1
            if args.gpus is not None:
                inputs = inputs.cuda(non_blocking=True)
                target = target.cuda(non_blocking=True)
            input_var = Variable(inputs.type(args.type))
            target_var = Variable(target)
            batchsize = args.batch_size if training else args.batch_size_test
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)
            if type(output) is list:
                output = output[0]
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.data.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            if training:
                # compute gradient and take an optimizer step
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                logging.info('{phase} - Epoch: [{0}][{1}/{2}]\t'
                             'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                             'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                             'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                             'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                             'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                                 epoch, i*batchsize, len(data_loader)*batchsize,
                                 phase='TRAINING' if training else 'EVALUATING',
                                 batch_time=batch_time,
                                 data_time=data_time, loss=losses,
                                 top1=top1, top5=top5))
    return losses.avg, top1.avg, top5.avg
def train(data_loader, model, criterion, epoch, optimizer):
    """Run one training epoch; returns (avg_loss, avg_top1, avg_top5)."""
    model.train()  # enable dropout / batch-norm running-stat updates
    stats = forward(data_loader, model, criterion, epoch,
                    training=True, optimizer=optimizer)
    return stats
def validate(data_loader, model, criterion, epoch):
    """Run one evaluation pass; returns (avg_loss, avg_top1, avg_top5)."""
    model.eval()  # freeze dropout / batch-norm statistics
    stats = forward(data_loader, model, criterion, epoch,
                    training=False, optimizer=None)
    return stats
if __name__ == '__main__':
    # Launch training/evaluation when executed as a script.
    main()
| 17,019 | 40.111111 | 161 | py |
RBNN | RBNN-master/imagenet/modules/binarized_modules.py | import torch
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.autograd import Function, Variable
from scipy.stats import ortho_group
from utils.options import args
class BinarizeConv2d(nn.Conv2d):
    """RBNN binary convolution: rotated weight binarization + sign activations.

    Wraps nn.Conv2d; weights are standardized, rotated by two learned-via-SVD
    orthogonal matrices (R1, R2), binarized with a soft sign (BinaryQuantize),
    and scaled by a per-output-channel learnable alpha. Activations are
    standardized and binarized with BinaryQuantize_a unless args.a32 keeps
    them full-precision. `epoch`, `k`, `t` are pushed in from the training
    loop each epoch.
    """
    def __init__(self, *kargs, **kwargs):
        super(BinarizeConv2d, self).__init__(*kargs, **kwargs)
        # k/t shape the surrogate gradient; overwritten per epoch by main().
        self.k = torch.tensor([10.]).float()
        self.t = torch.tensor([0.1]).float()
        # epoch > -1 enables the rotation-matrix refresh in forward().
        self.epoch = -1
        w = self.weight
        # Factor the per-filter weight count into a (a, b) grid so each
        # filter can be viewed as an a x b matrix for the bilinear rotation.
        self.a, self.b = get_ab(np.prod(w.shape[1:]))
        # Random orthogonal initializations for the two rotation matrices;
        # registered as buffers so they are saved/restored with state_dict.
        R1 = torch.tensor(ortho_group.rvs(dim=self.a)).float().cuda()
        R2 = torch.tensor(ortho_group.rvs(dim=self.b)).float().cuda()
        self.register_buffer('R1', R1)
        self.register_buffer('R2', R2)
        self.Rweight = torch.ones_like(w)
        # Per-output-channel scaling factor, initialized to mean |w|.
        sw = w.abs().view(w.size(0), -1).mean(-1).float().view(w.size(0), 1, 1).detach()
        self.alpha = nn.Parameter(sw.cuda(), requires_grad=True)
        # Learnable per-filter rotation gate, initialized at pi/2 so
        # |sin(rotate)| starts at 1 (full rotation applied).
        self.rotate = nn.Parameter(torch.ones(w.size(0), 1, 1, 1).cuda()*np.pi/2, requires_grad=True)
        self.Rotate = torch.zeros(1)

    def forward(self, input):
        a0 = input
        w = self.weight
        # Standardize weights (zero mean, unit std per filter).
        w1 = w - w.mean([1,2,3], keepdim=True)
        w2 = w1 / w1.std([1,2,3], keepdim=True)
        # Standardize activations (zero mean, unit std per sample).
        a1 = a0 - a0.mean([1,2,3], keepdim=True)
        a2 = a1 / a1.std([1,2,3], keepdim=True)
        a, b = self.a, self.b
        X = w2.view(w.shape[0], a, b)
        if self.epoch > -1 and self.epoch % args.rotation_update == 0:
            # Alternate-optimize B (binary target) and the orthogonal R1, R2
            # via SVD (orthogonal Procrustes) for a few rounds.
            for _ in range(3):
                #* update B
                V = self.R1.t() @ X.detach() @ self.R2
                B = torch.sign(V)
                #* update R1
                D1 = sum([Bi@(self.R2.t())@(Xi.t()) for (Bi,Xi) in zip(B,X.detach())])
                U1, S1, V1 = torch.svd(D1)
                self.R1 = (V1@(U1.t()))
                #* update R2
                D2 = sum([(Xi.t())@self.R1@Bi for (Xi,Bi) in zip(X.detach(),B)])
                U2, S2, V2 = torch.svd(D2)
                self.R2 = (U2@(V2.t()))
            # Cache the rotated weights for reuse (detached below) on batches
            # where the rotation is not refreshed.
            # NOTE(review): indentation reconstructed — assumes this caching
            # happens only on rotation-update steps; confirm against upstream.
            self.Rweight = ((self.R1.t())@X@(self.R2)).view_as(w)
        # Blend standardized weights toward the rotated target, gated by
        # the learnable |sin(rotate)| factor.
        delta = self.Rweight.detach() - w2
        w3 = w2 + torch.abs(torch.sin(self.rotate)) * delta
        #* binarize
        bw = BinaryQuantize().apply(w3, self.k.to(w.device), self.t.to(w.device))
        if args.a32:
            # Keep full-precision activations when requested.
            ba = a2
        else:
            ba = BinaryQuantize_a().apply(a2, self.k.to(w.device), self.t.to(w.device))
        #* 1bit conv
        output = F.conv2d(ba, bw, self.bias, self.stride, self.padding,
                          self.dilation, self.groups)
        #* scaling factor
        output = output * self.alpha
        return output
class BinaryQuantize(Function):
    """Straight-through sign() used for weight binarization.

    forward: y = sign(x).
    backward: triangle-shaped surrogate gradient
    k * (2*sqrt(t^2/2) - |t^2 * x|), clipped at zero and multiplied by the
    incoming gradient. k and t are annealed by the training loop; no
    gradients flow to them.
    """
    @staticmethod
    def forward(ctx, input, k, t):
        # Stash the raw input and the two shaping scalars for backward.
        ctx.save_for_backward(input, k, t)
        return input.sign()

    @staticmethod
    def backward(ctx, grad_output):
        input, k, t = ctx.saved_tensors
        t_sq = t ** 2
        # Piecewise-linear approximation of d(sign)/dx: peaks at x = 0,
        # reaches zero at |x| = 2*sqrt(1/2)/t^2 * ... (clamped outside).
        surrogate = k * (2 * torch.sqrt(t_sq / 2) - (t_sq * input).abs())
        grad_input = torch.clamp(surrogate, min=0) * grad_output.clone()
        return grad_input, None, None
class BinaryQuantize_a(Function):
    """Straight-through sign() used for activation binarization.

    Same forward as BinaryQuantize; the backward pass overrides the saved
    scalars with k = 1 and t floored at 1, giving the fixed surrogate
    (2*sqrt(1/2) - |x|) clipped at zero.
    """
    @staticmethod
    def forward(ctx, input, k, t):
        ctx.save_for_backward(input, k, t)
        return input.sign()

    @staticmethod
    def backward(ctx, grad_output):
        input, k, t = ctx.saved_tensors
        # Activations ignore the annealed k and floor t at 1.
        k = torch.tensor(1.).to(input.device)
        t = max(t, torch.tensor(1.).to(input.device))
        t_sq = t ** 2
        surrogate = k * (2 * torch.sqrt(t_sq / 2) - (t_sq * input).abs())
        grad_input = torch.clamp(surrogate, min=0) * grad_output.clone()
        return grad_input, None, None
def get_ab(N):
    """Factor N into (a, b) with a * b == N and a <= b, a as close to
    sqrt(N) as possible (used to view each filter as an a x b matrix)."""
    upper = int(np.sqrt(N))
    for candidate in range(upper, 0, -1):
        if N % candidate == 0:
            return candidate, N // candidate
| 3,835 | 34.518519 | 101 | py |
RBNN | RBNN-master/imagenet/modules/__init__.py | from .binarized_modules import * | 32 | 32 | 32 | py |
RBNN | RBNN-master/imagenet/dataset/dataset.py | from datetime import datetime
import os
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
def load_data(type='both',dataset='cifar10',data_path='/data',batch_size = 256,batch_size_test=256,num_workers=0):
    """Build train/test DataLoaders for CIFAR-10/100, MNIST or Tiny-ImageNet.

    `type` selects the return value: 'both' -> (train_loader, test_loader),
    'train' or 'val' -> a single loader. For Tiny-ImageNet (an ImageFolder
    dataset) both loaders are always returned as a dict-values view.
    Datasets must already exist under `data_path` (download=False).
    """
    # load data
    # Per-dataset class, input size and normalization statistics.
    param = {'cifar10':{'name':datasets.CIFAR10,'size':32,'normalize':[[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]]},
             'cifar100':{'name':datasets.CIFAR100,'size':32,'normalize':[(0.507, 0.487, 0.441), (0.267, 0.256, 0.276)]},
             'mnist':{'name':datasets.MNIST,'size':32,'normalize':[(0.5,0.5,0.5),(0.5,0.5,0.5)]},
             'tinyimagenet':{'name':datasets.ImageFolder,'size':224,'normalize':[(0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262)]}}
    data = param[dataset]
    if data['name']==datasets.ImageFolder:
        # Tiny-ImageNet branch: folder-per-class layout with train/val splits.
        data_transforms = {
            'train': transforms.Compose([
                transforms.Resize(data['size']),
                transforms.RandomRotation(20),
                transforms.RandomHorizontalFlip(0.5),
                transforms.ToTensor(),
                transforms.Normalize(*data['normalize']),
            ]),
            'val': transforms.Compose([
                transforms.Resize(data['size']),
                transforms.ToTensor(),
                transforms.Normalize(*data['normalize']),
            ]),
            # 'test' transform is defined but unused below (only train/val load).
            'test': transforms.Compose([
                transforms.Resize(data['size']),
                transforms.ToTensor(),
                transforms.Normalize(*data['normalize']),
            ])
        }
        data_dir = os.path.join(data_path,'tiny-imagenet-200')
        image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
                          for x in ['train', 'val']}
        dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size, shuffle=(x=='train'), num_workers=num_workers)
                       for x in ['train', 'val']}
        # Insertion order guarantees (train_loader, val_loader) ordering.
        return dataloaders.values()
    else:
        # Torchvision built-in datasets: crop+flip augmentation for training.
        transform1 = transforms.Compose([
            transforms.RandomCrop(data['size'],padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(*data['normalize']),
        ])
        transform2 = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(*data['normalize']),
        ])
        trainset = data['name'](root=data_path,
                                train=True,
                                download=False,
                                transform=transform1);
        trainloader = DataLoader(
            trainset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            pin_memory=True)
        testset = data['name'](root=data_path,
                               train=False,
                               download=False,
                               transform=transform2);
        testloader = DataLoader(
            testset,
            batch_size=batch_size_test,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=True)
        if type=='both':
            return trainloader, testloader
        elif type=='train':
            return trainloader
        elif type=='val':
            return testloader
def delete_module_fromdict(statedict):
    """Strip the leading 'module.' DataParallel prefix (7 chars) from every
    key of a state dict, preserving key order. Assumes all keys carry the
    prefix."""
    from collections import OrderedDict
    stripped = OrderedDict(
        (key[7:], value) for key, value in statedict.items())
    return stripped
def add_module_fromdict(statedict):
    """Prefix every key with 'module.' so a plain state dict can be loaded
    into an nn.DataParallel-wrapped model; preserves key order."""
    from collections import OrderedDict
    prefixed = OrderedDict(
        ('module.' + key, value) for key, value in statedict.items())
    return prefixed
| 3,858 | 37.59 | 134 | py |
RBNN | RBNN-master/imagenet/dataset/__init__.py | from .dataset import load_data, add_module_fromdict
from .imagenet import get_imagenet_iter_dali as get_imagenet
from .imagenet import get_imagenet_iter_torch as get_imagenet_torch | 180 | 59.333333 | 67 | py |
RBNN | RBNN-master/imagenet/dataset/imagenet.py | import time
import torch.utils.data
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import torchvision.datasets as datasets
from nvidia.dali.pipeline import Pipeline
import torchvision.transforms as transforms
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, DALIGenericIterator
class HybridTrainPipe(Pipeline):
    """DALI training pipeline: shuffled read -> GPU decode -> random resized
    crop -> normalize (ImageNet stats) + random horizontal mirror.

    Emits NCHW float batches; `dali_cpu` is accepted but unused (the graph
    always runs the GPU variant).
    """
    def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False, local_rank=0, world_size=1):
        super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
        dali_device = "gpu"
        # Sharded, shuffled file reader over the class-folder layout.
        self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size, random_shuffle=True)
        # 'mixed' = CPU parse + GPU JPEG decode.
        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        self.res = ops.RandomResizedCrop(device="gpu", size=crop, random_area=[0.08, 1.25])
        # Normalization uses ImageNet mean/std scaled to [0, 255] pixels.
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT,
                                            output_layout=types.NCHW,
                                            image_type=types.RGB,
                                            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
        # Per-sample coin flip drives the random mirror in define_graph().
        self.coin = ops.CoinFlip(probability=0.5)
        print('DALI "{0}" variant'.format(dali_device))

    def define_graph(self):
        """Wire the ops into the DALI graph; returns [images, labels]."""
        rng = self.coin()
        self.jpegs, self.labels = self.input(name="Reader")
        images = self.decode(self.jpegs)
        images = self.res(images)
        output = self.cmnp(images, mirror=rng)
        return [output, self.labels]
class HybridValPipe(Pipeline):
    """DALI validation pipeline: sequential read -> GPU decode -> resize
    shorter side to `size` -> center crop to `crop` -> normalize (ImageNet
    stats, no mirroring)."""
    def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size, local_rank=0, world_size=1):
        super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
        # Deterministic (unshuffled) sharded reader for evaluation.
        self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size,
                                    random_shuffle=False)
        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        self.res = ops.Resize(device="gpu", resize_shorter=size, interp_type=types.INTERP_TRIANGULAR)
        # Center crop happens inside CropMirrorNormalize via crop=(crop, crop).
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT,
                                            output_layout=types.NCHW,
                                            crop=(crop, crop),
                                            image_type=types.RGB,
                                            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])

    def define_graph(self):
        """Wire the ops into the DALI graph; returns [images, labels]."""
        self.jpegs, self.labels = self.input(name="Reader")
        images = self.decode(self.jpegs)
        images = self.res(images)
        output = self.cmnp(images)
        return [output, self.labels]
def get_imagenet_iter_dali(type, image_dir, batch_size, num_threads, device_id, num_gpus=1, crop=224, val_size=256,
                           world_size=1, local_rank=0):
    """Build a DALI classification iterator over ImageNet.

    `type` is 'train' or 'val'; any other value falls through and returns
    None implicitly. `device_id` and `num_gpus` are accepted for API
    symmetry with the torch variant but are not used here (pipelines bind
    to `local_rank`).
    """
    if type == 'train':
        # NOTE(review): training data is read from '/ILSVRC2012_img_train'
        # while the torch loader uses '/train' — confirm the on-disk layout.
        pip_train = HybridTrainPipe(batch_size=batch_size, num_threads=num_threads, device_id=local_rank,
                                    data_dir=image_dir + '/ILSVRC2012_img_train',
                                    crop=crop, world_size=world_size, local_rank=local_rank)
        pip_train.build()
        # Size is the per-shard slice of the dataset; auto_reset re-arms the
        # iterator at the end of each epoch.
        dali_iter_train = DALIClassificationIterator(pip_train, size=pip_train.epoch_size("Reader") // world_size, auto_reset=True)
        return dali_iter_train
    elif type == 'val':
        pip_val = HybridValPipe(batch_size=batch_size, num_threads=num_threads, device_id=local_rank,
                                data_dir=image_dir + '/val',
                                crop=crop, size=val_size, world_size=world_size, local_rank=local_rank)
        pip_val.build()
        dali_iter_val = DALIClassificationIterator(pip_val, size=pip_val.epoch_size("Reader") // world_size, auto_reset=True)
        return dali_iter_val
def get_imagenet_iter_torch(type, image_dir, batch_size, num_threads, device_id, num_gpus=1, crop=224, val_size=256,
                            world_size=1, local_rank=0):
    """Build a plain torchvision ImageNet DataLoader.

    `type` == 'train' gives shuffled random-resized-crop + flip augmentation
    from image_dir/train; anything else gives the deterministic
    resize/center-crop pipeline from image_dir/val. The device/world
    parameters are accepted only for API symmetry with the DALI variant.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    is_train = type == 'train'
    if is_train:
        augmentation = [
            transforms.RandomResizedCrop(crop, scale=(0.08, 1.25)),
            transforms.RandomHorizontalFlip(),
        ]
        split_dir = image_dir + '/train'
    else:
        augmentation = [
            transforms.Resize(val_size),
            transforms.CenterCrop(crop),
        ]
        split_dir = image_dir + '/val'
    pipeline = transforms.Compose(augmentation + [transforms.ToTensor(), normalize])
    dataset = datasets.ImageFolder(split_dir, pipeline)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=is_train,
                                             num_workers=num_threads, pin_memory=True)
    return dataloader
if __name__ == '__main__':
    # Ad-hoc benchmark comparing DALI vs torchvision loader throughput over
    # one full pass of the training set (paths are machine-specific).
    train_loader = get_imagenet_iter_dali(type='train', image_dir='/userhome/memory_data/imagenet', batch_size=256,
                                          num_threads=4, crop=224, device_id=0, num_gpus=1)
    print('start iterate')
    start = time.time()
    for i, data in enumerate(train_loader):
        # DALI batches: list of dicts with 'data'/'label' entries.
        images = data[0]["data"].cuda(non_blocking=True)
        labels = data[0]["label"].squeeze().long().cuda(non_blocking=True)
    end = time.time()
    print('end iterate')
    print('dali iterate time: %fs' % (end - start))
    train_loader = get_imagenet_iter_torch(type='train', image_dir='/userhome/data/imagenet', batch_size=256,
                                           num_threads=4, crop=224, device_id=0, num_gpus=1)
    print('start iterate')
    start = time.time()
    for i, data in enumerate(train_loader):
        # torchvision batches: (images, labels) tuples.
        images = data[0].cuda(non_blocking=True)
        labels = data[1].cuda(non_blocking=True)
    end = time.time()
    print('end iterate')
    print('torch iterate time: %fs' % (end - start))
| 6,531 | 51.677419 | 131 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.