repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/analyse_dataset.py | # -*- coding: utf-8 -*-
import associate
import numpy as np
import matplotlib.pyplot as plt
import yaml
def loadDataset(filename):
    """Load a whitespace-, comma- or tab-separated trajectory file.

    Lines starting with '#' are skipped as comments. Returns a 2-D
    float64 numpy array with one row per data line.
    """
    # Context manager closes the file deterministically (the original
    # leaked the handle until garbage collection).
    with open(filename) as f:
        data = f.read()
    lines = data.replace(",", " ").replace("\t", " ").split("\n")
    D = np.array([[v.strip() for v in line.split(" ") if v.strip() != ""]
                  for line in lines if len(line) > 0 and line[0] != "#"],
                 dtype=np.float64)
    return D
# -- script configuration -------------------------------------------------
# NOTE(review): hard-coded local dataset path -- adjust before running.
dataset_dir = '/home/cforster/Datasets/SlamBenchmark/px4_r2'
trajectory_data = dataset_dir+'/groundtruth.txt'
stepsize = 10  # sample the trajectory every `stepsize` rows

# load dataset
data = loadDataset(trajectory_data)
n = data.shape[0]
steps = np.arange(0, n, stepsize)

# compute trajectory length as the sum of displacements between samples
last_pos = data[0, 1:4]
trajectory_length = 0
for i in steps[1:]:
    new_pos = data[i, 1:4]
    trajectory_length += np.linalg.norm(new_pos - last_pos)
    last_pos = new_pos

# Summary statistics. print() with a single argument is valid in both
# Python 2 and 3 (the original used Python-2-only print statements);
# the "lenght" typo in the first message is fixed.
print('trajectory length = ' + str(trajectory_length) + 'm')
print('height mean = ' + str(np.mean(data[:, 3])) + 'm')
print('height median = ' + str(np.median(data[:, 3])) + 'm')
print('height std = ' + str(np.std(data[:, 3])) + 'm')
print('duration = ' + str(data[-1, 0] - data[0, 0]) + 's')
print('speed = ' + str(trajectory_length / (data[-1, 0] - data[0, 0])) + 'm/s')
| 1,178 | 27.071429 | 139 | py |
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/analyse_trajectory.py | #!/usr/bin/python
import os
import yaml
import argparse
import numpy as np
import matplotlib.pyplot as plt
import svo_analysis.tum_benchmark_tools.associate as associate
import vikit_py.transformations as transformations
import vikit_py.align_trajectory as align_trajectory
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
def plot_translation_error(timestamps, translation_error, results_dir):
    """Plot x/y/z position drift in millimetres over time; save as PDF."""
    rel_time = timestamps - timestamps[0]
    fig = plt.figure(figsize=(8, 2.5))
    ax = fig.add_subplot(111, xlabel='time [s]', ylabel='position drift [mm]',
                         xlim=[0, rel_time[-1] + 4])
    for col, (style, axis_name) in enumerate((('r-', 'x'), ('g-', 'y'), ('b-', 'z'))):
        ax.plot(rel_time, translation_error[:, col] * 1000, style, label=axis_name)
    ax.legend()
    fig.tight_layout()
    fig.savefig(results_dir + '/translation_error.pdf')
def plot_rotation_error(timestamps, rotation_error, results_dir):
    """Plot yaw/pitch/roll orientation drift in radians over time; save as PDF."""
    rel_time = timestamps - timestamps[0]
    fig = plt.figure(figsize=(8, 2.5))
    ax = fig.add_subplot(111, xlabel='time [s]', ylabel='orientation drift [rad]',
                         xlim=[0, rel_time[-1] + 4])
    for col, (style, angle_name) in enumerate((('r-', 'yaw'), ('g-', 'pitch'), ('b-', 'roll'))):
        ax.plot(rel_time, rotation_error[:, col], style, label=angle_name)
    ax.legend()
    fig.tight_layout()
    fig.savefig(results_dir + '/orientation_error.pdf')
def analyse_synthetic_trajectory(results_dir):
    """Plot translation and orientation drift for a synthetic (Blender) run.

    Reads 'translation_error.txt' and 'orientation_error.txt' from
    results_dir; column 0 holds the timestamp, columns 1-3 the error vector.
    """
    for error_file, plot_fn in (('translation_error.txt', plot_translation_error),
                                ('orientation_error.txt', plot_rotation_error)):
        table = np.loadtxt(os.path.join(results_dir, error_file))
        plot_fn(table[:, 0], table[:, 1:4], results_dir)
def analyse_optitrack_trajectory_with_hand_eye_calib(results_dir, params, n_align_frames = 200):
    # Align an estimated trajectory to Optitrack groundtruth using the
    # hand-eye calibration (camera<->marker transform) stored in `params`,
    # then plot translation drift, orientation drift, scale drift and the
    # 2-D trajectory into `results_dir`.
    # NOTE: contains Python-2-only print statements.
    print('loading hand-eye-calib')
    # T_cm: transform from (m)arker to (c)amera frame, as quaternion + translation.
    T_cm_quat = np.array([params['hand_eye_calib']['Tcm_qx'],
                          params['hand_eye_calib']['Tcm_qy'],
                          params['hand_eye_calib']['Tcm_qz'],
                          params['hand_eye_calib']['Tcm_qw']])
    T_cm_tran = np.array([params['hand_eye_calib']['Tcm_tx'],
                          params['hand_eye_calib']['Tcm_ty'],
                          params['hand_eye_calib']['Tcm_tz']])
    T_cm = get_rigid_body_trafo(T_cm_quat, T_cm_tran)
    T_mc = transformations.inverse_matrix(T_cm)
    t_es, p_es, q_es, t_gt, p_gt, q_gt = load_dataset(results_dir, params['cam_delay'])
    # align Sim3 to get scale
    print('align Sim3 using '+str(n_align_frames)+' first frames.')
    scale,rot,trans = align_trajectory.align_sim3(p_gt[0:n_align_frames,:], p_es[0:n_align_frames,:])
    print 'scale = '+str(scale)
    # get trafo between (v)ision and (o)ptitrack frame
    print q_gt[0,:]
    print p_gt[0,:]
    T_om = get_rigid_body_trafo(q_gt[0,:], p_gt[0,:])
    T_vc = get_rigid_body_trafo(q_es[0,:], scale*p_es[0,:])
    T_cv = transformations.inverse_matrix(T_vc)
    T_ov = np.dot(T_om, np.dot(T_mc, T_cv))
    print 'T_ov = ' + str(T_ov)
    # apply transformation to estimated trajectory
    q_es_aligned = np.zeros(np.shape(q_es))
    rpy_es_aligned = np.zeros(np.shape(p_es))
    rpy_gt = np.zeros(np.shape(p_es))
    p_es_aligned = np.zeros(np.shape(p_es))
    for i in range(np.shape(p_es)[0]):
        # map each estimated camera pose into the optitrack/marker frame
        T_vc = get_rigid_body_trafo(q_es[i,:],p_es[i,:])
        T_vc[0:3,3] *= scale
        T_om = np.dot(T_ov, np.dot(T_vc, T_cm))
        p_es_aligned[i,:] = T_om[0:3,3]
        q_es_aligned[i,:] = transformations.quaternion_from_matrix(T_om)
        rpy_es_aligned[i,:] = transformations.euler_from_quaternion(q_es_aligned[i,:], 'rzyx')
        rpy_gt[i,:] = transformations.euler_from_quaternion(q_gt[i,:], 'rzyx')
    # plot position error (drift)
    translation_error = (p_gt-p_es_aligned)
    plot_translation_error(t_es, translation_error, results_dir)
    # plot orientation error (drift)
    orientation_error = (rpy_gt - rpy_es_aligned)
    plot_rotation_error(t_es, orientation_error, results_dir)
    # plot scale drift
    # NOTE(review): np.diff(x, 0) is a zero-order difference and returns x
    # unchanged; the variable names suggest np.diff(..., axis=0) (per-frame
    # motion) was intended -- confirm before relying on the scale-drift plot.
    motion_gt = np.diff(p_gt, 0)
    motion_es = np.diff(p_es_aligned, 0)
    dist_gt = np.sqrt(np.sum(np.multiply(motion_gt,motion_gt),1))
    dist_es = np.sqrt(np.sum(np.multiply(motion_es,motion_es),1))
    fig = plt.figure(figsize=(8,2.5))
    ax = fig.add_subplot(111, xlabel='time [s]', ylabel='scale change [\%]', xlim=[0,t_es[-1]+4])
    scale_drift = np.divide(dist_es,dist_gt)*100-100
    ax.plot(t_es, scale_drift, 'b-')
    fig.tight_layout()
    fig.savefig(results_dir+'/scale_drift.pdf')
    # plot trajectory (top-down x/y view); first n_align_frames highlighted
    fig = plt.figure()
    ax = fig.add_subplot(111, title='trajectory', aspect='equal', xlabel='x [m]', ylabel='y [m]')
    ax.plot(p_es_aligned[:,0], p_es_aligned[:,1], 'b-', label='estimate')
    ax.plot(p_gt[:,0], p_gt[:,1], 'r-', label='groundtruth')
    ax.plot(p_es_aligned[0:n_align_frames,0], p_es_aligned[0:n_align_frames,1], 'g-', linewidth=2, label='aligned')
    ax.legend()
    fig.tight_layout()
    fig.savefig(results_dir+'/trajectory.pdf')
def analyse_trajectory(results_dir, n_align_frames = 200, use_hand_eye_calib = True):
    """Analyse an estimated trajectory against its groundtruth.

    Parameters
    ----------
    results_dir : str
        Folder containing 'dataset_params.yaml', 'groundtruth.txt' and
        'traj_estimate.txt'.
    n_align_frames : int
        Number of initial frames used for the Sim3 alignment.
    use_hand_eye_calib : bool
        Apply the hand-eye calibration from the parameter file
        (ignored for Blender datasets).
    """
    # yaml.safe_load instead of yaml.load: the parameter file holds plain
    # data only, and yaml.load without an explicit Loader is deprecated and
    # unsafe. The context manager also closes the previously-leaked handle.
    with open(os.path.join(results_dir, 'dataset_params.yaml'), 'r') as params_file:
        params = yaml.safe_load(params_file)
    if params['dataset_is_blender']:
        analyse_synthetic_trajectory(results_dir)
    elif use_hand_eye_calib:
        analyse_optitrack_trajectory_with_hand_eye_calib(results_dir, params, n_align_frames)
    else:
        t_es, p_es, q_es, t_gt, p_gt, q_gt = load_dataset(results_dir, params['cam_delay'])
        scale, rot, trans = align_trajectory.align_sim3(p_gt[0:n_align_frames, :], p_es[0:n_align_frames, :])
        p_es_aligned = np.zeros(np.shape(p_es))
        for i in range(np.shape(p_es)[0]):
            p_es_aligned[i, :] = scale * rot.dot(p_es[i, :]) + trans
        # plot position error (drift)
        translation_error = (p_gt - p_es_aligned)
        plot_translation_error(t_es, translation_error, results_dir)
def get_rigid_body_trafo(quat, trans):
    """Build a 4x4 homogeneous transform from a quaternion and a translation."""
    trafo = transformations.quaternion_matrix(quat)
    trafo[0:3, 3] = trans
    return trafo
def load_dataset(results_dir, cam_delay):
    """Load and time-associate the groundtruth and estimated trajectories.

    Parameters
    ----------
    results_dir : str
        Folder containing 'groundtruth.txt' and 'traj_estimate.txt'
        ("stamp tx ty tz qx qy qz qw" per line).
    cam_delay : float
        Camera delay in seconds, used as association offset.

    Returns
    -------
    (t_es, p_es, q_es, t_gt, p_gt, q_gt) with timestamps shifted so the
    earliest matched stamp is zero.
    """
    print('loading dataset in '+results_dir)
    print('cam_delay = '+str(cam_delay))

    def _read_trajectory(filename):
        # Parse "stamp d1 d2 ..." lines into {stamp: [d1, d2, ...]}.
        # `float` replaces the `np.float` alias removed in NumPy >= 1.24,
        # and the file handle is closed deterministically.
        with open(filename) as f:
            raw = f.read()
        lines = raw.replace(",", " ").replace("\t", " ").split("\n")
        rows = [[float(v.strip()) for v in line.split(" ") if v.strip() != ""]
                for line in lines if len(line) > 0 and line[0] != "#"]
        return dict((row[0], row[1:]) for row in rows)

    data_gt = _read_trajectory(os.path.join(results_dir, 'groundtruth.txt'))
    data_es = _read_trajectory(os.path.join(results_dir, 'traj_estimate.txt'))

    # Negative offset: the estimate lags the groundtruth by cam_delay.
    matches = associate.associate(data_gt, data_es, -cam_delay, 0.02)
    p_gt = np.array([[float(value) for value in data_gt[a][0:3]] for a, b in matches])
    q_gt = np.array([[float(value) for value in data_gt[a][3:7]] for a, b in matches])
    p_es = np.array([[float(value) for value in data_es[b][0:3]] for a, b in matches])
    q_es = np.array([[float(value) for value in data_es[b][3:7]] for a, b in matches])
    t_gt = np.array([float(a) for a, b in matches])
    t_es = np.array([float(b) for a, b in matches])

    # set start time to zero
    start_time = min(t_es[0], t_gt[0])
    t_es -= start_time
    t_gt -= start_time
    return t_es, p_es, q_es, t_gt, p_gt, q_gt
if __name__ == '__main__':
    # parse command line
    parser = argparse.ArgumentParser(description='''
    Analyse trajectory
    ''')
    parser.add_argument('results_dir', help='folder with the results')
    parser.add_argument('--use_hand_eye_calib', help='', action='store_true')
    parser.add_argument('--n_align_frames', help='', default=200)
    args = parser.parse_args()
    print('analyse trajectory for dataset: '+str(args.results_dir))
    # argparse delivers --n_align_frames as a string; convert explicitly.
    analyse_trajectory(args.results_dir,
                       n_align_frames = int(args.n_align_frames),
                       use_hand_eye_calib = args.use_hand_eye_calib) | 8,764 | 46.378378 | 164 | py |
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/hand_eye_calib.py | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 15:47:55 2013
@author: cforster
"""
import os
import yaml
import numpy as np
import svo_analysis.tum_benchmark_tools.associate as associate
import vikit_py.align_trajectory as align_trajectory
import vikit_py.transformations as transformations
import matplotlib.pyplot as plt
# user config
display = True
# NOTE(review): hard-coded local dataset path -- adjust before running.
dataset_dir = '/home/cforster/catkin_ws/src/rpg_svo/svo_analysis/results/rpg_circle_1'
n_measurements = 400  # number of random relative-motion samples for the calibration
n_align_sim3 = 600    # number of first frames used for the Sim3 alignment
delta = 50            # frame offset between the two poses of a relative motion
# load dataset parameters
params = yaml.load(open(os.path.join(dataset_dir, 'dataset_params.yaml')))
# set trajectory groundtruth and estimate file
traj_groundtruth = os.path.join(dataset_dir, 'groundtruth.txt')
traj_estimate = os.path.join(dataset_dir,'traj_estimate.txt')
# load data
data_gt = associate.read_file_list(traj_groundtruth)
data_es = associate.read_file_list(traj_estimate)
# select matches: pair groundtruth and estimate stamps within 0.02s,
# compensating the camera delay from the parameter file
offset = -params['cam_delay']
print ('offset = '+str(offset))
matches = associate.associate(data_gt, data_es, offset, 0.02)
#matches = matches[500:]
p_gt = np.array([[float(value) for value in data_gt[a][0:3]] for a,b in matches])
q_gt = np.array([[float(value) for value in data_gt[a][3:7]] for a,b in matches])
p_es = np.array([[float(value) for value in data_es[b][0:3]] for a,b in matches])
q_es = np.array([[float(value) for value in data_es[b][3:7]] for a,b in matches])
# --------------------------------------------------------------------------------
# align Sim3 to get scale
scale,rot,trans = align_trajectory.align_sim3(p_gt[0:n_align_sim3,:], p_es[0:n_align_sim3,:])
#model_aligned = s * R * model + t
#alignment_error = model_aligned - data
#t_error = np.sqrt(np.sum(np.multiply(alignment_error,alignment_error),0)).A[0]
# apply the Sim3 alignment p_aligned = s * R * p + t
# (Python-2-only print statements below)
p_es_aligned = np.transpose(scale*np.dot(rot,np.transpose(p_es)))+trans
p_es = scale*p_es
print 's='+str(scale)
print 't='+str(trans)
print 'R='+str(rot)
# plot sim3 aligned trajectory (top-down view); the frames used for the
# alignment are drawn with thicker lines
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax.plot(p_es_aligned[:,0], p_es_aligned[:,1], 'r-', label='estimate', alpha=0.2)
ax.plot(p_gt[:,0], p_gt[:,1], 'b-', label='groundtruth', alpha=0.2)
ax.plot(p_es_aligned[:n_align_sim3,0], p_es_aligned[:n_align_sim3,1], 'g-', label='aligned', linewidth=2)
ax.plot(p_gt[:n_align_sim3,0], p_gt[:n_align_sim3,1], 'm-', label='aligned', linewidth=2)
ax.legend()
# draw correspondence lines between every 10th aligned/groundtruth pair
for (x1,y1,z1),(x2,y2,z2) in zip(p_es_aligned[:n_align_sim3:10],p_gt[:n_align_sim3:10]):
    ax.plot([x1,x2],[y1,y2],'-',color="red")
# per-axis comparison over time (solid: groundtruth, dashed: estimate)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(p_gt[:,0], 'r-')
ax.plot(p_gt[:,1], 'g-')
ax.plot(p_gt[:,2], 'b-')
ax.plot(p_es_aligned[:,0], 'r--')
ax.plot(p_es_aligned[:,1], 'g--')
ax.plot(p_es_aligned[:,2], 'b--')
# --------------------------------------------------------------------------------
# hand-eye-calib
# select random measurements (start indices of relative motions of length delta)
I = np.array(np.random.rand(n_measurements,1)*(np.shape(matches)[0]-delta), dtype=int)[:,0]
R,b = align_trajectory.hand_eye_calib(q_gt, q_es, p_gt, p_es, I, delta, True)
print 'quat = ' + str(transformations.quaternion_from_matrix(transformations.convert_3x3_to_4x4(R)))
print 'b = ' + str(b)
# Evaluate the calibration: compare relative rotations/translations over a
# window of `delta` frames between the estimate (A) and the groundtruth (B).
rpy_es = np.zeros([q_es.shape[0]-1, 3])
rpy_gt = np.zeros([q_gt.shape[0]-1, 3])
t_gt = np.zeros([q_es.shape[0]-1,3])
t_es = np.zeros([q_es.shape[0]-1,3])
for i in range(delta,np.shape(q_es)[0]):
    A1 = transformations.quaternion_matrix(q_es[i-delta,:])[:3,:3]
    A2 = transformations.quaternion_matrix(q_es[i,:])[:3,:3]
    A = np.dot(A1.transpose(), A2)  # relative rotation of the estimate
    B1 = transformations.quaternion_matrix(q_gt[i-delta,:])[:3,:3]
    B2 = transformations.quaternion_matrix(q_gt[i,:])[:3,:3]
    B = np.dot(B1.transpose(), B2)  # relative rotation of the groundtruth
    B_es = np.dot(np.transpose(R), np.dot(A, R))  # A mapped through the hand-eye rotation
    rpy_gt[i-delta,:] = transformations.euler_from_matrix(B, 'rzyx')
    rpy_es[i-delta,:] = transformations.euler_from_matrix(B_es, 'rzyx')
    t_B = np.dot(np.transpose(B1),(p_gt[i,:]-p_gt[i-delta,:]))
    t_A = np.dot(np.transpose(A1),(p_es[i,:]-p_es[i-delta,:]))
    t_gt[i-delta,:] = t_B
    t_es[i-delta,:] = np.dot(np.transpose(R), np.dot(A,b[:,0]) + t_A - b[:,0])
# translation error of the calibrated relative motions; keep indices of the
# best 90% as I_accurate for a possible refinement pass
alignment_error = (t_gt-t_es)
error = np.sqrt(np.sum(np.multiply(alignment_error,alignment_error),1))
I_accurate = np.argwhere(error < np.percentile(error, 90))[:,0]
if display:
    plt.figure()
    plt.plot(rpy_es[:,0], 'r-', label='es roll')
    plt.plot(rpy_es[:,1], 'g-', label='es pitch')
    plt.plot(rpy_es[:,2], 'b-', label='es yaw')
    plt.plot(rpy_gt[:,0], 'r--', label='gt roll')
    plt.plot(rpy_gt[:,1], 'g--', label='gt pitch')
    plt.plot(rpy_gt[:,2], 'b--', label='gt yaw')
    plt.legend()
    plt.figure()
    plt.plot(t_gt[:,0], 'r-', label='gt x')
    plt.plot(t_gt[:,1], 'g-', label='gt y')
    plt.plot(t_gt[:,2], 'b-', label='gt z')
    plt.plot(t_es[:,0], 'r--', label='es x')
    plt.plot(t_es[:,1], 'g--', label='es y')
    plt.plot(t_es[:,2], 'b--', label='es z')
    plt.legend()
    plt.figure()
    plt.plot(error,'-g')
# summary error statistics (Python-2-only print statements)
print 'e_rms = ' + str(np.sqrt(np.dot(error,error) / len(error)))
print 'e_mean = ' + str(np.mean(error))
print 'e_median = ' + str(np.median(error))
print 'e_std = ' + str(np.std(error))
# now sample again from the filtered list:
# (constants for the commented-out refinement experiment below)
N = 10
display = False
n_acc_meas = I_accurate.size
n_measurements = 500
#for i in range(5):
# print '-------------------------------------'
# i0 = np.array(rand(n_measurements,1)*np.shape(I_accurate)[0], dtype=int)[:,0]
# i1 = np.minimum(i0+N, n_acc_meas-1)
# I = np.empty(i0.size*2, dtype=int)
# I[0::2] = I_accurate[i0]
# I[1::2] = I_accurate[i1]
# R,b = handEyeCalib(q_gt[I,:], q_es[I,:], p_gt[I,:], p_es[I,:], True)
# print 'quat = ' + str(ru.dcm2quat(R))
# print 'b = ' + str(b)
# rpy_es = np.zeros([q_es.shape[0]-1, 3])
# rpy_gt = np.zeros([q_gt.shape[0]-1, 3])
# t_gt = np.zeros([q_es.shape[0]-1,3])
# t_es = np.zeros([q_es.shape[0]-1,3])
#
# delta = 10
# for i in range(delta,np.shape(q_es)[0]):
# A1 = ru.quat2dcm(q_es[i-delta,:])
# A2 = ru.quat2dcm(q_es[i,:])
# A = np.dot(A1.transpose(), A2)
# B1 = ru.quat2dcm(q_gt[i-delta,:])
# B2 = ru.quat2dcm(q_gt[i,:])
# B = np.dot(B1.transpose(), B2)
# B_es = np.dot(np.transpose(R), np.dot(A, R))
# rpy_gt[i-delta,:] = ru.dcm2rpy(B)
# rpy_es[i-delta,:] = ru.dcm2rpy(B_es)
# t_B = np.dot(np.transpose(B1),(p_gt[i,:]-p_gt[i-delta,:]))
# t_A = np.dot(np.transpose(A1),(p_es[i,:]-p_es[i-delta,:]))
# t_gt[i-delta,:] = t_B
# t_es[i-delta,:] = np.dot(np.transpose(R), np.dot(A,b[:,0]) + t_A - b[:,0])
# alignment_error = (t_gt-t_es)
# error = np.sqrt(np.sum(np.multiply(alignment_error,alignment_error),1))
#
# if display:
# plt.figure()
# plt.plot(rpy_es[:,0], 'r-', label='es roll')
# plt.plot(rpy_es[:,1], 'g-', label='es pitch')
# plt.plot(rpy_es[:,2], 'b-', label='es yaw')
# plt.plot(rpy_gt[:,0], 'r--', label='gt roll')
# plt.plot(rpy_gt[:,1], 'g--', label='gt pitch')
# plt.plot(rpy_gt[:,2], 'b--', label='gt yaw')
# plt.legend()
# plt.figure()
# plt.plot(t_gt[:,0], 'r-', label='es x')
# plt.plot(t_gt[:,1], 'g-', label='es y')
# plt.plot(t_gt[:,2], 'b-', label='es z')
# plt.plot(t_es[:,0], 'r--', label='gt x')
# plt.plot(t_es[:,1], 'g--', label='gt y')
# plt.plot(t_es[:,2], 'b--', label='gt z')
# plt.legend()
# plt.figure()
# plt.plot(error,'-g')
#
# print 'e_rms = ' + str(np.sqrt(np.dot(error,error) / len(error)))
# print 'e_mean = ' + str(np.mean(error))
# print 'e_median = ' + str(np.median(error))
# print 'e_std = ' + str(np.std(error))
| 7,489 | 31.850877 | 105 | py |
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/__init__.py | 0 | 0 | 0 | py | |
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/analyse_depth.py | #!/usr/bin/python
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
def precision_plot(ax, errors, max_error, color, linestyle):
    """Draw a cumulative precision curve onto `ax`.

    For 500 evenly spaced thresholds in [0, max_error), plot the percentage
    of `errors` strictly below each threshold. Returns the line handle.
    """
    n_bins = 500
    step = max_error / n_bins
    thresholds = np.zeros(n_bins)
    precision = np.zeros(n_bins)
    total = float(len(errors))
    for k in range(n_bins):
        thresholds[k] = step * k
        precision[k] = np.sum(errors < step * k) / total * 100.0
    handle, = ax.plot(thresholds, precision, label='error', linestyle=linestyle, color=color)
    return handle
def analyse_depth(results_dir):
    """Plot the precision curve of the depth estimates and save it as PDF.

    Loads 'depth_error.txt' (columns: frame id, depth error), keeps only
    rows with frame id > 1, and plots the fraction of measurements below an
    increasing absolute-error threshold.
    """
    table = np.loadtxt(os.path.join(results_dir, 'depth_error.txt'), delimiter=' ')
    table = table[table[:, 0] > 1, :]
    abs_errors = np.abs(table[:, 1])
    fig = plt.figure(figsize=(6, 5))
    ax = fig.add_subplot(111, xlabel='depth error [m]', ylabel='Precision [\%]')
    precision_plot(ax, abs_errors, 0.05, 'r', '-')
    fig.tight_layout()
    fig.savefig(os.path.join(results_dir, 'depth_error.pdf'), bbox_inches="tight")
def plot_depth_over_time(results_dir, ax, x_axis_data, color, label=''):
    """Plot 10th/50th/90th percentiles of the absolute depth error per frame.

    Parameters
    ----------
    results_dir : str
        Folder containing 'depth_error.txt' (columns: frame id, depth error).
    ax : matplotlib-style axes to draw into.
    x_axis_data : sequence
        X coordinates for the plot; if empty, the frame ids are used.
    color : color for all three percentile curves.
    label : legend label for the median curve.
    """
    D = np.loadtxt(os.path.join(results_dir, 'depth_error.txt'), delimiter=' ')
    D = D[D[:, 0] > 1, :]  # discard the first frame(s)
    idxs = np.unique(D[:, 0])
    percentile_10 = np.zeros(len(idxs))
    percentile_50 = np.zeros(len(idxs))
    percentile_90 = np.zeros(len(idxs))
    for i in range(len(idxs)):
        errors = np.abs(D[D[:, 0] == idxs[i], 1])
        percentile_10[i] = np.percentile(errors, 10)
        percentile_50[i] = np.percentile(errors, 50)
        percentile_90[i] = np.percentile(errors, 90)
    if len(x_axis_data) == 0:
        x_axis_data = idxs
    # print() instead of the original Python-2-only print statements so the
    # module stays importable under Python 3 (rest of this file uses print()).
    print(np.shape(x_axis_data))
    print(np.shape(idxs))
    print('--')
    ax.plot(x_axis_data, percentile_50, linewidth=2, color=color, label=label)
    ax.plot(x_axis_data, percentile_10, linewidth=0.5, color=color, alpha=0.5)
    ax.plot(x_axis_data, percentile_90, linewidth=0.5, color=color, alpha=0.5)
    ax.fill_between(x_axis_data, percentile_10, percentile_90, color=color, alpha=0.1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Analyse depth estimate')
parser.add_argument('results_dir', help='folder with the results')
args = parser.parse_args()
analyse_depth(args.results_dir)
fig = plt.figure(figsize=(6,5))
ax = fig.add_subplot(111, xlabel='Measurement', ylabel='Scale-Drift')
plot_depth_over_time(args.results_dir, [], ax) | 2,697 | 39.878788 | 106 | py |
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/tum_benchmark_tools/associate.py | #!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Requirements:
# sudo apt-get install python-argparse
"""
The Kinect provides the color and depth images in an un-synchronized way. This means that the set of time stamps from the color images do not intersect with those of the depth images. Therefore, we need some way of associating color images to depth images.
For this purpose, you can use the ''associate.py'' script. It reads the time stamps from the rgb.txt file and the depth.txt file, and joins them by finding the best matches.
"""
import argparse
import sys
import os
import numpy
def read_file_list(filename):
    """
    Reads a trajectory from a text file.

    File format:
    The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)
    and "d1 d2 d3.." is arbitary data (e.g., a 3D position and 3D orientation) associated to this timestamp.

    Input:
    filename -- File name

    Output:
    dict -- dictionary of (stamp,data) tuples
    """
    # Use a context manager so the handle is always closed, and avoid
    # shadowing the builtins `file` and `list` as the original did.
    with open(filename) as f:
        data = f.read()
    lines = data.replace(",", " ").replace("\t", " ").split("\n")
    entries = [[v.strip() for v in line.split(" ") if v.strip() != ""]
               for line in lines if len(line) > 0 and line[0] != "#"]
    # Lines with only a timestamp and no data are dropped (len(l) > 1).
    return dict((float(l[0]), l[1:]) for l in entries if len(l) > 1)
def associate(first_list, second_list, offset, max_difference):
    """
    Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim
    to find the closest match for every input tuple.

    Input:
    first_list -- first dictionary of (stamp,data) tuples
    second_list -- second dictionary of (stamp,data) tuples
    offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
    max_difference -- search radius for candidate generation

    Output:
    matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))
    """
    # Copy the keys into real lists: under Python 3, dict.keys() returns a
    # view that has no remove(); we mutate these lists greedily below.
    first_keys = list(first_list)
    second_keys = list(second_list)
    potential_matches = [(abs(a - (b + offset)), a, b)
                         for a in first_keys
                         for b in second_keys
                         if abs(a - (b + offset)) < max_difference]
    # Greedy matching: best (smallest time difference) pairs are taken
    # first; each stamp is used at most once.
    potential_matches.sort()
    matches = []
    for diff, a, b in potential_matches:
        if a in first_keys and b in second_keys:
            first_keys.remove(a)
            second_keys.remove(b)
            matches.append((a, b))
    matches.sort()
    return matches
if __name__ == '__main__':
    # parse command line
    parser = argparse.ArgumentParser(description='''
    This script takes two data files with timestamps and associates them
    ''')
    parser.add_argument('first_file', help='estimated trajectory (format: timestamp data)')
    parser.add_argument('second_file', help='groundtruth (format: timestamp data)')
    parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)',default=0.0)
    parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)',default=0.02)
    args = parser.parse_args()
    first_list = read_file_list(args.first_file)
    second_list = read_file_list(args.second_file)
    matches = associate(first_list, second_list,float(args.offset),float(args.max_difference))
    # Write matched pairs: stamp of the first file, its first data column
    # (e.g. an image filename), then 7 pose values of the second file.
    # NOTE(review): assumes each matched second-file entry has at least
    # 7 data columns (tx ty tz qx qy qz qw) -- confirm for other inputs.
    filename = 'groundtruth_matched.txt'
    associates_file = open(filename, 'w')
    for a,b in matches:
        img = first_list[a]
        pos = second_list[b]
        associates_file.write('%.6f %s %s %s %s %s %s %s %s\n' %
                              (a, img[0],
                               pos[0], pos[1], pos[2],
                               pos[3], pos[4], pos[5], pos[6]))
    print('Wrote matches to file: ' + filename)
| 5,409 | 40.29771 | 256 | py |
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/tum_benchmark_tools/evaluate_ate.py | #!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Requirements:
# sudo apt-get install python-argparse
"""
This script computes the absolute trajectory error from the ground truth
trajectory and the estimated trajectory.
"""
import sys
import numpy
import argparse
import associate
def align(model, data):
    """Align two trajectories using the method of Horn (closed-form).

    Input:
    model -- first trajectory (3xn numpy.matrix)
    data -- second trajectory (3xn numpy.matrix)

    Output:
    rot -- rotation matrix (3x3)
    trans -- translation vector (3x1)
    trans_error -- translational error per point (1xn)
    """
    numpy.set_printoptions(precision=3, suppress=True)
    model_zerocentered = model - model.mean(1)
    data_zerocentered = data - data.mean(1)
    # Cross-covariance of the zero-centered point sets.
    W = numpy.zeros((3, 3))
    for column in range(model.shape[1]):
        W += numpy.outer(model_zerocentered[:, column], data_zerocentered[:, column])
    # numpy.linalg.svd instead of the private alias numpy.linalg.linalg.svd,
    # which was removed in NumPy 2.0.
    U, d, Vh = numpy.linalg.svd(W.transpose())
    # Reflection correction: force a proper rotation (det = +1).
    S = numpy.matrix(numpy.identity(3))
    if numpy.linalg.det(U) * numpy.linalg.det(Vh) < 0:
        S[2, 2] = -1
    rot = U * S * Vh
    trans = data.mean(1) - rot * model.mean(1)
    model_aligned = rot * model + trans
    alignment_error = model_aligned - data
    trans_error = numpy.sqrt(numpy.sum(numpy.multiply(alignment_error, alignment_error), 0)).A[0]
    return rot, trans, trans_error
def plot_traj(ax, stamps, traj, style, color, label):
    """
    Plot a trajectory using matplotlib, breaking the line wherever the
    time stamps have a gap of at least twice the median sampling interval.

    Input:
    ax -- the plot
    stamps -- time stamps (1xn); sorted in place
    traj -- trajectory (3xn)
    style -- line style
    color -- line color
    label -- plot legend (attached to the first segment only)
    """
    stamps.sort()
    gaps = [t1 - t0 for t0, t1 in zip(stamps[:-1], stamps[1:])]
    interval = numpy.median(gaps)
    xs, ys = [], []
    prev = stamps[0]
    for i, stamp in enumerate(stamps):
        if stamp - prev < 2 * interval:
            xs.append(traj[i][0])
            ys.append(traj[i][1])
        elif xs:
            # gap detected: flush the current segment and start a new one
            ax.plot(xs, ys, style, color=color, label=label)
            label = ""
            xs, ys = [], []
        prev = stamp
    if xs:
        ax.plot(xs, ys, style, color=color, label=label)
if __name__=="__main__":
    # parse command line
    parser = argparse.ArgumentParser(description='''
    This script computes the absolute trajectory error from the ground truth trajectory and the estimated trajectory.
    ''')
    parser.add_argument('first_file', help='ground truth trajectory (format: timestamp tx ty tz qx qy qz qw)')
    parser.add_argument('second_file', help='estimated trajectory (format: timestamp tx ty tz qx qy qz qw)')
    parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)',default=0.0)
    parser.add_argument('--scale', help='scaling factor for the second trajectory (default: 1.0)',default=1.0)
    parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)',default=0.02)
    parser.add_argument('--save', help='save aligned second trajectory to disk (format: stamp2 x2 y2 z2)')
    parser.add_argument('--save_associations', help='save associated first and aligned second trajectory to disk (format: stamp1 x1 y1 z1 stamp2 x2 y2 z2)')
    parser.add_argument('--plot', help='plot the first and the aligned second trajectory to an image (format: png)')
    parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the RMSE absolute translational error in meters after alignment will be printed)', action='store_true')
    args = parser.parse_args()
    # associate groundtruth and estimated poses by timestamp
    first_list = associate.read_file_list(args.first_file)
    second_list = associate.read_file_list(args.second_file)
    matches = associate.associate(first_list, second_list,float(args.offset),float(args.max_difference))
    if len(matches)<2:
        sys.exit("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory! Did you choose the correct sequence?")
    # 3xn matrices of matched positions; Horn alignment of estimate onto groundtruth
    first_xyz = numpy.matrix([[float(value) for value in first_list[a][0:3]] for a,b in matches]).transpose()
    second_xyz = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for a,b in matches]).transpose()
    rot,trans,trans_error = align(second_xyz,first_xyz)
    second_xyz_aligned = rot * second_xyz + trans
    # full (unmatched) trajectories, used for saving and plotting.
    # NOTE: Python 2 idiom -- dict.keys() returns a sortable list here;
    # under Python 3, keys() is a view without sort().
    first_stamps = first_list.keys()
    first_stamps.sort()
    first_xyz_full = numpy.matrix([[float(value) for value in first_list[b][0:3]] for b in first_stamps]).transpose()
    second_stamps = second_list.keys()
    second_stamps.sort()
    second_xyz_full = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for b in second_stamps]).transpose()
    second_xyz_full_aligned = rot * second_xyz_full + trans
    # report absolute translational error (RMSE only unless --verbose);
    # Python-2-only print statements below
    if args.verbose:
        print "compared_pose_pairs %d pairs"%(len(trans_error))
        print "absolute_translational_error.rmse %f m"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
        print "absolute_translational_error.mean %f m"%numpy.mean(trans_error)
        print "absolute_translational_error.median %f m"%numpy.median(trans_error)
        print "absolute_translational_error.std %f m"%numpy.std(trans_error)
        print "absolute_translational_error.min %f m"%numpy.min(trans_error)
        print "absolute_translational_error.max %f m"%numpy.max(trans_error)
    else:
        print "%f"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
    if args.save_associations:
        file = open(args.save_associations,"w")
        file.write("\n".join(["%f %f %f %f %f %f %f %f"%(a,x1,y1,z1,b,x2,y2,z2) for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A)]))
        file.close()
    if args.save:
        file = open(args.save,"w")
        file.write("\n".join(["%f "%stamp+" ".join(["%f"%d for d in line]) for stamp,line in zip(second_stamps,second_xyz_full_aligned.transpose().A)]))
        file.close()
    if args.plot:
        # import matplotlib lazily with the Agg backend so plotting works headless
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        import matplotlib.pylab as pylab
        from matplotlib.patches import Ellipse
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plot_traj(ax,first_stamps,first_xyz_full.transpose().A,'-',"black","ground truth")
        plot_traj(ax,second_stamps,second_xyz_full_aligned.transpose().A,'-',"blue","estimated")
        # red lines connect each matched groundtruth/estimate pair
        label="difference"
        for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A):
            ax.plot([x1,x2],[y1,y2],'-',color="red",label=label)
            label=""
        ax.legend()
        ax.set_xlabel('x [m]')
        ax.set_ylabel('y [m]')
        plt.savefig(args.plot,dpi=90)
| 8,437 | 42.05102 | 189 | py |
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/tum_benchmark_tools/evaluate_rpe.py | #!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This script computes the relative pose error from the ground truth trajectory
and the estimated trajectory.
"""
import argparse
import random
import numpy
import sys
# Threshold below which a quaternion is treated as degenerate (norm ~ 0).
_EPS = numpy.finfo(float).eps * 4.0
def transform44(l):
    """
    Generate a 4x4 homogeneous transformation matrix from a 3D point and unit quaternion.

    Input:
    l -- tuple consisting of (stamp,tx,ty,tz,qx,qy,qz,qw) where
         (tx,ty,tz) is the 3D position and (qx,qy,qz,qw) is the unit quaternion.

    Output:
    matrix -- 4x4 homogeneous transformation matrix
    """
    t = l[1:4]
    q = numpy.array(l[4:8], dtype=numpy.float64, copy=True)
    nq = numpy.dot(q, q)
    if nq < _EPS:
        # Degenerate quaternion: fall back to identity rotation plus translation.
        # BUG FIX: the commas between the row tuples were missing, which made
        # Python interpret each row as a call on the previous tuple and raised
        # a TypeError whenever this branch ran.
        return numpy.array((
            ( 1.0, 0.0, 0.0, t[0]),
            ( 0.0, 1.0, 0.0, t[1]),
            ( 0.0, 0.0, 1.0, t[2]),
            ( 0.0, 0.0, 0.0, 1.0)
            ), dtype=numpy.float64)
    # Standard quaternion -> rotation matrix conversion (outer-product form).
    q *= numpy.sqrt(2.0 / nq)
    q = numpy.outer(q, q)
    return numpy.array((
        (1.0-q[1, 1]-q[2, 2],     q[0, 1]-q[2, 3],     q[0, 2]+q[1, 3], t[0]),
        (    q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2],     q[1, 2]-q[0, 3], t[1]),
        (    q[0, 2]-q[1, 3],     q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], t[2]),
        (                0.0,                 0.0,                 0.0, 1.0)
        ), dtype=numpy.float64)
def read_trajectory(filename, matrix=True):
    """
    Read a trajectory from a text file.

    Lines starting with '#' are ignored; commas and tabs are accepted as
    separators. Rows with an all-zero quaternion or NaN entries are skipped
    (a warning is written to stderr for NaN rows).

    Input:
    filename -- file to be read
    matrix -- convert poses to 4x4 matrices

    Output:
    dictionary of stamped 3D poses (stamp -> 4x4 matrix, or stamp -> 7-vector)
    """
    # FIX: use a context manager so the file handle is closed (it was leaked),
    # and avoid shadowing the builtins 'file' and 'list'.
    with open(filename) as traj_file:
        data = traj_file.read()
    lines = data.replace(","," ").replace("\t"," ").split("\n")
    rows = [[float(v.strip()) for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
    rows_ok = []
    for i, row in enumerate(rows):
        if row[4:8] == [0,0,0,0]:
            # invalid (all-zero) quaternion -- silently drop the row
            continue
        if any(numpy.isnan(v) for v in row):
            sys.stderr.write("Warning: line %d of file '%s' has NaNs, skipping line\n"%(i,filename))
            continue
        rows_ok.append(row)
    if matrix:
        traj = dict([(row[0], transform44(row[0:])) for row in rows_ok])
    else:
        traj = dict([(row[0], row[1:8]) for row in rows_ok])
    return traj
def find_closest_index(L,t):
    """
    Find the index of the closest value in a list.

    NOTE: assumes L is sorted in ascending order (binary search).

    Input:
    L -- the list
    t -- value to be found

    Output:
    index of the closest element
    """
    lo = 0
    hi = len(L)
    best_idx = 0
    best_diff = abs(L[0] - t)
    while lo < hi:
        mid = (lo + hi) // 2
        diff = abs(L[mid] - t)
        # remember the closest element seen so far
        if diff < best_diff:
            best_diff = diff
            best_idx = mid
        if L[mid] == t:
            return mid
        # narrow the search window to the half that can contain t
        if L[mid] > t:
            hi = mid
        else:
            lo = mid + 1
    return best_idx
def ominus(a,b):
    """
    Compute the relative 3D transformation between a and b.

    Input:
    a -- first pose (homogeneous 4x4 matrix)
    b -- second pose (homogeneous 4x4 matrix)

    Output:
    Relative 3D transformation from a to b, i.e. inv(a) * b.
    """
    a_inverse = numpy.linalg.inv(a)
    return numpy.dot(a_inverse, b)
def scale(a,scalar):
    """
    Scale the translational components of a 4x4 homogeneous matrix by a scale factor.

    The rotation block and the bottom row (including a[3,3]) are left untouched.
    """
    rows = []
    for r in range(4):
        rows.append([a[r,0], a[r,1], a[r,2], a[r,3]])
    # multiply only the translation column of the first three rows
    for r in range(3):
        rows[r][3] = rows[r][3]*scalar
    return numpy.array(rows)
def compute_distance(transform):
    """
    Compute the distance of the translational component of a 4x4 homogeneous matrix.
    """
    translation = transform[0:3,3]
    return numpy.sqrt(numpy.dot(translation, translation))
def compute_angle(transform):
    """
    Compute the rotation angle from a 4x4 homogeneous matrix.
    """
    # trace(R) = 1 + 2*cos(angle); see "An Invitation to 3-D Vision", p. 27.
    # Clamp to [-1, 1] to guard arccos against floating-point overshoot.
    cos_angle = (numpy.trace(transform[0:3,0:3]) - 1.0) / 2.0
    cos_angle = max(-1.0, min(1.0, cos_angle))
    return numpy.arccos(cos_angle)
def distances_along_trajectory(traj):
    """
    Compute the translational distances along a trajectory.

    Input:
    traj -- dict mapping timestamp -> 4x4 homogeneous pose matrix

    Output:
    list of cumulative distances, one entry per pose (first entry is 0)
    """
    # FIX: sorted() instead of keys();.sort() -- dict.keys() is a non-sortable
    # view on Python 3 -- and the accumulator no longer shadows builtin 'sum'.
    stamps = sorted(traj.keys())
    # relative motion between consecutive poses
    motion = [ominus(traj[stamps[i+1]],traj[stamps[i]]) for i in range(len(stamps)-1)]
    distances = [0]
    total = 0
    for t in motion:
        total += compute_distance(t)
        distances.append(total)
    return distances
def rotations_along_trajectory(traj,scale):
    """
    Compute the angular rotations along a trajectory.

    Input:
    traj -- dict mapping timestamp -> 4x4 homogeneous pose matrix
    scale -- factor applied to each angle (e.g. 180/pi for degrees)
             (NOTE: the parameter name shadows the module-level scale()
             function inside this body; kept for interface compatibility)

    Output:
    list of cumulative rotations, one entry per pose (first entry is 0)
    """
    # FIX: sorted() instead of keys();.sort() (Python 3 dict views), and the
    # accumulator no longer shadows builtin 'sum'. Consistent with
    # distances_along_trajectory().
    stamps = sorted(traj.keys())
    motion = [ominus(traj[stamps[i+1]],traj[stamps[i]]) for i in range(len(stamps)-1)]
    distances = [0]
    total = 0
    for t in motion:
        total += compute_angle(t)*scale
        distances.append(total)
    return distances
def evaluate_trajectory(traj_gt,traj_est,param_max_pairs=10000,param_fixed_delta=False,param_delta=1.00,param_delta_unit="s",param_offset=0.00,param_scale=1.00):
    """
    Compute the relative pose error between two trajectories.

    Input:
    traj_gt -- the first trajectory (ground truth), dict: stamp -> 4x4 pose
    traj_est -- the second trajectory (estimated trajectory), dict: stamp -> 4x4 pose
    param_max_pairs -- number of relative poses to be evaluated
    param_fixed_delta -- false: evaluate over all possible pairs
                         true: only evaluate over pairs with a given distance (delta)
    param_delta -- distance between the evaluated pairs
    param_delta_unit -- unit for comparison:
                        "s": seconds
                        "m": meters
                        "rad": radians
                        "deg": degrees
                        "f": frames
    param_offset -- time offset between two trajectories (to model the delay)
    param_scale -- scale to be applied to the second trajectory

    Output:
    list of compared poses and the resulting translation and rotation error,
    one entry per pair: [stamp_est_0, stamp_est_1, stamp_gt_0, stamp_gt_1, trans, rot]
    """
    stamps_gt = list(traj_gt.keys())
    stamps_est = list(traj_est.keys())
    stamps_gt.sort()
    stamps_est.sort()
    # Overlap check: map every estimated stamp to groundtruth and back; if
    # fewer than two estimated stamps survive the round trip, the two files
    # do not cover a common time range.
    stamps_est_return = []
    for t_est in stamps_est:
        t_gt = stamps_gt[find_closest_index(stamps_gt,t_est + param_offset)]
        t_est_return = stamps_est[find_closest_index(stamps_est,t_gt - param_offset)]
        t_gt_return = stamps_gt[find_closest_index(stamps_gt,t_est_return + param_offset)]
        if not t_est_return in stamps_est_return:
            stamps_est_return.append(t_est_return)
    if(len(stamps_est_return)<2):
        raise Exception("Number of overlap in the timestamps is too small. Did you run the evaluation on the right files?")
    # Build the per-pose index used to measure pair separation, in the unit
    # requested by the caller.
    if param_delta_unit=="s":
        index_est = list(traj_est.keys())
        index_est.sort()
    elif param_delta_unit=="m":
        index_est = distances_along_trajectory(traj_est)
    elif param_delta_unit=="rad":
        index_est = rotations_along_trajectory(traj_est,1)
    elif param_delta_unit=="deg":
        index_est = rotations_along_trajectory(traj_est,180/numpy.pi)
    elif param_delta_unit=="f":
        index_est = range(len(traj_est))
    else:
        raise Exception("Unknown unit for delta: '%s'"%param_delta_unit)
    # Select the index pairs to compare: either random pairs (downsampled to
    # param_max_pairs), or, with fixed_delta, each pose paired with the pose
    # that lies param_delta further along the chosen index.
    if not param_fixed_delta:
        if(param_max_pairs==0 or len(traj_est)<numpy.sqrt(param_max_pairs)):
            pairs = [(i,j) for i in range(len(traj_est)) for j in range(len(traj_est))]
        else:
            pairs = [(random.randint(0,len(traj_est)-1),random.randint(0,len(traj_est)-1)) for i in range(param_max_pairs)]
    else:
        pairs = []
        for i in range(len(traj_est)):
            j = find_closest_index(index_est,index_est[i] + param_delta)
            if j!=len(traj_est)-1:
                pairs.append((i,j))
        if(param_max_pairs!=0 and len(pairs)>param_max_pairs):
            pairs = random.sample(pairs,param_max_pairs)
    # A groundtruth match further than twice the median groundtruth sampling
    # interval away is considered invalid and the pair is dropped.
    gt_interval = numpy.median([s-t for s,t in zip(stamps_gt[1:],stamps_gt[:-1])])
    gt_max_time_difference = 2*gt_interval
    result = []
    for i,j in pairs:
        stamp_est_0 = stamps_est[i]
        stamp_est_1 = stamps_est[j]
        stamp_gt_0 = stamps_gt[ find_closest_index(stamps_gt,stamp_est_0 + param_offset) ]
        stamp_gt_1 = stamps_gt[ find_closest_index(stamps_gt,stamp_est_1 + param_offset) ]
        if(abs(stamp_gt_0 - (stamp_est_0 + param_offset)) > gt_max_time_difference or
           abs(stamp_gt_1 - (stamp_est_1 + param_offset)) > gt_max_time_difference):
            continue
        # RPE: discrepancy between the (scaled) estimated relative motion and
        # the groundtruth relative motion over the same interval.
        error44 = ominus( scale(
            ominus( traj_est[stamp_est_1], traj_est[stamp_est_0] ),param_scale),
            ominus( traj_gt[stamp_gt_1], traj_gt[stamp_gt_0] ) )
        trans = compute_distance(error44)
        rot = compute_angle(error44)
        result.append([stamp_est_0,stamp_est_1,stamp_gt_0,stamp_gt_1,trans,rot])
    if len(result)<2:
        raise Exception("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory!")
    return result
def percentile(seq,q):
    """
    Return the q-percentile of a list (q in [0, 1], nearest-rank by truncation).
    """
    ordered = sorted(seq)
    rank = int((len(ordered)-1)*q)
    return ordered[rank]
# Command-line entry point: computes the RPE between a groundtruth and an
# estimated trajectory file, optionally saving/plotting the per-pair errors.
# NOTE: uses Python 2 print statements throughout.
if __name__ == '__main__':
    # fixed seed so the random pair downsampling is reproducible
    random.seed(0)
    parser = argparse.ArgumentParser(description='''
    This script computes the relative pose error from the ground truth trajectory and the estimated trajectory.
    ''')
    parser.add_argument('groundtruth_file', help='ground-truth trajectory file (format: "timestamp tx ty tz qx qy qz qw")')
    parser.add_argument('estimated_file', help='estimated trajectory file (format: "timestamp tx ty tz qx qy qz qw")')
    parser.add_argument('--max_pairs', help='maximum number of pose comparisons (default: 10000, set to zero to disable downsampling)', default=10000)
    parser.add_argument('--fixed_delta', help='only consider pose pairs that have a distance of delta delta_unit (e.g., for evaluating the drift per second/meter/radian)', action='store_true')
    parser.add_argument('--delta', help='delta for evaluation (default: 1.0)',default=1.0)
    parser.add_argument('--delta_unit', help='unit of delta (options: \'s\' for seconds, \'m\' for meters, \'rad\' for radians, \'f\' for frames; default: \'s\')',default='s')
    parser.add_argument('--offset', help='time offset between ground-truth and estimated trajectory (default: 0.0)',default=0.0)
    parser.add_argument('--scale', help='scaling factor for the estimated trajectory (default: 1.0)',default=1.0)
    parser.add_argument('--save', help='text file to which the evaluation will be saved (format: stamp_est0 stamp_est1 stamp_gt0 stamp_gt1 trans_error rot_error)')
    parser.add_argument('--plot', help='plot the result to a file (requires --fixed_delta, output format: png)')
    parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the mean translational error measured in meters will be printed)', action='store_true')
    args = parser.parse_args()
    # plotting over time only makes sense for a fixed pair spacing
    if args.plot and not args.fixed_delta:
        sys.exit("The '--plot' option can only be used in combination with '--fixed_delta'")
    traj_gt = read_trajectory(args.groundtruth_file)
    traj_est = read_trajectory(args.estimated_file)
    result = evaluate_trajectory(traj_gt,
                                 traj_est,
                                 int(args.max_pairs),
                                 args.fixed_delta,
                                 float(args.delta),
                                 args.delta_unit,
                                 float(args.offset),
                                 float(args.scale))
    # result columns: stamp_est_0, stamp_est_1, stamp_gt_0, stamp_gt_1, trans, rot
    stamps = numpy.array(result)[:,0]
    trans_error = numpy.array(result)[:,4]
    rot_error = numpy.array(result)[:,5]
    if args.save:
        f = open(args.save,"w")
        f.write("\n".join([" ".join(["%f"%v for v in line]) for line in result]))
        f.close()
    if args.verbose:
        print "compared_pose_pairs %d pairs"%(len(trans_error))
        print "translational_error.rmse %f m"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
        print "translational_error.mean %f m"%numpy.mean(trans_error)
        print "translational_error.median %f m"%numpy.median(trans_error)
        print "translational_error.std %f m"%numpy.std(trans_error)
        print "translational_error.min %f m"%numpy.min(trans_error)
        print "translational_error.max %f m"%numpy.max(trans_error)
        print "rotational_error.rmse %f deg"%(numpy.sqrt(numpy.dot(rot_error,rot_error) / len(rot_error)) * 180.0 / numpy.pi)
        print "rotational_error.mean %f deg"%(numpy.mean(rot_error) * 180.0 / numpy.pi)
        # NOTE(review): unlike its neighbours, the median is NOT converted
        # from radians to degrees here -- looks like an oversight; verify.
        print "rotational_error.median %f deg"%numpy.median(rot_error)
        print "rotational_error.std %f deg"%(numpy.std(rot_error) * 180.0 / numpy.pi)
        print "rotational_error.min %f deg"%(numpy.min(rot_error) * 180.0 / numpy.pi)
        print "rotational_error.max %f deg"%(numpy.max(rot_error) * 180.0 / numpy.pi)
    else:
        # default output: mean translational error in meters
        print numpy.mean(trans_error)
    if args.plot:
        # headless backend so this works without a display
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        import matplotlib.pylab as pylab
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(stamps - stamps[0],trans_error,'-',color="blue")
        #ax.plot([t for t,e in err_rot],[e for t,e in err_rot],'-',color="red")
        ax.set_xlabel('time [s]')
        ax.set_ylabel('translational error [m]')
        plt.savefig(args.plot,dpi=300)
| 15,381 | 39.16188 | 192 | py |
rpg_svo | rpg_svo-master/svo_analysis/src/svo_analysis/tum_benchmark_tools/__init__.py | # -*- coding: utf-8 -*-
| 25 | 7.666667 | 23 | py |
rpg_svo | rpg_svo-master/svo_analysis/scripts/comparison.py | #!/usr/bin/python
import os
import sys
import time
import rospkg
import numpy as np
import matplotlib.pyplot as plt
import yaml
import argparse
import svo_analysis.analyse_depth as analyse_depth
from matplotlib import rc
# tell matplotlib to use latex font
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
def plot_trajectory(ax, filename, label, color, linewidth):
    """Plot the x/y positions of a trajectory file onto the given axes.

    The file holds one pose per line ('timestamp tx ty tz ...'); commas and
    tabs are accepted as separators and lines starting with '#' are skipped.

    ax -- matplotlib axes to draw on
    filename -- trajectory file path
    label, color, linewidth -- forwarded to ax.plot()
    """
    # FIX: close the file handle via a context manager (it was leaked) and
    # stop shadowing the builtin 'file'.
    with open(filename) as traj_file:
        data = traj_file.read()
    lines = data.replace(","," ").replace("\t"," ").split("\n")
    trajectory = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
    # column 1 = x, column 2 = y
    ax.plot(trajectory[:,1], trajectory[:,2], label=label, color=color, linewidth=linewidth)
def save_figure(fig, name, directory):
    """Tighten the layout of *fig* and save it as <directory>/<name>.pdf."""
    fig.tight_layout()
    target_path = os.path.join(directory, name+'.pdf')
    fig.savefig(target_path, bbox_inches="tight")
def distances_along_trajectory(traj):
    # Cumulative translational distance along a trajectory of stamped poses.
    # NOTE(review): this function calls ominus() and compute_distance(), which
    # are NOT defined or imported anywhere in this file (they belong to the
    # TUM evaluate_rpe tools) -- calling it raises NameError. It appears to be
    # dead code copied over; get_distance_from_start() below is used instead.
    # Also: traj.keys() followed by .sort() only works on Python 2 lists.
    keys = traj.keys()
    keys.sort()
    # relative motion between consecutive poses
    motion = [ominus(traj[keys[i+1]],traj[keys[i]]) for i in range(len(keys)-1)]
    distances = [0]
    sum = 0  # NOTE: shadows the builtin 'sum'
    for t in motion:
        sum += compute_distance(t)
        distances.append(sum)
    return distances
def get_distance_from_start(gt):
    """Cumulative travelled distance for each groundtruth pose.

    gt -- array whose columns 1:4 hold the x,y,z position (column 0 is the
          timestamp)
    Returns an array of the same length as gt, starting at 0.
    """
    position_deltas = np.diff(gt[:,1:4], axis=0)
    step_lengths = np.sqrt(np.sum(position_deltas**2, 1))
    return np.concatenate(([0], np.cumsum(step_lengths)))
def compare_results(comp_params, results_dir, comparison_dir):
    """Generate comparison figures (trajectory, translation error, depth
    error) for the experiment sets listed in *comp_params*.

    comp_params -- dict loaded from the comparison YAML file; must contain
                   'comparison_name' and 'experiment_sets' (each with a
                   'label' and a list of 'experiments' folder names)
    results_dir -- directory holding the per-experiment result folders
    comparison_dir -- output directory for the generated PDF figures
    """
    print('run comparison: '+comp_params['comparison_name'])
    line_styles = ['-','--',':']
    line_colors = ['b','g','r','m','c']
    # -------------------------------------------------------------------------
    # plot trajectory: one color per experiment set, one line style per run
    fig = plt.figure(figsize=(6,2))
    ax = fig.add_subplot(111, xlabel='x [m]', ylabel='y [m]')
    for exp_set in comp_params['experiment_sets']:
        print('processing experiment set: ' + exp_set['label'])
        for i, exp in enumerate(exp_set['experiments']):
            data = np.loadtxt(os.path.join(results_dir, exp, 'traj_estimate.txt'))
            if i == 0:
                # first run defines the color that the other runs reuse
                base_plot, = ax.plot(data[:,1], data[:,2], label=exp_set['label'], linestyle=line_styles[np.mod(i, len(line_styles))])
            else:
                ax.plot(data[:,1], data[:,2], color=base_plot.get_color(), linestyle=line_styles[np.mod(i, len(line_styles))])
    ax.legend(loc='upper left', ncol=3)
    save_figure(fig, 'trajectory', comparison_dir)
    # -------------------------------------------------------------------------
    # plot translation error over travelled distance:
    fig = plt.figure(figsize=(6,2))
    ax = fig.add_subplot(111, xlabel='distance [m]', ylabel='translation drift [mm]')
    for exp_set in comp_params['experiment_sets']:
        print('processing experiment set: ' + exp_set['label'])
        for i, exp in enumerate(exp_set['experiments']):
            gt = np.loadtxt(os.path.join(results_dir, exp, 'groundtruth_matched.txt'))
            distances = get_distance_from_start(gt)
            data = np.loadtxt(os.path.join(results_dir, exp, 'translation_error.txt'))
            e = np.sqrt(np.sum(np.multiply(data[:,1:4],data[:,1:4]),1))
            # trim both arrays to a common length
            if np.shape(e)[0] > np.shape(distances)[0]:
                print('WARNING: estimate has more measurement than groundtruth: '
                      +str(np.shape(e)[0]-np.shape(distances)[0]))
                e = e[0:np.shape(distances)[0]]
            distances = distances[0:np.shape(e)[0]]
            print('--')
            print(np.shape(e))
            print(np.shape(distances))
            if i == 0:
                base_plot, = ax.plot(distances, e*1000, label=exp_set['label'], linestyle=line_styles[np.mod(i, len(line_styles))])
            else:
                ax.plot(distances, e*1000, color=base_plot.get_color(), linestyle=line_styles[np.mod(i, len(line_styles))])
    ax.legend(loc='upper left', ncol=3)
    save_figure(fig, 'translation_error', comparison_dir)
    # -------------------------------------------------------------------------
    # plot depth estimation error, one curve per experiment set (first run of
    # each set):
    fig = plt.figure(figsize=(6,5))
    ax = fig.add_subplot(111, xlabel='Travelled distance [m]', ylabel='Error [m]')
    for k, exp_set in enumerate(comp_params['experiment_sets']):
        exp = exp_set['experiments'][0]
        # BUG FIX: 'exp' was printed BEFORE it was assigned, so the log showed
        # the stale experiment name from the previous loop.
        print('plot depth error for experiment: ' + exp)
        gt = np.loadtxt(os.path.join(results_dir, exp, 'groundtruth_matched.txt'))
        x_axis_data = get_distance_from_start(gt)
        analyse_depth.plot_depth_over_time(os.path.join(results_dir, exp), ax,
                                           x_axis_data[1:], line_colors[k], exp_set['label'])
    ax.legend(loc='upper left', ncol=3)
    save_figure(fig, 'depth_error_textures', comparison_dir)
    # depth estimation error for all runs of the first experiment set,
    # labelled by the dataset speed:
    fig = plt.figure(figsize=(6,5))
    ax = fig.add_subplot(111, xlabel='Travelled distance [m]', ylabel='Error [m]')
    exp_set = comp_params['experiment_sets'][0]
    for i, exp in enumerate(exp_set['experiments']):
        print('plot depth error for speed: ' + exp)
        gt = np.loadtxt(os.path.join(results_dir, exp, 'groundtruth_matched.txt'))
        x_axis_data = get_distance_from_start(gt)
        params = yaml.load(open(os.path.join(results_dir, exp, 'dataset_params.yaml')))
        analyse_depth.plot_depth_over_time(os.path.join(results_dir, exp), ax,
                                           x_axis_data[1:], line_colors[i],
                                           str(params['trajectory_modifiers']['speed'])+' m/s')
    ax.legend(loc='upper left', ncol=3)
    save_figure(fig, 'depth_error_speed', comparison_dir)
# Command-line entry point: loads a comparison YAML from
# svo_analysis/comparisons and generates the comparison figures.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Compare results of a VO pipeline.')
    parser.add_argument('comparison_file', help='A YAML file that contains the details of the comparison')
    args = parser.parse_args()
    # load comparison file (the '.yaml' suffix is optional on the command line)
    args.experiment_file = args.comparison_file.replace('.yaml','')
    comparison_params_file = os.path.join(rospkg.RosPack().get_path('svo_analysis'),
                                          'comparisons', args.comparison_file+'.yaml')
    if os.path.exists(comparison_params_file):
        comp_params = yaml.load(open(comparison_params_file, 'r'))
    else:
        raise Exception("Provided comparison file does not exist.")
    # create folder for comparison results
    comparison_dir = os.path.join(rospkg.RosPack().get_path('svo_analysis'),
                                  'comparisons', comp_params['comparison_name'])
    if not os.path.exists(comparison_dir):
        os.makedirs(comparison_dir)
    # folder where the results of previous experiments are saved
    results_dir = os.path.join(rospkg.RosPack().get_path('svo_analysis'), 'results')
    # run comparison
    compare_results(comp_params, results_dir, comparison_dir)
| 7,184 | 43.90625 | 150 | py |
rpg_svo | rpg_svo-master/svo_analysis/scripts/benchmark.py | #!/usr/bin/python
"""
Created on Sat Aug 10 18:21:47 2013
@author: Christian Forster
"""
import os
import yaml
import rospkg
import argparse
import time
import vikit_py.cpu_info as cpu_info
import vikit_py.ros_node as ros_node
import evaluate
import shutil
def run_experiment(dataset, params):
    """Run SVO on one dataset: assemble the parameter dict, dump it to the
    trace directory, copy the groundtruth files there and launch the ROS node.

    dataset -- dataset folder name below $SVO_DATASET_DIR
    params -- experiment parameter dict; must already contain 'param_settings'
              and 'trace_dir'
    NOTE(review): reads the module-level 'args' (set in __main__) for the SVO
    version/executable -- this function only works when called from __main__.
    """
    # load dataset parameters
    params['dataset_directory'] = os.path.join(os.environ['SVO_DATASET_DIR'], dataset)
    if not os.path.exists(params['dataset_directory']):
        raise Exception("Provided dataset folder does not exist.")
    dataset_params_file = os.path.join(params['dataset_directory'], 'dataset_params.yaml')
    dataset_params = yaml.load(open(dataset_params_file,'r'))
    # load algorithm parameters
    algo_params_file = os.path.join(rospkg.RosPack().get_path('svo_ros'),
                                    'param', params['param_settings']+'.yaml')
    algo_params = yaml.load(open(algo_params_file,'r'))
    # combine all parameters; for a single-camera rig, the cam0 block is
    # flattened into the top level.
    # NOTE(review): dict(a.items() + b.items()) is Python-2-only; Python 3
    # items() views cannot be concatenated with '+'.
    if 'rig_size' not in dataset_params or dataset_params['rig_size'] == 1:
        params = dict(params.items() + algo_params.items() + dataset_params['cam0'].items())
    else:
        params = dict(params.items() + algo_params.items() + dataset_params.items())
    if 'dataset_is_blender' in dataset_params:
        params['dataset_is_blender'] = dataset_params['dataset_is_blender']
    else:
        params['dataset_is_blender'] = False
    if 'dataset_first_frame' in dataset_params:
        params['dataset_first_frame'] = dataset_params['dataset_first_frame']
    else:
        params['dataset_first_frame'] = 0
    # dump experiment params to file and copy the other parameter files:
    params_dump_file = os.path.join(params['trace_dir'],'params.yaml')
    with open(params_dump_file,'w') as outfile:
        outfile.write(yaml.dump(params, default_flow_style=False))
    shutil.copyfile(dataset_params_file,
                    os.path.join(params['trace_dir'], 'dataset_params.yaml'))
    # copy the groundtruth trajectory to the trace dir for later evaluation
    if params['dataset_is_blender']:
        shutil.copyfile(os.path.join(params['dataset_directory'], 'trajectory_nominal.txt'),
                        os.path.join(params['trace_dir'], 'groundtruth_matched.txt'))
    else:
        shutil.copyfile(os.path.join(params['dataset_directory'], 'groundtruth_matched.txt'),
                        os.path.join(params['trace_dir'], 'groundtruth_matched.txt'))
    # execute ros node
    node = ros_node.RosNode(args.version, args.executable)
    node.run(params)
# Command-line entry point: runs SVO on every dataset listed in the experiment
# file and evaluates each resulting trace directory.
if __name__=="__main__":
    # parse command line
    parser = argparse.ArgumentParser(description='''
    Runs SVO with the dataset and parameters specified in the provided experiment file.
    ''')
    parser.add_argument('experiment_file', help='experiment file in svo_analysis/experiments folder')
    parser.add_argument('--evaluate', help='evaluate tracefile after running SVO', action='store_true')
    parser.add_argument('--version', help='version of svo to evaluate', default='svo_ros')
    parser.add_argument('--executable', help='the executable to be called', default='benchmark')
    args = parser.parse_args()
    # load experiment parameters ('.yaml' suffix optional on the command line)
    args.experiment_file = args.experiment_file.replace('.yaml','')
    experiment_params_file = os.path.join(rospkg.RosPack().get_path('svo_analysis'),
                                          'experiments', args.experiment_file+'.yaml')
    if os.path.exists(experiment_params_file):
        experiment_params = yaml.load(open(experiment_params_file, 'r'))
    else:
        # fall back to defaults and treat the argument as a dataset name
        print('experiment file does not exist. run with default setttings')
        experiment_params = dict({'experiment_label':'default','param_settings':'vo_fast',
                                  'datasets':list([args.experiment_file])})
    params = dict()
    params['experiment_label'] = experiment_params['experiment_label']
    params['param_settings'] = experiment_params['param_settings']
    params['time'] = time.strftime("%Y%m%d_%H%M", time.localtime())
    params['platform'] = cpu_info.get_cpu_info()
    params['trace_name'] = 'trace'
    trace_dirs = list()
    # one trace directory per dataset, named <time>_<version>_<experiment>_<dataset>
    for dataset in experiment_params['datasets']:
        params['experiment_name'] = params['time']+'_'+args.version+'_'+args.experiment_file+'_'+dataset
        params['trace_dir'] = os.path.join(rospkg.RosPack().get_path('svo_analysis'),
                                           'results', params['experiment_name'])
        if not os.path.exists(params['trace_dir']):
            os.makedirs(params['trace_dir'])
        trace_dirs.append(params['trace_dir'])
        run_experiment(dataset, params)
    # TODO: check if it is a synthetic dataset
    # NOTE(review): the --evaluate flag is parsed but never checked -- the
    # evaluation below runs unconditionally.
    for trace in trace_dirs:
        evaluate.evaluate_dataset(trace)
| 4,842 | 42.241071 | 104 | py |
rpg_svo | rpg_svo-master/svo_analysis/scripts/evaluate.py | #!/usr/bin/python
"""
Created on Sat Aug 10 18:21:47 2013
@author: Christian Forster
"""
import os
import csv
import numpy as np
import rospkg
import argparse
import yaml
import svo_analysis.analyse_logs as analyse_logs
import svo_analysis.analyse_timing as analyse_timing
import svo_analysis.analyse_trajectory as analyse_trajectory
import svo_analysis.analyse_depth as analyse_depth
def evaluate_dataset(trace_dir):
    """Read <trace_dir>/trace.csv and generate all analysis plots for it.

    trace_dir -- directory containing trace.csv and dataset_params.yaml
    """
    # read tracefile
    data = csv.reader(open(os.path.join(trace_dir, 'trace.csv')))
    # NOTE(review): reader.next() is Python-2-only (Python 3: next(reader));
    # the first row holds the column names.
    fields = data.next()
    D = dict()
    for field in fields:
        D[field] = list()
    # fill dictionary with column values
    for row in data:
        for (field, value) in zip(fields, row):
            D[field].append(float(value))
    # change dictionary values from list to numpy array for easier manipulation
    for field, value in D.items():
        D[field] = np.array(D[field])
    # generate plots
    analyse_logs.analyse_logs(D, trace_dir)
    analyse_timing.analyse_timing(D, trace_dir)
    analyse_trajectory.analyse_trajectory(trace_dir)
    # depth analysis only runs for Blender datasets (flag from dataset_params.yaml)
    param = yaml.load(open(os.path.join(trace_dir, 'dataset_params.yaml'), 'r'))
    if param['dataset_is_blender']:
        analyse_depth.analyse_depth(trace_dir)
        analyse_depth.analyse_depth_over_time(trace_dir)
# Command-line entry point: evaluates the named experiment's trace directory
# below svo_analysis/results.
if __name__=="__main__":
    parser = argparse.ArgumentParser(description='''
    Evaluates tracefiles of SVO and generates the plots.
    ''')
    parser.add_argument('experiment_name', help='directory name of the tracefiles')
    args = parser.parse_args()
    trace_dir = os.path.join(rospkg.RosPack().get_path('svo_analysis'),
                             'results',
                             args.experiment_name)
    evaluate_dataset(trace_dir)
rpg_svo | rpg_svo-master/svo_analysis/scripts/compare_results.py | #!/usr/bin/python
import os
import sys
import time
import rospkg
import numpy as np
import matplotlib.pyplot as plt
import yaml
import argparse
from matplotlib import rc
# tell matplotlib to use latex font
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
def plot_trajectory(ax, filename, label, color, linewidth):
    """Plot the x/y positions of a trajectory file onto the given axes.

    The file holds one pose per line ('timestamp tx ty tz ...'); commas and
    tabs are accepted as separators and lines starting with '#' are skipped.

    ax -- matplotlib axes to draw on
    filename -- trajectory file path
    label, color, linewidth -- forwarded to ax.plot()
    """
    # FIX: close the file handle via a context manager (it was leaked) and
    # stop shadowing the builtin 'file'.
    with open(filename) as traj_file:
        data = traj_file.read()
    lines = data.replace(","," ").replace("\t"," ").split("\n")
    trajectory = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
    # column 1 = x, column 2 = y
    ax.plot(trajectory[:,1], trajectory[:,2], label=label, color=color, linewidth=linewidth)
def compare_results(experiments, results_dir, comparison_dir,
                    plot_scale_drift = False):
    """Plot translation, orientation and (optionally) scale-drift errors of a
    list of experiments into *comparison_dir* as PDF figures.

    experiments -- list of experiment folder names inside results_dir; each
                   must contain params.yaml, translation_error.txt and
                   orientation_error.txt (plus scale_drift.txt when
                   plot_scale_drift is set)
    results_dir -- directory holding the experiment folders
    comparison_dir -- output directory for the generated figures
    plot_scale_drift -- also generate the scale-drift figure
    """
    # ------------------------------------------------------------------------------
    # position error
    fig_poserr = plt.figure(figsize=(8,6))
    ax_poserr_x = fig_poserr.add_subplot(311, ylabel='x-error [m]')
    ax_poserr_y = fig_poserr.add_subplot(312, ylabel='y-error [m]')
    ax_poserr_z = fig_poserr.add_subplot(313, ylabel='z-error [m]', xlabel='time [s]')
    for exp in experiments:
        # load dataset parameters
        params_stream = open(os.path.join(results_dir, exp, 'params.yaml'))
        params = yaml.load(params_stream)
        # plot translation error; timestamps are shifted to start at zero
        trans_error = np.loadtxt(os.path.join(results_dir, exp, 'translation_error.txt'))
        trans_error[:,0] = trans_error[:,0]-trans_error[0,0]
        ax_poserr_x.plot(trans_error[:,0], trans_error[:,1], label=params['experiment_label'])
        ax_poserr_y.plot(trans_error[:,0], trans_error[:,2])
        ax_poserr_z.plot(trans_error[:,0], trans_error[:,3])
    ax_poserr_x.set_xlim([0, trans_error[-1,0]+4])
    ax_poserr_y.set_xlim([0, trans_error[-1,0]+4])
    ax_poserr_z.set_xlim([0, trans_error[-1,0]+4])
    ax_poserr_x.legend(bbox_to_anchor=[0, 0], loc='lower left', ncol=3)
    ax_poserr_x.grid()
    ax_poserr_y.grid()
    ax_poserr_z.grid()
    fig_poserr.tight_layout()
    fig_poserr.savefig(os.path.join(comparison_dir, 'translation_error.pdf'))
    # ------------------------------------------------------------------------------
    # orientation error
    fig_roterr = plt.figure(figsize=(8,6))
    ax_roterr_r = fig_roterr.add_subplot(311, ylabel='roll-error [rad]')
    ax_roterr_p = fig_roterr.add_subplot(312, ylabel='pitch-error [rad]')
    ax_roterr_y = fig_roterr.add_subplot(313, ylabel='yaw-error [rad]', xlabel='time [s]')
    for exp in experiments:
        # load dataset parameters
        params_stream = open(os.path.join(results_dir, exp, 'params.yaml'))
        params = yaml.load(params_stream)
        # plot orientation error; columns 1..3 are yaw/pitch/roll
        rot_error = np.loadtxt(os.path.join(results_dir, exp, 'orientation_error.txt'))
        rot_error[:,0] = rot_error[:,0]-rot_error[0,0]
        ax_roterr_r.plot(rot_error[:,0], rot_error[:,3], label=params['experiment_label'])
        ax_roterr_p.plot(rot_error[:,0], rot_error[:,2])
        ax_roterr_y.plot(rot_error[:,0], rot_error[:,1])
    ax_roterr_r.set_xlim([0, rot_error[-1,0]+4])
    ax_roterr_p.set_xlim([0, rot_error[-1,0]+4])
    ax_roterr_y.set_xlim([0, rot_error[-1,0]+4])
    ax_roterr_r.legend(bbox_to_anchor=[0, 1], loc='upper left', ncol=3)
    ax_roterr_r.grid()
    ax_roterr_p.grid()
    ax_roterr_y.grid()
    fig_roterr.tight_layout()
    fig_roterr.savefig(os.path.join(comparison_dir, 'orientation_error.pdf'))
    # ------------------------------------------------------------------------------
    # scale error
    if plot_scale_drift:
        fig_scale = plt.figure(figsize=(8,2.5))
        ax_scale = fig_scale.add_subplot(111, xlabel='time [s]', ylabel='scale change [\%]')
        for exp in experiments:
            # load dataset parameters
            params = yaml.load(open(os.path.join(results_dir, exp, 'params.yaml')))
            # BUG FIX: this used open() and then indexed the file object with
            # [:,0], which raises a TypeError. Load the data as an array.
            scale_drift = np.loadtxt(os.path.join(results_dir, exp, 'scale_drift.txt'))
            scale_drift[:,0] = scale_drift[:,0]-scale_drift[0,0]
            ax_scale.plot(scale_drift[:,0], scale_drift[:,1], label=params['experiment_label'])
        # NOTE(review): rot_error is the last array from the orientation loop
        # above, reused here as the time-axis limit -- verify this is intended.
        ax_scale.set_xlim([0, rot_error[-1,0]+4])
        ax_scale.legend(bbox_to_anchor=[0, 1], loc='upper left', ncol=3)
        ax_scale.grid()
        fig_scale.tight_layout()
        fig_scale.savefig(os.path.join(comparison_dir, 'scale_drift.pdf'))
# ------------------------------------------------------------------------------
# trajectory
# fig_traj = plt.figure(figsize=(8,4.8))
# ax_traj = fig_traj.add_subplot(111, xlabel='x [m]', ylabel='y [m]', aspect='equal', xlim=[-3.1, 4], ylim=[-1.5, 2.6])
#
# plotTrajectory(ax_traj, '/home/cforster/Datasets/asl_vicon_d2/groundtruth_filtered.txt', 'Groundtruth', 'k', 1.5)
# plotTrajectory(ax_traj, results_dir+'/20130911_2229_nslam_i7_asl2_fast/traj_estimate_rotated.txt', 'Fast', 'g', 1)
# plotTrajectory(ax_traj, results_dir+'/20130906_2149_ptam_i7_asl2/traj_estimate_rotated.txt', 'PTAM', 'r', 1)
#
# mark_inset(ax_traj, axins, loc1=2, loc2=4, fc="none", ec='b')
# plt.draw()
# plt.show()
# ax_traj.legend(bbox_to_anchor=[1, 0], loc='lower right', ncol=3)
# ax_traj.grid()
# fig_traj.tight_layout()
# fig_traj.savefig('../results/trajectory_asl.pdf')
# Command-line entry point: compares the given result directories and writes
# the figures into results/<name> (default name is timestamped).
if __name__ == '__main__':
    default_name = time.strftime("%Y%m%d_%H%M", time.localtime())+'_comparison'
    parser = argparse.ArgumentParser(description='Compare results.')
    parser.add_argument('result_directories', nargs='+', help='list of result directories to compare')
    parser.add_argument('--name', help='name of the comparison', default=default_name)
    args = parser.parse_args()
    # create folder for comparison results
    results_dir = os.path.join(rospkg.RosPack().get_path('svo_analysis'), 'results')
    comparison_dir = os.path.join(results_dir, args.name)
    if not os.path.exists(comparison_dir):
        os.makedirs(comparison_dir)
    # run comparison
    compare_results(args.result_directories, results_dir, comparison_dir)
| 6,127 | 39.582781 | 148 | py |
rpg_svo | rpg_svo-master/rqt_svo/setup.py | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=['rqt_svo'],
package_dir={'': 'src'},
scripts=['scripts/rqt_svo']
)
setup(**d) | 248 | 19.75 | 60 | py |
rpg_svo | rpg_svo-master/rqt_svo/src/rqt_svo/svo.py | #!/usr/bin/env python
import os
import rospy
import argparse
from qt_gui.plugin import Plugin
from .svo_widget import SvoWidget
class Svo(Plugin):
    """rqt plugin that displays SVO status.

    Wraps a :class:`SvoWidget` in the rqt plugin context and persists the
    selected SVO namespace across rqt sessions.
    """

    def __init__(self, context):
        """Create the widget and register it with the rqt plugin context."""
        super(Svo, self).__init__(context)
        self.setObjectName('SvoPlugin')

        # Parse (currently unused) plugin command-line arguments.
        # TODO load topic name from args
        args = self._parse_args(context.argv())

        # Create QWidget
        self._widget = SvoWidget()

        # Show _widget.windowTitle on left-top of each plugin (when
        # it's set in _widget). This is useful when you open multiple
        # plugins at once. Also if you open multiple instances of your
        # plugin at once, these lines add number to make it easy to
        # tell from pane to pane.
        if context.serial_number() > 1:
            self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))

        # Add widget to the user interface
        context.add_widget(self._widget)

    def _parse_args(self, argv):
        """Parse the plugin argv list into an argparse namespace.

        Topics are plain strings.  The previous ``type=argparse.FileType('r')``
        made argparse try to *open* each topic name as a file, which fails
        for any real ROS topic name.
        """
        parser = argparse.ArgumentParser(prog='rqt_svo', add_help=False)
        group = parser.add_argument_group('Options for rqt_svo plugin')
        group.add_argument('topic', type=str, nargs='*', default=[], help='Svo Info Topic to display')
        return parser.parse_args(argv)

    def save_settings(self, plugin_settings, instance_settings):
        """Persist the currently selected SVO namespace."""
        print('Saving namespace')
        namespace = self._widget._svo_namespace
        instance_settings.set_value('namespace', namespace)

    def restore_settings(self, plugin_settings, instance_settings):
        """Restore the SVO namespace saved by :meth:`save_settings`."""
        namespace = instance_settings.value('namespace', 'default')
        self._widget.topic_line_edit.setText(namespace)
| 2,790 | 33.036585 | 117 | py |
rpg_svo | rpg_svo-master/rqt_svo/src/rqt_svo/svo_widget.py | #!/usr/bin/env python
import os
import rospy
import rospkg
import numpy as np
from python_qt_binding import loadUi
from python_qt_binding.QtGui import QWidget
from python_qt_binding.QtCore import QTimer, Slot
from svo_msgs.msg import Info
from std_msgs.msg import String
class SvoWidget(QWidget):
    """Qt widget showing live SVO status (fps, feature count, tracking stage).

    Subscribes to ``<namespace>/info`` and publishes single-character
    remote-key commands on ``<namespace>/remote_key``.
    """

    # Cached state shared between the ROS callback and the GUI timer.
    _last_info_msg = Info()
    _publisher = None
    _subscriber = None
    _num_received_msgs = 0
    _svo_namespace = None

    def __init__(self, svo_namespace='svo'):
        """Load the .ui file, start the refresh timer, and hook up ROS."""
        super(SvoWidget, self).__init__()
        self.setObjectName('SvoWidget')

        # load UI
        ui_file = os.path.join(rospkg.RosPack().get_path('rqt_svo'), 'resource', 'widget.ui')
        loadUi(ui_file, self)

        # Refresh the displayed data every 40 ms (~25 Hz) via update_info.
        self._update_info_timer = QTimer(self)
        self._update_info_timer.timeout.connect(self.update_info)
        self._update_info_timer.start(40)

        # Buttons publish remote-key commands to SVO.
        self.button_start.pressed.connect(self.on_start_button_pressed)
        self.button_reset.pressed.connect(self.on_reset_button_pressed)
        self.button_quit.pressed.connect(self.on_quit_button_pressed)

        # React to namespace edits in the topic line edit.
        self.topic_line_edit.setText(svo_namespace)
        self.register(svo_namespace)
        self.topic_line_edit.textChanged.connect(self._on_topic_changed)

        # TODO: set a timer when the last message was received and give a warning if it is too long ago!

    @Slot(str)
    def _on_topic_changed(self, topic):
        """Re-register publisher/subscriber under the newly typed namespace."""
        self._svo_namespace = str(topic)
        self.unregister()
        self.register(self._svo_namespace)

    def register(self, svo_namespace):
        """Create the ROS subscriber/publisher for ``svo_namespace``."""
        # Scale the feature bar to SVO's configured feature budget.
        max_num_features = rospy.get_param(svo_namespace+'/max_fts', 120)
        self.num_tracked_bar.setMaximum(max_num_features)
        print('set maximum number of features to '+str(max_num_features))

        # Subscribe to ROS Info topic and register callback
        self._subscriber = rospy.Subscriber(svo_namespace+'/info', Info, self.info_cb)

        # queue_size makes the publisher asynchronous and silences rospy's
        # deprecation warning about synchronous publishing.
        self._publisher = rospy.Publisher(svo_namespace+'/remote_key', String, queue_size=10)

    def unregister(self):
        """Tear down the ROS publisher/subscriber (safe to call repeatedly)."""
        if self._publisher is not None:
            self._publisher.unregister()
            self._publisher = None
        if self._subscriber is not None:
            self._subscriber.unregister()
            self._subscriber = None

    def info_cb(self, msg):
        """ROS callback: cache the latest Info message for the GUI timer."""
        self._last_info_msg = msg
        self._num_received_msgs += 1

    def update_info(self):
        """Timer slot: render the cached Info message into label and bar."""
        info_text = 'Not Connected'
        if self._num_received_msgs > 0:
            fps = 0
            if self._last_info_msg.processing_time > 0:
                fps = 1.0/self._last_info_msg.processing_time
            info_text = 'fps = %.2f' % fps
            info_text += '\t #Features = ' + str(self._last_info_msg.num_matches)
            info_text += '\t'
            if self._last_info_msg.stage == 0:
                info_text += '\t PAUSED'
            elif self._last_info_msg.stage == 1:
                info_text += '\t FIRST_FRAME'
            elif self._last_info_msg.stage == 2:
                info_text += '\t SECOND_FRAME'
            elif self._last_info_msg.stage == 3:
                info_text += '\t RUNNING'
            if self._last_info_msg.tracking_quality == 0:
                info_text += '\t CRITICAL'
            elif self._last_info_msg.tracking_quality == 1:
                info_text += '\t BAD TRACKING'
            elif self._last_info_msg.tracking_quality == 2:
                info_text += '\t GOOD TRACKING'
        # set info text
        self.svo_info_label.setText(info_text)
        # set progress bar
        self.num_tracked_bar.setValue(self._last_info_msg.num_matches)

    def on_start_button_pressed(self):
        print('START SLAM')
        self.send_command('s')

    def on_reset_button_pressed(self):
        print('RESET SLAM')
        self.send_command('r')

    def on_quit_button_pressed(self):
        print('QUIT SLAM')
        self.send_command('q')

    def send_command(self, cmd):
        """Publish a one-letter remote-key command (no-op when unregistered)."""
        if self._publisher is None:
            return
        self._publisher.publish(String(cmd))
| 4,040 | 30.818898 | 100 | py |
rpg_svo | rpg_svo-master/rqt_svo/src/rqt_svo/__init__.py | 0 | 0 | 0 | py | |
tagperson-blender | tagperson-blender-master/tools/batch_generate.py | import bpy
from blendereid.config import get_cfg
from blendereid.utils import bpy_utils, misc
from blendereid.core import process
def parse_args(argv=None):
    """Parse command-line arguments for the blender renderer.

    Args:
        argv: optional list of argument strings; ``None`` (the default)
            keeps argparse's behavior of reading ``sys.argv[1:]``, so all
            existing call sites keep working.

    Returns:
        argparse.Namespace with ``config_file`` (path string) and ``opts``
        (remaining tokens used as config overrides).
    """
    import argparse
    parser = argparse.ArgumentParser(description='args parser for blender renderer.')
    parser.add_argument('--config-file', default='', help='')
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser.parse_args(argv)
def setup(args):
    """Build the frozen render configuration from CLI arguments.

    Loads the default config, overlays the yaml file and the command-line
    overrides, then freezes the result so later code cannot mutate it.
    """
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    # default_setup(cfg, args)
    return config
if __name__ == "__main__":
    # Blender batch entry point (expects to run inside Blender's Python,
    # since bpy is imported above).
    bpy_utils.register_bpy_libs()
    # Remove the default scene cube so it never shows up in renders.
    bpy_utils.remove_object(obj_key='Cube')
    args = parse_args()
    print("Command Line Args:", args)
    cfg = setup(args)
    # Offset the seed by the start index so runs started at different
    # PROCESS.FIRST_INDEX values draw different random streams.
    misc.fix_random_seeds(cfg.FIX_SEED + cfg.PROCESS.FIRST_INDEX - 1)  # ensure different seeds for start index
    process.generate_multiple_persons(cfg)
tagperson-blender | tagperson-blender-master/blendereid/core/shield.py | import os
import cv2
from tqdm import tqdm
import bpy
import json
import random
import numpy as np
from blendereid.core import background
def load_multiple_shield_imgs(shield_root):
    """Load every image under ``shield_root`` as a Blender image datablock.

    Args:
        shield_root: directory containing the shield (occluder) images.

    Returns:
        list of ``bpy`` images, loaded in sorted filename order.
    """
    print(f"start to load images from {shield_root}")
    shield_img_list = []
    shield_names = sorted(os.listdir(shield_root))
    for shield_name in tqdm(shield_names):
        shield_file_path = os.path.abspath(os.path.join(shield_root, shield_name))
        # Load directly through Blender; the previous cv2.imread of the same
        # file was unused and only doubled the disk I/O.
        img_bg = bpy.data.images.load(shield_file_path)
        shield_img_list.append(img_bg)
    print(f"total collect {len(shield_names)}, valid {len(shield_img_list)}...")
    return shield_img_list
def load_shield_v2_imgs(cfg):
    """Load paired (background, foreground) shield images for SHIELD_V2.

    Expects files named ``*_bg.png`` with a matching ``*_fg.png`` next to
    them under ``cfg.COMPOSITE.SHIELD_V2.ROOT``.  A positive ``NUM_LIMIT``
    caps how many pairs are loaded.

    Returns:
        list of ``(img_bg, img_fg)`` tuples of Blender image datablocks.
    """
    shiled_v2_root = cfg.COMPOSITE.SHIELD_V2.ROOT
    print(f"start to load images from {shiled_v2_root}")
    shield_v2_img_list = []
    shield_names = os.listdir(shiled_v2_root)
    # Only the *_bg.png files drive the iteration; the fg twin is derived.
    shield_bg_names = sorted(filter(lambda x: x.find('_bg.png') > -1, shield_names))
    num_limit = cfg.COMPOSITE.SHIELD_V2.NUM_LIMIT
    if num_limit > 0:
        shield_bg_names = shield_bg_names[:num_limit]
    for shield_bg_name in tqdm(shield_bg_names):
        bg_path = os.path.abspath(os.path.join(shiled_v2_root, shield_bg_name))
        img_bg = bpy.data.images.load(bg_path)
        fg_name = shield_bg_name.replace("_bg.png", "_fg.png")
        fg_path = os.path.abspath(os.path.join(shiled_v2_root, fg_name))
        img_fg = bpy.data.images.load(fg_path)
        shield_v2_img_list.append((img_bg, img_fg))
    print(f"total collect {len(shield_bg_names)}, valid {len(shield_v2_img_list)}...")
    return shield_v2_img_list
def load_shield_image_info(cfg):
    """Read the cached per-image shield info (x, y, resolution) from disk.

    Raises:
        ValueError: if the configured cache file does not exist.
    """
    cache_path = cfg.COMPOSITE.SHIELD_V2.FIX_RESOLUTION_PER_IMAGE.SAVE_JSON_PATH
    if not os.path.exists(cache_path):
        raise ValueError(f'save_json_path not exist: {cache_path}')
    print(f"Background Image Info cache is enabled, loading it from {cache_path}")
    with open(cache_path) as fp:
        return json.load(fp)
def get_img_shield_map(cfg, img_shield_v2_list):
    """Index shield images by file basename.

    Flattens the (bg, fg) pairs into one dict keyed on the image's file
    name; assumes every name is unique.  A ``None`` list yields ``{}``.
    """
    if img_shield_v2_list is None:
        return {}
    return {
        os.path.basename(image.filepath): image
        for pair in img_shield_v2_list
        for image in pair
    }
def random_select_one_shield_v2_name(cfg, img_shield_v2_list):
    """Pick a random (bg, fg) shield pair and return their basenames.

    Returns an empty list when SHIELD_V2 is disabled or the probability
    roll fails; otherwise ``[bg_name, fg_name]``.
    """
    chosen = None
    if cfg.COMPOSITE.SHIELD_V2.ENABLED and random.random() < cfg.COMPOSITE.SHIELD_V2.PROB:
        idx = np.random.choice(range(len(img_shield_v2_list)))
        chosen = img_shield_v2_list[idx]
    if chosen is None:
        return []
    return [os.path.basename(image.filepath) for image in chosen]
def set_shield_v2_info_by_name(image_bg_map, image_shield_v2_map, image_shield_v2_name, resolution_x, resolution_y):
    """Apply a named (bg, fg) shield pair to the compositor.

    Always clears the current shield first; an empty name list leaves the
    shield cleared and the background untouched.
    """
    # "Image.001" is the compositor node holding the foreground shield.
    bpy.context.scene.node_tree.nodes.get("Image.001").image = None
    if len(image_shield_v2_name) == 0:
        return
    bg_name, fg_name = image_shield_v2_name
    set_shield_v2_node_by_name(image_shield_v2_map, fg_name, resolution_x, resolution_y)
    background.set_background_node_by_bg_name(image_shield_v2_map, bg_name)
def set_shield_v2_node_by_name(image_shield_v2_map, image_fg_name, resolution_x, resolution_y):
    """Point the compositor's shield node at the image named ``image_fg_name``.

    Unknown or ``None`` names clear the shield.  The shield is scaled to the
    given render resolution and centered (transform offsets of 0).
    (Removed the unused ``render`` and ``resolution_percentage`` locals.)
    """
    if image_fg_name is None or image_fg_name not in image_shield_v2_map:
        img_shield_v2_node = None
    else:
        img_shield_v2_node = image_shield_v2_map[image_fg_name]
    nodes = bpy.context.scene.node_tree.nodes
    nodes.get("Image.001").image = img_shield_v2_node
    # Stretch the shield to cover the whole render.
    shield_scale_node = nodes.get("Scale.001")
    shield_scale_node.space = 'ABSOLUTE'
    shield_scale_node.inputs[1].default_value = resolution_x
    shield_scale_node.inputs[2].default_value = resolution_y
    # Center the shield (no x/y offset).
    shield_transform_node = nodes.get("Transform")
    shield_transform_node.inputs[1].default_value = 0
    shield_transform_node.inputs[2].default_value = 0
def set_shield_from_predefined_img(image_bg, image_fg, resolution_x, resolution_y):
    """Install an already-loaded (bg, fg) shield pair into the compositor.

    ``image_fg`` becomes the foreground occluder (scaled to the given render
    size, no offset) and ``image_bg`` replaces the background image.
    (Removed the unused ``render`` and ``resolution_percentage`` locals.)
    """
    nodes = bpy.context.scene.node_tree.nodes
    nodes.get("Image.001").image = image_fg
    # Stretch the shield to cover the whole render.
    shield_scale_node = nodes.get("Scale.001")
    shield_scale_node.space = 'ABSOLUTE'
    shield_scale_node.inputs[1].default_value = resolution_x
    shield_scale_node.inputs[2].default_value = resolution_y
    # Center the shield (no x/y offset).
    shield_transform_node = nodes.get("Transform")
    shield_transform_node.inputs[1].default_value = 0
    shield_transform_node.inputs[2].default_value = 0
    background.set_backgound_from_predefined_img(image_bg)
tagperson-blender | tagperson-blender-master/blendereid/core/compositing.py | import os
import bpy
import random
import numpy as np
from blendereid.core import background, render_config
def compose_render_nodes(default_bg_file_path=None):
    """Build the compositor node graph used for every render.

    Graph (left to right):
        background image -> scale -> color-mask alpha-over -> alpha-over
        rendered person  --------------------------------------^
        shield image -> scale -> transform -> alpha-over -> gamma -> composite

    Node creation order is preserved so Blender's auto-naming produces the
    "Image.001"/"Scale.001"/"Transform"/"Gamma" names other modules look up.
    Cleanups vs. the original: dropped the dead ``scale_node.space =
    'RELATIVE'`` (immediately overwritten), the unused ``link =`` bindings,
    and the commented-out crop-node experiment.

    Args:
        default_bg_file_path: background image loaded at build time;
            falls back to the bundled demo background.
    """
    if default_bg_file_path is None:
        default_bg_file_path = 'data_demo/background_demo/bg_blender_notext.png'
    # Load the initial background image.
    bg_file_path = os.path.abspath(default_bg_file_path)
    img_bg = bpy.data.images.load(bg_file_path)

    bpy.context.scene.use_nodes = True
    tree = bpy.context.scene.node_tree
    composite_node = tree.nodes.get("Composite")
    render_node = tree.nodes.get("Render Layers")

    # Background branch: image scaled to the render size.
    image_node = tree.nodes.new("CompositorNodeImage")
    image_node.image = img_bg
    scale_node = tree.nodes.new("CompositorNodeScale")
    alpha_node = tree.nodes.new("CompositorNodeAlphaOver")
    scale_node.space = 'RENDER_SIZE'
    gamma_node = tree.nodes.new("CompositorNodeGamma")

    # Shield (occluder) branch: auto-named Image.001 / Scale.001 / Transform.
    shield_image_node = tree.nodes.new("CompositorNodeImage")
    shield_scale_node = tree.nodes.new("CompositorNodeScale")
    shield_scale_node.space = 'ABSOLUTE'
    shield_transform_node = tree.nodes.new("CompositorNodeTransform")
    shield_alpha_node = tree.nodes.new("CompositorNodeAlphaOver")

    # Optional flat color mask over the background (factor 0 = disabled).
    color_mask_alpha_node = tree.nodes.new("CompositorNodeAlphaOver")
    color_mask_alpha_node.inputs[0].default_value = 0.0
    color_mask_node = tree.nodes.new("CompositorNodeRGB")
    color_mask_node.outputs[0].default_value = (0.0, 0.0, 0.0, 0.0)

    links = tree.links
    links.new(image_node.outputs[0], scale_node.inputs[0])
    links.new(scale_node.outputs[0], color_mask_alpha_node.inputs[1])
    links.new(color_mask_node.outputs[0], color_mask_alpha_node.inputs[2])
    # (masked) background + rendered person.
    links.new(color_mask_alpha_node.outputs[0], alpha_node.inputs[1])
    links.new(render_node.outputs[0], alpha_node.inputs[2])
    # Shield branch composited on top of the person.
    links.new(shield_image_node.outputs[0], shield_scale_node.inputs[0])
    links.new(shield_scale_node.outputs[0], shield_transform_node.inputs[0])
    links.new(shield_transform_node.outputs[0], shield_alpha_node.inputs[2])
    links.new(alpha_node.outputs[0], shield_alpha_node.inputs[1])
    links.new(shield_alpha_node.outputs[0], gamma_node.inputs[0])
    links.new(gamma_node.outputs[0], composite_node.inputs[0])
def adjust_render_nodes(cfg, img_shield, img_shield_v2=None, shield_image_info=None):
    """Per-render compositor tweaks: gamma jitter plus the shield overlays.

    Args:
        cfg: config controlling gamma randomization and both shield variants.
        img_shield: v1 shield image (random size/position) or None to clear.
        img_shield_v2: optional (bg, fg) image pair for the v2 shield.
        shield_image_info: per-image resolution cache used by the v2 branch.
    """
    # for gamma
    if cfg.COMPOSITE.GAMMA.RANDOM.ENABLE:
        gamma_node = bpy.context.scene.node_tree.nodes.get("Gamma")
        gamma_node.inputs[1].default_value = cfg.COMPOSITE.GAMMA.RANDOM.BASE
        gamma_node.inputs[1].default_value += random.uniform(cfg.COMPOSITE.GAMMA.RANDOM.LOWER_BOUND, cfg.COMPOSITE.GAMMA.RANDOM.UPPER_BOUND)
    # v1 shield: random size and position relative to the render resolution.
    shield_image_node = bpy.context.scene.node_tree.nodes.get("Image.001")
    if cfg.COMPOSITE.SHIELD.ENABLE and img_shield is not None:
        # TODO: replace this image with new
        shield_image_node.image = img_shield
        # set the shield image size
        shield_scale_node = bpy.context.scene.node_tree.nodes.get("Scale.001")
        render = bpy.data.scenes['Scene'].render
        render_width = render.resolution_x
        render_height = render.resolution_y
        resolution_percentage = render.resolution_percentage
        shield_scale_node.space = 'ABSOLUTE'
        shield_scale_node.inputs[1].default_value = render_width * cfg.COMPOSITE.SHIELD.SCALE.WIDTH_SCALE.BASE * random.uniform(cfg.COMPOSITE.SHIELD.SCALE.WIDTH_SCALE.RANDOM.LOWER_BOUND, cfg.COMPOSITE.SHIELD.SCALE.WIDTH_SCALE.RANDOM.UPPER_BOUND)  # X
        shield_scale_node.inputs[2].default_value = render_height * cfg.COMPOSITE.SHIELD.SCALE.HEIGHT_SCALE.BASE * random.uniform(cfg.COMPOSITE.SHIELD.SCALE.HEIGHT_SCALE.RANDOM.LOWER_BOUND, cfg.COMPOSITE.SHIELD.SCALE.HEIGHT_SCALE.RANDOM.UPPER_BOUND)  # Y
        # set the shield image position
        shield_transform_node = bpy.context.scene.node_tree.nodes.get("Transform")
        shield_transform_node.inputs[1].default_value = render_width * cfg.COMPOSITE.SHIELD.TRANSFORM.X_SCALE.BASE * random.uniform(cfg.COMPOSITE.SHIELD.TRANSFORM.X_SCALE.RANDOM.LOWER_BOUND, cfg.COMPOSITE.SHIELD.TRANSFORM.X_SCALE.RANDOM.UPPER_BOUND)  # X
        shield_transform_node.inputs[2].default_value = render_height * cfg.COMPOSITE.SHIELD.TRANSFORM.Y_SCALE.BASE * random.uniform(cfg.COMPOSITE.SHIELD.TRANSFORM.Y_SCALE.RANDOM.LOWER_BOUND, cfg.COMPOSITE.SHIELD.TRANSFORM.Y_SCALE.RANDOM.UPPER_BOUND)  # Y
        print(f"render_width={render_width}, render_height={render_height}, percentage={resolution_percentage}")
        print(f"width={shield_scale_node.inputs[1].default_value}, height={shield_scale_node.inputs[2].default_value}")
        print(f"x={shield_transform_node.inputs[1].default_value}, y={shield_transform_node.inputs[2].default_value}")
    else:
        shield_image_node.image = None
    # v2 shield: fixed bg/fg pair; also resets the render resolution to match.
    if cfg.COMPOSITE.SHIELD_V2.ENABLED and img_shield_v2 is not None:
        print(f"set shield_v2 node")
        (img_bg, img_fg) = img_shield_v2
        set_shield_img(img_fg)
        background.set_backgound_from_predefined_img(img_bg)
        # reset image resolution
        render_config.modify_render_config_by_bg_image(cfg, os.path.basename(img_bg.filepath), shield_image_info)
def set_shield_img(img_fg_shield):
    """Install ``img_fg_shield`` as the foreground shield, covering the render.

    The shield is scaled to the scene's render resolution and centered.
    (Removed the unused ``resolution_percentage`` local.)
    """
    nodes = bpy.context.scene.node_tree.nodes
    nodes.get("Image.001").image = img_fg_shield
    render = bpy.data.scenes['Scene'].render
    # Stretch the shield to the full render size.
    shield_scale_node = nodes.get("Scale.001")
    shield_scale_node.space = 'ABSOLUTE'
    shield_scale_node.inputs[1].default_value = render.resolution_x
    shield_scale_node.inputs[2].default_value = render.resolution_y
    # No positional offset.
    shield_transform_node = nodes.get("Transform")
    shield_transform_node.inputs[1].default_value = 0
    shield_transform_node.inputs[2].default_value = 0
def random_select_one_shield(cfg, img_shield_list):
    """Return a random v1 shield image, or ``None`` when disabled / roll fails."""
    use_shield = cfg.COMPOSITE.SHIELD.ENABLE and random.random() < cfg.COMPOSITE.SHIELD.PROB
    return np.random.choice(img_shield_list) if use_shield else None
# @deprecated, see `random_select_one_shield_v2` in shield.py
def random_select_one_shield_v2(cfg, img_shield_v2_list):
    """Return a random (bg, fg) shield pair, or ``None`` when disabled / roll fails."""
    if not (cfg.COMPOSITE.SHIELD_V2.ENABLED and random.random() < cfg.COMPOSITE.SHIELD_V2.PROB):
        return None
    pick = np.random.choice(range(len(img_shield_v2_list)))
    return img_shield_v2_list[pick]
def random_select_gamma_value(cfg, camera_id):
    """Draw the gamma value for one render.

    With GAMMA.RANDOM disabled this is always 1.0.  Otherwise the base is
    either a per-camera value (CAMERA_VALUES[camera_id - 1], defaulting to
    1.0 for unknown camera ids) or the global BASE, plus a uniform jitter
    in [LOWER_BOUND, UPPER_BOUND].

    (Replaced the enumerate loop that ignored its ``value`` variable and
    re-indexed the list with a dict comprehension.)
    """
    rand_cfg = cfg.COMPOSITE.GAMMA.RANDOM
    if not rand_cfg.ENABLE:
        return 1.0
    if rand_cfg.CAMERA_BASE:
        # Camera ids are 1-based indices into CAMERA_VALUES.
        gamma_camera_map = {i + 1: value for i, value in enumerate(rand_cfg.CAMERA_VALUES)}
        gamma_value = gamma_camera_map.get(camera_id, 1.0)
    else:
        gamma_value = rand_cfg.BASE
    return gamma_value + random.uniform(rand_cfg.LOWER_BOUND, rand_cfg.UPPER_BOUND)
def set_gamma_value(gamma_value):
    """Write ``gamma_value`` into the compositor's Gamma node (no-op for None)."""
    if gamma_value is None:
        return
    bpy.context.scene.node_tree.nodes.get("Gamma").inputs[1].default_value = gamma_value
| 8,379 | 46.078652 | 254 | py |
tagperson-blender | tagperson-blender-master/blendereid/core/camera_config.py | import random
def adjust_camera_roration(obj_camera, obj_person, cfg, elev=10, distance=25):
    """Aim the camera at the person and apply randomized euler-angle biases.

    (The name keeps the historical typo "roration" — callers depend on it.)

    Args:
        obj_camera: Blender camera object to orient (euler rotation mode).
        obj_person: Blender person object to look at.
        cfg: config with the CAMERA.* bias/random settings.
        elev: camera elevation in degrees; feeds the x-bias correction term.
        distance: currently unused (only referenced by a commented formula).

    Returns:
        (x_bias, y_bias, z_bias): the euler biases that were applied.
    """
    camera_rotation_euler_x_bias = cfg.CAMERA.ROTATION_EULER_X_BIAS.BIAS
    # Occasionally replace the x bias entirely to simulate a camera "jump".
    if cfg.CAMERA.OCCATIONAL_JUMP.ENABLE:
        if random.random() < cfg.CAMERA.OCCATIONAL_JUMP.PROB:
            camera_rotation_euler_x_bias = cfg.CAMERA.OCCATIONAL_JUMP.ROTATION_EULER_X_BIAS.BIAS
    # Correction term coupling the person's height (dimensions[2]) and elev.
    # camera_rotation_euler_x_bias += max(0, elev-10) / 1000
    camera_rotation_euler_x_bias += 0.01 - max(0, 28 - obj_person.dimensions[2]) / 2000 * (elev - 10) / 7
    # camera_rotation_euler_x_bias -= max(0, distance-25) / 200
    if cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.ENABLE:
        if cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.USE_GAUSS:
            camera_rotation_euler_x_bias += random.gauss(cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.MU, cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.SIGMA)
        else:
            camera_rotation_euler_x_bias += random.uniform(cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.LOWER_BOUND, cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.UPPER_BOUND)
    camera_rotation_euler_z_bias = cfg.CAMERA.ROTATION_EULER_Z_BIAS.BIAS
    if cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.ENABLE:
        if cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.USE_GAUSS:
            camera_rotation_euler_z_bias += random.gauss(cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.MU, cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.SIGMA)
        else:
            camera_rotation_euler_z_bias += random.uniform(cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.LOWER_BOUND, cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.UPPER_BOUND)
    camera_rotation_euler_y_bias = cfg.CAMERA.ROTATION_EULER_Y_BIAS.BIAS
    if cfg.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.ENABLE:
        if cfg.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.USE_GAUSS:
            camera_rotation_euler_y_bias += random.gauss(cfg.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.MU, cfg.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.SIGMA)
    obj_pose = obj_person.matrix_world.to_translation()
    camera_pos = obj_camera.matrix_world.to_translation()
    direction = obj_pose - camera_pos
    # obj_pose the cameras '-Z' and use its 'Y' as up
    rot_quat = direction.to_track_quat('-Z', 'Y')
    # assume we're using euler rotation
    obj_camera.rotation_euler = rot_quat.to_euler()
    obj_camera.rotation_euler.x += camera_rotation_euler_x_bias
    obj_camera.rotation_euler.z += camera_rotation_euler_z_bias
    obj_camera.rotation_euler.y += camera_rotation_euler_y_bias
    return camera_rotation_euler_x_bias, camera_rotation_euler_y_bias, camera_rotation_euler_z_bias
def get_random_camera_elev(cfg):
    """Sample the camera elevation.

    With randomness disabled this returns BASE.  Uniform mode adds
    randint(LOWER_BOUND, UPPER_BOUND) to BASE; gauss mode *replaces* BASE
    with a Normal(MU, SIGMA) draw (matching the original behavior).
    """
    elev_cfg = cfg.CAMERA.ELEV
    if not elev_cfg.RANDOM.ENABLED:
        return elev_cfg.BASE
    if elev_cfg.RANDOM.USE_GAUSS:
        return random.gauss(elev_cfg.RANDOM.MU, elev_cfg.RANDOM.SIGMA)
    return elev_cfg.BASE + random.randint(elev_cfg.RANDOM.LOWER_BOUND, elev_cfg.RANDOM.UPPER_BOUND)
def get_random_camera_azim(cfg):
    """Uniform random azimuth in degrees; ``cfg`` is unused (kept for API symmetry)."""
    return random.randint(0, 360)
def get_random_camera_distance(cfg, elev, obj_person):
    """Sample the camera-to-person distance.

    The base grows with the person's height (dimensions[2], mhx2 meshes)
    plus a uniform jitter; elevations above 10 push the camera farther
    back, scaled up slightly for very tall meshes.
    """
    height = obj_person.dimensions[2]
    jitter = random.uniform(cfg.CAMERA.DISTANCE.RANDOM.LOWER_BOUND,
                            cfg.CAMERA.DISTANCE.RANDOM.UPPER_BOUND)
    base = 0.2 + cfg.CAMERA.DISTANCE.PERSON_HEIGHT_FACTOR * height + jitter
    elev_offset = float(elev - 10) * 0.12 * (1 + max(0, height - 15) / 100)
    return base + elev_offset
def get_adjusted_camera_rotation(cfg, obj_person, elev):
    """Compute (but do not apply) the randomized camera euler biases.

    Same bias logic as ``adjust_camera_roration`` minus the occasional-jump
    branch, and without touching any Blender object; pass the result to
    ``set_camera_roration`` to apply it.

    Returns:
        (x_bias, y_bias, z_bias) euler-angle biases.
    """
    camera_rotation_euler_x_bias = cfg.CAMERA.ROTATION_EULER_X_BIAS.BIAS
    # Correction term coupling the person's height (dimensions[2]) and elev.
    camera_rotation_euler_x_bias += 0.01 - max(0, 28 - obj_person.dimensions[2]) / 2000 * (elev - 10) / 7
    if cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.ENABLE:
        if cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.USE_GAUSS:
            camera_rotation_euler_x_bias += random.gauss(cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.MU, cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.SIGMA)
        else:
            camera_rotation_euler_x_bias += random.uniform(cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.LOWER_BOUND, cfg.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.UPPER_BOUND)
    camera_rotation_euler_z_bias = cfg.CAMERA.ROTATION_EULER_Z_BIAS.BIAS
    if cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.ENABLE:
        if cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.USE_GAUSS:
            camera_rotation_euler_z_bias += random.gauss(cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.MU, cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.SIGMA)
        else:
            # camera_rotation_euler_z_bias += (random.random() + cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.BIAS - 0.5) * cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.SCALE
            camera_rotation_euler_z_bias += random.uniform(cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.LOWER_BOUND, cfg.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.UPPER_BOUND)
    camera_rotation_euler_y_bias = cfg.CAMERA.ROTATION_EULER_Y_BIAS.BIAS
    if cfg.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.ENABLE:
        if cfg.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.USE_GAUSS:
            camera_rotation_euler_y_bias += random.gauss(cfg.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.MU, cfg.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.SIGMA)
    return camera_rotation_euler_x_bias, camera_rotation_euler_y_bias, camera_rotation_euler_z_bias
def set_camera_roration(obj_camera, obj_person, cre_x_bias=0.0, cre_y_bias=0.0, cre_z_bias=0.0):
    """Aim ``obj_camera`` at ``obj_person`` and add per-axis euler biases.

    Tracks the camera's '-Z' axis onto the person with 'Y' as up (Blender
    camera convention), converts to euler angles, then offsets each axis.
    (Name keeps the historical typo "roration" — callers depend on it.)
    """
    target = obj_person.matrix_world.to_translation()
    eye = obj_camera.matrix_world.to_translation()
    look_dir = target - eye
    # Point the camera's '-Z' at the person, keeping 'Y' as up; assumes the
    # camera uses euler rotation mode.
    obj_camera.rotation_euler = look_dir.to_track_quat('-Z', 'Y').to_euler()
    obj_camera.rotation_euler.x += cre_x_bias
    obj_camera.rotation_euler.z += cre_z_bias
    obj_camera.rotation_euler.y += cre_y_bias
tagperson-blender | tagperson-blender-master/blendereid/core/pose_manager.py | import numpy as np
import os
import random
def fetch_pose_names(cfg, frame_count):
    """Sample ``frame_count`` pose file names (relative to cfg.POSE.ROOT).

    Candidates are every file under each serial directory listed in
    cfg.POSE.SERIALS, returned as "<serial>/<file>" strings drawn uniformly
    at random with replacement.

    (Removed ~20 lines of commented-out dead code from an older two-serial
    sampling scheme.)
    """
    pose_root = cfg.POSE.ROOT
    pose_serial_candidates = cfg.POSE.SERIALS
    # NOTE(review): this draw is unused by the logic below, but it is kept so
    # the numpy RNG consumes the same sequence as before (seeds are fixed
    # upstream, so dropping it would change every generated dataset).
    pose_serials = np.random.choice(pose_serial_candidates, 2)
    pose_candidates = []
    for pose_serial in pose_serial_candidates:
        cur_pose_root = os.path.join(pose_root, pose_serial)
        p_candidates = sorted(os.listdir(cur_pose_root))
        pose_candidates.extend(f"{pose_serial}/{p_candidate}" for p_candidate in p_candidates)
    pose_names = np.random.choice(pose_candidates, frame_count)
    return pose_names
def find_adjacent_pose_path(pose_path):
    """Return the path of a nearby pose in the same sequence.

    Pose files are named ``<a>_<b>_<seq>.json``; the sequence number is
    advanced by a random 1-5 steps, wrapping modulo 50 (sequences are
    assumed to live in 2..50, so wrapped values below 2 are bumped by 2).
    """
    directory = os.path.dirname(pose_path)
    stem = os.path.splitext(os.path.basename(pose_path))[0]
    parts = stem.split("_")
    next_seq = (int(parts[2]) + random.randint(1, 5)) % 50
    if next_seq < 2:
        next_seq += 2
    return os.path.join(directory, f"{parts[0]}_{parts[1]}_{next_seq}.json")
def apply_transform_to_bones(obj_person):
    """Keyframe the person's location plus every pose bone's transform.

    Each bone gets location, scale, and whichever rotation channel matches
    its rotation mode (quaternion vs. euler), in that order.
    """
    obj_person.keyframe_insert(data_path='location')
    for pose_bone in obj_person.pose.bones:
        if pose_bone.rotation_mode == "QUATERNION":
            rotation_path = 'rotation_quaternion'
        else:
            rotation_path = 'rotation_euler'
        for channel in ('location', rotation_path, 'scale'):
            pose_bone.keyframe_insert(data_path=channel)
def fetch_pose_paths_in_one_camera(cfg, frame_count):
    """Sample ``frame_count`` absolute pose paths from one random serial.

    A single serial directory is picked from cfg.POSE.SERIALS and paths
    are drawn uniformly (with replacement) from its files.
    """
    serial = np.random.choice(cfg.POSE.SERIALS, 1)[0]
    serial_dir = os.path.join(cfg.POSE.ROOT, serial)
    candidates = [os.path.join(serial_dir, name) for name in sorted(os.listdir(serial_dir))]
    # TODO: sort pose_path
    return np.random.choice(candidates, frame_count)
tagperson-blender | tagperson-blender-master/blendereid/core/attribute_manager.py | import os
import json
import numpy as np
def generate_attribute_save_path(cfg, mesh_id, camera_index, sequence_id, save_name=None):
    """Build (and ensure the directory for) the attribute json output path.

    Uses cfg.OUTPUT_DIR_FOR_ATTRIBUTE when non-empty, otherwise
    ``<OUTPUT_DIR>/output_attributes``.  The default file name is
    ``<mesh_id>_c<camera_index>s1_<int(sequence_id)>.json``; an explicit
    ``save_name`` overrides it.
    """
    output_dir = cfg.OUTPUT_DIR_FOR_ATTRIBUTE or os.path.join(cfg.OUTPUT_DIR, "output_attributes")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    file_name = save_name if save_name is not None else f"{mesh_id}_c{camera_index}s1_{int(sequence_id)}.json"
    return f"{output_dir}/{file_name}"
def save_attribute_dict(cfg, save_path, attribute):
    """Serialize ``attribute``'s instance attributes to json at ``save_path``.

    Transient fields (``pose_path``, ``save_path``) are excluded from the
    file.  A *copy* of ``attribute.__dict__`` is serialized: the previous
    code aliased the live ``__dict__`` and ``del``-ed keys from it, silently
    mutating the attribute object as a side effect of saving it.

    Args:
        cfg: unused; kept for call-site compatibility.
        save_path: destination json file path.
        attribute: object whose instance attributes are saved.
    """
    attribute_dict = dict(attribute.__dict__)
    non_save_key_list = [
        'pose_path',
        'save_path',
    ]
    for non_save_key in non_save_key_list:
        attribute_dict.pop(non_save_key, None)
    print(attribute_dict)
    with open(save_path, 'w') as f:
        json.dump(attribute_dict, f)
def override_attribute_with_given_dict(render_attribute, render_options_dict):
    """Overwrite fields of ``render_attribute`` with values from a dict.

    Special cases (temporary logic, see the dated TODOs below):
      * ``img_height`` is never copied directly; it is derived from
        ``img_width`` so the attribute keeps its own width but takes the
        dict's aspect ratio.
      * ``light_azim`` is first recomputed relative to ``camera_azim``.

    Returns:
        The same ``render_attribute`` object, mutated in place (or unchanged
        when ``render_options_dict`` is None).
    """
    if render_options_dict is None:
        return render_attribute
    for key, value in render_options_dict.items():
        # TODO: temp logic in 20210909
        if key == 'img_height':
            continue
        if key == 'img_width':
            assert 'img_height' in render_options_dict
            # Keep the attribute's width; rescale height to the dict's ratio.
            target_ratio = render_options_dict['img_height'] / render_options_dict['img_width']
            target_width = render_attribute.img_width
            target_height = int(target_width * target_ratio)
            setattr(render_attribute, 'img_height', target_height)
            continue
        # temp light_azim_rela in 20210925
        if key == 'light_azim':
            assert hasattr(render_attribute, 'camera_azim') == True
            target_light_azim = (render_attribute.camera_azim + render_options_dict['light_azim']) % 360
            setattr(render_attribute, "light_azim", target_light_azim)
            # NOTE(review): the fall-through below immediately overwrites
            # light_azim with the raw dict value — a `continue` may be
            # missing here; confirm the intended behavior before changing.
        setattr(render_attribute, key, value)
    return render_attribute
def load_attribute_distribution_file(cfg):
    """Load the attribute distribution json (a list of attribute dicts).

    Returns ``None`` when the feature is disabled; otherwise reads
    FILE_PATH, applies the LIMIT_FIELDS filter, and returns the list.

    Raises:
        ValueError: if enabled but the file is missing.
    """
    file_cfg = cfg.ATTRIBUTE.USE_DISTRIBUTION_FILE
    if not file_cfg.ENABLED:
        return None
    distribute_file_path = file_cfg.FILE_PATH
    if not os.path.exists(distribute_file_path):
        raise ValueError(f'attribute use distribute file is enabled, but the file {distribute_file_path} not exist')
    with open(distribute_file_path) as fp:
        raw_list = json.load(fp)
    return filter_attribute_distribution_list_by_limit_fields(cfg, raw_list)
def filter_attribute_distribution_list_by_limit_fields(cfg, attribute_distribution_list):
    """Restrict every attribute dict to the keys listed in LIMIT_FIELDS.

    With an empty LIMIT_FIELDS config the input list is returned unchanged;
    otherwise a new list of filtered copies is built.
    """
    limit_fields = cfg.ATTRIBUTE.USE_DISTRIBUTION_FILE.LIMIT_FIELDS
    if len(limit_fields) == 0:
        return attribute_distribution_list
    allowed_keys = set(limit_fields)
    return [
        {key: val for key, val in entry.items() if key in allowed_keys}
        for entry in attribute_distribution_list
    ]
def random_sample_one_from_attribute_distribute_list(attribute_distribution_list):
    """Uniformly pick one attribute dict from the list.

    Returns None for a None or empty input.
    """
    if not attribute_distribution_list:
        return None
    return np.random.choice(attribute_distribution_list)
def attempt_override_attribute(render_attribute, attribute_distribution_list):
    """Maybe override `render_attribute` with one sampled attribute dict.

    Samples one entry from `attribute_distribution_list` (None when the list
    is empty/None) and, if a sample was obtained, applies it via
    override_attribute_with_given_dict. Returns the (possibly mutated)
    attribute either way.
    """
    override_attribute_dict = random_sample_one_from_attribute_distribute_list(attribute_distribution_list)
    if override_attribute_dict is not None:
        render_attribute = override_attribute_with_given_dict(render_attribute, override_attribute_dict)
    return render_attribute
| 4,106 | 34.713043 | 116 | py |
tagperson-blender | tagperson-blender-master/blendereid/core/light_config.py | import random
def get_random_light_azim(cfg, camera_azim):
    """Light azimuth: the camera azimuth plus a random offset, wrapped to [0, 360)."""
    offset = random.randint(cfg.LIGHT.AZIM.RANDOM_LOWER_BOUND, cfg.LIGHT.AZIM.RANDOM_UPPER_BOUND)
    return (camera_azim + offset) % 360
def get_random_light_elev(cfg, camera_elev):
    """Light elevation: the configured base plus an optional random perturbation.

    When RANDOM is enabled the perturbation is drawn from a Gaussian
    (USE_GAUSS) or a uniform integer range; `camera_elev` is currently unused.
    """
    perturbation = 0
    if cfg.LIGHT.ELEV.RANDOM.ENABLED:
        rnd_cfg = cfg.LIGHT.ELEV.RANDOM
        if rnd_cfg.USE_GAUSS:
            perturbation = random.gauss(rnd_cfg.MU, rnd_cfg.SIGMA)
        else:
            perturbation = random.randint(rnd_cfg.LOWER_BOUND, rnd_cfg.UPPER_BOUND)
    return cfg.LIGHT.ELEV.BASE + perturbation
def get_random_light_distance(cfg, camera_distance):
    """Light distance as a fixed ratio of the camera distance.

    Despite the name there is no randomness here — the result is
    deterministic. (Also removes the extraction artifact that was fused onto
    the original return line.)
    """
    return camera_distance * cfg.LIGHT.DISTANCE.RATIO_TO_CAMERA_DISTANCE
tagperson-blender | tagperson-blender-master/blendereid/core/expand.py | """
kind of augmentation, expand in certain factor
"""
import copy
import numpy as np
import random
import os
def expand_render_attribute_list(cfg, render_attribute_list):
    """Augment render attributes with randomized variants (a form of expansion).

    Every original attribute is kept; extra deep-copies with perturbed camera
    distance / camera elevation / gamma are appended right after it according
    to the EXPERIMENT.EXPAND switches in `cfg`.

    NOTE(review): gamma variants reuse the same save-path increment scheme as
    distance variants (base_num * (i + 1)); if both are enabled the file names
    could collide — confirm against the output naming convention.
    """
    base_num = len(render_attribute_list)
    dist_cfg = cfg.EXPERIMENT.EXPAND.CAMERA_DISTANCE
    elev_cfg = cfg.EXPERIMENT.EXPAND.CAMERA_ELEV
    gamma_cfg = cfg.EXPERIMENT.EXPAND.GAMMA
    expanded = []
    for attribute in render_attribute_list:
        expanded.append(attribute)
        if dist_cfg.ENABLED:
            for i in range(dist_cfg.EXPAND_NUM):
                expanded.append(expand_camera_distance(
                    attribute,
                    increment=base_num * (i + 1),
                    random_max=dist_cfg.RANDOM.UPPER_BOUND,
                    random_min=dist_cfg.RANDOM.LOWER_BOUND))
        if elev_cfg.ENABLED:
            for i in range(elev_cfg.EXPAND_NUM):
                # offset past the distance variants so sequence ids stay unique
                expanded.append(expand_camera_elev(
                    attribute,
                    increment=base_num * (dist_cfg.EXPAND_NUM + i + 1),
                    random_max=elev_cfg.RANDOM.UPPER_BOUND,
                    random_min=elev_cfg.RANDOM.LOWER_BOUND))
        if gamma_cfg.ENABLED:
            for i in range(gamma_cfg.EXPAND_NUM):
                expanded.append(expand_gamma_value(attribute, increment=base_num * (i + 1)))
    return expanded
def add_increment_in_save_path(save_path: str, increment: int):
    """Shift the sequence id embedded in a '<mesh>_<cam>_<seq>.<ext>' file name.

    The third underscore-separated token of the basename is parsed as an int
    and increased by `increment`; any tokens beyond the third are dropped,
    matching the original naming convention. The directory part is preserved.
    """
    directory = os.path.dirname(save_path)
    stem, ext = os.path.splitext(os.path.basename(save_path))
    parts = stem.split("_")
    new_seq = int(parts[2]) + increment
    return os.path.join(directory, f"{parts[0]}_{parts[1]}_{new_seq}{ext}")
def expand_camera_distance(render_attribute, increment: int, random_max = 10, random_min = -10):
    """Deep-copy `render_attribute` with a uniformly perturbed camera distance.

    The copy's save path gets its sequence id shifted by `increment` so the
    variant does not overwrite the original image.
    """
    variant = copy.deepcopy(render_attribute)
    variant.camera_distance += random.uniform(random_min, random_max)
    variant.save_path = add_increment_in_save_path(variant.save_path, increment)
    return variant
def expand_camera_elev(render_attribute, increment: int, random_max = 10, random_min = -10):
    """Deep-copy `render_attribute` with a perturbed, clamped-to-zero elevation.

    The copy's save path gets its sequence id shifted by `increment`.
    """
    variant = copy.deepcopy(render_attribute)
    perturbed_elev = variant.camera_elev + random.uniform(random_min, random_max)
    variant.camera_elev = max(0, perturbed_elev)
    variant.save_path = add_increment_in_save_path(variant.save_path, increment)
    return variant
def expand_gamma_value(render_attribute, increment: int):
    """Deep-copy `render_attribute` with a fresh random gamma in [0.2, 2.5].

    The gamma is rounded to 3 decimals and the copy's save path gets its
    sequence id shifted by `increment`. (Also removes the extraction artifact
    that was fused onto the original return line.)
    """
    variant = copy.deepcopy(render_attribute)
    variant.gamma_value = round(random.uniform(0.2, 2.5), 3)
    variant.save_path = add_increment_in_save_path(variant.save_path, increment)
    return variant
tagperson-blender | tagperson-blender-master/blendereid/core/world_config.py | import bpy
import random
# world color relative
# Default RGBA world color; restored by reset_to_original_world().
DEFAULT_WORLD_BACKGROUND_COLOR = (0.05087608844041824, 0.05087608844041824, 0.05087608844041824, 1.0)
# Rounded variant; returned by get_world_background_color_by_camera_id() for unmapped cameras.
DEFAULT_WORLD_BACKGROUND_COLOR_V2 = (0.0509, 0.0509, 0.0509, 1.0)
# Fully transparent RGBA mask: alpha 0 disables the compositor color overlay.
DEFAULT_COLOR_MASK = (0.0, 0.0, 0.0, 0.0)
def get_original_world_background_color():
    """Return the current RGBA value of the World's Background shader node."""
    color = bpy.data.worlds["World"].node_tree.nodes["Background"].inputs[0].default_value
    return color
def apply_world_background_color(cfg, color=(0.3, 0.1, 0.8, 0.5)):
    """Apply `color` to the world shader and, optionally, the compositor mask.

    When cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.APPLY_TO_BACKGROUND is set, the
    same color is also pushed into the compositor color-mask nodes.
    """
    # light material effect
    apply_world_color_frontend(color)
    if cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.APPLY_TO_BACKGROUND:
        apply_color_mask_alpha(color)
def apply_world_color_frontend(color):
    """Write `color` (RGBA) into the World Background node's color input."""
    bpy.data.worlds["World"].node_tree.nodes["Background"].inputs[0].default_value = color
def apply_color_mask_alpha(color_mask=(0.0, 0.0, 0.0, 0.0)):
    """Push an RGBA color mask into the compositor.

    The mask's alpha channel (index 3) drives the "Alpha Over.002" mix
    factor; the RGB node supplies the overlay color itself.
    """
    color_mask_alpha_node = bpy.context.scene.node_tree.nodes.get("Alpha Over.002")
    color_mask_alpha_node.inputs[0].default_value = color_mask[3]
    color_mask_node = bpy.context.scene.node_tree.nodes.get("RGB")
    color_mask_node.outputs[0].default_value = color_mask
def reset_to_original_world(cfg):
    """Restore the default world color and (if enabled) clear the color mask."""
    apply_world_background_color(cfg, DEFAULT_WORLD_BACKGROUND_COLOR)
    if cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.APPLY_TO_BACKGROUND:
        apply_color_mask_alpha(DEFAULT_COLOR_MASK)
def set_world_background_color_by_camera_id(cfg, camera_id):
    """Apply a per-camera world tint, or reset the world for unmapped cameras.

    A base camera-id -> RGBA map is seeded with three hard-coded tints and
    then overwritten from cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.VALUES, with
    each RGB channel scaled by a (possibly random) factor from
    fetch_wc_random_scales.
    """
    color_by_camera = {
        1: (0.85, 0.85, 0.05, 0.2),
        2: (0.85, 0.05, 0.05, 0.2),
        3: (0.05, 0.05, 0.85, 0.2),
        # 4: (0.05, 0.85, 0.85, 0.2),
        # 5: (0.85, 0.05, 0.85, 0.2),
        # 6: (0.85, 0.85, 0.85, 0.2),
    }
    wc_cfg = cfg.OPTION.APPLY_CAMERA_WORLD_COLOR
    base_scale = 1.0
    if wc_cfg.RANDOM.ENABLED:
        base_scale = random.uniform(wc_cfg.RANDOM.LOWER_BOUND, wc_cfg.RANDOM.UPPER_BOUND)
    for idx, rgba in enumerate(wc_cfg.VALUES):
        s1, s2, s3 = fetch_wc_random_scales(cfg, base_scale)
        color_by_camera[idx + 1] = [rgba[0] * s1, rgba[1] * s2, rgba[2] * s3, rgba[3]]
    if camera_id in color_by_camera:
        apply_world_background_color(cfg, color_by_camera[camera_id])
    else:
        reset_to_original_world(cfg)
def attemp_apply_world_color(cfg, camera_idx):
    """Apply the per-camera world color if the feature is enabled.

    (Name kept as-is — "attemp" — for compatibility with existing callers.)
    """
    if cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.ENABLE:
        set_world_background_color_by_camera_id(cfg, camera_idx)
def fetch_wc_random_scales(cfg, world_color_scale):
    """Return the three per-channel scale factors for a world color.

    With RANDOM.INDEPENDENT enabled each channel draws its own uniform
    sample from [LOWER_BOUND, UPPER_BOUND]; otherwise all three channels
    reuse the shared `world_color_scale`.
    """
    rnd_cfg = cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.RANDOM
    if not rnd_cfg.INDEPENDENT:
        return world_color_scale, world_color_scale, world_color_scale
    return (random.uniform(rnd_cfg.LOWER_BOUND, rnd_cfg.UPPER_BOUND),
            random.uniform(rnd_cfg.LOWER_BOUND, rnd_cfg.UPPER_BOUND),
            random.uniform(rnd_cfg.LOWER_BOUND, rnd_cfg.UPPER_BOUND))
def get_world_background_color_by_camera_id(cfg, camera_id):
    """Resolve (world_color, background_mask_color) for a camera, without applying them.

    Pure "compute" counterpart of set_world_background_color_by_camera_id:
    the same seeded map + VALUES override is built, but the colors are
    returned instead of pushed into bpy. When the feature is disabled, the
    defaults are returned. The optional BACKGROUND.VALUES sub-config replaces
    the mask color with an unscaled per-camera color.
    """
    if not cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.ENABLE:
        return DEFAULT_WORLD_BACKGROUND_COLOR, DEFAULT_COLOR_MASK
    # hard-coded seed tints; entries are overwritten below from cfg VALUES
    camera_world_color_map = {
        1: (0.85, 0.85, 0.05, 0.2),
        2: (0.85, 0.05, 0.05, 0.2),
        3: (0.05, 0.05, 0.85, 0.2),
        # 4: (0.05, 0.85, 0.85, 0.2),
        # 5: (0.85, 0.05, 0.85, 0.2),
        # 6: (0.85, 0.85, 0.85, 0.2),
    }
    world_color_scale = 1.0
    if cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.RANDOM.ENABLED:
        world_color_scale = random.uniform(cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.RANDOM.LOWER_BOUND, cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.RANDOM.UPPER_BOUND)
    world_color_values = cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.VALUES
    for (i, value) in enumerate(world_color_values):
        # per-channel RGB scaling; alpha is kept as configured
        s1, s2, s3 = fetch_wc_random_scales(cfg, world_color_scale)
        value_random = [value[0] * s1, value[1] * s2, value[2] * s3, value[3]]
        camera_world_color_map[i+1] = value_random
    color = camera_world_color_map[camera_id] if camera_id in camera_world_color_map else DEFAULT_WORLD_BACKGROUND_COLOR_V2
    color_to_background = color if cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.APPLY_TO_BACKGROUND else DEFAULT_COLOR_MASK
    if cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.BACKGROUND.ENABLED:
        world_color_background_values = cfg.OPTION.APPLY_CAMERA_WORLD_COLOR.BACKGROUND.VALUES
        camera_world_background_color_map = {}
        for (i, value) in enumerate(world_color_background_values):
            # background colors are intentionally unscaled (fixed 1.0 factors)
            s1, s2, s3 = 1.0, 1.0, 1.0
            value_random = [value[0] * s1, value[1] * s2, value[2] * s3, value[3]]
            camera_world_background_color_map[i+1] = value_random
        color_to_background = camera_world_background_color_map[camera_id] if camera_id in camera_world_background_color_map else DEFAULT_COLOR_MASK
    return color, color_to_background
def attemp_apply_world_color_v2(color, color_to_background):
    """Push precomputed world and mask colors into the scene.

    Unlike attemp_apply_world_color this takes already-resolved colors (see
    get_world_background_color_by_camera_id) instead of a cfg. (Also removes
    the extraction artifact that was fused onto the original last line.)
    """
    apply_world_color_frontend(color)
    apply_color_mask_alpha(color_to_background)
tagperson-blender | tagperson-blender-master/blendereid/core/background.py | import cv2
import os
import json
from tqdm import tqdm
import bpy
import random
def load_multiple_bg_imgs(cfg):
    """Load the background .jpg files under cfg.BACKGROUND.ROOT as Blender images.

    Files with an extreme aspect ratio (height/width > 5) are skipped.
    Returns [] immediately when USE_EMPTY_BACKGROUND is enabled; the list is
    sorted by file name and optionally truncated to BACKGROUND.NUM_LIMIT.
    """
    if cfg.BACKGROUND.USE_EMPTY_BACKGROUND:
        return []
    bg_root = cfg.BACKGROUND.ROOT
    print(f"start to load images from {bg_root}")
    candidates = sorted(name for name in os.listdir(bg_root) if name.find(".jpg") > -1)
    num_limit = cfg.BACKGROUND.NUM_LIMIT
    if num_limit > 0:
        candidates = candidates[:num_limit]
    loaded = []
    for name in tqdm(candidates):
        full_path = os.path.abspath(os.path.join(bg_root, name))
        img = cv2.imread(full_path)
        height, width = img.shape[0], img.shape[1]
        if height / width > 5:
            continue
        loaded.append(bpy.data.images.load(full_path))
    print(f"total collect {len(candidates)}, valid {len(loaded)}...")
    return loaded
def load_multiple_bg_imgs_group_by_camera(cfg):
    """
    Parallel to `load_multiple_bg_imgs`, but grouped per camera.

    Expects cfg.BACKGROUND.ROOT to contain one sub-directory per camera id;
    returns a dict {camera_id (int): [Blender image node, ...]}. Raises
    ValueError when a non-directory entry is found under the root.
    """
    bg_root = cfg.BACKGROUND.ROOT
    print(f"start to load images from {bg_root}")
    grouped = {}
    for camera_id in os.listdir(bg_root):
        camera_dir = os.path.join(bg_root, camera_id)
        if not os.path.isdir(camera_dir):
            raise ValueError(f"camera_path is not a dir {camera_dir}")
        names = sorted(n for n in os.listdir(camera_dir) if n.find(".jpg") > -1)
        num_limit = cfg.BACKGROUND.NUM_LIMIT
        if num_limit > 0:
            names = names[:num_limit]
        images = []
        for name in tqdm(names):
            full_path = os.path.abspath(os.path.join(camera_dir, name))
            img = cv2.imread(full_path)
            # skip extreme portrait aspect ratios (height/width > 5)
            if img.shape[0] / img.shape[1] > 5:
                continue
            images.append(bpy.data.images.load(full_path))
        grouped[int(camera_id)] = images
    return grouped
def random_select_one_background_img(cfg, img_bg_list, camera_index):
    """Pick a random background, set it on the compositor, and return it.

    `img_bg_list` is either a flat list or, when BACKGROUND.USE_CAMERA_GROUP
    is enabled, a dict {camera_id: [images]} with `camera_index` selecting a
    group modulo the camera count. Returns None (without touching bpy) when
    no image is available.
    """
    if cfg.BACKGROUND.USE_CAMERA_GROUP:
        assert isinstance(img_bg_list, dict), f"cfg.BACKGROUND.USE_CAMERA_GROUP is enabled but `img_bg_list` is not a dict."
        num_camera = len(img_bg_list.keys())
        idx = camera_index % num_camera
        selected_img_bg_list = img_bg_list[idx]
    else:
        selected_img_bg_list = img_bg_list
    # bug fix: guard on the *selected* (possibly per-camera) list — the old
    # code checked the whole container, so an empty group crashed random.choice
    if not selected_img_bg_list:
        return None
    img_bg = random.choice(selected_img_bg_list)
    img_bg = set_backgound_from_predefined_img(img_bg)
    return img_bg
def set_backgound_from_predefined_img(img_bg):
    """Assign `img_bg` to the compositor's "Image" node and return it.

    Passing None clears the node's image.
    """
    image_node = bpy.context.scene.node_tree.nodes.get("Image")
    image_node.image = img_bg
    return img_bg
def load_bg_image_info(cfg):
    """
    Load the cached background-image info (x, y, percentage per image).

    Reads the JSON file configured at
    cfg.BACKGROUND.FIX_RESOLUTION_PER_IMAGE.SAVE_JSON_PATH and raises
    ValueError when it does not exist.
    """
    cache_path = cfg.BACKGROUND.FIX_RESOLUTION_PER_IMAGE.SAVE_JSON_PATH
    if not os.path.exists(cache_path):
        raise ValueError(f'save_json_path not exist: {cache_path}')
    print(f"Background Image Info cache is enabled, loading it from {cache_path}")
    with open(cache_path) as cache_file:
        return json.load(cache_file)
def random_get_one_bg_name(cfg, img_bg_list, camere_idx):
    """Randomly pick a background image and return its file basename (or None).

    Pure "peek" counterpart of random_select_one_background_img — it does not
    modify the compositor node tree. `img_bg_list` is a flat list or, with
    BACKGROUND.USE_CAMERA_GROUP, a dict {camera_id: [images]}.
    """
    if cfg.BACKGROUND.USE_CAMERA_GROUP:
        assert isinstance(img_bg_list, dict), f"cfg.BACKGROUND.USE_CAMERA_GROUP is enabled but `img_bg_list` is not a dict."
        num_camera = len(img_bg_list.keys())
        idx = camere_idx % num_camera
        selected_img_bg_list = img_bg_list[idx]
    else:
        selected_img_bg_list = img_bg_list
    # bug fix: guard on the *selected* (possibly per-camera) list — the old
    # code checked the whole container, so an empty group crashed random.choice
    if not selected_img_bg_list:
        return None
    img_bg = random.choice(selected_img_bg_list)
    return os.path.basename(img_bg.filepath)
def get_img_bg_map(cfg, img_bg_list):
    """
    Build a {basename: image} map over all loaded backgrounds.

    Assumes every image file name is unique across groups (later entries
    silently win on collision). Handles both the flat-list and the
    per-camera dict layout.
    """
    if cfg.BACKGROUND.USE_CAMERA_GROUP:
        assert isinstance(img_bg_list, dict), f"cfg.BACKGROUND.USE_CAMERA_GROUP is enabled but `img_bg_list` is not a dict."
        all_images = (img for group in img_bg_list.values() for img in group)
    else:
        all_images = iter(img_bg_list)
    return {os.path.basename(img.filepath): img for img in all_images}
def set_background_node_by_bg_name(img_bg_map, bg_name):
    """Look up `bg_name` in the basename->image map and install it as background.

    A None or unknown name clears the compositor's image node (None is
    forwarded to set_backgound_from_predefined_img).
    """
    if bg_name is None or bg_name not in img_bg_map:
        bg_node = None
    else:
        bg_node = img_bg_map[bg_name]
    set_backgound_from_predefined_img(bg_node)
| 5,203 | 32.146497 | 124 | py |
tagperson-blender | tagperson-blender-master/blendereid/core/partner.py | import numpy as np
from blendereid.utils import mesh_utils, geometry
import os
import random
def random_select_one_partner(cfg):
    """Pick a random partner mesh id and return the path to its .mhx2 file.

    NOTE(review): np.random.randint excludes the upper bound, so
    cfg.PARTNER.SELECT_MESH_ID_MAX itself is never chosen — confirm that is
    intended.
    """
    random_min = cfg.PARTNER.SELECT_MESH_ID_MIN
    random_max = cfg.PARTNER.SELECT_MESH_ID_MAX
    partner_mesh_id = np.random.randint(random_min, random_max)
    partner_mesh_file_name = mesh_utils.format_mesh_file_name(partner_mesh_id, cfg.SOURCE.OBJ_POSE_NAME, suffix="mhx2")
    partner_obj_file_path = os.path.join(cfg.SOURCE.ROOT, partner_mesh_file_name)
    return partner_obj_file_path
def set_partner_location_and_rotation(cfg, person_location, partner_obj_person, azim):
    """Place the partner roughly opposite the camera azimuth, with jitter.

    The partner is positioned at (camera azimuth + 180) +/- 45 degrees around
    the main person; its distance grows as it gets closer to directly behind
    the person (smaller |relative_azim|). The z rotation is drawn from one of
    two ranges, roughly "facing forward" or "turned ~180 degrees" (~pi rad).
    """
    opposite_azim = (azim + 180) % 360
    relative_azim = random.randint(-45, 45)
    target_azim = opposite_azim + relative_azim
    distance = random.uniform(4, 6) + (45 - abs(relative_azim)) * 0.1
    # NOTE(review): `elev` is assigned but unused — the literal 0 is passed below.
    elev = 0
    partner_obj_person.location = geometry.calculate_target_location(person_location, distance, target_azim, 0)
    if random.random() < 0.5:
        partner_obj_person.rotation_euler.z = random.uniform(-0.5, 0.5)
    else:
        partner_obj_person.rotation_euler.z = random.uniform(2.64, 3.64)
| 1,130 | 40.888889 | 119 | py |
tagperson-blender | tagperson-blender-master/blendereid/core/process.py |
import os
import bpy
from tqdm import tqdm
import numpy as np
from blendereid.core import background, compositing, camera_config, render_config, pose_manager, world_config, shield, attribute_manager
from blendereid.utils import bpy_utils, mesh_utils, resume_utils, geometry
from blendereid.schema.attribute import Attribute
from blendereid.core import light_config
from blendereid.core import expand
def generate_multiple_persons(cfg):
    """Batch entry point: render every mesh id in [FIRST_INDEX, LAST_INDEX].

    Builds the compositor graph, preloads backgrounds / shields / attribute
    distributions once, then renders each person mesh via
    render_one_person_v2. When PROCESS.RESUME is on, meshes whose outputs
    already exist are skipped.
    """
    # build temp background compositor logic
    compositing.compose_render_nodes()
    if not cfg.BACKGROUND.USE_CAMERA_GROUP:
        img_bg_list = background.load_multiple_bg_imgs(cfg)
    else:
        # NOTE: img_bg_list may be a dict keyed by camera id in this branch
        img_bg_list = background.load_multiple_bg_imgs_group_by_camera(cfg)
    bg_image_info = None # cached image -> x,y,percentage info
    if cfg.BACKGROUND.FIX_RESOLUTION_PER_IMAGE.ENABLED:
        bg_image_info = background.load_bg_image_info(cfg)
    img_bg_map = background.get_img_bg_map(cfg, img_bg_list)
    img_shield_list = None
    if cfg.COMPOSITE.SHIELD.ENABLE:
        img_shield_list = shield.load_multiple_shield_imgs(cfg.COMPOSITE.SHIELD.ROOT)
    img_shield_v2_list = None
    shield_image_info = None # cached image -> x,y,percentage info
    if cfg.COMPOSITE.SHIELD_V2.ENABLED:
        img_shield_v2_list = shield.load_shield_v2_imgs(cfg)
        if cfg.COMPOSITE.SHIELD_V2.FIX_RESOLUTION_PER_IMAGE.ENABLED:
            shield_image_info = shield.load_shield_image_info(cfg)
    img_shield_v2_map = shield.get_img_shield_map(cfg, img_shield_v2_list)
    # attribute_distribution (None when the feature is disabled)
    attribute_distribution_list = attribute_manager.load_attribute_distribution_file(cfg)
    render_config.setup_basic_render(cfg)
    for mesh_id in tqdm(range(cfg.PROCESS.FIRST_INDEX, cfg.PROCESS.LAST_INDEX+1)):
        # resume: skip meshes whose rendering results already exist on disk
        if cfg.PROCESS.RESUME == True:
            render_results_exist = resume_utils.check_rendering_result_exist(cfg, mesh_id)
            if render_results_exist:
                print(f"Mesh {mesh_id} rendered, skip...")
                continue
        mesh_file_name = mesh_utils.format_mesh_file_name(mesh_id, cfg.SOURCE.OBJ_POSE_NAME, suffix="mhx2")
        obj_file_path = os.path.join(cfg.SOURCE.ROOT, mesh_file_name)
        # render_one_person(cfg, obj_file_path, img_bg_list, bg_image_info, img_shield_list, img_shield_v2_list, shield_image_info)
        render_one_person_v2(cfg, obj_file_path, img_bg_list, img_bg_map, bg_image_info, img_shield_list, img_shield_v2_list, img_shield_v2_map, shield_image_info, attribute_distribution_list)
def render_one_person_v2(cfg, obj_file_path, img_bg_list, img_bg_map, bg_image_info, img_shield_list, img_shield_v2_list, img_shield_v2_map, shield_image_info, attribute_distribution_list):
    """Render all images for one person mesh.

    Pipeline: load the mesh, plan every image's render parameters up front,
    optionally expand / override them, then render each attribute and
    (when cfg.ATTRIBUTE.ENABLED) save the attribute JSON next to its image.
    The mesh object is removed from the scene afterwards.
    """
    # 1. load_obj
    obj_key, obj_person = load_person_obj(cfg, obj_file_path)
    # 2. generate render_params
    render_attribute_list = compose_render_params_list(cfg, obj_person, obj_file_path, img_bg_list, bg_image_info, img_shield_v2_list, shield_image_info)
    # 2-2. possible expand (augmented variants of each planned image)
    if cfg.EXPERIMENT.EXPAND.ENABLED:
        render_attribute_list = expand.expand_render_attribute_list(cfg, render_attribute_list)
    # 3. render & record_attribute
    for render_attribute in render_attribute_list:
        # if use the attribute sampled from the distribution
        if cfg.ATTRIBUTE.USE_DISTRIBUTION_FILE.RANDOM_SAMPLE.ENABLED:
            render_attribute = attribute_manager.attempt_override_attribute(render_attribute, attribute_distribution_list)
        render_one_person_by_render_params(obj_person, render_attribute, img_bg_map, img_shield_v2_map)
        if cfg.ATTRIBUTE.ENABLED:
            attribute_save_name = os.path.basename(render_attribute.save_path).replace(".jpg", ".json")
            attribute_save_path = attribute_manager.generate_attribute_save_path(cfg, render_attribute.mesh_id, render_attribute.camera_idx, render_attribute.seq_idx, save_name=attribute_save_name)
            attribute_manager.save_attribute_dict(cfg, attribute_save_path, render_attribute)
    # remove obj for next render
    bpy_utils.remove_object_v2(obj_key=obj_key)
def load_person_obj(cfg, obj_file_path):
    """Import a person mesh into the scene; return (object key, bpy object).

    Also normalizes material alpha channels so the body does not render with
    unintended transparency.
    """
    # TODO: add partner logic
    # load and fetch obj for current person
    mesh_utils.load_object(obj_file_path)
    obj_key = mesh_utils.get_obj_key(obj_file_path)
    obj_person = bpy.data.objects[obj_key]
    # correct alpha for materials to avoid incorrect transparent
    bpy_utils.correct_alpha_channel()
    return obj_key, obj_person
def compose_render_params_list(cfg,
                               obj_person,
                               obj_file_path,
                               img_bg_list,
                               bg_image_info,
                               img_shield_v2_list,
                               shield_image_info
                               ):
    """Plan every image for one person and return a list of Attribute objects.

    Samples CAMERA_COUNT_PER_ID distinct camera ids, and for each camera
    fetches IMAGE_COUNT_PER_CAMERA poses; each pose becomes one Attribute
    capturing camera / light / background / resolution / shield / world-color
    / gamma parameters plus the output save path. No rendering happens here.
    """
    # select c_i cameras from `total_camera_count`, and fetch m images for each camera
    total_camera_count = cfg.PROCESS.TOTAL_CAMERA_COUNT
    camera_count_per_id = cfg.PROCESS.CAMERA_COUNT_PER_ID
    image_count_per_camera = cfg.PROCESS.IMAGE_COUNT_PER_CAMERA
    camera_ids = np.random.choice(range(total_camera_count), camera_count_per_id, replace=False)
    camera_ids.sort()
    seq_idx = 0
    render_attribute_list = []
    for camera_idx in camera_ids:
        pose_paths = pose_manager.fetch_pose_paths_in_one_camera(cfg, image_count_per_camera)
        for pose_path in pose_paths:
            # compose one image
            seq_idx += 1
            # camera: distance, elev, azim, cre_x_bia, ...
            camera_azim = camera_config.get_random_camera_azim(cfg)
            camera_elev = camera_config.get_random_camera_elev(cfg)
            camera_distance = camera_config.get_random_camera_distance(cfg, camera_elev, obj_person)
            cre_x_bias, cre_y_bias, cre_z_bias = camera_config.get_adjusted_camera_rotation(cfg, obj_person, camera_elev)
            # light placement is derived from the camera placement
            light_azim = light_config.get_random_light_azim(cfg, camera_azim)
            light_elev = light_config.get_random_light_elev(cfg, camera_elev)
            light_distance = light_config.get_random_light_distance(cfg, camera_distance)
            # background
            # TODO: use a pure get method
            bg_name = background.random_get_one_bg_name(cfg, img_bg_list, camera_idx)
            # pose
            pose_name = os.path.basename(pose_path)
            # render_resolution (optionally fixed per background image)
            if not cfg.BACKGROUND.FIX_RESOLUTION_PER_IMAGE.ENABLED:
                resolution_x, resolution_y, resolution_percentage = render_config.get_random_render_resolution(cfg)
            else:
                resolution_x, resolution_y, resolution_percentage = render_config.get_random_render_resolution_by_bg_image(cfg, bg_name, bg_image_info)
            img_width = int(resolution_x * resolution_percentage / 100)
            img_height = int(resolution_y * resolution_percentage / 100)
            # shield
            # NOTE(review): the len() check assumes random_select_one_shield_v2_name
            # never returns None here — confirm against its implementation
            img_shield_v2_name = shield.random_select_one_shield_v2_name(cfg, img_shield_v2_list)
            if len(img_shield_v2_name) == 2 and cfg.COMPOSITE.SHIELD_V2.FIX_RESOLUTION_PER_IMAGE.ENABLED:
                resolution_x, resolution_y, resolution_percentage = render_config.get_random_render_resolution_by_bg_image(cfg, img_shield_v2_name[0], shield_image_info)
                img_width = int(resolution_x * resolution_percentage / 100)
                img_height = int(resolution_y * resolution_percentage / 100)
            # world_color
            world_color, world_color_to_background = world_config.get_world_background_color_by_camera_id(cfg, camera_idx)
            # gamma
            gamma_value = compositing.random_select_gamma_value(cfg, camera_idx)
            # meth_id
            mesh_id = mesh_utils.get_mesh_id(obj_file_path)
            # save_path
            save_path = render_config.generate_save_path(cfg, mesh_id, camera_idx, seq_idx)
            cur_attribute = Attribute(
                mesh_id,
                camera_azim,
                camera_elev,
                camera_distance,
                light_azim,
                light_elev,
                light_distance,
                background=bg_name,
                pose=pose_name,
                camera_idx=camera_idx,
                img_width=img_width,
                img_height=img_height,
                partner_exist=False, # TODO
                partner_mesh_id_list=[], # TODO
                cre_x_bias=cre_x_bias,
                cre_y_bias=cre_y_bias,
                cre_z_bias=cre_z_bias,
                world_color=world_color,
                world_color_to_background=world_color_to_background,
                seq_idx=seq_idx,
                pose_path=pose_path,
                save_path=save_path,
                img_shield_v2_name=img_shield_v2_name,
                gamma_value=gamma_value
            )
            render_attribute_list.append(cur_attribute)
    return render_attribute_list
def render_one_person_by_render_params(obj_person, render_attribute, img_bg_map, img_shield_v2_map):
    """Execute one render: apply every field of `render_attribute` to the scene.

    Sets pose, camera, light, background, shield, world color, gamma and the
    render resolution in sequence, then writes the image to
    render_attribute.save_path.
    """
    camera = bpy.data.objects['Camera']
    render = bpy.data.scenes['Scene'].render
    light = bpy.data.objects['Light']
    # set pose
    bpy.ops.mcp.load_pose(filepath=render_attribute.pose_path)
    pose_manager.apply_transform_to_bones(obj_person)
    bpy.context.view_layer.update()
    # set camera position on the sphere around the person, then its rotation
    person_location = (obj_person.location[0], obj_person.location[1], obj_person.location[2])
    camera_location = geometry.calculate_target_location(person_location, render_attribute.camera_distance, render_attribute.camera_azim, render_attribute.camera_elev)
    camera.location = camera_location
    bpy.context.view_layer.update()
    print(f"person_location: {person_location}")
    print(f"camera_location: {camera_location}")
    print(f"camera_rotation: {camera.rotation_euler}")
    camera_config.set_camera_roration(camera, obj_person, render_attribute.cre_x_bias, render_attribute.cre_y_bias, render_attribute.cre_z_bias)
    print(f"camera_rotation: {camera.rotation_euler}")
    # set light
    light.location = geometry.calculate_target_location(obj_person.location, render_attribute.light_distance, render_attribute.light_azim, render_attribute.light_elev)
    bpy.context.view_layer.update()
    # set background
    background.set_background_node_by_bg_name(img_bg_map, render_attribute.background)
    # set shield
    img_shield_v2_name = render_attribute.img_shield_v2_name
    shield.set_shield_v2_info_by_name(img_bg_map, img_shield_v2_map, img_shield_v2_name, render_attribute.img_width, render_attribute.img_height)
    # set world_color
    world_config.attemp_apply_world_color_v2(render_attribute.world_color, render_attribute.world_color_to_background)
    # set gamma
    compositing.set_gamma_value(render_attribute.gamma_value)
    # render at the planned resolution (percentage fixed at 100)
    render_config.modify_render_config_v2(render_attribute.img_width, render_attribute.img_height, 100)
    render.filepath = render_attribute.save_path
    bpy.ops.render.render(write_still=True)
| 11,011 | 42.87251 | 197 | py |
tagperson-blender | tagperson-blender-master/blendereid/core/process_v1.py |
import os
import bpy
import random
from blendereid.core import background, compositing, camera_config, render_config, pose_manager, world_config, \
attribute_manager
from blendereid.core import partner
from blendereid.utils import bpy_utils, mesh_utils, determinastic_utils, geometry
from blendereid.schema.attribute import Attribute
def render_one_person(cfg, obj_file_path, img_bg_list, bg_image_info, img_shield_list, img_shield_v2_list, shield_image_info, determinastic_params=None):
    """Legacy (v1) per-person render loop.

    Loads the person mesh (optionally plus a random partner mesh), derives a
    base camera distance from the person's height, then for each sampled pose
    renders CONTINUOUS_COUNT azimuth-perturbed shots. `determinastic_params`
    can pin camera distance / elev / azim to fixed values.
    """
    partner_obj_key = None
    partner_obj_person = None
    if cfg.PARTNER.ENABLED:
        if random.random() < cfg.PARTNER.PROB:
            partner_obj_file_path = partner.random_select_one_partner(cfg)
            mesh_utils.load_object(partner_obj_file_path)
            partner_obj_key = mesh_utils.get_obj_key(partner_obj_file_path)
            partner_obj_person = bpy.data.objects[partner_obj_key]
            print(f"partner_obj_person's location: {partner_obj_person.location}")
            print(f"partner_obj_person's dimensions: {partner_obj_person.dimensions}")
    # load and fetch obj for current person
    mesh_utils.load_object(obj_file_path)
    obj_key = mesh_utils.get_obj_key(obj_file_path)
    obj_person = bpy.data.objects[obj_key]
    # correct alpha for materials to avoid incorrect transparent
    bpy_utils.correct_alpha_channel()
    # start to render
    # TODO: abstract this into a method
    distance_person_weight_factor = cfg.CAMERA.DISTANCE.PERSON_HEIGHT_FACTOR
    if cfg.CAMERA.OCCATIONAL_JUMP.ENABLE:
        if random.random() < cfg.CAMERA.OCCATIONAL_JUMP.PROB:
            distance_person_weight_factor = cfg.CAMERA.OCCATIONAL_JUMP.DISTANCE.PERSON_HEIGHT_FACTOR
    # bug fix: use the (possibly jump-overridden) factor — the old code read
    # cfg.CAMERA.DISTANCE.PERSON_HEIGHT_FACTOR again, so OCCATIONAL_JUMP had no effect
    distance = (0.2 + distance_person_weight_factor * obj_person.dimensions[2]) + random.uniform(cfg.CAMERA.DISTANCE.RANDOM.LOWER_BOUND, cfg.CAMERA.DISTANCE.RANDOM.UPPER_BOUND) # for mhx2
    image_count_cur_id = cfg.PROCESS.IMAGE_COUNT_PER_ID + random.uniform(cfg.PROCESS.IMAGE_COUNT_PER_ID_RANDOM_LOWER_BOUND, cfg.PROCESS.IMAGE_COUNT_PER_ID_RANDOM_UPPER_BOUND)
    pose_count = int(image_count_cur_id / cfg.PROCESS.CONTINUOUS_COUNT)
    pose_names = pose_manager.fetch_pose_names(cfg, pose_count)
    pose_paths = [os.path.join(cfg.POSE.ROOT, f'{pose_name}') for pose_name in pose_names]
    idx_offset = 1
    for pose_idx, pose_path in enumerate(pose_paths):
        camera_parameters = []
        elev = camera_config.get_random_camera_elev(cfg)
        # higher elevations push the camera further back, scaled by person height
        cur_distance = distance + float(elev - 10) * 0.12 * (1+(max(0, obj_person.dimensions[2] - 15) / 100))
        # [Determinstic Value] set camera_distance
        cur_distance = determinastic_utils.set_variable_according_to_determinastic_params(cur_distance, 'camera_distance', determinastic_params)
        azim_rand = random.randint(0, 360)
        if cfg.CAMERA.AVERAGE_AZIM.ENABLED:
            azim_rand = int(pose_idx * (360 / len(pose_paths)))
        for continuous_idx in range(0, cfg.PROCESS.CONTINUOUS_COUNT):
            disturb = random.randint(-10, 10)
            azim = (azim_rand + disturb) % 360
            if cfg.DEBUG.ONE_IMAGE_PER_PERSON:
                azim = -60
            # [Determinstic Value] set camera_elev
            elev = determinastic_utils.set_variable_according_to_determinastic_params(elev, 'camera_elev', determinastic_params)
            # [Determinstic Value] set camera_azim
            azim = determinastic_utils.set_variable_according_to_determinastic_params(azim, 'camera_azim', determinastic_params)
            camera_parameters.append((cur_distance, elev, azim))
        idx_offset = render_one_person_one_pose(cfg, obj_file_path, obj_person, pose_path, camera_parameters, idx_offset=idx_offset, img_bg_list=img_bg_list, bg_image_info=bg_image_info, img_shield_list=img_shield_list, img_shield_v2_list=img_shield_v2_list, shield_image_info=shield_image_info, partner_obj_person=partner_obj_person)
        if cfg.DEBUG.ONE_IMAGE_PER_PERSON:
            break
    # remove obj for next render
    bpy_utils.remove_object_v2(obj_key=obj_key)
    bpy_utils.remove_object_v2(obj_key=partner_obj_key)
def render_one_person_one_pose(cfg, obj_file_path, obj_person, pose_path, camera_parameters, idx_offset=0, img_bg_list=[], bg_image_info=None, img_shield_list=[], img_shield_v2_list=[], shield_image_info=None, partner_obj_person=None):
    """Render one loaded person in a single pose from each camera viewpoint.

    Args:
        cfg: global config node.
        obj_file_path: path of the mesh file the person was loaded from
            (used to derive mesh_id and to detect `.mhx2` rigs).
        obj_person: the Blender object of the person.
        pose_path: pose file applied when the mesh is `.mhx2`.
        camera_parameters: list of (distance, elev, azim) viewpoints.
        idx_offset: first image sequence index to use.
        img_bg_list / bg_image_info: candidate backgrounds and their fixed
            resolution info.  NOTE(review): mutable default arguments -- they
            appear to be read-only here, but confirm callees never mutate them.
        img_shield_list / img_shield_v2_list / shield_image_info: occlusion
            ("shield") overlay candidates.
        partner_obj_person: optional second person placed near the subject.

    Returns:
        The next unused image sequence index.
    """
    # Only `.mhx2` meshes are rigged, so a pose file can be applied to them.
    load_pos = os.path.splitext(os.path.basename(obj_file_path))[1] == '.mhx2'
    if load_pos:
        bpy.ops.mcp.load_pose(filepath=pose_path)
        pose_manager.apply_transform_to_bones(obj_person)
        bpy.context.view_layer.update()
    print(f"object_person's location: {obj_person.location}")
    print(f"object_person's dimensions: {obj_person.dimensions}")
    camera = bpy.data.objects['Camera']
    render = bpy.data.scenes['Scene'].render
    light = bpy.data.objects['Light']
    idx = idx_offset
    for (distance, elev, azim) in camera_parameters:
        # TODO: remove this: no use, here keep same with original code
        camera_index = random.randint(0, 5)
        # TODO: modify this: in fact, it is idx-1, here keep same with original code
        camera_idx = int((idx+1) / cfg.PROCESS.IMAGE_COUNT_PER_CAMERA)
        # prw_background.random_crop_background_from_prw(img_bg_list, camera_index)
        bg_name = background.random_select_one_background_img(cfg, img_bg_list, camera_idx)
        # print(f"distance:{distance}, elev: {elev}, azim: {azim}")
        person_location = (obj_person.location[0], obj_person.location[1], obj_person.location[2])
        camera.location = geometry.calculate_target_location(person_location, distance, azim, elev)
        if partner_obj_person is not None:
            if random.random() < cfg.PARTNER.PROB_IN_ONE_PERSON:
                partner.set_partner_location_and_rotation(cfg, person_location, partner_obj_person, azim)
            else:
                # Park the partner far away so it stays out of frame for this shot.
                partner_obj_person.location = (9999, 9999, 9999)
        # print(light.location)
        # light.location = geometry.calculate_target_location(obj_person.location, distance/1.5, azim, 75)
        light_azim = (azim + random.randint(cfg.LIGHT.AZIM.RANDOM_LOWER_BOUND, cfg.LIGHT.AZIM.RANDOM_UPPER_BOUND)) % 360
        light_elev = cfg.LIGHT.ELEV.BASE
        light_distance = distance / 1.5
        light.location = geometry.calculate_target_location(obj_person.location, light_distance, light_azim, light_elev)
        # light.location = geometry.calculate_target_location(obj_person.location, distance/1.5, random.randint(0, 359), 75)
        bpy.context.view_layer.update()
        cre_x_bias, cre_y_bias, cre_z_bias = camera_config.adjust_camera_roration(camera, obj_person, cfg, elev, distance)
        mesh_id = mesh_utils.get_mesh_id(obj_file_path)
        render.filepath = render_config.generate_save_path(cfg, mesh_id, camera_idx, idx)
        if not cfg.BACKGROUND.FIX_RESOLUTION_PER_IMAGE.ENABLED:
            render_config.modify_render_config(cfg)
        else:
            # Keep each background image at its recorded, fixed resolution.
            render_config.modify_render_config_by_bg_image(cfg, os.path.basename(bg_name.filepath), bg_image_info)
        img_shield = compositing.random_select_one_shield(cfg, img_shield_list)
        img_shield_v2 = compositing.random_select_one_shield_v2(cfg, img_shield_v2_list)
        compositing.adjust_render_nodes(cfg, img_shield, img_shield_v2, shield_image_info)
        world_config.attemp_apply_world_color(cfg, camera_idx)
        bpy.ops.render.render(write_still=True)
        partner_exist = False if partner_obj_person is None else True
        partner_mesh_id_list = [] if partner_obj_person is None else [mesh_utils.get_mesh_id_from_obj_name(partner_obj_person.name)]
        # record attribute
        if cfg.ATTRIBUTE.ENABLED:
            # Effective output size = configured resolution scaled by the percentage.
            img_percentage = render.resolution_percentage
            img_width = int(render.resolution_x * img_percentage / 100)
            img_height = int(render.resolution_y * img_percentage / 100)
            background_name = os.path.basename(bg_name.filepath) if bg_name is not None else ""
            attribute_obj = Attribute(
                mesh_id=mesh_id,
                camera_azim=azim,
                camera_elev=elev,
                camera_distance=distance,
                light_azim=light_azim,
                light_elev=light_elev,
                light_distance=light_distance,
                # TODO: verify basename is enough
                background=background_name,
                pose=os.path.basename(pose_path),
                camera_idx=camera_idx,
                img_width=img_width,
                img_height=img_height,
                partner_exist=partner_exist,
                partner_mesh_id_list=partner_mesh_id_list,
                cre_x_bias=cre_x_bias,
                cre_y_bias=cre_y_bias,
                cre_z_bias=cre_z_bias
            )
            attribute_save_path = attribute_manager.generate_attribute_save_path(cfg, mesh_id, camera_idx, idx)
            attribute_manager.save_attribute_dict(cfg, attribute_save_path, attribute_obj)
        idx += 1
        if cfg.DEBUG.ONE_IMAGE_PER_PERSON:
            break
        # I think this code is too dirty, but can not find alternative now
        if cfg.PROCESS.IMAGE_COUNT_PER_BACKGROUND > 1:
            # Re-render the same background with jittered pose/camera/resolution.
            for bgc in range(1, cfg.PROCESS.IMAGE_COUNT_PER_BACKGROUND):
                # vary pose
                if load_pos and cfg.PROCESS.POSE_CHANGE:
                    pose_path = pose_manager.find_adjacent_pose_path(pose_path)
                    bpy.ops.mcp.load_pose(filepath=pose_path)
                    pose_manager.apply_transform_to_bones(obj_person)
                    bpy.context.view_layer.update()
                # random camera
                distance = distance * random.uniform(0.80, 1.20)
                camera.location = geometry.calculate_target_location(person_location, distance, azim, elev)
                cre_x_bias, cre_y_bias, cre_z_bias = camera_config.adjust_camera_roration(camera, obj_person, cfg, elev, distance)
                # random resolution
                if not cfg.BACKGROUND.FIX_RESOLUTION_PER_IMAGE.ENABLED:
                    render_config.modify_render_config(cfg)
                else:
                    render_config.modify_render_config_by_bg_image(cfg, os.path.basename(bg_name.filepath), bg_image_info)
                compositing.adjust_render_nodes(cfg, img_shield, img_shield_v2, shield_image_info)
                # render
                render.filepath = render_config.generate_save_path(cfg, mesh_id, camera_idx, idx)
                bpy.ops.render.render(write_still=True)
                # record attribute
                if cfg.ATTRIBUTE.ENABLED:
                    img_percentage = render.resolution_percentage
                    img_width = int(render.resolution_x * img_percentage / 100)
                    img_height = int(render.resolution_y * img_percentage / 100)
                    background_name = os.path.basename(bg_name.filepath) if bg_name is not None else ""
                    attribute_obj = Attribute(
                        mesh_id=mesh_id,
                        camera_azim=azim,
                        camera_elev=elev,
                        camera_distance=distance,
                        light_azim=light_azim,
                        light_elev=light_elev,
                        light_distance=light_distance,
                        # TODO: verify basename is enough
                        background=background_name,
                        pose=os.path.basename(pose_path),
                        camera_idx=camera_idx,
                        img_width=img_width,
                        img_height=img_height,
                        partner_exist=partner_exist,
                        partner_mesh_id_list=partner_mesh_id_list,
                        cre_x_bias=cre_x_bias,
                        cre_y_bias=cre_y_bias,
                        cre_z_bias=cre_z_bias
                    )
                    attribute_save_path = attribute_manager.generate_attribute_save_path(cfg, mesh_id, camera_idx, idx)
                    attribute_manager.save_attribute_dict(cfg, attribute_save_path, attribute_obj)
                idx +=1
    return idx
| 12,514 | 50.714876 | 334 | py |
tagperson-blender | tagperson-blender-master/blendereid/core/render_config.py | import os
import bpy
import random
def compose_output_dir(cfg):
    """Resolve the directory rendered person images are written to.

    An explicitly configured OUTPUT_DIR_FOR_IMAGE takes precedence; otherwise
    fall back to an "output_persons" folder under the generic OUTPUT_DIR.
    """
    explicit_dir = cfg.OUTPUT_DIR_FOR_IMAGE
    if explicit_dir == '':
        return os.path.join(cfg.OUTPUT_DIR, "output_persons")
    return explicit_dir
def generate_save_path(cfg, mesh_id, camera_index, sequence_id):
    """Build the full output path for one rendered image.

    Uses a Market-1501-style naming scheme:
    <mesh_id>_c<camera_index>s1_<sequence_id>.jpg
    """
    file_name = f"{mesh_id}_c{camera_index}s1_{int(sequence_id)}.jpg"
    return f"{compose_output_dir(cfg)}/{file_name}"
def setup_basic_render(cfg):
    """Apply the baseline render settings: Cycles engine, JPEG output,
    128x256 resolution at 50%, transparent film so a background can be
    composited behind the person.

    `cfg` is currently unused here; per-image overrides are applied later by
    the modify_render_config* functions.
    """
    render = bpy.data.scenes['Scene'].render
    render.resolution_x = 128
    render.resolution_y = 256
    render.resolution_percentage = 50
    # Low sample count keeps individual renders fast.
    bpy.context.scene.cycles.samples = 8
    # render.tile_x = 8
    # render.tile_y = 8
    render.film_transparent = True
    bpy.context.scene.render.engine = 'CYCLES'
    bpy.context.scene.render.image_settings.file_format='JPEG'
def modify_render_config(cfg):
    # TODO: modify to `modify_render_config_v2` logic, modify render by given params
    """Randomize the scene render resolution (x, y, percentage) for the next image."""
    render = bpy.data.scenes['Scene'].render
    render.resolution_x = get_random_resolution_x(cfg)
    # resolution_y may be derived from the freshly assigned resolution_x (ratio mode).
    render.resolution_y = get_random_resolution_y(cfg, render.resolution_x)
    render.resolution_percentage = get_random_resolution_percentage(cfg)
def get_random_resolution_x(cfg):
    """Sample a horizontal render resolution: BASE plus a uniform integer jitter."""
    x_cfg = cfg.RENDER.RESOLUTION_X
    jitter = random.randint(x_cfg.RANDOM_LOWER_BOUND, x_cfg.RANDOM_UPPER_BOUND)
    return x_cfg.BASE + jitter
def get_random_resolution_y(cfg, resolution_x):
    """Sample a vertical render resolution.

    In ratio mode the height is derived from `resolution_x` by a random
    aspect ratio; otherwise it is an absolute BASE plus uniform integer jitter.
    """
    y_cfg = cfg.RENDER.RESOLUTION_Y
    if y_cfg.USE_RATIO.ENABLED:
        ratio = random.uniform(y_cfg.USE_RATIO.RANDOM_LOWER_BOUND, y_cfg.USE_RATIO.RANDOM_UPPER_BOUND)
        return int(resolution_x * ratio)
    return y_cfg.BASE + random.randint(y_cfg.RANDOM_LOWER_BOUND, y_cfg.RANDOM_UPPER_BOUND)
def get_random_resolution_percentage(cfg):
    """Sample the render resolution percentage: BASE plus a uniform integer jitter."""
    pct_cfg = cfg.RENDER.RESOLUTION_PERCENTAGE
    return pct_cfg.BASE + random.randint(pct_cfg.RANDOM_LOWER_BOUND, pct_cfg.RANDOM_UPPER_BOUND)
def modify_render_config_by_bg_image(cfg, bg_image_name, bg_image_info):
    # TODO: modify to `modify_render_config_v2` logic, modify render by given params
    """
    (2021-06-28) Set the render x, y and percentage from the values recorded
    for `bg_image_name`, so the same background always appears at the same
    resolution.  Falls back to random resolutions (with a warning) when the
    background has no recorded entry.
    """
    render = bpy.data.scenes['Scene'].render
    if bg_image_info is None or bg_image_name not in bg_image_info:
        print(f"Warning: bg_image_name `{bg_image_name}` not found...")
        render.resolution_x = get_random_resolution_x(cfg)
        render.resolution_y = get_random_resolution_y(cfg, render.resolution_x)
        render.resolution_percentage = get_random_resolution_percentage(cfg)
    else:
        render.resolution_x = bg_image_info[bg_image_name]['x']
        render.resolution_y = bg_image_info[bg_image_name]['y']
        render.resolution_percentage = bg_image_info[bg_image_name]['p']
def get_random_render_resolution(cfg):
    """Sample and return an (x, y, percentage) render-resolution triple."""
    x = get_random_resolution_x(cfg)
    y = get_random_resolution_y(cfg, x)
    pct = get_random_resolution_percentage(cfg)
    return x, y, pct
def get_random_render_resolution_by_bg_image(cfg, bg_image_name, bg_image_info):
    """Return the (x, y, percentage) triple recorded for a background image.

    Falls back to a freshly sampled random resolution (with a warning) when
    the background image has no recorded entry.
    """
    if bg_image_info is not None and bg_image_name in bg_image_info:
        entry = bg_image_info[bg_image_name]
        return entry['x'], entry['y'], entry['p']
    print(f"Warning: bg_image_name `{bg_image_name}` not found...")
    return get_random_render_resolution(cfg)
def modify_render_config_v2(resolution_x, resolution_y, resolution_percentage):
    """Set the scene render resolution to explicitly provided values."""
    render = bpy.data.scenes['Scene'].render
    render.resolution_x = resolution_x
    render.resolution_y = resolution_y
    render.resolution_percentage = resolution_percentage
def enable_cuda_render():
    """Enable GPU (CUDA) rendering for Cycles and turn on every compute device.

    NOTE: known to be buggy -- enabling this has been observed to slow
    rendering down rather than speed it up.
    """
    bpy.context.scene.cycles.device = 'GPU'
    bpy.context.preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
    bpy.data.scenes['Scene'].cycles.device = 'GPU'
    # Enable and list all devices, or optionally disable CPU
    print("----------------------------------------------")
    for devices in bpy.context.preferences.addons['cycles'].preferences.get_devices():
        for d in devices:
            d.use = True
            # if d.type == 'CPU':
            #     d.use = False
            print("Device '{}' type {} : {}" . format(d.name, d.type, d.use))
    print("----------------------------------------------")
| 4,982 | 42.710526 | 188 | py |
tagperson-blender | tagperson-blender-master/blendereid/config/config.py | # encoding: utf-8
import logging
import os
from typing import Any
import yaml
from yacs.config import CfgNode as _CfgNode
# Reserved key in a yaml config that names the base config file to inherit from.
BASE_KEY = "_BASE_"


class CfgNode(_CfgNode):
    """A yacs CfgNode extended with `_BASE_` config-file inheritance and
    protected `COMPUTED_*` attributes."""

    @staticmethod
    def load_yaml_with_base(filename: str, allow_unsafe: bool = False):
        """
        Just like `yaml.load(open(filename))`, but inherit attributes from its
        `_BASE_`.
        Args:
            filename (str): the file name of the current config. Will be used to
                find the base config file.
            allow_unsafe (bool): whether to allow loading the config file with
                `yaml.unsafe_load`.
        Returns:
            (dict): the loaded yaml
        """
        with open(filename, "r") as f:
            try:
                cfg = yaml.safe_load(f)
            except yaml.constructor.ConstructorError:
                if not allow_unsafe:
                    raise
                logger = logging.getLogger(__name__)
                logger.warning(
                    "Loading config {} with yaml.unsafe_load. Your machine may "
                    "be at risk if the file contains malicious content.".format(
                        filename
                    )
                )
                f.close()
                with open(filename, "r") as f:
                    cfg = yaml.unsafe_load(f)

        def merge_a_into_b(a, b):
            # merge dict a into dict b. values in a will overwrite b.
            for k, v in a.items():
                if isinstance(v, dict) and k in b:
                    assert isinstance(
                        b[k], dict
                    ), "Cannot inherit key '{}' from base!".format(k)
                    merge_a_into_b(v, b[k])
                else:
                    b[k] = v

        if BASE_KEY in cfg:
            base_cfg_file = cfg[BASE_KEY]
            if base_cfg_file.startswith("~"):
                base_cfg_file = os.path.expanduser(base_cfg_file)
            if not any(
                map(base_cfg_file.startswith, ["/", "https://", "http://"])
            ):
                # the path to base cfg is relative to the config file itself.
                base_cfg_file = os.path.join(
                    os.path.dirname(filename), base_cfg_file
                )
            # Recursively resolve the base file (which may itself have a _BASE_).
            base_cfg = CfgNode.load_yaml_with_base(
                base_cfg_file, allow_unsafe=allow_unsafe
            )
            del cfg[BASE_KEY]

            merge_a_into_b(cfg, base_cfg)
            return base_cfg
        return cfg

    def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = False):
        """
        Merge configs from a given yaml file.
        Args:
            cfg_filename: the file name of the yaml config.
            allow_unsafe: whether to allow loading the config file with
                `yaml.unsafe_load`.
        """
        loaded_cfg = CfgNode.load_yaml_with_base(
            cfg_filename, allow_unsafe=allow_unsafe
        )
        loaded_cfg = type(self)(loaded_cfg)
        self.merge_from_other_cfg(loaded_cfg)

    # Forward the following calls to base, but with a check on the BASE_KEY.
    def merge_from_other_cfg(self, cfg_other):
        """
        Args:
            cfg_other (CfgNode): configs to merge from.
        """
        assert (
            BASE_KEY not in cfg_other
        ), "The reserved key '{}' can only be used in files!".format(BASE_KEY)
        return super().merge_from_other_cfg(cfg_other)

    def merge_from_list(self, cfg_list: list):
        """
        Args:
            cfg_list (list): list of configs to merge from.
        """
        keys = set(cfg_list[0::2])
        assert (
            BASE_KEY not in keys
        ), "The reserved key '{}' can only be used in files!".format(BASE_KEY)
        return super().merge_from_list(cfg_list)

    def __setattr__(self, name: str, val: Any):
        # COMPUTED_* attributes are write-once: re-assigning a different value raises.
        if name.startswith("COMPUTED_"):
            if name in self:
                old_val = self[name]
                if old_val == val:
                    return
                raise KeyError(
                    "Computed attributed '{}' already exists "
                    "with a different value! old={}, new={}.".format(
                        name, old_val, val
                    )
                )
            self[name] = val
        else:
            super().__setattr__(name, val)
def get_cfg() -> CfgNode:
    """
    Get a copy of the default config.
    """
    # Imported lazily to avoid a circular import with .defaults.
    from .defaults import _C

    return _C.clone()
| 4,518 | 32.227941 | 80 | py |
tagperson-blender | tagperson-blender-master/blendereid/config/defaults.py | from .config import CfgNode as CN
# Root of the default configuration tree.
_C = CN()
# Seed used to make rendering runs reproducible.
_C.FIX_SEED = 19

# source meshes
_C.SOURCE = CN()
_C.SOURCE.ROOT = '/path/to/.mhx2'
_C.SOURCE.OBJ_POSE_NAME = 'Standing02'

# background relative
_C.BACKGROUND = CN()
_C.BACKGROUND.ROOT = '/path/to/background_dir/'
_C.BACKGROUND.NUM_LIMIT = -1
_C.BACKGROUND.FIX_RESOLUTION_PER_IMAGE = CN()
_C.BACKGROUND.FIX_RESOLUTION_PER_IMAGE.ENABLED = False
_C.BACKGROUND.FIX_RESOLUTION_PER_IMAGE.SAVE_JSON_PATH = '/path/to/background_fix.json'
_C.BACKGROUND.USE_CAMERA_GROUP = False
_C.BACKGROUND.USE_EMPTY_BACKGROUND = False

# camera relative
_C.CAMERA = CN()
_C.CAMERA.ROTATION_EULER_X_BIAS = CN()
_C.CAMERA.ROTATION_EULER_X_BIAS.BIAS = 0.34
_C.CAMERA.ROTATION_EULER_X_BIAS.RANDOM = CN()
_C.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.ENABLE = True
_C.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.LOWER_BOUND = -0.025
_C.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.UPPER_BOUND = 0.025
_C.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.USE_GAUSS = False
_C.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.MU = 0
_C.CAMERA.ROTATION_EULER_X_BIAS.RANDOM.SIGMA = 0.05
_C.CAMERA.ROTATION_EULER_Z_BIAS = CN()
_C.CAMERA.ROTATION_EULER_Z_BIAS.BIAS = 0.0
_C.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM = CN()
_C.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.ENABLE = True
_C.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.LOWER_BOUND = -0.05  # abandon
_C.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.UPPER_BOUND = 0.05  # abandon
_C.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.USE_GAUSS = False
_C.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.MU = 0
_C.CAMERA.ROTATION_EULER_Z_BIAS.RANDOM.SIGMA = 0.01
# add in 20210704
_C.CAMERA.ROTATION_EULER_Y_BIAS = CN()
_C.CAMERA.ROTATION_EULER_Y_BIAS.BIAS = 0.0
_C.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM = CN()
_C.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.ENABLE = True
_C.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.USE_GAUSS = False
_C.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.MU = 0
_C.CAMERA.ROTATION_EULER_Y_BIAS.RANDOM.SIGMA = 0.01
_C.CAMERA.ELEV = CN()
_C.CAMERA.ELEV.BASE = 0
_C.CAMERA.ELEV.RANDOM = CN()
_C.CAMERA.ELEV.RANDOM.ENABLED = True
_C.CAMERA.ELEV.RANDOM.LOWER_BOUND = 10
_C.CAMERA.ELEV.RANDOM.UPPER_BOUND = 45
_C.CAMERA.ELEV.RANDOM.USE_GAUSS = False
_C.CAMERA.ELEV.RANDOM.MU = 0.0
_C.CAMERA.ELEV.RANDOM.SIGMA = 0.01
_C.CAMERA.DISTANCE = CN()
_C.CAMERA.DISTANCE.PERSON_HEIGHT_FACTOR = 1.40
_C.CAMERA.DISTANCE.RANDOM = CN()
_C.CAMERA.DISTANCE.RANDOM.LOWER_BOUND = -2.0
_C.CAMERA.DISTANCE.RANDOM.UPPER_BOUND = 2.0
# Occasionally "jump" the camera to an unusual configuration.
_C.CAMERA.OCCATIONAL_JUMP = CN()
_C.CAMERA.OCCATIONAL_JUMP.ENABLE = False
_C.CAMERA.OCCATIONAL_JUMP.PROB = 0.1
_C.CAMERA.OCCATIONAL_JUMP.ROTATION_EULER_X_BIAS = CN()
_C.CAMERA.OCCATIONAL_JUMP.ROTATION_EULER_X_BIAS.BIAS = 0.60
_C.CAMERA.OCCATIONAL_JUMP.DISTANCE = CN()
_C.CAMERA.OCCATIONAL_JUMP.DISTANCE.PERSON_HEIGHT_FACTOR = 1.00
_C.CAMERA.AVERAGE_AZIM = CN()
_C.CAMERA.AVERAGE_AZIM.ENABLED = False

# light
_C.LIGHT = CN()
_C.LIGHT.AZIM = CN()
_C.LIGHT.AZIM.RANDOM_LOWER_BOUND = -45
_C.LIGHT.AZIM.RANDOM_UPPER_BOUND = 45
_C.LIGHT.ELEV = CN()
_C.LIGHT.ELEV.BASE = 75
_C.LIGHT.ELEV.RANDOM = CN()
_C.LIGHT.ELEV.RANDOM.ENABLED = False
_C.LIGHT.ELEV.RANDOM.LOWER_BOUND = 0
_C.LIGHT.ELEV.RANDOM.UPPER_BOUND = 0
_C.LIGHT.ELEV.RANDOM.USE_GAUSS = True
_C.LIGHT.ELEV.RANDOM.SIGMA = 0
_C.LIGHT.ELEV.RANDOM.MU = 0
_C.LIGHT.DISTANCE = CN()
_C.LIGHT.DISTANCE.RATIO_TO_CAMERA_DISTANCE = 0.666
_C.LIGHT.DISTANCE.USE_FIXED = CN()
_C.LIGHT.DISTANCE.USE_FIXED.ENABLED = False
_C.LIGHT.DISTANCE.USE_FIXED.FIXED_VALUE = 20

# camera render
_C.RENDER = CN()
_C.RENDER.RESOLUTION_PERCENTAGE = CN()
_C.RENDER.RESOLUTION_PERCENTAGE.BASE = 50
_C.RENDER.RESOLUTION_PERCENTAGE.RANDOM_UPPER_BOUND = 40
_C.RENDER.RESOLUTION_PERCENTAGE.RANDOM_LOWER_BOUND = -40
_C.RENDER.RESOLUTION_X = CN()
_C.RENDER.RESOLUTION_X.BASE = 100
_C.RENDER.RESOLUTION_X.RANDOM_UPPER_BOUND = 20
_C.RENDER.RESOLUTION_X.RANDOM_LOWER_BOUND = -20
_C.RENDER.RESOLUTION_Y = CN()
_C.RENDER.RESOLUTION_Y.BASE = 256
_C.RENDER.RESOLUTION_Y.RANDOM_UPPER_BOUND = 40
_C.RENDER.RESOLUTION_Y.RANDOM_LOWER_BOUND = -40
# if USE_RATIO, the X.RANDOM and Y.RANDOM can be merge into one `RATION.RANDOM`
# if USE_RATIO.ENABLED == True, the RESOLUTION_X.RANDOM_UPPER_BOUND and RESOLUTION_X.RANDOM_LOWER_BOUND should be `0`
_C.RENDER.RESOLUTION_Y.USE_RATIO = CN()
_C.RENDER.RESOLUTION_Y.USE_RATIO.ENABLED = False
_C.RENDER.RESOLUTION_Y.USE_RATIO.RANDOM_UPPER_BOUND = 1.5
_C.RENDER.RESOLUTION_Y.USE_RATIO.RANDOM_LOWER_BOUND = 1.0

# composite
_C.COMPOSITE = CN()
_C.COMPOSITE.GAMMA = CN()
_C.COMPOSITE.GAMMA.RANDOM = CN()
_C.COMPOSITE.GAMMA.RANDOM.ENABLE = False
_C.COMPOSITE.GAMMA.RANDOM.BASE = 1.0
# [0.5, 2.0] is fine, 0.5 is light, 2.0 black
_C.COMPOSITE.GAMMA.RANDOM.LOWER_BOUND = -0.5
_C.COMPOSITE.GAMMA.RANDOM.UPPER_BOUND = 1.0
_C.COMPOSITE.GAMMA.RANDOM.CAMERA_BASE = False
_C.COMPOSITE.GAMMA.RANDOM.CAMERA_VALUES = []
# occlusion ("shield") overlays, version 1
_C.COMPOSITE.SHIELD = CN()
_C.COMPOSITE.SHIELD.ROOT = 'data/shield/'
_C.COMPOSITE.SHIELD.ENABLE = False
_C.COMPOSITE.SHIELD.PROB = 0.05
_C.COMPOSITE.SHIELD.SCALE = CN()
_C.COMPOSITE.SHIELD.SCALE.WIDTH_SCALE = CN()
_C.COMPOSITE.SHIELD.SCALE.WIDTH_SCALE.BASE = 1.0
_C.COMPOSITE.SHIELD.SCALE.WIDTH_SCALE.RANDOM = CN()
_C.COMPOSITE.SHIELD.SCALE.WIDTH_SCALE.RANDOM.LOWER_BOUND = 1.0
_C.COMPOSITE.SHIELD.SCALE.WIDTH_SCALE.RANDOM.UPPER_BOUND = 1.0
_C.COMPOSITE.SHIELD.SCALE.HEIGHT_SCALE = CN()
_C.COMPOSITE.SHIELD.SCALE.HEIGHT_SCALE.BASE = 0.5
_C.COMPOSITE.SHIELD.SCALE.HEIGHT_SCALE.RANDOM = CN()
_C.COMPOSITE.SHIELD.SCALE.HEIGHT_SCALE.RANDOM.LOWER_BOUND = 1.0
_C.COMPOSITE.SHIELD.SCALE.HEIGHT_SCALE.RANDOM.UPPER_BOUND = 1.0
_C.COMPOSITE.SHIELD.TRANSFORM = CN()
_C.COMPOSITE.SHIELD.TRANSFORM.X_SCALE = CN()
_C.COMPOSITE.SHIELD.TRANSFORM.X_SCALE.BASE = 0.0
_C.COMPOSITE.SHIELD.TRANSFORM.X_SCALE.RANDOM = CN()
_C.COMPOSITE.SHIELD.TRANSFORM.X_SCALE.RANDOM.LOWER_BOUND = 1.0
_C.COMPOSITE.SHIELD.TRANSFORM.X_SCALE.RANDOM.UPPER_BOUND = 1.0
_C.COMPOSITE.SHIELD.TRANSFORM.Y_SCALE = CN()
_C.COMPOSITE.SHIELD.TRANSFORM.Y_SCALE.BASE = -0.25
_C.COMPOSITE.SHIELD.TRANSFORM.Y_SCALE.RANDOM = CN()
_C.COMPOSITE.SHIELD.TRANSFORM.Y_SCALE.RANDOM.LOWER_BOUND = 1.0
_C.COMPOSITE.SHIELD.TRANSFORM.Y_SCALE.RANDOM.UPPER_BOUND = 1.0
# occlusion ("shield") overlays, version 2
_C.COMPOSITE.SHIELD_V2 = CN()
_C.COMPOSITE.SHIELD_V2.ENABLED = False
_C.COMPOSITE.SHIELD_V2.ROOT = 'data/shield_v2/'
_C.COMPOSITE.SHIELD_V2.PROB = 0.05
_C.COMPOSITE.SHIELD_V2.NUM_LIMIT = -1
_C.COMPOSITE.SHIELD_V2.FIX_RESOLUTION_PER_IMAGE = CN()
_C.COMPOSITE.SHIELD_V2.FIX_RESOLUTION_PER_IMAGE.ENABLED = False
_C.COMPOSITE.SHIELD_V2.FIX_RESOLUTION_PER_IMAGE.SAVE_JSON_PATH = '/path/to/shield_fix.json'

# pose relative
_C.POSE = CN()
_C.POSE.ROOT = '/path/to/pose_dir/'
_C.POSE.SERIALS = ['02_01', '02_02']

# rendering process control (how many images/cameras per identity, etc.)
_C.PROCESS = CN()
_C.PROCESS.FIRST_INDEX = 1
_C.PROCESS.LAST_INDEX = 343
_C.PROCESS.IMAGE_COUNT_PER_ID = 24
_C.PROCESS.IMAGE_COUNT_PER_ID_RANDOM_LOWER_BOUND = 0
_C.PROCESS.IMAGE_COUNT_PER_ID_RANDOM_UPPER_BOUND = 0
_C.PROCESS.CONTINUOUS_COUNT = 1
_C.PROCESS.IMAGE_COUNT_PER_CAMERA = 3
# repeat one image for one background
_C.PROCESS.IMAGE_COUNT_PER_BACKGROUND = 1
_C.PROCESS.POSE_CHANGE = False
# if the output folder contains images of current mesh_id x `IMAGE_COUNT_PER_ID`, continue
_C.PROCESS.RESUME = False
# 20210814_refactor_process
_C.PROCESS.TOTAL_CAMERA_COUNT = 30
_C.PROCESS.CAMERA_COUNT_PER_ID = 10  #

_C.OPTION = CN()
_C.OPTION.APPLY_CAMERA_WORLD_COLOR = CN()
_C.OPTION.APPLY_CAMERA_WORLD_COLOR.ENABLE = False
_C.OPTION.APPLY_CAMERA_WORLD_COLOR.VALUES = []
_C.OPTION.APPLY_CAMERA_WORLD_COLOR.APPLY_TO_BACKGROUND = False
_C.OPTION.APPLY_CAMERA_WORLD_COLOR.RANDOM = CN()
_C.OPTION.APPLY_CAMERA_WORLD_COLOR.RANDOM.ENABLED = False
_C.OPTION.APPLY_CAMERA_WORLD_COLOR.RANDOM.LOWER_BOUND = 0.5
_C.OPTION.APPLY_CAMERA_WORLD_COLOR.RANDOM.UPPER_BOUND = 1.0
_C.OPTION.APPLY_CAMERA_WORLD_COLOR.RANDOM.INDEPENDENT = False
_C.OPTION.APPLY_CAMERA_WORLD_COLOR.BACKGROUND = CN()
_C.OPTION.APPLY_CAMERA_WORLD_COLOR.BACKGROUND.ENABLED = False
_C.OPTION.APPLY_CAMERA_WORLD_COLOR.BACKGROUND.VALUES = []

# add partner
_C.PARTNER = CN()
_C.PARTNER.ENABLED = False
_C.PARTNER.PROB = 0.05
_C.PARTNER.PROB_IN_ONE_PERSON = 0.3
_C.PARTNER.COUNT = 1
_C.PARTNER.SELECT_MESH_ID_MIN = 1
_C.PARTNER.SELECT_MESH_ID_MAX = 656

# attribute
_C.ATTRIBUTE = CN()
_C.ATTRIBUTE.ENABLED = False
# attribute distribution
_C.ATTRIBUTE.USE_DISTRIBUTION_FILE = CN()
_C.ATTRIBUTE.USE_DISTRIBUTION_FILE.ENABLED = False
_C.ATTRIBUTE.USE_DISTRIBUTION_FILE.FILE_PATH = ''
_C.ATTRIBUTE.USE_DISTRIBUTION_FILE.LIMIT_FIELDS = []
_C.ATTRIBUTE.USE_DISTRIBUTION_FILE.RANDOM_SAMPLE = CN()
_C.ATTRIBUTE.USE_DISTRIBUTION_FILE.RANDOM_SAMPLE.ENABLED = False

# experiment, expand
_C.EXPERIMENT = CN()
_C.EXPERIMENT.EXPAND = CN()
_C.EXPERIMENT.EXPAND.ENABLED = False
_C.EXPERIMENT.EXPAND.CAMERA_DISTANCE = CN()
_C.EXPERIMENT.EXPAND.CAMERA_DISTANCE.ENABLED = False
_C.EXPERIMENT.EXPAND.CAMERA_DISTANCE.RANDOM = CN()
_C.EXPERIMENT.EXPAND.CAMERA_DISTANCE.RANDOM.UPPER_BOUND = 5
_C.EXPERIMENT.EXPAND.CAMERA_DISTANCE.RANDOM.LOWER_BOUND = -5
_C.EXPERIMENT.EXPAND.CAMERA_DISTANCE.EXPAND_NUM = 1
_C.EXPERIMENT.EXPAND.CAMERA_ELEV = CN()
_C.EXPERIMENT.EXPAND.CAMERA_ELEV.ENABLED = False
_C.EXPERIMENT.EXPAND.CAMERA_ELEV.RANDOM = CN()
_C.EXPERIMENT.EXPAND.CAMERA_ELEV.RANDOM.UPPER_BOUND = 10
_C.EXPERIMENT.EXPAND.CAMERA_ELEV.RANDOM.LOWER_BOUND = -10
_C.EXPERIMENT.EXPAND.CAMERA_ELEV.EXPAND_NUM = 1
_C.EXPERIMENT.EXPAND.GAMMA = CN()
_C.EXPERIMENT.EXPAND.GAMMA.ENABLED = False
_C.EXPERIMENT.EXPAND.GAMMA.EXPAND_NUM = 1

# debug
_C.DEBUG = CN()
_C.DEBUG.ONE_IMAGE_PER_PERSON = False

_C.OUTPUT_DIR = "logs/"
# if the paths below is specific, directly use them rather than `OUTPUT_DIR`
# This is to avoid tedious copy, paste and link operation
_C.OUTPUT_DIR_FOR_IMAGE = ''
_C.OUTPUT_DIR_FOR_ATTRIBUTE = ''
| 9,577 | 35.143396 | 119 | py |
tagperson-blender | tagperson-blender-master/blendereid/config/__init__.py | # encoding: utf-8
from .config import CfgNode, get_cfg
from .defaults import _C as cfg
| 88 | 16.8 | 36 | py |
tagperson-blender | tagperson-blender-master/blendereid/schema/attribute.py |
class Attribute:
    """Per-image rendering metadata recorded alongside each generated image."""
    # identity and camera pose
    mesh_id: int
    camera_azim: int
    camera_elev: int
    camera_distance: float
    # light placement
    light_azim: int
    light_elev: int
    light_distance: float
    # scene content
    background: str
    pose: str
    camera_idx: int
    img_width: int
    img_height: int
    # optional second person ("partner") in the frame
    partner_exist: bool
    partner_mesh_id_list: list
    # camera rotation-euler biases
    cre_x_bias: float
    cre_y_bias: float
    cre_z_bias: float
    world_color: list
    world_color_to_background: list
    seq_idx: int
    img_shield_v2_name: list
    gamma_value: float
    # only use to render
    pose_path: str
    save_path: str

    def __init__(self, mesh_id, camera_azim, camera_elev, camera_distance, light_azim, light_elev, light_distance, background='', pose='', camera_idx=-1, img_width=0, img_height=0, partner_exist=False, partner_mesh_id_list=None, cre_x_bias=0.0, cre_y_bias=0.0, cre_z_bias=0.0, world_color=(0.0, 0.0, 0.0, 0.0), world_color_to_background=(0.0, 0.0, 0.0, 0.0), seq_idx=0, pose_path='', save_path='', img_shield_v2_name=None, gamma_value=None):
        """Distances and rotation biases are rounded to 3 decimals for compact
        serialization.

        `partner_mesh_id_list` / `img_shield_v2_name` default to a fresh empty
        list per instance (previously they were mutable `[]` defaults, so all
        instances silently shared -- and could corrupt -- one list object).
        """
        self.mesh_id = mesh_id
        self.camera_azim = camera_azim
        self.camera_elev = camera_elev
        self.camera_distance = round(camera_distance, 3)
        self.light_azim = light_azim
        self.light_elev = light_elev
        self.light_distance = round(light_distance, 3)
        self.background = background
        self.pose = pose
        self.camera_idx = int(camera_idx)
        self.img_width = img_width
        self.img_height = img_height
        self.partner_exist = partner_exist
        # None sentinel -> per-instance empty list (avoids shared mutable default).
        self.partner_mesh_id_list = [] if partner_mesh_id_list is None else partner_mesh_id_list
        self.cre_x_bias = round(cre_x_bias, 3)
        self.cre_y_bias = round(cre_y_bias, 3)
        self.cre_z_bias = round(cre_z_bias, 3)
        self.world_color = world_color
        self.world_color_to_background = world_color_to_background
        self.seq_idx = int(seq_idx)
        self.img_shield_v2_name = [] if img_shield_v2_name is None else img_shield_v2_name
        self.gamma_value = gamma_value
        self.pose_path = pose_path
        self.save_path = save_path
| 2,039 | 35.428571 | 437 | py |
tagperson-blender | tagperson-blender-master/blendereid/utils/resume_utils.py | import glob
import os
from blendereid.core import render_config
def check_rendering_result_exist(cfg, current_mesh_id):
    """Return True when the configured number of images for `current_mesh_id`
    already exists in the output directory (used to resume interrupted runs).

    When IMAGE_COUNT_PER_ID is randomized, the expected count is unknown, so
    the check always reports False.
    """
    randomized = (cfg.PROCESS.IMAGE_COUNT_PER_ID_RANDOM_LOWER_BOUND != 0
                  or cfg.PROCESS.IMAGE_COUNT_PER_ID_RANDOM_UPPER_BOUND != 0)
    if randomized:
        print(f"config is using dynamic IMAGE_COUNT_PER_ID, could not check whether it rendered.")
        return False
    pattern = os.path.join(render_config.compose_output_dir(cfg), f"{current_mesh_id}_c*s1_*.jpg")
    rendered = glob.glob(pattern)
    return len(rendered) == cfg.PROCESS.IMAGE_COUNT_PER_ID
tagperson-blender | tagperson-blender-master/blendereid/utils/misc.py | import random
import numpy as np
def fix_random_seeds(seed: int):
    """Seed the stdlib and NumPy random generators for reproducible runs."""
    print(f"fix random seed to {seed}.")
    for seeder in (random.seed, np.random.seed):
        seeder(seed)
tagperson-blender | tagperson-blender-master/blendereid/utils/geometry.py |
import math
def calculate_target_location(origin_location, distance, azimuth, elevation):
    """Return the point at the given spherical offset from `origin_location`.

    azimuth ~ (0, 360] and elevation ~ (0, 90], both in degrees.
    """
    assert len(origin_location) == 3
    az = math.radians(azimuth)
    el = math.radians(elevation)
    # Vertical component, and the projection of the distance onto the ground plane.
    dz = distance * math.sin(el)
    ground_distance = distance * math.cos(el)
    # Decompose the ground-plane component along x/y by the azimuth.
    dx = ground_distance * math.cos(az)
    dy = ground_distance * math.sin(az)
    ox, oy, oz = origin_location
    return (ox + dx, oy + dy, oz + dz)
| 827 | 29.666667 | 109 | py |
tagperson-blender | tagperson-blender-master/blendereid/utils/mesh_utils.py | import os
import bpy
def format_mesh_file_name(mesh_id, obj_pose, suffix='mhx2'):
    """Compose a mesh file name of the form "<mesh_id>_<obj_pose>.<suffix>"."""
    return f"{mesh_id}_{obj_pose}.{suffix}"
def load_object(obj_file_path):
    """Import a mesh file into the Blender scene; supports `.obj` and `.mhx2`.

    Raises:
        ValueError: for any other file extension.
    """
    ext = os.path.splitext(os.path.basename(obj_file_path))[1]
    if ext == '.obj':
        bpy.ops.import_scene.obj(filepath=obj_file_path, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl;")
    elif ext == '.mhx2':
        # MakeHuman rigged mesh (requires the mhx2 importer add-on).
        bpy.ops.import_scene.makehuman_mhx2(filepath=obj_file_path)
    else:
        raise ValueError(f"Unsupported subfix of obj_file_path: {obj_file_path}, only `.obj` and `.mhx2` supported.")
def get_mesh_id(obj_file_path):
    """Return the integer mesh id from a mesh file path.

    The obj_key looks like "5_standing02"; the mesh id is its leading integer.
    """
    obj_key = get_obj_key(obj_file_path)
    return int(obj_key.split("_")[0])
def get_obj_key(obj_file_path):
    """Derive the Blender object key from a mesh file path.

    '.obj' files keep the file stem as-is, '.mhx2' files use the lower-cased
    stem, and any other extension maps to an empty string.
    """
    stem, ext = os.path.splitext(os.path.basename(obj_file_path))
    if ext == '.obj':
        return stem
    if ext == '.mhx2':
        return stem.lower()
    return ''
def get_mesh_id_from_obj_name(obj_name):
    """Return the integer mesh id encoded before the first underscore."""
    return int(obj_name.partition("_")[0])
| 1,204 | 27.023256 | 117 | py |
tagperson-blender | tagperson-blender-master/blendereid/utils/bpy_utils.py | import bpy
import import_runtime_mhx2
import makewalk
# Minimal no-op operator, registered so the add-on registration flow has an
# operator class available.  (No docstring on purpose: Blender uses operator
# docstrings as UI tooltips.)
class SampleOperator(bpy.types.Operator):
    bl_idname = "object.sample_operator"
    bl_label = "Sample Object Operator"

    @classmethod
    def poll(cls, context):
        # Operator is only available when some object is active.
        return context.active_object is not None
def register_bpy_libs():
    """Register the sample operator plus the mhx2 importer and makewalk add-ons."""
    bpy.utils.register_class(SampleOperator)
    import_runtime_mhx2.register()
    makewalk.register()
def remove_object(obj_key = 'Cube'):
    """Delete the object named `obj_key` from the scene, plus its same-named mesh."""
    bpy.ops.object.mode_set(mode="OBJECT")
    bpy.data.objects[obj_key].select_set(True)
    bpy.ops.object.delete()
    # Deleting the object leaves its mesh datablock behind; purge it too.
    if obj_key in bpy.data.meshes:
        mesh = bpy.data.meshes[obj_key]
        bpy.data.meshes.remove(mesh)
def remove_object_v2(obj_key = 'Cube'):
    """Best-effort removal of an object and every mesh whose name contains `obj_key`.

    Unlike remove_object(), failures while deleting the object are ignored
    (the object may already be gone), and all meshes whose key contains
    `obj_key` are purged, not just an exact-name match.
    """
    if obj_key is None:
        return
    # delete obj (best-effort: the object may not exist or mode switch may fail)
    try:
        bpy.ops.object.mode_set(mode="OBJECT")
        bpy.data.objects[obj_key].select_set(True)
        bpy.ops.object.delete()
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        pass
    # delete mesh
    for mesh_key in bpy.data.meshes.keys():
        if mesh_key.find(obj_key) > -1:
            mesh = bpy.data.meshes[mesh_key]
            print(f"remove meshes {mesh_key}, {mesh}")
            bpy.data.meshes.remove(mesh)
"""
For some reason, the alpha channel in mesh may be incorrect
We need to correct them
"""
def correct_alpha_channel():
    """Disconnect any link feeding the alpha input of each material's
    Principled BSDF node, fixing meshes that import with incorrect
    transparency."""
    meshes_keys = bpy.data.meshes.keys()
    for mesh_key in meshes_keys:
        mesh = bpy.data.meshes[mesh_key]
        for material_key in mesh.materials.keys():
            # print(f"material_key={material_key}")
            tree_links = mesh.materials[material_key].node_tree.links
            # NOTE(review): input index 18 is assumed to be the Alpha socket;
            # this index is Blender-version dependent -- confirm.
            alpha_links = mesh.materials[material_key].node_tree.nodes['Principled BSDF'].inputs[18].links
            if len(alpha_links) > 0:
                link = alpha_links[0]
                tree_links.remove(link)
| 1,817 | 27.40625 | 106 | py |
tagperson-blender | tagperson-blender-master/blendereid/utils/determinastic_utils.py |
def set_variable_according_to_determinastic_params(variable_value, variable_name: str, determinastic_params: dict):
    """Override `variable_value` with a pinned value from `determinastic_params`.

    Known keys include: camera_azim, camera_elev, camera_distance.  When the
    params dict is None or does not pin this variable, the original value is
    returned unchanged.
    """
    if determinastic_params is None:
        return variable_value
    return determinastic_params.get(variable_name, variable_value)
tagperson-blender | tagperson-blender-master/demo/01_render_one_person.py | import os
import bpy
from tqdm import tqdm
import imageio
import cv2
import numpy as np
from blendereid.utils import bpy_utils, geometry
from blendereid.utils import mesh_utils
from blendereid.core import compositing
from blendereid.core import render_config
from blendereid.core import pose_manager
from blendereid.core import background
from blendereid.core import world_config
from blendereid.schema.attribute import Attribute
def prepare_scene():
    """Register the required Blender add-ons and clear the default cube."""
    bpy_utils.register_bpy_libs()
    bpy_utils.remove_object(obj_key='Cube')
def prepare_render_setting():
    """Build the compositing node graph and apply the baseline render settings."""
    compositing.compose_render_nodes()
    render_config.setup_basic_render(None)
def compose_gif_file(jpg_file_list, save_path='tmp/ani.gif', fps=5):
    """Assemble a list of jpg frames into an animated gif at `fps` frames/second."""
    gif_images = []
    for jpg_file in jpg_file_list:
        gif_images.append(imageio.imread(jpg_file))
    imageio.mimsave(save_path, gif_images, fps=fps)
    print(f"gif image save into {save_path}")
def compose_gallery_line_file(jpg_file_list, save_path='tmp/gallery.jpg', target_w=128, target_h=256):
    """Concatenate the given images horizontally into one gallery strip.

    Each image is resized to (target_w, target_h) before being appended to
    the right of the strip; the result is written to ``save_path``.
    """
    canvas = None
    for img_path in jpg_file_list:
        tile = cv2.resize(cv2.imread(img_path), (target_w, target_h))
        if canvas is None:
            canvas = tile
        else:
            canvas = np.concatenate((canvas, tile), axis=1)
    cv2.imwrite(save_path, canvas)
    print(f"gallery image save into {save_path}")
def load_person_mesh(mesh_path):
    """Import a person mesh into the scene; return (obj_key, blender object)."""
    # load and fetch obj for current person
    mesh_utils.load_object(mesh_path)
    obj_key = mesh_utils.get_obj_key(mesh_path)
    obj_person = bpy.data.objects[obj_key]
    # correct alpha for materials to avoid incorrect transparent
    bpy_utils.correct_alpha_channel()
    return obj_key, obj_person
def render_one_person_on_certain_attribute(obj_person, attribute: Attribute, save_path):
    """Render ``obj_person`` once under the camera/light/pose/background
    settings described by ``attribute`` and write the image to ``save_path``.
    """
    # render option
    camera_distance = attribute.camera_distance
    camera_elev = attribute.camera_elev
    camera_azim = attribute.camera_azim
    # pose
    pose_path = attribute.pose
    if pose_path is not None:
        bpy.ops.mcp.load_pose(filepath=pose_path)
        pose_manager.apply_transform_to_bones(obj_person)
        bpy.context.view_layer.update()
    # background
    img_bg = None if attribute.background is None else bpy.data.images.load(attribute.background)
    background.set_backgound_from_predefined_img(img_bg)
    obj_camera = bpy.data.objects['Camera']
    render = bpy.data.scenes['Scene'].render
    light = bpy.data.objects['Light']
    # place camera and light on spheres around the person (distance/azim/elev)
    person_location = (obj_person.location[0], obj_person.location[1], obj_person.location[2])
    obj_camera.location = geometry.calculate_target_location(person_location, camera_distance, camera_azim, camera_elev)
    light_azim = attribute.light_azim
    light_elev = attribute.light_elev
    light_distance = attribute.light_distance
    light.location = geometry.calculate_target_location(obj_person.location, light_distance, light_azim, light_elev)
    bpy.context.view_layer.update()
    # NOTE(review): cre_y_bias feeds the z-rotation and cre_z_bias the
    # y-rotation below -- looks swapped; confirm whether this is intended.
    camera_rotation_euler_x_bias = attribute.cre_x_bias
    camera_rotation_euler_z_bias = attribute.cre_y_bias
    camera_rotation_euler_y_bias = attribute.cre_z_bias
    obj_pose = obj_person.matrix_world.to_translation()
    camera_pos = obj_camera.matrix_world.to_translation()
    direction = obj_pose - camera_pos
    # obj_pose the cameras '-Z' and use its 'Y' as up
    rot_quat = direction.to_track_quat('-Z', 'Y')
    # assume we're using euler rotation
    obj_camera.rotation_euler = rot_quat.to_euler()
    obj_camera.rotation_euler.x += camera_rotation_euler_x_bias
    obj_camera.rotation_euler.z += camera_rotation_euler_z_bias
    obj_camera.rotation_euler.y += camera_rotation_euler_y_bias
    render.filepath = save_path
    # render_config.modify_render_config(cfg)
    # image size
    render.resolution_x = attribute.img_width
    render.resolution_y = attribute.img_height
    render.resolution_percentage = 100
    # world color, alpha mask and gamma correction for this attribute
    world_config.apply_world_color_frontend(attribute.world_color)
    world_config.apply_color_mask_alpha(attribute.world_color)
    # set gamma
    compositing.set_gamma_value(attribute.gamma_value)
    bpy.ops.render.render(write_still=True)
# render a static image for the person
def render_sample_person(mesh_path, save_path):
    """Render a single still image of the mesh using a fixed demo attribute."""
    obj_key, obj_person = load_person_mesh(mesh_path)
    # Hand-picked demo settings: fixed camera/light placement, demo
    # background image and demo pose.
    person_attribute = Attribute(
        mesh_id=None,
        camera_azim=280,
        camera_elev=30,
        camera_distance=30.0,
        light_azim=280,
        light_elev=75,
        light_distance=25.0/1.5,
        background='data_demo/background_demo/000000000086-1-left.jpg',
        pose='data_demo/pose_demo/08_04_19.json',
        camera_idx=-1,
        img_width=256,
        img_height=512,
        partner_exist=False,
        partner_mesh_id_list=[],
        cre_x_bias=0.32,
        cre_y_bias=0.0,
        cre_z_bias=0.0
    )
    render_one_person_on_certain_attribute(obj_person, person_attribute, save_path=save_path)
    # clean up so repeated calls do not accumulate objects in the scene
    bpy_utils.remove_object_v2(obj_key=obj_key)
# render a gif for different rendering options
def render_sample_person_demo_camera_azim(mesh_path, save_path):
    """
    render a person surround by camera

    Renders one frame per camera azimuth (0..350 degrees in steps of 10,
    with the light following the camera) and assembles them into a GIF at
    ``save_path``; the intermediate JPG frames are removed afterwards.
    """
    obj_key, obj_person = load_person_mesh(mesh_path)
    person_attribute_list = [Attribute(
        mesh_id=None,
        camera_azim=camera_azim,
        camera_elev=30,
        camera_distance=30.0,
        light_azim=camera_azim,
        light_elev=65,
        light_distance=25.0/1.5,
        background='data_demo/background_demo/bg_blender_notext.png',
        pose='data_demo/pose_demo/08_04_19.json',
        camera_idx=-1,
        img_width=256,
        img_height=512,
        partner_exist=False,
        partner_mesh_id_list=[],
        cre_x_bias=0.32,
        cre_y_bias=0.0,
        cre_z_bias=0.0
    ) for camera_azim in range(0, 359, 10)]
    save_path_list = [f'tmp/{idx}.jpg' for idx in range(len(person_attribute_list))]
    for idx, p_attr in tqdm(enumerate(person_attribute_list)):
        render_one_person_on_certain_attribute(obj_person, p_attr, save_path=save_path_list[idx])
    compose_gif_file(save_path_list, save_path=save_path, fps=20)
    # remove tmp file
    for tmp_file in save_path_list:
        os.remove(tmp_file)
    bpy_utils.remove_object_v2(obj_key=obj_key)
def parse_args(argv=None):
    """Parse command line options for the demo renderer.

    Parameters
    ----------
    argv : list of str, optional
        Arguments to parse. Defaults to ``sys.argv[1:]`` when None, which
        keeps the original no-argument call sites working unchanged.

    Returns
    -------
    argparse.Namespace with ``mesh_path``, ``save_path`` and
    ``demo_camera_azim``.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--mesh_path', type=str, default=None)
    parser.add_argument('--save_path', type=str, default='tmp/sample_person.jpg')
    parser.add_argument('--demo_camera_azim', action="store_true")
    args = parser.parse_args(argv)
    return args
if __name__ == "__main__":
args = parse_args()
mesh_path = args.mesh_path
if mesh_path is None or not os.path.exists(mesh_path):
raise ValueError(f"Mesh path is not valid: `{mesh_path}`")
print(f"Render images from {mesh_path}")
# start
prepare_scene()
prepare_render_setting()
if args.demo_camera_azim:
if not args.save_path.endswith(".gif"):
raise ValueError(f"the `save_path` should be `.gif` format when executing the `demo_camera_azim` option...")
render_sample_person_demo_camera_azim(mesh_path, save_path=args.save_path)
else:
render_sample_person(mesh_path, save_path=args.save_path)
| 7,350 | 33.350467 | 120 | py |
DACBench | DACBench-main/setup.py | import json
import os
from setuptools import setup, find_packages
def get_other_requirements(directory='./other_requirements'):
    """Merge all JSON requirement files found in *directory* into one dict.

    Parameters
    ----------
    directory : str
        Folder whose files each contain a JSON mapping of an
        extra-requirements group name to its package list. Defaults to the
        repository's ``other_requirements`` folder, so existing callers
        are unaffected.

    Returns
    -------
    dict
        Union of all parsed files. NOTE: ``os.listdir`` order is
        arbitrary, so duplicate keys across files are resolved
        non-deterministically (last file read wins).
    """
    other_requirements = {}
    for file in os.listdir(directory):
        # os.path.join instead of string interpolation makes the directory
        # parameter work on any platform and with trailing separators
        with open(os.path.join(directory, file), encoding='utf-8') as rq:
            requirements = json.load(rq)
            other_requirements.update(requirements)
    return other_requirements
setup(
    version="0.2.1",
    # Ship only the library packages; tests, examples and the vendored
    # fast-downward sources are excluded from the distribution.
    packages=find_packages(exclude=['tests', 'examples', 'dacbench.wrappers.*', 'dacbench.envs.fast-downward/*']),
)
| 531 | 27 | 114 | py |
DACBench | DACBench-main/examples/container.py | from pathlib import Path
# in order to run this we need to build the container first by running
# `singularity build --fakeroot dachbench.sif dacbench/container/singularity_recipes/dachbench.def` from project root.
# For more details refer to dacbench/container/Container Roadmap.md
from dacbench.agents import RandomAgent
from dacbench.benchmarks import SigmoidBenchmark
from dacbench.container.remote_runner import RemoteRunner
if __name__ == '__main__':
    # the .sif image is expected next to the repository root (see the build
    # instructions in the module header)
    container_source = (Path(__file__).parent.parent / "dacbench.sif").resolve()
    if not container_source.exists():
        raise RuntimeError(f"Container file not found ({container_source}). Please build before running this example")
    # config
    # more extensive tests needed here to find bugs/missing implementation
    benchmark = SigmoidBenchmark()
    benchmark.set_seed(42)
    episodes = 10
    # run
    # RemoteRunner executes the benchmark inside the singularity container
    # and exposes the environment interface back to this process
    remote_runner = RemoteRunner(benchmark, container_source=container_source)
    agent = RandomAgent(remote_runner.get_environment())
    remote_runner.run(agent, episodes)
DACBench | DACBench-main/examples/logger.py | from pathlib import Path
from dacbench.plotting import plot_performance, plot_performance_per_instance
from dacbench.logger import Logger, log2dataframe, load_logs
from dacbench.agents.simple_agents import RandomAgent
from dacbench.benchmarks import SigmoidBenchmark
from dacbench.runner import run_benchmark
from dacbench.wrappers import PerformanceTrackingWrapper, StateTrackingWrapper
import matplotlib.pyplot as plt
# Run an experiment and log the results
if __name__ == "__main__":
# Make benchmark
bench = SigmoidBenchmark()
# Run for 10 episodes each on 10 seeds
num_episodes = 10
seeds = range(10)
# Make logger object and add modules for performance & state logging
logger = Logger(
experiment_name="sigmoid_example",
output_path=Path("plotting/data"),
step_write_frequency=None,
episode_write_frequency=None,
)
state_logger = logger.add_module(StateTrackingWrapper)
performance_logger = logger.add_module(PerformanceTrackingWrapper)
for s in seeds:
# Make & wrap benchmark environment
env = bench.get_benchmark(seed=s)
env = PerformanceTrackingWrapper(env, logger=performance_logger)
env = StateTrackingWrapper(env, logger=state_logger)
# Add env to logger
logger.set_env(env)
# Run random agent
agent = RandomAgent(env)
run_benchmark(env, agent, num_episodes, logger)
# Close logger object
logger.close()
# Load performance of last seed into pandas DataFrame
logs = load_logs(performance_logger.get_logfile())
dataframe = log2dataframe(logs, wide=True)
# Plot overall performance
plot_performance(dataframe)
plt.show()
# Plot performance per instance
plot_performance_per_instance(dataframe)
plt.show()
| 1,820 | 29.35 | 78 | py |
DACBench | DACBench-main/examples/ray_ppo.py | import ray
from ray.tune.registry import register_env
from ray.rllib.agents import ppo
from dacbench import benchmarks
from dacbench.wrappers import ObservationWrapper
import argparse
def make_benchmark(config):
    """Instantiate the DACBench benchmark named in ``config['benchmark']``.

    SGD and CMA-ES environments are additionally wrapped in
    ObservationWrapper (presumably to flatten their observations for
    rllib -- confirm against the wrapper's implementation).
    """
    bench = getattr(benchmarks, config["benchmark"])()
    env = bench.get_benchmark(seed=config["seed"])
    if config["benchmark"] in ["SGDBenchmark", "CMAESBenchmark"]:
        env = ObservationWrapper(env)
    return env
parser = argparse.ArgumentParser(description="Run ray PPO for DACBench")
parser.add_argument("--outdir", type=str, default="output", help="Output directory")
parser.add_argument(
"--benchmarks", nargs="+", type=str, default=["LubyBenchmark"], help="Benchmarks to run PPO for"
)
parser.add_argument(
"--timesteps", type=int, default=10000, help="Number of timesteps to run"
)
parser.add_argument(
"--save_interval", type=int, default=100, help="Checkpoint interval"
)
parser.add_argument(
"--seeds",
nargs="+",
type=int,
default=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
help="Seeds for evaluation",
)
parser.add_argument("--torch", action="store_true")
parser.add_argument("--fd_port", type=int, default=55555)
args = parser.parse_args()
for b in args.benchmarks:
for s in args.seeds:
config = {"seed": s, "benchmark": b}
if b == "FastDownwardBenchmark":
config["port"] = args.fd_port
register_env(f"{b}", lambda conf: make_benchmark(conf))
ray.init()
trainer = ppo.PPOTrainer(
config={
"num_gpus": 0,
"env": f"{b}",
"env_config": config,
"framework": "tf" if not args.torch else "torch",
}
)
for i in range(args.timesteps):
trainer.train()
if i % args.save_interval == 0:
trainer.save(args.outdir + f"./{b}_{s}")
ray.shutdown()
| 1,902 | 30.716667 | 100 | py |
DACBench | DACBench-main/examples/tabular_rl_luby.py | """
Code adapted from
"Dynamic Algorithm Configuration:Foundation of a New Meta-Algorithmic Framework"
by A. Biedenkapp and H. F. Bozkurt and T. Eimer and F. Hutter and M. Lindauer.
Original environment authors: André Biedenkapp, H. Furkan Bozkurt
"""
import sys
import numpy as np
from dacbench.benchmarks import LubyBenchmark
# Make Luby environment
from examples.example_utils import (
QTable,
EpisodeStats,
get_decay_schedule,
make_tabular_policy,
update,
)
def q_learning(
    environment,
    num_episodes: int,
    discount_factor: float = 1.0,
    alpha: float = 0.5,
    epsilon: float = 0.1,
    verbose: bool = False,
    track_test_stats: bool = False,
    float_state=False,
    epsilon_decay: str = "const",
    decay_starts: int = 0,
    number_of_evaluations: int = 1,
    test_environment=None,
):
    """
    Q-Learning algorithm

    Parameters
    ----------
    environment : env with gym-style reset/step used for training
    num_episodes : number of training episodes to run
    discount_factor : discount factor in [0, 1]
    alpha : positive learning rate
    epsilon : initial exploration rate in [0, 1]
    verbose : print one progress line per 100 episodes instead of
        overwriting the same line
    track_test_stats : allocate a separate EpisodeStats for test results
    float_state : round float state components to ints for table lookups
    epsilon_decay : 'const', 'log' or 'linear' (see get_decay_schedule)
    decay_starts : episode index at which epsilon decay begins
    number_of_evaluations : NOTE(review): currently unused in this variant
    test_environment : NOTE(review): currently unused; test_stats stays
        all-zero even when track_test_stats is set

    Returns
    -------
    (Q, (test_stats, train_stats))
    """
    assert 0 <= discount_factor <= 1, "Lambda should be in [0, 1]"
    assert 0 <= epsilon <= 1, "epsilon has to be in [0, 1]"
    assert alpha > 0, "Learning rate has to be positive"
    # The action-value function.
    # Nested dict that maps state -> (action -> action-value).
    Q = QTable(environment.action_space.n, float_state)
    test_stats = None
    if track_test_stats:
        test_stats = EpisodeStats(
            episode_lengths=np.zeros(num_episodes),
            episode_rewards=np.zeros(num_episodes),
            expected_rewards=np.zeros(num_episodes),
        )
    # Keeps track of episode lengths and rewards
    train_stats = EpisodeStats(
        episode_lengths=np.zeros(num_episodes),
        episode_rewards=np.zeros(num_episodes),
        expected_rewards=np.zeros(num_episodes),
    )
    epsilon_schedule = get_decay_schedule(
        epsilon, decay_starts, num_episodes, epsilon_decay
    )
    for i_episode in range(num_episodes):
        epsilon = epsilon_schedule[i_episode]
        # The policy we're following (rebuilt each episode with the decayed epsilon)
        policy = make_tabular_policy(Q, epsilon, environment.action_space.n)
        # Print out which episode we're on, useful for debugging.
        if (i_episode + 1) % 100 == 0:
            if verbose:
                print("\rEpisode {:>5d}/{}.".format(i_episode + 1, num_episodes))
            else:
                print(
                    "\rEpisode {:>5d}/{}.".format(i_episode + 1, num_episodes), end=""
                )
                sys.stdout.flush()
        Q, rs, exp_rew, ep_len = update(Q, environment, policy, alpha, discount_factor)
        train_stats.episode_rewards[i_episode] = rs
        train_stats.expected_rewards[i_episode] = exp_rew
        train_stats.episode_lengths[i_episode] = ep_len
    if not verbose:
        print("\rEpisode {:>5d}/{}.".format(i_episode + 1, num_episodes))
    return Q, (test_stats, train_stats)
# Instantiate the Luby benchmark with its default configuration
bench = LubyBenchmark()
env = bench.get_environment()
# Execute 10 episodes of tabular Q-Learning
q_func, test_train_stats = q_learning(env, 10)
# q_learning returns (Q, (test_stats, train_stats)); index 1 of the
# stats tuple holds the training statistics
print(f"Rewards: {test_train_stats[1].episode_rewards}")
print(f"Episode Lenghts: {test_train_stats[1].episode_lengths}")
DACBench | DACBench-main/examples/multi_agent_sigmoid.py | from dacbench.benchmarks import SigmoidBenchmark
bench = SigmoidBenchmark()
# Enable multi-agent mode before building the environment
bench.config['multi_agent'] = True
env = bench.get_environment()
# Register the two agents that will act in turn
env.register_agent(0)
env.register_agent(1)
env.reset()
terminated, truncated = False, False
total_reward = 0
while not (terminated or truncated):
    for a in [0, 1]:
        observation, reward, terminated, truncated, info = env.last()
        action = env.action_spaces[a].sample()
        env.step(action)
    # the reward is accumulated once per round, after both agents acted
    observation, reward, terminated, truncated, info = env.last()
    total_reward += reward
print(f"The final reward was {total_reward}.")
DACBench | DACBench-main/examples/coax_ppo_cmaes.py | import jax
import jax.numpy as jnp
import coax
import haiku as hk
from numpy import prod
import optax
from dacbench.benchmarks import CMAESBenchmark
from dacbench.wrappers import ObservationWrapper
# the name of this script
name = 'ppo'
# the Pendulum MDP
bench = CMAESBenchmark()
env = bench.get_environment()
env = ObservationWrapper(env)
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func_pi(S, is_training):
    """Gaussian policy network: a shared trunk feeding separate mean (mu)
    and log-variance heads, each reshaped to the action space's shape.
    """
    shared = hk.Sequential((
        hk.Linear(8), jax.nn.relu,
        hk.Linear(8), jax.nn.relu,
    ))
    mu = hk.Sequential((
        shared,
        hk.Linear(8), jax.nn.relu,
        # zero-initialised final layer so the initial policy is centred
        hk.Linear(prod(env.action_space.shape), w_init=jnp.zeros),
        hk.Reshape(env.action_space.shape),
    ))
    logvar = hk.Sequential((
        shared,
        hk.Linear(8), jax.nn.relu,
        hk.Linear(prod(env.action_space.shape), w_init=jnp.zeros),
        hk.Reshape(env.action_space.shape),
    ))
    return {'mu': mu(S), 'logvar': logvar(S)}
def func_v(S, is_training):
    """State-value network: three hidden relu layers with a scalar output."""
    seq = hk.Sequential((
        hk.Linear(8), jax.nn.relu,
        hk.Linear(8), jax.nn.relu,
        hk.Linear(8), jax.nn.relu,
        hk.Linear(1, w_init=jnp.zeros), jnp.ravel
    ))
    return seq(S)
# define function approximators
pi = coax.Policy(func_pi, env)
v = coax.V(func_v, env)
# target network
pi_targ = pi.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=5, gamma=0.9)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=512)
# policy regularizer (avoid premature exploitation)
policy_reg = coax.regularizers.EntropyRegularizer(pi, beta=0.01)
# updaters
simpletd = coax.td_learning.SimpleTD(v, optimizer=optax.adam(1e-3))
ppo_clip = coax.policy_objectives.PPOClip(pi, regularizer=policy_reg, optimizer=optax.adam(1e-4))
# train for 10 episodes; actions are sampled from the target policy while
# updates are applied to the live policy
for _ in range(10):
    done, truncated = False, False
    s, info = env.reset()
    while not (done or truncated):
        a, logp = pi_targ(s, return_logp=True)
        s_next, r, done, truncated, info = env.step(a)
        # trace rewards
        tracer.add(s, a, r, done or truncated, logp)
        while tracer:
            buffer.add(tracer.pop())
        # learn (only once the replay buffer is completely filled)
        if len(buffer) >= buffer.capacity:
            for _ in range(int(4 * buffer.capacity / 32)):  # 4 passes per round
                transition_batch = buffer.sample(batch_size=32)
                metrics_v, td_error = simpletd.update(transition_batch, return_td_error=True)
                metrics_pi = ppo_clip.update(transition_batch, td_error)
                env.record_metrics(metrics_v)
                env.record_metrics(metrics_pi)
            buffer.clear()
            # slowly track the updated policy with the target network
            pi_targ.soft_update(pi, tau=0.1)
        if done or truncated:
            break
        s = s_next
DACBench | DACBench-main/examples/run_dacbench.py | from dacbench.runner import run_dacbench
from dacbench.agents import RandomAgent
# Function to create an agent fulfilling the DACBench Agent interface
# In this case: a simple random agent
def make_agent(env):
    """Agent factory handed to run_dacbench; returns a fresh agent per env."""
    return RandomAgent(env)
# Result output path
path = "dacbench_tabular"
# Run all DACBench benchmarks with the agent for 2 episodes each
run_dacbench(path, make_agent, 2)
| 389 | 23.375 | 69 | py |
DACBench | DACBench-main/examples/example_utils.py | import gym
import argparse
import numpy as np
from collections import defaultdict, namedtuple
from dacbench.logger import Logger
class DummyEnv(gym.Env):
    """Minimal stub environment used to exercise wrappers in examples.

    One-element observation space, two discrete actions, constant reward
    of 0; episodes truncate (not terminate) after 10 steps. step/reset
    follow the 5-tuple / 2-tuple gymnasium-style API.
    """
    def __init__(self):
        self.c_step = None  # step counter, initialised by reset()
        self.action_space = gym.spaces.Discrete(2)
        self.observation_space = gym.spaces.Discrete(1)
        self.reward_range = (-10, 10)
    def step(self, action):
        self.c_step += 1
        # observation, reward, terminated, truncated (after 10 steps), info
        return np.array([0]), 0, False, self.c_step > 9, {}
    def reset(self):
        self.c_step = 0
        return np.array([1]), {}
class QTable(dict):
    """Tabular state-action value store backed by a defaultdict of arrays.

    Indexing supports two forms:
      * ``q[state]`` returns the full action-value vector for ``state``
      * ``q[[state, action]]`` reads/writes one action value
    The two cases are told apart by trying to unpack the key into exactly
    two parts; a failed unpack (ValueError) means the key is a bare state.
    """
    def __init__(self, n_actions, float_to_int=False, **kwargs):
        """
        Look up table for state-action values.
        :param n_actions: action space size
        :param float_to_int:
            flag to determine if state values need to be rounded to the closest integer
        """
        super().__init__(**kwargs)
        self.n_actions = n_actions
        self.float_to_int = float_to_int
        # unseen states lazily get a zero vector, one entry per action
        self._table = defaultdict(lambda: np.zeros(n_actions))
    def __getitem__(self, item):
        try:
            state_part, action_part = item
            if self.float_to_int:
                state_part = map(int, state_part)
            return self._table[tuple(state_part)][action_part]
        except ValueError:
            # key was a bare state -> return the whole action-value vector
            if self.float_to_int:
                item = map(int, item)
            return self._table[tuple(item)]
    def __setitem__(self, key, value):
        try:
            state_part, action_part = key
            if self.float_to_int:
                state_part = map(int, state_part)
            self._table[tuple(state_part)][action_part] = value
        except ValueError:
            # key was a bare state -> replace the whole vector
            if self.float_to_int:
                key = map(int, key)
            self._table[tuple(key)] = value
    def __contains__(self, item):
        return tuple(item) in self._table.keys()
    def keys(self):
        return self._table.keys()
def make_tabular_policy(Q, epsilon: float, nA: int):
    """Create an epsilon-greedy policy based on a given Q-function and epsilon.

    (The previous signature annotated ``Q: QTable`` and ``-> callable``;
    ``callable`` is the builtin function, not a type, and the eager class
    annotation coupled this helper to QTable at import time -- the types
    are documented here instead. Any mapping from observation to an
    action-value vector works for ``Q``.)

    :param Q: tabular state-action lookup (observation -> value vector)
    :param epsilon: exploration factor in [0, 1]
    :param nA: size of action space to consider for this policy
    :return: function mapping an observation to a probability vector of
        length ``nA``: epsilon/nA mass on every action plus 1-epsilon on
        the greedy action (ties broken uniformly at random)
    """
    def policy_fn(observation):
        values = Q[observation]
        # uniform exploration mass
        policy = np.ones(nA) * epsilon / nA
        best_action = np.random.choice(
            np.argwhere(  # random choice for tie-breaking only
                values == np.amax(values)
            ).flatten()
        )
        policy[best_action] += 1 - epsilon
        return policy
    return policy_fn
def get_decay_schedule(
    start_val: float, decay_start: int, num_episodes: int, type_: str
):
    """Precompute a per-episode epsilon schedule.

    :param start_val: value to start decaying from (e.g. 1.0)
    :param decay_start: number of episodes to hold the start value before
        decaying ('const' ignores this)
    :param num_episodes: total schedule length
    :param type_: 'const' (flat), 'log' (logarithmic decay down to 1e-6)
        or 'linear' (linear decay down to 0)
    :return: numpy array of length ``num_episodes``
    :raises NotImplementedError: for any other ``type_``
    """
    if type_ == "const":
        return np.array([start_val] * num_episodes)
    # constant warm-up prefix shared by both decaying schedules
    prefix = [start_val] * decay_start
    if type_ == "log":
        decay = np.logspace(
            np.log10(start_val),
            np.log10(0.000001),
            (num_episodes - decay_start),
        )
        return np.hstack([prefix, decay])
    if type_ == "linear":
        decay = np.linspace(start_val, 0, (num_episodes - decay_start))
        return np.hstack([prefix, decay])
    raise NotImplementedError
def greedy_eval_Q(Q: QTable, this_environment, nevaluations: int = 1):
    """
    Evaluate Q function greediely with epsilon=0
    :returns
        average cumulative reward,
        the expected reward after resetting the environment,
        episode length
    NOTE: ``expected_reward`` and ``episode_length`` are overwritten on
    each rollout, so the returned values reflect the last evaluation only;
    the cumulative reward is averaged over all rollouts.
    """
    cumuls = []
    for _ in range(nevaluations):
        evaluation_state, _ = this_environment.reset()
        episode_length, cummulative_reward = 0, 0
        # greedy value estimate of the start state
        expected_reward = np.max(Q[evaluation_state])
        greedy = make_tabular_policy(Q, 0, this_environment.action_space.n)
        while True:  # roll out episode
            evaluation_action = np.random.choice(
                list(range(this_environment.action_space.n)), p=greedy(evaluation_state)
            )
            s_, evaluation_reward, eval_done, evaluation_done, _ = this_environment.step(
                evaluation_action
            )
            cummulative_reward += evaluation_reward
            episode_length += 1
            # stop on either termination or truncation
            if evaluation_done or eval_done:
                break
            evaluation_state = s_
        cumuls.append(cummulative_reward)
    return np.mean(cumuls), expected_reward, episode_length  # mean reward, expected start value, last episode length
def update(
    Q: QTable, environment, policy: callable, alpha: float, discount_factor: float
):
    """
    Q update

    Runs one full episode with ``policy`` and applies the tabular
    Q-learning update after every step (off-policy target: max over the
    next state's action values).

    :param Q: state-action value look-up table (updated in place)
    :param environment: environment to use
    :param policy: the current policy
    :param alpha: learning rate
    :param discount_factor: discounting factor
    """
    # Need to parse to string to easily handle list as state with defdict
    policy_state, _ = environment.reset()
    episode_length, cummulative_reward = 0, 0
    # greedy value estimate of the start state before this episode's updates
    expected_reward = np.max(Q[policy_state])
    terminated, truncated = False, False
    while not (terminated or truncated):  # roll out episode
        policy_action = np.random.choice(
            list(range(environment.action_space.n)), p=policy(policy_state)
        )
        s_, policy_reward, terminated, truncated, _ = environment.step(policy_action)
        cummulative_reward += policy_reward
        episode_length += 1
        # Q-learning: TD target uses the best action value of the next state
        Q[[policy_state, policy_action]] = Q[[policy_state, policy_action]] + alpha * (
            (policy_reward + discount_factor * Q[[s_, np.argmax(Q[s_])]])
            - Q[[policy_state, policy_action]]
        )
        policy_state = s_
    return (
        Q,
        cummulative_reward,
        expected_reward,
        episode_length,
    )  # Q, cumulative reward
# Per-run bookkeeping container: one array entry per episode.
EpisodeStats = namedtuple(
    "Stats", ["episode_lengths", "episode_rewards", "expected_rewards"]
)
def zeroOne(stringput):
    """
    Helper to keep input arguments in [0, 1]

    Intended as an argparse ``type=`` checker: parses *stringput* as a
    float and validates the range.

    :raises argparse.ArgumentTypeError: if the value lies outside [0, 1]
        (a non-numeric string raises ValueError from float(), which
        argparse also reports as a type error)
    """
    val = float(stringput)
    if val < 0 or val > 1.0:
        # Bug fix: printf-style arguments were previously passed to
        # ArgumentTypeError, which never interpolates them -- format the
        # message explicitly so the offending value actually appears.
        raise argparse.ArgumentTypeError("%r is not in [0, 1]" % stringput)
    return val
DACBench | DACBench-main/examples/plotting/state_plotting.py | from pathlib import Path
from dacbench.logger import load_logs, log2dataframe
from dacbench.plotting import plot_state
import matplotlib.pyplot as plt
import pandas as pd
def plot_state_CMAES():
    """
    Plot state information of CMA-ES run over time
    """
    # Since converting the json logs to a data frame takes a couple of minutes
    # we cache the logs for tuning the plot settings in a pickled dataframe object
    path = Path("output/cached_logs.pickle")
    if not path.exists():
        file = Path("./data/CMAESBenchmark/StateTrackingWrapper.jsonl")
        if not file.exists():
            print(
                "Please run 'examples/benchmarks/chainerrl_cma.py' to generate plotting data first"
            )
            return
        logs = load_logs(file)
        dataframe = log2dataframe(logs, wide=True)
        dataframe.to_pickle(path)
    else:
        dataframe = pd.read_pickle(path)
    Path("output").mkdir(exist_ok=True)
    # The CMAES observation space has over 170 dims. Here we just plot a subset
    # here we get all different parts of the states
    # (column names follow the pattern "state_<part>_<i>")
    columns = pd.DataFrame(
        (column.split("_") for column in dataframe.columns),
        columns=["part", "subpart", "i"],
    )
    state_parts = columns[columns["part"] == "state"]["subpart"].unique()
    print(f"State parts {state_parts}")
    # But since History Deltas(80), Past Deltas(40) and Past Sigma Deltas(40)
    # have too many dims to be plotted we only show
    state_parts = ["Loc", "Population Size", "Sigma"]
    for state_part in state_parts:
        # keep all non-state columns plus the state columns of this part
        state_part_columns = [
            column
            for column in dataframe.columns
            if not column.startswith("state") or column.split("_")[1] == state_part
        ]
        grid = plot_state(dataframe[state_part_columns], interval=100, title=state_part)
        grid.savefig(f"output/cmaes_state_{state_part}.pdf")
        plt.show()
        # one can also show the global step (increasing step over episodes) on x axis
        grid = plot_state(
            dataframe[state_part_columns],
            show_global_step=True,
            interval=100,
            title=state_part,
        )
        grid.savefig(f"output/cmaes_state_{state_part}_global_step.pdf")
        plt.show()
if __name__ == "__main__":
    plot_state_CMAES()
| 2,291 | 32.217391 | 99 | py |
DACBench | DACBench-main/examples/plotting/performance_plotting.py | from pathlib import Path
from seaborn import plotting_context
from dacbench.logger import load_logs, log2dataframe
from dacbench.plotting import plot_performance_per_instance, plot_performance
import matplotlib.pyplot as plt
def per_instance_example():
    """
    Plot CMA performance for each training instance

    Expects the jsonl log to have been produced beforehand by the CMA
    example run.
    """
    file = Path("./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl")
    logs = load_logs(file)
    data = log2dataframe(logs, wide=True, drop_columns=["time"])
    grid = plot_performance_per_instance(
        data, title="CMA Mean Performance per Instance"
    )
    grid.savefig("output/cma_performance_per_instance.pdf")
    plt.show()
def performance_example():
    """
    Plot Sigmoid performance over time, divided by seed and with each seed in its own plot
    """
    file = Path("./data/sigmoid_example/PerformanceTrackingWrapper.jsonl")
    logs = load_logs(file)
    data = log2dataframe(logs, wide=True, drop_columns=["time"])
    Path("output").mkdir(exist_ok=True)
    # overall
    grid = plot_performance(data, title="Overall Performance")
    grid.savefig("output/sigmoid_overall_performance.pdf")
    plt.show()
    # per instance seed (hue)
    grid = plot_performance(data, title="Overall Performance", hue="seed")
    grid.savefig("output/sigmoid_overall_performance_per_seed_hue.pdf")
    plt.show()
    # per instance seed (col): one subplot per seed, using the larger
    # "poster" plotting context for readability
    with plotting_context("poster"):
        grid = plot_performance(
            data, title="Overall Performance", col="seed", col_wrap=3
        )
        grid.fig.subplots_adjust(top=0.92)
        grid.savefig("output/sigmoid_overall_performance_per_seed.pdf")
        plt.show()
if __name__ == "__main__":
    per_instance_example()
    performance_example()
| 1,758 | 29.327586 | 90 | py |
DACBench | DACBench-main/examples/plotting/action_plotting.py | from pathlib import Path
from dacbench.logger import load_logs, log2dataframe
from dacbench.plotting import plot_action
import matplotlib.pyplot as plt
def plot_scalar_action():
    """
    Plot Sigmoid actions over time by action component and by mean action component in intervals
    """
    file = Path("./data/sigmoid_example/ActionFrequencyWrapper.jsonl")
    logs = load_logs(file)
    dataframe = log2dataframe(logs, wide=True)
    Path("output").mkdir(exist_ok=True)
    # interval=18 averages the actions over windows of 18 steps
    grid = plot_action(dataframe, interval=18, title="Sigmoid", col="seed", col_wrap=3)
    grid.savefig("output/sigmoid_example_action_interval_18.pdf")
    plt.show()
    grid = plot_action(dataframe, title="Sigmoid", col="seed", col_wrap=3)
    grid.savefig("output/sigmoid_example_action.pdf")
    plt.show()
def plot_action_modea():
    """
    Plot ModEA actions over time and in intervals
    """
    file = Path("data/ModeaBenchmark/ActionFrequencyWrapper.jsonl")
    logs = load_logs(file)
    dataframe = log2dataframe(logs, wide=True)
    Path("output").mkdir(exist_ok=True)
    # interval=5 averages the actions over windows of 5 steps
    grid = plot_action(dataframe, interval=5)
    grid.savefig("output/modea_action_interval_5.pdf")
    plt.show()
    grid = plot_action(dataframe)
    grid.savefig("output/modea_action.pdf")
    plt.show()
if __name__ == "__main__":
    plot_action_modea()
    plot_scalar_action()
| 1,358 | 27.914894 | 96 | py |
DACBench | DACBench-main/examples/plotting/time_plotting.py | from pathlib import Path
import pandas as pd
from dacbench.logger import load_logs, log2dataframe
from dacbench.plotting import plot_step_time, plot_episode_time
import matplotlib.pyplot as plt
def step_time_example(data):
    """
    Plot time spent per step on average and split by seed
    Parameters
    ----------
    data : pd.DataFrame
        The non-wide data frame resulting from loading the logging results from EpisodeTimeTracker
    """
    grid = plot_step_time(data, y_label="Step Duration [s]")
    grid.savefig("output/sigmoid_step_duration.pdf")
    plt.show()
    # same plot, but with one line per seed
    grid = plot_step_time(data, y_label="Step Duration [s]", hue="seed")
    grid.savefig("output/sigmoid_step_duration_per_seed.pdf")
    plt.show()
def episode_time_example(data):
    """
    Plot time spent per episode
    Parameters
    ----------
    data : pd.DataFrame
        The non-wide data frame resulting from loading the logging results from EpisodeTimeTracker
    """
    print(data[~data.episode_duration.isna()])
    # rows without an episode_duration belong to individual steps and are
    # filtered out before plotting
    grid = plot_episode_time(
        data[~data.episode_duration.isna()], y_label="Episode Duration [s]"
    )
    grid.savefig("output/sigmoid_episode_duration.pdf")
    plt.show()
def step_time_interval_example(data: pd.DataFrame, interval: int = 10):
    """
    Plot mean time spent on steps in a given interval
    Parameters
    ----------
    data : pd.DataFrame
        The non-wide data frame resulting from loading the logging results from EpisodeTimeTracker
    interval : int
        Number of steps to average over
    """
    grid = plot_step_time(data, interval, title="Mean Step Duration")
    grid.savefig("output/sigmoid_step_duration.pdf")
    plt.show()
if __name__ == "__main__":
# Load data from file into pandas DataFrame
file = Path("data/sigmoid_example/EpisodeTimeWrapper.jsonl")
logs = load_logs(file)
data = log2dataframe(logs, wide=True, drop_columns=["time"])
Path("output").mkdir(exist_ok=True)
# Plot episode time
episode_time_example(data)
# Plot step time (overall & per seed)
step_time_example(data)
# Plot step time over intervals of 10 steps
step_time_interval_example(data)
| 2,181 | 28.486486 | 98 | py |
DACBench | DACBench-main/examples/wrappers/reward_noise_wrapping.py | from chainerrl import wrappers
from examples.example_utils import DummyEnv, train_chainer, make_chainer_dqn
from dacbench.wrappers import RewardNoiseWrapper
# We use a dummy env with constant reward of 0 to demonstrate the different noise values
env = DummyEnv()
# Chainer requires casting
env = wrappers.CastObservationToFloat32(env)
# Make chainer agent
obs_size = env.observation_space.n
agent = make_chainer_dqn(obs_size, env.action_space)
# First example: Adding reward noise from the default settings of normal and exponential distributions
print(
    "Demonstrating the most common distributions: standard versions of normal and exponential"
)
print("\n")
for noise_dist in ["standard_normal", "standard_exponential"]:
    print(f"Current noise distribution: {noise_dist}")
    print("Base reward is 0")
    wrapped = RewardNoiseWrapper(env, noise_dist=noise_dist)
    train_chainer(agent, wrapped)
    print("\n")
# Second example: Using customized reward noise distributions
# (dist_args are the distribution parameters, e.g. mean/std for 'normal')
print("Other distributions with added arguments")
print("\n")
for noise_dist, args in zip(
    ["normal", "uniform", "logistic"], [[0, 0.1], [-1, 1], [0, 2]]
):
    print(f"Current noise distribution: {noise_dist}")
    print("Base reward is 0")
    wrapped = RewardNoiseWrapper(env, noise_dist=noise_dist, dist_args=args)
    train_chainer(agent, wrapped)
    print("\n")
# Third example: using noise from a custom noise function
print("Custom 'noise' function: always add 1")
print("\n")
def noise():
    """Custom noise source: a constant offset of 1 added to every reward."""
    return 1
wrapped = RewardNoiseWrapper(env, noise_function=noise)
train_chainer(agent, wrapped)
| 1,594 | 31.55102 | 102 | py |
DACBench | DACBench-main/examples/wrappers/state_tracking_CMAES.py | from pathlib import Path
from dacbench.agents import RandomAgent
from dacbench.logger import Logger
from dacbench.runner import run_benchmark
from dacbench.benchmarks import CMAESBenchmark
from dacbench.wrappers import StateTrackingWrapper
# Make CMAESBenchmark environment
bench = CMAESBenchmark()
env = bench.get_environment()
# Make Logger object to track state information
logger = Logger(
    experiment_name=type(bench).__name__, output_path=Path("../plotting/data")
)
logger.set_env(env)
# Wrap env with StateTrackingWrapper
env = StateTrackingWrapper(env, logger=logger.add_module(StateTrackingWrapper))
# Run random agent for 5 episodes and log state information to file
# You can plot these results with the plotting examples
agent = RandomAgent(env)
run_benchmark(env, agent, 5, logger=logger)
# Close the logger so the log files are finished before being read elsewhere
logger.close()
| 825 | 29.592593 | 79 | py |
DACBench | DACBench-main/examples/wrappers/instance_handling.py | import numpy as np
from dacbench.benchmarks import SigmoidBenchmark
from dacbench.wrappers import InstanceSamplingWrapper
# Helper method to sample a single sigmoid instance
def sample_sigmoid():
    """Draw one random sigmoid instance as ``[shift, slope]``.

    The shift is normally distributed (mean 5, std 2.5); the slope is a
    uniformly drawn magnitude in [0, 2) with a random sign.
    """
    generator = np.random.default_rng()
    shift = generator.normal(5, 2.5, 1)
    sign = generator.choice([-1, 1], 1)
    slope = sign * generator.uniform(size=1) * 2
    return np.concatenate((shift, slope))
# Sample n sigmoid instances
def sample_instance(n):
    """Sample an instance set of ``n`` random sigmoid instances.

    Parameters
    ----------
    n : int
        Number of instances to sample.

    Returns
    -------
    dict
        Mapping from instance id (0 .. n-1) to a sampled ``[shift, slope]``
        instance.
    """
    # Bug fix: the original used the constant ``n`` as the dict key on every
    # iteration, so the returned set only ever contained a single (the last)
    # instance instead of n of them.
    return {i: sample_sigmoid() for i in range(n)}
# Helper method to print current instance set
def print_instance_set(instance_set):
    """Print every instance as ``Instance <n>: <shift>, <slope>``.

    Instances are numbered from 1 in the iteration order of *instance_set*.
    """
    for number, key in enumerate(instance_set, start=1):
        instance = instance_set[key]
        print(f"Instance {number}: {instance[0]}, {instance[1]}")
# Make Sigmoid benchmark object
bench = SigmoidBenchmark()
bench.set_action_values([3])
# First example: read instances from default instance set path
instances_from_file = bench.get_environment()
print("Instance set read from file")
print_instance_set(instances_from_file.instance_set)
print("\n")
# Second example: Sample instance set before training
instance_set = sample_instance(20)
bench.config.instance_set = instance_set
instances_sampled_beforehand = bench.get_environment()
print("Instance set sampled before env creation")
print_instance_set(instances_sampled_beforehand.instance_set)
print("\n")
# Third example: Sample instances during training using the InstanceSamplingWrapper
# Each reset() replaces the instance set via the given sampling function.
print("Instance sampled each reset")
instances_on_the_fly = InstanceSamplingWrapper(
    instances_from_file, sampling_function=sample_sigmoid
)
print("Resetting")
instances_on_the_fly.reset()
print(
    f"Instance: {instances_on_the_fly.instance_set[0][0]}, {instances_on_the_fly.instance_set[0][1]}"
)
print("Resetting")
instances_on_the_fly.reset()
print(
    f"Instance: {instances_on_the_fly.instance_set[0][0]}, {instances_on_the_fly.instance_set[0][1]}"
)
print("Resetting")
instances_on_the_fly.reset()
print(
    f"Instance: {instances_on_the_fly.instance_set[0][0]}, {instances_on_the_fly.instance_set[0][1]}"
)
print("Resetting")
instances_on_the_fly.reset()
print(
    f"Instance: {instances_on_the_fly.instance_set[0][0]}, {instances_on_the_fly.instance_set[0][1]}"
)
# NOTE(review): this final "Resetting" is printed without a matching reset()
# call — looks like a leftover; confirm whether a fifth reset was intended.
print("Resetting")
print("\n")
# Advanced option: directly setting the instance set during training
env = bench.get_environment()
print("Replacing the instance_set mid training")
env.instance_set = {0: [0, 0]}
print_instance_set(env.instance_set)
print("Instance set change")
env.instance_set = {0: [2, 1], 1: [3, 5], 2: [1, 1]}
print_instance_set(env.instance_set)
| 2,596 | 29.916667 | 101 | py |
DACBench | DACBench-main/examples/wrappers/action_tracking_modcma.py | from pathlib import Path
from dacbench.agents import RandomAgent
from dacbench.logger import Logger
from dacbench.runner import run_benchmark
from dacbench.benchmarks import ModCMABenchmark
from dacbench.wrappers import ActionFrequencyWrapper
# Make ModCMABenchmark environment
bench = ModCMABenchmark()
env = bench.get_environment()
# Make logger object; logs land in ../plotting/data
logger = Logger(
    experiment_name=type(bench).__name__, output_path=Path("../plotting/data")
)
logger.set_env(env)
logger.add_benchmark(bench)
# Wrap environment to track action frequency
env = ActionFrequencyWrapper(env, logger=logger.add_module(ActionFrequencyWrapper))
# Run random agent for 5 episodes and log actions to file
agent = RandomAgent(env)
run_benchmark(env, agent, 5, logger=logger)
| 762 | 27.259259 | 83 | py |
DACBench | DACBench-main/dacbench/run_baselines.py | import argparse
import itertools
import sys
from pathlib import Path
import numpy as np
from dacbench import benchmarks
from dacbench.agents import DynamicRandomAgent, GenericAgent, StaticAgent
from dacbench.envs.policies import NON_OPTIMAL_POLICIES, OPTIMAL_POLICIES
from dacbench.logger import Logger
from dacbench.runner import run_benchmark
from dacbench.wrappers import PerformanceTrackingWrapper
# Per-dimension action choices for the Modea benchmark: nine binary switches
# followed by two ternary ones (an 11-dimensional discrete action space).
modea_actions = [
    np.arange(2),
    np.arange(2),
    np.arange(2),
    np.arange(2),
    np.arange(2),
    np.arange(2),
    np.arange(2),
    np.arange(2),
    np.arange(2),
    np.arange(3),
    np.arange(3),
]
# Candidate action sets used by the static baseline for each benchmark.
# Sigmoid/Modea enumerate the product of their per-dimension choices;
# CMAES/SGD discretize a continuous range into 50 values.
DISCRETE_ACTIONS = {
    "SigmoidBenchmark": list(itertools.product(*[np.arange(val) for val in (5, 10)])),
    "LubyBenchmark": np.arange(6),
    "FastDownwardBenchmark": [0, 1],
    "CMAESBenchmark": [np.around(a, decimals=1) for a in np.linspace(0.2, 10, num=50)],
    "ModeaBenchmark": list(itertools.product(*modea_actions)),
    "SGDBenchmark": [np.around(a, decimals=1) for a in np.linspace(0, 10, num=50)],
}
def run_random(results_path, benchmark_name, num_episodes, seeds, fixed):
    """
    Evaluate a (possibly step-fixed) random policy on one benchmark.

    One run per seed; performance is tracked per episode and written to disk.

    Parameters
    ----------
    results_path : str
        Path to where results should be saved
    benchmark_name : str
        Name of the benchmark to run
    num_episodes : int
        Number of episodes to run per seed
    seeds : list[int]
        Seeds to evaluate on
    fixed : int
        Number of steps each sampled action is kept fixed for
    """
    bench = getattr(benchmarks, benchmark_name)()
    for seed in seeds:
        # Encode the fixed-step setting in the experiment name when it is used
        name = f"random_fixed{fixed}_{seed}" if fixed > 1 else f"random_{seed}"
        logger = Logger(
            experiment_name=name,
            output_path=results_path / benchmark_name,
        )
        env = bench.get_benchmark(seed=seed)
        env = PerformanceTrackingWrapper(
            env, logger=logger.add_module(PerformanceTrackingWrapper)
        )
        agent = DynamicRandomAgent(env, fixed)
        logger.add_agent(agent)
        logger.add_benchmark(bench)
        logger.set_env(env)
        run_benchmark(env, agent, num_episodes, logger)
        logger.close()
def run_static(results_path, benchmark_name, action, num_episodes, seeds=np.arange(10)):
    """
    Evaluate a static policy that always plays *action*.

    One run per seed; performance is tracked per episode and written to disk,
    with the chosen action recorded as additional run info.

    Parameters
    ----------
    results_path : str
        Path to where results should be saved
    benchmark_name : str
        Name of the benchmark to run
    action : int | float
        The action to repeat every step
    num_episodes : int
        Number of episodes to run per seed
    seeds : list[int]
        Seeds to evaluate on (default 0..9)
    """
    bench = getattr(benchmarks, benchmark_name)()
    for seed in seeds:
        logger = Logger(
            experiment_name=f"static_{action}_{seed}",
            output_path=results_path / benchmark_name,
        )
        env = PerformanceTrackingWrapper(
            bench.get_benchmark(seed=seed),
            logger=logger.add_module(PerformanceTrackingWrapper),
        )
        agent = StaticAgent(env, action)
        logger.add_agent(agent)
        logger.add_benchmark(bench)
        logger.set_env(env)
        # Record which action this run used alongside the logs
        logger.set_additional_info(action=action)
        run_benchmark(env, agent, num_episodes, logger)
        logger.close()
def run_optimal(results_path, benchmark_name, num_episodes, seeds):
    """
    Evaluate the known optimal policy for a benchmark, if one exists.

    Prints a notice and does nothing for benchmarks without an optimal policy.

    Parameters
    ----------
    results_path : str
        Path to where results should be saved
    benchmark_name : str
        Name of the benchmark to run
    num_episodes : int
        Number of episodes to run per seed
    seeds : list[int]
        Seeds to evaluate on
    """
    try:
        policy = OPTIMAL_POLICIES[benchmark_name]
    except KeyError:
        print("No optimal policy found for this benchmark")
        return
    run_policy(results_path, benchmark_name, num_episodes, policy, seeds)
def run_dynamic_policy(results_path, benchmark_name, num_episodes, seeds=np.arange(10)):
    """
    Run dynamic baseline policy.

    Prints a notice and does nothing for benchmarks without a dynamic baseline.

    Parameters
    ----------
    results_path : str
        Path to where results should be saved
    benchmark_name : str
        Name of the benchmark to run
    num_episodes : int
        Number of episodes to run for each benchmark
    seeds : list[int]
        List of seeds to runs all benchmarks for. If None (default) seeds [1, ..., 10] are used.
    """
    if benchmark_name not in NON_OPTIMAL_POLICIES:
        print("No dynamic policy found for this benchmark")
        # Bug fix: without this return the lookup below raised a KeyError for
        # benchmarks that have no dynamic baseline (cf. run_optimal).
        return
    policy = NON_OPTIMAL_POLICIES[benchmark_name]
    run_policy(results_path, benchmark_name, num_episodes, policy, seeds)
def run_policy(results_path, benchmark_name, num_episodes, policy, seeds=np.arange(10)):
    """
    Evaluate an arbitrary policy function on one benchmark.

    One run per seed; performance is tracked per episode and written to disk.
    CMA-ES runs are labelled "csa", all others "optimal".

    Parameters
    ----------
    results_path : str
        Path to where results should be saved
    benchmark_name : str
        Name of the benchmark to run
    num_episodes : int
        Number of episodes to run per seed
    policy : AbstractDACBenchAgent
        The policy to run
    seeds : list[int]
        Seeds to evaluate on (default 0..9)
    """
    bench = getattr(benchmarks, benchmark_name)()
    for seed in seeds:
        prefix = "csa" if benchmark_name == "CMAESBenchmark" else "optimal"
        logger = Logger(
            experiment_name=f"{prefix}_{seed}",
            output_path=results_path / benchmark_name,
        )
        env = bench.get_benchmark(seed=seed)
        env = PerformanceTrackingWrapper(
            env, logger=logger.add_module(PerformanceTrackingWrapper)
        )
        agent = GenericAgent(env, policy)
        logger.add_agent(agent)
        logger.add_benchmark(bench)
        logger.set_env(env)
        run_benchmark(env, agent, num_episodes, logger)
        logger.close()
def main(args):
    """Main evaluation loop: parse CLI arguments and dispatch baseline runs.

    Supports four mutually combinable modes: --random, --static (one run per
    action), --optimal and --dyna_baseline, each over --benchmarks and --seeds.
    """
    parser = argparse.ArgumentParser(
        description="Run simple baselines for DAC benchmarks",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("--outdir", type=str, default="output", help="Output directory")
    parser.add_argument(
        "--benchmarks",
        nargs="+",
        type=str,
        choices=benchmarks.__all__,
        default=None,
        help="Benchmarks to run baselines for, if not provides all benchmarks are run.",
    )
    parser.add_argument(
        "--num_episodes",
        type=int,
        default=10,
        help="Number of episodes to evaluate policy on",
    )
    parser.add_argument(
        "--random",
        action="store_true",
        help="Run random policy. Use '--fixed_random' to fix the "
        "random action for a number of steps",
    )
    parser.add_argument("--static", action="store_true", help="Run static policy")
    parser.add_argument(
        "--optimal",
        action="store_true",
        help=f"Run optimal policy. Only available for {', '.join(OPTIMAL_POLICIES.keys())}",
    )
    parser.add_argument(
        "--dyna_baseline",
        action="store_true",
        help=f"Run dynamic baseline. Only available for {', '.join(NON_OPTIMAL_POLICIES.keys())}",
    )
    # Abbreviate long action lists ("a, b, c, ..., z") for the --actions help text
    shortened_possible_actions = {
        benchmark: ", ".join(
            (
                map(str, actions)
                if len(actions) < 4
                else map(str, [*actions[:3], "...", actions[-1]])
            )
        )
        for benchmark, actions in DISCRETE_ACTIONS.items()
    }
    possible_actions = ", ".join(
        [
            f"{benchmark} : {actions}"
            for benchmark, actions in shortened_possible_actions.items()
        ]
    )
    parser.add_argument(
        "--actions",
        nargs="+",
        type=float,
        default=None,
        help="Action(s) for static policy. Make sure, that the actions correspond to the benchmarks. Available action "
        f"are {possible_actions}",
    )
    parser.add_argument(
        "--seeds",
        nargs="+",
        type=int,
        default=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        help="Seeds for evaluation",
    )
    parser.add_argument(
        "--fixed_random",
        type=int,
        default=0,
        help="Fixes random actions for n steps",
    )
    args = parser.parse_args(args)
    # Default to all registered benchmarks when none are specified
    if args.benchmarks is None:
        benchs = benchmarks.__all__
    else:
        benchs = args.benchmarks
    args.outdir = Path(args.outdir)
    if args.random:
        for b in benchs:
            run_random(args.outdir, b, args.num_episodes, args.seeds, args.fixed_random)
    if args.static:
        for b in benchs:
            if args.actions is None:
                actions = DISCRETE_ACTIONS[b]
            else:
                actions = args.actions
            # FastDownward only accepts integer actions; --actions parses floats
            if b == "FastDownwardBenchmark":
                actions = [int(a) for a in actions]
            for a in actions:
                run_static(args.outdir, b, a, args.num_episodes, args.seeds)
    if args.optimal:
        for b in benchs:
            # NOTE(review): 'break' aborts ALL remaining benchmarks once one
            # without an optimal policy is hit — 'continue' may be intended.
            if b not in OPTIMAL_POLICIES:
                print("Option not available!")
                break
            run_optimal(args.outdir, b, args.num_episodes, args.seeds)
    if args.dyna_baseline:
        for b in benchs:
            if b not in NON_OPTIMAL_POLICIES:
                print("Option not available!")
                break
            run_dynamic_policy(args.outdir, b, args.num_episodes, args.seeds)
# Script entry point: forward CLI arguments (minus the program name) to main
if __name__ == "__main__":
    main(sys.argv[1:])
| 9,836 | 28.809091 | 119 | py |
DACBench | DACBench-main/dacbench/abstract_agent.py | class AbstractDACBenchAgent:
"""Abstract class to implement for use with the runner function."""
    def __init__(self, env):
        """
        Initialize agent.

        Parameters
        ----------
        env : gym.Env
            Environment to train on
        """
        # The base class stores nothing; concrete agents keep their own state.
        pass
    def act(self, state, reward):
        """
        Compute and return environment action.

        Parameters
        ----------
        state
            Environment state
        reward
            Environment reward

        Returns
        -------
        action
            Action to take

        Raises
        ------
        NotImplementedError
            Must be implemented by concrete agents.
        """
        raise NotImplementedError
    def train(self, next_state, reward):
        """
        Train during episode if needed (pass if not).

        Parameters
        ----------
        next_state
            Environment state after step
        reward
            Environment reward

        Raises
        ------
        NotImplementedError
            Must be implemented (possibly as a no-op) by concrete agents.
        """
        raise NotImplementedError
    def end_episode(self, state, reward):
        """
        End of episode training if needed (pass if not).

        Parameters
        ----------
        state
            Environment state
        reward
            Environment reward

        Raises
        ------
        NotImplementedError
            Must be implemented (possibly as a no-op) by concrete agents.
        """
        raise NotImplementedError
| 1,225 | 18.774194 | 71 | py |
DACBench | DACBench-main/dacbench/plotting.py | from typing import List, Tuple
import numpy as np
import pandas as pd
import seaborn as sns
# Apply the seaborn theme used by every plot in this module
sns.set_style("darkgrid")
def space_sep_upper(column_name: str) -> str:
    """
    Turn an underscore-separated logging name into a title-style axis label.

    Parameters
    ----------
    column_name : str
        Name to generate a label for; may be ``None``.

    Returns
    -------
    str
        Title-cased label with underscores replaced by spaces, or ``None``
        when *column_name* is ``None``.
    """
    return None if column_name is None else column_name.title().replace("_", " ")
def generate_global_step(
    data: pd.DataFrame,
    x_column: str = "global_step",
    x_label_columns: List[str] = None,
) -> Tuple[pd.DataFrame, pd.DataFrame, str, List[str]]:
    """
    Add a global_step column which enumerates all steps over all episodes.

    Returns the altered data, a data frame containing the mapping between
    global_step, x_column and x_label_columns, plus the column names used.
    Often used in combination with add_multi_level_ticks.

    Parameters
    ----------
    data: pd.DataFrame
        data source
    x_column: str
        the name of the global_step (default 'global_step')
    x_label_columns: [str, ...]
        the name and hierarchical order of the columns
        (default ['episode', 'step'])

    Returns
    -------
    (data, plot_index, x_column, x_label_columns)
    """
    # Avoid a mutable default argument; None stands in for ['episode', 'step']
    if x_label_columns is None:
        x_label_columns = ["episode", "step"]
    # One row per unique (episode, step) pair, in hierarchical order
    plot_index = (
        data.groupby(x_label_columns)
        .count()
        .reset_index()[x_label_columns]
        .sort_values(x_label_columns)
    )
    plot_index[x_column] = np.arange(len(plot_index))
    # Note: the original called plot_index.set_index(x_column) and discarded
    # the result (set_index is not in-place) — dropped here as a no-op. The
    # RangeIndex already coincides with the global_step values, which
    # add_multi_level_ticks relies on.
    data = data.merge(plot_index, on=x_label_columns)
    return data, plot_index, x_column, x_label_columns
def add_multi_level_ticks(
    grid: sns.FacetGrid, plot_index: pd.DataFrame, x_column: str, x_label_columns: List[str]
) -> None:
    """
    Expects a FacedGrid with global_step (x_column) as x-axis and replaces the tick labels to match format episode:step.

    E.g. Run with 3 episodes, each of 10 steps. This results in 30 global steps.
    The resulting tick labels could be ['0', '4', '9', '14', '19', '24', '29'].
    After applying this method they will look like ['0:0', '0:4', '1:0', '1:4', '2:0', '2:4', '3:0', '3:4']

    Parameters
    ----------
    grid: sns.FacesGrid
        The grid to plot onto
    plot_index: pd.DataFrame
        The mapping between current tick labels (global step values) and new tick labels joined by ':'.
        usually the result from generate_global_step
    x_column: str
        column label to use for looking up tick values
    x_label_columns: [str, ...]
        columns labels of columns to use for new labels (joined by ':')
    """
    for ax in grid.axes.flat:
        ticks = ax.get_xticks()
        # Keep only the mapping rows whose global_step is an actual tick
        sub_set = plot_index[plot_index[x_column].isin(ticks)]
        # NOTE(review): sub_set.loc[tick] assumes plot_index's row index equals
        # the global_step values — true for generate_global_step output (its
        # RangeIndex coincides with np.arange), but fragile; verify if
        # plot_index comes from elsewhere.
        new_labels = (
            sub_set.loc[tick][x_label_columns].tolist()
            if tick in sub_set.index
            else (None, None)
            for tick in ticks
        )
        # Ticks outside the data range get an empty label
        new_labels = [
            f"{epoch}:{step}" if epoch is not None else "" for epoch, step in new_labels
        ]
        ax.set_xticklabels(new_labels, minor=False)
def plot(
    plot_function,
    settings: dict,
    title: str = None,
    x_label: str = None,
    y_label: str = None,
    **kwargs,
) -> sns.FacetGrid:
    """
    Build a FacetGrid from *plot_function* and a dict of default settings.

    The defaults in *settings* are overwritten by *kwargs*, the grid is drawn,
    axis labels are derived from the plotted column names when not given
    explicitly, and the optional title is placed above the figure.

    Parameters
    ----------
    plot_function:
        function to generate the FacedGrid. E.g. sns.catplot or sns.relplot
    settings: dict
        a dict containing all needed default settings.
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacedGrid
    """
    # User-provided keyword arguments take precedence over the defaults
    settings.update(kwargs.items())
    grid = plot_function(**settings)

    # Fall back to prettified column names for missing axis labels
    grid.set_xlabels(space_sep_upper(grid._x_var) if x_label is None else x_label)
    grid.set_ylabels(space_sep_upper(grid._y_var) if y_label is None else y_label)

    grid.tight_layout()
    if title is not None:
        # Rule-of-thumb offsets; may need adjusting for large multi-row grids
        grid.fig.suptitle(title, y=0.97)
        grid.fig.subplots_adjust(top=0.9)
    return grid
def plot_performance(
    data, title=None, x_label=None, y_label=None, **kwargs
) -> sns.FacetGrid:
    """
    Create a line plot of the performance over episodes.

    Per default the mean performance and one stddev over all instances and
    seeds is shown; to change this, map those attributes to plot properties,
    e.g. hue='seed' or/and col='instance'.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html
    For examples refer to examples/plotting/performance_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacedGrid
    """
    defaults = {
        "data": data,
        "x": "episode",
        "y": "overall_performance",
        "kind": "line",
    }
    return plot(sns.relplot, defaults, title, x_label, y_label, **kwargs)
def plot_performance_per_instance(
    data, title=None, x_label=None, y_label=None, **args
) -> sns.FacetGrid:
    """
    Create a bar plot of the mean performance per instance, ordered by performance.

    Per default the mean over seeds is shown; to change this, map seed to a
    plot property, e.g. col='seed'.
    For more details see: https://seaborn.pydata.org/generated/seaborn.catplot.html
    For examples refer to examples/plotting/performance_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacedGrid
    """
    # Sort instances by their mean performance (ascending)
    mean_per_instance = data.groupby("instance").mean()
    order = mean_per_instance.sort_values("overall_performance").index
    defaults = {
        "data": data,
        "x": "instance",
        "y": "overall_performance",
        "order": order,
        "kind": "bar",
    }
    grid = plot(sns.catplot, defaults, title, x_label, y_label, **args)
    # todo: should probably not always be set like this (multi row/col)
    grid.set_titles("Mean Performance per Instance")
    return grid
def plot_step_time(
    data,
    show_global_step=False,
    interval=1,
    title=None,
    x_label=None,
    y_label=None,
    **args,
) -> sns.FacetGrid:
    """
    Create a line plot showing the measured time per step.

    Per default the mean performance and and one stddev over all instances and seeds is shown if you want to change
    this specify a property to map those attributes to e.g hue='seed' or/and col='instance'.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html

    For examples refer to examples/plotting/time_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    show_global_step: bool
        If to show the global_step (step enumerated over all episodes) or Episode:Step. (False default)
    interval: int
        Interval in number of steps to average over. (default = 1)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacedGrid
    """
    multi_level_x_label = "Epoch:Step"
    # Enumerate steps over all episodes so they share one x-axis
    data, plot_index, x_column, x_label_columns = generate_global_step(data)
    # Optionally average the step durations over fixed-size intervals
    if interval > 1:
        data["groups"] = data[x_column] // interval
        data = data.groupby("groups").agg({x_column: "min", "step_duration": "mean"})
        # NOTE(review): label reads "Mean per duration per N steps" — likely
        # meant "Mean step duration per N steps" (left unchanged here).
        y_label = (
            f"Mean per duration per {interval} steps" if y_label is None else y_label
        )
    settings = {
        "data": data,
        "x": x_column,
        "y": "step_duration",
        "kind": "line",
    }
    if x_label is None and not show_global_step:
        x_label = multi_level_x_label
    grid = plot(sns.relplot, settings, title, x_label, y_label, **args)
    # Replace global-step ticks with Episode:Step labels unless suppressed
    if not show_global_step:
        add_multi_level_ticks(grid, plot_index, x_column, x_label_columns)
    return grid
def plot_episode_time(
    data, title=None, x_label=None, y_label=None, **kargs
) -> sns.FacetGrid:
    """
    Create a line plot showing the measured time per episode.

    Per default the mean and one stddev over all instances and seeds is shown;
    to change this, map those attributes to plot properties, e.g. hue='seed'
    or/and col='instance'.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html
    For examples refer to examples/plotting/time_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacedGrid
    """
    defaults = {
        "data": data,
        "x": "episode",
        "y": "episode_duration",
        "kind": "line",
    }
    return plot(sns.relplot, defaults, title, x_label, y_label, **kargs)
def plot_action(
    data,
    show_global_step=False,
    interval=1,
    title=None,
    x_label=None,
    y_label=None,
    **kargs,
):
    """
    Create a line plot showing actions over time.

    Thin wrapper around :func:`plot_space` with the column prefix 'action'.
    Action spaces can be large; plotting all dimensions at once gets messy
    (and slow), so consider selecting a subset of columns first.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html
    For examples refer to examples/plotting/action_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    show_global_step: bool
        If to show the global_step (step enumerated over all episodes) or Episode:Step. (False default)
    interval: int
        Interval in number of steps to average over. (default = 1)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacedGrid
    """
    return plot_space(
        data,
        "action",
        show_global_step,
        interval,
        title,
        x_label,
        y_label,
        **kargs,
    )
def plot_state(
    data,
    show_global_step=False,
    interval=1,
    title=None,
    x_label=None,
    y_label=None,
    **kargs,
):
    """
    Create a line plot showing the state over time.

    Thin wrapper around :func:`plot_space` with the column prefix 'state'.
    States can be large (especially dict state spaces); plotting all
    dimensions at once gets messy (and slow), so consider selecting a subset
    of columns first.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html
    For examples refer to examples/plotting/state_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    show_global_step: bool
        If to show the global_step (step enumerated over all episodes) or Episode:Step. (False default)
    interval: int
        Interval in number of steps to average over. (default = 1)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacedGrid
    """
    return plot_space(
        data,
        "state",
        show_global_step,
        interval,
        title,
        x_label,
        y_label,
        **kargs,
    )
def plot_space(
    data,
    space_column_name,
    show_global_step,
    interval=1,
    title=None,
    x_label=None,
    y_label=None,
    **args,
) -> sns.FacetGrid:
    """
    Create a line plot showing space over time.

    Please be aware that spaces can be quite large and the plots can become quite messy (and take some time)
    if you try plot all dimensions at once. It is therefore recommended to select a subset of columns before running the
    plot method. Especially for dict spaces.

    Per default the mean performance and and one stddev over all instances and seeds is shown if you want to change
    this specify a property to map those attributes to e.g hue='seed' or/and col='instance'.
    For more details see: https://seaborn.pydata.org/generated/seaborn.relplot.html

    For examples refer to
    examples/plotting/state_plotting.py or
    examples/plotting/action_plotting.py

    Parameters
    ----------
    data: pd.DataFrame
        Dataframe resulting from logging and loading using log2dataframe(logs, wide=True)
    space_column_name : str
        Name (column prefix) of the space to plot, e.g. 'action' or 'state'
    show_global_step: bool
        If to show the global_step (step enumerated over all episodes) or Episode:Step. (False default)
    interval: int
        Interval in number of steps to average over. (default = 1)
    title: str
        Title of the plot (optional)
    x_label: str
        Label of the x-axis (optional)
    y_label: str
        Label of the y-axis (optional)
    kwargs:
        Keyword arguments to overwrite default settings.

    Returns
    -------
    sns.FacedGrid
    """
    # first find columns with prefix space_column_name
    space_entries = list(
        filter(lambda col: col.startswith(space_column_name), data.columns)
    )
    number_of_space_entries = len(space_entries)
    y_label_name = space_column_name
    if number_of_space_entries > 1:
        # if we have more than one space dim we reshape the dataframe wide->long
        # so each dimension (suffix after '_') gets its own rows / subplot
        data = pd.wide_to_long(
            data,
            stubnames=[space_column_name],
            sep="_",
            i=["episode", "step", "instance"]
            + (["seed"] if "seed" in data.columns else []),
            j="i",
            suffix=".*",
        ).reset_index()
    elif number_of_space_entries == 1 and space_column_name not in data.columns:
        # If there is only one dimension but the column carries a suffix
        # (e.g. 'action_0'), plot that column directly
        space_column_name, *_ = space_entries
    data, plot_index, x_column, x_label_columns = generate_global_step(data)
    # perform averaging over intervals
    if interval > 1:
        data["interval"] = data[x_column] // interval
        group_columns = list(
            data.columns.drop(x_label_columns + [x_column, space_column_name])
        )
        data = data.groupby(group_columns).agg(
            {x_column: "min", space_column_name: "mean"}
        )
        y_label = (
            f"Mean {y_label_name} per {interval} steps" if y_label is None else y_label
        )
        data = data.reset_index()
    settings = {
        "data": data,
        "x": x_column,
        "y": space_column_name,
        "kind": "line",
    }
    # we want the different dims in different plots / columns
    # todo: refactor
    if number_of_space_entries > 1:
        settings["col"] = "i"
    if number_of_space_entries > 3:
        # Wrap wide grids after three subplot columns
        settings["col_wrap"] = 3
    if "instance" in data.columns:
        settings["hue"] = "instance"
    if x_label is None:
        x_label = None if show_global_step else "Epoch:Step"
    if y_label is None:
        y_label = y_label_name
    grid = plot(sns.relplot, settings, title, x_label, y_label, **args)
    if not show_global_step:
        add_multi_level_ticks(grid, plot_index, x_column, x_label_columns)
    return grid
| 17,495 | 29.857143 | 120 | py |
DACBench | DACBench-main/dacbench/abstract_env.py | import random
import gymnasium as gym
import numpy as np
from gymnasium.utils import seeding
class AbstractEnv(gym.Env):
"""Abstract template for environments."""
def __init__(self, config):
"""
Initialize environment.
Parameters
----------
config : dict
Environment configuration
If to seed the action space as well
"""
super(AbstractEnv, self).__init__()
self.config = config
if "instance_update_func" in self.config.keys():
self.instance_updates = self.config["instance_update_func"]
else:
self.instance_updates = "round_robin"
self.instance_set = config["instance_set"]
self.instance_id_list = sorted(list(self.instance_set.keys()))
self.instance_index = 0
self.inst_id = self.instance_id_list[self.instance_index]
self.instance = self.instance_set[self.inst_id]
self.test = False
if "test_set" in self.config.keys():
self.test_set = config["test_set"]
self.test_instance_id_list = sorted(list(self.test_set.keys()))
self.test_instance_index = 0
self.test_inst_id = self.test_instance_id_list[self.test_instance_index]
self.test_instance = self.test_set[self.test_inst_id]
self.training_set = self.instance_set
self.training_id_list = self.instance_id_list
self.training_inst_id = self.inst_id
self.training_instance = self.instance
else:
self.test_set = None
self.benchmark_info = config["benchmark_info"]
self.initial_seed = None
self.np_random = None
self.n_steps = config["cutoff"]
self.c_step = 0
self.reward_range = config["reward_range"]
if "observation_space" in config.keys():
self.observation_space = config["observation_space"]
else:
if not config["observation_space_class"] == "Dict":
try:
self.observation_space = getattr(
gym.spaces, config["observation_space_class"]
)(
*config["observation_space_args"],
dtype=config["observation_space_type"],
)
except KeyError:
print(
"Either submit a predefined gym.space 'observation_space' or an 'observation_space_class' as well as a list of 'observation_space_args' and the 'observation_space_type' in the configuration."
)
print("Tuple observation_spaces are currently not supported.")
raise KeyError
else:
try:
self.observation_space = getattr(
gym.spaces, config["observation_space_class"]
)(*config["observation_space_args"])
except AssertionError:
print(
"To use a Dict observation space, the 'observation_space_args' in the configuration should be a list containing a Dict of gym.Spaces"
)
raise TypeError
# TODO: use dicts by default for actions and observations
# The config could change this for RL purposes
if "config_space" in config.keys():
actions = config["config_space"].get_hyperparameters()
action_types = [type(a).__name__ for a in actions]
# Uniform action space
if all(t == action_types[0] for t in action_types):
if "Float" in action_types[0]:
low = np.array([a.lower for a in actions])
high = np.array([a.upper for a in actions])
self.action_space = gym.spaces.Box(low=low, high=high)
elif "Integer" in action_types[0] or "Categorical" in action_types[0]:
if len(action_types) == 1:
try:
n = actions[0].upper - actions[0].lower
except:
n = len(actions[0].choices)
self.action_space = gym.spaces.Discrete(n)
else:
ns = []
for a in actions:
try:
ns.append(a.upper - a.lower)
except:
ns.append(len(a.choices))
self.action_space = gym.spaces.MultiDiscrete(np.array(ns))
else:
raise ValueError(
"Only float, integer and categorical hyperparameters are supported as of now"
)
# Mixed action space
# TODO: implement this
else:
raise ValueError("Mixed type config spaces are currently not supported")
elif "action_space" in config.keys():
self.action_space = config["action_space"]
else:
try:
self.action_space = getattr(gym.spaces, config["action_space_class"])(
*config["action_space_args"]
)
except KeyError:
print(
"Either submit a predefined gym.space 'action_space' or an 'action_space_class' as well as a list of 'action_space_args' in the configuration"
)
raise KeyError
except TypeError:
print("Tuple and Dict action spaces are currently not supported")
raise TypeError
# seeding the environment after initialising action space
self.seed(config.get("seed", None), config.get("seed_action_space", False))
def step_(self):
"""
Pre-step function for step count and cutoff.
Returns
-------
bool
End of episode
"""
truncated = False
self.c_step += 1
if self.c_step >= self.n_steps:
truncated = True
return truncated
def reset_(self, seed=0, options={}, instance=None, instance_id=None, scheme=None):
"""Pre-reset function for progressing through the instance set.Will either use round robin, random or no progression scheme."""
if seed is not None:
self.seed(seed, self.config.get("seed_action_space", False))
self.c_step = 0
if scheme is None:
scheme = self.instance_updates
self.use_next_instance(instance, instance_id, scheme=scheme)
def use_next_instance(self, instance=None, instance_id=None, scheme=None):
"""
Changes instance according to chosen instance progession.
Parameters
----------
instance
Instance specification for potentional new instances
instance_id
ID of the instance to switch to
scheme
Update scheme for this progression step (either round robin, random or no progression)
"""
if instance is not None:
self.instance = instance
elif instance_id is not None:
self.inst_id = instance_id
self.instance = self.instance_set[self.inst_id]
elif scheme == "round_robin":
self.instance_index = (self.instance_index + 1) % len(self.instance_id_list)
self.inst_id = self.instance_id_list[self.instance_index]
self.instance = self.instance_set[self.inst_id]
elif scheme == "random":
self.inst_id = np.random.choice(self.instance_id_list)
self.instance = self.instance_set[self.inst_id]
def step(self, action):
"""
Execute environment step.
Parameters
----------
action
Action to take
Returns
-------
state
Environment state
reward
Environment reward
terminated: bool
Run finished flag
truncated: bool
Run timed out flag
info : dict
Additional metainfo
"""
raise NotImplementedError
def reset(self, seed: int = None):
"""
Reset environment.
Parameters
----------
seed
Seed for the environment
Returns
-------
state
Environment state
info: dict
Additional metainfo
"""
raise NotImplementedError
def get_inst_id(self):
"""
Return instance ID.
Returns
-------
int
ID of current instance
"""
return self.inst_id
def get_instance_set(self):
"""
Return instance set.
Returns
-------
list
List of instances
"""
return self.instance_set
def get_instance(self):
"""
Return current instance.
Returns
-------
type flexible
Currently used instance
"""
return self.instance
def set_inst_id(self, inst_id):
"""
Change current instance ID.
Parameters
----------
inst_id : int
New instance index
"""
self.inst_id = inst_id
self.instance_index = self.instance_id_list.index(self.inst_id)
def set_instance_set(self, inst_set):
"""
Change instance set.
Parameters
----------
inst_set: list
New instance set
"""
self.instance_set = inst_set
self.instance_id_list = sorted(list(self.instance_set.keys()))
def set_instance(self, instance):
"""
Change currently used instance.
Parameters
----------
instance:
New instance
"""
self.instance = instance
def seed(self, seed=None, seed_action_space=False):
"""
Set rng seed.
Parameters
----------
seed:
seed for rng
seed_action_space: bool, default False
if to seed the action space as well
"""
self.initial_seed = seed
# maybe one should use the seed generated by seeding.np_random(seed) but it can be to large see issue https://github.com/openai/gym/issues/2210
random.seed(seed)
np.random.seed(seed)
self.np_random, seed = seeding.np_random(seed)
# uses the uncorrelated seed from seeding but makes sure that no randomness is introduces.
if seed_action_space:
self.action_space.seed(seed)
return [seed]
def use_test_set(self):
"""Change to test instance set."""
if self.test_set is None:
raise ValueError(
"No test set was provided, please check your benchmark config."
)
self.test = True
self.training_set = self.instance_set
self.training_id_list = self.instance_id_list
self.training_inst_id = self.inst_id
self.training_instance = self.instance
self.instance_set = self.test_set
self.instance_id_list = self.test_instance_id_list
self.inst_id = self.test_inst_id
self.instance = self.test_instance
def use_training_set(self):
"""Change to training instance set."""
self.test = False
self.test_set = self.instance_set
self.test_instance_id_list = self.instance_id_list
self.test_inst_id = self.inst_id
self.test_instance = self.instance
self.instance_set = self.training_set
self.instance_id_list = self.training_id_list
self.inst_id = self.training_inst_id
self.instance = self.training_instance
class AbstractMADACEnv(AbstractEnv):
    """Multi-Agent version of DAC environment.

    In multi-agent mode every controlled hyperparameter gets its own agent.
    Agents act in round-robin order; the joint action is only applied to the
    underlying environment once the last registered agent has acted.
    """

    def __init__(self, config):
        """
        Initialize environment.

        Parameters
        ----------
        config : dict
            Environment configuration. If it contains a truthy "multi_agent"
            entry, step/reset are replaced by their multi-agent versions.
        """
        super(AbstractMADACEnv, self).__init__(config)
        self.multi_agent = False
        if "multi_agent" in config.keys():
            self.multi_agent = config.multi_agent

        if self.multi_agent:
            # One potential agent per action dimension.
            space_class = type(self.action_space)
            if space_class == gym.spaces.Box:
                num_hps = len(self.action_space.low)
            elif space_class == gym.spaces.MultiDiscrete:
                num_hps = len(self.action_space.nvec)
            else:
                print(
                    "The MultiAgent environment currently only supports action spaces of types Box and MultiDiscrete"
                )
                raise TypeError
            self.possible_agents = np.arange(num_hps)
            self.hp_names = []
            if "config_space" in self.config.keys():
                self.hp_names = self.config["config_space"].get_hyperparameter_names()
            self.max_num_agent = len(self.possible_agents)
            # Keep the single-agent entry points reachable and route step/reset
            # through the multi-agent wrappers.
            self.env_step = self.step
            self.env_reset = self.reset
            self.step = self.multi_agent_step
            self.reset = self.multi_agent_reset
            self.agents = []
            self.current_agent = None
            self.observation = None
            self.reward = None
            self.termination = False
            self.truncation = False
            self.info = None
            # TODO: this should be set to a reasonable default, ideally
            # Else playing with less than the full number of agents will be really hard
            if "default_action" in self.config.keys():
                self.action = self.config.default_action
            else:
                self.action = self.action_space.sample()

            # Every agent sees the full observation space.
            self.observation_spaces = {}
            for a in self.possible_agents:
                self.observation_spaces[a] = self.observation_space

            # Each agent controls a one-dimensional slice of the action space.
            space_class = type(self.action_space)
            if space_class == gym.spaces.Box:
                lowers = self.action_space.low
                uppers = self.action_space.high
            else:
                num_options = [n for n in self.action_space.nvec]
            self.action_spaces = {}
            for a in self.possible_agents:
                if space_class == gym.spaces.Box:
                    subspace = gym.spaces.Box(
                        low=np.array([lowers[a]]), high=np.array([uppers[a]])
                    )
                else:
                    subspace = gym.spaces.Discrete(num_options[a])
                self.action_spaces[a] = subspace

    def multi_agent_reset(self, seed: int = None):
        """
        Reset env, but don't return observations (use ``last`` instead).

        Parameters
        ----------
        seed : int
            seed to use
        """
        self.observation, self.info = self.env_reset(seed)

    def last(self):
        """
        Get current step data.

        Returns
        -------
        np.array, float, bool, bool, dict
            observation, reward, termination, truncation, info
        """
        return (
            self.observation,
            self.reward,
            self.termination,
            self.truncation,
            self.info,
        )

    def multi_agent_step(self, action):
        """
        Record the current agent's action component.

        The underlying environment only advances once the last registered
        agent has acted; afterwards the turn wraps back to the first agent.

        Parameters
        ----------
        action
            the action in the current agent's dimension
        """
        self.action[self.current_agent] = action
        # BUGFIX: advance to the next *registered agent id*. Previously the
        # raw list index + 1 was stored as the agent id, which broke whenever
        # the registered agents were not exactly 0..n-1.
        next_index = self.agents.index(self.current_agent) + 1
        if next_index >= len(self.agents):
            (
                self.observation,
                self.reward,
                self.termination,
                self.truncation,
                self.info,
            ) = self.env_step(self.action)
            self.current_agent = self.agents[0]
        else:
            self.current_agent = self.agents[next_index]

    def register_agent(self, agent_id):
        """
        Add agent.

        String ids are first resolved against the hyperparameter names; any
        other string is interpreted as an integer index.

        Parameters
        ----------
        agent_id : int or str
            id of the agent to add
        """
        if type(agent_id) == str:
            if agent_id in self.hp_names:
                agent_id = self.hp_names.index(agent_id)
            else:
                # BUGFIX: also convert single-character digit strings (e.g.
                # "0"); previously these stayed strings and always failed the
                # membership assertion below.
                agent_id = int(agent_id)
        assert agent_id not in self.agents
        assert agent_id in self.possible_agents
        self.agents.append(agent_id)
        if self.current_agent is None:
            self.current_agent = agent_id

    def remove_agent(self, agent_id):
        """
        Remove agent.

        Parameters
        ----------
        agent_id : int
            id of the agent to remove
        """
        if agent_id in self.agents:
            self.agents.remove(agent_id)

    @property
    def num_agents(self):
        """Current number of agents."""
        return len(self.agents)

    @property
    def agent_selection(self):
        """Current agent."""
        return self.current_agent

    @property
    def infos(self):
        """Current info dict, repeated per registered agent."""
        infos = {}
        for a in self.agents:
            infos[a] = self.info
        return infos

    @property
    def rewards(self):
        """Last reward, repeated per registered agent."""
        rewards = {}
        for a in self.agents:
            # BUGFIX: previously this assigned self.rewards (the property
            # itself), causing infinite recursion; use the last scalar reward.
            rewards[a] = self.reward
        return rewards

    @property
    def terminations(self):
        """Current termination flag, repeated per registered agent."""
        terminations = {}
        for a in self.agents:
            terminations[a] = self.termination
        return terminations

    @property
    def truncations(self):
        """Current truncation flag, repeated per registered agent."""
        truncations = {}
        for a in self.agents:
            truncations[a] = self.truncation
        return truncations
| 17,933 | 30.573944 | 215 | py |
DACBench | DACBench-main/dacbench/abstract_benchmark.py | import json
from functools import partial
from types import FunctionType
import numpy as np
from gymnasium import spaces
from dacbench import wrappers
class AbstractBenchmark:
    """Abstract template for benchmark classes."""

    def __init__(self, config_path=None, config: "objdict" = None):
        """
        Initialize benchmark class.

        Parameters
        ----------
        config_path : str
            Path to load configuration from (if read from file)
        config : objdict
            Object dict containing the config

        Raises
        ------
        ValueError
            If both a config path and a config object are given.
        """
        if config is not None and config_path is not None:
            # BUGFIX: typo "where" -> "were" in the error message
            raise ValueError("Both path to config and config were provided")
        self.wrap_funcs = []
        if config_path:
            self.config_path = config_path
            self.read_config_file(self.config_path)
        elif config:
            self.load_config(config)
        else:
            self.config = None

    def get_config(self):
        """
        Return current configuration.

        Returns
        -------
        dict
            Current config
        """
        return self.config

    def serialize_config(self):
        """
        Serialize the configuration into a json-compatible dict.

        Returns
        -------
        dict
            json-serializable copy of the current config
        """
        conf = self.config.copy()
        if "observation_space_type" in self.config:
            conf["observation_space_type"] = f"{self.config['observation_space_type']}"
            if isinstance(conf["observation_space_args"][0], dict):
                conf["observation_space_args"] = self.jsonify_dict_space(
                    conf["observation_space_args"][0]
                )
        elif "observation_space" in self.config:
            conf["observation_space"] = self.space_to_list(conf["observation_space"])

        if "action_space" in self.config:
            conf["action_space"] = self.space_to_list(conf["action_space"])

        # TODO: how can we use the built in serialization of configspace here?
        if "config_space" in self.config:
            conf["config_space"] = self.process_configspace(self.config.config_space)

        conf = AbstractBenchmark.__stringify_functions(conf)

        # Convert numpy containers to plain (nested) lists of python numbers.
        for k in self.config.keys():
            if isinstance(self.config[k], np.ndarray) or isinstance(
                self.config[k], list
            ):
                # Guard against empty sequences before peeking at element 0.
                if len(self.config[k]) > 0 and type(self.config[k][0]) == np.ndarray:
                    conf[k] = list(map(list, conf[k]))
                    for i in range(len(conf[k])):
                        if (
                            not type(conf[k][i][0]) == float
                            and np.inf not in conf[k][i]
                            and -np.inf not in conf[k][i]
                        ):
                            conf[k][i] = list(map(int, conf[k][i]))
                elif isinstance(conf[k], np.ndarray):
                    conf[k] = conf[k].tolist()

        conf["wrappers"] = self.jsonify_wrappers()

        # can be recovered from instance_set_path, and could contain functions
        # that are not serializable
        if "instance_set" in conf:
            del conf["instance_set"]

        return conf

    def process_configspace(self, configuration_space):
        """This is largely the builting cs.json.write method, but doesn't save the result directly. If this is ever implemented in cs, we can replace this method."""
        from ConfigSpace.configuration_space import ConfigurationSpace
        from ConfigSpace.hyperparameters import (
            CategoricalHyperparameter,
            Constant,
            NormalFloatHyperparameter,
            NormalIntegerHyperparameter,
            OrdinalHyperparameter,
            UniformFloatHyperparameter,
            UniformIntegerHyperparameter,
            UnParametrizedHyperparameter,
        )
        from ConfigSpace.read_and_write.json import (
            _build_categorical,
            _build_condition,
            _build_constant,
            _build_forbidden,
            _build_normal_float,
            _build_normal_int,
            _build_ordinal,
            _build_uniform_float,
            _build_uniform_int,
            _build_unparametrized_hyperparameter,
        )

        if not isinstance(configuration_space, ConfigurationSpace):
            raise TypeError(
                "pcs_parser.write expects an instance of %s, "
                "you provided '%s'" % (ConfigurationSpace, type(configuration_space))
            )

        hyperparameters = []
        conditions = []
        forbiddens = []

        # Dispatch each hyperparameter to its matching json builder.
        for hyperparameter in configuration_space.get_hyperparameters():
            if isinstance(hyperparameter, Constant):
                hyperparameters.append(_build_constant(hyperparameter))
            elif isinstance(hyperparameter, UnParametrizedHyperparameter):
                hyperparameters.append(
                    _build_unparametrized_hyperparameter(hyperparameter)
                )
            elif isinstance(hyperparameter, UniformFloatHyperparameter):
                hyperparameters.append(_build_uniform_float(hyperparameter))
            elif isinstance(hyperparameter, NormalFloatHyperparameter):
                hyperparameters.append(_build_normal_float(hyperparameter))
            elif isinstance(hyperparameter, UniformIntegerHyperparameter):
                hyperparameters.append(_build_uniform_int(hyperparameter))
            elif isinstance(hyperparameter, NormalIntegerHyperparameter):
                hyperparameters.append(_build_normal_int(hyperparameter))
            elif isinstance(hyperparameter, CategoricalHyperparameter):
                hyperparameters.append(_build_categorical(hyperparameter))
            elif isinstance(hyperparameter, OrdinalHyperparameter):
                hyperparameters.append(_build_ordinal(hyperparameter))
            else:
                raise TypeError(
                    "Unknown type: %s (%s)"
                    % (
                        type(hyperparameter),
                        hyperparameter,
                    )
                )

        for condition in configuration_space.get_conditions():
            conditions.append(_build_condition(condition))

        for forbidden_clause in configuration_space.get_forbiddens():
            forbiddens.append(_build_forbidden(forbidden_clause))

        rval = {}
        if configuration_space.name is not None:
            rval["name"] = configuration_space.name
        rval["hyperparameters"] = hyperparameters
        rval["conditions"] = conditions
        rval["forbiddens"] = forbiddens
        return rval

    @classmethod
    def from_json(cls, json_config):
        """Get config from json dict."""
        config = objdict(json.loads(json_config))
        if "config_space" in config.keys():
            from ConfigSpace import ConfigurationSpace
            from ConfigSpace.read_and_write.json import (
                _construct_condition,
                _construct_forbidden,
                _construct_hyperparameter,
            )

            if "name" in config.config_space:
                configuration_space = ConfigurationSpace(
                    name=config.config_space["name"]
                )
            else:
                configuration_space = ConfigurationSpace()
            for hyperparameter in config.config_space["hyperparameters"]:
                configuration_space.add_hyperparameter(
                    _construct_hyperparameter(
                        hyperparameter,
                    )
                )
            for condition in config.config_space["conditions"]:
                configuration_space.add_condition(
                    _construct_condition(
                        condition,
                        configuration_space,
                    )
                )
            for forbidden in config.config_space["forbiddens"]:
                configuration_space.add_forbidden_clause(
                    _construct_forbidden(
                        forbidden,
                        configuration_space,
                    )
                )
            config.config_space = configuration_space
        return cls(config=config)

    def to_json(self):
        """Write config to json."""
        conf = self.serialize_config()
        return json.dumps(conf)

    def save_config(self, path):
        """Write config to path."""
        conf = self.serialize_config()
        with open(path, "w") as fp:
            json.dump(conf, fp, default=lambda o: "not serializable")

    def jsonify_wrappers(self):
        """
        Write wrapper description to list.

        Each entry is ``[wrapper_name, arg_descriptions(, function_lookup)]``.
        Non-callable arguments are stored as ``["value", arg]``; callables are
        stored as ``["function", repr]`` together with a lookup table mapping
        the repr to ``[module, name]`` so they can be re-imported on load.

        Returns
        -------
        list
        """
        wrappers = []
        for func in self.wrap_funcs:
            args = func.args
            arg_descriptions = []
            contains_func = False
            func_dict = {}
            for i in range(len(args)):
                if callable(args[i]):
                    contains_func = True
                    func_dict[f"{args[i]}"] = [args[i].__module__, args[i].__name__]
                    arg_descriptions.append(["function", f"{args[i]}"])
                else:
                    # BUGFIX: plain values used to be wrapped in a set literal
                    # ({args[i]}), which is neither json serializable nor
                    # indexable on load; store a tagged list instead.
                    arg_descriptions.append(["value", args[i]])
            function = func.func.__name__
            if contains_func:
                wrappers.append([function, arg_descriptions, func_dict])
            else:
                wrappers.append([function, arg_descriptions])
        return wrappers

    def dejson_wrappers(self, wrapper_list):
        """
        Load wrappers from a list produced by ``jsonify_wrappers``.

        Parameters
        ----------
        wrapper_list : list
            wrapper description to load
        """
        import importlib

        for i in range(len(wrapper_list)):
            # BUGFIX: use a dedicated name for the wrapper constructor so it
            # is not overwritten when restoring function-valued arguments.
            wrap_class = getattr(wrappers, wrapper_list[i][0])
            arg_descriptions = wrapper_list[i][1]
            args = []
            for a in arg_descriptions:
                if a[0] == "function":
                    module = importlib.import_module(wrapper_list[i][2][a[1]][0])
                    # BUGFIX: index 1 is the function name (0 is the module).
                    name = wrapper_list[i][2][a[1]][1]
                    args.append(getattr(module, name))
                elif a[0] == "value":
                    args.append(a[1])
                else:
                    # Fallback for legacy entries: keep the raw description.
                    args.append(a)
            self.wrap_funcs.append(partial(wrap_class, *args))

    @staticmethod
    def __import_from(module: str, name: str):
        """
        Imports the class / function / ... with name from module.

        Parameters
        ----------
        module : str
            module to import from
        name : str
            name to import

        Returns
        -------
        the imported object
        """
        module = __import__(module, fromlist=[name])
        return getattr(module, name)

    @classmethod
    def class_to_str(cls):
        """Get string name from class."""
        return cls.__module__, cls.__name__

    @staticmethod
    def __decorate_config_with_functions(conf: dict):
        """
        Replaced the stringified functions with the callable objects.

        Parameters
        ----------
        conf :
            config to parse
        """
        for key, value in {
            k: v
            for k, v in conf.items()
            if isinstance(v, list) and len(v) == 3 and v[0] == "function"
        }.items():
            _, module_name, function_name = value
            conf[key] = AbstractBenchmark.__import_from(module_name, function_name)
        return conf

    @staticmethod
    def __stringify_functions(conf: dict) -> dict:
        """
        Replaced all callables in the config with a triple ('function', module_name, function_name).

        Parameters
        ----------
        conf : dict
            config to parse

        Returns
        -------
        modified dict
        """
        function_keys = [k for k, v in conf.items() if isinstance(v, FunctionType)]
        for key in function_keys:
            conf[key] = ["function", conf[key].__module__, conf[key].__name__]
        return conf

    def space_to_list(self, space):
        """
        Make a json-serializable list from a gym space.

        Parameters
        ----------
        space: gym.spaces.Space
            space to parse
        """
        res = []
        if isinstance(space, spaces.Box):
            res.append("Box")
            res.append([space.low.tolist(), space.high.tolist()])
            res.append("numpy.float32")
        elif isinstance(space, spaces.Discrete):
            res.append("Discrete")
            res.append([space.n])
        elif isinstance(space, spaces.Dict):
            res.append("Dict")
            res.append(self.jsonify_dict_space(space.spaces))
        elif isinstance(space, spaces.MultiDiscrete):
            res.append("MultiDiscrete")
            res.append([space.nvec])
        elif isinstance(space, spaces.MultiBinary):
            res.append("MultiBinary")
            res.append([space.n])
        return res

    def list_to_space(self, space_list):
        """
        Make a gym space from a list produced by ``space_to_list``.

        Parameters
        ----------
        space_list: list
            list to space-ify
        """
        if space_list[0] == "Dict":
            args = self.dictify_json(space_list[1])
            space = getattr(spaces, space_list[0])(args)
        elif len(space_list) == 2:
            space = getattr(spaces, space_list[0])(*space_list[1])
        else:
            # Third entry carries the numpy dtype, e.g. "numpy.float32".
            typestring = space_list[2].split(".")[1]
            dt = getattr(np, typestring)
            args = [np.array(arg) for arg in space_list[1]]
            space = getattr(spaces, space_list[0])(*args, dtype=dt)
        return space

    def jsonify_dict_space(self, dict_space):
        """
        Gym spaces to json dict.

        Parameters
        ----------
        dict_space : dict
            space dict

        Raises
        ------
        ValueError
            If a subspace is neither Box nor Discrete.
        """
        keys = []
        types = []
        arguments = []
        for k in dict_space.keys():
            keys.append(k)
            value = dict_space[k]
            if not isinstance(value, (spaces.Box, spaces.Discrete)):
                raise ValueError(
                    f"Only Dict spaces made up of Box spaces or discrete spaces are supported but got {type(value)}"
                )

            if isinstance(value, spaces.Box):
                types.append("box")
                low = value.low.astype(float).tolist()
                high = value.high.astype(float).tolist()
                arguments.append([low, high])

            if isinstance(value, spaces.Discrete):
                types.append("discrete")
                n = int(value.n)
                arguments.append([n])
        return [keys, types, arguments]

    def dictify_json(self, dict_list):
        """
        Json to dict structure for gym spaces.

        Parameters
        ----------
        dict_list: list
            list of [keys, types, arguments] as built by jsonify_dict_space
        """
        dict_space = {}
        keys, types, args = dict_list
        # Avoid shadowing the builtin `type` in the loop variable.
        for k, space_type, args_ in zip(keys, types, args):
            if space_type == "box":
                prepared_args = map(np.array, args_)
                dict_space[k] = spaces.Box(*prepared_args, dtype=np.float32)
            elif space_type == "discrete":
                dict_space[k] = spaces.Discrete(*args_)
            else:
                raise TypeError(
                    f"Currently only Discrete and Box spaces are allowed in Dict spaces, got {space_type}"
                )
        return dict_space

    def load_config(self, config: "objdict"):
        """
        Load config.

        Parameters
        ----------
        config: objdict
            config to load
        """
        self.config = config
        if "observation_space_type" in self.config:
            # Types have to be numpy dtypes (for gym spaces)
            if type(self.config["observation_space_type"]) == str:
                if self.config["observation_space_type"] == "None":
                    self.config["observation_space_type"] = None
                else:
                    typestring = self.config["observation_space_type"].split(" ")[1][
                        :-2
                    ]
                    typestring = typestring.split(".")[1]
                    self.config["observation_space_type"] = getattr(np, typestring)

        if "observation_space" in self.config:
            self.config["observation_space"] = self.list_to_space(
                self.config["observation_space"]
            )
        elif "observation_space_class" in config.keys():
            if config.observation_space_class == "Dict":
                self.config["observation_space_args"] = [
                    self.dictify_json(self.config["observation_space_args"])
                ]

        if "action_space" in self.config:
            self.config["action_space"] = self.list_to_space(
                self.config["action_space"]
            )

        if "wrappers" in self.config:
            self.dejson_wrappers(self.config["wrappers"])
            del self.config["wrappers"]

        self.config = AbstractBenchmark.__decorate_config_with_functions(self.config)

        # BUGFIX: a bare, unconsumed `map(np.array, ...)` call used to sit
        # here (a no-op) and indexing element 0 crashed on empty lists;
        # np.array below already handles nested lists.
        for k in self.config.keys():
            if type(self.config[k]) == list:
                self.config[k] = np.array(self.config[k])

    def read_config_file(self, path):
        """
        Read configuration from file.

        Parameters
        ----------
        path : str
            Path to config file
        """
        with open(path, "r") as fp:
            config = objdict(json.load(fp))

        self.load_config(config)

    def get_environment(self):
        """
        Make benchmark environment.

        Returns
        -------
        env : gym.Env
            Benchmark environment
        """
        raise NotImplementedError

    def set_seed(self, seed):
        """
        Set environment seed.

        Parameters
        ----------
        seed : int
            New seed
        """
        self.config["seed"] = seed

    def set_action_space(self, kind, args):
        """
        Change action space.

        Parameters
        ----------
        kind : str
            Name of action space class
        args: list
            List of arguments to pass to action space class
        """
        self.config["action_space"] = kind
        self.config["action_space_args"] = args

    def set_observation_space(self, kind, args, data_type):
        """
        Change observation_space.

        Parameters
        ----------
        kind : str
            Name of observation space class
        args : list
            List of arguments to pass to observation space class
        data_type : type
            Data type of observation space
        """
        self.config["observation_space"] = kind
        self.config["observation_space_args"] = args
        self.config["observation_space_type"] = data_type

    def register_wrapper(self, wrap_func):
        """
        Register wrapper.

        Parameters
        ----------
        wrap_func : function or list of functions
            wrapper init function(s)
        """
        if isinstance(wrap_func, list):
            # BUGFIX: append(*wrap_func) crashed for lists with more than one
            # element; extend handles any length.
            self.wrap_funcs.extend(wrap_func)
        else:
            self.wrap_funcs.append(wrap_func)

    def __eq__(self, other):
        """Check for equality."""
        return type(self) == type(other) and self.config == other.config
# This code is taken from https://goodcode.io/articles/python-dict-object/
class objdict(dict):
    """Dictionary that also exposes its entries as attributes for more
    flexible config handling."""

    def __getattr__(self, name):
        """Look up ``name`` as a key; raise AttributeError when absent."""
        try:
            return self[name]
        except KeyError:
            raise AttributeError("No such attribute: " + name)

    def __setattr__(self, name, value):
        """Store attribute assignments as dictionary entries."""
        self[name] = value

    def __delattr__(self, name):
        """Remove the entry for ``name``; raise AttributeError when absent."""
        try:
            del self[name]
        except KeyError:
            raise AttributeError("No such attribute: " + name)

    def copy(self):
        """Return a shallow copy that is again an objdict."""
        return objdict(**super().copy())

    def __eq__(self, other):
        """Equality against any dict, comparing numpy arrays element-wise."""
        if not isinstance(other, dict):
            return False
        if set(other.keys()) != set(self.keys()):
            return False
        for key in self.keys():
            mine, theirs = self[key], other[key]
            if isinstance(mine, np.ndarray) or isinstance(theirs, np.ndarray):
                if not np.array_equal(mine, theirs):
                    return False
            elif not theirs == mine:
                return False
        return True

    def __ne__(self, other):
        """Inequality as negated equality."""
        return not self == other
| 20,990 | 30.901216 | 165 | py |
DACBench | DACBench-main/dacbench/logger.py | import json
from abc import ABCMeta, abstractmethod
from collections import ChainMap, defaultdict
from datetime import datetime
from functools import reduce
from itertools import chain
from numbers import Number
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
import numpy as np
import pandas as pd
from dacbench import AbstractBenchmark, AbstractEnv
from dacbench.abstract_agent import AbstractDACBenchAgent
def load_logs(log_file: Path) -> List[Dict]:
    """
    Load the log entries from a jsonl file written by any logger.

    Each line is parsed as one json dict of the format:
    {
        'instance': 0,
        'episode': 0,
        'step': 1,
        'example_log_val':  {
            'values': [val1, val2, ... valn],
            'times: [time1, time2, ..., timen],
        }
        ...
    }

    Parameters
    ----------
    log_file: pathlib.Path
        The path to the log file

    Returns
    -------
    [Dict, ...]
    """
    with open(log_file, "r") as fh:
        return [json.loads(line) for line in fh]
def split(predicate: Callable, iterable: Iterable) -> Tuple[List, List]:
    """
    Partition the iterable into two lists based on the predicate.

    Parameters
    ----------
    predicate: Callable
        A function taking an element of the iterable and return Ture or False
    iterable: Iterable
        the iterable to split

    Returns
    -------
    (positives, negatives)
    """
    positives: List = []
    negatives: List = []
    for element in iterable:
        target = positives if predicate(element) else negatives
        target.append(element)
    return positives, negatives
def flatten_log_entry(log_entry: Dict) -> List[Dict]:
    """
    Transforms a log entry.

    From:
    {
        'step': 0,
        'episode': 2,
        'some_value': {
            'values' : [34, 45],
            'times':['28-12-20 16:20:53', '28-12-20 16:21:30'],
        }
    }
    To:
    [
        { 'step': 0,'episode': 2, 'value': 34, 'time': '28-12-20 16:20:53'},
        { 'step': 0,'episode': 2, 'value': 45, 'time': '28-12-20 16:21:30'}
    ]

    Parameters
    ----------
    log_entry: Dict
        A log entry
    """
    # Partition the entry into logged series (dict-valued) and scalar
    # top-level fields that are repeated on every row.
    series_items, scalar_items = [], []
    for item in log_entry.items():
        (series_items if isinstance(item[1], dict) else scalar_items).append(item)

    rows: List[Dict] = []
    for value_name, series in series_items:
        for value, time in zip(series["values"], series["times"]):
            rows.append(
                dict(
                    scalar_items
                    + [("value", value), ("time", time), ("name", value_name)]
                )
            )
    return rows
def list_to_tuple(list_: List) -> Tuple:
    """
    Recursively transforms a list of lists into tuples of tuples.

    Parameters
    ----------
    list_:
        (nested) list

    Returns
    -------
    (nested) tuple
    """
    converted = []
    for element in list_:
        converted.append(
            list_to_tuple(element) if isinstance(element, list) else element
        )
    return tuple(converted)
def log2dataframe(
    logs: List[dict], wide: bool = False, drop_columns: Iterable[str] = ("time",)
) -> pd.DataFrame:
    """
    Converts a list of log entries to a pandas dataframe.

    Usually used in combination with load_dataframe.

    Parameters
    ----------
    logs: List
        List of log entries
    wide: bool
        wide=False (default) produces a dataframe with columns (episode, step, time, name, value)
        wide=True returns a dataframe (episode, step, time, name_1, name_2, ...) if the variable name_n has not been logged
        at (episode, step, time) name_n is NaN.
    drop_columns: Iterable[str]
        Column names to be dropped (before reshaping the long dataframe),
        mostly used in combination with wide=True to reduce NaN values.
        Defaults to ("time",); pass None to keep all columns.

    Returns
    -------
    dataframe
    """
    # BUGFIX: the default used to be the mutable list ["time"]; an immutable
    # tuple avoids the shared-mutable-default pitfall.
    flat_logs = map(flatten_log_entry, logs)
    # chain.from_iterable is linear in the total number of rows, unlike the
    # previous reduce(lambda l1, l2: l1 + l2, ...) which was quadratic.
    rows = list(chain.from_iterable(flat_logs))

    dataframe = pd.DataFrame(rows)
    dataframe.time = pd.to_datetime(dataframe.time)

    if drop_columns is not None:
        dataframe = dataframe.drop(columns=list(drop_columns))

    dataframe = dataframe.infer_objects()
    list_column_candidates = dataframe.dtypes == object

    # Lists are unhashable; convert them to tuples so the wide reshape below
    # (and any later grouping) works.
    for i, candidate in enumerate(list_column_candidates):
        if candidate:
            dataframe.iloc[:, i] = dataframe.iloc[:, i].apply(
                lambda x: list_to_tuple(x) if isinstance(x, list) else x
            )

    if wide:
        primary_index_columns = ["episode", "step"]
        field_id_column = "name"
        additional_columns = list(
            set(dataframe.columns)
            - set(primary_index_columns + ["time", "value", field_id_column])
        )
        index_columns = primary_index_columns + additional_columns + [field_id_column]
        dataframe = dataframe.set_index(index_columns)
        dataframe = dataframe.unstack()
        dataframe.reset_index(inplace=True)
        # unstack yields a MultiIndex; keep the value-level name where present.
        dataframe.columns = [a if b == "" else b for a, b in dataframe.columns]

    return dataframe.infer_objects()
def seed_mapper(self):
    """Return the attached env's initial seed for automatic logging, or None
    when no env is attached."""
    return None if self.env is None else self.env.initial_seed
def instance_mapper(self):
    """Return the attached env's current instance id for automatic logging,
    or None when no env is attached."""
    return None if self.env is None else self.env.get_inst_id()
class AbstractLogger(metaclass=ABCMeta):
"""
Logger interface.
The logger classes provide a way of writing structured logs as jsonl files and also help to track information like
current episode, step, time ...
In the jsonl log file each row corresponds to a step.
"""
valid_types = {
"recursive": [dict, list, tuple, np.ndarray],
"primitive": [str, int, float, bool, np.number],
}
def __init__(
self,
experiment_name: str,
output_path: Path,
step_write_frequency: int = None,
episode_write_frequency: int = 1,
):
"""
Initializes Logger.
Parameters
----------
experiment_name: str
Name of the folder to store the result in
output_path: pathlib.Path
Path under which the experiment folder is created
step_write_frequency: int
number of steps after which the loggers writes to file.
If None only the data is only written to file if write is called, if triggered by episode_write_frequency
or on close
episode_write_frequency: int
see step_write_frequency
"""
self.experiment_name = experiment_name
self.output_path = output_path
self.log_dir = self._init_logging_dir(self.output_path / self.experiment_name)
self.step_write_frequency = step_write_frequency
self.episode_write_frequency = episode_write_frequency
self._additional_info = {}
self.additional_info_auto_mapper = {
"instance": instance_mapper,
"seed": seed_mapper,
}
self.env = None
@property
def additional_info(self):
"""Log additional info."""
additional_info = self._additional_info.copy()
auto_info = {
key: mapper(self)
for key, mapper in self.additional_info_auto_mapper.items()
if mapper(self) is not None
}
additional_info.update(auto_info)
return additional_info
def set_env(self, env: AbstractEnv) -> None:
"""
Needed to infer automatically logged information like the instance id.
Parameters
----------
env: AbstractEnv
env to log
"""
self.env = env
@staticmethod
def _pretty_valid_types() -> str:
"""Returns a string pretty string representation of the types that can be logged as values."""
valid_types = chain(
AbstractLogger.valid_types["recursive"],
AbstractLogger.valid_types["primitive"],
)
return ", ".join(map(lambda type_: type_.__name__, valid_types))
@staticmethod
def _init_logging_dir(log_dir: Path) -> None:
"""
Prepares the logging directory.
Parameters
----------
log_dir: pathlib.Path
dir to prepare for logging
Returns
-------
None
"""
log_dir.mkdir(parents=True, exist_ok=True)
return log_dir
def is_of_valid_type(self, value: Any) -> bool:
"""
Checks if the value of any type in the logger's valid types.
Parameters
----------
value
value to check
Returns
-------
bool
"""
if any(isinstance(value, type) for type in self.valid_types["primitive"]):
return True
elif any(isinstance(value, type) for type in self.valid_types["recursive"]):
value = value.vlaues() if isinstance(value, dict) else value
return all(self.is_of_valid_type(sub_value) for sub_value in value)
else:
return False
@abstractmethod
def close(self) -> None:
"""Makes sure, that all remaining entries in the are written to file and the file is closed."""
pass
@abstractmethod
def next_step(self) -> None:
"""Call at the end of the step. Updates the internal state and dumps the information of the last step into a json."""
pass
@abstractmethod
def next_episode(self) -> None:
"""Call at the end of episode. See next_step."""
pass
@abstractmethod
def write(self) -> None:
"""Writes buffered logs to file. Invoke manually if you want to load logs during a run."""
pass
@abstractmethod
def log(self, key: str, value) -> None:
"""
Writes value to list of values and save the current time for key.
Parameters
----------
key: str
key to log
value:
the value must of of a type that is json serializable.
Currently only {str, int, float, bool, np.number} and recursive types of those are supported.
"""
pass
@abstractmethod
def log_dict(self, data):
"""
Alternative to log if more the one value should be logged at once.
Parameters
----------
data: dict
a dict with key-value so that each value is a valid value for log
"""
pass
@abstractmethod
def log_space(self, key: str, value: Union[np.ndarray, Dict], space_info=None):
"""
Special for logging gym.spaces.
Currently three types are supported:
* Numbers: e.g. samples from Discrete
* Fixed length arrays like MultiDiscrete or Box
* Dict: assuming each key has fixed length array
Parameters
----------
key:
see log
value:
see log
space_info:
a list of column names. The length of this list must equal the resulting number of columns.
"""
pass
class ModuleLogger(AbstractLogger):
    """
    A logger for handling logging of one module, e.g. a wrapper or toplevel general logging.
    Don't create manually — use Logger to manage ModuleLoggers.
    """

    def __init__(
        self,
        output_path: Path,
        experiment_name: str,
        module: str,
        step_write_frequency: int = None,
        episode_write_frequency: int = 1,
    ) -> None:
        """
        All results are placed under 'output_path / experiment_name'.

        Parameters
        ----------
        output_path: pathlib.Path
            Path under which the experiment folder is created
        experiment_name: str
            Name of the folder to store the result in
        module: str
            the module (mostly name of the wrapper), each module gets its own jsonl file
        step_write_frequency: int
            number of steps after which the logger writes to file.
            If None the data is only written to file if write is called,
            if triggered by episode_write_frequency or on close
        episode_write_frequency: int
            see step_write_frequency
        """
        super(ModuleLogger, self).__init__(
            experiment_name, output_path, step_write_frequency, episode_write_frequency
        )
        self.log_file = open(self.log_dir / f"{module}.jsonl", "w")
        self.step = 0
        self.episode = 0
        self.buffer = []
        self.current_step = self.__init_dict()

    def get_logfile(self) -> Path:
        """
        Get logfile name.

        Returns
        -------
        pathlib.Path
            the path to the log file of this logger
        """
        return Path(self.log_file.name)

    def close(self):
        """Makes sure that all remaining entries in the buffer are written to file and the file is closed."""
        if not self.log_file.closed:
            self.write()
            self.log_file.close()

    def __del__(self):
        """Makes sure that all remaining entries in the buffer are written to file and the file is closed."""
        if not self.log_file.closed:
            self.close()

    @staticmethod
    def __json_default(obj):
        """
        Add support for dumping numpy arrays and numbers to json.
        json.dumps only calls this for objects it cannot serialize itself.

        Parameters
        ----------
        obj
            numpy object to jsonify
        """
        # 'obj' instead of 'object' to avoid shadowing the builtin.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, np.number):
            return obj.item()
        else:
            raise ValueError(f"Type {type(obj)} not supported")

    def __end_step(self):
        # Serialize the current step's buffered key/value pairs into the jsonl
        # buffer and start a fresh step dict. No-op for steps without data.
        if self.current_step:
            self.current_step["step"] = self.step
            self.current_step["episode"] = self.episode
            self.current_step.update(self.additional_info)
            self.buffer.append(
                json.dumps(self.current_step, default=self.__json_default)
            )
            self.current_step = self.__init_dict()

    @staticmethod
    def __init_dict():
        # Each logged key lazily gets parallel lists of timestamps and values.
        return defaultdict(lambda: {"times": [], "values": []})

    def reset_episode(self) -> None:
        """Resets the episode and step. Be aware that this can lead to ambiguous keys if no instance or seed or other identifying additional info is set."""
        self.__end_step()
        self.episode = 0
        self.step = 0

    def __reset_step(self):
        self.__end_step()
        self.step = 0

    def next_step(self):
        """Call at the end of the step. Updates the internal state and dumps the information of the last step into a json."""
        self.__end_step()
        if (
            self.step_write_frequency is not None
            and self.step % self.step_write_frequency == 0
        ):
            self.write()
        self.step += 1

    def next_episode(self):
        """Call at the end of episode. Resets the step counter and writes to file if the episode write frequency is hit."""
        # NOTE: the previous docstring was a copy-paste of write()'s.
        self.__reset_step()
        if (
            self.episode_write_frequency is not None
            and self.episode % self.episode_write_frequency == 0
        ):
            self.write()
        self.episode += 1

    def write(self):
        """Writes buffered logs to file. Invoke manually if you want to load logs during a run."""
        self.__end_step()
        self.__buffer_to_file()

    def __buffer_to_file(self):
        # Flush the jsonl buffer (one json object per line) to disk.
        if len(self.buffer) > 0:
            self.log_file.write("\n".join(self.buffer))
            self.log_file.write("\n")
            self.buffer.clear()
            self.log_file.flush()

    def set_additional_info(self, **kwargs):
        """
        Can be used to log additional information for each step e.g. for seed and instance id.

        Parameters
        ----------
        kwargs : dict
            info dict
        """
        # Fix: __end_step merges self.additional_info into every dumped step,
        # so updates must target that same attribute (was self._additional_info,
        # which silently stored the info where nothing ever read it).
        self.additional_info.update(kwargs)

    def log(
        self, key: str, value: Union[Dict, List, Tuple, str, int, float, bool]
    ) -> None:
        """
        Writes value to list of values and saves the current time for key.

        Parameters
        ----------
        key: str
            key to log
        value:
            the value must be of a type that is json serializable.
            Currently only {str, int, float, bool, np.number} and recursive types of those are supported.
        """
        self.__log(key, value, datetime.now().strftime("%d-%m-%y %H:%M:%S.%f"))

    def __log(self, key, value, time):
        # Shared implementation for log and log_dict; validates before storing.
        if not self.is_of_valid_type(value):
            valid_types = self._pretty_valid_types()
            raise ValueError(
                f"value {type(value)} is not of valid type or a recursive composition of valid types ({valid_types})"
            )
        self.current_step[key]["times"].append(time)
        self.current_step[key]["values"].append(value)

    def log_dict(self, data: Dict) -> None:
        """
        Alternative to log if more than one value should be logged at once.

        Parameters
        ----------
        data: dict
            a dict with key-value pairs so that each value is a valid value for log
        """
        # One shared timestamp for the whole batch.
        time = datetime.now().strftime("%d-%m-%y %H:%M:%S.%f")
        for key, value in data.items():
            self.__log(key, value, time)

    @staticmethod
    def __space_dict(key: str, value, space_info):
        # Flatten a gym-space sample into a flat {column_name: scalar} dict.
        if isinstance(value, np.ndarray) and len(value.shape) == 0:
            value = value.item()
        if isinstance(value, Number):
            if space_info is None:
                data = {key: value}
            else:
                if len(space_info) != 1:
                    raise ValueError(
                        f"Space info must match length (expect 1 != got{len(space_info)}"
                    )
                data = {f"{key}_{space_info[0]}": value}
        elif isinstance(value, np.ndarray):
            if space_info is not None and len(space_info) != len(value):
                raise ValueError(
                    f"Space info must match length (expect {len(value)} != got{len(space_info)}"
                )
            key_suffix = (
                enumerate(value) if space_info is None else zip(space_info, value)
            )
            data = {f"{key}_{suffix}": x for suffix, x in key_suffix}
        elif isinstance(value, dict):
            key_suffix = (
                value.items() if space_info is None else zip(space_info, value.values())
            )
            dicts = (
                ModuleLogger.__space_dict(f"{key}_{sub_key}", sub_value, None)
                for sub_key, sub_value in key_suffix
            )
            data = dict(ChainMap(*dicts))
        else:
            raise ValueError("Space does not seem be supported")
        return data

    def log_space(self, key, value, space_info=None):
        """
        Special case for logging gym.spaces.
        Currently three types are supported:
        * Numbers: e.g. samples from Discrete
        * Fixed length arrays like MultiDiscrete or Box
        * Dict: assuming each key has fixed length array

        Parameters
        ----------
        key:
            see log
        value:
            see log
        space_info:
            a list of column names. The length of this list must equal the resulting number of columns.
        """
        data = self.__space_dict(key, value, space_info)
        self.log_dict(data)
class Logger(AbstractLogger):
    """
    A logger that manages the creation of the module loggers.
    To get a ModuleLogger for your module (e.g. wrapper) call module_logger = Logger(...).add_module("my_wrapper").
    From then on module_logger.log(...) or logger.log(..., module="my_wrapper") can be used to log.
    The logger takes care of updating information like episode and step in the subloggers. To indicate
    the end of the episode or the next step simply call logger.next_episode() or logger.next_step().
    """

    def __init__(
        self,
        experiment_name: str,
        output_path: Path,
        step_write_frequency: int = None,
        episode_write_frequency: int = 1,
    ) -> None:
        """
        Create Logger.

        Parameters
        ----------
        experiment_name: str
            Name of the folder to store the result in
        output_path: pathlib.Path
            Path under which the experiment folder is created
        step_write_frequency: int
            number of steps after which the loggers write to file.
            If None the data is only written to file if write is called,
            if triggered by episode_write_frequency or on close
        episode_write_frequency: int
            see step_write_frequency
        """
        super(Logger, self).__init__(
            experiment_name, output_path, step_write_frequency, episode_write_frequency
        )
        self.env: AbstractEnv = None
        # Registry of sub-loggers, keyed by module name (always a str).
        self.module_logger: Dict[str, ModuleLogger] = dict()

    def set_env(self, env: AbstractEnv) -> None:
        """
        Writes information about the environment.

        Parameters
        ----------
        env: AbstractEnv
            the env object to track
        """
        super().set_env(env)
        for module_logger in self.module_logger.values():
            module_logger.set_env(env)

    def close(self):
        """Makes sure that all remaining entries (from all subloggers) are written to files and the files are closed."""
        for module_logger in self.module_logger.values():
            module_logger.close()

    def __del__(self):
        """Removes Logger."""
        self.close()

    def next_step(self):
        """Call at the end of the step. Updates the internal state of all subloggers and dumps the information of the last step into a json."""
        for module_logger in self.module_logger.values():
            module_logger.next_step()

    def next_episode(self):
        """Call at the end of episode. See next_step."""
        for module_logger in self.module_logger.values():
            module_logger.next_episode()

    def reset_episode(self):
        """Resets in all modules."""
        for module_logger in self.module_logger.values():
            module_logger.reset_episode()

    def write(self):
        """Writes buffered logs to file. Invoke manually if you want to load logs during a run."""
        for module_logger in self.module_logger.values():
            module_logger.write()

    def add_module(self, module: Union[str, type]) -> ModuleLogger:
        """
        Creates a sub-logger. For more details see class level documentation.

        Parameters
        ----------
        module: str or type
            The module name, Wrapper-Type, or instance to create a sub-logger for

        Returns
        -------
        ModuleLogger
        """
        if isinstance(module, str):
            pass
        elif isinstance(module, type):
            module = module.__name__
        else:
            # Fix: register instances under their class *name* (a str), matching
            # the type branch above. The raw class object would become both the
            # registry key and the jsonl file name ("<class '...'>.jsonl").
            module = module.__class__.__name__
        if module in self.module_logger:
            raise ValueError(f"Module {module} already registered")
        else:
            self.module_logger[module] = ModuleLogger(
                self.output_path,
                self.experiment_name,
                module,
                self.step_write_frequency,
                self.episode_write_frequency,
            )
            if self.env is not None:
                self.module_logger[module].set_env(self.env)
        return self.module_logger[module]

    def add_agent(self, agent: AbstractDACBenchAgent):
        """
        Writes information about the agent.

        Parameters
        ----------
        agent: AbstractDACBenchAgent
            the agent object to add
        """
        agent_config = {"type": str(agent.__class__)}
        with open(self.log_dir / "agent.json", "w") as f:
            json.dump(agent_config, f)

    def add_benchmark(self, benchmark: AbstractBenchmark) -> None:
        """
        Add benchmark to logger.

        Parameters
        ----------
        benchmark : AbstractBenchmark
            the benchmark object to add
        """
        benchmark.save_config(self.log_dir / "benchmark.json")

    def set_additional_info(self, **kwargs):
        """
        Add additional info to all subloggers.

        Parameters
        ----------
        kwargs : dict
            info dict
        """
        for module_logger in self.module_logger.values():
            module_logger.set_additional_info(**kwargs)

    def log(self, key, value, module):
        """
        Log a key-value pair to module.

        Parameters
        ----------
        key : str | int
            key to log
        value :
            value to log
        module :
            module to log to
        """
        if module not in self.module_logger:
            raise ValueError(f"Module {module} not registered yet")
        # Fix: dispatch to the registered sub-logger — the dict itself has no
        # .log method, so the old code raised AttributeError on every call.
        self.module_logger[module].log(key, value)

    def log_space(self, key, value, module, space_info=None):
        """
        Log a key-value pair to module with optional info.

        Parameters
        ----------
        key : str | int
            key to log
        value :
            value to log
        module :
            module to log to
        space_info :
            additional log info
        """
        if module not in self.module_logger:
            raise ValueError(f"Module {module} not registered yet")
        # Fix: index the registry before calling the sub-logger.
        self.module_logger[module].log_space(key, value, space_info)

    def log_dict(self, data, module):
        """
        Log a data dict to module.

        Parameters
        ----------
        data : dict
            data to log
        module
            module to log to
        """
        if module not in self.module_logger:
            raise ValueError(f"Module {module} not registered yet")
        # Fix: was calling log_space(data), which has a different signature;
        # forward to the sub-logger's log_dict instead.
        self.module_logger[module].log_dict(data)
| 26,036 | 28.5875 | 156 | py |
DACBench | DACBench-main/dacbench/argument_parsing.py | from argparse import ArgumentTypeError as err
from pathlib import Path
class PathType(object):
    """
    Custom argument type for path validation.
    Adapted from: https://stackoverflow.com/questions/11415570/directory-path-types-with-argparse
    """

    def __init__(self, exists=True, type="file", dash_ok=True):
        """
        Initialize Path.

        Parameters
        ----------
        exists : bool
            True: a path that does exist
            False: a path that does not exist, in a valid parent directory
            None: don't care
        type : str
            file, dir, symlink, socket, None, or a function returning True for valid paths
            None: don't care
        dash_ok: bool
            whether to allow "-" as stdin/stdout
        """
        assert exists in (True, False, None)
        assert type in ("file", "dir", "symlink", "socket", None) or hasattr(
            type, "__call__"
        )
        self._exists = exists
        self._type = type
        self._dash_ok = dash_ok

    def __call__(self, string: str):
        """
        Validate *string* and convert it to a pathlib.Path.

        Parameters
        ----------
        string : str
            string to check

        Raises
        ------
        argparse.ArgumentTypeError
            if the path fails the dash, existence, or type checks
        """
        if string == "-":
            # the special argument "-" means sys.std{in,out}
            if self._type == "dir":
                raise err("standard input/output (-) not allowed as directory path")
            elif self._type == "symlink":
                raise err("standard input/output (-) not allowed as symlink path")
            elif not self._dash_ok:
                raise err("standard input/output (-) not allowed")
        path = Path(string)
        # existence
        if self._exists is None:
            pass
        elif self._exists != path.exists():
            negate = "" if self._exists else "not"
            positive = "" if not self._exists else "not"
            # Fix: str(...) so type=None no longer crashes with AttributeError
            # (None has no .capitalize) when the existence check fails.
            raise err(
                f"{str(self._type).capitalize()} should {negate} exist but does {positive}"
            )
        # type
        if self._type is None:
            pass
        elif isinstance(self._type, str):
            check = getattr(path, f"is_{self._type}")
            if not check():
                raise err(f"Path is not {self._type}")
        elif callable(self._type):
            # Fix: isinstance(x, callable) raises TypeError because callable
            # is a builtin function, not a class; use callable() instead.
            if not self._type(path):
                raise err("Callable type check failed")
        else:
            raise err("invalid type to check for")
        return path
| 2,544 | 29.662651 | 97 | py |
DACBench | DACBench-main/dacbench/runner.py | from pathlib import Path
import seaborn as sb
from dacbench import benchmarks
from dacbench.logger import Logger
from dacbench.wrappers import PerformanceTrackingWrapper
# Global seaborn plotting defaults used when visualizing benchmark results.
sb.set_style("darkgrid")
current_palette = list(sb.color_palette())
def run_benchmark(env, agent, num_episodes, logger=None):
    """
    Run a single benchmark env for a given number of episodes with a given agent.

    Parameters
    ----------
    env : gym.Env
        Benchmark environment
    agent
        Any agent implementing the methods act, train and end_episode (see AbstractDACBenchAgent)
    num_episodes : int
        Number of episodes to run
    logger : dacbench.logger.Logger
        logger to use for logging. Not closed automatically like env
    """
    if logger is not None:
        logger.reset_episode()
        logger.set_env(env)
    for _ in range(num_episodes):
        state, _ = env.reset()
        reward = 0
        done = False
        while not done:
            # The agent sees the reward of the previous transition (0 initially).
            action = agent.act(state, reward)
            state, reward, terminated, truncated, _ = env.step(action)
            agent.train(state, reward)
            if logger is not None:
                logger.next_step()
            done = terminated or truncated
        agent.end_episode(state, reward)
        if logger is not None:
            logger.next_episode()
    env.close()
def run_dacbench(results_path, agent_method, num_episodes, bench=None, seeds=None):
    """
    Run all benchmarks for 10 seeds for a given number of episodes with a given agent and save results.

    Parameters
    ----------
    results_path : str
        Path to where results should be saved
    agent_method : function
        Method that takes an env as input and returns an agent
    num_episodes : int
        Number of episodes to run for each benchmark
    bench : list[str]
        names of benchmarks to run. If None is given, run all.
    seeds : list[int]
        List of seeds to run all benchmarks for. If None (default) seeds [0, ..., 9] are used.
    """
    if bench is None:
        bench = map(benchmarks.__dict__.get, benchmarks.__all__)
    else:
        bench = [getattr(benchmarks, b) for b in bench]
    seeds = seeds if seeds is not None else range(10)
    for b in bench:
        print(f"Evaluating {b.__name__}")
        for i in seeds:
            print(f"Seed {i}/10")
            # Renamed from 'bench' to avoid shadowing the function argument.
            benchmark = b()
            try:
                env = benchmark.get_benchmark(seed=i)
            except Exception:
                # Fix: a bare 'except:' also swallowed KeyboardInterrupt and
                # SystemExit; only skip ordinary benchmark-construction failures.
                continue
            logger = Logger(
                experiment_name=f"seed_{i}",
                output_path=Path(results_path) / f"{b.__name__}",
            )
            perf_logger = logger.add_module(PerformanceTrackingWrapper)
            logger.add_benchmark(benchmark)
            logger.set_env(env)
            env = PerformanceTrackingWrapper(env, logger=perf_logger)
            agent = agent_method(env)
            logger.add_agent(agent)
            run_benchmark(env, agent, num_episodes, logger)
            logger.close()
| 3,075 | 29.76 | 103 | py |
DACBench | DACBench-main/dacbench/__init__.py | """DACBench: a benchmark library for Dynamic Algorithm Configuration"""
__version__ = "0.2.1"
__contact__ = "automl.org"
from dacbench.abstract_benchmark import AbstractBenchmark
from dacbench.abstract_env import AbstractEnv, AbstractMADACEnv
__all__ = ["AbstractEnv", "AbstractMADACEnv", "AbstractBenchmark"]
from gymnasium.envs.registration import register
from dacbench import benchmarks
# Register all benchmark environments with gymnasium so they can be created
# via gymnasium.make("<Name>-v0"). Best-effort: if any benchmark cannot be
# instantiated (missing dependencies or instance sets), registration is
# aborted with a warning instead of failing the import of dacbench.
try:
    for b in benchmarks.__all__:
        bench = getattr(benchmarks, b)()
        bench.read_instance_set()
        env_name = b[:-9]  # strip the "Benchmark" suffix to get the env name
        register(
            id=f"{env_name}-v0",
            entry_point=f"dacbench.envs:{env_name}Env",
            kwargs={"config": bench.config},
        )
except Exception:
    # Fix: a bare 'except:' would also hide KeyboardInterrupt/SystemExit.
    print(
        "DACBench Gym registration failed - make sure you have all dependencies installed and their instance sets in the right path!"
    )
| 856 | 29.607143 | 133 | py |
DACBench | DACBench-main/dacbench/benchmarks/sgd_benchmark.py | import csv
import os
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from gymnasium import spaces
from torch.nn import NLLLoss
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs import SGDEnv
from dacbench.envs.sgd import Reward
# Default configuration space: a single integer hyperparameter that encodes
# the learning-rate action of the SGD benchmark.
DEFAULT_CFG_SPACE = CS.ConfigurationSpace()
LR = CSH.UniformIntegerHyperparameter(name="learning_rate", lower=0, upper=10)
DEFAULT_CFG_SPACE.add_hyperparameter(LR)
def __default_loss_function(**kwargs):
    """Return the default training loss: an element-wise (unreduced) NLLLoss."""
    # reduction="none" keeps per-sample losses, as the SGD env expects.
    return NLLLoss(reduction="none", **kwargs)
# Static benchmark metadata (human-readable description of reward and state).
INFO = {
    "identifier": "LR",
    "name": "Learning Rate Adaption for Neural Networks",
    "reward": "Negative Log Differential Validation Loss",
    "state_description": [
        "Predictive Change Variance (Discounted Average)",
        "Predictive Change Variance (Uncertainty)",
        "Loss Variance (Discounted Average)",
        "Loss Variance (Uncertainty)",
        "Current Learning Rate",
        "Training Loss",
        "Validation Loss",
        "Step",
        "Alignment",
        "Crashed",
    ],
}
# Default environment configuration for the SGD benchmark. Keys mirror the
# attributes read by SGDEnv; 'features' selects which observations are exposed.
SGD_DEFAULTS = objdict(
    {
        "config_space": DEFAULT_CFG_SPACE,
        "action_space_class": "Box",
        "action_space_args": [np.array([0]), np.array([10])],
        "observation_space_class": "Dict",
        "observation_space_type": None,
        "observation_space_args": [
            {
                "predictiveChangeVarDiscountedAverage": spaces.Box(
                    low=-np.inf, high=np.inf, shape=(1,)
                ),
                "predictiveChangeVarUncertainty": spaces.Box(
                    low=0, high=np.inf, shape=(1,)
                ),
                "lossVarDiscountedAverage": spaces.Box(
                    low=-np.inf, high=np.inf, shape=(1,)
                ),
                "lossVarUncertainty": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "currentLR": spaces.Box(low=0, high=1, shape=(1,)),
                "trainingLoss": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "validationLoss": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "step": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "alignment": spaces.Box(low=0, high=1, shape=(1,)),
                "crashed": spaces.Discrete(2),
            }
        ],
        "reward_type": Reward.LogDiffTraining,
        "cutoff": 1e3,
        "lr": 1e-3,
        "discount_factor": 0.9,
        "optimizer": "rmsprop",
        "loss_function": __default_loss_function,
        "loss_function_kwargs": {},
        "val_loss_function": __default_loss_function,
        "val_loss_function_kwargs": {},
        "training_batch_size": 64,
        "validation_batch_size": 64,
        "train_validation_ratio": 0.8,
        "dataloader_shuffle": True,
        "no_cuda": False,
        "beta1": 0.9,
        "beta2": 0.9,
        "epsilon": 1.0e-06,
        "clip_grad": (-1.0, 1.0),
        "seed": 0,
        "cd_paper_reconstruction": False,
        "cd_bias_correction": True,
        "terminate_on_crash": False,
        "crash_penalty": 0.0,
        "instance_set_path": "../instance_sets/sgd/sgd_train_100instances.csv",
        "benchmark_info": INFO,
        "features": [
            "predictiveChangeVarDiscountedAverage",
            "predictiveChangeVarUncertainty",
            "lossVarDiscountedAverage",
            "lossVarUncertainty",
            "currentLR",
            "trainingLoss",
            "validationLoss",
            "step",
            "alignment",
            "crashed",
        ],
    }
)
# Set reward range based on the chosen reward type
SGD_DEFAULTS.reward_range = SGD_DEFAULTS["reward_type"].func.frange
class SGDBenchmark(AbstractBenchmark):
    """
    Benchmark with default configuration & relevant functions for SGD
    """
    def __init__(self, config_path=None, config=None):
        """
        Initialize SGD Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        config : objdict
            Fully constructed config to use instead of a file (optional)
        """
        super(SGDBenchmark, self).__init__(config_path, config)
        if not self.config:
            self.config = objdict(SGD_DEFAULTS.copy())
        # Backfill defaults for any keys a user-supplied config did not set.
        for key in SGD_DEFAULTS:
            if key not in self.config:
                self.config[key] = SGD_DEFAULTS[key]
    def get_environment(self):
        """
        Return SGDEnv env with current configuration

        Returns
        -------
        SGDEnv
            SGD environment
        """
        if "instance_set" not in self.config.keys():
            self.read_instance_set()
        # Read test set if path is specified
        if (
            "test_set" not in self.config.keys()
            and "test_set_path" in self.config.keys()
        ):
            self.read_instance_set(test=True)
        env = SGDEnv(self.config)
        # Apply any wrappers registered via the benchmark's wrap functions.
        for func in self.wrap_funcs:
            env = func(env)
        return env
    def read_instance_set(self, test=False):
        """
        Read path of instances from config into list

        Parameters
        -------
        test : bool
            if True, read the test set instead of the training set
        """
        if test:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.test_set_path
            )
            keyword = "test_set"
        else:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.instance_set_path
            )
            keyword = "instance_set"
        self.config[keyword] = {}
        with open(path, "r") as fh:
            reader = csv.DictReader(fh, delimiter=";")
            for row in reader:
                # Dataset names may carry a size suffix, e.g. "MNIST_5000".
                if "_" in row["dataset"]:
                    dataset_info = row["dataset"].split("_")
                    dataset_name = dataset_info[0]
                    dataset_size = int(dataset_info[1])
                else:
                    dataset_name = row["dataset"]
                    dataset_size = None
                # Instance layout: [dataset, seed, architecture, steps, size].
                instance = [
                    dataset_name,
                    int(row["seed"]),
                    row["architecture"],
                    int(row["steps"]),
                    dataset_size,
                ]
                self.config[keyword][int(row["ID"])] = instance
    def get_benchmark(self, instance_set_path=None, seed=0):
        """
        Get benchmark from the LTO paper

        Parameters
        -------
        instance_set_path : str
            Path to the instance set to use (optional)
        seed : int
            Environment seed

        Returns
        -------
        env : SGDEnv
            SGD environment
        """
        self.config = objdict(SGD_DEFAULTS.copy())
        if instance_set_path is not None:
            self.config["instance_set_path"] = instance_set_path
        self.config.seed = seed
        self.read_instance_set()
        return SGDEnv(self.config)
| 6,789 | 30.004566 | 81 | py |
DACBench | DACBench-main/dacbench/benchmarks/luby_benchmark.py | import csv
import os
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs import LubyEnv, luby_gen
from dacbench.wrappers import RewardNoiseWrapper
# Maximum episode length; the Luby sequence is precomputed (in log2) to cover it.
MAX_STEPS = 2**6
LUBY_SEQUENCE = np.log2([next(luby_gen(i)) for i in range(1, 2 * MAX_STEPS + 2)])
# Number of previous actions/steps included in the observation.
HISTORY_LENGTH = 5
# Default configuration space: choose the next (log2) Luby sequence element.
DEFAULT_CFG_SPACE = CS.ConfigurationSpace()
SEQ = CSH.UniformIntegerHyperparameter(
    name="sequence_element", lower=0, upper=np.log2(MAX_STEPS)
)
DEFAULT_CFG_SPACE.add_hyperparameter(SEQ)
# Static benchmark metadata (human-readable description of reward and state).
INFO = {
    "identifier": "Luby",
    "name": "Luby Sequence Approximation",
    "reward": "Boolean sucess indication",
    "state_description": [
        "Action t-2",
        "Step t-2",
        "Action t-1",
        "Step t-1",
        "Action t (current)",
        "Step t (current)",
    ],
}
# Default environment configuration values read by LubyEnv.
LUBY_DEFAULTS = objdict(
    {
        "config_space": DEFAULT_CFG_SPACE,
        "observation_space_class": "Box",
        "observation_space_type": np.float32,
        "observation_space_args": [
            np.array([-1 for _ in range(HISTORY_LENGTH + 1)]),
            np.array([2 ** max(LUBY_SEQUENCE + 1) for _ in range(HISTORY_LENGTH + 1)]),
        ],
        "reward_range": (-1, 0),
        "cutoff": MAX_STEPS,
        "hist_length": HISTORY_LENGTH,
        "min_steps": 2**3,
        "seed": 0,
        "instance_set_path": "../instance_sets/luby/luby_default.csv",
        "benchmark_info": INFO,
    }
)
class LubyBenchmark(AbstractBenchmark):
    """
    Benchmark with default configuration & relevant functions for Luby
    """
    # Fix: the docstring previously said "Sigmoid" (copy-paste error).

    def __init__(self, config_path=None, config=None):
        """
        Initialize Luby Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        """
        super(LubyBenchmark, self).__init__(config_path, config)
        if not self.config:
            self.config = objdict(LUBY_DEFAULTS.copy())
        # Backfill defaults for any keys a user-supplied config did not set.
        for key in LUBY_DEFAULTS:
            if key not in self.config:
                self.config[key] = LUBY_DEFAULTS[key]

    def get_environment(self):
        """
        Return Luby env with current configuration

        Returns
        -------
        LubyEnv
            Luby environment
        """
        if "instance_set" not in self.config.keys():
            self.read_instance_set()
        # Read test set if path is specified
        if (
            "test_set" not in self.config.keys()
            and "test_set_path" in self.config.keys()
        ):
            self.read_instance_set(test=True)
        env = LubyEnv(self.config)
        for func in self.wrap_funcs:
            env = func(env)
        return env

    def set_cutoff(self, steps):
        """
        Set cutoff and adapt dependencies

        Parameters
        -------
        steps : int
            Maximum number of steps
        """
        self.config.cutoff = steps
        self.config.action_space_args = [int(np.log2(steps))]
        # Recompute the sequence locally for the new cutoff. NOTE(review): the
        # previous code assigned to the name LUBY_SEQUENCE, which only created
        # a local shadowing the module-level constant — set_history_length
        # still uses the stale module-level sequence after this call. Confirm
        # whether that is intended.
        luby_sequence = np.log2([next(luby_gen(i)) for i in range(1, 2 * steps + 2)])
        self.config.observation_space_args = [
            np.array([-1 for _ in range(self.config.hist_length + 1)]),
            np.array(
                [
                    2 ** max(luby_sequence + 1)
                    for _ in range(self.config.hist_length + 1)
                ]
            ),
        ]

    def set_history_length(self, length):
        """
        Set history length and adapt dependencies

        Parameters
        -------
        length : int
            History length
        """
        self.config.hist_length = length
        self.config.observation_space_args = [
            np.array([-1 for _ in range(length + 1)]),
            np.array([2 ** max(LUBY_SEQUENCE + 1) for _ in range(length + 1)]),
        ]

    def read_instance_set(self, test=False):
        """Read instance set from file"""
        if test:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.test_set_path
            )
            keyword = "test_set"
        else:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.instance_set_path
            )
            keyword = "instance_set"
        self.config[keyword] = {}
        with open(path, "r") as fh:
            reader = csv.DictReader(fh)
            for row in reader:
                # An instance is the concatenation of start and stickiness values.
                self.config[keyword][int(row["ID"])] = [
                    float(shift) for shift in row["start"].split(",")
                ] + [float(slope) for slope in row["sticky"].split(",")]

    def get_benchmark(self, L=8, fuzziness=1.5, seed=0):
        """
        Get Benchmark from DAC paper

        Parameters
        -------
        L : int
            Minimum sequence lenght, was 8, 16 or 32 in the paper
        fuzziness : float
            Amount of noise applied. Was 1.5 for most of the experiments
        seed : int
            Environment seed

        Returns
        -------
        env : LubyEnv
            Luby environment (wrapped with reward noise)
        """
        self.config = objdict(LUBY_DEFAULTS.copy())
        self.config.min_steps = L
        self.config.seed = seed
        self.config.instance_set = {0: [0, 0]}
        self.config.reward_range = (-10, 10)
        env = LubyEnv(self.config)
        rng = np.random.RandomState(self.config.seed)

        def fuzz():
            # Gaussian noise centered at -1, as in the paper's experiments.
            return rng.normal(-1, fuzziness)

        fuzzy_env = RewardNoiseWrapper(env, noise_function=fuzz)
        return fuzzy_env
| 5,627 | 27.714286 | 87 | py |
DACBench | DACBench-main/dacbench/benchmarks/fast_downward_benchmark.py | import os
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs import FastDownwardEnv
# The two PDB heuristics the planner can switch between at runtime.
HEURISTICS = [
    "tiebreaking([pdb(pattern=manual_pattern([0,1])),weight(g(),-1)])",
    "tiebreaking([pdb(pattern=manual_pattern([0,2])),weight(g(),-1)])",
]
# Default configuration space: categorical choice between the two heuristics.
DEFAULT_CFG_SPACE = CS.ConfigurationSpace()
HEURISTIC = CSH.CategoricalHyperparameter(name="heuristic", choices=["toy1", "toy2"])
DEFAULT_CFG_SPACE.add_hyperparameter(HEURISTIC)
# Static benchmark metadata: five statistics are observed per heuristic.
INFO = {
    "identifier": "FastDownward",
    "name": "Heuristic Selection for the FastDownward Planner",
    "reward": "Negative Runtime (-1 per step)",
    "state_description": [
        "Average Value (heuristic 1)",
        "Max Value (heuristic 1)",
        "Min Value (heuristic 1)",
        "Open List Entries (heuristic 1)",
        "Variance (heuristic 1)",
        "Average Value (heuristic 2)",
        "Max Value (heuristic 2)",
        "Min Value (heuristic 2)",
        "Open List Entries (heuristic 2)",
        "Variance (heuristic 2)",
    ],
}
# Default environment configuration, including the TCP connection settings
# used to communicate with the external FastDownward process.
FD_DEFAULTS = objdict(
    {
        "heuristics": HEURISTICS,
        "config_space": DEFAULT_CFG_SPACE,
        "observation_space_class": "Box",
        "observation_space_type": np.float32,
        "observation_space_args": [
            np.array([-np.inf for _ in range(5 * len(HEURISTICS))]),
            np.array([np.inf for _ in range(5 * len(HEURISTICS))]),
        ],
        "reward_range": (-np.inf, 0),
        "cutoff": 1e6,
        "use_general_state_info": True,
        "host": "",
        "port": 54322,
        "control_interval": 0,
        "fd_seed": 0,
        "num_steps": None,
        "state_type": 2,
        "config_dir": ".",
        "port_file_id": None,
        "seed": 0,
        "max_rand_steps": 0,
        "instance_set_path": "../instance_sets/fast_downward/train",
        "test_set_path": "../instance_sets/fast_downward/test",
        "fd_path": os.path.dirname(os.path.abspath(__file__))
        + "/../envs/rl-plan/fast-downward/fast-downward.py",
        "parallel": True,
        "fd_logs": None,
        "benchmark_info": INFO,
    }
)
class FastDownwardBenchmark(AbstractBenchmark):
    """
    Benchmark with default configuration & relevant functions for FastDownward
    heuristic selection.
    """
    # Fix: the docstring previously said "Sigmoid" (copy-paste error).

    def __init__(self, config_path=None, config=None):
        """
        Initialize FD Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        """
        super(FastDownwardBenchmark, self).__init__(config_path, config)
        if not self.config:
            self.config = objdict(FD_DEFAULTS.copy())
        # Backfill defaults for any keys a user-supplied config did not set.
        for key in FD_DEFAULTS:
            if key not in self.config:
                self.config[key] = FD_DEFAULTS[key]

    def get_environment(self):
        """
        Return FastDownward env with current configuration

        Returns
        -------
        FastDownwardEnv
            FastDownward environment
        """
        # Fix: the docstring previously said "Luby env" (copy-paste error).
        if "instance_set" not in self.config.keys():
            self.read_instance_set()
        # Read test set if path is specified
        if (
            "test_set" not in self.config.keys()
            and "test_set_path" in self.config.keys()
        ):
            self.read_instance_set(test=True)
        env = FastDownwardEnv(self.config)
        for func in self.wrap_funcs:
            env = func(env)
        return env

    def read_instance_set(self, test=False):
        """
        Read paths of instances from config into list

        Parameters
        -------
        test : bool
            if True, read the test set instead of the training set
        """
        import re

        instances = {}
        if test:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.test_set_path
            )
            keyword = "test_set"
        else:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.instance_set_path
            )
            keyword = "instance_set"
        # Instance files are .pddl or .sas task files; "domain*" files are the
        # shared domain definitions, not instances.
        for root, dirs, files in os.walk(path):
            for f in files:
                if (f.endswith(".pddl") or f.endswith(".sas")) and not f.startswith(
                    "domain"
                ):
                    p = os.path.join(root, f)
                    if f.endswith(".pddl"):
                        index = p.split("/")[-1].split(".")[0]
                    else:
                        index = p.split("/")[-2]
                    index = int(re.sub("[^0-9]", "", index))
                    instances[index] = p
        # Flat-directory fallback if the recursive walk found nothing.
        if len(instances) == 0:
            for f in os.listdir(path):
                f = f.strip()
                if (f.endswith(".pddl") or f.endswith(".sas")) and not f.startswith(
                    "domain"
                ):
                    p = os.path.join(path, f)
                    if f.endswith(".pddl"):
                        index = p.split("/")[-1].split(".")[0]
                    else:
                        index = p.split("/")[-2]
                    # Fix: cast to int like the os.walk branch above so the
                    # instance keys have a consistent type in both code paths.
                    index = int(re.sub("[^0-9]", "", index))
                    instances[index] = p
        self.config[keyword] = instances
        if instances[list(instances.keys())[0]].endswith(".pddl"):
            # Fix: was os.path.join(path + "/domain.pddl") — a single-argument
            # join with manual concatenation; join the two components properly.
            self.config.domain_file = os.path.join(path, "domain.pddl")

    def set_heuristics(self, heuristics):
        """
        Set the heuristics the planner can choose between and resize the
        action and observation spaces accordingly (5 statistics per heuristic).

        Parameters
        -------
        heuristics : list[str]
            FastDownward heuristic definition strings
        """
        self.config.heuristics = heuristics
        self.config.action_space_args = [len(heuristics)]
        self.config.observation_space_args = [
            np.array([-np.inf for _ in range(5 * len(heuristics))]),
            np.array([np.inf for _ in range(5 * len(heuristics))]),
        ]

    def get_benchmark(self, seed=0):
        """
        Get published benchmark

        Parameters
        -------
        seed : int
            Environment seed

        Returns
        -------
        env : FastDownwardEnv
            FD environment
        """
        self.config = objdict(FD_DEFAULTS.copy())
        self.read_instance_set()
        self.read_instance_set(test=True)
        self.config.seed = seed
        env = FastDownwardEnv(self.config)
        return env
| 6,149 | 30.218274 | 85 | py |
DACBench | DACBench-main/dacbench/benchmarks/toysgd_benchmark.py | import os
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
import pandas as pd
from gymnasium import spaces
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs import ToySGDEnv
# Search space: log10 of the learning rate and of the momentum, both in [-10, 0].
DEFAULT_CFG_SPACE = CS.ConfigurationSpace()
LR = CSH.UniformFloatHyperparameter(name="0_log_learning_rate", lower=-10, upper=0)
MOMENTUM = CSH.UniformFloatHyperparameter(name="1_log_momentum", lower=-10, upper=0)
DEFAULT_CFG_SPACE.add_hyperparameter(LR)
DEFAULT_CFG_SPACE.add_hyperparameter(MOMENTUM)
# Benchmark metadata used for logging and documentation.
INFO = {
    "identifier": "toy_sgd",
    "name": "Learning Rate and Momentum Adaption for SGD on Toy Functions",
    "reward": "Negative Log Regret",
    "state_description": [
        "Remaining Budget",
        "Gradient",
        "Current Learning Rate",
        "Current Momentum",
    ],
    "action_description": ["Log Learning Rate", "Log Momentum"],
}
# Default benchmark configuration; missing user options are backfilled from here.
DEFAULTS = objdict(
    {
        "config_space": DEFAULT_CFG_SPACE,
        "observation_space_class": "Dict",
        "observation_space_type": None,
        "observation_space_args": [
            {
                "remaining_budget": spaces.Box(low=0, high=np.inf, shape=(1,)),
                "gradient": spaces.Box(low=-np.inf, high=np.inf, shape=(1,)),
                "learning_rate": spaces.Box(low=0, high=1, shape=(1,)),
                "momentum": spaces.Box(low=0, high=1, shape=(1,)),
            }
        ],
        "reward_range": (-np.inf, np.inf),
        "cutoff": 10,  # episode length in steps
        "seed": 0,
        "multi_agent": False,
        # Path is resolved relative to this file's directory in read_instance_set.
        "instance_set_path": "../instance_sets/toysgd/toysgd_default.csv",
        "benchmark_info": INFO,
    }
)
class ToySGDBenchmark(AbstractBenchmark):
    """Benchmark for learning-rate and momentum control of SGD on toy functions."""

    def __init__(self, config_path=None, config=None):
        """Create the benchmark, backfilling unset options from DEFAULTS.

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        config : objdict
            Explicit configuration overrides (optional)
        """
        super().__init__(config_path, config)
        if not self.config:
            self.config = objdict(DEFAULTS.copy())
        # Any option the user did not specify falls back to the default.
        for key, default_value in DEFAULTS.items():
            if key not in self.config:
                self.config[key] = default_value

    def get_environment(self):
        """Build a ToySGDEnv from the current configuration.

        Instance sets are loaded lazily on first use; registered wrapper
        functions are applied in order.

        Returns
        -------
        ToySGDEnv
            SGD environment
        """
        if "instance_set" not in self.config:
            self.read_instance_set()
        # The test set is optional and only read when a path is configured.
        if "test_set_path" in self.config and "test_set" not in self.config:
            self.read_instance_set(test=True)
        env = ToySGDEnv(self.config)
        for wrapper in self.wrap_funcs:
            env = wrapper(env)
        return env

    def read_instance_set(self, test=False):
        """Read the instance (or test) set csv into the configuration.

        Parameters
        ----------
        test : bool
            Load the test set instead of the training set.
        """
        base_dir = os.path.dirname(os.path.abspath(__file__))
        if test:
            keyword = "test_set"
            path = base_dir + "/" + self.config.test_set_path
        else:
            keyword = "instance_set"
            path = base_dir + "/" + self.config.instance_set_path
        # One row per instance, keyed by its integer ID.
        instances = {}
        frame = pd.read_csv(path, sep=";")
        for _, row in frame.iterrows():
            instances[int(row["ID"])] = row
        self.config[keyword] = instances
| 3,648 | 28.666667 | 84 | py |
DACBench | DACBench-main/dacbench/benchmarks/cma_benchmark.py | import csv
import os
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from gymnasium import spaces
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs import CMAESEnv
# Number of past deltas kept in the observation history.
HISTORY_LENGTH = 40
# Dimensionality of the search-space location reported in the observation.
INPUT_DIM = 10
DEFAULT_CFG_SPACE = CS.ConfigurationSpace()
STEP_SIZE = CSH.UniformFloatHyperparameter(name="Step_size", lower=0, upper=10)
DEFAULT_CFG_SPACE.add_hyperparameter(STEP_SIZE)
# Benchmark metadata used for logging and documentation.
INFO = {
    "identifier": "CMA-ES",
    "name": "Step-size adaption in CMA-ES",
    "reward": "Negative best function value",
    "state_description": [
        "Loc",
        "Past Deltas",
        "Population Size",
        "Sigma",
        "History Deltas",
        "Past Sigma Deltas",
    ],
}
# Default benchmark configuration; missing user options are backfilled from here.
CMAES_DEFAULTS = objdict(
    {
        "action_space_class": "Box",
        "action_space_args": [np.array([0]), np.array([10])],
        "config_space": DEFAULT_CFG_SPACE,
        "observation_space_class": "Dict",
        "observation_space_type": None,
        "observation_space_args": [
            {
                "current_loc": spaces.Box(
                    low=-np.inf, high=np.inf, shape=np.arange(INPUT_DIM).shape
                ),
                "past_deltas": spaces.Box(
                    low=-np.inf, high=np.inf, shape=np.arange(HISTORY_LENGTH).shape
                ),
                "current_ps": spaces.Box(low=-np.inf, high=np.inf, shape=(1,)),
                "current_sigma": spaces.Box(low=-np.inf, high=np.inf, shape=(1,)),
                "history_deltas": spaces.Box(
                    low=-np.inf, high=np.inf, shape=np.arange(HISTORY_LENGTH * 2).shape
                ),
                "past_sigma_deltas": spaces.Box(
                    low=-np.inf, high=np.inf, shape=np.arange(HISTORY_LENGTH).shape
                ),
            }
        ],
        "reward_range": (-(10**9), 0),
        "cutoff": 1e6,
        "hist_length": HISTORY_LENGTH,
        "popsize": 10,
        "seed": 0,
        # Paths are resolved relative to this file's directory in read_instance_set.
        "instance_set_path": "../instance_sets/cma/cma_train.csv",
        "test_set_path": "../instance_sets/cma/cma_test.csv",
        "benchmark_info": INFO,
    }
)
class CMAESBenchmark(AbstractBenchmark):
    """
    Benchmark with default configuration & relevant functions for CMA-ES
    """
    def __init__(self, config_path=None, config=None):
        """
        Initialize CMA Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        config : objdict
            Explicit configuration overrides (optional)
        """
        super(CMAESBenchmark, self).__init__(config_path, config)
        if not self.config:
            self.config = objdict(CMAES_DEFAULTS.copy())
        # Backfill any option the user configuration did not specify.
        for key in CMAES_DEFAULTS:
            if key not in self.config:
                self.config[key] = CMAES_DEFAULTS[key]
    def get_environment(self):
        """
        Return CMAESEnv env with current configuration

        Instance sets are loaded lazily on first use; registered wrapper
        functions are applied in order.

        Returns
        -------
        CMAESEnv
            CMAES environment
        """
        if "instance_set" not in self.config.keys():
            self.read_instance_set()
        # Read test set if path is specified
        if (
            "test_set" not in self.config.keys()
            and "test_set_path" in self.config.keys()
        ):
            self.read_instance_set(test=True)
        env = CMAESEnv(self.config)
        for func in self.wrap_funcs:
            env = func(env)
        return env
    def read_instance_set(self, test=False):
        """
        Read path of instances from config into list

        Parameters
        ----------
        test : bool
            Read the test set instead of the training set.
        """
        if test:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.test_set_path
            )
            keyword = "test_set"
        else:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.instance_set_path
            )
            keyword = "instance_set"
        self.config[keyword] = {}
        with open(path, "r") as fh:
            reader = csv.DictReader(fh)
            for row in reader:
                # Initial location has one column per dimension: init_loc0, init_loc1, ...
                init_locs = [float(row[f"init_loc{i}"]) for i in range(int(row["dim"]))]
                # Instance layout: [function index, dimension, initial sigma, initial location]
                instance = [
                    int(row["fcn_index"]),
                    int(row["dim"]),
                    float(row["init_sigma"]),
                    init_locs,
                ]
                self.config[keyword][int(row["ID"])] = instance
    def get_benchmark(self, seed=0):
        """
        Get benchmark from the LTO paper

        Parameters
        -------
        seed : int
            Environment seed

        Returns
        -------
        env : CMAESEnv
            CMAES environment
        """
        # Fresh copy of the defaults so earlier modifications do not leak in.
        self.config = objdict(CMAES_DEFAULTS.copy())
        self.config.seed = seed
        self.read_instance_set()
        self.read_instance_set(test=True)
        return CMAESEnv(self.config)
| 4,920 | 28.291667 | 88 | py |
DACBench | DACBench-main/dacbench/benchmarks/sigmoid_benchmark.py | import csv
import os
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs import ContinuousSigmoidEnv, ContinuousStateSigmoidEnv, SigmoidEnv
# Number of discrete actions per dimension (here: 2 dimensions with 5 and 10 actions).
ACTION_VALUES = (5, 10)
DEFAULT_CFG_SPACE = CS.ConfigurationSpace()
for i, d in enumerate(ACTION_VALUES):
    # One integer hyperparameter per dimension; valid values are 0..d-1.
    X = CSH.UniformIntegerHyperparameter(name=f"value_dim_{i}", lower=0, upper=d - 1)
    DEFAULT_CFG_SPACE.add_hyperparameter(X)
# Benchmark metadata used for logging and documentation.
INFO = {
    "identifier": "Sigmoid",
    "name": "Sigmoid Function Approximation",
    "reward": "Multiplied Differences between Function and Action in each Dimension",
    "state_description": [
        "Remaining Budget",
        "Shift (dimension 1)",
        "Slope (dimension 1)",
        "Shift (dimension 2)",
        "Slope (dimension 2)",
        "Action 1",
        "Action 2",
    ],
}
# Default benchmark configuration; missing user options are backfilled from here.
SIGMOID_DEFAULTS = objdict(
    {
        "config_space": DEFAULT_CFG_SPACE,
        "action_space_class": "MultiDiscrete",
        "action_space_args": [ACTION_VALUES],
        "observation_space_class": "Box",
        "observation_space_type": np.float32,
        # Observation: remaining budget + (shift, slope, action) per dimension.
        "observation_space_args": [
            np.array([-np.inf for _ in range(1 + len(ACTION_VALUES) * 3)]),
            np.array([np.inf for _ in range(1 + len(ACTION_VALUES) * 3)]),
        ],
        "reward_range": (0, 1),
        "cutoff": 10,
        "action_values": ACTION_VALUES,
        "slope_multiplier": 2.0,
        "seed": 0,
        "multi_agent": False,
        "default_action": [0, 0],
        # Paths are resolved relative to this file's directory in read_instance_set.
        "instance_set_path": "../instance_sets/sigmoid/sigmoid_2D3M_train.csv",
        "test_set_path": "../instance_sets/sigmoid/sigmoid_2D3M_test.csv",
        "benchmark_info": INFO,
    }
)
class SigmoidBenchmark(AbstractBenchmark):
    """
    Benchmark with default configuration & relevant functions for Sigmoid
    """
    def __init__(self, config_path=None, config=None):
        """
        Initialize Sigmoid Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        config : objdict
            Explicit configuration overrides (optional)
        """
        super(SigmoidBenchmark, self).__init__(config_path, config)
        if not self.config:
            self.config = objdict(SIGMOID_DEFAULTS.copy())
        # Backfill any option the user configuration did not specify.
        for key in SIGMOID_DEFAULTS:
            if key not in self.config:
                self.config[key] = SIGMOID_DEFAULTS[key]
    def get_environment(self):
        """
        Return Sigmoid env with current configuration

        The concrete environment class depends on the optional ``env_type``
        config key and the configured action space (see inline comments).

        Returns
        -------
        SigmoidEnv
            Sigmoid environment
        """
        if "instance_set" not in self.config.keys():
            self.read_instance_set()
        # Read test set if path is specified
        if (
            "test_set" not in self.config.keys()
            and "test_set_path" in self.config.keys()
        ):
            self.read_instance_set(test=True)
        if (
            "env_type" in self.config
        ):  # The env_type determines which Sigmoid environment to use.
            if self.config["env_type"].lower() in [
                "continuous",
                "cont",
            ]:  # Either continuous ...
                if (
                    self.config["action_space"] == "Box"
                ):  # ... in both actions and x-axis state, only ...
                    env = ContinuousSigmoidEnv(self.config)
                elif (
                    self.config["action_space"] == "MultiDiscrete"
                ):  # ... continuous in the x-axis state or ...
                    env = ContinuousStateSigmoidEnv(self.config)
                else:
                    raise Exception(
                        f'The given environment type "{self.config["env_type"]}" does not support the'
                        f' chosen action_space "{self.config["action_space"]}". The action space has to'
                        f' be either of type "Box" for continuous actions or "Discrete".'
                    )
            else:  # ... discrete.
                env = SigmoidEnv(self.config)
        else:  # If the type is not specified we the simplest, fully discrete version.
            env = SigmoidEnv(self.config)
        for func in self.wrap_funcs:
            env = func(env)
        return env
    def set_action_values(self, values):
        """
        Adapt action values and update dependencies

        Parameters
        ----------
        values: list
            A list of possible actions per dimension
        """
        # The explicit action list replaces the default config space, which
        # only matches the default (5, 10) action layout.
        del self.config["config_space"]
        self.config.action_space_args = [values]
        # Observation: remaining budget + (shift, slope, action) per dimension.
        self.config.observation_space_args = [
            np.array([-np.inf for _ in range(1 + len(values) * 3)]),
            np.array([np.inf for _ in range(1 + len(values) * 3)]),
        ]
    def read_instance_set(self, test=False):
        """Read instance set from file

        Parameters
        ----------
        test : bool
            Read the test set instead of the training set.
        """
        if test:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.test_set_path
            )
            keyword = "test_set"
        else:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/"
                + self.config.instance_set_path
            )
            keyword = "instance_set"
        self.config[keyword] = {}
        with open(path, "r") as f:
            reader = csv.reader(f)
            for row in reader:
                # NOTE(review): ``f`` is rebound below and shadows the open
                # file handle; iteration still works because ``reader`` holds
                # its own reference, but a different name would be clearer.
                f = []
                inst_id = None
                # First column is the instance ID, the rest are floats;
                # rows with no parsable floats (e.g. headers) are skipped.
                for i in range(len(row)):
                    if i == 0:
                        try:
                            inst_id = int(row[i])
                        except Exception:
                            continue
                    else:
                        try:
                            f.append(float(row[i]))
                        except Exception:
                            continue
                if not len(f) == 0:
                    self.config[keyword][inst_id] = f
    def get_benchmark(self, dimension=None, seed=0):
        """
        Get Benchmark from DAC paper

        Parameters
        -------
        dimension : int
            Sigmoid dimension, was 1, 2, 3 or 5 in the paper
        seed : int
            Environment seed

        Returns
        -------
        env : SigmoidEnv
            Sigmoid environment
        """
        self.config = objdict(SIGMOID_DEFAULTS.copy())
        if dimension == 1:
            self.set_action_values([3])
            self.config.instance_set_path = (
                "../instance_sets/sigmoid/sigmoid_1D3M_train.csv"
            )
            self.config.test_set_path = "../instance_sets/sigmoid/sigmoid_1D3M_test.csv"
            self.config.benchmark_info["state_description"] = [
                "Remaining Budget",
                "Shift (dimension 1)",
                "Slope (dimension 1)",
                "Action",
            ]
        if dimension == 2:
            # The default config already uses the 2D instance set, so only
            # the action values change here.
            self.set_action_values([3, 3])
        if dimension == 3:
            self.set_action_values((3, 3, 3))
            self.config.instance_set_path = (
                "../instance_sets/sigmoid/sigmoid_3D3M_train.csv"
            )
            self.config.test_set_path = "../instance_sets/sigmoid/sigmoid_3D3M_test.csv"
            self.config.benchmark_info["state_description"] = [
                "Remaining Budget",
                "Shift (dimension 1)",
                "Slope (dimension 1)",
                "Shift (dimension 2)",
                "Slope (dimension 2)",
                "Shift (dimension 3)",
                "Slope (dimension 3)",
                "Action 1",
                "Action 2",
                "Action 3",
            ]
        if dimension == 5:
            self.set_action_values((3, 3, 3, 3, 3))
            self.config.instance_set_path = (
                "../instance_sets/sigmoid/sigmoid_5D3M_train.csv"
            )
            self.config.test_set_path = "../instance_sets/sigmoid/sigmoid_5D3M_test.csv"
            self.config.benchmark_info["state_description"] = [
                "Remaining Budget",
                "Shift (dimension 1)",
                "Slope (dimension 1)",
                "Shift (dimension 2)",
                "Slope (dimension 2)",
                "Shift (dimension 3)",
                "Slope (dimension 3)",
                "Shift (dimension 4)",
                "Slope (dimension 4)",
                "Shift (dimension 5)",
                "Slope (dimension 5)",
                "Action 1",
                "Action 2",
                "Action 3",
                "Action 4",
                "Action 5",
            ]
        self.config.seed = seed
        self.read_instance_set()
        self.read_instance_set(test=True)
        env = SigmoidEnv(self.config)
        return env
| 8,827 | 32.694656 | 104 | py |
DACBench | DACBench-main/dacbench/benchmarks/modcma_benchmark.py | import itertools
import os
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from modcma import Parameters
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs import CMAStepSizeEnv, ModCMAEnv
# One categorical hyperparameter per modcma module. The numeric name prefixes
# presumably keep ConfigSpace's alphabetical ordering aligned with the order of
# Parameters.__modules__ — TODO confirm against the modcma package.
DEFAULT_CFG_SPACE = CS.ConfigurationSpace()
ACTIVE = CSH.CategoricalHyperparameter(name="0_active", choices=[True, False])
ELITIST = CSH.CategoricalHyperparameter(name="1_elitist", choices=[True, False])
ORTHOGONAL = CSH.CategoricalHyperparameter(name="2_orthogonal", choices=[True, False])
SEQUENTIAL = CSH.CategoricalHyperparameter(name="3_sequential", choices=[True, False])
THRESHOLD_CONVERGENCE = CSH.CategoricalHyperparameter(
    name="4_threshold_convergence", choices=[True, False]
)
STEP_SIZE_ADAPTION = CSH.CategoricalHyperparameter(
    name="5_step_size_adaption",
    choices=["csa", "tpa", "msr", "xnes", "m-xnes", "lp-xnes", "psr"],
)
MIRRORED = CSH.CategoricalHyperparameter(
    name="6_mirrored", choices=["None", "mirrored", "mirrored pairwise"]
)
BASE_SAMPLER = CSH.CategoricalHyperparameter(
    name="7_base_sampler", choices=["gaussian", "sobol", "halton"]
)
WEIGHTS_OPTION = CSH.CategoricalHyperparameter(
    name="8_weights_option", choices=["default", "equal", "1/2^lambda"]
)
LOCAL_RESTART = CSH.CategoricalHyperparameter(
    name="90_local_restart", choices=["None", "IPOP", "BIPOP"]
)
BOUND_CORRECTION = CSH.CategoricalHyperparameter(
    name="91_bound_correction",
    choices=["None", "saturate", "unif_resample", "COTN", "toroidal", "mirror"],
)
DEFAULT_CFG_SPACE.add_hyperparameter(ACTIVE)
DEFAULT_CFG_SPACE.add_hyperparameter(ELITIST)
DEFAULT_CFG_SPACE.add_hyperparameter(ORTHOGONAL)
DEFAULT_CFG_SPACE.add_hyperparameter(SEQUENTIAL)
DEFAULT_CFG_SPACE.add_hyperparameter(THRESHOLD_CONVERGENCE)
DEFAULT_CFG_SPACE.add_hyperparameter(STEP_SIZE_ADAPTION)
DEFAULT_CFG_SPACE.add_hyperparameter(MIRRORED)
DEFAULT_CFG_SPACE.add_hyperparameter(BASE_SAMPLER)
DEFAULT_CFG_SPACE.add_hyperparameter(WEIGHTS_OPTION)
DEFAULT_CFG_SPACE.add_hyperparameter(LOCAL_RESTART)
DEFAULT_CFG_SPACE.add_hyperparameter(BOUND_CORRECTION)
# Benchmark metadata used for logging and documentation.
INFO = {
    "identifier": "ModCMA",
    "name": "Online Selection of CMA-ES Variants",
    "reward": "Negative best function value",
    "state_description": [
        "Generation Size",
        "Sigma",
        "Remaining Budget",
        "Function ID",
        "Instance ID",
    ],
}
# Default benchmark configuration; missing user options are backfilled from here.
MODCMA_DEFAULTS = objdict(
    {
        "config_space": DEFAULT_CFG_SPACE,
        "action_space_class": "MultiDiscrete",
        # One discrete choice per modcma module; binary modules default to 2 options.
        "action_space_args": [
            list(
                map(
                    lambda m: len(
                        getattr(getattr(Parameters, m), "options", [False, True])
                    ),
                    Parameters.__modules__,
                )
            )
        ],
        "observation_space_class": "Box",
        "observation_space_args": [-np.inf * np.ones(5), np.inf * np.ones(5)],
        "observation_space_type": np.float32,
        "reward_range": (-(10**12), 0),
        "budget": 100,
        "cutoff": 1e6,
        "seed": 0,
        "multi_agent": False,
        "instance_set_path": os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../instance_sets/modea/modea_train.csv",
        ),
        # NOTE(review): the test set points at the *train* csv — confirm intended.
        "test_set_path": os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../instance_sets/modea/modea_train.csv",
        ),
        "benchmark_info": INFO,
    }
)
class ModCMABenchmark(AbstractBenchmark):
    """Benchmark for online selection of modular CMA-ES variants.

    With ``step_size=True`` the benchmark controls only the continuous step
    size; otherwise it selects the full (discrete) module configuration.
    """

    def __init__(self, config_path: str = None, step_size=False, config=None):
        """Initialize the benchmark.

        Parameters
        ----------
        config_path : str
            Path to config file (optional)
        step_size : bool
            Control step size (Box action space) instead of module choices.
        config : objdict
            Explicit configuration overrides (optional)
        """
        super().__init__(config_path, config)
        # Defaults first, then any user-provided options layered on top.
        self.config = objdict(MODCMA_DEFAULTS.copy(), **(self.config or dict()))
        self.step_size = step_size

    def get_environment(self):
        """Return a ModCMAEnv (or CMAStepSizeEnv) for the current config.

        Instance sets are loaded lazily on first use; registered wrapper
        functions are applied in order.

        Returns
        -------
        ModCMAEnv or CMAStepSizeEnv
        """
        if "instance_set" not in self.config:
            self.read_instance_set()
        # Read test set if path is specified
        if (
            "test_set" not in self.config.keys()
            and "test_set_path" in self.config.keys()
        ):
            self.read_instance_set(test=True)
        if self.step_size:
            # Continuous control of sigma only.
            self.config.action_space_class = "Box"
            self.config.action_space_args = [np.array([0]), np.array([10])]
            env = CMAStepSizeEnv(self.config)
        else:
            env = ModCMAEnv(self.config)
        for func in self.wrap_funcs:
            env = func(env)
        return env

    def read_instance_set(self, test=False):
        """Read instances from the configured csv file into the config.

        Each instance is stored as [dimension, function id, instance id,
        module representation], keyed by its integer ID.

        Parameters
        ----------
        test : bool
            Read the test set instead of the training set.
        """
        if test:
            path = self.config.test_set_path
            keyword = "test_set"
        else:
            path = self.config.instance_set_path
            keyword = "instance_set"
        self.config[keyword] = dict()
        with open(path, "r") as fh:
            # islice(fh, 1, None) skips the csv header line.
            for line in itertools.islice(fh, 1, None):
                _id, dim, fid, iid, *representation = line.strip().split(",")
                self.config[keyword][int(_id)] = [
                    int(dim),
                    int(fid),
                    int(iid),
                    list(map(int, representation)),
                ]

    def get_benchmark(self, seed: int = 0):
        """Get the published benchmark with train and test instance sets.

        Parameters
        ----------
        seed : int
            Environment seed

        Returns
        -------
        ModCMAEnv
        """
        # Wrap in objdict like the sibling benchmarks do: dict.copy() on a
        # dict subclass returns a plain dict, which would break the
        # attribute-style access (``self.config.seed``) below.
        self.config = objdict(MODCMA_DEFAULTS.copy())
        self.config.seed = seed
        self.read_instance_set()
        self.read_instance_set(test=True)
        return ModCMAEnv(self.config)
| 5,343 | 33.701299 | 86 | py |
DACBench | DACBench-main/dacbench/benchmarks/__init__.py | # flake8: noqa: F401
import importlib
import warnings
from dacbench.benchmarks.fast_downward_benchmark import FastDownwardBenchmark
from dacbench.benchmarks.geometric_benchmark import GeometricBenchmark
from dacbench.benchmarks.luby_benchmark import LubyBenchmark
from dacbench.benchmarks.sigmoid_benchmark import SigmoidBenchmark
from dacbench.benchmarks.toysgd_benchmark import ToySGDBenchmark
__all__ = [
"LubyBenchmark",
"SigmoidBenchmark",
"ToySGDBenchmark",
"GeometricBenchmark",
"FastDownwardBenchmark",
]
cma_spec = importlib.util.find_spec("cma")
found = cma_spec is not None
if found:
from dacbench.benchmarks.cma_benchmark import CMAESBenchmark
__all__.append("CMAESBenchmark")
else:
warnings.warn(
"CMA-ES Benchmark not installed. If you want to use this benchmark, please follow the installation guide."
)
modcma_spec = importlib.util.find_spec("modcma")
found = modcma_spec is not None
if found:
from dacbench.benchmarks.modcma_benchmark import ModCMABenchmark
__all__.append("ModCMABenchmark")
else:
warnings.warn(
"ModCMA Benchmark not installed. If you want to use this benchmark, please follow the installation guide."
)
sgd_spec = importlib.util.find_spec("backpack")
found = sgd_spec is not None
if found:
from dacbench.benchmarks.sgd_benchmark import SGDBenchmark
__all__.append("SGDBenchmark")
else:
warnings.warn(
"SGD Benchmark not installed. If you want to use this benchmark, please follow the installation guide."
)
theory_spec = importlib.util.find_spec("uuid")
found = theory_spec is not None
if found:
from dacbench.benchmarks.theory_benchmark import TheoryBenchmark
__all__.append("TheoryBenchmark")
else:
warnings.warn(
"Theory Benchmark not installed. If you want to use this benchmark, please follow the installation guide."
)
| 1,888 | 28.984127 | 114 | py |
DACBench | DACBench-main/dacbench/benchmarks/geometric_benchmark.py | import csv
import os
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import numpy as np
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs import GeometricEnv
# Directory of this file; used to resolve the bundled instance set path.
FILE_PATH = os.path.dirname(__file__)
ACTION_VALUES = (5, 10)
# Empty by default: the real config space is built in set_action_values().
DEFAULT_CFG_SPACE = CS.ConfigurationSpace()
# Benchmark metadata used for logging and documentation.
INFO = {
    "identifier": "Geometric",
    "name": "High Dimensional Geometric Curve Approximation. Curves are geometrical orthogonal.",
    "reward": "Overall Euclidean Distance between Point on Curve and Action Vector for all Dimensions",
    "state_description": [
        "Remaining Budget",
        "Dimensions",
    ],
}
# Default benchmark configuration; missing user options are backfilled from here.
GEOMETRIC_DEFAULTS = objdict(
    {
        "config_space": DEFAULT_CFG_SPACE,
        "observation_space_class": "Box",
        "observation_space_type": np.float32,
        "observation_space_args": [],
        "reward_range": (0, 1),
        "seed": 0,
        "multi_agent": False,
        "cutoff": 10,
        "action_values": [],
        "action_value_default": 4,
        # if action_values_variable is True, action_value_mapping will be used instead of action_value_default to define action values
        # action_value_mapping defines the number of action values for different functions
        # sigmoid is split in 3 actions, cubic in 7 etc.
        "action_values_variable": False,
        "action_value_mapping": {
            "sigmoid": 3,
            "linear": 3,
            "parabel": 5,
            "cubic": 7,
            "logarithmic": 4,
            "constant": 1,
            "sinus": 9,
        },
        "action_interval_mapping": {},  # maps actions to equally sized intervals in the interval [-1, 1]
        "derivative_interval": 3,  # defines how many values are used for derivative calculation
        "realistic_trajectory": True,  # True: coordinates are used as trajectory, False: actions are used as trajectories
        "instance_set_path": os.path.join(
            FILE_PATH, "../instance_sets/geometric/geometric_test.csv"
        ),
        # correlation table to chain dimensions -> if dim x changes dim y changes as well
        # either assign numpy array to correlation table or use create_correlation_table()
        "correlation_active": False,
        "correlation_table": None,
        "correlation_info": {
            "high": [(1, 2, "+"), (2, 3, "-"), (1, 5, "+")],
            "middle": [(4, 5, "-")],
            "low": [(4, 6, "+"), (2, 3, "+"), (0, 2, "-")],
        },
        # Maps correlation strength labels to (low, high) sampling ranges.
        "correlation_mapping": {
            "high": (0.5, 1),
            "middle": (0.1, 0.5),
            "low": (0, 0.1),
        },
        "correlation_depth": 4,
        "benchmark_info": INFO,
    }
)
class GeometricBenchmark(AbstractBenchmark):
    """
    Benchmark with default configuration & relevant functions for Geometric
    """
    def __init__(self, config_path=None):
        """
        Initialize Geometric Benchmark

        Parameters
        -------
        config_path : str
            Path to config file (optional)
        """
        # NOTE(review): unlike the sibling benchmarks, this constructor takes
        # no ``config`` override argument — confirm whether that is intended.
        super(GeometricBenchmark, self).__init__(config_path)
        if not self.config:
            self.config = objdict(GEOMETRIC_DEFAULTS.copy())
        # Backfill any option the user configuration did not specify.
        for key in GEOMETRIC_DEFAULTS:
            if key not in self.config:
                self.config[key] = GEOMETRIC_DEFAULTS[key]
        if not self.config["observation_space_type"]:
            self.config["observation_space_type"] = np.float32
    def get_environment(self):
        """
        Return Geometric env with current configuration

        Returns
        -------
        GeometricEnv
            Geometric environment
        """
        if "instance_set" not in self.config.keys():
            self.read_instance_set()
        self.set_action_values()
        self.set_action_description()
        # Build the correlation table unless one was assigned explicitly.
        if (
            self.config.correlation_active
            and not type(self.config.correlation_table) == np.ndarray
        ):
            self.create_correlation_table()
        env = GeometricEnv(self.config)
        for func in self.wrap_funcs:
            env = func(env)
        return env
    def read_instance_set(self):
        """
        Read instance set from file
        Creates a nested List for every Intance.
        The List contains all functions with their respective values.
        """
        path = os.path.join(FILE_PATH, self.config.instance_set_path)
        self.config["instance_set"] = {}
        with open(path, "r") as fh:
            known_ids = []
            reader = csv.DictReader(fh)
            for row in reader:
                function_list = []
                id = int(row["ID"])
                # Rows sharing an ID belong to the same instance; each row
                # describes one function (= one dimension) of that instance.
                if id not in known_ids:
                    self.config.instance_set[id] = []
                    known_ids.append(id)
                for index, element in enumerate(row.values()):
                    # if element == "0" and index != 0:
                    #     break
                    # read numbers from csv as floats
                    # (column 1 is the function name and stays a string)
                    element = float(element) if index != 1 else element
                    function_list.append(element)
                self.config.instance_set[id].append(function_list)
    def get_benchmark(self, dimension=None, seed=0):
        """
        Build the benchmark environment with a fresh default configuration.

        Parameters
        ----------
        dimension : [type], optional
            [description], by default None (currently unused)
        seed : int, optional
            Environment seed, by default 0

        Returns
        -------
        GeometricEnv
            Geometric environment
        """
        self.config = objdict(GEOMETRIC_DEFAULTS.copy())
        self.config.benchmark_info["state_description"] = [
            "Remaining Budget",
            "Dimensions",
        ]
        self.config.seed = seed
        if "instance_set" not in self.config.keys():
            self.read_instance_set()
        self.set_action_values()
        self.set_action_description()
        if (
            self.config.correlation_active
            and not type(self.config.correlation_table) == np.ndarray
        ):
            self.create_correlation_table()
        env = GeometricEnv(self.config)
        return env
    def set_action_values(self):
        """
        Adapt action values and update dependencies
        Number of actions can differ between functions if configured in DefaultDict
        Set observation space args.
        """
        map_action_number = {}
        if self.config.action_values_variable:
            map_action_number = self.config.action_value_mapping
        values = []
        # One entry per function (= dimension) of the first instance.
        for function_info in self.config.instance_set[0]:
            function_name = function_info[1]
            value = map_action_number.get(
                function_name, self.config.action_value_default
            )
            values.append(value)
            # map interval [-1, 1] to action values: each action index is
            # represented by the midpoint of its sub-interval.
            if function_name not in self.config.action_interval_mapping:
                action_interval = []
                step_size = 2 / value
                for step in np.arange(-1, 1, step_size):
                    lower_bound = step
                    upper_bound = step + step_size
                    middle = (lower_bound + upper_bound) / 2
                    action_interval.append(middle)
                self.config.action_interval_mapping[function_name] = np.round(
                    action_interval, 3
                )
        self.config.action_values = values
        # Rebuild the config space with one integer hyperparameter per dimension.
        cs = CS.ConfigurationSpace()
        for i, v in enumerate(values):
            actions = CSH.UniformIntegerHyperparameter(
                name=f"curve_values_dim_{i}", lower=0, upper=v
            )
            cs.add_hyperparameter(actions)
        self.config.config_space = cs
        # Observation: budget + dimension count, then derivative and
        # coordinate per dimension.
        num_info = 2
        self.config.observation_space_args = [
            np.array([-1 for _ in range(num_info + 2 * len(values))]),
            np.array(
                [self.config["cutoff"] for _ in range(num_info + 2 * len(values))]
            ),
        ]
    def set_action_description(self):
        """
        Add Information about Derivative and Coordinate to Description.
        """
        # Guard against appending the entries twice on repeated calls.
        if "Coordinate" in self.config.benchmark_info["state_description"]:
            return
        for index in range(len(self.config.action_values)):
            self.config.benchmark_info["state_description"].append(f"Derivative{index}")
        for index in range(len(self.config.action_values)):
            self.config.benchmark_info["state_description"].append(f"Coordinate{index}")
    def create_correlation_table(self):
        """
        Create correlation table from Config infos
        """
        # NOTE(review): uses the global numpy RNG, so the sampled correlation
        # strengths are not controlled by config.seed — confirm intended.
        n_dimensions = len(self.config.instance_set[0])
        corr_table = np.zeros((n_dimensions, n_dimensions))
        for corr_level, corr_info in self.config.correlation_info.items():
            for dim1, dim2, signum in corr_info:
                low, high = self.config.correlation_mapping[corr_level]
                value = np.random.uniform(low, high)
                try:
                    corr_table[dim1, dim2] = value if signum == "+" else value * -1
                except IndexError:
                    print(
                        "Check your correlation_info dict. Does it have more dimensions than the instance_set?"
                    )
        self.config.correlation_table = corr_table
if __name__ == "__main__":
    # Demo: random rollout with the quadratic-distance reward variant.
    from dacbench.challenge_benchmarks.reward_quality_challenge.reward_functions import (
        quadratic_euclidean_distance_reward_geometric,
    )

    geo_bench = GeometricBenchmark()
    geo_bench.config["correlation_active"] = True
    geo_bench.config["reward_function"] = quadratic_euclidean_distance_reward_geometric
    env = geo_bench.get_environment()
    opt_policy = env.get_optimal_policy()
    # env.render_dimensions([0, 1, 2, 3, 4, 5, 6], "/home/vonglahn/tmp/MultiDAC")
    env.render_3d_dimensions([1, 3], "/home/eimer/tmp")
    while True:
        env.reset()
        done = False
        while not done:
            # sample() works for any action space; ``action_space.n`` only
            # exists for Discrete spaces. gymnasium's step() returns a
            # 5-tuple (obs, reward, terminated, truncated, info).
            state, reward, terminated, truncated, info = env.step(
                env.action_space.sample()
            )
            done = terminated or truncated
            print(reward)
| 10,145 | 31.834951 | 130 | py |
DACBench | DACBench-main/dacbench/benchmarks/theory_benchmark.py | import os
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import gymnasium as gym
import numpy as np
import pandas as pd
from dacbench.abstract_benchmark import AbstractBenchmark, objdict
from dacbench.envs.theory import TheoryEnv, TheoryEnvDiscrete
# Benchmark metadata used for logging and documentation.
INFO = {
    "identifier": "Theory",
    "name": "DAC benchmark with RLS algorithm and LeadingOne problem",
    "reward": "Negative number of iterations until solution",
    "state_description": "specified by user",
}
# Default benchmark configuration; user options override these in TheoryBenchmark.
THEORY_DEFAULTS = {
    "observation_description": "n, f(x)",  # examples: n, f(x), delta_f(x), optimal_k, k, k_{t-0..4}, f(x)_{t-1}, f(x)_{t-0..4}
    "reward_range": [-np.inf, np.inf],  # the true reward range is instance dependent
    "reward_choice": "imp_minus_evals",  # possible values: see envs/theory.py for more details
    "cutoff": 1e6,  # if using as a "train" environment, a cutoff of 0.8*n^2 where n is problem size will be used (for more details, please see https://arxiv.org/abs/2202.03259)
    # see get_environment function of TheoryBenchmark on how to specify a train/test environment
    "seed": 0,
    "seed_action_space": False,  # set this one to True for reproducibility when random action is sampled in the action space with gym.action_space.sample()
    "problem": "LeadingOne",  # possible values: "LeadingOne"
    "instance_set_path": "lo_rls_50.csv",  # if the instance list file cannot be found in the running directory, it will be looked up in <DACBench>/dacbench/instance_sets/theory/
    "discrete_action": True,  # action space is discrete
    "action_choices": [1, 2, 4, 8, 16],  # portfolio of k values
    "benchmark_info": INFO,
    "name": "LeadingOnesDAC",
}
class TheoryBenchmark(AbstractBenchmark):
    """
    Benchmark with various settings for (1+(lbd, lbd))-GA and RLS
    """
    def __init__(self, config=None):
        """
        Initialize a theory benchmark

        Parameters
        -------
        config : dict
            All options specified here override the defaults in THEORY_DEFAULTS
        """
        super(TheoryBenchmark, self).__init__()
        self.config = objdict(THEORY_DEFAULTS)
        if config:
            for key, val in config.items():
                self.config[key] = val
        self.read_instance_set()
        # initialise action space and environment class
        cfg_space = CS.ConfigurationSpace()
        if self.config.discrete_action:
            assert (
                "action_choices" in self.config
            ), "ERROR: action_choices must be specified"
            assert ("min_action" not in self.config) and (
                "max_action" not in self.config
            ), "ERROR: min_action and max_action should not be used for discrete action space"
            self.config.env_class = "TheoryEnvDiscrete"
            n_acts = len(self.config["action_choices"])
            # Valid portfolio indices are 0..n_acts-1; ConfigSpace integer
            # bounds are inclusive, so the upper bound is n_acts - 1.
            action = CSH.UniformIntegerHyperparameter(
                name="", lower=0, upper=n_acts - 1
            )
        else:
            # Key fixed from the misspelled "action_chocies", which made this
            # guard unable to ever fire.
            assert (
                "action_choices" not in self.config
            ), "ERROR: action_choices is only used for discrete action space"
            assert ("min_action" in self.config) and (
                "max_action" in self.config
            ), "ERROR: min_action and max_action must be specified"
            self.config.env_class = "TheoryEnv"
            action = CSH.UniformFloatHyperparameter(
                name="Step_size",
                lower=self.config["min_action"],
                upper=self.config["max_action"],
            )
        cfg_space.add_hyperparameter(action)
        self.config["config_space"] = cfg_space
        # create observation space
        self.env_class = globals()[self.config.env_class]
        assert self.env_class == TheoryEnv or self.env_class == TheoryEnvDiscrete
        self.config[
            "observation_space"
        ] = self.create_observation_space_from_description(
            self.config["observation_description"], self.env_class
        )
    def create_observation_space_from_description(
        self, obs_description, env_class=TheoryEnvDiscrete
    ):
        """
        Create a gym observation space (Box only) based on a string containing observation variable names, e.g. "n, f(x), k, k_{t-1}"

        Parameters
        ----------
        obs_description : str
            Comma-separated observation variable names.
        env_class : type
            Environment class that knows the domain of each variable.

        Return:
            A gym.spaces.Box observation space
        """
        obs_var_names = [s.strip() for s in obs_description.split(",")]
        low = []
        high = []
        for var_name in obs_var_names:
            l, h = env_class.get_obs_domain_from_name(var_name)
            low.append(l)
            high.append(h)
        obs_space = gym.spaces.Box(low=np.array(low), high=np.array(high))
        return obs_space
    def get_environment(self, test_env=False):
        """
        Return an environment with current configuration

        Parameters:
            test_env: whether the enviroment is used for train an agent or for testing.
                if test_env=False:
                    cutoff time for an episode is set to 0.8*n^2 (n: problem size)
                    if an action is out of range, stop the episode immediately and return a large negative reward (see envs/theory.py for more details)
                otherwise: benchmark's original cutoff time is used, and out-of-range action will be clipped to nearest valid value and the episode will continue.
        """
        env = self.env_class(self.config, test_env)
        for func in self.wrap_funcs:
            env = func(env)
        return env
    def read_instance_set(self):
        """
        Read instance set from file
        we look at the current directory first, if the file doesn't exist, we look in <DACBench>/dacbench/instance_sets/theory/
        """
        assert self.config.instance_set_path
        if os.path.isfile(self.config.instance_set_path):
            path = self.config.instance_set_path
        else:
            path = (
                os.path.dirname(os.path.abspath(__file__))
                + "/../instance_sets/theory/"
                + self.config.instance_set_path
            )
        self.config["instance_set"] = pd.read_csv(path, index_col=0).to_dict("id")
        assert len(self.config["instance_set"].items()) > 0, "ERROR: empty instance set"
        assert (
            "initObj" in self.config["instance_set"][0].keys()
        ), "ERROR: initial solution (initObj) must be specified in instance set"
        assert (
            "size" in self.config["instance_set"][0].keys()
        ), "ERROR: problem size must be specified in instance set"
        # Wrap each instance so it supports attribute-style access.
        for key, val in self.config["instance_set"].items():
            self.config["instance_set"][key] = objdict(val)
| 7,032 | 40.370588 | 178 | py |
DACBench | DACBench-main/dacbench/envs/theory.py | import logging
import uuid
from collections import deque
from copy import deepcopy
import gymnasium as gym
import numpy as np
from dacbench import AbstractEnv
class BinaryProblem:
    """An abstract class for an individual in binary representation.

    Holds a boolean numpy array ``data`` of length ``n`` and a cached
    ``fitness`` value. Concrete problems implement ``eval``, ``is_optimal``,
    ``get_optimal`` and the incremental fitness helpers.
    """

    def __init__(self, n, rng=np.random.default_rng()):
        """Initialise with a uniformly random bit string of length n."""
        self.data = rng.choice([True, False], size=n)
        self.n = n
        self.fitness = self.eval()

    def initialise_with_fixed_number_of_bits(self, k, rng=np.random.default_rng()):
        """Switch on randomly chosen zero-bits until at least k bits are set.

        Parameters
        ----------
        k : int
            target number of one-bits
        rng :
            random number generator
        """
        nbits = self.data.sum()
        if nbits < k:
            # BUGFIX: the original used `np.where(self.data is False)`, which
            # compares the array object's *identity* to False (always falsy)
            # and therefore never yields the zero-bit positions; `~self.data`
            # selects them element-wise.
            ids = rng.choice(np.where(~self.data)[0], size=k - nbits, replace=False)
            self.data[ids] = True
            self.eval()

    def is_optimal(self):
        """Return True iff this individual is a global optimum (abstract)."""
        pass

    def get_optimal(self):
        """Return the optimal fitness value (abstract)."""
        pass

    def eval(self):
        """Recompute, cache and return the fitness (abstract)."""
        pass

    def get_fitness_after_flipping(self, locs):
        """
        Calculate the change in fitness after flipping the bits at positions locs

        Parameters
        ----------
        locs: 1d-array
            positions where bits are flipped

        Returns
        -------
        objective after flipping
        """
        raise NotImplementedError

    def get_fitness_after_crossover(self, xprime, locs_x, locs_xprime):
        """
        Calculate fitness of the child aftering being crossovered with xprime.

        Parameters
        ----------
        xprime: 1d boolean array
            the individual to crossover with
        locs_x: 1d boolean/integer array
            positions where we keep current bits of self
        locs_xprime: : 1d boolean/integer array
            positions where we change to xprime's bits

        Returns
        -------
        fitness of the new individual after crossover
        """
        raise NotImplementedError

    def flip(self, locs):
        """
        Flip the bits at position indicated by locs.

        Parameters
        ----------
        locs: 1d-array
            positions where bits are flipped

        Returns
        -------
        the new individual after the flip
        """
        child = deepcopy(self)
        child.data[locs] = ~child.data[locs]
        child.eval()
        return child

    def combine(self, xprime, locs_xprime):
        """
        Combine (crossover) self and xprime by taking xprime's bits at
        locs_xprime and self's bits at other positions.

        Parameters
        ----------
        xprime: 1d boolean array
            the individual to crossover with
        locs_xprime: 1d boolean/integer array
            positions where we change to xprime's bits

        Returns
        -------
        the new individual after the crossover
        """
        child = deepcopy(self)
        child.data[locs_xprime] = xprime.data[locs_xprime]
        child.eval()
        return child

    def mutate(self, p, n_childs, rng=np.random.default_rng()):
        """
        Draw l ~ Binomial(n, p) conditioned on l > 0 and generate n_childs
        children by flipping exactly l bits each.

        Returns
        -------
        the best child (maximum fitness), its fitness and number of
        evaluations used
        """
        assert p >= 0
        if p == 0:
            return self, self.fitness, 0

        # resample until a strictly positive number of flips is drawn
        l = 0
        while l == 0:
            l = rng.binomial(self.n, p)

        best_obj = -1
        best_locs = None
        for i in range(n_childs):
            locs = rng.choice(self.n, size=l, replace=False)
            obj = self.get_fitness_after_flipping(locs)
            if obj > best_obj:
                best_locs = locs
                best_obj = obj

        best_child = self.flip(best_locs)
        return best_child, best_child.fitness, n_childs

    def mutate_rls(self, l, rng=np.random.default_rng()):
        """
        Generate one child by flipping exactly l bits.

        Returns
        -------
        child, its fitness, and the number of evaluations used
        (0 if l == 0, since no new individual is created)
        """
        assert l >= 0
        if l == 0:
            return self, self.fitness, 0

        locs = rng.choice(self.n, size=l, replace=False)
        child = self.flip(locs)
        return child, child.fitness, 1

    def crossover(
        self,
        xprime,
        p,
        n_childs,
        include_xprime=True,
        count_different_inds_only=True,
        rng=np.random.default_rng(),
    ):
        """
        Crossover operation in population.

        Crossover operator: for each bit, taking value from x with
        probability p and from self with probability 1-p.

        Parameters
        ----------
        xprime
            the individual to crossover with
        p : float
            probability in [0,1]
        n_childs : int
            number of child individuals
        include_xprime : bool
            whether to include xprime itself as a candidate best child
        count_different_inds_only : bool
            whether to count only children that differ from both parents
            towards the evaluation budget
        rng:
            random number generator

        Returns
        -------
        best child, its fitness and the number of evaluations charged
        """
        assert p <= 1

        if p == 0:
            if include_xprime:
                return xprime, xprime.fitness, 0
            else:
                return self, self.fitness, 0

        if include_xprime:
            best_obj = xprime.fitness
        else:
            best_obj = -1
        best_locs = None

        n_evals = 0
        ls = rng.binomial(self.n, p, size=n_childs)
        for l in ls:
            locs_xprime = rng.choice(self.n, l, replace=False)
            locs_x = np.full(self.n, True)
            locs_x[locs_xprime] = False
            obj = self.get_fitness_after_crossover(xprime, locs_x, locs_xprime)

            # only charge an evaluation when the child differs from both parents
            if (obj != self.fitness) and (obj != xprime.fitness):
                n_evals += 1
            elif (
                not np.array_equal(xprime.data[locs_xprime], self.data[locs_xprime])
            ) and (not np.array_equal(self.data[locs_x], xprime.data[locs_x])):
                n_evals += 1

            if obj > best_obj:
                best_obj = obj
                best_locs = locs_xprime

        if best_locs is not None:
            child = self.combine(xprime, best_locs)
        else:
            child = xprime

        if not count_different_inds_only:
            n_evals = n_childs

        return child, child.fitness, n_evals
class LeadingOne(BinaryProblem):
    """
    Individual for the LeadingOnes problem.

    The objective (to be maximised) is the number of consecutive 1-bits at
    the front of the bit string.
    """

    def __init__(self, n, rng=np.random.default_rng(), initObj=None):
        """Create an individual; if initObj is given, start with exactly
        that many leading ones (bit initObj itself is forced to zero)."""
        if initObj is None:
            super().__init__(n=n, rng=rng)
        else:
            prefix = int(initObj)
            self.data = rng.choice([True, False], size=n)
            self.data[:prefix] = True
            self.data[prefix] = False
            self.n = n
            self.fitness = self.eval()

    def eval(self):
        """Recompute and cache the number of leading ones."""
        if self.data.all():
            self.fitness = self.n
        else:
            # argmin over booleans yields the index of the first False bit
            self.fitness = self.data.argmin()
        return self.fitness

    def is_optimal(self):
        """True iff every bit is one."""
        return self.data.all()

    def get_optimal(self):
        """The optimal fitness equals the string length n."""
        return self.n

    def get_fitness_after_flipping(self, locs):
        """Fitness the individual would have after flipping the bits at locs
        (self is left unchanged)."""
        first_flip = locs.min()
        if first_flip != self.fitness:
            # A flip strictly before the first zero truncates the prefix at
            # that position; a flip strictly after it cannot change the prefix.
            return min(first_flip, self.fitness)
        # Flipping the first zero bit: evaluate on the temporarily flipped
        # string, then restore the original data and cached fitness.
        saved_fitness = self.fitness
        self.data[locs] = ~self.data[locs]
        new_fitness = self.eval()
        self.data[locs] = ~self.data[locs]
        self.fitness = saved_fitness
        return new_fitness

    def get_fitness_after_crossover(self, xprime, locs_x, locs_xprime):
        """Fitness of the child obtained by taking xprime's bits at locs_xprime."""
        offspring = self.combine(xprime, locs_xprime)
        offspring.eval()
        return offspring.fitness
# Large penalty magnitude returned as reward when an out-of-range action
# aborts a training episode.
MAX_INT = 1e8
# Number of past steps kept in the r / f(x) history deques.
HISTORY_LENGTH = 5
class TheoryEnv(AbstractEnv):
    """
    Environment for RLS with step size.

    Current assumption: we only consider (1+1)-RLS, so there's only one parameter to tune (r)
    """

    def __init__(self, config, test_env=False) -> None:
        """
        Initialize TheoryEnv.

        Parameters
        ----------
        config : objdict
            Environment configuration
        test_env : bool
            whether to use test mode
        """
        super(TheoryEnv, self).__init__(config)
        self.logger = logging.getLogger(self.__str__())

        self.test_env = test_env

        self.name = config.name

        # name of reward function
        assert config.reward_choice in [
            "imp_div_evals",
            "imp_div_evals_new",
            "imp_minus_evals",
            "minus_evals",
            "imp",
            "minus_evals_normalised",
            "imp_minus_evals_normalised",
        ]
        # NOTE(review): "imp_div_evals_new" is accepted by this assert but has
        # no matching branch in step(); selecting it would leave `reward`
        # unbound there — confirm whether a branch is missing.
        self.reward_choice = config.reward_choice
        # print("Reward choice: " + self.reward_choice)

        # get problem
        self.problem = globals()[config.problem]

        # read names of all observation variables
        self.obs_description = config.observation_description
        self.obs_var_names = [
            s.strip() for s in config.observation_description.split(",")
        ]

        # functions to get values of the current state from histories
        # (see reset() function for those history variables)
        self.state_functions = []
        for var_name in self.obs_var_names:
            if var_name == "n":
                self.state_functions.append(lambda: self.n)
            elif var_name in ["r"]:
                # default-argument trick binds the history attribute name at
                # lambda-definition time (avoids late binding of var_name)
                self.state_functions.append(
                    lambda his="history_" + var_name: vars(self)[his][-1]
                )
            elif (
                "_{t-" in var_name
            ):  # TODO: this implementation only allow accessing history of r, but not delta_f(x), optimal_k, etc
                k = int(
                    var_name.split("_{t-")[1][:-1]
                )  # get the number in _{t-<number>}
                name = var_name.split("_{t-")[0]  # get the variable name (r, f(x), etc)
                # NOTE(review): `k` is captured late-bound inside this lambda
                # (only `his` uses the default-arg trick); with several
                # "_{t-<k>}" variables all lambdas would see the last `k` —
                # verify this is never exercised with more than one lag.
                self.state_functions.append(
                    lambda his="history_" + name: vars(self)[his][-(k + 1)]
                )  # the last element is the value at the current time step, so we have to go one step back to access the history
            elif var_name == "f(x)":
                self.state_functions.append(lambda: self.history_fx[-1])
            elif var_name == "delta_f(x)":
                self.state_functions.append(
                    lambda: self.history_fx[-1] - self.history_fx[-2]
                )
            elif var_name == "optimal_r":
                # theoretically optimal mutation strength for LeadingOnes
                self.state_functions.append(
                    lambda: int(self.n / (self.history_fx[-1] + 1))
                )
            else:
                raise Exception("Error: invalid state variable name: " + var_name)

        # the random generator used by RLS
        if "seed" in config:
            seed = config.seed
        else:
            seed = None
        # an instance-level seed overrides the config-level one
        if "seed" in self.instance:
            seed = self.instance.seed
        self.seed(seed)

        # for logging
        self.outdir = None
        if "outdir" in config:
            self.outdir = config.outdir + "/" + str(uuid.uuid4())

    def get_obs_domain_from_name(var_name):
        """
        Get default lower and upperbound of a observation variable based on its name.
        The observation space will then be created

        Returns
        -------
        Two int values, e.g., 1, np.inf
        """
        # NOTE: declared without self/@staticmethod; it is always invoked on
        # the class itself (env_class.get_obs_domain_from_name(name)), where
        # it behaves like a static method.
        return 0, np.inf

    def reset(self, seed=None, options={}):
        """
        Resets env.

        Returns
        -------
        numpy.array
            Environment state
        """
        super(TheoryEnv, self).reset_(seed)

        # current problem size (n) & evaluation limit (max_evals)
        self.n = self.instance.size
        if self.test_env:
            self.max_evals = self.n_steps
        else:
            # training cutoff: 0.8 * n^2 evaluations
            self.max_evals = int(0.8 * self.n * self.n)
        self.logger.info("n:%d, max_evals:%d" % (self.n, self.max_evals))

        # set random seed
        if "seed" in self.instance:
            self.seed(self.instance.seed)

        # create an initial solution
        if self.instance.initObj == "random":
            self.x = self.problem(n=self.instance.size, rng=self.np_random)
        else:
            self.x = self.problem(
                n=self.instance.size, rng=self.np_random, initObj=self.instance.initObj
            )

        # total number of evaluations so far (1: the initial solution)
        self.total_evals = 1

        # reset histories (pre-filled so lagged observations are defined)
        self.history_r = deque([0] * HISTORY_LENGTH, maxlen=HISTORY_LENGTH)
        self.history_fx = deque(
            [self.x.fitness] * HISTORY_LENGTH, maxlen=HISTORY_LENGTH
        )

        # for debug only
        self.log_r = []
        self.log_reward = []
        self.log_fx = []
        self.init_obj = self.x.fitness

        return self.get_state(), {}

    def get_state(self):
        """Return the current observation: one entry per configured
        observation variable, in the order given in the description."""
        return np.asarray([f() for f in self.state_functions])

    def step(self, action):
        """
        Execute environment step.

        Parameters
        ----------
        action : Box
            action to execute

        Returns
        -------
        state, reward, terminated, truncated, info
            np.array, float, bool, bool, dict
        """
        truncated = super(TheoryEnv, self).step_()
        fitness_before_update = self.x.fitness

        # get r
        if isinstance(action, np.ndarray) or isinstance(action, list):
            assert len(action) == 1
            r = action[0]
        else:
            r = action

        # if r is out of range
        stop = False
        if r < 1 or r > self.n:
            self.logger.info(f"WARNING: r={r} is out of bound")

            # if we're in the training phase, we return a large negative reward and stop the episode
            if self.test_env is False:
                terminated = True
                n_evals = 0
                reward = -MAX_INT
                stop = True
            # if we're in the test phase, just clip r back to the range and continue
            else:
                r = np.clip(r, 1, self.n)

        if stop is False:
            # flip r bits
            r = int(r)
            y, f_y, n_evals = self.x.mutate_rls(r, self.np_random)

            # update x ((1+1) elitist selection: keep on ties)
            if self.x.fitness <= y.fitness:
                self.x = y

            # update total number of evaluations
            self.total_evals += n_evals

            # check stopping criteria
            terminated = (self.total_evals >= self.max_evals) or (self.x.is_optimal())

            # calculate reward
            if self.reward_choice == "imp_div_evals":
                reward = (self.x.fitness - fitness_before_update - 0.5) / n_evals
            elif self.reward_choice == "imp_minus_evals":
                reward = self.x.fitness - fitness_before_update - n_evals
            elif self.reward_choice == "minus_evals":
                reward = -n_evals
            elif self.reward_choice == "minus_evals_normalised":
                reward = -n_evals / self.max_evals
            elif self.reward_choice == "imp_minus_evals_normalised":
                reward = (
                    self.x.fitness - fitness_before_update - n_evals
                ) / self.max_evals
            elif self.reward_choice == "imp":
                reward = self.x.fitness - fitness_before_update - 0.5
                # NOTE(review): reward is appended here AND again in the
                # "update logs" section below, so "imp" rewards appear to be
                # double-counted in log_reward / R — confirm intended.
                self.log_reward.append(reward)

        # update histories
        self.history_fx.append(self.x.fitness)
        self.history_r.append(r)

        # update logs
        self.log_r.append(r)
        self.log_fx.append(self.x.fitness)
        self.log_reward.append(reward)

        returned_info = {"msg": "", "values": {}}
        if terminated or truncated:
            if hasattr(self, "env_type"):
                msg = "Env " + self.env_type + ". "
            else:
                msg = ""
            msg += (
                "Episode done: n=%d; obj=%d; init_obj=%d; evals=%d; max_evals=%d; steps=%d; r_min=%.1f; r_max=%.1f; r_mean=%.1f; R=%.4f"
                % (
                    self.n,
                    self.x.fitness,
                    self.init_obj,
                    self.total_evals,
                    self.max_evals,
                    self.c_step,
                    min(self.log_r),
                    max(self.log_r),
                    sum(self.log_r) / len(self.log_r),
                    sum(self.log_reward),
                )
            )
            # self.logger.info(msg)
            returned_info["msg"] = msg
            returned_info["values"] = {
                "n": int(self.n),
                "obj": int(self.x.fitness),
                "init_obj": int(self.init_obj),
                "evals": int(self.total_evals),
                "max_evals": int(self.max_evals),
                "steps": int(self.c_step),
                "r_min": float(min(self.log_r)),
                "r_max": float(max(self.log_r)),
                "r_mean": float(sum(self.log_r) / len(self.log_r)),
                "R": float(sum(self.log_reward)),
                "log_r": [int(x) for x in self.log_r],
                "log_fx": [int(x) for x in self.log_fx],
                "log_reward": [float(x) for x in self.log_reward],
            }

        # NOTE(review): gymnasium's step contract is (obs, reward, terminated,
        # truncated, info); here `truncated` is returned third — verify this
        # matches AbstractEnv's expected ordering.
        return self.get_state(), reward, truncated, terminated, returned_info

    def close(self) -> bool:
        """
        Close Env.

        No additional cleanup necessary

        Returns
        -------
        bool
            Closing confirmation
        """
        return True
class TheoryEnvDiscrete(TheoryEnv):
    """RLS environment whose mutation strength r is picked from a fixed,
    discrete portfolio given in config["action_choices"]."""

    def __init__(self, config, test_env=False):
        """Initialise the discrete variant and validate the action portfolio
        against the declared action space."""
        super().__init__(config, test_env)
        assert (
            "action_choices" in config
        ), "Error: action_choices must be specified in benchmark's config"
        assert isinstance(
            self.action_space, gym.spaces.Discrete
        ), "Error: action space must be discrete"
        n_choices = len(config["action_choices"])
        assert self.action_space.n == n_choices, (
            "Error: action space's size (%d) must be equal to the len(action_choices) (%d)"
            % (self.action_space.n, n_choices)
        )
        self.action_choices = config["action_choices"]

    def step(self, action):
        """Map the discrete action index to its r value and delegate to the
        continuous step implementation."""
        if isinstance(action, (np.ndarray, list)):
            assert len(action) == 1
            action = action[0]
        return super().step(self.action_choices[action])
| 19,187 | 29.408875 | 136 | py |
DACBench | DACBench-main/dacbench/envs/geometric.py | """
Geometric environment.
Original environment authors: Rasmus von Glahn
"""
import bisect
import math
import os
from typing import Dict, List, Tuple
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from mpl_toolkits import mplot3d
from dacbench import AbstractEnv
sns.set_theme(style="darkgrid")
class GeometricEnv(AbstractEnv):
    """
    Environment for tracing different curves that are orthogonal to each other
    Use product approach: f(t,x,y,z) = X(t,x) * Y(t,y) * Z(t,z)
    Normalize Function Value on a Scale between 0 and 1
        - min and max value for normalization over all timesteps
    """

    def __init__(self, config) -> None:
        """
        Initialize Geometric Env

        Parameters
        -------
        config : objdict
            Environment configuration
        """
        super(GeometricEnv, self).__init__(config)

        self.action_vals = config["action_values"]
        self.action_interval_mapping = config["action_interval_mapping"]
        self.realistic_trajectory = config["realistic_trajectory"]
        self.derivative_interval = config["derivative_interval"]
        self.correlation_table = config["correlation_table"]
        self.correlation_active = config["correlation_active"]
        self.correlation_depth = config["correlation_depth"]
        self.n_steps = config["cutoff"]

        self._prev_state = None
        self.action = None

        # one action dimension per configured curve
        self.n_actions = len(self.action_vals)

        # Functions
        self.functions = Functions(
            self.n_steps,
            self.n_actions,
            len(self.instance_set),
            self.correlation_active,
            self.correlation_table,
            self.correlation_depth,
            self.derivative_interval,
        )
        # pre-compute per-instance normalisation factors once, up front
        self.functions.calculate_norm_values(self.instance_set)

        # Trajectories (per-episode lists plus per-instance caches)
        self.action_trajectory = []
        self.coord_trajectory = []
        self.action_trajectory_set = {}
        self.coord_trajectory_set = {}

        self.derivative = []
        self.derivative_set = {}

        # config may override reward / state computation with callables
        if "reward_function" in config.keys():
            self.get_reward = config["reward_function"]
        else:
            self.get_reward = self.get_default_reward

        if "state_method" in config.keys():
            self.get_state = config["state_method"]
        else:
            self.get_state = self.get_default_state

    def get_optimal_policy(
        self, instance: List = None, vector_action: bool = True
    ) -> List[np.array]:
        """
        Calculates the optimal policy for an instance

        Parameters
        ----------
        instance : List, optional
            instance with information about function config.
        vector_action : bool, optional
            if True return multidim actions else return onedimensional action, by default True

        Returns
        -------
        List[np.array]
            List with entry for each timestep that holds all optimal values in an array or as int
        """
        # NOTE(review): `vector_action` is never read in this body — the
        # documented one-dimensional variant appears unimplemented.
        if not instance:
            instance = self.instance

        optimal_policy_coords = self.functions.get_coordinates(instance).transpose()
        optimal_policy = np.zeros(((self.n_steps, self.n_actions)))

        for step in range(self.n_steps):
            for dimension in range(self.n_actions):
                # discretise [-1, 1) into action_vals[dimension] buckets; the
                # comprehension variable shadows the outer `step` (harmless,
                # the outer value is restored on the next loop iteration)
                step_size = 2 / self.action_vals[dimension]
                interval = [step for step in np.arange(-1, 1, step_size)][1:]

                optimal_policy[step, dimension] = bisect.bisect_left(
                    interval, optimal_policy_coords[step, dimension]
                )

        optimal_policy = optimal_policy.astype(int)

        return optimal_policy

    def step(self, action: int):
        """
        Execute environment step

        Parameters
        ----------
        action : int
            action to execute

        Returns
        -------
        np.array, float, bool, bool, dict
            state, reward, terminated, truncated, info
        """
        self.done = super(GeometricEnv, self).step_()

        self.action = action

        coords = self.functions.get_coordinates_at_time_step(self.c_step)
        self.coord_trajectory.append(coords)
        self.action_trajectory.append(action)

        # cache trajectories per instance so they survive instance switches
        self.coord_trajectory_set[self.inst_id] = self.coord_trajectory
        self.action_trajectory_set[self.inst_id] = self.action_trajectory

        if self.realistic_trajectory:
            self.derivative = self.functions.calculate_derivative(
                self.coord_trajectory, self.c_step
            )
        else:
            self.derivative = self.functions.calculate_derivative(
                self.action_trajectory, self.c_step
            )
        self.derivative_set[self.inst_id] = self.derivative

        next_state = self.get_state(self)
        self._prev_state = next_state

        # sanity checks on the reward; the ValueError messages below are
        # partly German ("Reward zu Hoch" = "reward too high")
        reward = self.get_reward(self)
        if reward > 1:
            print(f"Instance: {self.instance}, Reward:{reward}, step: {self.c_step}")
            raise ValueError(f"Reward zu Hoch Coords: {coords}, step: {self.c_step}")
        if math.isnan(reward):
            raise ValueError(f"Reward NAN Coords: {coords}, step: {self.c_step}")

        # NOTE(review): terminated is hard-coded False; episode end is only
        # signalled via the truncated flag returned by step_().
        return next_state, reward, False, self.done, {}

    def reset(self, seed=None, options={}) -> tuple:
        """
        Resets env

        Returns
        -------
        numpy.array
            Environment state
        dict
            Meta-info
        """
        super(GeometricEnv, self).reset_(seed)

        self.functions.set_instance(self.instance, self.instance_index)

        # restore cached per-instance trajectories when resuming mid-run
        if self.c_step:
            self.action_trajectory = self.action_trajectory_set.get(self.inst_id)
            self.coord_trajectory = self.coord_trajectory_set.get(
                self.inst_id, [self.functions.get_coordinates_at_time_step(self.c_step)]
            )

            self.derivative = self.derivative_set.get(
                self.inst_id, np.zeros(self.n_actions)
            )
        self._prev_state = None

        return self.get_state(self), {}

    def get_default_reward(self, _) -> float:
        """
        Calculate euclidean distance between action vector and real position of Curve.

        Parameters
        ----------
        _ : self
            ignore

        Returns
        -------
        float
            Euclidean distance
        """
        coords, action_coords, highest_coords, lowest_actions = self._pre_reward()

        euclidean_dist = np.linalg.norm(action_coords - coords)
        max_dist = np.linalg.norm(highest_coords - lowest_actions)

        # distance normalised by the maximum possible distance, inverted so
        # that closer == higher reward; abs() guards against tiny negatives
        reward = 1 - (euclidean_dist / max_dist)
        return abs(reward)

    def get_default_state(self, _) -> np.array:
        """
        Gather state information.

        Parameters
        ----------
        _ :
            ignore param

        Returns
        -------
        np.array
            numpy array with state information
        """
        remaining_budget = self.n_steps - self.c_step
        next_state = [remaining_budget]
        next_state += [self.n_actions]

        # before the first step there is no derivative/trajectory yet
        if self.c_step == 0:
            next_state += [0 for _ in range(self.n_actions)]
            next_state += [0 for _ in range(self.n_actions)]
        else:
            next_state += list(self.derivative)
            next_state += list(self.coord_trajectory[self.c_step])

        return np.array(next_state, dtype="float32")

    def close(self) -> bool:
        """
        Close Env

        Returns
        -------
        bool
            Closing confirmation
        """
        return True

    def render(self, dimensions: List, absolute_path: str):
        """
        Multiplot for specific dimensions of benchmark with policy actions.

        Parameters
        ----------
        dimensions : List
            List of dimensions that get plotted
        absolute_path : str
            directory the figure is saved into
        """
        coordinates = self.functions.get_coordinates()

        fig, axes = plt.subplots(
            len(dimensions), sharex=True, sharey=True, figsize=(15, 4 * len(dimensions))
        )
        plt.xlabel("time steps", fontsize=32)
        plt.ylim(-1.1, 1.1)
        plt.xlim(-0.1, self.n_steps - 0.9)
        plt.xticks(np.arange(0, self.n_steps, 1), fontsize=24.0)

        for idx, dim in zip(range(len(dimensions)), dimensions):
            function_info = self.instance[dim]
            title = function_info[1] + " - Dimension " + str(dim)

            axes[idx].tick_params(axis="both", which="major", labelsize=24)
            axes[idx].set_yticks((np.arange(-1, 1.1, 2 / self.action_vals[dim])))
            axes[idx].set_title(title, size=32)
            axes[idx].plot(coordinates[dim], label="Function", marker="o", linewidth=3)[
                0
            ].axes
            axes[idx].xaxis.grid(False)
            axes[idx].vlines(x=[3.5, 7.5], ymin=-1, ymax=1, colors="white", ls="--")
            """
            axes[idx].legend(
                loc="lower right",
                framealpha=1,
                shadow=True,
                borderpad=1,
                frameon=True,
                ncol=1,
                edgecolor="0.2",
            )
            """

        fig_title = f"GeoBench-Dimensions{len(dimensions)}"
        fig.savefig(os.path.join(absolute_path, fig_title + ".jpg"))

    def render_3d_dimensions(self, dimensions: List, absolute_path: str):
        """
        Plot 2 Dimensions in 3D space

        Parameters
        ----------
        dimensions : List
            List of dimensions that get plotted. Max 2
        absolute_path : str
            directory the figures are saved into
        """
        assert len(dimensions) == 2
        # keeps the mplot3d import "used"; importing it registers the 3d
        # projection with matplotlib
        print(mplot3d)
        coordinates = self.functions.get_coordinates()

        fig = plt.figure(figsize=(10, 10))
        ax = plt.axes(projection="3d")

        x = list(range(self.n_steps))
        z = coordinates[dimensions[0]][x]
        y = coordinates[dimensions[1]][x]

        ax.set_title("3D line plot")
        ax.plot3D(x, y, z, "blue")
        ax.view_init()
        fig.savefig(os.path.join(absolute_path, "3D.jpg"))

        # second view: side-on projection
        ax.set_yticklabels([])
        ax.set_yticks([])
        ax.view_init(elev=0, azim=-90)
        fig.savefig(os.path.join(absolute_path, "3D-90side.jpg"))

    def _pre_reward(self) -> Tuple[np.ndarray, List, np.ndarray, np.ndarray]:
        """
        Prepare actions and coordinates for reward calculation.

        Returns
        -------
        Tuple[np.ndarray, List, np.ndarray, np.ndarray]
            current curve coordinates, the action's interval-mean values,
            the per-dimension upper bounds and the lowest interval values
        """
        coordinates = self.functions.get_coordinates_at_time_step(self.c_step)
        function_names = [function_info[1] for function_info in self.instance]

        # map action values to their interval mean
        mapping_list = [self.action_interval_mapping[name] for name in function_names]
        action_intervall = [
            mapping_list[count][index] for count, index in enumerate(self.action)
        ]

        highest_coords = np.ones(self.n_actions)
        lowest_actions = np.array([val[0] for val in mapping_list])

        return coordinates, action_intervall, highest_coords, lowest_actions
class Functions:
    """Curve generator backing GeometricEnv.

    Evaluates the per-dimension benchmark functions, normalises them to
    [-1, 1], caches the resulting coordinate array per instance and applies
    the optional cross-dimension correlation model.
    """

    def __init__(
        self,
        n_steps: int,
        n_actions: int,
        n_instances: int,
        correlation: bool,
        correlation_table: np.ndarray,
        correlation_depth: int,
        derivative_interval: int,
    ) -> None:
        self.instance = None
        self.instance_idx = None

        # cached coordinates of the most recently computed instance
        self.coord_array = np.zeros((n_actions, n_steps))
        self.calculated_instance = None

        self.norm_calculated = False
        self.norm_values = np.ones((n_instances, n_actions))

        self.correlation = correlation
        self.correlation_table = correlation_table
        self.correlation_changes = np.zeros(n_actions)
        self.correlation_depth = correlation_depth

        self.n_steps = n_steps
        self.n_actions = n_actions
        self.derivative_interval = derivative_interval

    def set_instance(self, instance: List, instance_index):
        """update instance"""
        self.instance = instance
        self.instance_idx = instance_index

    def get_coordinates(self, instance: List = None) -> List[np.array]:
        """
        Calculates coordinates for instance over all time_steps.
        The values will change if correlation is applied and not optimal actions are taken.

        Parameters
        ----------
        instance : List, optional
            Instance that holds information about functions, by default None

        Returns
        -------
        List[np.array]
            Index of List refers to time step
        """
        if not instance:
            instance = self.instance
        assert instance

        if self.instance_idx == self.calculated_instance:
            # cache hit: reuse the previously computed coordinate array
            optimal_coords = self.coord_array
        else:
            optimal_coords = np.zeros((self.n_actions, self.n_steps))
            # time steps are 1-based inside get_coordinates_at_time_step
            for time_step in range(self.n_steps):
                optimal_coords[:, time_step] = self.get_coordinates_at_time_step(
                    time_step + 1
                )

            if self.norm_calculated:
                self.coord_array = optimal_coords
                self.calculated_instance = self.instance_idx

        return optimal_coords

    def get_coordinates_at_time_step(self, time_step: int) -> np.array:
        """
        Calculate coordiantes at time_step.
        Apply correlation.

        Parameters
        ----------
        time_step : int
            Time step of functions (1-based)

        Returns
        -------
        np.array
            array of function values at timestep
        """
        if self.instance_idx == self.calculated_instance:
            value_array = self.coord_array[:, time_step - 1]
        else:
            value_array = np.zeros(self.n_actions)
            for index, function_info in enumerate(self.instance):
                value_array[index] = self._calculate_function_value(
                    time_step, function_info, index
                )
        if self.correlation and time_step > 1 and self.norm_calculated:
            value_array = self._add_correlation(value_array, time_step)

        return value_array

    def calculate_derivative(self, trajectory: List, c_step: int) -> np.array:
        """
        Calculate derivatives of each dimension, based on trajectories.

        Parameters
        ----------
        trajectory: List
            List of actions or coordinates already taken
        c_step: int
            current timestep

        Returns
        -------
        np.array
            derivatives for each dimension
        """
        if c_step > 1:
            # average the finite differences over the last
            # `derivative_interval` steps
            upper_bound = c_step + 1
            lower_bound = max(upper_bound - self.derivative_interval, 1)

            derrivative = np.zeros(self.n_actions)
            for step in range(lower_bound, upper_bound):
                der = np.subtract(
                    np.array(trajectory[step], dtype=np.float32),
                    np.array(trajectory[step - 1], dtype=np.float32),
                )
                derrivative = np.add(derrivative, der)

            derrivative /= upper_bound - lower_bound

        elif c_step == 1:
            derrivative = np.subtract(
                np.array(trajectory[c_step], dtype=np.float32),
                np.array(trajectory[c_step - 1], dtype=np.float32),
            )

        else:
            derrivative = np.zeros(self.n_actions)

        return derrivative

    def calculate_norm_values(self, instance_set: Dict):
        """
        Norm Functions to Intervall between -1 and 1
        """
        # per (instance, dimension): largest absolute function value becomes
        # the normalisation factor
        for key, instance in instance_set.items():
            self.set_instance(instance, key)
            instance_values = self.get_coordinates()

            for dim, function_values in enumerate(instance_values):
                if abs(min(function_values)) > max(function_values):
                    norm_factor = abs(min(function_values))
                else:
                    norm_factor = max(function_values)

                self.norm_values[key][dim] = norm_factor

        self.norm_calculated = True

    def _calculate_function_value(
        self, time_step: int, function_infos: List, func_idx: int
    ) -> float:
        """
        Call different functions with their speicifc parameters and norm them.

        Parameters
        ----------
        time_step: int
            time step for each function
        function_infos : List
            Consists of instance index, function name and the coefficients
        func_idx : int
            dimension index used to look up the normalisation factor

        Returns
        -------
        float
            coordinate in dimension of function
        """
        assert self.instance_idx == function_infos[0]

        function_name = function_infos[1]
        coefficients = function_infos[2:]

        if self.norm_calculated:
            norm_value = self.norm_values[self.instance_idx, func_idx]
            # guard against division by zero for all-zero dimensions
            if norm_value == 0:
                norm_value = 1
        else:
            norm_value = 1

        function_value = 0

        if "sigmoid" == function_name:
            function_value = self._sigmoid(time_step, coefficients[0], coefficients[1])
        elif "linear" == function_name:
            function_value = self._linear(time_step, coefficients[0], coefficients[1])
        elif "constant" == function_name:
            function_value = self._constant(coefficients[0])
        elif "logarithmic" == function_name:
            function_value = self._logarithmic(time_step, coefficients[0])
        elif "cubic" in function_name:
            function_value = self._cubic(
                time_step, coefficients[0], coefficients[1], coefficients[2]
            )
        elif "parabel" in function_name:
            function_value = self._parabel(
                time_step, coefficients[0], coefficients[1], coefficients[2]
            )
        elif "sinus" in function_name:
            function_value = self._sinus(time_step, coefficients[0])

        function_value = np.round(function_value / norm_value, 5)

        # after normalisation, clip into the nominal [-1, 1] range
        if self.norm_calculated:
            function_value = max(min(function_value, 1), -1)

        return function_value

    def _add_correlation(self, value_array: np.ndarray, time_step: int):
        """
        Adds correlation between dimensions but clips at -1 and 1.

        Correlation table holds numbers between -1 and 1.
        e.g. correlation_table[0][2] = 0.5 if dimension 1 changes dimension 3 changes about 50% of dimension one

        Parameters
        ----------
        value_array : np.ndarray
            uncorrelated coordinates at time_step
        time_step : int
            current (1-based) time step
        """
        prev_values = self.coord_array[:, time_step - 1]
        diff_values = value_array - prev_values

        # NOTE(review): this initial [] is dead — new_values is reassigned
        # unconditionally below.
        new_values = []
        for idx, diff in enumerate(diff_values):
            self._apply_correlation_update(idx, diff, self.correlation_depth)

        new_values = self.correlation_changes + value_array
        clipped_values = np.clip(new_values, a_min=-1, a_max=1)
        self.correlation_changes = np.zeros(self.n_actions)

        return clipped_values

    def _apply_correlation_update(self, idx: int, diff: float, depth):
        """
        Recursive function for correlation updates
        Call function recursively till depth is 0 or diff is too small.
        """
        # NOTE(review): `diff < 0.001` is true for every negative diff, so
        # decreases never propagate correlation — `abs(diff) < 0.001` looks
        # intended. Changing it would alter benchmark dynamics; confirm first.
        if not depth or diff < 0.001:
            return

        # NOTE(review): table[:][idx] copies the rows and then takes row idx
        # (identical to table[idx]); if a column was meant, this should be
        # table[:, idx] — verify against the correlation table's orientation.
        for coeff_idx, corr_coeff in enumerate(self.correlation_table[:][idx]):
            change = corr_coeff * diff
            self.correlation_changes[coeff_idx] += change
            self._apply_correlation_update(coeff_idx, change, depth - 1)

    def _sigmoid(self, t: float, scaling: float, inflection: float):
        """Simple sigmoid function"""
        return 1 / (1 + np.exp(-scaling * (t - inflection)))

    def _linear(self, t: float, a: float, b: float):
        """Linear function"""
        return a * t + b

    def _parabel(self, t: float, sig: int, x_int: int, y_int: int):
        """Parabel function"""
        return sig * (t - x_int) ** 2 + y_int

    def _cubic(self, t: float, sig: int, x_int: int, y_int: int):
        """cubic function"""
        return sig * (t - x_int) ** 3 + y_int

    def _logarithmic(self, t: float, a: float):
        """Logarithmic function"""
        if t != 0:
            return a * np.log(t)
        else:
            # large sentinel at t=0 (log undefined); shrinks after
            # normalisation
            return 1000

    def _constant(self, c: float):
        """Constant function"""
        return c

    def _sinus(self, t: float, scale: float):
        """Sinus function"""
        return np.sin(scale * t)
| 20,713 | 31.164596 | 112 | py |
DACBench | DACBench-main/dacbench/envs/sgd.py | import json
import math
import numbers
import random
import warnings
from enum import IntEnum, auto
from functools import reduce
import numpy as np
import torch
from backpack import backpack, extend
from backpack.extensions import BatchGrad
from torchvision import datasets, transforms
from dacbench import AbstractEnv
warnings.filterwarnings("ignore")
def reward_range(frange):
    """Decorator factory attaching a reward range tuple to a reward function
    as ``f.frange``; the function itself is returned unchanged."""

    def attach(func):
        func.frange = frange
        return func

    return attach
class Reward(IntEnum):
    """Enumeration of the supported reward signals.

    Each member can be used as a decorator exactly once to register the
    function that computes that reward (stored as ``member.func``).
    """

    TrainingLoss = auto()
    ValidationLoss = auto()
    LogTrainingLoss = auto()
    LogValidationLoss = auto()
    DiffTraining = auto()
    DiffValidation = auto()
    LogDiffTraining = auto()
    LogDiffValidation = auto()
    FullTraining = auto()

    def __call__(self, f):
        """Bind f as this reward's implementation; rebinding is an error."""
        if hasattr(self, "func"):
            raise ValueError("Can not assign the same reward to a different function!")
        self.func = f
        return f
class SGDEnv(AbstractEnv):
"""
Environment to control the learning rate of adam
"""
    def __init__(self, config):
        """
        Initialize SGD Env
        Parameters
        -------
        config : objdict
            Environment configuration (batch sizes, reward type, optimizer
            name, Adam/RMSprop/momentum hyperparameters, feature selection,
            crash handling, loss functions, ...)
        """
        super(SGDEnv, self).__init__(config)
        self.batch_size = config.training_batch_size
        self.validation_batch_size = config.validation_batch_size
        self.no_cuda = config.no_cuda
        self.current_batch_size = config.training_batch_size
        self.on_features = config.features
        self.cd_paper_reconstruction = config.cd_paper_reconstruction
        self.cd_bias_correction = config.cd_bias_correction
        self.crashed = False
        self.terminate_on_crash = config.terminate_on_crash
        self.crash_penalty = config.crash_penalty
        # Accept either a Reward member or its name as a string.
        if isinstance(config.reward_type, Reward):
            self.reward_type = config.reward_type
        elif isinstance(config.reward_type, str):
            try:
                self.reward_type = getattr(Reward, config.reward_type)
            except AttributeError:
                raise ValueError(f"{config.reward_type} is not a valid reward type!")
        else:
            raise ValueError(f"Type {type(config.reward_type)} is not valid!")
        self.use_cuda = not self.no_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if self.use_cuda else "cpu")
        self.training_validation_ratio = config.train_validation_ratio
        self.dataloader_shuffle = config.dataloader_shuffle
        # Datasets/loaders are created lazily in reset() per instance.
        # self.test_dataset = None
        self.train_dataset = None
        self.validation_dataset = None
        self.train_loader = None
        # self.test_loader = None
        self.validation_loader = None
        self.train_loader_it = None
        self.validation_loader_it = None
        self.train_batch_index = 0
        self.epoch_index = 0
        self.current_training_loss = None
        self.loss_batch = None
        self.prev_training_loss = None
        # Validation loss is cached; ``calculated`` marks cache validity.
        self._current_validation_loss = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self._current_validation_loss.calculated = False
        self.prev_validation_loss = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.model = None
        self.val_model = None
        # TODO:
        """
        TODO: Samuel Mueller (PhD student in our group) also uses backpack and has ran into a similar memory leak.
        He solved it calling this custom made RECURSIVE memory_cleanup function:
        # from backpack import memory_cleanup
        # def recursive_backpack_memory_cleanup(module: torch.nn.Module):
        #   memory_cleanup(module)
        #  for m in module.modules():
        #   memory_cleanup(m)
        (calling this after computing the training loss/gradients and after validation loss should suffice)
        """
        self.parameter_count = 0
        self.layer_sizes = []
        # The training loss is extended with backpack to expose per-sample
        # gradients; the validation loss stays a plain torch loss.
        self.loss_function = config.loss_function(**config.loss_function_kwargs)
        self.loss_function = extend(self.loss_function)
        self.val_loss_function = config.loss_function(**config.val_loss_function_kwargs)
        self.initial_lr = config.lr * torch.ones(
            1, device=self.device, requires_grad=False
        )
        self.current_lr = config.lr * torch.ones(
            1, device=self.device, requires_grad=False
        )
        self.optimizer_name = config.optimizer
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.epsilon = config.epsilon
        # RMSprop parameters
        # NOTE(review): duplicate assignment -- beta2 was already set above.
        self.beta2 = config.beta2
        self.m = 0
        self.v = 0
        # Momentum parameters
        self.sgd_momentum_v = 0
        self.sgd_rho = 0.9
        self.clip_grad = config.clip_grad
        self.t = 0
        self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
        self.prev_direction = None
        self.current_direction = None
        # Exponentially discounted feature statistics (see CD paper setup).
        self.predictiveChangeVarDiscountedAverage = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.predictiveChangeVarUncertainty = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.lossVarDiscountedAverage = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.lossVarUncertainty = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.discount_factor = config.discount_factor
        self.firstOrderMomentum = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.secondOrderMomentum = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        # Pick the update-direction routine matching the configured optimizer.
        if self.optimizer_name == "adam":
            self.get_optimizer_direction = self.get_adam_direction
        elif self.optimizer_name == "rmsprop":
            self.get_optimizer_direction = self.get_rmsprop_direction
        elif self.optimizer_name == "momentum":
            self.get_optimizer_direction = self.get_momentum_direction
        else:
            raise NotImplementedError
        # Reward and state can be overridden by the config.
        if "reward_function" in config.keys():
            self._get_reward = config["reward_function"]
        else:
            self._get_reward = self.reward_type.func
        if "state_method" in config.keys():
            self.get_state = config["state_method"]
        else:
            self.get_state = self.get_default_state
        self.reward_range = self.reward_type.func.frange
    def get_reward(self):
        """Return the reward computed by the configured reward function."""
        return self._get_reward(self)
    @reward_range([-(10**9), 0])
    @Reward.TrainingLoss
    def get_training_reward(self):
        """Negative current mini-batch training loss."""
        return -self.current_training_loss.item()
    @reward_range([-(10**9), 0])
    @Reward.ValidationLoss
    def get_validation_reward(self):
        """Negative current validation loss."""
        return -self.current_validation_loss.item()
    @reward_range([-(10**9), (10**9)])
    @Reward.LogTrainingLoss
    def get_log_training_reward(self):
        """Negative log of the current mini-batch training loss."""
        return -torch.log(self.current_training_loss).item()
    @reward_range([-(10**9), (10**9)])
    @Reward.LogValidationLoss
    def get_log_validation_reward(self):
        """Negative log of the current validation loss."""
        return -torch.log(self.current_validation_loss).item()
    @reward_range([-(10**9), (10**9)])
    @Reward.LogDiffTraining
    def get_log_diff_training_reward(self):
        """Negative change in log training loss since the previous step."""
        return -(
            torch.log(self.current_training_loss) - torch.log(self.prev_training_loss)
        ).item()
    @reward_range([-(10**9), (10**9)])
    @Reward.LogDiffValidation
    def get_log_diff_validation_reward(self):
        """Negative change in log validation loss since the previous step."""
        return -(
            torch.log(self.current_validation_loss)
            - torch.log(self.prev_validation_loss)
        ).item()
    @reward_range([-(10**9), (10**9)])
    @Reward.DiffTraining
    def get_diff_training_reward(self):
        """Raw change in training loss since the previous step (not negated)."""
        return (self.current_training_loss - self.prev_training_loss).item()
    @reward_range([-(10**9), (10**9)])
    @Reward.DiffValidation
    def get_diff_validation_reward(self):
        """Raw change in validation loss since the previous step (not negated)."""
        return (self.current_validation_loss - self.prev_validation_loss).item()
    @reward_range([-(10**9), 0])
    @Reward.FullTraining
    def get_full_training_reward(self):
        """Negative mean loss over the entire training set (expensive)."""
        return -self._get_full_training_loss(loader=self.train_loader).item()
    def get_full_training_loss(self):
        """Positive full-training-set loss (negation of the full-training reward)."""
        return -self.get_full_training_reward()
    @property
    def crash(self):
        """Handle a crash (NaN action/direction/reward/state).

        Marks the env as crashed and returns a full transition tuple with the
        crash penalty; the episode terminates if configured to do so (or is
        truncated if the step budget is exhausted anyway).
        """
        self.crashed = True
        truncated = False
        terminated = False
        if self.c_step >= self.n_steps:
            truncated = True
        else:
            terminated = self.terminate_on_crash
        return self.get_state(self), self.crash_penalty, terminated, truncated, {}
    def seed(self, seed=None, seed_action_space=False):
        """
        Set rng seed
        Parameters
        ----------
        seed:
            seed for rng
        seed_action_space: bool, default False
            if to seed the action space as well
        Returns
        -------
        list
            single-element list containing the seed
        """
        (seed,) = super().seed(seed, seed_action_space)
        if seed is not None:
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            torch.cuda.manual_seed_all(seed)
            # disable cuDNN autotuning and force deterministic kernels so
            # seeded runs are reproducible
            torch.backends.cudnn.benchmark = False
            torch.backends.cudnn.deterministic = True
        return [seed]
    def step(self, action):
        """
        Execute environment step
        Parameters
        ----------
        action : list
            action to execute (the learning rate; scalar or one-element container)
        Returns
        -------
        np.array, float, bool, bool, dict
            state, reward, terminated, truncated, info
        """
        truncated = super(SGDEnv, self).step_()
        self.step_count += 1
        index = 0
        # Normalize the action to a plain number (tensors via .item(),
        # sequences via the first element).
        if not isinstance(action, int) and not isinstance(action, float):
            action = action.item()
        if not isinstance(action, numbers.Number):
            action = action[0]
        # A NaN action, direction, reward or state aborts via the crash handler.
        if np.isnan(action):
            return self.crash
        new_lr = torch.Tensor([action]).to(self.device)
        self.current_lr = new_lr
        direction = self.get_optimizer_direction()
        if np.isnan(direction).any():
            return self.crash
        self.current_direction = direction
        # Apply the update lr * direction to the flattened parameter vector,
        # slicing it back into each layer's shape.
        delta_w = torch.mul(new_lr, direction)
        for i, p in enumerate(self.model.parameters()):
            layer_size = self.layer_sizes[i]
            p.data = p.data - delta_w[index : index + layer_size].reshape(
                shape=p.data.shape
            )
            index += layer_size
        self.model.zero_grad()
        self.prev_training_loss = self.current_training_loss
        if self._current_validation_loss.calculated:
            self.prev_validation_loss = self.current_validation_loss
        self.train_network()
        reward = self.get_reward()
        if np.isnan(reward):
            return self.crash
        state = self.get_state(self)
        for value in state.values():
            if np.isnan(value):
                return self.crash
        return state, reward, False, truncated, {}
    def _architecture_constructor(self, arch_str):
        """Parse an architecture string into a model factory.

        ``arch_str`` is a ``-``-separated list of torch.nn module specs such
        as ``"Conv2d(1, 20, 3, 1, 1)-ReLU-Linear(500, 10)"``; returns a
        zero-argument callable that builds a fresh ``torch.nn.Sequential``.
        """
        layer_specs = []
        layer_strs = arch_str.split("-")
        for layer_str in layer_strs:
            idx = layer_str.find("(")
            if idx == -1:
                # no parentheses -> module taking no constructor arguments
                nn_module_name = layer_str
                vargs = []
            else:
                # parse "Name(a, b, ...)" by wrapping the args in a JSON list
                nn_module_name = layer_str[:idx]
                vargs_json_str = '{"tmp": [' + layer_str[idx + 1 : -1] + "]}"
                vargs = json.loads(vargs_json_str)["tmp"]
            layer_specs.append((getattr(torch.nn, nn_module_name), vargs))
        def model_constructor():
            layers = [cls(*vargs) for cls, vargs in layer_specs]
            return torch.nn.Sequential(*layers)
        return model_constructor
    def reset(self, seed=None, options={}):
        """
        Reset environment
        Builds the model/dataset for the current instance, re-seeds torch,
        and resets all optimizer state and feature statistics.
        NOTE(review): ``options={}`` is a mutable default argument; it is
        never mutated here, but consider ``options=None``.
        Returns
        -------
        np.array
            Environment state
        """
        super(SGDEnv, self).reset_(seed)
        dataset = self.instance[0]
        instance_seed = self.instance[1]
        construct_model = self._architecture_constructor(self.instance[2])
        self.n_steps = self.instance[3]
        dataset_size = self.instance[4]
        self.crashed = False
        self.seed(instance_seed)
        self.model = construct_model().to(self.device)
        self.val_model = construct_model().to(self.device)
        def init_weights(m):
            # NOTE(review): torch.nn.init.xavier_normal is deprecated in
            # favor of the in-place xavier_normal_ -- confirm before changing.
            if type(m) == torch.nn.Linear or type(m) == torch.nn.Conv2d:
                torch.nn.init.xavier_normal(m.weight)
                m.bias.data.fill_(0.0)
        if self.cd_paper_reconstruction:
            self.model.apply(init_weights)
        train_dataloader_args = {
            "batch_size": self.batch_size,
            "drop_last": True,
            "shuffle": self.dataloader_shuffle,
        }
        validation_dataloader_args = {
            "batch_size": self.validation_batch_size,
            "drop_last": True,
            "shuffle": False,
        }  # SA: shuffling empty data loader causes exception
        if self.use_cuda:
            param = {"num_workers": 1, "pin_memory": True}
            train_dataloader_args.update(param)
            validation_dataloader_args.update(param)
        if dataset == "MNIST":
            transform = transforms.Compose(
                [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
            )
            train_dataset = datasets.MNIST(
                "../data", train=True, download=True, transform=transform
            )
            # self.test_dataset = datasets.MNIST('../data', train=False, transform=transform)
        elif dataset == "CIFAR":
            transform = transforms.Compose(
                [
                    transforms.ToTensor(),
                    transforms.Normalize(
                        (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
                    ),
                ]
            )
            train_dataset = datasets.CIFAR10(
                "../data", train=True, download=True, transform=transform
            )
            # self.test_dataset = datasets.MNIST('../data', train=False, transform=transform)
        else:
            raise NotImplementedError
        if dataset_size is not None:
            train_dataset = torch.utils.data.Subset(
                train_dataset, range(0, dataset_size)
            )
        # Split the (possibly truncated) dataset into train/validation parts.
        training_dataset_limit = math.floor(
            len(train_dataset) * self.training_validation_ratio
        )
        validation_dataset_limit = len(train_dataset)
        self.train_dataset = torch.utils.data.Subset(
            train_dataset, range(0, training_dataset_limit - 1)
        )
        self.validation_dataset = torch.utils.data.Subset(
            train_dataset, range(training_dataset_limit, validation_dataset_limit)
        )
        self.train_loader = torch.utils.data.DataLoader(
            self.train_dataset, **train_dataloader_args
        )
        # self.test_loader = torch.utils.data.DataLoader(self.test_dataset, **train_dataloader_args)
        self.validation_loader = torch.utils.data.DataLoader(
            self.validation_dataset, **validation_dataloader_args
        )
        self.train_batch_index = 0
        self.epoch_index = 0
        self.train_loader_it = iter(self.train_loader)
        self.validation_loader_it = iter(self.validation_loader)
        # Record per-layer parameter counts for flattening/unflattening.
        self.parameter_count = 0
        self.layer_sizes = []
        for p in self.model.parameters():
            layer_size = reduce(lambda x, y: x * y, p.shape)
            self.layer_sizes.append(layer_size)
            self.parameter_count += layer_size
        self.model = extend(self.model)
        self.model.zero_grad()
        self.model.train()
        self.val_model.eval()
        self.current_training_loss = None
        self.loss_batch = None
        # Momentum parameters
        self.m = 0
        self.v = 0
        self.sgd_momentum_v = 0
        self.t = 0
        self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
        self.current_lr = self.initial_lr
        self.prev_direction = torch.zeros(
            (self.parameter_count,), device=self.device, requires_grad=False
        )
        self.current_direction = torch.zeros(
            (self.parameter_count,), device=self.device, requires_grad=False
        )
        # Reset discounted feature statistics.
        self.predictiveChangeVarDiscountedAverage = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.predictiveChangeVarUncertainty = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.lossVarDiscountedAverage = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.lossVarUncertainty = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.firstOrderMomentum = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self.secondOrderMomentum = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self._current_validation_loss = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        self._current_validation_loss.calculated = False
        self.prev_validation_loss = torch.zeros(
            1, device=self.device, requires_grad=False
        )
        # Run the first training batch so losses/gradients exist for the state.
        self.train_network()
        return self.get_state(self), {}
    def set_writer(self, writer):
        """Attach an external logging writer object to the environment."""
        self.writer = writer
    def close(self):
        """
        No additional cleanup necessary
        Returns
        -------
        bool
            Cleanup flag (always True)
        """
        return True
    def render(self, mode: str = "human"):
        """
        Render env in human mode
        Parameters
        ----------
        mode : str
            Execution mode (only "human" is accepted; rendering is a no-op)
        """
        if mode != "human":
            raise NotImplementedError
        pass
    def get_default_state(self, _):
        """
        Gather state description
        Only features listed in ``self.on_features`` are computed and
        included; when crashed, loss entries are reported as 0.0.
        Returns
        -------
        dict
            Environment state
        """
        # Refresh (clipped) gradients and optimizer moment estimates first,
        # since the features below are derived from them.
        self.gradients = self._get_gradients()
        self.gradients = self.gradients.clip(*self.clip_grad)
        (
            self.firstOrderMomentum,
            self.secondOrderMomentum,
            self.sgdMomentum,
        ) = self._get_momentum(self.gradients)
        if (
            "predictiveChangeVarDiscountedAverage" in self.on_features
            or "predictiveChangeVarUncertainty" in self.on_features
        ):
            (
                predictiveChangeVarDiscountedAverage,
                predictiveChangeVarUncertainty,
            ) = self._get_predictive_change_features(self.current_lr)
        if (
            "lossVarDiscountedAverage" in self.on_features
            or "lossVarUncertainty" in self.on_features
        ):
            lossVarDiscountedAverage, lossVarUncertainty = self._get_loss_features()
        if "alignment" in self.on_features:
            alignment = self._get_alignment()
        state = {}
        if "predictiveChangeVarDiscountedAverage" in self.on_features:
            state[
                "predictiveChangeVarDiscountedAverage"
            ] = predictiveChangeVarDiscountedAverage.item()
        if "predictiveChangeVarUncertainty" in self.on_features:
            state[
                "predictiveChangeVarUncertainty"
            ] = predictiveChangeVarUncertainty.item()
        if "lossVarDiscountedAverage" in self.on_features:
            state["lossVarDiscountedAverage"] = lossVarDiscountedAverage.item()
        if "lossVarUncertainty" in self.on_features:
            state["lossVarUncertainty"] = lossVarUncertainty.item()
        if "currentLR" in self.on_features:
            state["currentLR"] = self.current_lr.item()
        if "trainingLoss" in self.on_features:
            if self.crashed:
                state["trainingLoss"] = 0.0
            else:
                state["trainingLoss"] = self.current_training_loss.item()
        if "validationLoss" in self.on_features:
            if self.crashed:
                state["validationLoss"] = 0.0
            else:
                state["validationLoss"] = self.current_validation_loss.item()
        if "step" in self.on_features:
            state["step"] = self.step_count.item()
        if "alignment" in self.on_features:
            state["alignment"] = alignment.item()
        if "crashed" in self.on_features:
            state["crashed"] = self.crashed
        return state
    def _train_batch_(self):
        """Forward/backward pass on the next training mini-batch.

        Caches the per-sample loss vector (``loss_batch``), the detached
        mean loss, and invalidates the cached validation loss.
        Raises StopIteration when the train loader is exhausted.
        """
        (data, target) = next(self.train_loader_it)
        data, target = data.to(self.device), target.to(self.device)
        self.current_batch_size = data.size()[0]
        output = self.model(data)
        loss = self.loss_function(output, target)
        # backpack's BatchGrad makes per-sample gradients available as
        # param.grad_batch (used by the predictive-change features).
        with backpack(BatchGrad()):
            loss.mean().backward()
        loss_value = loss.mean()
        self.loss_batch = loss
        self.current_training_loss = torch.unsqueeze(loss_value.detach(), dim=0)
        self.train_batch_index += 1
        self._current_validation_loss.calculated = False
    def train_network(self):
        """Train on the next batch, restarting the loader at epoch end."""
        try:
            self._train_batch_()
        except StopIteration:
            # epoch finished -- restart the iterator and count the epoch
            self.train_batch_index = 0
            self.epoch_index += 1
            self.train_loader_it = iter(self.train_loader)
            self._train_batch_()
def _get_full_training_loss(self, loader):
for target_param, param in zip(
self.val_model.parameters(), self.model.parameters()
):
target_param.data.copy_(param.data)
loss = torch.zeros(1, device=self.device, requires_grad=False)
with torch.no_grad():
for data, target in loader:
data, target = data.to(self.device), target.to(self.device)
output = self.val_model(data)
loss += self.val_loss_function(output, target).sum().detach().detach()
loss /= len(loader.dataset)
return loss
    @property
    def current_validation_loss(self):
        """Validation loss for the current weights, computed lazily and cached
        until the next training step invalidates it."""
        if not self._current_validation_loss.calculated:
            self._current_validation_loss = self._get_validation_loss()
            self._current_validation_loss.calculated = True
        return self._current_validation_loss
    def _get_validation_loss_(self):
        """Mean loss of ``val_model`` on the next validation mini-batch.

        Raises StopIteration when the validation loader is exhausted.
        """
        with torch.no_grad():
            (data, target) = next(self.validation_loader_it)
            data, target = data.to(self.device), target.to(self.device)
            output = self.val_model(data)
            validation_loss = self.val_loss_function(output, target).mean()
            validation_loss = torch.unsqueeze(validation_loss.detach(), dim=0)
        return validation_loss
    def _get_validation_loss(self):
        """Copy current weights into ``val_model`` and evaluate one validation
        batch, restarting the validation loader if it is exhausted."""
        for target_param, param in zip(
            self.val_model.parameters(), self.model.parameters()
        ):
            target_param.data.copy_(param.data)
        try:
            validation_loss = self._get_validation_loss_()
        except StopIteration:
            self.validation_loader_it = iter(self.validation_loader)
            validation_loss = self._get_validation_loss_()
        return validation_loss
    def _get_gradients(self):
        """Return all model gradients flattened into one 1-D tensor
        (parameters without gradients are skipped)."""
        gradients = []
        for p in self.model.parameters():
            if p.grad is None:
                continue
            gradients.append(p.grad.flatten())
        gradients = torch.cat(gradients, dim=0)
        return gradients
    def _get_momentum(self, gradients):
        """Advance the moment estimates by one step.

        Updates the Adam-style first (``m``) and second (``v``) moments and
        the plain SGD momentum accumulator; returns the bias-corrected first
        and second moments and the raw momentum accumulator.
        """
        self.t += 1
        self.m = self.beta1 * self.m + (1 - self.beta1) * gradients
        self.v = self.beta2 * self.v + (1 - self.beta2) * torch.square(gradients)
        bias_corrected_m = self.m / (1 - self.beta1**self.t)
        bias_corrected_v = self.v / (1 - self.beta2**self.t)
        self.sgd_momentum_v = self.sgd_rho * self.sgd_momentum_v + gradients
        return bias_corrected_m, bias_corrected_v, self.sgd_momentum_v
    def get_adam_direction(self):
        """Adam update direction: m_hat / (sqrt(v_hat) + epsilon)."""
        return self.firstOrderMomentum / (
            torch.sqrt(self.secondOrderMomentum) + self.epsilon
        )
    def get_rmsprop_direction(self):
        """RMSprop update direction: grad / (sqrt(second moment) + epsilon)."""
        return self.gradients / (torch.sqrt(self.secondOrderMomentum) + self.epsilon)
    def get_momentum_direction(self):
        """SGD-with-momentum update direction (the velocity accumulator)."""
        return self.sgd_momentum_v
    def _get_loss_features(self):
        """Discounted average and uncertainty of the log per-sample loss
        variance; both are bias-corrected if configured. Returns zeros when
        the environment has crashed."""
        if self.crashed:
            return torch.tensor(0.0), torch.tensor(0.0)
        bias_correction = (
            (1 - self.discount_factor ** (self.c_step + 1))
            if self.cd_bias_correction
            else 1
        )
        with torch.no_grad():
            loss_var = torch.log(torch.var(self.loss_batch))
            # exponential moving average of the log loss variance ...
            self.lossVarDiscountedAverage = (
                self.discount_factor * self.lossVarDiscountedAverage
                + (1 - self.discount_factor) * loss_var
            )
            # ... and of its squared deviation (uncertainty)
            self.lossVarUncertainty = (
                self.discount_factor * self.lossVarUncertainty
                + (1 - self.discount_factor)
                * (loss_var - self.lossVarDiscountedAverage / bias_correction) ** 2
            )
        return (
            self.lossVarDiscountedAverage / bias_correction,
            self.lossVarUncertainty / bias_correction,
        )
    def _get_predictive_change_features(self, lr):
        """Discounted average/uncertainty of the predicted per-sample loss
        change for learning rate ``lr`` (log variance of the first-order
        loss change -grad_i . update). Returns zeros when crashed."""
        if self.crashed:
            return torch.tensor(0.0), torch.tensor(0.0)
        bias_correction = (
            (1 - self.discount_factor ** (self.c_step + 1))
            if self.cd_bias_correction
            else 1
        )
        # Stack per-sample gradients (from backpack's grad_batch) into a
        # (batch, n_params) matrix.
        batch_gradients = []
        for i, (name, param) in enumerate(self.model.named_parameters()):
            grad_batch = param.grad_batch.reshape(
                self.current_batch_size, self.layer_sizes[i]
            )
            batch_gradients.append(grad_batch)
        batch_gradients = torch.cat(batch_gradients, dim=1)
        update_value = torch.mul(lr, self.get_optimizer_direction())
        predictive_change = torch.log(
            torch.var(-1 * torch.matmul(batch_gradients, update_value))
        )
        self.predictiveChangeVarDiscountedAverage = (
            self.discount_factor * self.predictiveChangeVarDiscountedAverage
            + (1 - self.discount_factor) * predictive_change
        )
        self.predictiveChangeVarUncertainty = (
            self.discount_factor * self.predictiveChangeVarUncertainty
            + (1 - self.discount_factor)
            * (
                predictive_change
                - self.predictiveChangeVarDiscountedAverage / bias_correction
            )
            ** 2
        )
        return (
            self.predictiveChangeVarDiscountedAverage / bias_correction,
            self.predictiveChangeVarUncertainty / bias_correction,
        )
    def _get_alignment(self):
        """Mean sign agreement between the previous and current update
        directions (in [-1, 1]); also advances ``prev_direction``."""
        if self.crashed:
            return torch.tensor(0.0)
        alignment = torch.mean(
            torch.sign(torch.mul(self.prev_direction, self.current_direction))
        )
        alignment = torch.unsqueeze(alignment, dim=0)
        self.prev_direction = self.current_direction
        return alignment
def generate_instance_file(self, file_name, mode="test", n=100):
header = ["ID", "dataset", "architecture", "seed", "steps"]
# dataset name, architecture, dataset size, sample dimension, number of max pool layers, hidden layers, test architecture convolutional layers
architectures = [
(
"MNIST",
"Conv2d(1, {0}, 3, 1, 1)-MaxPool2d(2, 2)-Conv2d({0}, {1}, 3, 1, 1)-MaxPool2d(2, 2)-Conv2d({1}, {2}, 3, 1, 1)-ReLU-Flatten-Linear({3}, 10)-LogSoftmax(1)",
60000,
28,
2,
3,
[20, 50, 500],
),
(
"CIFAR",
"Conv2d(3, {0}, 3, 1, 1)-MaxPool2d(2, 2)-ReLU-Conv2d({0}, {1}, 3, 1, 1)-ReLU-MaxPool2d(2, 2)-Conv2d({1}, {2}, 3, 1, 1)-ReLU-MaxPool2d(2, 2)-Conv2d({2}, {3}, 3, 1, 1)-ReLU-Flatten-Linear({4}, 10)-LogSoftmax(1)",
60000,
32,
3,
4,
[32, 32, 64, 64],
),
]
if mode == "test":
seed_list = [random.randrange(start=0, stop=1e9) for _ in range(n)]
for i in range(len(architectures)):
fname = file_name + "_" + architectures[i][0].lower() + ".csv"
steps = int(1e8)
conv = architectures[i][6]
hidden_layers = architectures[i][5]
sample_size = architectures[i][3]
pool_layer_count = architectures[i][4]
linear_layer_size = conv[-1] * pow(
sample_size / pow(2, pool_layer_count), 2
)
linear_layer_size = int(round(linear_layer_size))
dataset = architectures[i][0]
if hidden_layers == 3:
architecture = architectures[i][1].format(
conv[0], conv[1], conv[2], linear_layer_size
)
else:
architecture = architectures[i][1].format(
conv[0], conv[1], conv[2], conv[3], linear_layer_size
)
# args = conv
# args.append(linear_layer_size)
# # architecture = architectures[i][1].format(**conv)
# args = {0: conv[0], 1: conv[1], 2: conv[2], 3: linear_layer_size}
# architecture = architectures[i][1].format(**args)
with open(fname, "w", encoding="UTF8") as f:
for h in header:
f.write(h + ";")
f.write("\n")
for id in range(0, n):
f.write(str(id) + ";")
f.write(dataset + ";")
f.write(architecture + ";")
seed = seed_list[id]
f.write(str(seed) + ";")
f.write(str(steps) + ";")
f.write("\n")
f.close()
else:
dataset_index = 0
dataset_size_start = 0.1
dataset_size_stop = 0.5
steps_start = 300
steps_stop = 1000
conv1_start = 2
conv1_stop = 10
conv2_start = 5
conv2_stop = 25
conv3_start = 50
conv3_stop = 250
dataset_list = [dataset_index for _ in range(n)]
dataset_size_list = [
random.uniform(dataset_size_start, dataset_size_stop) for _ in range(n)
]
seed_list = [random.randrange(start=0, stop=1e9) for _ in range(n)]
steps_list = [
random.randrange(start=steps_start, stop=steps_stop) for _ in range(n)
]
conv1_list = [
random.randrange(start=conv1_start, stop=conv1_stop) for _ in range(n)
]
conv2_list = [
random.randrange(start=conv2_start, stop=conv2_stop) for _ in range(n)
]
conv3_list = [
random.randrange(start=conv3_start, stop=conv3_stop) for _ in range(n)
]
fname = file_name + ".csv"
with open(fname, "w", encoding="UTF8") as f:
for h in header:
f.write(h + ";")
f.write("\n")
for id in range(0, n):
f.write(str(id) + ";")
sample_size = architectures[dataset_list[id]][3]
pool_layer_count = architectures[dataset_list[id]][4]
linear_layer_size = conv3_list[id] * pow(
sample_size / pow(2, pool_layer_count), 2
)
linear_layer_size = int(round(linear_layer_size))
dataset_size = int(
dataset_size_list[id] * architectures[dataset_list[id]][2]
)
dataset = (
architectures[dataset_list[id]][0] + "_" + str(dataset_size)
)
architecture = architectures[dataset_list[id]][1].format(
conv1_list[id],
conv2_list[id],
conv3_list[id],
linear_layer_size,
)
f.write(dataset + ";")
f.write(architecture + ";")
seed = seed_list[id]
f.write(str(seed) + ";")
steps = steps_list[id]
f.write(str(steps) + ";")
f.write("\n")
f.close()
| 32,877 | 32.721026 | 226 | py |
DACBench | DACBench-main/dacbench/envs/cma_es.py | """
CMA-ES environment adapted from CMAWorld in
"Learning Step-size Adaptation in CMA-ES"
by G.Shala and A. Biedenkapp and N.Awad and S. Adriaensen and M.Lindauer and F. Hutter.
Original author: Gresa Shala
"""
import resource
import sys
import threading
import warnings
from collections import deque
import numpy as np
from cma import bbobbenchmarks as bn
from cma.evolution_strategy import CMAEvolutionStrategy
from dacbench import AbstractEnv
# Raise the process stack-size limit and the Python recursion limit.
# NOTE(review): these are process-wide, import-time side effects -- confirm
# they are intended to apply to every consumer of this module.
resource.setrlimit(resource.RLIMIT_STACK, (2**35, -1))
sys.setrecursionlimit(10**9)
# Silence all warnings module-wide (also a global, import-time side effect).
warnings.filterwarnings("ignore")
def _norm(x):
return np.sqrt(np.sum(np.square(x)))
# IDEA: if we ask cma instead of ask_eval, we could make this parallel
class CMAESEnv(AbstractEnv):
"""
Environment to control the step size of CMA-ES
"""
    def __init__(self, config):
        """
        Initialize CMA Env
        Parameters
        -------
        config : objdict
            Environment configuration (history length, population size,
            optional reward/state overrides)
        """
        super(CMAESEnv, self).__init__(config)
        self.b = None
        self.bounds = [None, None]
        self.fbest = None
        self.history_len = config.hist_length
        # Rolling windows of recent [f_difference, velocity] pairs,
        # objective values, and step sizes used to build the state.
        self.history = deque(maxlen=self.history_len)
        self.past_obj_vals = deque(maxlen=self.history_len)
        self.past_sigma = deque(maxlen=self.history_len)
        self.solutions = None
        self.func_values = []
        self.cur_obj_val = -1
        # self.chi_N = dim ** 0.5 * (1 - 1.0 / (4.0 * dim) + 1.0 / (21.0 * dim ** 2))
        self.lock = threading.Lock()
        self.popsize = config["popsize"]
        self.cur_ps = self.popsize
        # Reward and state can be overridden by the config.
        if "reward_function" in config.keys():
            self.get_reward = config["reward_function"]
        else:
            self.get_reward = self.get_default_reward
        if "state_method" in config.keys():
            self.get_state = config["state_method"]
        else:
            self.get_state = self.get_default_state
def step(self, action):
"""
Execute environment step
Parameters
----------
action : list
action to execute
Returns
-------
np.array, float, bool, dict
state, reward, done, info
"""
truncated = super(CMAESEnv, self).step_()
self.history.append([self.f_difference, self.velocity])
terminated = self.es.stop() != {}
if not (terminated or truncated):
"""Moves forward in time one step"""
sigma = action
self.es.tell(self.solutions, self.func_values)
self.es.sigma = np.maximum(sigma, 0.2)
self.solutions, self.func_values = self.es.ask_and_eval(self.fcn)
self.f_difference = np.nan_to_num(
np.abs(np.amax(self.func_values) - self.cur_obj_val)
/ float(self.cur_obj_val)
)
self.velocity = np.nan_to_num(
np.abs(np.amin(self.func_values) - self.cur_obj_val)
/ float(self.cur_obj_val)
)
self.fbest = min(self.es.best.f, np.amin(self.func_values))
self.past_obj_vals.append(self.cur_obj_val)
self.past_sigma.append(self.cur_sigma)
self.cur_ps = _norm(self.es.adapt_sigma.ps)
self.cur_loc = self.es.best.x
try:
self.cur_sigma = [self.es.sigma[0]]
except:
self.cur_sigma = [self.es.sigma]
self.cur_obj_val = self.es.best.f
return self.get_state(self), self.get_reward(self), terminated, truncated, {}
    def reset(self, seed=None, options={}):
        """
        Reset environment
        Instantiates the BBOB function and a fresh CMAEvolutionStrategy for
        the current instance and evaluates the first population.
        NOTE(review): ``options={}`` is a mutable default argument; it is
        never mutated here, but consider ``options=None``.
        Returns
        -------
        np.array
            Environment state
        """
        super(CMAESEnv, self).reset_(seed)
        self.history.clear()
        self.past_obj_vals.clear()
        self.past_sigma.clear()
        # instance layout: (function id, dimension, initial sigma, initial location)
        self.cur_loc = self.instance[3]
        self.dim = self.instance[1]
        self.init_sigma = self.instance[2]
        self.cur_sigma = [self.init_sigma]
        self.fcn = bn.instantiate(self.instance[0], seed=self.seed)[0]
        self.func_values = []
        self.f_vals = deque(maxlen=self.popsize)
        self.es = CMAEvolutionStrategy(
            self.cur_loc,
            self.init_sigma,
            {"popsize": self.popsize, "bounds": self.bounds, "seed": self.initial_seed},
        )
        self.solutions, self.func_values = self.es.ask_and_eval(self.fcn)
        self.fbest = self.func_values[np.argmin(self.func_values)]
        self.f_difference = np.abs(
            np.amax(self.func_values) - self.cur_obj_val
        ) / float(self.cur_obj_val)
        self.velocity = np.abs(np.amin(self.func_values) - self.cur_obj_val) / float(
            self.cur_obj_val
        )
        self.es.mean_old = self.es.mean
        self.history.append([self.f_difference, self.velocity])
        return self.get_state(self), {}
    def close(self):
        """
        No additional cleanup necessary
        Returns
        -------
        bool
            Cleanup flag (always True)
        """
        return True
    def render(self, mode: str = "human"):
        """
        Render env in human mode
        Parameters
        ----------
        mode : str
            Execution mode (only "human" is accepted; rendering is a no-op)
        """
        if mode != "human":
            raise NotImplementedError
        pass
    def get_default_reward(self, _):
        """
        Compute reward
        Returns
        -------
        float
            Reward: the negated best function value, clipped to reward_range
        """
        reward = min(self.reward_range[1], max(self.reward_range[0], -self.fbest))
        return reward
    def get_default_state(self, _):
        """
        Gather state description
        All history-based features are left-padded with zeros to a fixed
        length so the observation shape is constant.
        Returns
        -------
        dict
            Environment state
        """
        # Relative changes between consecutive past objective values, plus
        # the change from the last stored value to the current one.
        past_obj_val_deltas = []
        for i in range(1, len(self.past_obj_vals)):
            past_obj_val_deltas.append(
                (self.past_obj_vals[i] - self.past_obj_vals[i - 1] + 1e-3)
                / float(self.past_obj_vals[i - 1])
            )
        if len(self.past_obj_vals) > 0:
            past_obj_val_deltas.append(
                (self.cur_obj_val - self.past_obj_vals[-1] + 1e-3)
                / float(self.past_obj_vals[-1])
            )
        past_obj_val_deltas = np.array(past_obj_val_deltas).reshape(-1)
        history_deltas = []
        for i in range(len(self.history)):
            history_deltas.append(self.history[i])
        history_deltas = np.array(history_deltas).reshape(-1)
        past_sigma_deltas = []
        for i in range(len(self.past_sigma)):
            past_sigma_deltas.append(self.past_sigma[i])
        past_sigma_deltas = np.array(past_sigma_deltas).reshape(-1)
        # Zero-pad on the left so each feature vector has a fixed length.
        past_obj_val_deltas = np.hstack(
            (
                np.zeros((self.history_len - past_obj_val_deltas.shape[0],)),
                past_obj_val_deltas,
            )
        )
        history_deltas = np.hstack(
            (
                np.zeros((self.history_len * 2 - history_deltas.shape[0],)),
                history_deltas,
            )
        )
        past_sigma_deltas = np.hstack(
            (
                np.zeros((self.history_len - past_sigma_deltas.shape[0],)),
                past_sigma_deltas,
            )
        )
        cur_loc = np.array(self.cur_loc)
        cur_ps = np.array([self.cur_ps])
        cur_sigma = np.array(self.cur_sigma)
        state = {
            "current_loc": cur_loc,
            "past_deltas": past_obj_val_deltas,
            "current_ps": cur_ps,
            "current_sigma": cur_sigma,
            "history_deltas": history_deltas,
            "past_sigma_deltas": past_sigma_deltas,
        }
        return state
| 7,678 | 28.763566 | 88 | py |
DACBench | DACBench-main/dacbench/envs/toysgd.py | from typing import Dict, Tuple, Union
import numpy as np
import pandas as pd
from numpy.polynomial import Polynomial
from dacbench import AbstractMADACEnv
def create_polynomial_instance_set(
    out_fname: str,
    n_samples: int = 100,
    order: int = 2,
    low: float = -10,
    high: float = 10,
):
    """Sample ``n_samples`` random polynomial instances and write them to
    ``out_fname`` as a semicolon-separated CSV.

    Each row holds an ID, the function family, the polynomial order, the
    coefficient sampling bounds, and the sampled coefficient array.
    """
    rows = []
    for instance_id in range(n_samples):
        row = {
            "ID": instance_id,
            "family": "polynomial",
            "order": order,
            "low": low,
            "high": high,
            "coefficients": sample_coefficients(order=order, low=low, high=high),
        }
        rows.append(row)
    pd.DataFrame(rows).to_csv(out_fname, sep=";", index=False)
def sample_coefficients(order: int = 2, low: float = -10, high: float = 10):
    """Draw random coefficients for a polynomial of the given ``order``.

    The first entry is sampled from [0, high] (non-negative); the remaining
    ``order`` entries are sampled from [low, high]. Returns a float array of
    length ``order + 1``.
    """
    leading = np.random.uniform(0, high, size=1)
    remaining = np.random.uniform(low, high, size=order)
    return np.concatenate((leading, remaining))
class ToySGDEnv(AbstractMADACEnv):
"""
Optimize toy functions with SGD + Momentum.
Action: [log_learning_rate, log_momentum] (log base 10)
State: Dict with entries remaining_budget, gradient, learning_rate, momentum
Reward: negative log regret of current and true function value
An instance can look as follows:
ID 0
family polynomial
order 2
low -2
high 2
coefficients [ 1.40501053 -0.59899755 1.43337392]
"""
    def __init__(self, config):
        """Initialize the toy SGD environment from ``config``.

        ``cutoff`` (default 1000) bounds the number of steps per episode.
        """
        super(ToySGDEnv, self).__init__(config)
        self.n_steps_max = config.get("cutoff", 1000)
        self.velocity = 0
        self.gradient = 0
        self.history = []
        # per-instance objective; populated by build_objective_function()
        self.n_dim = None  # type: Optional[int]
        self.objective_function = None
        self.objective_function_deriv = None
        self.x_min = None
        self.f_min = None
        self.x_cur = None
        self.f_cur = None
        self.momentum = 0  # type: Optional[float]
        self.learning_rate = None  # type: Optional[float]
        self.n_steps = 0  # type: Optional[int]
    def build_objective_function(self):
        """Build the per-instance objective, its derivative, and its minimum.

        Parses the instance's stringified coefficient array, constructs a
        numpy Polynomial (only order 2 supported) and caches x_min/f_min
        plus the starting position.
        """
        if self.instance["family"] == "polynomial":
            order = int(self.instance["order"])
            if order != 2:
                raise NotImplementedError(
                    "Only order 2 is currently implemented for polynomial functions."
                )
            self.n_dim = order
            # coefficients are stored as a string like "[ 1.4 -0.6 1.4 ]"
            coeffs_str = self.instance["coefficients"]
            coeffs_str = coeffs_str.strip("[]")
            coeffs = [float(item) for item in coeffs_str.split()]
            self.objective_function = Polynomial(coef=coeffs)
            self.objective_function_deriv = self.objective_function.deriv(
                m=1
            )  # lambda x0: derivative(self.objective_function, x0, dx=1.0, n=1, args=(), order=3)
            # NOTE(review): for numpy Polynomial, coef[0] is the CONSTANT
            # term, so the vertex of c0 + c1*x + c2*x^2 is -c1/(2*c2); this
            # divides by coeffs[0] -- verify the intended coefficient order.
            self.x_min = -coeffs[1] / (
                2 * coeffs[0] + 1e-10
            )  # add small epsilon to avoid numerical instabilities
            self.f_min = self.objective_function(self.x_min)
            self.x_cur = self.get_initial_position()
        else:
            raise NotImplementedError(
                "No other function families than polynomial are currently supported."
            )
def get_initial_position(self):
"""Get initial position."""
return 0 # np.random.uniform(-5, 5, size=self.n_dim-1)
def step(
self, action: Union[float, Tuple[float, float]]
) -> Tuple[Dict[str, float], float, bool, Dict]:
"""
Take one step with SGD.
Parameters
----------
action: Tuple[float, Tuple[float, float]]
If scalar, action = (log_learning_rate)
If tuple, action = (log_learning_rate, log_momentum)
Returns
-------
Tuple[Dict[str, float], float, bool, Dict]
- state : Dict[str, float]
State with entries "remaining_budget", "gradient", "learning_rate", "momentum"
- reward : float
- terminated : bool
- truncated : bool
- info : Dict
"""
truncated = super(ToySGDEnv, self).step_()
info = {}
# parse action
if np.isscalar(action):
log_learning_rate = action
elif len(action) == 2:
log_learning_rate, log_momentum = action
self.momentum = 10**log_momentum
else:
raise ValueError
self.learning_rate = 10**log_learning_rate
# SGD + Momentum update
self.velocity = (
self.momentum * self.velocity + self.learning_rate * self.gradient
)
self.x_cur -= self.velocity
self.gradient = self.objective_function_deriv(self.x_cur)
# State
remaining_budget = self.n_steps_max - self.n_steps
state = {
"remaining_budget": remaining_budget,
"gradient": self.gradient,
"learning_rate": self.learning_rate,
"momentum": self.momentum,
}
# Reward
# current function value
self.f_cur = self.objective_function(self.x_cur)
# log regret
log_regret = np.log10(np.abs(self.f_min - self.f_cur))
reward = -log_regret
self.history.append(self.x_cur)
# Stop criterion
self.n_steps += 1
return state, reward, False, truncated, info
def reset(self, seed=None, options={}):
"""
Reset environment.
Parameters
----------
seed : int
seed
options : dict
options dict (not used)
Returns
-------
np.array
Environment state
dict
Meta-info
"""
super(ToySGDEnv, self).reset_(seed)
self.velocity = 0
self.gradient = 0
self.history = []
self.objective_function = None
self.objective_function_deriv = None
self.x_min = None
self.f_min = None
self.x_cur = None
self.f_cur = None
self.momentum = 0
self.learning_rate = 0
self.n_steps = 0
self.build_objective_function()
return {
"remaining_budget": self.n_steps_max,
"gradient": self.gradient,
"learning_rate": self.learning_rate,
"momentum": self.momentum,
}, {}
def render(self, **kwargs):
"""Render progress."""
import matplotlib.pyplot as plt
history = np.array(self.history).flatten()
X = np.linspace(1.05 * np.amin(history), 1.05 * np.amax(history), 100)
Y = self.objective_function(X)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(X, Y, label="True")
ax.plot(
history,
self.objective_function(history),
marker="x",
color="black",
label="Observed",
)
ax.plot(
self.x_cur,
self.objective_function(self.x_cur),
marker="x",
color="red",
label="Current Optimum",
)
ax.legend()
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_title("instance: " + str(self.instance["coefficients"]))
plt.show()
def close(self):
"""Close env."""
pass
| 7,727 | 29.666667 | 98 | py |
DACBench | DACBench-main/dacbench/envs/__init__.py | # flake8: noqa: F401
import importlib
import warnings

from dacbench.envs.fast_downward import FastDownwardEnv
from dacbench.envs.geometric import GeometricEnv
from dacbench.envs.luby import LubyEnv, luby_gen
from dacbench.envs.sigmoid import (
    ContinuousSigmoidEnv,
    ContinuousStateSigmoidEnv,
    SigmoidEnv,
)
from dacbench.envs.theory import TheoryEnv
from dacbench.envs.toysgd import ToySGDEnv

# Always-available environments; optional ones are appended below when their
# extra dependencies are installed.
__all__ = [
    "LubyEnv",
    "luby_gen",
    "SigmoidEnv",
    "ContinuousSigmoidEnv",
    "ContinuousStateSigmoidEnv",
    "FastDownwardEnv",
    "ToySGDEnv",
    "GeometricEnv",
    "TheoryEnv",
]

# Optional benchmark: CMA-ES (requires the "cma" package).
cma_spec = importlib.util.find_spec("cma")
found = cma_spec is not None
if found:
    from dacbench.envs.cma_es import CMAESEnv

    __all__.append("CMAESEnv")
else:
    warnings.warn(
        "CMA-ES Benchmark not installed. If you want to use this benchmark, please follow the installation guide."
    )

# Optional benchmark: modular CMA-ES (requires the "modcma" package).
modcma_spec = importlib.util.find_spec("modcma")
found = modcma_spec is not None
if found:
    from dacbench.envs.cma_step_size import CMAStepSizeEnv
    from dacbench.envs.modcma import ModCMAEnv

    __all__.append("ModCMAEnv")
    __all__.append("CMAStepSizeEnv")
else:
    warnings.warn(
        "ModCMA Benchmark not installed. If you want to use this benchmark, please follow the installation guide."
    )

# Optional benchmark: SGD (requires the "backpack" package).
sgd_spec = importlib.util.find_spec("backpack")
found = sgd_spec is not None
if found:
    from dacbench.envs.sgd import SGDEnv

    __all__.append("SGDEnv")
else:
    warnings.warn(
        "SGD Benchmark not installed. If you want to use this benchmark, please follow the installation guide."
    )
| 1,631 | 25.322581 | 114 | py |
DACBench | DACBench-main/dacbench/envs/fast_downward.py | """
Planning environment from
"Learning Heuristic Selection with Dynamic Algorithm Configuration"
by David Speck, André Biedenkapp, Frank Hutter, Robert Mattmüller und Marius Lindauer.
Original environment authors: David Speck, André Biedenkapp
"""
import os
import socket
import subprocess
import time
import typing
from copy import deepcopy
from enum import Enum
from os import remove
from os.path import join as joinpath
import numpy as np
from dacbench import AbstractEnv
class StateType(Enum):
    """Observation post-processing modes for FastDownwardEnv.

    The numeric values match the ``state_type`` entry of the environment
    config; the corresponding transformation is selected in
    ``FastDownwardEnv.__init__``.
    """

    RAW = 1  # use features unchanged
    DIFF = 2  # difference to the previous state: x - y
    ABSDIFF = 3  # absolute difference: |x - y|
    NORMAL = 4  # normalized by the first observed values: x / z
    NORMDIFF = 5  # difference of normalized values: x/z - y/z
    NORMABSDIFF = 6  # absolute difference of normalized values: |x/z - y/z|
class FastDownwardEnv(AbstractEnv):
    """
    Environment to control Solver Heuristics of FastDownward.

    Communicates with a FastDownward subprocess over a TCP socket: the
    environment sends the chosen heuristic index, FastDownward answers with a
    dict of search statistics, a reward and a done flag.
    """

    def __init__(self, config):
        """
        Initialize FD Env

        Parameters
        -------
        config : objdict
            Environment configuration
        """
        super(FastDownwardEnv, self).__init__(config)
        # Per-heuristic statistics reported by the FD backend.
        self._heuristic_state_features = [
            "Average Value",  # 'Dead Ends Reliable',
            "Max Value",
            "Min Value",
            "Open List Entries",
            "Varianz",
        ]
        self._general_state_features = [  # 'evaluated_states', 'evaluations', 'expanded_states',
            # 'generated_ops',
            # 'generated_states', 'num_variables',
            # 'registered_states', 'reopened_states',
            # "cg_num_eff_to_eff", "cg_num_eff_to_pre", "cg_num_pre_to_eff"
        ]

        total_state_features = len(config.heuristics) * len(
            self._heuristic_state_features
        )
        self._use_gsi = config.use_general_state_info
        if config.use_general_state_info:
            total_state_features += len(self._general_state_features)

        # Per-feature flags: True means the feature is passed through untransformed.
        self.__skip_transform = [False for _ in range(total_state_features)]
        if config.use_general_state_info:
            self.__skip_transform[4] = True  # skip num_variables transform
            self.__skip_transform[7] = True
            self.__skip_transform[8] = True
            self.__skip_transform[9] = True

        self.heuristics = config.heuristics
        self.host = config.host
        self._port = config.get("port", 0)
        if config["parallel"]:
            # Port 0 lets the OS pick a free port for each parallel worker.
            self.port = 0
        self.fd_seed = config.fd_seed
        self.control_interval = config.control_interval

        if config.fd_logs is None:
            self.logpath_out = os.devnull
            self.logpath_err = os.devnull
        else:
            self.logpath_out = os.path.join(config.fd_logs, "fdout.txt")
            self.logpath_err = os.path.join(config.fd_logs, "fderr.txt")
        self.fd_path = config.fd_path
        self.fd = None  # handle of the FastDownward subprocess
        if "domain_file" in config.keys():
            self.domain_file = config["domain_file"]

        self.socket = None
        self.conn = None

        self._prev_state = None
        self.num_steps = config.num_steps
        self.__state_type = StateType(config.state_type)
        self.__norm_vals = []  # first observed state, used as normalization reference
        self._config_dir = config.config_dir
        self._port_file_id = config.port_file_id

        self._transformation_func = None
        # create state transformation function with inputs (current state, previous state, normalization values)
        if self.__state_type == StateType.DIFF:
            self._transformation_func = lambda x, y, z, skip: x - y if not skip else x
        elif self.__state_type == StateType.ABSDIFF:
            self._transformation_func = (
                lambda x, y, z, skip: abs(x - y) if not skip else x
            )
        elif self.__state_type == StateType.NORMAL:
            self._transformation_func = (
                lambda x, y, z, skip: FastDownwardEnv._save_div(x, z) if not skip else x
            )
        elif self.__state_type == StateType.NORMDIFF:
            self._transformation_func = (
                lambda x, y, z, skip: FastDownwardEnv._save_div(x, z)
                - FastDownwardEnv._save_div(y, z)
                if not skip
                else x
            )
        elif self.__state_type == StateType.NORMABSDIFF:
            self._transformation_func = (
                lambda x, y, z, skip: abs(
                    FastDownwardEnv._save_div(x, z) - FastDownwardEnv._save_div(y, z)
                )
                if not skip
                else x
            )

        self.max_rand_steps = config.max_rand_steps
        self.__start_time = None
        self.done = True  # Starts as true as the expected behavior is that before normal resets an episode was done.

    @property
    def port(self):
        """Return the configured port, or the OS-assigned one when auto-selecting."""
        if self._port == 0:
            if self.socket is None:
                raise ValueError(
                    "Automatic port selection enabled. Port not know at the moment"
                )
            _, port = self.socket.getsockname()
        else:
            port = self._port
        return port

    @port.setter
    def port(self, port):
        self._port = port

    @property
    def argstring(self):
        """Build the FastDownward --search argument for the configured heuristics."""
        # if a socket is bound to 0 it will automatically choose a free port
        return f"rl_eager(rl([{''.join(f'{h},' for h in self.heuristics)[:-1]}],random_seed={self.fd_seed}),rl_control_interval={self.control_interval},rl_client_port={self.port})"

    @staticmethod
    def _save_div(a, b):
        """
        Helper method for safe division

        Parameters
        ----------
        a : list or np.array
            values to be divided
        b : list or np.array
            values to divide by

        Returns
        -------
        np.array
            Division result
        """
        return np.divide(a, b, out=np.zeros_like(a), where=b != 0)

    def send_msg(self, msg: bytes):
        """
        Send message and prepend the message size

        Based on comment from SO see [1]
        [1] https://stackoverflow.com/a/17668009

        Parameters
        ----------
        msg : bytes
            The message as byte
        """
        # Prefix each message with a 4-byte length (network byte order)
        msg = str.encode("{:>04d}".format(len(msg))) + msg
        self.conn.sendall(msg)

    def recv_msg(self):
        """
        Recieve a whole message. The message has to be prepended with its total size

        Based on comment from SO see [1]

        Returns
        ----------
        bytes
            The message as byte
        """
        # Read message length and unpack it into an integer
        raw_msglen = self.recvall(4)
        if not raw_msglen:
            return None
        msglen = int(raw_msglen.decode())
        # Read the message data
        return self.recvall(msglen)

    def recvall(self, n: int):
        """
        Given we know the size we want to recieve, we can recieve that amount of bytes.

        Based on comment from SO see [1]

        Parameters
        ---------
        n: int
            Number of bytes to expect in the data

        Returns
        ----------
        bytes
            The message as byte
        """
        # Helper function to recv n bytes or return None if EOF is hit
        data = b""
        while len(data) < n:
            packet = self.conn.recv(n - len(data))
            if not packet:
                return None
            data += packet
        return data

    def _process_data(self):
        """
        Split received json into state reward and done

        Returns
        ----------
        np.array, float, bool
            state, reward, done
        """
        msg = self.recv_msg().decode()
        # print("----------------------------")
        # print(msg)
        # print("=>")
        # Replace infinities FD may report so eval() below yields finite numbers.
        msg = msg.replace("-inf", "0")
        msg = msg.replace("inf", "0")
        # print(msg)
        # SECURITY: eval() on data received over the socket executes arbitrary
        # Python. The peer is the locally spawned FD subprocess, but consider
        # replacing this with json.loads / ast.literal_eval.
        data = eval(msg)
        r = data["reward"]
        done = data["done"]
        del data["reward"]
        del data["done"]

        state = []

        if self._use_gsi:
            for feature in self._general_state_features:
                state.append(data[feature])
        for heuristic_id in range(len(self.heuristics)):  # process heuristic data
            for feature in self._heuristic_state_features:
                state.append(data["%d" % heuristic_id][feature])

        if self._prev_state is None:
            # First observation: remember it as normalization reference.
            self.__norm_vals = deepcopy(state)
            self._prev_state = deepcopy(state)
        if (
            self.__state_type != StateType.RAW
        ):  # Transform state to DIFF state or normalize
            tmp_state = state
            state = list(
                map(
                    self._transformation_func,
                    state,
                    self._prev_state,
                    self.__norm_vals,
                    self.__skip_transform,
                )
            )
            self._prev_state = tmp_state
        return np.array(state), r, done

    def step(self, action: typing.Union[int, typing.List[int]]):
        """
        Environment step

        Parameters
        ---------
        action: typing.Union[int, List[int]]
            Parameter(s) to apply

        Returns
        ----------
        np.array, float, bool, bool, dict
            state, reward, terminated, truncated, info
        """
        self.done = super(FastDownwardEnv, self).step_()
        if not np.issubdtype(
            type(action), np.integer
        ):  # check for core int and any numpy-int
            try:
                action = action[0]
            except IndexError as e:
                print(type(action))
                raise e
        if self.num_steps:
            msg = ",".join([str(action), str(self.num_steps)])
        else:
            msg = str(action)
        self.send_msg(str.encode(msg))
        s, r, terminated = self._process_data()
        r = max(self.reward_range[0], min(self.reward_range[1], r))
        info = {}
        if terminated:
            self.done = True
            self.kill_connection()
        if self.c_step > self.n_steps:
            # Episode cutoff reached: tell FD to stop and drop the connection.
            info["needs_reset"] = True
            self.send_msg(str.encode("END"))
            self.kill_connection()
        return s, r, terminated, self.done, info

    def reset(self, seed=None, options=None):
        """
        Reset environment: restart the FastDownward subprocess and reconnect.

        Parameters
        ----------
        seed : int
            seed
        options : dict, optional
            options dict (not used); default changed from a mutable ``{}``
            to ``None`` to avoid the shared-mutable-default pitfall.

        Returns
        ----------
        np.array
            State after reset
        dict
            Meta-info
        """
        super(FastDownwardEnv, self).reset_(seed)
        self._prev_state = None
        self.__start_time = time.time()
        if not self.done:  # This means we interrupt FD before a plan was found
            # Inform FD about imminent shutdown of the connection
            self.send_msg(str.encode("END"))
        self.done = False
        if self.conn:
            self.conn.shutdown(2)
            self.conn.close()
            self.conn = None

        if not self.socket:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.socket.settimeout(10)
            self.socket.bind((self.host, self.port))

        if self.fd:
            self.fd.terminate()

        # PDDL instances need the separate domain file; SAS instances do not.
        if self.instance.endswith(".pddl"):
            command = [
                "python3",
                f"{self.fd_path}",
                self.domain_file,
                self.instance,
                "--search",
                self.argstring,
            ]
        else:
            command = [
                "python3",
                f"{self.fd_path}",
                self.instance,
                "--search",
                self.argstring,
            ]

        with open(self.logpath_out, "a+") as fout, open(self.logpath_err, "a+") as ferr:
            err_output = subprocess.STDOUT if self.logpath_err == "/dev/null" else ferr
            self.fd = subprocess.Popen(command, stdout=fout, stderr=err_output)

        # write down port such that FD can potentially read where to connect to
        if self._port_file_id:
            fp = joinpath(self._config_dir, "port_{:d}.txt".format(self._port_file_id))
        else:
            fp = joinpath(self._config_dir, f"port_{self.port}.txt")
        with open(fp, "w") as portfh:
            portfh.write(str(self.port))

        self.socket.listen()
        try:
            self.conn, address = self.socket.accept()
        except socket.timeout:
            raise OSError(
                "Fast downward subprocess not reachable (time out). "
                "Possible solutions:\n"
                " (1) Did you run './dacbench/envs/rl-plan/fast-downward/build.py' "
                "in order to build the fd backend?\n"
                " (2) Try to fix this by setting OPENBLAS_NUM_THREADS=1. "
                "For more details see https://github.com/automl/DACBench/issues/96"
            )
        s, _, _ = self._process_data()
        if self.max_rand_steps > 1:
            # Advance a random number of initial steps for exploration.
            # NOTE(review): assumes self.np_random exposes randint (legacy
            # RandomState API) — confirm against the gym version in use.
            for _ in range(self.np_random.randint(1, self.max_rand_steps + 1)):
                s, _, _, _, _ = self.step(self.action_space.sample())
            if self.conn is None:
                return self.reset()
        else:
            s, _, _, _, _ = self.step(0)  # hard coded to zero as initial step

        remove(
            fp
        )  # remove the port file such that there is no chance of loading the old port
        return s, {}

    def kill_connection(self):
        """Kill the connection"""
        if self.conn:
            self.conn.shutdown(2)
            self.conn.close()
            self.conn = None
        if self.socket:
            self.socket.shutdown(2)
            self.socket.close()
            self.socket = None

    def close(self):
        """
        Close Env

        Returns
        -------
        bool
            Closing confirmation
        """
        if self.socket is None:
            return True
        fp = joinpath(self._config_dir, f"port_{self.port}.txt")
        if os.path.exists(fp):
            remove(fp)

        self.kill_connection()
        return True

    def render(self, mode: str = "human") -> None:
        """
        Required by gym.Env but not implemented

        Parameters
        -------
        mode : str
            Rendering mode
        """
        pass
| 14,301 | 30.432967 | 180 | py |
DACBench | DACBench-main/dacbench/envs/modcma.py | import numpy as np
from IOHexperimenter import IOH_function
from modcma import ModularCMAES, Parameters
from dacbench import AbstractMADACEnv
class ModCMAEnv(AbstractMADACEnv):
    """Environment controlling the full modular-CMA-ES configuration per step."""

    def __init__(self, config):
        """
        Initialize the environment.

        Parameters
        ----------
        config : objdict
            Environment configuration; must provide ``budget`` and may override
            ``reward_function`` and ``state_method``.
        """
        super().__init__(config)
        self.es = None
        self.budget = config.budget
        self.total_budget = self.budget

        self.get_reward = config.get("reward_function", self.get_default_reward)
        self.get_state = config.get("state_method", self.get_default_state)

    def reset(self, seed=None, options=None):
        """Start a fresh CMA-ES run on the instance's BBOB function.

        ``options`` is unused; its default was changed from a mutable ``{}``
        to ``None`` to avoid the shared-mutable-default pitfall.
        """
        super().reset_(seed)
        # Instance layout: (dimension, function id, instance id, config array).
        self.dim, self.fid, self.iid, self.representation = self.instance
        self.representation = np.array(self.representation)
        self.objective = IOH_function(
            self.fid, self.dim, self.iid, target_precision=1e-8
        )
        self.es = ModularCMAES(
            self.objective,
            parameters=Parameters.from_config_array(
                self.dim, np.array(self.representation).astype(int)
            ),
        )
        return self.get_state(self), {}

    def step(self, action):
        """Apply a full module configuration and advance CMA-ES one iteration."""
        truncated = super().step_()
        new_parameters = Parameters.from_config_array(self.dim, action)
        # Copy every module setting from the decoded action into the running ES.
        self.es.parameters.update(
            {m: getattr(new_parameters, m) for m in Parameters.__modules__}
        )
        terminated = not self.es.step()
        return self.get_state(self), self.get_reward(self), terminated, truncated, {}

    def close(self):
        """Nothing to clean up."""
        return True

    def get_default_reward(self, *_):
        """Negative best-so-far objective value, clipped to the reward range."""
        return max(
            self.reward_range[0], min(self.reward_range[1], -self.es.parameters.fopt)
        )

    def get_default_state(self, *_):
        """State: [lambda, sigma, remaining budget, function id, instance id]."""
        return np.array(
            [
                self.es.parameters.lambda_,
                self.es.parameters.sigma,
                self.budget - self.es.parameters.used_budget,
                self.fid,
                self.iid,
            ]
        )
| 1,952 | 31.016393 | 85 | py |
DACBench | DACBench-main/dacbench/envs/cma_step_size.py | import numpy as np
from IOHexperimenter import IOH_function
from modcma import ModularCMAES, Parameters
from dacbench import AbstractEnv
class CMAStepSizeEnv(AbstractEnv):
    """Environment controlling only the step size (sigma) of modular CMA-ES."""

    def __init__(self, config):
        """
        Initialize the environment.

        Parameters
        ----------
        config : objdict
            Environment configuration; must provide ``budget`` and may override
            ``reward_function`` and ``state_method``.
        """
        super().__init__(config)
        self.es = None
        self.budget = config.budget
        self.total_budget = self.budget

        self.get_reward = config.get("reward_function", self.get_default_reward)
        self.get_state = config.get("state_method", self.get_default_state)

    def reset(self, seed=None, options=None):
        """Start a fresh CMA-ES run on the instance's BBOB function.

        ``options`` is unused; its default was changed from a mutable ``{}``
        to ``None`` to avoid the shared-mutable-default pitfall.
        """
        super().reset_(seed)
        # Instance layout: (dimension, function id, instance id, config array).
        self.dim, self.fid, self.iid, self.representation = self.instance
        self.objective = IOH_function(
            self.fid, self.dim, self.iid, target_precision=1e-8
        )
        self.es = ModularCMAES(
            self.objective,
            parameters=Parameters.from_config_array(self.dim, self.representation),
        )
        return self.get_state(self), {}

    def step(self, action):
        """Set sigma to ``action`` and advance CMA-ES by one iteration."""
        truncated = super().step_()
        self.es.parameters.sigma = action
        terminated = not self.es.step()
        return self.get_state(self), self.get_reward(self), terminated, truncated, {}

    def close(self):
        """Nothing to clean up."""
        return True

    def get_default_reward(self, *_):
        """Negative best-so-far objective value, clipped to the reward range."""
        return max(
            self.reward_range[0], min(self.reward_range[1], -self.es.parameters.fopt)
        )

    def get_default_state(self, *_):
        """State: [lambda, sigma, remaining budget, function id, instance id]."""
        return np.array(
            [
                self.es.parameters.lambda_,
                self.es.parameters.sigma,
                self.budget - self.es.parameters.used_budget,
                self.fid,
                self.iid,
            ]
        )
| 1,684 | 29.636364 | 85 | py |
DACBench | DACBench-main/dacbench/envs/sigmoid.py | """
Sigmoid environment from:
"Dynamic Algorithm Configuration:Foundation of a New Meta-Algorithmic Framework"
by A. Biedenkapp and H. F. Bozkurt and T. Eimer and F. Hutter and M. Lindauer.
Original environment authors: André Biedenkapp, H. Furkan Bozkurt
"""
from typing import List
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from dacbench import AbstractMADACEnv
class SigmoidEnv(AbstractMADACEnv):
    """Environment for tracing sigmoid curves."""

    def _sig(self, x, scaling, inflection):
        """Simple sigmoid function"""
        return 1 / (1 + np.exp(-scaling * (x - inflection)))

    def __init__(self, config) -> None:
        """
        Initialize Sigmoid Env.

        Parameters
        ----------
        config : objdict
            Environment configuration
        """
        super(SigmoidEnv, self).__init__(config)

        # Placeholder curve parameters; the real ones come from the instance
        # in reset().
        self.shifts = [self.n_steps / 2 for _ in config["action_values"]]
        self.slopes = [-1 for _ in config["action_values"]]
        self.slope_multiplier = config["slope_multiplier"]
        self.n_actions = len(self.action_space.nvec)
        self._prev_state = None
        self.last_action = None

        if "reward_function" in config.keys():
            self.get_reward = config["reward_function"]
        else:
            self.get_reward = self.get_default_reward

        if "state_method" in config.keys():
            self.get_state = config["state_method"]
        else:
            self.get_state = self.get_default_state

    def step(self, action: int):
        """
        Execute environment step.

        Parameters
        ----------
        action : int
            action to execute

        Returns
        -------
        np.array, float, bool, bool, dict
            state, reward, terminated, truncated, info
        """
        self.done = super(SigmoidEnv, self).step_()
        self.last_action = action
        next_state = self.get_state(self)
        self._prev_state = next_state
        # terminated is always False; self.done carries the cutoff truncation.
        return next_state, self.get_reward(self), False, self.done, {}

    def reset(self, seed=None, options=None) -> List[int]:
        """
        Resets env.

        Parameters
        ----------
        seed : int
            seed
        options : dict, optional
            options dict (not used); default changed from a mutable ``{}``
            to ``None`` to avoid the shared-mutable-default pitfall.

        Returns
        -------
        numpy.array
            Environment state
        """
        super(SigmoidEnv, self).reset_(seed)
        # Instance layout: first n_actions entries are shifts, the rest slopes.
        self.shifts = self.instance[: self.n_actions]
        self.slopes = self.instance[self.n_actions :]
        self._prev_state = None
        return self.get_state(self), {}

    def get_default_reward(self, _):
        """Get default reward."""
        # Per dimension: 1 - |sigmoid(t) - normalized action|; total reward is
        # the product over all dimensions, clipped to the reward range.
        r = [
            1 - np.abs(self._sig(self.c_step, slope, shift) - (act / (max_act - 1)))
            for slope, shift, act, max_act in zip(
                self.slopes, self.shifts, self.last_action, self.action_space.nvec
            )
        ]
        r = np.prod(r)
        r = max(self.reward_range[0], min(self.reward_range[1], r))
        return r

    def get_default_state(self, _):
        """Get default state representation."""
        remaining_budget = self.n_steps - self.c_step
        next_state = [remaining_budget]
        for shift, slope in zip(self.shifts, self.slopes):
            next_state.append(shift)
            next_state.append(slope)
        # Before the first step there is no previous action; encode it as -1.
        if self.c_step == 0:
            next_state += [-1 for _ in range(self.n_actions)]
        else:
            next_state = np.array(list(next_state) + list(self.last_action))
        return np.array(next_state)

    def close(self) -> bool:
        """
        Close Env.

        Returns
        -------
        bool
            Closing confirmation
        """
        return True

    def render(self, mode: str) -> None:
        """
        Render env in human mode.

        Parameters
        ----------
        mode : str
            Execution mode
        """
        if mode == "human" and self.n_actions == 2:
            plt.ion()
            plt.show()
            plt.cla()
            steps = np.arange(self.n_steps)
            self.data = self._sig(steps, self.slopes[0], self.shifts[0]) * self._sig(
                steps, self.slopes[1], self.shifts[1]
            ).reshape(-1, 1)

            plt.imshow(
                self.data,
                extent=(0, self.n_steps - 1, 0, self.n_steps - 1),
                interpolation="nearest",
                cmap=cm.plasma,
            )
            plt.axvline(x=self.c_step, color="r", linestyle="-", linewidth=2)
            plt.axhline(y=self.c_step, color="r", linestyle="-", linewidth=2)
            plt.draw()
            plt.pause(0.005)
class ContinuousStateSigmoidEnv(SigmoidEnv):
    """Environment for tracing sigmoid curves with a continuous state on the x-axis."""

    def __init__(self, config) -> None:
        """
        Initialize Sigmoid Env.

        Parameters
        ----------
        config : objdict
            Environment configuration
        """
        super().__init__(config)

    def step(self, action: int):
        """
        Execute environment step.

        The step width along the x-axis depends on the reward: the better the
        action, the further the environment advances (roughly between 0.25
        and 1 per step), so ``c_step`` becomes a continuous quantity.

        Parameters
        ----------
        action : int
            action to execute

        Returns
        -------
        np.array, float, bool, dict
            state, reward, done, info

        NOTE(review): unlike SigmoidEnv.step, this returns a 4-tuple without
        separate terminated/truncated flags — confirm callers expect this.
        """
        self.last_action = action
        # The reward measures how wrong the choice was so we can take this error to determine how far we travel along
        # the x-axis instead of always advancing + 1
        r = self.get_reward(self)

        # magic constants but such that the max step is ~1 and the min step is ~0.25
        self.c_step += (r + np.sqrt(np.power(r, 2) + 0.25)) / 2

        if self.c_step >= self.n_steps:
            self.done = True
        else:
            self.done = False

        # self.c_step is used in get_next_state to show how much distance along the x-axis is left to cover
        # Thus we get a continuous state this way.
        next_state = self.get_state(self)
        self._prev_state = next_state
        return next_state, r, self.done, {}
class ContinuousSigmoidEnv(SigmoidEnv):
    """Environment for tracing sigmoid curves with a continuous state on the x-axis."""

    def __init__(self, config) -> None:
        """
        Initialize Sigmoid Env.

        Parameters
        ----------
        config : objdict
            Environment configuration
        """
        super().__init__(config)

    def step(self, action: np.ndarray):
        """
        Execute environment step. !!NOTE!! The action here is a list of floats and not a single number !!NOTE!!

        As in ContinuousStateSigmoidEnv, the reward determines how far the
        environment advances along the x-axis (roughly 0.25 to 1 per step).

        Parameters
        ----------
        action : list of floats
            action(s) to execute

        Returns
        -------
        np.array, float, bool, dict
            state, reward, done, info

        NOTE(review): unlike SigmoidEnv.step, this returns a 4-tuple without
        separate terminated/truncated flags — confirm callers expect this.
        """
        self.last_action = action
        # The reward measures how wrong the choice was so we can take this error to determine how far we travel along
        # the x-axis instead of always advancing + 1
        r = self.get_reward(self)

        # magic constants but such that the max step is ~1 and the min step is ~0.25
        self.c_step += (r + np.sqrt(np.power(r, 2) + 0.25)) / 2

        if self.c_step >= self.n_steps:
            self.done = True
        else:
            self.done = False

        # self.c_step is used in get_next_state to show how much distance along the x-axis is left to cover
        # Thus we get a continuous state this way.
        next_state = self.get_state(self)
        self._prev_state = next_state
        return next_state, r, self.done, {}
| 7,549 | 28.263566 | 117 | py |
DACBench | DACBench-main/dacbench/envs/luby.py | """
Luby environment from
"Dynamic Algorithm Configuration:Foundation of a New Meta-Algorithmic Framework"
by A. Biedenkapp and H. F. Bozkurt and T. Eimer and F. Hutter and M. Lindauer.
Original environment authors: André Biedenkapp, H. Furkan Bozkurt
"""
from typing import List
import numpy as np
from dacbench import AbstractEnv
# Instance IDEA 1: shift luby seq -> feat is sum of skipped action values
# Instance IDEA 2: "Wiggle" luby i.e. luby(t + N(0, 0.1)) -> feat is sampled value
class LubyEnv(AbstractEnv):
    """
    Environment to learn Luby Sequence
    """

    def __init__(self, config) -> None:
        """
        Initialize Luby Env

        Parameters
        -------
        config : objdict
            Environment configuration
        """
        super().__init__(config)
        self._hist_len = config["hist_length"]
        self._ms = self.n_steps
        self._mi = config["min_steps"]
        # Observation: the last hist_length actions plus the current time step.
        self._state = np.array([-1 for _ in range(self._hist_len + 1)])
        self._r = 0
        self._genny = luby_gen(1)
        self._next_goal = next(self._genny)
        # Generate luby sequence up to 2*max_steps + 2 as mode 1 could potentially shift up to max_steps
        self._seq = np.log2(
            [next(luby_gen(i)) for i in range(1, 2 * config["cutoff"] + 2)]
        )
        self._jenny_i = 1
        self._start_dist = None
        self._sticky_dis = None
        # Instance-dependent perturbations (set in reset()): a start offset and
        # an additive "sticky" error applied every step.
        self._sticky_shif = 0
        self._start_shift = 0
        self.__lower, self.__upper = 0, 0
        self.__error = 0
        self.done = None
        self.action = None

        if "reward_function" in config.keys():
            self.get_reward = config["reward_function"]
        else:
            self.get_reward = self.get_default_reward

        if "state_method" in config.keys():
            self.get_state = config["state_method"]
        else:
            self.get_state = self.get_default_state

    def step(self, action: int):
        """
        Execute environment step

        Parameters
        ----------
        action : int
            action to execute

        Returns
        -------
        np.array, float, bool, bool, dict
            state, reward, terminated, truncated, info
        """
        self.done = super(LubyEnv, self).step_()
        self.prev_state = self._state.copy()
        self.action = action
        reward = self.get_reward(self)
        if (
            self.__error < self.__lower
        ):  # needed to avoid too long sequences of sticky actions
            self.__error += np.abs(self.__lower)
        elif self.__error > self.__upper:
            self.__error -= np.abs(self.__upper)
        self._jenny_i += 1
        self.__error += self._sticky_shif

        # next target in sequence at step luby_t is determined by the current time step (jenny_i), the start_shift
        # value and the sticky error. Additive sticky error leads to sometimes rounding to the next time_step and
        # thereby repeated actions. With check against lower/upper we reset the sequence to the correct timestep in
        # the t+1 timestep.
        luby_t = max(1, int(np.round(self._jenny_i + self._start_shift + self.__error)))
        self._next_goal = self._seq[luby_t - 1]
        return self.get_state(self), reward, False, self.done, {}

    def reset(self, seed=None, options=None) -> List[int]:
        """
        Resets env

        Parameters
        ----------
        seed : int
            seed
        options : dict, optional
            options dict (not used); default changed from a mutable ``{}``
            to ``None`` to avoid the shared-mutable-default pitfall.

        Returns
        -------
        numpy.array
            Environment state
        """
        super(LubyEnv, self).reset_(seed)
        # Instance layout: (start shift, sticky shift).
        self._start_shift = self.instance[0]
        self._sticky_shif = self.instance[1]
        self._r = 0
        self.n_steps = self._mi
        self.__error = 0 + self._sticky_shif
        self._jenny_i = 1
        luby_t = max(1, int(np.round(self._jenny_i + self._start_shift + self.__error)))
        self._next_goal = self._seq[luby_t - 1]
        self.done = False
        return self.get_state(self), {}

    def get_default_reward(self, _):
        """Reward 0 for hitting the current Luby target, -1 otherwise (clipped)."""
        if self.action == self._next_goal:
            self._r = 0  # we don't want to allow for exploiting large rewards by tending towards long sequences
        else:  # mean and var chosen s.t. ~1/4 of rewards are positive
            self._r = -1

        self._r = max(self.reward_range[0], min(self.reward_range[1], self._r))
        return self._r

    def get_default_state(self, _):
        """Rolling window of the last actions plus the current time step."""
        if self.c_step == 0:
            self._state = [-1 for _ in range(self._hist_len + 1)]
        else:
            if self.c_step - 1 < self._hist_len:
                self._state[(self.c_step - 1)] = self.action
            else:
                # Window full: shift history left and append the newest action.
                self._state[:-2] = self._state[1:-1]
                self._state[-2] = self.action
            self._state[-1] = self.c_step - 1
        next_state = np.array(self._state if not self.done else self.prev_state)
        return next_state

    def close(self) -> bool:
        """
        Close Env

        Returns
        -------
        bool
            Closing confirmation
        """
        return True

    # TODO: this should render!
    def render(self, mode: str = "human") -> None:
        """
        Render env in human mode

        Parameters
        ----------
        mode : str
            Execution mode
        """
        if mode != "human":
            raise NotImplementedError

        pass
def luby_gen(i):
    """Yield the i-th value (1-indexed) of the Luby sequence.

    If ``i == 2**k - 1`` for some k, the value is ``2**(k-1)``; otherwise the
    sequence restarts and the value equals ``luby(i - 2**(k-1) + 1)`` for the
    k with ``2**(k-1) <= i < 2**k - 1``.
    """
    # Power-boundary case: i sits exactly at the end of a block.
    for exponent in range(1, 33):
        if i == (1 << exponent) - 1:
            yield 1 << (exponent - 1)

    # Recursive case: map i back to the start of its block.
    for exponent in range(1, 9999):
        block_start = 1 << (exponent - 1)
        if block_start <= i < (1 << exponent) - 1:
            yield from luby_gen(i - block_start + 1)
| 5,640 | 30.165746 | 115 | py |
def csa(env, state):
    """Cumulative Step-size Adaptation (CSA) baseline policy for CMA-ES.

    Scales the strategy's current sigma by the multiplicative update computed
    from the latest objective values.

    NOTE(review): relies on internals of the underlying CMA-ES implementation
    (``es.adapt_sigma.hsig`` / ``update2``); ``state`` is unused by design.
    """
    strategy = env.es
    step_size = strategy.sigma
    # Record the Heaviside stall indicator on the strategy before updating.
    strategy.hsig = strategy.adapt_sigma.hsig(strategy)
    step_size *= strategy.adapt_sigma.update2(
        strategy, function_values=env.cur_obj_val
    )
    return step_size
| 216 | 26.125 | 79 | py |
DACBench | DACBench-main/dacbench/envs/policies/__init__.py | from dacbench.envs.policies.csa_cma import csa
from dacbench.envs.policies.optimal_fd import get_optimum as optimal_fd
from dacbench.envs.policies.optimal_luby import get_optimum as optimal_luby
from dacbench.envs.policies.optimal_sigmoid import get_optimum as optimal_sigmoid
# Policies with provably optimal behavior, keyed by benchmark class name.
OPTIMAL_POLICIES = {
    "LubyBenchmark": optimal_luby,
    "SigmoidBenchmark": optimal_sigmoid,
    "FastDownwardBenchmark": optimal_fd,
}
# Strong baselines that are not known to be optimal.
NON_OPTIMAL_POLICIES = {"CMAESBenchmark": csa}
# Union of both registries for convenient lookup by benchmark name.
ALL_POLICIES = {**OPTIMAL_POLICIES, **NON_OPTIMAL_POLICIES}
| 527 | 34.2 | 81 | py |
def luby_gen(i):
    """Generator for the Luby Sequence (1-indexed)."""
    # Case 1: i is exactly 2**k - 1 -> value is 2**(k-1).
    k = 1
    while k < 33:
        if i == (1 << k) - 1:
            yield 1 << (k - 1)
        k += 1

    # Case 2: i falls inside a block -> recurse from the block start.
    k = 1
    while k < 9999:
        half = 1 << (k - 1)
        if half <= i < (1 << k) - 1:
            for value in luby_gen(i - half + 1):
                yield value
        k += 1
def get_optimum(env, state):
    """Return the optimal action for the Luby env: its precomputed next target.

    ``state`` is unused; it is present for the common policy signature.
    """
    return env._next_goal
| 360 | 23.066667 | 54 | py |
DACBench | DACBench-main/dacbench/envs/policies/sgd_ca.py | import math
from dacbench.abstract_agent import AbstractDACBenchAgent
class CosineAnnealingAgent(AbstractDACBenchAgent):
    """Cosine-annealing learning-rate schedule for the SGD benchmark.

    Mirrors torch.optim.lr_scheduler.CosineAnnealingLR: the learning rate
    decays from ``base_lr`` towards ``eta_min`` over ``t_max`` epochs via
    a recursive cosine update, with warm restarts every ``2 * t_max`` epochs.
    """

    def __init__(self, env, base_lr=0.1, t_max=1000, eta_min=0):
        """
        Initialize the schedule.

        Parameters
        ----------
        env :
            The environment (passed to the agent base class).
        base_lr : float
            Initial learning rate.
        t_max : int
            Half period of the cosine schedule in epochs.
        eta_min : float
            Minimum learning rate the schedule anneals towards.
        """
        self.eta_min = eta_min
        self.t_max = t_max
        self.base_lr = base_lr
        self.current_lr = base_lr
        self.last_epoch = -1
        super(CosineAnnealingAgent, self).__init__(env)

    def act(self, state, reward):
        """Advance one epoch and return the annealed learning rate.

        Bug fix: previously the computed rate was returned but never stored,
        so ``self.current_lr`` stayed at ``base_lr`` and the recursive update
        produced a near-constant schedule instead of annealing to ``eta_min``.
        The formula feeds the previous epoch's rate back into the update, so
        it must be persisted between calls.
        """
        self.last_epoch += 1
        if self.last_epoch == 0:
            self.current_lr = self.base_lr
        elif (self.last_epoch - 1 - self.t_max) % (2 * self.t_max) == 0:
            # Warm-restart boundary: jump back up by the full cosine increment.
            self.current_lr = (
                self.current_lr
                + (self.base_lr - self.eta_min)
                * (1 - math.cos(math.pi / self.t_max))
                / 2
            )
        else:
            # Recursive cosine update based on the previous learning rate.
            self.current_lr = (
                1 + math.cos(math.pi * self.last_epoch / self.t_max)
            ) / (
                1 + math.cos(math.pi * (self.last_epoch - 1) / self.t_max)
            ) * (self.current_lr - self.eta_min) + self.eta_min
        return self.current_lr

    def train(self, state, reward):
        """No-op: the schedule is fixed, nothing is learned."""
        pass

    def end_episode(self, state, reward):
        """No-op: no per-episode bookkeeping required."""
        pass
| 1,100 | 30.457143 | 73 | py |
DACBench | DACBench-main/dacbench/envs/policies/optimal_fd.py | import json
def get_optimum(env, state):
    """Look up the optimal action for the current step of a FastDownward instance.

    The optimal policy is stored next to the instance file: the instance path
    with its last 12 characters replaced by "optimal.json", containing one
    action per time step.

    Parameters
    ----------
    env :
        Environment providing ``get_instance()`` and the step counter ``c_step``.
    state :
        Unused; present for the common policy signature.

    Returns
    -------
    The optimal action for step ``env.c_step``.
    """
    instance = env.get_instance()[:-12] + "optimal.json"
    # Open read-only ("r") — the previous "r+" requested write access for a
    # pure read, failing unnecessarily on read-only files.
    with open(instance, "r") as fp:
        optimal = json.load(fp)
    return optimal[env.c_step]
| 200 | 21.333333 | 56 | py |
DACBench | DACBench-main/dacbench/envs/policies/optimal_sigmoid.py | import numpy as np
def sig(x, scaling, inflection):
    """Simple sigmoid function"""
    return 1 / (1 + np.exp(-scaling * (x - inflection)))


def get_optimum(env, state):
    """Return the optimal discretized action for the Sigmoid env.

    For each action dimension, picks the action ``a`` in ``0..nvec[i]-1``
    whose normalized value ``a / (nvec[i] - 1)`` is closest to the sigmoid
    value at the current time step — matching the normalization used by
    ``SigmoidEnv.get_default_reward``.

    Bug fixes relative to the previous version:
    - ``zip(env.shifts, env.slopes)`` was unpacked as ``slope, shift``
      (swapped relative to the env's reward, which zips slopes first).
    - The candidate loop ranged over ``nvec[i] + 1`` values and mixed two
      different normalizations (``a/nvec[i]`` in the comparison,
      ``a/(nvec[i]+1)`` in the stored distance).
    """
    sigmoids = [
        np.abs(sig(env.c_step, slope, shift))
        for slope, shift in zip(env.slopes, env.shifts)
    ]
    action = []
    for i, n_actions in enumerate(env.action_space.nvec):
        best_action = None
        dist = 100
        # Valid actions are 0..n_actions-1, normalized by (n_actions - 1).
        for a in range(n_actions):
            d = np.abs(sigmoids[i] - a / (n_actions - 1))
            if d < dist:
                dist = d
                best_action = a
        action.append(best_action)
    return action
| 706 | 28.458333 | 79 | py |
DACBench | DACBench-main/dacbench/container/remote_runner.py | """This is strongly guided and partially copy from:https://github.com/automl/HPOBench/blob/master/hpobench/container/client_abstract_benchmark.py"""
import argparse
import logging
import os
import subprocess
import sys
from pathlib import Path
from typing import Optional, Tuple, Union
from uuid import uuid1
import Pyro4
import Pyro4.naming
from dacbench.abstract_agent import AbstractDACBenchAgent
from dacbench.abstract_benchmark import AbstractBenchmark
from dacbench.argument_parsing import PathType
from dacbench.container.container_utils import wait_for_unixsocket
from dacbench.container.remote_env import (
RemoteEnvironmentClient,
RemoteEnvironmentServer,
)
# Needed in order to combine event loops of name_server and daemon
Pyro4.config.SERVERTYPE = "multiplex"
# Read in the verbosity level from the environment variable
log_level_str = os.environ.get("DACBENCH_DEBUG", "false")
# NOTE(review): this first assignment is immediately overwritten below — dead code.
LOG_LEVEL = logging.INFO
LOG_LEVEL = logging.DEBUG if log_level_str == "true" else logging.INFO
# Configure both the root logger and this module's logger with the chosen level.
root = logging.getLogger()
root.setLevel(level=LOG_LEVEL)
logger = logging.getLogger(__name__)
logger.setLevel(level=LOG_LEVEL)
# This option improves the quality of stacktraces if a container crashes
sys.excepthook = Pyro4.util.excepthook
# os.environ["PYRO_LOGFILE"] = "pyro.log"
# os.environ["PYRO_LOGLEVEL"] = "DEBUG"
# Number of tries to connect to server
MAX_TRIES = 5
# Directory under which per-run unix sockets are created.
SOCKET_PATH = Path("/tmp/dacbench/sockets")
@Pyro4.expose
class RemoteRunnerServer:
    """Pyro4-exposed server that hosts a benchmark inside the container."""

    def __init__(self, pyro_demon):
        """Remember the daemon used to register remote objects."""
        self.pyro_demon = pyro_demon
        self.benchmark = None

    def start(self, config: str, benchmark: Tuple[str, str]):
        """Reconstruct the benchmark from its serialized class and config."""
        benchmark_class = AbstractBenchmark.import_from(*benchmark)
        self.benchmark = benchmark_class.from_json(config)

    def get_environment(self) -> str:
        """Wrap a fresh environment in a remote server and return its URI."""
        # set up logger and stuff
        self.env = RemoteEnvironmentServer(self.benchmark.get_environment())
        return self.pyro_demon.register(self.env)
class RemoteRunner:
    """Runner that executes an agent against a benchmark hosted in a container."""

    FACTORY_NAME: str = "RemoteRunnerServerFactory"

    def __init__(
        self,
        benchmark: AbstractBenchmark,
        container_name: str = None,
        container_source: Optional[str] = None,
        container_tag: str = "latest",
        env_str: Optional[str] = "",
        bind_str: Optional[str] = "",
        gpu: Optional[bool] = False,
        socket_id=None,
    ):
        """
        Runner for containers.

        Parameters
        ----------
        benchmark: AbstractBenchmark
            The benchmark to run
        container_name : str
            name for container
        container_source : Optional[str]
            Path to the container. Either local path or url to a hosting platform, e.g. singularity hub.
        container_tag : str
            Singularity containers are specified by an address as well as a container tag. We use the tag as a version
            number. By default the tag is set to `latest`, which then pulls the latest container from the container
            source. The tag-versioning allows the users to rerun an experiment, which was performed with an older
            container version. Take a look in the container_source to find the right tag to use.
        bind_str : Optional[str]
            Defaults to ''. You can bind further directories into the container.
            This string have the form src[:dest[:opts]].
            For more information, see https://sylabs.io/guides/3.5/user-guide/bind_paths_and_mounts.html
        env_str : Optional[str]
            Defaults to ''. Sometimes you want to pass a parameter to your container. You can do this by setting some
            environmental variables. The list should follow the form VAR1=VALUE1,VAR2=VALUE2,..
            For more information, see
            https://sylabs.io/guides/3.5/user-guide/environment_and_metadata.html#environment-overview
        gpu : bool
            If True, the container has access to the local cuda-drivers. (Not tested)
        socket_id : Optional[str]
            Setting up the container is done in two steps:
            1) Start the benchmark on a random generated socket id.
            2) Create a proxy connection to the container via this socket id.
            When no `socket_id` is given, a new container is started. The `socket_id` (address) of this containers is
            stored in the class attribute Benchmark.socket_id
            When a `socket_id` is given, instead of creating a new container, connect only to the container that is
            reachable at `socket_id`. Make sure that a container is already running with the address `socket_id`.
        """
        logger.info(f"Logging level: {logger.level}")
        # connect to already running server if a socket_id is given. In this case, skip the init of
        # the benchmark
        self.__proxy_only = socket_id is not None
        self.__socket_path = SOCKET_PATH
        if not self.__proxy_only:
            self.__socket_id = self.id_generator()
            # todo for now only work with given container source (local)
            self.load_benchmark(
                benchmark=benchmark,
                container_name=container_name,
                container_source=container_source,
                container_tag=container_tag,
            )
            self.__start_server(env_str=env_str, bind_str=bind_str, gpu=gpu)
        else:
            self.__socket_id = socket_id
        self.__connect_to_server(benchmark)

    @property
    def socket(self) -> Path:
        """Path of the unix socket this runner talks to."""
        return self.socket_from_id(self.__socket_id)

    @staticmethod
    def id_generator() -> str:
        """Helper function: Creates unique socket ids for the benchmark server."""
        return str(uuid1())

    @staticmethod
    def socket_from_id(socket_id: str) -> Path:
        """Map a socket id to its unix-socket path under SOCKET_PATH."""
        return Path(SOCKET_PATH) / f"{socket_id}.unixsock"

    def __start_server(self, env_str, bind_str, gpu):
        """
        Starts container and the pyro server.

        Parameters
        ----------
        env_str : str
            Environment string for the container
        bind_str : str
            Bind string for the container
        gpu : bool
            True if the container should use gpu, False otherwise
        """
        # start container
        logger.debug(f"Starting server on {self.socket}")
        # todo add mechanism to to retry if failing
        self.daemon_process = subprocess.Popen(
            [
                "singularity",
                "run",
                "-e",
                str(self.container_source),
                "-u",
                str(self.socket),
            ]
        )
        # todo should be configurable
        wait_for_unixsocket(self.socket, 10)

    def __connect_to_server(self, benchmark: AbstractBenchmark):
        """Connects to the server and initializes the benchmark."""
        # Pyro4.config.REQUIRE_EXPOSE = False
        # Generate Pyro 4 URI for connecting to client
        ns = Pyro4.Proxy(f"PYRO:Pyro.NameServer@./u:{self.socket}")
        factory_uri = ns.lookup(self.FACTORY_NAME)
        factory = Pyro4.Proxy(factory_uri)
        remote_runner_uri = factory.create()
        self.remote_runner: RemoteRunnerServer = Pyro4.Proxy(remote_runner_uri)
        serialized_config = benchmark.to_json()
        serialized_type = benchmark.class_to_str()
        self.remote_runner.start(serialized_config, serialized_type)
        self.env = None

    def get_environment(self):
        """Get (and lazily create) the remote environment client."""
        if self.env is None:
            env_uri = self.remote_runner.get_environment()
            remote_env_server = Pyro4.Proxy(env_uri)
            self.env = RemoteEnvironmentClient(remote_env_server)
        return self.env

    def run(self, agent: AbstractDACBenchAgent, number_of_episodes: int):
        """Run `agent` on the remote environment for `number_of_episodes` episodes."""
        # todo: seeding
        env = self.get_environment()
        for _ in range(number_of_episodes):
            state = env.reset()
            done = False
            reward = 0
            while not done:
                action = agent.act(state, reward)
                next_state, reward, done, _ = env.step(action)
                agent.train(next_state, reward)
                state = next_state
            agent.end_episode(state, reward)
        env.close()
        self.env = None

    def close(self):
        """Terminate the container process if this runner started one."""
        # todo add context manager
        # Bug fix: in proxy-only mode (socket_id given) no daemon process was
        # ever started, and __del__ may also fire after a partially failed
        # __init__ — in both cases there is nothing to terminate, and the old
        # unconditional access raised AttributeError.
        daemon_process = getattr(self, "daemon_process", None)
        if daemon_process is not None:
            daemon_process.terminate()
            daemon_process.wait()

    def __del__(self):
        """Close on garbage collection."""
        self.close()

    def load_benchmark(
        self,
        benchmark: AbstractBenchmark,
        container_name: str,
        container_source: Union[str, Path],
        container_tag: str,
    ):
        """Resolve the container source path for the given benchmark recipe."""
        # see for implementation guideline hpobench hpobench/container/client_abstract_benchmark.py
        # in the end self.container_source should contain the path to the file to run
        logger.warning("Only container source is used")
        container_source = (
            container_source
            if isinstance(container_source, Path)
            else Path(container_source)
        )
        self.container_source = container_source
@Pyro4.expose
class RemoteRunnerServerFactory:
    """Creates RemoteRunnerServer instances on request."""

    def __init__(self, pyro_demon):
        """Remember the daemon to register created servers with."""
        self.pyro_demon = pyro_demon

    def create(self):
        """Create a RemoteRunnerServer and return its Pyro URI."""
        remote_runner_server = RemoteRunnerServer(pyro_demon=self.pyro_demon)
        # Bug fix: register with the daemon handed to this factory instead of
        # the module-global ``daemon``, which only exists when this module is
        # executed as a script (defined under ``if __name__ == "__main__"``).
        remote_runner_server_uri = self.pyro_demon.register(remote_runner_server)
        return remote_runner_server_uri

    def __call__(self):
        """Alias for :meth:`create`."""
        return self.create()
if __name__ == "__main__":
    # Entry point executed inside the container: starts a Pyro4 name server
    # and daemon on unix sockets and serves the RemoteRunnerServerFactory.
    # todo refactor move to RemoverRunnerServer
    parser = argparse.ArgumentParser(
        description="Runs the benchmark remote server inside a container"
    )
    parser.add_argument(
        "--unixsocket",
        "-u",
        type=PathType(exists=False, type="socket"),
        required=False,
        default=None,
        dest="socket",
        help="The path to a exiting socket to run the name server on. If none a new socket unixsocket is created.",
    )
    args = parser.parse_args()
    # One socket for the Pyro daemon, one for the name server (unless given on the CLI).
    daemon_socket = RemoteRunner.socket_from_id(RemoteRunner.id_generator())
    ns_socket = (
        args.socket
        if args.socket
        else RemoteRunner.socket_from_id(RemoteRunner.id_generator())
    )
    print(ns_socket)
    daemon_socket.parent.mkdir(parents=True, exist_ok=True)
    ns_socket.parent.mkdir(parents=True, exist_ok=True)
    print(
        f"Starting Pyro4 Nameserver on {ns_socket} and Pyro4 Daemon on {daemon_socket}"
    )
    # Run name server and daemon in one event loop (requires SERVERTYPE="multiplex").
    name_server_uir, name_server_daemon, _ = Pyro4.naming.startNS(
        unixsocket=str(ns_socket)
    )
    daemon = Pyro4.Daemon(unixsocket=str(daemon_socket))
    daemon.combine(name_server_daemon)
    # Register the factory so clients can create RemoteRunnerServer objects by name.
    factory = RemoteRunnerServerFactory(daemon)
    factory_uri = daemon.register(factory)
    name_server_daemon.nameserver.register("RemoteRunnerServerFactory", factory_uri)
    # Blocks until shutdown; remove the sockets afterwards.
    daemon.requestLoop()
    daemon_socket.unlink()
    ns_socket.unlink()
ns_socket.unlink()
| 11,481 | 33.172619 | 148 | py |
DACBench | DACBench-main/dacbench/container/remote_env.py | import json
from numbers import Number
from typing import Dict, List, Tuple, Union
import numpy as np
import Pyro4
from dacbench.abstract_env import AbstractEnv
from dacbench.container.container_utils import Decoder, Encoder
# Numpy types that can cross the JSON boundary via the custom Encoder/Decoder.
NumpyTypes = Union[np.ndarray, np.int32, np.float32, np.random.RandomState]
# Values JSON can represent natively (recursive alias).
DefaultJsonable = Union[
    bool,
    None,
    Dict[str, "DefaultJsonable"],
    List["DefaultJsonable"],
    Tuple["DefaultJsonable"],
    str,
    float,
    int,
]
# Everything json_encode/json_decode can handle: native JSON plus numpy types.
Jsonable = Union[
    List["Jsonable"],
    Dict[str, "Jsonable"],
    Tuple["Jsonable"],
    DefaultJsonable,
    NumpyTypes,
]
def json_encode(obj: Jsonable) -> str:
    """Serialize ``obj`` to a compact JSON string using the container Encoder."""
    encoded = json.dumps(obj, indent=None, cls=Encoder)
    return encoded
def json_decode(json_str: str) -> Jsonable:
    """Parse a JSON string produced by :func:`json_encode` back into objects."""
    decoded = json.loads(json_str, cls=Decoder)
    return decoded
@Pyro4.expose
class RemoteEnvironmentServer:
    """Container-side wrapper exposing an environment via Pyro4.

    All arguments and return values cross the process boundary as JSON
    strings produced by ``json_encode`` / ``json_decode``.
    """

    def __init__(self, env):
        """Wrap ``env`` for remote access."""
        self.__env: AbstractEnv = env

    def step(self, action: Union[Dict[str, List[Number]], List[Number]]):
        """Decode the action, step the wrapped env, return the encoded result."""
        decoded_action = json_decode(action)
        return json_encode(self.__env.step(decoded_action))

    def reset(self):
        """Reset the wrapped env and return the encoded initial state."""
        return json_encode(self.__env.reset())

    def render(self, mode="human"):
        """Rendering is not supported across the remote boundary."""
        # ever used?
        pass

    def close(self):
        """Close the wrapped environment."""
        self.__env.close()

    @property
    def action_space(self):
        """Encoded action space of the wrapped environment."""
        return json_encode(self.__env.action_space)
class RemoteEnvironmentClient:
    """Local proxy talking to a :class:`RemoteEnvironmentServer`.

    Encodes outgoing values to JSON and decodes the server's replies, so the
    remote environment can be used like a regular one.
    """

    def __init__(self, env: RemoteEnvironmentServer):
        """Store the remote server proxy."""
        self.__env = env

    def step(
        self, action: Union[Dict[str, np.ndarray], np.ndarray]
    ) -> Tuple[Union[Dict[str, np.ndarray], np.ndarray], Number, bool, dict]:
        """Send an action to the remote env and return the decoded transition."""
        reply = self.__env.step(json_encode(action))
        state, reward, done, info = json_decode(reply)
        return state, reward, done, info

    def reset(self) -> Union[Dict[str, np.ndarray], np.ndarray]:
        """Reset the remote env and return the decoded initial state."""
        return json_decode(self.__env.reset())

    def close(self):
        """Close the remote environment."""
        self.__env.close()

    @property
    def action_space(self):
        """Decoded action space of the remote environment."""
        return json_decode(self.__env.action_space)
| 2,618 | 23.027523 | 77 | py |
DACBench | DACBench-main/dacbench/container/container_utils.py | import enum
import json
import os
import socket
import time
from typing import Any, Dict, Union
import gymnasium as gym
import numpy as np
class Encoder(json.JSONEncoder):
    """
    Json Encoder to save tuple and or numpy arrays | numpy floats / integer.

    Adapted from: https://github.com/automl/HPOBench/blob/master/hpobench/util/container_utils.py
    Serializing tuple/numpy array may not work. We need to annotate those types, to reconstruct them correctly.
    """

    @staticmethod
    def hint(item):
        """Recursively annotate ``item`` with ``__type__`` tags so Decoder can reconstruct it."""
        # Annotate the different item types
        if isinstance(item, tuple):
            return {"__type__": "tuple", "__items__": [Encoder.hint(e) for e in item]}
        if isinstance(item, np.ndarray):
            return {"__type__": "np.ndarray", "__items__": item.tolist()}
        if isinstance(item, np.floating):
            return {"__type__": "np.float", "__items__": float(item)}
        if isinstance(item, np.integer):
            return {"__type__": "np.int32", "__items__": item.tolist()}
        if isinstance(item, enum.Enum):
            return str(item)
        if isinstance(item, gym.Space):
            return Encoder.encode_space(item)
        if isinstance(item, np.dtype):
            return {"__type__": "np.dtype", "__items__": str(item)}
        # If it is a container data structure, go also through the items.
        if isinstance(item, list):
            return [Encoder.hint(e) for e in item]
        if isinstance(item, dict):
            return {key: Encoder.hint(value) for key, value in item.items()}
        return item

    # pylint: disable=arguments-differ
    def encode(self, obj):
        """Encode ``obj`` to JSON after annotating its types."""
        return super(Encoder, self).encode(Encoder.hint(obj))

    @staticmethod
    def encode_space(space_obj: gym.Space):
        """Encode a gym space as a dict of its constructor arguments."""
        type_name = ".".join(
            [space_obj.__class__.__module__, space_obj.__class__.__name__]
        )
        properties = [("__type__", type_name)]
        if isinstance(
            space_obj,
            (
                gym.spaces.Box,
                gym.spaces.Discrete,
                gym.spaces.MultiDiscrete,
                gym.spaces.MultiBinary,
            ),
        ):
            # by default assume all constructor arguments are stored under the same name
            # for box we need to drop shape, since either shape or an array for low and high is required
            __init__ = space_obj.__init__.__func__.__code__
            local_vars = __init__.co_varnames
            # drop self and non-args (self, arg1, arg2, ..., local_var1, local_var2, ...)
            arguments = local_vars[1 : __init__.co_argcount]
            attributes_to_serialize = list(
                filter(lambda att: att not in ["shape", "seed"], arguments)
            )
            for attribute in attributes_to_serialize:
                if hasattr(space_obj, attribute):
                    properties.append(
                        (attribute, Encoder.hint(getattr(space_obj, attribute)))
                    )
        elif isinstance(space_obj, gym.spaces.Tuple):
            properties.append(
                ("spaces", [Encoder.encode_space(space) for space in space_obj.spaces])
            )
        elif isinstance(space_obj, gym.spaces.Dict):
            properties.append(
                (
                    "spaces",
                    {
                        name: Encoder.encode_space(space)
                        for name, space in space_obj.spaces.items()
                    },
                )
            )
        else:
            # Bug fix: ``properties`` is still a *list of tuples* here, so the
            # old ``properties['__type__']`` raised TypeError instead of the
            # intended NotImplementedError message.
            raise NotImplementedError(
                f"Serialisation for type {type_name} not implemented"
            )
        return dict(properties)
class Decoder(json.JSONDecoder):
    """Adapted from: https://github.com/automl/HPOBench/blob/master/hpobench/util/container_utils.py"""

    def __init__(self, *args, **kwargs):
        """Install the type-annotation object hook."""
        json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)

    def object_hook(
        self, obj: Any
    ) -> Union[Union[tuple, np.ndarray, float, float, int], Any]:
        """Reconstruct objects annotated with ``__type__`` tags by :class:`Encoder`."""
        if "__type__" in obj:
            __type = obj["__type__"]
            if __type == "tuple":
                return tuple(obj["__items__"])
            if __type == "np.ndarray":
                return np.array(obj["__items__"])
            if __type == "np.float":
                # Bug fix: ``np.float`` was removed in NumPy 1.24; it was an
                # alias of the builtin ``float`` anyway, so decode to that.
                return float(obj["__items__"])
            if __type == "np.int32":
                return np.int32(obj["__items__"])
            if __type == "np.dtype":
                return np.dtype(obj["__items__"])
            if __type.startswith("gymnasium.spaces."):
                return self.decode_space(obj)
        return obj

    def decode_space(self, space_dict: Dict) -> gym.Space:
        """Rebuild a gym space from its encoded dict of constructor arguments."""
        __type = space_dict["__type__"]
        __class = getattr(gym.spaces, __type.split(".")[-1])
        args = {
            name: value
            for name, value in space_dict.items()
            if name not in ["__type__", "shape"]
        }
        # temporally remove subspace since constructor reseeds them
        if issubclass(__class, (gym.spaces.Tuple, gym.spaces.Dict)):
            spaces = args["spaces"]
            args["spaces"] = type(args["spaces"])()
        space_object = __class(**args)
        # re-insert afterwards
        if issubclass(__class, (gym.spaces.Tuple, gym.spaces.Dict)):
            space_object.spaces = spaces
        if isinstance(space_object, gym.spaces.Tuple):
            space_object.spaces = tuple(space_object.spaces)
        # (removed leftover debug print of the decoded space)
        return space_object
def wait_for_unixsocket(path: str, timeout: float = 10.0) -> None:
    """
    Block until a UNIX socket appears at ``path`` or the timeout elapses.

    :param path: path to the socket
    :param timeout: timeout in seconds
    :return:
    """
    deadline = time.time() + timeout
    while not os.path.exists(path):
        if time.time() > deadline:
            raise TimeoutError(
                f"Timeout ({timeout}s) waiting for UNIX socket {path} to be created"
            )
        time.sleep(0.1)
def wait_for_port(port, host="localhost", timeout=5.0):
    """
    Block until ``host:port`` starts accepting TCP connections.

    Taken from https://gist.github.com/butla/2d9a4c0f35ea47b7452156c96a4e7b12

    Parameters
    ----------
    port : int
        Port number to check.
    host : str
        Host to check.
    timeout : float
        Timeout in seconds.

    Raises
    ------
    TimeoutError: The port isn't accepting connection after time specified in `timeout`.
    """
    began = time.perf_counter()
    connected = False
    while not connected:
        try:
            conn = socket.create_connection((host, port), timeout=timeout)
        except OSError as ex:
            time.sleep(0.01)
            if time.perf_counter() - began >= timeout:
                raise TimeoutError(
                    "Waited too long for the port {} on host {} to start accepting "
                    "connections.".format(port, host)
                ) from ex
        else:
            conn.close()
            connected = True
| 7,313 | 33.17757 | 131 | py |
DACBench | DACBench-main/dacbench/container/__init__.py | 0 | 0 | 0 | py | |
DACBench | DACBench-main/dacbench/agents/generic_agent.py | from dacbench.abstract_agent import AbstractDACBenchAgent
class GenericAgent(AbstractDACBenchAgent):
    """Agent that follows a fixed policy callable."""

    def __init__(self, env, policy):
        """Store the environment and the policy callable."""
        self.env = env
        self.policy = policy

    def act(self, state, reward):
        """Query the policy for the next action."""
        return self.policy(self.env, state)

    def train(self, next_state, reward):
        """No-op: the policy is fixed."""
        pass

    def end_episode(self, state, reward):
        """No-op: nothing to reset between episodes."""
        pass
| 382 | 21.529412 | 57 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.