repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
rllab | rllab-master/rllab/envs/box2d/mountain_car_env.py | import numpy as np
import pygame
from rllab.envs.box2d.parser import find_body
from rllab.core.serializable import Serializable
from rllab.envs.box2d.box2d_env import Box2DEnv
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
class MountainCarEnv(Box2DEnv, Serializable):
    """Mountain-car task: drive an under-powered cart out of a valley.

    Every step yields a reward of -1 plus a bonus proportional to the
    cart's height; the episode terminates when the cart reaches
    ``goal_cart_pos`` or leaves the valid horizontal range.
    """

    @autoargs.inherit(Box2DEnv.__init__)
    # BUG FIX: this declaration previously used the stale name
    # "height_bonus_coeff", which does not match the actual constructor
    # parameter ``height_bonus`` below.
    @autoargs.arg("height_bonus", type=float,
                  help="Height bonus added to each step's reward")
    @autoargs.arg("goal_cart_pos", type=float,
                  help="Goal horizontal position")
    def __init__(self,
                 height_bonus=1.,
                 goal_cart_pos=0.6,
                 *args, **kwargs):
        """
        :param height_bonus: coefficient of the per-step height bonus.
        :param goal_cart_pos: horizontal position the cart must reach.
        """
        super(MountainCarEnv, self).__init__(
            self.model_path("mountain_car.xml.mako"),
            *args, **kwargs
        )
        self.max_cart_pos = 2
        self.goal_cart_pos = goal_cart_pos
        self.height_bonus = height_bonus
        self.cart = find_body(self.world, "cart")
        Serializable.quick_init(self, locals())

    @overrides
    def compute_reward(self, action):
        # Generator protocol (see Box2DEnv.compute_reward): nothing to
        # precompute before the physics step, then emit the reward.
        yield
        yield (-1 + self.height_bonus * self.cart.position[1])

    @overrides
    def is_current_done(self):
        """Done when the goal is reached or the cart leaves the valid range."""
        return self.cart.position[0] >= self.goal_cart_pos \
            or abs(self.cart.position[0]) >= self.max_cart_pos

    @overrides
    def reset(self):
        """Restore the initial state and randomize the horizontal velocity."""
        self._set_state(self.initial_state)
        self._invalidate_state_caches()
        # Equivalent to the previous two-row bounds array, but simpler:
        # draw a single uniform velocity in [-1, 1).
        xvel = np.random.uniform(-1, 1)
        self.cart.linearVelocity = (float(xvel), self.cart.linearVelocity[1])
        return self.get_current_obs()

    @overrides
    def action_from_keys(self, keys):
        """Map pygame arrow keys to a 1-D force action for manual control."""
        if keys[pygame.K_LEFT]:
            return np.asarray([-1])
        elif keys[pygame.K_RIGHT]:
            return np.asarray([+1])
        else:
            return np.asarray([0])
| 1,964 | 29.703125 | 77 | py |
rllab | rllab-master/rllab/envs/box2d/box2d_env.py | import os.path as osp
import mako.lookup
import mako.template
import numpy as np
from rllab import spaces
from rllab.envs.base import Env, Step
from rllab.envs.box2d.box2d_viewer import Box2DViewer
from rllab.envs.box2d.parser.xml_box2d import world_from_xml, find_body, \
find_joint
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
BIG = 1e6
class Box2DEnv(Env):
    """Base class for Box2D-backed environments.

    Builds a Box2D world from an XML model file (optionally rendered from a
    mako template) and exposes the standard ``Env`` interface on top of it.
    Concrete tasks subclass this and implement ``compute_reward`` and
    ``is_current_done``.
    """

    @autoargs.arg("frame_skip", type=int,
                  help="Number of frames to skip")
    @autoargs.arg('position_only', type=bool,
                  help='Whether to only provide (generalized) position as the '
                       'observation (i.e. no velocities etc.)')
    @autoargs.arg('obs_noise', type=float,
                  help='Noise added to the observations (note: this makes the '
                       'problem non-Markovian!)')
    @autoargs.arg('action_noise', type=float,
                  help='Noise added to the controls, which will be '
                       'proportional to the action bounds')
    def __init__(
            self, model_path, frame_skip=1, position_only=False,
            obs_noise=0.0, action_noise=0.0, template_string=None,
            template_args=None,
    ):
        """Load the model, build the Box2D world and initialize caches.

        :param model_path: path to the Box2D XML file or ``.mako`` template.
        :param frame_skip: number of physics steps per ``step()`` call.
        :param position_only: observe only positional state components.
        :param obs_noise: scale of Gaussian noise added to observations.
        :param action_noise: noise added to actions, scaled by the bounds.
        :param template_string: pre-rendered XML; skips reading the file.
        :param template_args: dict passed to the mako template as ``opts``.
        """
        self.full_model_path = model_path
        if template_string is None:
            if model_path.endswith(".mako"):
                # Render the mako template to obtain the actual XML.
                with open(model_path) as template_file:
                    template = mako.template.Template(
                        template_file.read())
                template_string = template.render(
                    opts=template_args if template_args is not None else {},
                )
            else:
                with open(model_path, "r") as f:
                    template_string = f.read()
        world, extra_data = world_from_xml(template_string)
        self.world = world
        self.extra_data = extra_data
        # Snapshot of the freshly built world; restored by reset().
        self.initial_state = self._state
        self.viewer = None
        self.frame_skip = frame_skip
        self.timestep = self.extra_data.timeStep
        self.position_only = position_only
        self.obs_noise = obs_noise
        self.action_noise = action_noise
        self._action_bounds = None  # NOTE(review): appears unused below
        # cache the computation of position mask
        self._position_ids = None
        self._cached_obs = None
        self._cached_coms = {}

    def model_path(self, file_name):
        """Return the absolute path of a model file under ``models/``."""
        return osp.abspath(osp.join(osp.dirname(__file__),
                                    'models/%s' % file_name))

    def _set_state(self, state):
        """Restore a flat state vector: 6 numbers per body
        (x, y, angle, vx, vy, angular velocity), in world body order."""
        splitted = np.array(state).reshape((-1, 6))
        for body, body_state in zip(self.world.bodies, splitted):
            xpos, ypos, apos, xvel, yvel, avel = body_state
            body.position = (xpos, ypos)
            body.angle = apos
            body.linearVelocity = (xvel, yvel)
            body.angularVelocity = avel

    @overrides
    def reset(self):
        """Restore the initial state and return the first observation."""
        self._set_state(self.initial_state)
        self._invalidate_state_caches()
        return self.get_current_obs()

    def _invalidate_state_caches(self):
        # Must be called after any state mutation so observations and
        # center-of-mass values are recomputed.
        self._cached_obs = None
        self._cached_coms = {}

    @property
    def _state(self):
        """Flat state vector: 6 numbers per body, concatenated in order."""
        s = []
        for body in self.world.bodies:
            s.append(np.concatenate([
                list(body.position),
                [body.angle],
                list(body.linearVelocity),
                [body.angularVelocity]
            ]))
        return np.concatenate(s)

    @property
    @overrides
    def action_space(self):
        """Box space bounded by the control limits declared in the XML."""
        lb = np.array([control.ctrllimit[0] for control in self.extra_data.controls])
        ub = np.array([control.ctrllimit[1] for control in self.extra_data.controls])
        return spaces.Box(lb, ub)

    @property
    @overrides
    def observation_space(self):
        """Box space of the declared states (positions only if configured)."""
        if self.position_only:
            d = len(self._get_position_ids())
        else:
            d = len(self.extra_data.states)
        ub = BIG * np.ones(d)
        return spaces.Box(ub*-1, ub)

    @property
    def action_bounds(self):
        """(lower, upper) bound arrays of the action space."""
        return self.action_space.bounds

    def forward_dynamics(self, action):
        """Apply ``action`` to the world and advance one physics step.

        Forces are applied to bodies, torques drive joint motors (the motor
        speed is saturated and the magnitude encoded via maxMotorTorque).

        :raises ValueError: if the action has the wrong dimension.
        """
        # NOTE(review): ``self.action_dim`` is presumably provided by the
        # ``Env`` base class — not defined in this file.
        if len(action) != self.action_dim:
            raise ValueError('incorrect action dimension: expected %d but got '
                             '%d' % (self.action_dim, len(action)))
        lb, ub = self.action_bounds
        action = np.clip(action, lb, ub)
        for ctrl, act in zip(self.extra_data.controls, action):
            if ctrl.typ == "force":
                for name in ctrl.bodies:
                    body = find_body(self.world, name)
                    # Normalize the force direction declared in the XML.
                    direction = np.array(ctrl.direction)
                    direction = direction / np.linalg.norm(direction)
                    world_force = body.GetWorldVector(direction * act)
                    world_point = body.GetWorldPoint(ctrl.anchor)
                    body.ApplyForce(world_force, world_point, wake=True)
            elif ctrl.typ == "torque":
                assert ctrl.joint
                joint = find_joint(self.world, ctrl.joint)
                joint.motorEnabled = True
                # forces the maximum allowed torque to be taken
                if act > 0:
                    joint.motorSpeed = 1e5
                else:
                    joint.motorSpeed = -1e5
                joint.maxMotorTorque = abs(act)
            else:
                raise NotImplementedError
        self.before_world_step(action)
        self.world.Step(
            self.extra_data.timeStep,
            self.extra_data.velocityIterations,
            self.extra_data.positionIterations
        )

    def compute_reward(self, action):
        """
        The implementation of this method should have two parts, structured
        like the following:

        <perform calculations before stepping the world>
        yield
        reward = <perform calculations after stepping the world>
        yield reward
        """
        raise NotImplementedError

    @overrides
    def step(self, action):
        """
        Note: override this method with great care, as it post-processes the
        observations, etc.
        """
        # Create the two-phase reward generator before the world is stepped.
        reward_computer = self.compute_reward(action)
        # forward the state
        action = self._inject_action_noise(action)
        for _ in range(self.frame_skip):
            self.forward_dynamics(action)
        # notifies that we have stepped the world
        next(reward_computer)
        # actually get the reward
        reward = next(reward_computer)
        self._invalidate_state_caches()
        done = self.is_current_done()
        next_obs = self.get_current_obs()
        return Step(observation=next_obs, reward=reward, done=done)

    def _filter_position(self, obs):
        """
        Filter the observation to contain only position information.
        """
        return obs[self._get_position_ids()]

    def get_obs_noise_scale_factor(self, obs):
        # Per-entry noise scale; subclasses may override to shape the noise.
        return np.ones_like(obs)

    def _inject_obs_noise(self, obs):
        """
        Inject entry-wise noise to the observation. This should not change
        the dimension of the observation.
        """
        noise = self.get_obs_noise_scale_factor(obs) * self.obs_noise * \
            np.random.normal(size=obs.shape)
        return obs + noise

    def get_current_reward(
            self, state, xml_obs, action, next_state, next_xml_obs):
        # Legacy hook; not called by step() above.
        raise NotImplementedError

    def is_current_done(self):
        """Subclasses decide when the episode terminates."""
        raise NotImplementedError

    def _inject_action_noise(self, action):
        # generate action noise
        noise = self.action_noise * \
            np.random.normal(size=action.shape)
        # rescale the noise to make it proportional to the action bounds
        lb, ub = self.action_bounds
        noise = 0.5 * (ub - lb) * noise
        return action + noise

    def get_current_obs(self):
        """
        This method should not be overwritten.

        Returns the raw observation with noise injected and, if configured,
        filtered down to positional entries.
        """
        raw_obs = self.get_raw_obs()
        noisy_obs = self._inject_obs_noise(raw_obs)
        if self.position_only:
            return self._filter_position(noisy_obs)
        return noisy_obs

    def _get_position_ids(self):
        # Lazily compute the indices of position-like state entries.
        if self._position_ids is None:
            self._position_ids = []
            for idx, state in enumerate(self.extra_data.states):
                if state.typ in ["xpos", "ypos", "apos", "dist", "angle"]:
                    self._position_ids.append(idx)
        return self._position_ids

    def get_raw_obs(self):
        """
        Return the unfiltered & noiseless observation. By default, it computes
        based on the declarations in the xml file.
        """
        if self._cached_obs is not None:
            return self._cached_obs
        obs = []
        for state in self.extra_data.states:
            new_obs = None
            if state.body:
                body = find_body(self.world, state.body)
                if state.local is not None:
                    l = state.local
                    position = body.GetWorldPoint(l)
                    linearVel = body.GetLinearVelocityFromLocalPoint(l)
                    # NOTE: angle observations are not supported for local
                    # points; only position/velocity are well-defined here.
                else:
                    position = body.position
                    linearVel = body.linearVelocity
                # ``to`` is only bound when declared; it is required by the
                # "dist" and "angle" observation types below.
                if state.to is not None:
                    to = find_body(self.world, state.to)
                if state.typ == "xpos":
                    new_obs = position[0]
                elif state.typ == "ypos":
                    new_obs = position[1]
                elif state.typ == "xvel":
                    new_obs = linearVel[0]
                elif state.typ == "yvel":
                    new_obs = linearVel[1]
                elif state.typ == "apos":
                    new_obs = body.angle
                elif state.typ == "avel":
                    new_obs = body.angularVelocity
                elif state.typ == "dist":
                    new_obs = np.linalg.norm(position - to.position)
                elif state.typ == "angle":
                    # Angle of the vector to ``to`` measured against the
                    # +y axis, offset by the body's own orientation.
                    diff = to.position - position
                    abs_angle = np.arccos(
                        diff.dot((0, 1)) / np.linalg.norm(diff))
                    new_obs = body.angle + abs_angle
                else:
                    raise NotImplementedError
            elif state.joint:
                joint = find_joint(self.world, state.joint)
                if state.typ == "apos":
                    new_obs = joint.angle
                elif state.typ == "avel":
                    new_obs = joint.speed
                else:
                    raise NotImplementedError
            elif state.com:
                # Combined center-of-mass of the named bodies:
                # (x, y, vx, vy).
                com_quant = self._compute_com_pos_vel(*state.com)
                if state.typ == "xpos":
                    new_obs = com_quant[0]
                elif state.typ == "ypos":
                    new_obs = com_quant[1]
                elif state.typ == "xvel":
                    new_obs = com_quant[2]
                elif state.typ == "yvel":
                    new_obs = com_quant[3]
                else:
                    print(state.typ)
                    # orientation and angular velocity of the whole body is not
                    # supported
                    raise NotImplementedError
            else:
                raise NotImplementedError
            # Optional elementwise transform declared in the XML.
            if state.transform is not None:
                if state.transform == "id":
                    pass
                elif state.transform == "sin":
                    new_obs = np.sin(new_obs)
                elif state.transform == "cos":
                    new_obs = np.cos(new_obs)
                else:
                    raise NotImplementedError
            obs.append(new_obs)
        self._cached_obs = np.array(obs)
        return self._cached_obs

    def _compute_com_pos_vel(self, *com):
        """Mass-weighted (x, y, vx, vy) of the named bodies, memoized per
        state-cache generation."""
        com_key = ",".join(sorted(com))
        if com_key in self._cached_coms:
            return self._cached_coms[com_key]
        total_mass_quant = 0
        total_mass = 0
        for body_name in com:
            body = find_body(self.world, body_name)
            total_mass_quant += body.mass * np.array(
                list(body.worldCenter) + list(body.linearVelocity))
            total_mass += body.mass
        com_quant = total_mass_quant / total_mass
        self._cached_coms[com_key] = com_quant
        return com_quant

    def get_com_position(self, *com):
        """Combined center-of-mass position (x, y) of the named bodies."""
        return self._compute_com_pos_vel(*com)[:2]

    def get_com_velocity(self, *com):
        """Combined center-of-mass velocity (vx, vy) of the named bodies."""
        return self._compute_com_pos_vel(*com)[2:]

    @overrides
    def render(self, states=None, actions=None, pause=False):
        """Render the world with the pygame viewer (created lazily)."""
        if not self.viewer:
            self.viewer = Box2DViewer(self.world)
        if states or actions or pause:
            raise NotImplementedError
        # NOTE(review): the viewer was just created above, so this branch
        # looks unreachable; ``start_viewer`` is not defined in this file.
        if not self.viewer:
            self.start_viewer()
        if self.viewer:
            self.viewer.loop_once()

    def before_world_step(self, action):
        """Hook invoked right before each physics step; default no-op."""
        pass

    def action_from_keys(self, keys):
        """Map pressed pygame keys to an action (for manual control)."""
        raise NotImplementedError
| 13,078 | 35.229917 | 85 | py |
rllab | rllab-master/rllab/envs/box2d/double_pendulum_env.py | import numpy as np
from rllab.envs.box2d.parser import find_body
from rllab.core.serializable import Serializable
from rllab.envs.box2d.box2d_env import Box2DEnv
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
# http://mlg.eng.cam.ac.uk/pilco/
class DoublePendulumEnv(Box2DEnv, Serializable):
    """Double-pendulum swing-up task (cf. the PILCO benchmark)."""

    @autoargs.inherit(Box2DEnv.__init__)
    def __init__(self, *args, **kwargs):
        """Build the world; optionally randomize the link length."""
        # One mdp-level step should last 100ms, i.e. two 50ms frames.
        kwargs.setdefault("frame_skip", 2)
        template_args = kwargs.setdefault("template_args", {})
        if template_args.get("noise", False):
            # Perturb the link length uniformly within [0.5, 1.5).
            self.link_len = (np.random.rand() - 0.5) + 1
        else:
            self.link_len = 1
        template_args["link_len"] = self.link_len
        super(DoublePendulumEnv, self).__init__(
            self.model_path("double_pendulum.xml.mako"),
            *args, **kwargs
        )
        self.link1 = find_body(self.world, "link1")
        self.link2 = find_body(self.world, "link2")
        Serializable.__init__(self, *args, **kwargs)

    @overrides
    def reset(self):
        """Reset and apply small Gaussian perturbations to the joint state."""
        self._set_state(self.initial_state)
        self._invalidate_state_caches()
        scales = np.array([0.1, 0.1, 0.01, 0.01])
        angle1, angle2, avel1, avel2 = np.random.randn(*scales.shape) * scales
        self.link1.angle = angle1
        self.link2.angle = angle2
        self.link1.angularVelocity = avel1
        self.link2.angularVelocity = avel2
        return self.get_current_obs()

    def get_tip_pos(self):
        """World-frame position of the free tip of the second link."""
        link2_center = self.link2.position
        link2_angle = self.link2.angle
        return (
            link2_center[0] - self.link_len * np.sin(link2_angle),
            link2_center[1] - self.link_len * np.cos(link2_angle),
        )

    @overrides
    def compute_reward(self, action):
        # Nothing to precompute before the physics step.
        yield
        # Reward is the negative distance from the tip to the upright target.
        target = np.asarray([0, self.link_len * 2])
        yield -np.linalg.norm(self.get_tip_pos() - target)

    def is_current_done(self):
        # Swing-up runs for a fixed horizon; it never terminates early.
        return False
| 2,093 | 32.238095 | 65 | py |
rllab | rllab-master/rllab/envs/box2d/cartpole_env.py | import numpy as np
from rllab.envs.box2d.parser import find_body
from rllab.core.serializable import Serializable
from rllab.envs.box2d.box2d_env import Box2DEnv
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
class CartpoleEnv(Box2DEnv, Serializable):
    """Classic cart-pole balancing task on top of the Box2D simulator."""

    @autoargs.inherit(Box2DEnv.__init__)
    def __init__(self, *args, **kwargs):
        """Build the world from the cartpole model and cache its bodies."""
        self.max_pole_angle = .2
        self.max_cart_pos = 2.4
        self.max_cart_speed = 4.
        self.max_pole_speed = 4.
        self.reset_range = 0.05
        super(CartpoleEnv, self).__init__(
            self.model_path("cartpole.xml.mako"),
            *args, **kwargs
        )
        self.cart = find_body(self.world, "cart")
        self.pole = find_body(self.world, "pole")
        Serializable.__init__(self, *args, **kwargs)

    @overrides
    def reset(self):
        """Reset and perturb the state uniformly within a small range."""
        self._set_state(self.initial_state)
        self._invalidate_state_caches()
        limits = np.array([
            self.max_cart_pos,
            self.max_cart_speed,
            self.max_pole_angle,
            self.max_pole_speed,
        ])
        low, high = -self.reset_range * limits, self.reset_range * limits
        xpos, xvel, apos, avel = np.random.uniform(low, high)
        self.cart.position = (xpos, self.cart.position[1])
        self.cart.linearVelocity = (xvel, self.cart.linearVelocity[1])
        self.pole.angle = apos
        self.pole.angularVelocity = avel
        return self.get_current_obs()

    @overrides
    def compute_reward(self, action):
        # First resume point: before the world step (nothing to do).
        yield
        # Reward only while alive: 10 minus angle and control costs.
        alive = 0 if self.is_current_done() else 1
        ctrl_cost = 1e-5 * (action ** 2).sum()
        angle_cost = 1 - np.cos(self.pole.angle)
        yield alive * (10 - angle_cost - ctrl_cost)

    @overrides
    def is_current_done(self):
        """Done when the cart leaves the track or the pole falls too far."""
        cart_off_track = abs(self.cart.position[0]) > self.max_cart_pos
        pole_fell = abs(self.pole.angle) > self.max_pole_angle
        return cart_off_track or pole_fell
| 1,908 | 31.913793 | 70 | py |
rllab | rllab-master/rllab/envs/box2d/box2d_viewer.py | from Box2D import b2ContactListener, b2Vec2, b2DrawExtended
import pygame
from pygame import (QUIT, KEYDOWN, KEYUP, MOUSEBUTTONDOWN, MOUSEMOTION)
class PygameDraw(b2DrawExtended):
    """
    This debug draw class accepts callbacks from Box2D (which specifies what
    to draw) and handles all of the rendering.

    If you are writing your own game, you likely will not want to use debug
    drawing. Debug drawing, as its name implies, is for debugging.
    """
    surface = None
    axisScale = 50.0

    def __init__(self, test=None, **kwargs):
        """:param test: owning viewer supplying zoom/center/offset values."""
        b2DrawExtended.__init__(self, **kwargs)
        self.flipX = False
        self.flipY = True
        self.convertVertices = True
        self.test = test
        self.flags = dict(
            drawShapes=True,
            convertVertices=True,
        )

    def StartDraw(self):
        """Pull the current camera parameters from the owning viewer."""
        self.zoom = self.test.viewZoom
        self.center = self.test.viewCenter
        self.offset = self.test.viewOffset
        self.screenSize = self.test.screenSize

    def EndDraw(self):
        pass

    def DrawPoint(self, p, size, color):
        """
        Draw a single point at point p given a pixel size and color.
        """
        self.DrawCircle(p, size / self.zoom, color, drawwidth=0)

    def DrawAABB(self, aabb, color):
        """
        Draw a wireframe around the AABB with the given color.
        """
        points = [(aabb.lowerBound.x, aabb.lowerBound.y),
                  (aabb.upperBound.x, aabb.lowerBound.y),
                  (aabb.upperBound.x, aabb.upperBound.y),
                  (aabb.lowerBound.x, aabb.upperBound.y)]
        pygame.draw.aalines(self.surface, color, True, points)

    def DrawSegment(self, p1, p2, color):
        """
        Draw the line segment from p1-p2 with the specified color.
        """
        pygame.draw.aaline(self.surface, color.bytes, p1, p2)

    def DrawTransform(self, xf):
        """
        Draw the transform xf on the screen
        """
        p1 = xf.position
        p2 = self.to_screen(p1 + self.axisScale * xf.R.x_axis)
        p3 = self.to_screen(p1 + self.axisScale * xf.R.y_axis)
        p1 = self.to_screen(p1)
        # x-axis in red, y-axis in green.
        pygame.draw.aaline(self.surface, (255, 0, 0), p1, p2)
        pygame.draw.aaline(self.surface, (0, 255, 0), p1, p3)

    def DrawCircle(self, center, radius, color, drawwidth=1):
        """
        Draw a wireframe circle given the center, radius, axis of orientation
        and color.
        """
        radius *= self.zoom
        if radius < 1:
            radius = 1
        else:
            radius = int(radius)
        pygame.draw.circle(self.surface, color.bytes,
                           center, radius, drawwidth)

    def DrawSolidCircle(self, center, radius, axis, color):
        """
        Draw a solid circle given the center, radius, axis of orientation and
        color.
        """
        radius *= self.zoom
        if radius < 1:
            radius = 1
        else:
            radius = int(radius)
        pygame.draw.circle(
            self.surface,
            (color / 2).bytes + [127], center, radius, 0)
        pygame.draw.circle(
            self.surface,
            color.bytes, center, radius, 1)
        # Mark the body's orientation with a red radius line.
        pygame.draw.aaline(self.surface, (255, 0, 0), center,
                           (center[0] - radius * axis[0],
                            center[1] + radius * axis[1]))

    def DrawSolidCapsule(self, p1, p2, radius, color):
        pass

    def DrawPolygon(self, vertices, color):
        """
        Draw a wireframe polygon given the screen vertices with the specified
        color.
        """
        if not vertices:
            return
        if len(vertices) == 2:
            # BUG FIX: the second endpoint used to be the whole ``vertices``
            # list instead of ``vertices[1]`` (cf. DrawSolidPolygon below).
            pygame.draw.aaline(self.surface, color.bytes,
                               vertices[0], vertices[1])
        else:
            pygame.draw.polygon(self.surface, color.bytes, vertices, 1)

    def DrawSolidPolygon(self, vertices, color):
        """
        Draw a filled polygon given the screen vertices with the specified
        color.
        """
        if not vertices:
            return
        if len(vertices) == 2:
            pygame.draw.aaline(self.surface, color.bytes,
                               vertices[0], vertices[1])
        else:
            pygame.draw.polygon(
                self.surface, (color / 2).bytes + [127], vertices, 0)
            pygame.draw.polygon(self.surface, color.bytes, vertices, 1)
class Box2DViewer(b2ContactListener):
    """Pygame-based viewer for a Box2D world.

    Registers itself as the world's contact listener and renders the world
    through :class:`PygameDraw` on each call to :meth:`loop_once`.
    """

    def __init__(self, world):
        """:param world: the ``b2World`` instance to display."""
        super(Box2DViewer, self).__init__()
        self.world = world
        self.world.contactListener = self
        self._reset()
        pygame.init()
        caption = "Box2D Simulator"
        pygame.display.set_caption(caption)
        self.screen = pygame.display.set_mode((800, 600))
        self.screenSize = b2Vec2(*self.screen.get_size())
        self.renderer = PygameDraw(surface=self.screen, test=self)
        self.world.renderer = self.renderer
        # FIXME, commented to avoid Linux error due to font.
        # try:
        #     self.font = pygame.font.Font(None, 15)
        # except IOError:
        #     try:
        #         self.font = pygame.font.Font("freesansbold.ttf", 15)
        #     except IOError:
        #         print("Unable to load default font or 'freesansbold.ttf'")
        #         print("Disabling text drawing.")
        #         self.Print = lambda *args: 0
        #         self.DrawStringAt = lambda *args: 0
        self.viewCenter = (0, 20.0)
        # NOTE(review): the zoom is changed *after* setCenter ran with the
        # old zoom (10.0 from _reset); since the viewCenter property divides
        # by _viewZoom, this shifts the effective center — confirm intended.
        self._viewZoom = 100

    def _reset(self):
        # Default camera state; called before pygame is initialized.
        self._viewZoom = 10.0
        self._viewCenter = None
        self._viewOffset = None
        self.screenSize = None
        self.rMouseDown = False
        self.textLine = 30
        self.font = None

    def setCenter(self, value):
        """
        Updates the view offset based on the center of the screen.

        Tells the debug draw to update its values also.
        """
        self._viewCenter = b2Vec2(*value)
        self._viewCenter *= self._viewZoom
        self._viewOffset = self._viewCenter - self.screenSize / 2

    def setZoom(self, zoom):
        self._viewZoom = zoom

    viewZoom = property(lambda self: self._viewZoom, setZoom,
                        doc='Zoom factor for the display')
    viewCenter = property(lambda self: self._viewCenter / self._viewZoom,
                          setCenter, doc='Screen center in camera coordinates')
    viewOffset = property(lambda self: self._viewOffset,
                          doc='The offset of the top-left corner of the '
                              'screen')

    def checkEvents(self):
        """
        Check for pygame events (mainly keyboard/mouse events).

        Passes the events onto the GUI also.  Returns False when the window
        should close (quit event or Escape key), True otherwise.
        """
        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYDOWN and event.key ==
                                      pygame.K_ESCAPE):
                return False
            elif event.type == KEYDOWN:
                self._Keyboard_Event(event.key, down=True)
            elif event.type == KEYUP:
                self._Keyboard_Event(event.key, down=False)
            elif event.type == MOUSEBUTTONDOWN:
                # Mouse wheel: buttons 4/5 zoom in/out.
                if event.button == 4:
                    self.viewZoom *= 1.1
                elif event.button == 5:
                    self.viewZoom /= 1.1
            elif event.type == MOUSEMOTION:
                if self.rMouseDown:
                    self.viewCenter -= (event.rel[0] /
                                        5.0, -event.rel[1] / 5.0)
        return True

    def _Keyboard_Event(self, key, down=True):
        """
        Internal keyboard event, don't override this.

        Checks for the initial keydown of the basic testbed keys. Passes the
        unused ones onto the test via the Keyboard() function.
        """
        if down:
            if key == pygame.K_z:  # Zoom in
                self.viewZoom = min(2 * self.viewZoom, 500.0)
            elif key == pygame.K_x:  # Zoom out
                self.viewZoom = max(0.9 * self.viewZoom, 0.02)

    def CheckKeys(self):
        """
        Check the keys that are evaluated on every main loop iteration.

        I.e., they aren't just evaluated when first pressed down
        """
        pygame.event.pump()
        self.keys = keys = pygame.key.get_pressed()
        # Arrow keys pan the camera; Home resets it.
        if keys[pygame.K_LEFT]:
            self.viewCenter -= (0.5, 0)
        elif keys[pygame.K_RIGHT]:
            self.viewCenter += (0.5, 0)
        if keys[pygame.K_UP]:
            self.viewCenter += (0, 0.5)
        elif keys[pygame.K_DOWN]:
            self.viewCenter -= (0, 0.5)
        if keys[pygame.K_HOME]:
            self.viewZoom = 1.0
            self.viewCenter = (0.0, 20.0)

    def ConvertScreenToWorld(self, x, y):
        """Convert screen pixel coordinates to world coordinates
        (the y axis is flipped)."""
        return b2Vec2((x + self.viewOffset.x) / self.viewZoom,
                      ((self.screenSize.y - y + self.viewOffset.y) /
                       self.viewZoom))

    def loop_once(self):
        """Process events and redraw the world once."""
        self.checkEvents()
        # self.CheckKeys()
        self.screen.fill((0, 0, 0))
        if self.renderer is not None:
            self.renderer.StartDraw()
        self.world.DrawDebugData()
        if self.renderer is not None:
            self.renderer.EndDraw()
        pygame.display.flip()

    def finish(self):
        """Shut down pygame."""
        pygame.quit()
| 9,331 | 31.975265 | 79 | py |
rllab | rllab-master/rllab/envs/box2d/cartpole_swingup_env.py | import numpy as np
import pygame
from rllab.envs.box2d.parser import find_body
from rllab.core.serializable import Serializable
from rllab.envs.box2d.box2d_env import Box2DEnv
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
# Tornio, Matti, and Tapani Raiko. "Variational Bayesian approach for
# nonlinear identification and control." Proc. of the IFAC Workshop on
# Nonlinear Model Predictive Control for Fast Systems, NMPC FS06. 2006.
class CartpoleSwingupEnv(Box2DEnv, Serializable):
    """Cart-pole swing-up: the pole starts hanging down and must be swung
    up and balanced while keeping the cart on the track."""

    @autoargs.inherit(Box2DEnv.__init__)
    def __init__(self, *args, **kwargs):
        """Build the world from the cartpole model and cache its bodies."""
        super(CartpoleSwingupEnv, self).__init__(
            self.model_path("cartpole.xml.mako"),
            *args, **kwargs
        )
        self.max_cart_pos = 3
        self.max_reward_cart_pos = 3
        self.cart = find_body(self.world, "cart")
        self.pole = find_body(self.world, "pole")
        Serializable.__init__(self, *args, **kwargs)

    @overrides
    def reset(self):
        """Reset and sample a random state around the hanging-down pose."""
        self._set_state(self.initial_state)
        self._invalidate_state_caches()
        # Ranges for (cart pos, cart vel, pole angle, pole angular vel);
        # the angle is centered at pi, i.e. the pole pointing down.
        lower = np.array([-1, -2, np.pi - 1, -3])
        upper = np.array([1, 2, np.pi + 1, 3])
        xpos, xvel, apos, avel = np.random.uniform(lower, upper)
        self.cart.position = (xpos, self.cart.position[1])
        self.cart.linearVelocity = (xvel, self.cart.linearVelocity[1])
        self.pole.angle = apos
        self.pole.angularVelocity = avel
        return self.get_current_obs()

    @overrides
    def compute_reward(self, action):
        # Nothing to precompute before the physics step.
        yield
        if self.is_current_done():
            reward = -100
        elif abs(self.cart.position[0]) > self.max_reward_cart_pos:
            reward = -1
        else:
            reward = np.cos(self.pole.angle)
        yield reward

    @overrides
    def is_current_done(self):
        """Done only when the cart leaves the allowed track segment."""
        return abs(self.cart.position[0]) > self.max_cart_pos

    @overrides
    def action_from_keys(self, keys):
        """Map arrow keys to a horizontal force for manual play."""
        force = 0
        if keys[pygame.K_LEFT]:
            force = -10
        elif keys[pygame.K_RIGHT]:
            force = +10
        return np.asarray([force])
| 2,167 | 30.882353 | 71 | py |
rllab | rllab-master/rllab/envs/box2d/car_parking_env.py | import numpy as np
import pygame
from rllab.envs.box2d.box2d_env import Box2DEnv
from rllab.envs.box2d.parser import find_body
from rllab.core.serializable import Serializable
from rllab.envs.box2d.parser.xml_box2d import _get_name
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
class CarParkingEnv(Box2DEnv, Serializable):
    """Car-parking task: steer a car to rest inside a goal circle at the
    origin with (near) zero velocity."""

    @autoargs.inherit(Box2DEnv.__init__)
    @autoargs.arg("random_start", type=bool,
                  help="Randomized starting position by uniforming sampling starting car angle"
                       "and position from a circle of radius 5")
    @autoargs.arg("random_start_range", type=float,
                  help="Defaulted to 1. which means possible angles are 1. * 2*pi")
    def __init__(self, *args, **kwargs):
        # NOTE(review): Serializable.__init__ is called *before* popping the
        # kwargs below (sibling envs call it last) — confirm intended.
        Serializable.__init__(self, *args, **kwargs)
        self.random_start = kwargs.pop("random_start", True)
        self.random_start_range = kwargs.pop("random_start_range", 1.)
        super(CarParkingEnv, self).__init__(
            self.model_path("car_parking.xml"),
            *args, **kwargs
        )
        self.goal = find_body(self.world, "goal")
        self.car = find_body(self.world, "car")
        # Wheel bodies are identified by their XML names.
        self.wheels = [
            body for body in self.world.bodies if "wheel" in _get_name(body)]
        self.front_wheels = [
            body for body in self.wheels if "front" in _get_name(body)]
        self.max_deg = 30.       # steering limit (degrees)
        self.goal_radius = 1.    # distance-to-origin tolerance
        self.vel_thres = 1e-1    # speed tolerance for "parked"
        self.start_radius = 5.   # radius of the random-start circle

    @overrides
    def before_world_step(self, action):
        """Apply steering and simple tire friction before each physics step.

        The last action component is the steering angle (degrees) relative
        to the car body.
        """
        desired_angle = self.car.angle + action[-1] / 180 * np.pi
        for wheel in self.front_wheels:
            wheel.angle = desired_angle
            wheel.angularVelocity = 0  # kill angular velocity
        # kill all wheels' lateral speed
        for wheel in self.wheels:
            ortho = wheel.GetWorldVector((1, 0))
            lateral_speed = wheel.linearVelocity.dot(ortho) * ortho
            impulse = wheel.mass * -lateral_speed
            wheel.ApplyLinearImpulse(impulse, wheel.worldCenter, True)
            # also apply a tiny bit of fraction
            mag = wheel.linearVelocity.dot(wheel.linearVelocity)
            if mag != 0:
                wheel.ApplyLinearImpulse(
                    0.1 * wheel.mass * -wheel.linearVelocity / mag**0.5, wheel.worldCenter, True)

    @property
    @overrides
    def action_dim(self):
        # One extra component on top of the XML controls: steering angle.
        return super(CarParkingEnv, self).action_dim + 1

    @property
    @overrides
    def action_bounds(self):
        """Base bounds extended with the steering limit in degrees."""
        lb, ub = super(CarParkingEnv, self).action_bounds
        return np.append(lb, -self.max_deg), np.append(ub, self.max_deg)

    @overrides
    def reset(self):
        """Reset; optionally place the car at a random pose on a circle."""
        self._set_state(self.initial_state)
        self._invalidate_state_caches()
        if self.random_start:
            pos_angle, car_angle = np.random.rand(
                2) * np.pi * 2 * self.random_start_range
            dis = (self.start_radius * np.cos(pos_angle),
                   self.start_radius * np.sin(pos_angle))
            for body in [self.car] + self.wheels:
                body.angle = car_angle
            # Move each wheel rigidly with the car to the new position.
            for wheel in self.wheels:
                wheel.position = wheel.position - self.car.position + dis
            self.car.position = dis
            # One settling step so the new pose is consistent.
            self.world.Step(
                self.extra_data.timeStep,
                self.extra_data.velocityIterations,
                self.extra_data.positionIterations
            )
        return self.get_current_obs()

    @overrides
    def compute_reward(self, action):
        # Nothing to precompute before the physics step.
        yield
        # -1 per step until parked, plus a penalty on distance to goal.
        not_done = not self.is_current_done()
        dist_to_goal = self.get_current_obs()[-3]
        yield - 1 * not_done - 2 * dist_to_goal

    @overrides
    def is_current_done(self):
        """Parked: inside the goal circle and (almost) at rest."""
        pos_satified = np.linalg.norm(self.car.position) <= self.goal_radius
        vel_satisfied = np.linalg.norm(
            self.car.linearVelocity) <= self.vel_thres
        return pos_satified and vel_satisfied

    @overrides
    def action_from_keys(self, keys):
        """Map arrow keys to (throttle, ..., steering) for manual control."""
        go = np.zeros(self.action_dim)
        if keys[pygame.K_LEFT]:
            go[-1] = self.max_deg
        if keys[pygame.K_RIGHT]:
            go[-1] = -self.max_deg
        if keys[pygame.K_UP]:
            go[0] = 10
        if keys[pygame.K_DOWN]:
            go[0] = -10
        return go
| 4,347 | 36.162393 | 97 | py |
rllab | rllab-master/rllab/envs/box2d/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/envs/box2d/parser/xml_box2d.py | # pylint: disable=no-init, too-few-public-methods, old-style-class
import xml.etree.ElementTree as ET
import Box2D
import numpy as np
from rllab.envs.box2d.parser.xml_types import XmlElem, XmlChild, XmlAttr, \
XmlChildren
from rllab.envs.box2d.parser.xml_attr_types import Tuple, Float, Choice, \
String, List, Point2D, Hex, Int, Angle, Bool, Either
class XmlBox2D(XmlElem):
    """Root ``<box2d>`` element of a model file; wraps a single ``<world>``."""

    tag = "box2d"

    class Meta:
        # Declarative parsing spec consumed by XmlElem (see xml_types).
        world = XmlChild("world", lambda: XmlWorld, required=True)

    def __init__(self):
        # Populated by the XML parser.
        self.world = None

    def to_box2d(self, extra_data, world=None):
        """Build (or populate) a Box2D world from the parsed document."""
        return self.world.to_box2d(extra_data, world=world)
class XmlWorld(XmlElem):
    """``<world>`` element: solver settings plus the bodies, joints, state
    (observation) and control declarations."""

    tag = "world"

    class Meta:
        # Declarative parsing spec consumed by XmlElem.
        bodies = XmlChildren("body", lambda: XmlBody)
        gravity = XmlAttr("gravity", Point2D())
        joints = XmlChildren("joint", lambda: XmlJoint)
        states = XmlChildren("state", lambda: XmlState)
        controls = XmlChildren("control", lambda: XmlControl)
        warmStarting = XmlAttr("warmstart", Bool())
        continuousPhysics = XmlAttr("continuous", Bool())
        subStepping = XmlAttr("substepping", Bool())
        velocityIterations = XmlAttr("velitr", Int())
        positionIterations = XmlAttr("positr", Int())
        timeStep = XmlAttr("timestep", Float())

    def __init__(self):
        # Defaults mirror Box2D's usual solver configuration.
        self.bodies = []
        self.gravity = None
        self.joints = []
        self.states = []
        self.controls = []
        self.warmStarting = True
        self.continuousPhysics = True
        self.subStepping = False
        self.velocityIterations = 8
        self.positionIterations = 3
        self.timeStep = 0.02

    def to_box2d(self, extra_data, world=None):
        """Create (or reuse) a ``b2World`` and populate it.

        Order matters: bodies are created before joints so joints can look
        their endpoint bodies up by name.
        """
        if world is None:
            world = Box2D.b2World(allow_sleeping=False)
        world.warmStarting = self.warmStarting
        world.continuousPhysics = self.continuousPhysics
        world.subStepping = self.subStepping
        extra_data.velocityIterations = self.velocityIterations
        extra_data.positionIterations = self.positionIterations
        extra_data.timeStep = self.timeStep
        if self.gravity:
            world.gravity = self.gravity
        for body in self.bodies:
            body.to_box2d(world, self, extra_data)
        for joint in self.joints:
            joint.to_box2d(world, self, extra_data)
        for state in self.states:
            state.to_box2d(world, self, extra_data)
        for control in self.controls:
            control.to_box2d(world, self, extra_data)
        return world
class XmlBody(XmlElem):
    """``<body>`` element: a rigid body with a name, type, position and a
    list of fixtures."""

    tag = "body"

    # Index in this list maps directly to the Box2D body-type constant.
    TYPES = ["static", "kinematic", "dynamic"]

    class Meta:
        # Declarative parsing spec consumed by XmlElem.
        color = XmlAttr("color", List(Float()))
        name = XmlAttr("name", String())
        typ = XmlAttr("type", Choice("static", "kinematic", "dynamic"),
                      required=True)
        fixtures = XmlChildren("fixture", lambda: XmlFixture)
        position = XmlAttr("position", Point2D())

    def __init__(self):
        self.color = None
        self.name = None
        self.typ = None
        self.position = None
        self.fixtures = []

    def to_box2d(self, world, xml_world, extra_data):
        """Create the body in *world* and attach all of its fixtures.

        The name/color are stored in ``userData`` so bodies can later be
        looked up by name (see ``find_body``).
        """
        body = world.CreateBody(type=self.TYPES.index(self.typ))
        body.userData = dict(
            name=self.name,
            color=self.color,
        )
        if self.position:
            body.position = self.position
        for fixture in self.fixtures:
            fixture.to_box2d(body, self, extra_data)
        return body
class XmlFixture(XmlElem):
    """``<fixture>`` element: a collision shape (polygon, circle, edge or
    sine-wave chain) attached to a body."""

    tag = "fixture"

    class Meta:
        # Declarative parsing spec consumed by XmlElem.
        shape = XmlAttr("shape",
                        Choice("polygon", "circle", "edge", "sine_chain"), required=True)
        vertices = XmlAttr("vertices", List(Point2D()))
        box = XmlAttr("box", Either(
            Point2D(),
            Tuple(Float(), Float(), Point2D(), Angle())))
        radius = XmlAttr("radius", Float())
        width = XmlAttr("width", Float())
        height = XmlAttr("height", Float())
        center = XmlAttr("center", Point2D())
        angle = XmlAttr("angle", Angle())
        position = XmlAttr("position", Point2D())
        friction = XmlAttr("friction", Float())
        density = XmlAttr("density", Float())
        category_bits = XmlAttr("category_bits", Hex())
        mask_bits = XmlAttr("mask_bits", Hex())
        group = XmlAttr("group", Int())

    def __init__(self):
        self.shape = None
        self.vertices = None
        self.box = None
        self.friction = None
        self.density = None
        self.category_bits = None
        self.mask_bits = None
        self.group = None
        self.radius = None
        self.width = None
        self.height = None
        self.center = None
        self.angle = None

    def to_box2d(self, body, xml_body, extra_data):
        """Create the Box2D fixture described by this element on *body*.

        :raises NotImplementedError: for an unknown shape value.
        """
        attrs = dict()
        # BUG FIX: these previously used truthiness checks, so an explicit
        # zero in the XML (e.g. friction="0") was silently dropped and the
        # Box2D default used instead.  ``is not None`` honors explicit zeros.
        if self.friction is not None:
            attrs["friction"] = self.friction
        if self.density is not None:
            attrs["density"] = self.density
        if self.group is not None:
            attrs["groupIndex"] = self.group
        if self.radius is not None:
            attrs["radius"] = self.radius
        if self.shape == "polygon":
            if self.box:
                fixture = body.CreatePolygonFixture(
                    box=self.box, **attrs)
            else:
                fixture = body.CreatePolygonFixture(
                    vertices=self.vertices, **attrs)
        elif self.shape == "edge":
            fixture = body.CreateEdgeFixture(vertices=self.vertices, **attrs)
        elif self.shape == "circle":
            if self.center:
                attrs["pos"] = self.center
            fixture = body.CreateCircleFixture(**attrs)
        elif self.shape == "sine_chain":
            if self.center:
                attrs["pos"] = self.center
            # Sample half a sine period with 2*m+1 chain vertices spanning
            # ``width`` horizontally and ``height`` vertically.
            m = 100
            vs = [
                (0.5/m*i*self.width, self.height*np.sin((1./m*i-0.5)*np.pi))
                for i in range(-m, m+1)
            ]
            attrs["vertices_chain"] = vs
            fixture = body.CreateChainFixture(**attrs)
        else:
            # Unreachable given the Choice() spec above, but fail loudly
            # instead of relying on ``assert`` (stripped under -O).
            raise NotImplementedError("unsupported shape: %s" % self.shape)
        return fixture
def _get_name(x):
if isinstance(x.userData, dict):
return x.userData.get('name')
return None
def find_body(world, name):
    """Return the first body in *world* whose userData name equals *name*.

    Raises IndexError if no such body exists.
    """
    matches = [candidate for candidate in world.bodies
               if _get_name(candidate) == name]
    return matches[0]
def find_joint(world, name):
    """Return the first joint in *world* whose userData name equals *name*.

    Raises IndexError if no such joint exists.
    """
    matches = [candidate for candidate in world.joints
               if _get_name(candidate) == name]
    return matches[0]
class XmlJoint(XmlElem):
    """Parsed <joint> element: connects two named bodies with a Box2D joint."""

    tag = "joint"

    # Maps the XML "type" attribute to the corresponding Box2D joint class.
    JOINT_TYPES = {
        "revolute": Box2D.b2RevoluteJoint,
        "friction": Box2D.b2FrictionJoint,
        "prismatic": Box2D.b2PrismaticJoint,
    }

    class Meta:
        bodyA = XmlAttr("bodyA", String(), required=True)
        bodyB = XmlAttr("bodyB", String(), required=True)
        anchor = XmlAttr("anchor", Tuple(Float(), Float()))
        localAnchorA = XmlAttr("localAnchorA", Tuple(Float(), Float()))
        localAnchorB = XmlAttr("localAnchorB", Tuple(Float(), Float()))
        axis = XmlAttr("axis", Tuple(Float(), Float()))
        limit = XmlAttr("limit", Tuple(Angle(), Angle()))
        ctrllimit = XmlAttr("ctrllimit", Tuple(Angle(), Angle()))
        typ = XmlAttr("type", Choice("revolute", "friction", "prismatic"), required=True)
        name = XmlAttr("name", String())
        motor = XmlAttr("motor", Bool())

    def __init__(self):
        self.bodyA = None
        self.bodyB = None
        self.anchor = None
        self.localAnchorA = None
        self.localAnchorB = None
        self.limit = None
        self.ctrllimit = None
        self.motor = False
        self.typ = None
        self.name = None
        self.axis = None

    def to_box2d(self, world, xml_world, extra_data):
        """Create the joint in *world*; both bodies must already exist."""
        bodyA = find_body(world, self.bodyA)
        bodyB = find_body(world, self.bodyB)
        args = dict()
        if self.typ == "revolute":
            if self.localAnchorA:
                args["localAnchorA"] = self.localAnchorA
            if self.localAnchorB:
                args["localAnchorB"] = self.localAnchorB
            if self.anchor:
                args["anchor"] = self.anchor
            if self.limit:
                # Rotation limits in radians (parsed by the Angle type).
                args["enableLimit"] = True
                args["lowerAngle"] = self.limit[0]
                args["upperAngle"] = self.limit[1]
        elif self.typ == "friction":
            if self.anchor:
                args["anchor"] = self.anchor
        elif self.typ == "prismatic":
            # NOTE(review): anchors/limits are ignored for prismatic joints
            # here — confirm this is intentional.
            if self.axis:
                args["axis"] = self.axis
        else:
            raise NotImplementedError
        # ctrllimit/motor/name are not Box2D parameters; they are stashed on
        # userData for the environment code to read later.
        userData = dict(
            ctrllimit=self.ctrllimit,
            motor=self.motor,
            name=self.name
        )
        joint = world.CreateJoint(type=self.JOINT_TYPES[self.typ],
                                  bodyA=bodyA,
                                  bodyB=bodyB,
                                  **args)
        joint.userData = userData
        return joint
class XmlState(XmlElem):
    """Parsed <state> element: declares one entry of the observation vector.

    Instances are only recorded into extra_data; nothing is built in the
    physics world itself.
    """

    tag = "state"

    class Meta:
        # Which physical quantity to observe.
        typ = XmlAttr(
            "type", Choice(
                "xpos", "ypos", "xvel", "yvel", "apos", "avel",
                "dist", "angle",
            ))
        # Optional transform applied to the raw value.
        transform = XmlAttr(
            "transform", Choice("id", "sin", "cos"))
        body = XmlAttr("body", String())
        to = XmlAttr("to", String())
        joint = XmlAttr("joint", String())
        local = XmlAttr("local", Point2D())
        com = XmlAttr("com", List(String()))

    def __init__(self):
        self.typ = None
        self.transform = None
        self.body = None
        self.joint = None
        self.local = None
        self.com = None
        self.to = None

    def to_box2d(self, world, xml_world, extra_data):
        # Just record the declaration for later observation assembly.
        extra_data.states.append(self)
class XmlControl(XmlElem):
    """Parsed <control> element: declares an actuator (force or torque).

    Like XmlState, instances are only recorded into extra_data; the
    environment applies the actual forces during simulation.
    """

    tag = "control"

    class Meta:
        typ = XmlAttr("type", Choice("force", "torque"), required=True)
        body = XmlAttr(
            "body", String(),
            help="name of the body to apply force on")
        bodies = XmlAttr(
            "bodies", List(String()),
            help="names of the bodies to apply force on")
        joint = XmlAttr(
            "joint", String(),
            help="name of the joint")
        anchor = XmlAttr(
            "anchor", Point2D(),
            help="location of the force in local coordinate frame")
        direction = XmlAttr(
            "direction", Point2D(),
            help="direction of the force in local coordinate frame")
        ctrllimit = XmlAttr(
            "ctrllimit", Tuple(Float(), Float()),
            help="limit of the control input in Newton")

    def __init__(self):
        self.typ = None
        self.body = None
        self.bodies = None
        self.joint = None
        self.anchor = None
        self.direction = None
        self.ctrllimit = None

    def to_box2d(self, world, xml_world, extra_data):
        """Normalize body/bodies and record this control declaration."""
        # Fixed: compare to None with `is not` (PEP 8) instead of `!= None`.
        if self.body is not None:
            assert self.bodies is None, "Should not set body and bodies at the same time"
            self.bodies = [self.body]
        extra_data.controls.append(self)
class ExtraData(object):
    """Side-channel data accumulated while building a Box2D world from XML:
    declared states (observations), controls (actuators), and solver
    settings."""

    def __init__(self):
        self.states, self.controls = [], []
        # Solver settings default to None until the XML provides them.
        self.velocityIterations = self.positionIterations = self.timeStep = None
def world_from_xml(s):
    """Build a Box2D world from an XML string.

    Returns a (world, extra_data) pair, where extra_data collects the
    declared <state> and <control> entries plus simulation settings.
    """
    extra_data = ExtraData()
    box2d = XmlBox2D.from_xml(ET.fromstring(s))
    world = box2d.to_box2d(extra_data)
    return world, extra_data
| 11,482 | 30.546703 | 89 | py |
rllab | rllab-master/rllab/envs/box2d/parser/xml_attr_types.py | # pylint: disable=no-init, too-few-public-methods, old-style-class
import numpy as np
class Type(object):
    """Base class for XML attribute value parsers."""

    def __eq__(self, other):
        # Two parsers are considered equal iff they are the same concrete class.
        return self.__class__ == other.__class__

    def from_str(self, s):
        """Parse the string *s* into a value; implemented by subclasses."""
        raise NotImplementedError
class Float(Type):
    """Parses a decimal string into a float."""

    def from_str(self, s):
        return float(s)
class Int(Type):
    """Parses a decimal string into an int."""

    def from_str(self, s):
        return int(s)
class Hex(Type):
    """Parses a hexadecimal literal such as "0x1F" into an int."""

    def from_str(self, s):
        # Fixed: raise ValueError instead of using `assert` for input
        # validation — asserts are stripped under `python -O`, and ValueError
        # is what Either.from_str catches when trying alternatives.
        if not s.startswith(("0x", "0X")):
            raise ValueError("Expected hex literal with 0x prefix, got %s" % s)
        return int(s, 16)
class Choice(Type):
    """Accepts only strings drawn from a fixed set of options."""

    def __init__(self, *options):
        self._options = options

    def from_str(self, s):
        # Reject anything outside the declared option set.
        if s not in self._options:
            raise ValueError("Unexpected value %s: must be one of %s" %
                             (s, ", ".join(self._options)))
        return s
class List(Type):
    """Parses a delimited string into a list of elem_type values.

    Delimiter preference: ";" first, then ",", then a single space.
    """

    def __init__(self, elem_type):
        self.elem_type = elem_type

    def __eq__(self, other):
        return (self.__class__ == other.__class__
                and self.elem_type == other.elem_type)

    def from_str(self, s):
        for sep in (";", ","):
            if sep in s:
                parts = s.split(sep)
                break
        else:
            parts = s.split(" ")
        return [self.elem_type.from_str(part) for part in parts]
class Tuple(Type):
    """Parses a delimited string into a fixed-length heterogeneous tuple.

    Delimiter preference: ";" first, then ",", then a single space. The
    number of segments must match the number of declared element types.
    """

    def __init__(self, *elem_types):
        self.elem_types = elem_types

    def __eq__(self, other):
        return (self.__class__ == other.__class__
                and self.elem_types == other.elem_types)

    def from_str(self, s):
        for sep in (";", ","):
            if sep in s:
                parts = s.split(sep)
                break
        else:
            parts = s.split(" ")
        if len(parts) != len(self.elem_types):
            raise ValueError(
                "Length mismatch: expected a tuple of length %d; got %s instead" %
                (len(self.elem_types), s))
        return tuple(parser.from_str(part)
                     for parser, part in zip(self.elem_types, parts))
class Either(Type):
    """Tries each candidate parser in order; the first success wins."""

    def __init__(self, *elem_types):
        self.elem_types = elem_types

    def from_str(self, s):
        for candidate in self.elem_types:
            try:
                return candidate.from_str(s)
            except ValueError:
                continue
        raise ValueError('No match found')
class String(Type):
    """Identity parser: returns the raw attribute string unchanged."""

    def from_str(self, s):
        return s
class Angle(Type):
    """Parses an angle string into radians.

    A "rad" suffix means radians; a "deg" suffix or a bare number is
    interpreted as degrees.
    """

    def from_str(self, s):
        if s.endswith("rad"):
            return float(s[:-3])
        if s.endswith("deg"):
            s = s[:-3]
        return float(s) * np.pi / 180.0
class Bool(Type):
    """Parses "true" or "1" (case-insensitive) as True; anything else is False."""

    def from_str(self, s):
        return s.lower() in ("true", "1")
Point2D = lambda: Tuple(Float(), Float())
| 2,807 | 21.285714 | 82 | py |
rllab | rllab-master/rllab/envs/box2d/parser/xml_types.py | # pylint: disable=no-init, too-few-public-methods, old-style-class
from types import LambdaType
def _extract_type(typ):
if isinstance(typ, LambdaType):
return typ()
else:
return typ
class AttrDecl(object):
    """Marker base class for declarative XML attribute/child descriptors."""
    pass
class XmlChildren(AttrDecl):
    """Declares a repeated child element; parses to a list of values."""

    def __init__(self, tag, typ):
        self._tag = tag
        self._typ = typ

    def from_xml(self, xml):
        """Parse every direct child of *xml* whose tag matches."""
        parser = _extract_type(self._typ)
        return [parser.from_xml(child)
                for child in xml if child.tag == self._tag]
class XmlChild(AttrDecl):
    """Declares a single (optionally required) child element."""

    def __init__(self, tag, typ, required=False):
        self._tag = tag
        self._typ = typ
        self._required = required

    def from_xml(self, xml):
        """Parse the unique matching child, or None if absent and optional.

        Raises ValueError when the child is duplicated, or missing while
        required.
        """
        matches = [child for child in xml if child.tag == self._tag]
        if len(matches) > 1:
            raise ValueError('Multiple candidate found for tag %s' % self._tag)
        if not matches:
            if self._required:
                raise ValueError('Missing xml element with tag %s' % self._tag)
            return None
        return _extract_type(self._typ).from_xml(matches[0])
class XmlAttr(AttrDecl):
    """Declares an XML attribute parsed with a Type instance."""

    def __init__(self, name, typ, required=False, *args, **kwargs):
        # *args/**kwargs deliberately absorb extra metadata (callers pass
        # e.g. help="...") without affecting parsing.
        self._name = name
        self._typ = typ
        self._required = required

    @property
    def name(self):
        return self._name

    def from_xml(self, xml):
        """Parse this attribute from *xml*; None when absent and optional."""
        raw = xml.attrib.get(self._name)
        if raw is not None:
            return _extract_type(self._typ).from_str(raw)
        if self._required:
            raise ValueError("Missing required attribute %s" % self._name)
        return None
class XmlElem(object):
    """Base class for declarative XML element parsers.

    Subclasses set `tag` to the expected element name and declare attributes
    and children as AttrDecl descriptors on an inner `Meta` class.
    """

    tag = None
    Meta = None

    @classmethod
    def from_xml(cls, xml):
        """Parse an ElementTree element into a new instance of *cls*.

        Raises ValueError on a tag mismatch or on an attribute present in
        the XML but not declared on Meta.
        """
        if cls.tag != xml.tag:
            raise ValueError(
                "Tag mismatch: expected %s but got %s" % (cls.tag, xml.tag))
        attrs = cls.get_attrs()
        inst = cls()
        used_attrs = []
        for name, attr in attrs:
            val = attr.from_xml(xml)
            if isinstance(attr, XmlAttr):
                used_attrs.append(attr.name)
            # None means "absent": keep whatever default __init__ set.
            if val is not None:
                setattr(inst, name, val)
        for attr in list(xml.attrib.keys()):
            if attr not in used_attrs:
                raise ValueError("Unrecognized attribute: %s" % attr)
        return inst

    @classmethod
    def get_attrs(cls):
        """Collect (name, AttrDecl) pairs declared on Meta, cached per class.

        NOTE(review): hasattr() also sees an `_attrs` inherited from a parent
        class, so a subclass of a concrete parser would reuse its parent's
        cache — presumably fine here since all parsers derive directly from
        XmlElem; verify before introducing deeper hierarchies.
        """
        if not hasattr(cls, '_attrs'):
            all_attrs = dir(cls.Meta)
            attrs = [(attr, getattr(cls.Meta, attr)) for attr in all_attrs
                     if isinstance(getattr(cls.Meta, attr), AttrDecl)]
            cls._attrs = attrs
        return cls._attrs

    def __str__(self):
        # Render as ClassName(attr1=value1, attr2=value2, ...).
        attrs = []
        for name, _ in self.__class__.get_attrs():
            attrs.append("%s=%s" % (name, str(getattr(self, name))))
        return self.__class__.__name__ + "(" + ", ".join(attrs) + ")"

    def __repr__(self):
        return str(self)
| 3,055 | 26.781818 | 79 | py |
rllab | rllab-master/rllab/envs/box2d/parser/__init__.py | from .xml_box2d import world_from_xml, find_body, find_joint
| 61 | 30 | 60 | py |
rllab | rllab-master/rllab/distributions/recurrent_diagonal_gaussian.py | import theano.tensor as TT
import numpy as np
from rllab.distributions.base import Distribution
from rllab.distributions.diagonal_gaussian import DiagonalGaussian
RecurrentDiagonalGaussian = DiagonalGaussian
| 209 | 29 | 66 | py |
rllab | rllab-master/rllab/distributions/base.py | import theano.tensor as TT
class Distribution(object):
    """Abstract interface for the action distributions used by stochastic
    policies. "dist_info" arguments are dictionaries of distribution
    parameters; "_sym" variants operate on symbolic (theano) variables."""

    @property
    def dim(self):
        """Dimension of the random variable."""
        raise NotImplementedError

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL divergence between two distributions."""
        raise NotImplementedError

    def kl(self, old_dist_info, new_dist_info):
        """Numeric KL divergence between two distributions."""
        raise NotImplementedError

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic likelihood ratio p_new(x) / p_old(x)."""
        raise NotImplementedError

    def entropy(self, dist_info):
        """Numeric entropy of the distribution described by *dist_info*."""
        raise NotImplementedError

    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic log density of *x_var*."""
        raise NotImplementedError

    def likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic density, derived by exponentiating the log density."""
        return TT.exp(self.log_likelihood_sym(x_var, dist_info_vars))

    def log_likelihood(self, xs, dist_info):
        """Numeric log density of each sample in *xs*."""
        raise NotImplementedError

    @property
    def dist_info_keys(self):
        """Keys expected in dist_info dictionaries."""
        raise NotImplementedError
| 1,033 | 25.512821 | 82 | py |
rllab | rllab-master/rllab/distributions/categorical.py | import theano.tensor as TT
import numpy as np
from .base import Distribution
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
TINY = 1e-8
# def from_onehot_sym(x_var):
# ret = TT.zeros((x_var.shape[0],), x_var.dtype)
# nonzero_n, nonzero_a = TT.nonzero(x_var)[:2]
# ret = TT.set_subtensor(ret[nonzero_n], nonzero_a.astype('uint8'))
# return ret
def from_onehot(x_var):
    """Convert a batch of one-hot rows into a vector of int32 indices.

    Rows that are entirely zero map to index 0.
    """
    indices = np.zeros((len(x_var),), 'int32')
    rows, cols = np.nonzero(x_var)
    indices[rows] = cols
    return indices
class Categorical(Distribution):
    """Categorical distribution over `dim` discrete choices.

    dist_info dictionaries carry a single key "prob": an (N, A) array of
    per-row probabilities. TINY guards all logs/divisions against zeros.
    """

    def __init__(self, dim):
        # dim: number of categories (A).
        self._dim = dim
        self._srng = RandomStreams()

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """
        Compute the symbolic KL divergence of two categorical distributions
        """
        old_prob_var = old_dist_info_vars["prob"]
        new_prob_var = new_dist_info_vars["prob"]
        # Assume layout is N * A
        return TT.sum(
            old_prob_var * (TT.log(old_prob_var + TINY) - TT.log(new_prob_var + TINY)),
            axis=-1
        )

    def kl(self, old_dist_info, new_dist_info):
        """
        Compute the KL divergence of two categorical distributions
        """
        old_prob = old_dist_info["prob"]
        new_prob = new_dist_info["prob"]
        return np.sum(
            old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),
            axis=-1
        )

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic p_new(x)/p_old(x); x_var holds one-hot actions."""
        old_prob_var = old_dist_info_vars["prob"]
        new_prob_var = new_dist_info_vars["prob"]
        x_var = TT.cast(x_var, 'float32')
        # Assume layout is N * A
        return (TT.sum(new_prob_var * x_var, axis=-1) + TINY) / (TT.sum(old_prob_var * x_var, axis=-1) + TINY)

    def entropy(self, info):
        # Per-row entropy, shape (N,).
        probs = info["prob"]
        return -np.sum(probs * np.log(probs + TINY), axis=1)

    def entropy_sym(self, dist_info_vars):
        prob_var = dist_info_vars["prob"]
        return -TT.sum(prob_var * TT.log(prob_var + TINY), axis=1)

    def log_likelihood_sym(self, x_var, dist_info_vars):
        probs = dist_info_vars["prob"]
        # Assume layout is N * A
        return TT.log(TT.sum(probs * TT.cast(x_var, 'float32'), axis=-1) + TINY)

    def log_likelihood(self, xs, dist_info):
        probs = dist_info["prob"]
        # Assume layout is N * A
        N = probs.shape[0]
        # from_onehot turns one-hot rows into category indices.
        return np.log(probs[np.arange(N), from_onehot(np.asarray(xs))] + TINY)

    def sample_sym(self, dist_info):
        # One multinomial draw per row, returned as one-hot uint8 rows.
        probs = dist_info["prob"]
        return self._srng.multinomial(pvals=probs, dtype='uint8')

    @property
    def dist_info_keys(self):
        return ["prob"]
| 2,797 | 30.795455 | 110 | py |
rllab | rllab-master/rllab/distributions/recurrent_categorical.py | import theano.tensor as TT
import numpy as np
import theano
from rllab.distributions.categorical import Categorical
from rllab.distributions.base import Distribution
TINY = 1e-8
class RecurrentCategorical(Distribution):
    """Categorical distribution over sequences; arrays have layout N * T * A.

    Per-step computations are delegated to a flat Categorical by collapsing
    the (N, T) leading axes into one.
    """

    def __init__(self, dim):
        self._cat = Categorical(dim)
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """
        Compute the symbolic KL divergence of two categorical distributions
        """
        old_prob_var = old_dist_info_vars["prob"]
        new_prob_var = new_dist_info_vars["prob"]
        # Assume layout is N * T * A
        return TT.sum(
            old_prob_var * (TT.log(old_prob_var + TINY) - TT.log(new_prob_var + TINY)),
            axis=2
        )

    def kl(self, old_dist_info, new_dist_info):
        """
        Compute the KL divergence of two categorical distributions
        """
        old_prob = old_dist_info["prob"]
        new_prob = new_dist_info["prob"]
        return np.sum(
            old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),
            axis=2
        )

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        old_prob_var = old_dist_info_vars["prob"]
        new_prob_var = new_dist_info_vars["prob"]
        # Assume layout is N * T * A
        a_dim = x_var.shape[-1]
        # Flatten (N, T) so the per-step Categorical can be reused.
        flat_ratios = self._cat.likelihood_ratio_sym(
            x_var.reshape((-1, a_dim)),
            dict(prob=old_prob_var.reshape((-1, a_dim))),
            dict(prob=new_prob_var.reshape((-1, a_dim)))
        )
        return flat_ratios.reshape(old_prob_var.shape[:2])

    def entropy(self, dist_info):
        # Per-timestep entropy, shape (N, T).
        probs = dist_info["prob"]
        return -np.sum(probs * np.log(probs + TINY), axis=2)

    def log_likelihood_sym(self, xs, dist_info_vars):
        probs = dist_info_vars["prob"]
        # Assume layout is N * T * A
        a_dim = probs.shape[-1]
        flat_logli = self._cat.log_likelihood_sym(
            xs.reshape((-1, a_dim)), dict(prob=probs.reshape((-1, a_dim))))
        return flat_logli.reshape(probs.shape[:2])

    def log_likelihood(self, xs, dist_info):
        probs = dist_info["prob"]
        # Assume layout is N * T * A
        a_dim = probs.shape[-1]
        # Bug fix: delegate to the *numeric* Categorical.log_likelihood. The
        # original called log_likelihood_sym, which builds a theano expression
        # instead of returning a numpy array for numpy inputs.
        flat_logli = self._cat.log_likelihood(
            xs.reshape((-1, a_dim)), dict(prob=probs.reshape((-1, a_dim))))
        return flat_logli.reshape(probs.shape[:2])

    @property
    def dist_info_keys(self):
        return ["prob"]
| 2,585 | 33.026316 | 113 | py |
rllab | rllab-master/rllab/distributions/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/distributions/delta.py | from rllab.distributions.base import Distribution
class Delta(Distribution):
    """Degenerate (point-mass) distribution, used as a placeholder for
    deterministic policies.

    KL and log-likelihood are not meaningful here and return None; entropy
    of a point mass is 0.

    Fixes over the original:
    - removed the dead first `entropy` stub (it was silently shadowed by a
      second `entropy` definition later in the class);
    - removed the `likelihood_sym` override, which referenced `TT` without
      importing it (NameError when called); the inherited
      Distribution.likelihood_sym is byte-for-byte identical and works.
    """

    @property
    def dim(self):
        return 0

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        return None

    def kl(self, old_dist_info, new_dist_info):
        return None

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        raise NotImplementedError

    def log_likelihood_sym(self, x_var, dist_info_vars):
        raise NotImplementedError

    def log_likelihood(self, xs, dist_info):
        return None

    @property
    def dist_info_keys(self):
        return None

    def entropy(self, dist_info):
        # A point mass has zero entropy.
        return 0
| 865 | 23.742857 | 82 | py |
rllab | rllab-master/rllab/distributions/diagonal_gaussian.py | import theano.tensor as TT
import numpy as np
from rllab.distributions.base import Distribution
class DiagonalGaussian(Distribution):
    """Multivariate Gaussian with a diagonal covariance matrix.

    dist_info dictionaries carry "mean" and "log_std", each of shape (N, A).
    All reductions are over the last axis, so one value per row is returned.
    """

    def __init__(self, dim):
        # dim: dimensionality of the action/sample space (A).
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL(old || new) between two diagonal Gaussians, shape (N,)."""
        old_means = old_dist_info_vars["mean"]
        old_log_stds = old_dist_info_vars["log_std"]
        new_means = new_dist_info_vars["mean"]
        new_log_stds = new_dist_info_vars["log_std"]
        """
        Compute the KL divergence of two multivariate Gaussian distribution with
        diagonal covariance matrices
        """
        old_std = TT.exp(old_log_stds)
        new_std = TT.exp(new_log_stds)
        # means: (N*A)
        # std: (N*A)
        # formula:
        # { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
        # ln(\sigma_2/\sigma_1)
        numerator = TT.square(old_means - new_means) + \
                    TT.square(old_std) - TT.square(new_std)
        # 1e-8 guards against division by zero for tiny stds.
        denominator = 2 * TT.square(new_std) + 1e-8
        return TT.sum(
            numerator / denominator + new_log_stds - old_log_stds, axis=-1)

    def kl(self, old_dist_info, new_dist_info):
        """Numeric KL(old || new); numpy mirror of kl_sym."""
        old_means = old_dist_info["mean"]
        old_log_stds = old_dist_info["log_std"]
        new_means = new_dist_info["mean"]
        new_log_stds = new_dist_info["log_std"]
        """
        Compute the KL divergence of two multivariate Gaussian distribution with
        diagonal covariance matrices
        """
        old_std = np.exp(old_log_stds)
        new_std = np.exp(new_log_stds)
        # means: (N*A)
        # std: (N*A)
        # formula:
        # { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
        # ln(\sigma_2/\sigma_1)
        numerator = np.square(old_means - new_means) + \
                    np.square(old_std) - np.square(new_std)
        denominator = 2 * np.square(new_std) + 1e-8
        return np.sum(
            numerator / denominator + new_log_stds - old_log_stds, axis=-1)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic p_new(x)/p_old(x), computed in log space for stability."""
        logli_new = self.log_likelihood_sym(x_var, new_dist_info_vars)
        logli_old = self.log_likelihood_sym(x_var, old_dist_info_vars)
        return TT.exp(logli_new - logli_old)

    def log_likelihood_sym(self, x_var, dist_info_vars):
        """Symbolic log density of *x_var* under the diagonal Gaussian."""
        means = dist_info_vars["mean"]
        log_stds = dist_info_vars["log_std"]
        # Standardized residuals.
        zs = (x_var - means) / TT.exp(log_stds)
        return - TT.sum(log_stds, axis=-1) - \
               0.5 * TT.sum(TT.square(zs), axis=-1) - \
               0.5 * means.shape[-1] * np.log(2 * np.pi)

    def sample(self, dist_info):
        """Draw one sample per row via the reparameterization mean + std*eps."""
        means = dist_info["mean"]
        log_stds = dist_info["log_std"]
        rnd = np.random.normal(size=means.shape)
        return rnd * np.exp(log_stds) + means

    def log_likelihood(self, xs, dist_info):
        """Numeric log density; numpy mirror of log_likelihood_sym."""
        means = dist_info["mean"]
        log_stds = dist_info["log_std"]
        zs = (xs - means) / np.exp(log_stds)
        return - np.sum(log_stds, axis=-1) - \
               0.5 * np.sum(np.square(zs), axis=-1) - \
               0.5 * means.shape[-1] * np.log(2 * np.pi)

    def entropy(self, dist_info):
        # Entropy of a diagonal Gaussian: sum_i (log_std_i + log sqrt(2*pi*e)).
        log_stds = dist_info["log_std"]
        return np.sum(log_stds + np.log(np.sqrt(2 * np.pi * np.e)), axis=-1)

    def entropy_sym(self, dist_info_var):
        log_std_var = dist_info_var["log_std"]
        return TT.sum(log_std_var + TT.log(np.sqrt(2 * np.pi * np.e)), axis=-1)

    @property
    def dist_info_keys(self):
        return ["mean", "log_std"]
| 3,610 | 36.226804 | 82 | py |
rllab | rllab-master/rllab/distributions/bernoulli.py |
from .base import Distribution
import theano.tensor as TT
import numpy as np
TINY = 1e-8
class Bernoulli(Distribution):
    """Factorized Bernoulli distribution over `dim` independent binary
    variables.

    dist_info dictionaries carry a single key "p": an (N, A) array of
    per-component success probabilities. TINY guards logs/divisions.
    """

    def __init__(self, dim):
        # dim: number of independent Bernoulli components (A).
        self._dim = dim

    @property
    def dim(self):
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL(old || new), summed over components."""
        old_p = old_dist_info_vars["p"]
        new_p = new_dist_info_vars["p"]
        kl = old_p * (TT.log(old_p + TINY) - TT.log(new_p + TINY)) + \
             (1 - old_p) * (TT.log(1 - old_p + TINY) - TT.log(1 - new_p + TINY))
        return TT.sum(kl, axis=-1)

    def kl(self, old_dist_info, new_dist_info):
        """Numeric KL(old || new); numpy mirror of kl_sym."""
        old_p = old_dist_info["p"]
        new_p = new_dist_info["p"]
        kl = old_p * (np.log(old_p + TINY) - np.log(new_p + TINY)) + \
             (1 - old_p) * (np.log(1 - old_p + TINY) - np.log(1 - new_p + TINY))
        return np.sum(kl, axis=-1)

    def sample(self, dist_info):
        # Each component is 1 with probability p (uniform draw below p).
        p = np.asarray(dist_info["p"])
        return np.cast['int'](np.random.uniform(low=0., high=1., size=p.shape) < p)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic p_new(x)/p_old(x); x_var holds 0/1 samples."""
        old_p = old_dist_info_vars["p"]
        new_p = new_dist_info_vars["p"]
        return TT.prod(x_var * new_p / (old_p + TINY) + (1 - x_var) * (1 - new_p) / (1 - old_p + TINY),
                       axis=-1)

    def log_likelihood_sym(self, x_var, dist_info_vars):
        p = dist_info_vars["p"]
        return TT.sum(x_var * TT.log(p + TINY) + (1 - x_var) * TT.log(1 - p + TINY), axis=-1)

    def log_likelihood(self, xs, dist_info):
        p = dist_info["p"]
        return np.sum(xs * np.log(p + TINY) + (1 - xs) * np.log(1 - p + TINY), axis=-1)

    def entropy(self, dist_info):
        # Sum of per-component binary entropies.
        p = dist_info["p"]
        return np.sum(- p * np.log(p + TINY) - (1 - p) * np.log(1 - p + TINY), axis=-1)

    @property
    def dist_info_keys(self):
        return ["p"]
| 1,891 | 32.192982 | 103 | py |
rllab | rllab-master/rllab/policies/base.py | from rllab.core.parameterized import Parameterized
class Policy(Parameterized):
    """Base class for policies: parameterized mappings from observations to
    actions, defined over the spaces of an environment spec."""

    def __init__(self, env_spec):
        Parameterized.__init__(self)
        self._env_spec = env_spec

    def get_action(self, observation):
        """Map a single observation to an action; must be overridden."""
        raise NotImplementedError

    def reset(self):
        """Reset any per-episode internal state. No-op by default."""
        pass

    @property
    def observation_space(self):
        return self._env_spec.observation_space

    @property
    def action_space(self):
        return self._env_spec.action_space

    @property
    def recurrent(self):
        """
        Indicates whether the policy is recurrent.
        :return:
        """
        return False

    def log_diagnostics(self, paths):
        """
        Log extra information per iteration based on the collected paths
        """
        pass

    @property
    def state_info_keys(self):
        """
        Return keys for the information related to the policy's state when taking an action.
        :return:
        """
        return list()

    def terminate(self):
        """
        Clean up operation
        """
        pass
class StochasticPolicy(Policy):
    """Policy whose actions are draws from an explicit probability
    distribution over the action space."""

    @property
    def distribution(self):
        """The Distribution object describing the action distribution.

        :rtype Distribution
        """
        raise NotImplementedError

    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic distribution parameters for the given observations.

        :param obs_var: symbolic variable for observations
        :param state_info_vars: a dictionary whose values contain information
            about the state of the policy at the time it received each
            observation
        """
        raise NotImplementedError

    def dist_info(self, obs, state_infos):
        """Numeric distribution parameters for the given observations.

        :param obs: observation values
        :param state_infos: a dictionary whose values contain information
            about the state of the policy at the time it received each
            observation
        """
        raise NotImplementedError
| 2,093 | 24.536585 | 117 | py |
rllab | rllab-master/rllab/policies/uniform_control_policy.py | from rllab.core.parameterized import Parameterized
from rllab.core.serializable import Serializable
from rllab.distributions.delta import Delta
from rllab.policies.base import Policy
from rllab.misc.overrides import overrides
class UniformControlPolicy(Policy):
    """Baseline policy that ignores observations and samples actions
    uniformly at random from the action space."""

    def __init__(
            self,
            env_spec,
    ):
        Serializable.quick_init(self, locals())
        super(UniformControlPolicy, self).__init__(env_spec=env_spec)

    @overrides
    def get_action(self, observation):
        # The observation is deliberately ignored.
        return self.action_space.sample(), dict()

    def get_params_internal(self, **tags):
        # This policy has no trainable parameters.
        return []

    def get_actions(self, observations):
        # Vectorized variant: one uniform sample per observation.
        return self.action_space.sample_n(len(observations)), dict()

    @property
    def vectorized(self):
        return True

    def reset(self, dones=None):
        pass

    @property
    def distribution(self):
        # Just a placeholder
        return Delta()
| 928 | 24.108108 | 69 | py |
rllab | rllab-master/rllab/policies/categorical_gru_policy.py | import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
import theano.tensor as TT
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.network import GRUNetwork
from rllab.core.lasagne_layers import OpLayer
from rllab.core.serializable import Serializable
from rllab.distributions.recurrent_categorical import RecurrentCategorical
from rllab.misc import ext
from rllab.spaces import Discrete
from rllab.misc import special
from rllab.misc.overrides import overrides
from rllab.policies.base import StochasticPolicy
class CategoricalGRUPolicy(StochasticPolicy, LasagnePowered):
    """Recurrent (GRU) policy for discrete action spaces.

    The GRU consumes sequences of flattened observations (optionally
    concatenated with the previous one-hot action) and outputs per-step
    softmax action probabilities.
    """

    def __init__(
            self,
            env_spec,
            hidden_dim=32,
            feature_network=None,
            state_include_action=True,
            hidden_nonlinearity=NL.tanh):
        """
        :param env_spec: A spec for the env (must have a Discrete action space).
        :param hidden_dim: dimension of the GRU hidden layer
        :param feature_network: optional network that maps flat inputs to
            features fed into the GRU; when None the raw input is used
        :param state_include_action: if True, the previous (flattened) action
            is concatenated to the observation as GRU input
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        assert isinstance(env_spec.action_space, Discrete)
        Serializable.quick_init(self, locals())
        super(CategoricalGRUPolicy, self).__init__(env_spec)

        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim

        if state_include_action:
            input_dim = obs_dim + action_dim
        else:
            input_dim = obs_dim

        # Input layout is (batch, time, input_dim); both leading dims variable.
        l_input = L.InputLayer(
            shape=(None, None, input_dim),
            name="input"
        )

        if feature_network is None:
            feature_dim = input_dim
            l_flat_feature = None
            l_feature = l_input
        else:
            # The feature network works on flattened (batch*time) inputs, so
            # its output must be reshaped back to (batch, time, feature_dim).
            feature_dim = feature_network.output_layer.output_shape[-1]
            l_flat_feature = feature_network.output_layer
            l_feature = OpLayer(
                l_flat_feature,
                extras=[l_input],
                name="reshape_feature",
                op=lambda flat_feature, input: TT.reshape(
                    flat_feature,
                    [input.shape[0], input.shape[1], feature_dim]
                ),
                shape_op=lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)
            )

        prob_network = GRUNetwork(
            input_shape=(feature_dim,),
            input_layer=l_feature,
            output_dim=env_spec.action_space.n,
            hidden_dim=hidden_dim,
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=TT.nnet.softmax,
            name="prob_network"
        )

        self.prob_network = prob_network
        self.feature_network = feature_network
        self.l_input = l_input
        self.state_include_action = state_include_action

        flat_input_var = TT.matrix("flat_input")
        if feature_network is None:
            feature_var = flat_input_var
        else:
            feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})

        # Compiled single-step function used during rollouts:
        # (flat input, previous hidden) -> (action probs, new hidden).
        self.f_step_prob = ext.compile_function(
            [
                flat_input_var,
                prob_network.step_prev_hidden_layer.input_var
            ],
            L.get_output([
                prob_network.step_output_layer,
                prob_network.step_hidden_layer
            ], {prob_network.step_input_layer: feature_var})
        )

        self.input_dim = input_dim
        self.action_dim = action_dim
        self.hidden_dim = hidden_dim

        # Per-episode rollout state, initialized by reset().
        self.prev_action = None
        self.prev_hidden = None
        self.dist = RecurrentCategorical(env_spec.action_space.n)

        out_layers = [prob_network.output_layer]
        if feature_network is not None:
            out_layers.append(feature_network.output_layer)

        LasagnePowered.__init__(self, out_layers)

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        """Symbolic per-step action probabilities for whole trajectories.

        obs_var has layout (batch, time, ...); returns dict(prob=...) with
        layout (batch, time, n_actions).
        """
        n_batches, n_steps = obs_var.shape[:2]
        obs_var = obs_var.reshape((n_batches, n_steps, -1))
        if self.state_include_action:
            prev_action_var = state_info_vars["prev_action"]
            all_input_var = TT.concatenate(
                [obs_var, prev_action_var],
                axis=2
            )
        else:
            all_input_var = obs_var
        if self.feature_network is None:
            return dict(
                prob=L.get_output(
                    self.prob_network.output_layer,
                    {self.l_input: all_input_var}
                )
            )
        else:
            # The feature network needs the flattened (batch*time) view too.
            flat_input_var = TT.reshape(all_input_var, (-1, self.input_dim))
            return dict(
                prob=L.get_output(
                    self.prob_network.output_layer,
                    {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}
                )
            )

    def reset(self):
        # Start a new episode: clear the last action and reset the hidden
        # state to the network's learned initial value.
        self.prev_action = None
        self.prev_hidden = self.prob_network.hid_init_param.get_value()

    # The return value is a pair. The first item is a matrix (N, A), where each
    # entry corresponds to the action value taken. The second item is a vector
    # of length N, where each entry is the density value for that action, under
    # the current policy
    @overrides
    def get_action(self, observation):
        """Sample one action, advancing the recurrent state as a side effect."""
        if self.state_include_action:
            # A zero vector stands in for the "previous action" on step one.
            if self.prev_action is None:
                prev_action = np.zeros((self.action_space.flat_dim,))
            else:
                prev_action = self.action_space.flatten(self.prev_action)
            all_input = np.concatenate([
                self.observation_space.flatten(observation),
                prev_action
            ])
        else:
            all_input = self.observation_space.flatten(observation)
            # should not be used
            prev_action = np.nan
        # Batch of size 1; unwrap both outputs.
        probs, hidden_vec = [x[0] for x in self.f_step_prob([all_input], [self.prev_hidden])]
        action = special.weighted_sample(probs, range(self.action_space.n))
        self.prev_action = action
        self.prev_hidden = hidden_vec
        agent_info = dict(prob=probs)
        if self.state_include_action:
            agent_info["prev_action"] = prev_action
        return action, agent_info

    @property
    @overrides
    def recurrent(self):
        return True

    @property
    def distribution(self):
        return self.dist

    @property
    def state_info_keys(self):
        if self.state_include_action:
            return ["prev_action"]
        else:
            return []
| 6,528 | 33.544974 | 101 | py |
rllab | rllab-master/rllab/policies/categorical_mlp_policy.py | import lasagne.layers as L
import lasagne.nonlinearities as NL
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.network import MLP
from rllab.core.serializable import Serializable
from rllab.distributions.categorical import Categorical
from rllab.misc import ext
from rllab.misc.overrides import overrides
from rllab.policies.base import StochasticPolicy
from rllab.spaces import Discrete
import numpy as np
class CategoricalMLPPolicy(StochasticPolicy, LasagnePowered):
    """Feed-forward (MLP) policy for discrete action spaces: maps a flattened
    observation to softmax action probabilities."""

    def __init__(
            self,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=NL.tanh,
            num_seq_inputs=1,
            prob_network=None,
    ):
        """
        :param env_spec: A spec for the mdp (must have a Discrete action space).
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param num_seq_inputs: number of stacked observations fed as one input
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())

        assert isinstance(env_spec.action_space, Discrete)

        if prob_network is None:
            prob_network = MLP(
                input_shape=(env_spec.observation_space.flat_dim * num_seq_inputs,),
                output_dim=env_spec.action_space.n,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=NL.softmax,
            )

        self._l_prob = prob_network.output_layer
        self._l_obs = prob_network.input_layer
        # Compiled forward pass: flat observations -> action probabilities.
        self._f_prob = ext.compile_function([prob_network.input_layer.input_var], L.get_output(
            prob_network.output_layer))

        self._dist = Categorical(env_spec.action_space.n)

        super(CategoricalMLPPolicy, self).__init__(env_spec)
        LasagnePowered.__init__(self, [prob_network.output_layer])

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars=None):
        """Symbolic action probabilities for a batch of flat observations."""
        return dict(prob=L.get_output(self._l_prob, {self._l_obs: obs_var}))

    @overrides
    def dist_info(self, obs, state_infos=None):
        """Numeric action probabilities for a batch of flat observations."""
        return dict(prob=self._f_prob(obs))

    # The return value is a pair. The first item is a matrix (N, A), where each
    # entry corresponds to the action value taken. The second item is a vector
    # of length N, where each entry is the density value for that action, under
    # the current policy
    @overrides
    def get_action(self, observation, deterministic=False):
        """Sample an action (or take the argmax when deterministic=True)."""
        flat_obs = self.observation_space.flatten(observation)
        prob = self._f_prob([flat_obs])[0]
        if deterministic:
            action = np.argmax(prob)
        else:
            action = self.action_space.weighted_sample(prob)
        return action, dict(prob=prob)

    def get_actions(self, observations):
        """Vectorized variant of get_action (always stochastic)."""
        flat_obs = self.observation_space.flatten_n(observations)
        probs = self._f_prob(flat_obs)
        actions = list(map(self.action_space.weighted_sample, probs))
        return actions, dict(prob=probs)

    @property
    def distribution(self):
        return self._dist
| 3,158 | 35.732558 | 95 | py |
rllab | rllab-master/rllab/policies/gaussian_mlp_policy.py | import lasagne
import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
from rllab.core.lasagne_layers import ParamLayer
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.network import MLP
from rllab.spaces import Box
from rllab.core.serializable import Serializable
from rllab.policies.base import StochasticPolicy
from rllab.misc.overrides import overrides
from rllab.misc import logger
from rllab.misc import ext
from rllab.distributions.diagonal_gaussian import DiagonalGaussian
import theano.tensor as TT
class GaussianMLPPolicy(StochasticPolicy, LasagnePowered):
    """MLP policy with a diagonal Gaussian action distribution.

    A feed-forward network maps observations to the action mean; the
    log-std is either a learned constant vector (ParamLayer), a second MLP
    head sharing the input layer (``adaptive_std=True``), or a
    user-supplied ``std_network``.
    """

    def __init__(
            self,
            env_spec,
            hidden_sizes=(32, 32),
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            std_hidden_sizes=(32, 32),
            min_std=1e-6,
            std_hidden_nonlinearity=NL.tanh,
            hidden_nonlinearity=NL.tanh,
            output_nonlinearity=None,
            mean_network=None,
            std_network=None,
            dist_cls=DiagonalGaussian,
    ):
        """
        :param env_spec: environment spec; the action space must be a Box
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param learn_std: Is std trainable
        :param init_std: Initial std
        :param adaptive_std: if True, predict the log-std with a separate MLP
            head instead of a learned constant vector
        :param std_share_network: not used by this implementation (kept for
            interface compatibility)
        :param std_hidden_sizes: list of sizes for the fully-connected layers for std
        :param min_std: whether to make sure that the std is at least some threshold value, to avoid numerical issues
        :param std_hidden_nonlinearity: nonlinearity for the std network's hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :param std_network: custom network for the output log std
        :param dist_cls: distribution class used to wrap (mean, log_std)
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)

        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim

        # create network
        if mean_network is None:
            mean_network = MLP(
                input_shape=(obs_dim,),
                output_dim=action_dim,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
            )
        self._mean_network = mean_network

        l_mean = mean_network.output_layer
        obs_var = mean_network.input_layer.input_var

        # Resolve the log-std head: explicit network > adaptive MLP head >
        # learned constant parameter vector.
        if std_network is not None:
            l_log_std = std_network.output_layer
        else:
            if adaptive_std:
                # Shares the input layer with the mean network.
                std_network = MLP(
                    input_shape=(obs_dim,),
                    input_layer=mean_network.input_layer,
                    output_dim=action_dim,
                    hidden_sizes=std_hidden_sizes,
                    hidden_nonlinearity=std_hidden_nonlinearity,
                    output_nonlinearity=None,
                )
                l_log_std = std_network.output_layer
            else:
                # Constant (per-dimension) log-std, initialized to log(init_std).
                l_log_std = ParamLayer(
                    mean_network.input_layer,
                    num_units=action_dim,
                    param=lasagne.init.Constant(np.log(init_std)),
                    name="output_log_std",
                    trainable=learn_std,
                )

        self.min_std = min_std

        mean_var, log_std_var = L.get_output([l_mean, l_log_std])

        # Clamp the log-std from below for numerical stability.
        if self.min_std is not None:
            log_std_var = TT.maximum(log_std_var, np.log(min_std))

        self._mean_var, self._log_std_var = mean_var, log_std_var

        self._l_mean = l_mean
        self._l_log_std = l_log_std

        self._dist = dist_cls(action_dim)

        LasagnePowered.__init__(self, [l_mean, l_log_std])
        super(GaussianMLPPolicy, self).__init__(env_spec)

        # Compiled function: observations -> (means, log_stds).
        self._f_dist = ext.compile_function(
            inputs=[obs_var],
            outputs=[mean_var, log_std_var],
        )

    def dist_info_sym(self, obs_var, state_info_vars=None):
        # Symbolic (mean, log_std) for arbitrary observation variables;
        # applies the same min_std clamp as the compiled path.
        mean_var, log_std_var = L.get_output([self._l_mean, self._l_log_std], obs_var)
        if self.min_std is not None:
            log_std_var = TT.maximum(log_std_var, np.log(self.min_std))
        return dict(mean=mean_var, log_std=log_std_var)

    @overrides
    def get_action(self, observation):
        """Sample one action via the reparameterization a = mean + eps * std."""
        flat_obs = self.observation_space.flatten(observation)
        mean, log_std = [x[0] for x in self._f_dist([flat_obs])]
        rnd = np.random.normal(size=mean.shape)
        action = rnd * np.exp(log_std) + mean
        return action, dict(mean=mean, log_std=log_std)

    def get_actions(self, observations):
        """Batched version of get_action."""
        flat_obs = self.observation_space.flatten_n(observations)
        means, log_stds = self._f_dist(flat_obs)
        rnd = np.random.normal(size=means.shape)
        actions = rnd * np.exp(log_stds) + means
        return actions, dict(mean=means, log_std=log_stds)

    def get_reparam_action_sym(self, obs_var, action_var, old_dist_info_vars):
        """
        Given observations, old actions, and distribution of old actions, return a symbolically reparameterized
        representation of the actions in terms of the policy parameters
        :param obs_var:
        :param action_var:
        :param old_dist_info_vars:
        :return:
        """
        # NOTE(review): action_var is passed as dist_info_sym's
        # state_info_vars argument; dist_info_sym ignores that argument, so
        # this is harmless, but the intent looks like it should be
        # dist_info_sym(obs_var) — confirm.
        new_dist_info_vars = self.dist_info_sym(obs_var, action_var)
        new_mean_var, new_log_std_var = new_dist_info_vars["mean"], new_dist_info_vars["log_std"]
        old_mean_var, old_log_std_var = old_dist_info_vars["mean"], old_dist_info_vars["log_std"]
        # Recover the noise that generated the old action, then replay it
        # through the current distribution parameters.
        epsilon_var = (action_var - old_mean_var) / (TT.exp(old_log_std_var) + 1e-8)
        new_action_var = new_mean_var + epsilon_var * TT.exp(new_log_std_var)
        return new_action_var

    def log_diagnostics(self, paths):
        # Report the average action std across all collected timesteps.
        log_stds = np.vstack([path["agent_infos"]["log_std"] for path in paths])
        logger.record_tabular('AveragePolicyStd', np.mean(np.exp(log_stds)))

    @property
    def distribution(self):
        return self._dist
| 6,192 | 37.228395 | 117 | py |
rllab | rllab-master/rllab/policies/gaussian_gru_policy.py | import lasagne.layers as L
import lasagne.nonlinearities as NL
import lasagne.init
import numpy as np
import theano.tensor as TT
from rllab.core.lasagne_layers import ParamLayer
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.network import GRUNetwork
from rllab.core.serializable import Serializable
from rllab.distributions.recurrent_diagonal_gaussian import RecurrentDiagonalGaussian
from rllab.misc import ext
from rllab.misc.overrides import overrides
from rllab.policies.base import StochasticPolicy
class GaussianGRUPolicy(StochasticPolicy, LasagnePowered):
    """Recurrent (GRU) policy with a diagonal Gaussian action distribution.

    The GRU maps observation sequences to action means; the log-std is a
    learned constant vector shared between the sequence-level and
    single-step computation graphs.  When ``state_include_action`` is True,
    the previous action is concatenated to each observation.
    """

    def __init__(
            self,
            env_spec,
            hidden_sizes=(32,),
            state_include_action=True,
            hidden_nonlinearity=NL.tanh,
            learn_std=True,
            init_std=1.0,
            output_nonlinearity=None,
    ):
        """
        :param env_spec: A spec for the env.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
            (exactly one hidden layer is supported)
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        Serializable.quick_init(self, locals())
        super(GaussianGRUPolicy, self).__init__(env_spec)

        # Only a single GRU layer is supported.
        assert len(hidden_sizes) == 1

        if state_include_action:
            obs_dim = env_spec.observation_space.flat_dim + env_spec.action_space.flat_dim
        else:
            obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim

        mean_network = GRUNetwork(
            input_shape=(obs_dim,),
            output_dim=action_dim,
            hidden_dim=hidden_sizes[0],
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=output_nonlinearity,
        )

        l_mean = mean_network.output_layer
        obs_var = mean_network.input_var

        # Constant log-std parameter for the sequence graph...
        l_log_std = ParamLayer(
            mean_network.input_layer,
            num_units=action_dim,
            param=lasagne.init.Constant(np.log(init_std)),
            name="output_log_std",
            trainable=learn_std,
        )

        # ...and the same underlying parameter attached to the one-step graph.
        l_step_log_std = ParamLayer(
            mean_network.step_input_layer,
            num_units=action_dim,
            param=l_log_std.param,
            name="step_output_log_std",
            trainable=learn_std,
        )

        self._mean_network = mean_network
        self._l_log_std = l_log_std
        self._state_include_action = state_include_action

        # Compiled one-step function:
        # (input, prev_hidden) -> (mean, log_std, new_hidden).
        self._f_step_mean_std = ext.compile_function(
            [
                mean_network.step_input_layer.input_var,
                mean_network.step_prev_hidden_layer.input_var
            ],
            L.get_output([
                mean_network.step_output_layer,
                l_step_log_std,
                mean_network.step_hidden_layer
            ])
        )

        self._prev_action = None
        self._prev_hidden = None
        self._hidden_sizes = hidden_sizes
        self._dist = RecurrentDiagonalGaussian(action_dim)

        self.reset()

        LasagnePowered.__init__(self, [mean_network.output_layer, l_log_std])

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars):
        # obs_var has shape (batch, steps, ...); flatten trailing dims.
        n_batches, n_steps = obs_var.shape[:2]
        obs_var = obs_var.reshape((n_batches, n_steps, -1))
        if self._state_include_action:
            prev_action_var = state_info_vars["prev_action"]
            all_input_var = TT.concatenate(
                [obs_var, prev_action_var],
                axis=2
            )
        else:
            all_input_var = obs_var
        means, log_stds = L.get_output([self._mean_network.output_layer, self._l_log_std], all_input_var)
        return dict(mean=means, log_std=log_stds)

    def reset(self):
        # Clear per-episode recurrent state: no previous action, hidden state
        # reset to the network's learned initial value.
        self._prev_action = None
        self._prev_hidden = self._mean_network.hid_init_param.get_value()

    @overrides
    def get_action(self, observation):
        """Run one recurrent step and sample an action.

        Returns the action and an info dict with mean/log_std (and, when
        state_include_action is on, the previous action fed to the network).
        """
        if self._state_include_action:
            if self._prev_action is None:
                prev_action = np.zeros((self.action_space.flat_dim,))
            else:
                prev_action = self.action_space.flatten(self._prev_action)
            all_input = np.concatenate([
                self.observation_space.flatten(observation),
                prev_action
            ])
        else:
            all_input = self.observation_space.flatten(observation)
            # should not be used
            prev_action = np.nan
        mean, log_std, hidden_vec = [x[0] for x in self._f_step_mean_std([all_input], [self._prev_hidden])]
        rnd = np.random.normal(size=mean.shape)
        action = rnd * np.exp(log_std) + mean
        # Carry recurrent state forward to the next call.
        self._prev_action = action
        self._prev_hidden = hidden_vec
        agent_info = dict(mean=mean, log_std=log_std)
        if self._state_include_action:
            agent_info["prev_action"] = prev_action
        return action, agent_info

    @property
    @overrides
    def recurrent(self):
        return True

    @property
    def distribution(self):
        return self._dist

    @property
    def state_info_keys(self):
        # prev_action must be recorded per step so dist_info_sym can rebuild
        # the network input during optimization.
        if self._state_include_action:
            return ["prev_action"]
        else:
            return []
| 5,456 | 33.10625 | 107 | py |
rllab | rllab-master/rllab/policies/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/policies/categorical_conv_policy.py | from rllab.core.lasagne_powered import LasagnePowered
import lasagne.layers as L
from rllab.core.network import ConvNetwork
from rllab.distributions.categorical import Categorical
from rllab.policies.base import StochasticPolicy
from rllab.misc import tensor_utils
from rllab.spaces.discrete import Discrete
from rllab.core.serializable import Serializable
from rllab.misc import ext
from rllab.misc import logger
from rllab.misc.overrides import overrides
import numpy as np
import lasagne.nonlinearities as NL
class CategoricalConvPolicy(StochasticPolicy, LasagnePowered):
    """Convolutional policy for discrete action spaces.

    A ConvNetwork maps (image) observations to a categorical distribution
    over actions.
    """

    def __init__(
            self,
            name,
            env_spec,
            conv_filters, conv_filter_sizes, conv_strides, conv_pads,
            hidden_sizes=[],
            hidden_nonlinearity=NL.rectify,
            output_nonlinearity=NL.softmax,
            prob_network=None,
    ):
        """
        :param name: name of the policy (recorded via quick_init)
        :param env_spec: A spec for the mdp; the action space must be Discrete.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
            (should produce a probability vector; defaults to softmax)
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())

        assert isinstance(env_spec.action_space, Discrete)

        self._env_spec = env_spec

        if prob_network is None:
            prob_network = ConvNetwork(
                input_shape=env_spec.observation_space.shape,
                output_dim=env_spec.action_space.n,
                conv_filters=conv_filters,
                conv_filter_sizes=conv_filter_sizes,
                conv_strides=conv_strides,
                conv_pads=conv_pads,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                # Bug fix: previously hard-coded to NL.softmax, silently
                # ignoring the constructor argument.  The default is still
                # softmax, so existing callers are unaffected.
                output_nonlinearity=output_nonlinearity,
                name="prob_network",
            )

        self._l_prob = prob_network.output_layer
        self._l_obs = prob_network.input_layer

        # Compiled function: flattened observations -> action probabilities.
        self._f_prob = ext.compile_function(
            [prob_network.input_layer.input_var],
            L.get_output(prob_network.output_layer)
        )

        self._dist = Categorical(env_spec.action_space.n)

        super(CategoricalConvPolicy, self).__init__(env_spec)
        LasagnePowered.__init__(self, [prob_network.output_layer])

    @property
    def vectorized(self):
        return True

    @overrides
    def dist_info_sym(self, obs_var, state_info_vars=None):
        # Symbolic action probabilities for arbitrary observation variables.
        return dict(
            prob=L.get_output(
                self._l_prob,
                {self._l_obs: obs_var}
            )
        )

    @overrides
    def dist_info(self, obs, state_infos=None):
        return dict(prob=self._f_prob(obs))

    @overrides
    def get_action(self, observation):
        """Sample a single action; also return the probabilities used."""
        flat_obs = self.observation_space.flatten(observation)
        prob = self._f_prob([flat_obs])[0]
        action = self.action_space.weighted_sample(prob)
        return action, dict(prob=prob)

    def get_actions(self, observations):
        """Sample one action per observation.

        Returns a list of N actions and a dict whose ``prob`` entry is the
        (N, A) matrix of action probabilities under the current policy.
        """
        flat_obs = self.observation_space.flatten_n(observations)
        probs = self._f_prob(flat_obs)
        actions = list(map(self.action_space.weighted_sample, probs))
        return actions, dict(prob=probs)

    @property
    def distribution(self):
        return self._dist
| 3,623 | 33.514286 | 93 | py |
rllab | rllab-master/rllab/policies/deterministic_mlp_policy.py | import lasagne
import lasagne.layers as L
import lasagne.nonlinearities as NL
import lasagne.init as LI
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.lasagne_layers import batch_norm
from rllab.core.serializable import Serializable
from rllab.misc import ext
from rllab.policies.base import Policy
class DeterministicMLPPolicy(Policy, LasagnePowered):
    """Deterministic MLP policy (as used by DDPG): observation -> action.

    Optionally inserts batch normalization before each dense layer.
    """

    def __init__(
            self,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=NL.rectify,
            hidden_W_init=LI.HeUniform(),
            hidden_b_init=LI.Constant(0.),
            output_nonlinearity=NL.tanh,
            output_W_init=LI.Uniform(-3e-3, 3e-3),
            output_b_init=LI.Uniform(-3e-3, 3e-3),
            bn=False):
        Serializable.quick_init(self, locals())

        l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim))

        l_hidden = l_obs
        if bn:
            l_hidden = batch_norm(l_hidden)

        for idx, size in enumerate(hidden_sizes):
            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=size,
                W=hidden_W_init,
                b=hidden_b_init,
                nonlinearity=hidden_nonlinearity,
                name="h%d" % idx
            )
            if bn:
                l_hidden = batch_norm(l_hidden)

        l_output = L.DenseLayer(
            l_hidden,
            num_units=env_spec.action_space.flat_dim,
            W=output_W_init,
            b=output_b_init,
            nonlinearity=output_nonlinearity,
            name="output"
        )

        # Note the deterministic=True argument. It makes sure that when getting
        # actions from single observations, we do not update params in the
        # batch normalization layers
        action_var = L.get_output(l_output, deterministic=True)
        self._output_layer = l_output

        self._f_actions = ext.compile_function([l_obs.input_var], action_var)

        super(DeterministicMLPPolicy, self).__init__(env_spec)
        LasagnePowered.__init__(self, [l_output])

    def get_action(self, observation):
        # Deterministic: the info dict is always empty.
        action = self._f_actions([observation])[0]
        return action, dict()

    def get_actions(self, observations):
        # Batched version of get_action.
        return self._f_actions(observations), dict()

    def get_action_sym(self, obs_var):
        # Symbolic actions (uses default, non-deterministic BN behavior),
        # e.g. for building the policy-gradient objective.
        return L.get_output(self._output_layer, obs_var)
| 2,408 | 31.554054 | 79 | py |
rllab | rllab-master/rllab/baselines/gaussian_conv_baseline.py | import numpy as np
from rllab.core.serializable import Serializable
from rllab.misc.overrides import overrides
from rllab.core.parameterized import Parameterized
from rllab.baselines.base import Baseline
from rllab.regressors.gaussian_conv_regressor import GaussianConvRegressor
class GaussianConvBaseline(Baseline, Parameterized):
    """Value-function baseline backed by a Gaussian convolutional regressor.

    Fits observations -> empirical returns; all heavy lifting is delegated
    to GaussianConvRegressor.
    """

    def __init__(
            self,
            env_spec,
            subsample_factor=1.,
            regressor_args=None,
    ):
        # subsample_factor is accepted for interface compatibility but is not
        # used by this implementation.
        Serializable.quick_init(self, locals())
        super(GaussianConvBaseline, self).__init__(env_spec)
        if regressor_args is None:
            regressor_args = dict()

        self._regressor = GaussianConvRegressor(
            input_shape=env_spec.observation_space.shape,
            output_dim=1,
            name="vf",
            **regressor_args
        )

    @overrides
    def fit(self, paths):
        # Stack all paths into one (timesteps, ...) training set.
        observations = np.concatenate([p["observations"] for p in paths])
        returns = np.concatenate([p["returns"] for p in paths])
        self._regressor.fit(observations, returns.reshape((-1, 1)))

    @overrides
    def predict(self, path):
        # Regressor outputs shape (T, 1); flatten to a per-timestep vector.
        return self._regressor.predict(path["observations"]).flatten()

    @overrides
    def get_param_values(self, **tags):
        return self._regressor.get_param_values(**tags)

    @overrides
    def set_param_values(self, flattened_params, **tags):
        self._regressor.set_param_values(flattened_params, **tags)
| 1,460 | 30.085106 | 74 | py |
rllab | rllab-master/rllab/baselines/zero_baseline.py | import numpy as np
from rllab.baselines.base import Baseline
from rllab.misc.overrides import overrides
class ZeroBaseline(Baseline):
    """Trivial baseline that always predicts zero returns."""

    def __init__(self, env_spec):
        # Stateless; env_spec is accepted only for interface parity.
        pass

    @overrides
    def get_param_values(self, **kwargs):
        # No parameters to report.
        return None

    @overrides
    def set_param_values(self, val, **kwargs):
        # No parameters to update.
        pass

    @overrides
    def fit(self, paths):
        # Fitting a constant-zero predictor is a no-op.
        pass

    @overrides
    def predict(self, path):
        # zeros_like keeps both the shape and dtype of the rewards array.
        return np.zeros_like(path["rewards"])
| 484 | 17.653846 | 46 | py |
rllab | rllab-master/rllab/baselines/base.py | from rllab.misc import autoargs
class Baseline(object):
    """Abstract interface for value-function baselines.

    Subclasses implement fit/predict plus (de)serialization of parameters.
    """

    def __init__(self, env_spec):
        self._mdp_spec = env_spec

    @property
    def algorithm_parallelized(self):
        # Subclasses that fit in parallel may override this to True.
        return False

    def get_param_values(self):
        """Return a flat representation of the baseline's parameters."""
        raise NotImplementedError

    def set_param_values(self, val):
        """Restore parameters previously obtained from get_param_values."""
        raise NotImplementedError

    def fit(self, paths):
        """Fit the baseline to a list of sampled paths."""
        raise NotImplementedError

    def predict(self, path):
        """Predict the per-timestep value estimates for one path."""
        raise NotImplementedError

    @classmethod
    @autoargs.add_args
    def add_args(cls, parser):
        pass

    @classmethod
    @autoargs.new_from_args
    def new_from_args(cls, args, mdp):
        pass

    def log_diagnostics(self, paths):
        """
        Log extra information per iteration based on the collected paths
        """
        pass
| 797 | 18.95 | 72 | py |
rllab | rllab-master/rllab/baselines/gaussian_mlp_baseline.py | import numpy as np
from rllab.core.serializable import Serializable
from rllab.core.parameterized import Parameterized
from rllab.baselines.base import Baseline
from rllab.misc.overrides import overrides
from rllab.regressors.gaussian_mlp_regressor import GaussianMLPRegressor
class GaussianMLPBaseline(Baseline, Parameterized):
    """Value-function baseline backed by a Gaussian MLP regressor.

    Fits flattened observations -> empirical returns; all heavy lifting is
    delegated to GaussianMLPRegressor.
    """

    def __init__(
            self,
            env_spec,
            subsample_factor=1.,
            num_seq_inputs=1,
            regressor_args=None,
    ):
        # subsample_factor is accepted for interface compatibility but is not
        # used by this implementation.  num_seq_inputs widens the input layer
        # to hold that many concatenated observations.
        Serializable.quick_init(self, locals())
        super(GaussianMLPBaseline, self).__init__(env_spec)
        if regressor_args is None:
            regressor_args = dict()

        self._regressor = GaussianMLPRegressor(
            input_shape=(env_spec.observation_space.flat_dim * num_seq_inputs,),
            output_dim=1,
            name="vf",
            **regressor_args
        )

    @overrides
    def fit(self, paths):
        # Stack all paths into one (timesteps, obs_dim) training set.
        observations = np.concatenate([p["observations"] for p in paths])
        returns = np.concatenate([p["returns"] for p in paths])
        self._regressor.fit(observations, returns.reshape((-1, 1)))

    @overrides
    def predict(self, path):
        # Regressor outputs shape (T, 1); flatten to a per-timestep vector.
        return self._regressor.predict(path["observations"]).flatten()

    @overrides
    def get_param_values(self, **tags):
        return self._regressor.get_param_values(**tags)

    @overrides
    def set_param_values(self, flattened_params, **tags):
        self._regressor.set_param_values(flattened_params, **tags)
| 1,508 | 30.4375 | 80 | py |
rllab | rllab-master/rllab/baselines/linear_feature_baseline.py | from rllab.baselines.base import Baseline
from rllab.misc.overrides import overrides
import numpy as np
class LinearFeatureBaseline(Baseline):
    """Value baseline: linear regression on hand-crafted state/time features.

    Features per timestep are the (clipped) observations, their element-wise
    squares, polynomial terms of the normalized timestep, and a bias column.
    Fitting solves a ridge-regularized least-squares problem in closed form.
    """

    def __init__(self, env_spec, reg_coeff=1e-5):
        # Regression coefficients; None until fit() has been called.
        self._coeffs = None
        # Initial Tikhonov regularization coefficient for the normal equations.
        self._reg_coeff = reg_coeff

    @overrides
    def get_param_values(self, **tags):
        return self._coeffs

    @overrides
    def set_param_values(self, val, **tags):
        self._coeffs = val

    def _features(self, path):
        """Build the (T, F) feature matrix for one path."""
        o = np.clip(path["observations"], -10, 10)
        path_len = len(path["rewards"])
        # Timestep index, scaled down so polynomial terms stay well-behaved.
        al = np.arange(path_len).reshape(-1, 1) / 100.0
        return np.concatenate(
            [o, o ** 2, al, al ** 2, al ** 3, np.ones((path_len, 1))], axis=1)

    @overrides
    def fit(self, paths):
        """Fit coefficients by regularized least squares on the returns.

        If the solution contains NaNs (ill-conditioned system), retry with a
        10x larger regularization coefficient, up to 5 attempts.
        """
        featmat = np.concatenate([self._features(path) for path in paths])
        returns = np.concatenate([path["returns"] for path in paths])
        reg_coeff = self._reg_coeff
        for _ in range(5):
            # rcond=-1 keeps the pre-NumPy-1.14 singular-value cutoff and
            # silences the FutureWarning raised when rcond is unspecified.
            self._coeffs = np.linalg.lstsq(
                featmat.T.dot(featmat) + reg_coeff * np.identity(featmat.shape[1]),
                featmat.T.dot(returns),
                rcond=-1
            )[0]
            if not np.any(np.isnan(self._coeffs)):
                break
            reg_coeff *= 10

    @overrides
    def predict(self, path):
        """Predicted returns for each timestep; zeros before the first fit."""
        if self._coeffs is None:
            return np.zeros(len(path["rewards"]))
        return self._features(path).dot(self._coeffs)
| 1,403 | 30.909091 | 89 | py |
rllab | rllab-master/rllab/baselines/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/algos/base.py | class Algorithm(object):
pass
class RLAlgorithm(Algorithm):
    def train(self):
        """Run the full training loop; must be implemented by subclasses."""
        raise NotImplementedError
| 122 | 12.666667 | 33 | py |
rllab | rllab-master/rllab/algos/npo.py | from rllab.misc import ext
from rllab.misc.overrides import overrides
from rllab.algos.batch_polopt import BatchPolopt
import rllab.misc.logger as logger
import theano
import theano.tensor as TT
from rllab.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
class NPO(BatchPolopt):
    """
    Natural Policy Optimization.

    Maximizes the surrogate (importance-weighted advantage) objective
    subject to a mean-KL trust-region constraint of size ``step_size``.
    """

    def __init__(
            self,
            optimizer=None,
            optimizer_args=None,
            step_size=0.01,
            truncate_local_is_ratio=None,
            **kwargs
    ):
        # Default to the penalty-based L-BFGS constrained optimizer.
        if optimizer is None:
            if optimizer_args is None:
                optimizer_args = dict()
            optimizer = PenaltyLbfgsOptimizer(**optimizer_args)
        self.optimizer = optimizer
        self.step_size = step_size
        # Optional upper bound applied to each per-sample importance ratio.
        self.truncate_local_is_ratio = truncate_local_is_ratio
        super(NPO, self).__init__(**kwargs)

    @overrides
    def init_opt(self):
        """Build the symbolic loss / KL constraint and hand them to the optimizer."""
        is_recurrent = int(self.policy.recurrent)

        # Recurrent policies get an extra leading time dimension on all inputs.
        obs_var = self.env.observation_space.new_tensor_variable(
            'obs',
            extra_dims=1 + is_recurrent,
        )
        action_var = self.env.action_space.new_tensor_variable(
            'action',
            extra_dims=1 + is_recurrent,
        )
        advantage_var = ext.new_tensor(
            'advantage',
            ndim=1 + is_recurrent,
            dtype=theano.config.floatX
        )
        dist = self.policy.distribution

        # Placeholders for the behavior policy's distribution parameters.
        old_dist_info_vars = {
            k: ext.new_tensor(
                'old_%s' % k,
                ndim=2 + is_recurrent,
                dtype=theano.config.floatX
            ) for k in dist.dist_info_keys
        }
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]

        # Placeholders for any per-step policy state (e.g. prev_action).
        state_info_vars = {
            k: ext.new_tensor(
                k,
                ndim=2 + is_recurrent,
                dtype=theano.config.floatX
            ) for k in self.policy.state_info_keys
        }
        state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]

        if is_recurrent:
            # Mask marking real (non-padding) timesteps.
            valid_var = TT.matrix('valid')
        else:
            valid_var = None

        dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
        lr = dist.likelihood_ratio_sym(action_var, old_dist_info_vars, dist_info_vars)
        if self.truncate_local_is_ratio is not None:
            lr = TT.minimum(self.truncate_local_is_ratio, lr)
        if is_recurrent:
            # Average only over valid timesteps.
            mean_kl = TT.sum(kl * valid_var) / TT.sum(valid_var)
            surr_loss = - TT.sum(lr * advantage_var * valid_var) / TT.sum(valid_var)
        else:
            mean_kl = TT.mean(kl)
            surr_loss = - TT.mean(lr * advantage_var)

        input_list = [
            obs_var,
            action_var,
            advantage_var,
        ] + state_info_vars_list + old_dist_info_vars_list
        if is_recurrent:
            input_list.append(valid_var)

        self.optimizer.update_opt(
            loss=surr_loss,
            target=self.policy,
            leq_constraint=(mean_kl, self.step_size),
            inputs=input_list,
            constraint_name="mean_kl"
        )
        return dict()

    @overrides
    def optimize_policy(self, itr, samples_data):
        """Run one constrained optimization step and log loss/KL diagnostics."""
        # Assemble inputs in the same order as input_list in init_opt.
        all_input_values = tuple(ext.extract(
            samples_data,
            "observations", "actions", "advantages"
        ))
        agent_infos = samples_data["agent_infos"]
        state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
        dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
        all_input_values += tuple(state_info_list) + tuple(dist_info_list)
        if self.policy.recurrent:
            all_input_values += (samples_data["valids"],)
        loss_before = self.optimizer.loss(all_input_values)
        mean_kl_before = self.optimizer.constraint_val(all_input_values)
        self.optimizer.optimize(all_input_values)
        mean_kl = self.optimizer.constraint_val(all_input_values)
        loss_after = self.optimizer.loss(all_input_values)
        logger.record_tabular('LossBefore', loss_before)
        logger.record_tabular('LossAfter', loss_after)
        logger.record_tabular('MeanKLBefore', mean_kl_before)
        logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('dLoss', loss_before - loss_after)
        return dict()

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        """Snapshot saved by the training loop after each iteration."""
        return dict(
            itr=itr,
            policy=self.policy,
            baseline=self.baseline,
            env=self.env,
        )
| 4,746 | 34.691729 | 90 | py |
rllab | rllab-master/rllab/algos/vpg.py | import theano.tensor as TT
import theano
from rllab.misc import logger
from rllab.misc.overrides import overrides
from rllab.misc import ext
from rllab.algos.batch_polopt import BatchPolopt
from rllab.optimizers.first_order_optimizer import FirstOrderOptimizer
from rllab.core.serializable import Serializable
class VPG(BatchPolopt, Serializable):
    """
    Vanilla Policy Gradient.

    Minimizes the negative log-likelihood-weighted-advantage surrogate with
    a first-order optimizer; its gradient is the REINFORCE policy gradient.
    """

    def __init__(
            self,
            env,
            policy,
            baseline,
            optimizer=None,
            optimizer_args=None,
            **kwargs):
        Serializable.quick_init(self, locals())
        if optimizer is None:
            # Single epoch over the full batch by default.
            default_args = dict(
                batch_size=None,
                max_epochs=1,
            )
            if optimizer_args is None:
                optimizer_args = default_args
            else:
                optimizer_args = dict(default_args, **optimizer_args)
            optimizer = FirstOrderOptimizer(**optimizer_args)
        self.optimizer = optimizer
        self.opt_info = None
        super(VPG, self).__init__(env=env, policy=policy, baseline=baseline, **kwargs)

    @overrides
    def init_opt(self):
        """Build the surrogate objective and the KL diagnostic function."""
        is_recurrent = int(self.policy.recurrent)

        # Recurrent policies get an extra leading time dimension on all inputs.
        obs_var = self.env.observation_space.new_tensor_variable(
            'obs',
            extra_dims=1 + is_recurrent,
        )
        action_var = self.env.action_space.new_tensor_variable(
            'action',
            extra_dims=1 + is_recurrent,
        )
        advantage_var = ext.new_tensor(
            'advantage',
            ndim=1 + is_recurrent,
            dtype=theano.config.floatX
        )
        dist = self.policy.distribution

        # Placeholders for the behavior policy's distribution parameters
        # (used only for the KL diagnostics, not the objective).
        old_dist_info_vars = {
            k: ext.new_tensor(
                'old_%s' % k,
                ndim=2 + is_recurrent,
                dtype=theano.config.floatX
            ) for k in dist.dist_info_keys
        }
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]

        if is_recurrent:
            # Mask marking real (non-padding) timesteps.
            valid_var = TT.matrix('valid')
        else:
            valid_var = None

        state_info_vars = {
            k: ext.new_tensor(
                k,
                ndim=2 + is_recurrent,
                dtype=theano.config.floatX
            ) for k in self.policy.state_info_keys
        }
        state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]

        dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
        logli = dist.log_likelihood_sym(action_var, dist_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)

        # formulate as a minimization problem
        # The gradient of the surrogate objective is the policy gradient
        if is_recurrent:
            surr_obj = - TT.sum(logli * advantage_var * valid_var) / TT.sum(valid_var)
            mean_kl = TT.sum(kl * valid_var) / TT.sum(valid_var)
            max_kl = TT.max(kl * valid_var)
        else:
            surr_obj = - TT.mean(logli * advantage_var)
            mean_kl = TT.mean(kl)
            max_kl = TT.max(kl)

        input_list = [obs_var, action_var, advantage_var] + state_info_vars_list
        if is_recurrent:
            input_list.append(valid_var)

        self.optimizer.update_opt(surr_obj, target=self.policy, inputs=input_list)

        # Diagnostic-only function: reports KL between old and new policy.
        f_kl = ext.compile_function(
            inputs=input_list + old_dist_info_vars_list,
            outputs=[mean_kl, max_kl],
        )
        self.opt_info = dict(
            f_kl=f_kl,
        )

    @overrides
    def optimize_policy(self, itr, samples_data):
        """Run one first-order optimization pass and log loss/KL diagnostics."""
        logger.log("optimizing policy")
        # Assemble inputs in the same order as input_list in init_opt.
        inputs = ext.extract(
            samples_data,
            "observations", "actions", "advantages"
        )
        agent_infos = samples_data["agent_infos"]
        state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
        inputs += tuple(state_info_list)
        if self.policy.recurrent:
            inputs += (samples_data["valids"],)
        dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
        loss_before = self.optimizer.loss(inputs)
        self.optimizer.optimize(inputs)
        loss_after = self.optimizer.loss(inputs)
        logger.record_tabular("LossBefore", loss_before)
        logger.record_tabular("LossAfter", loss_after)

        mean_kl, max_kl = self.opt_info['f_kl'](*(list(inputs) + dist_info_list))
        logger.record_tabular('MeanKL', mean_kl)
        logger.record_tabular('MaxKL', max_kl)

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        """Snapshot saved by the training loop after each iteration."""
        return dict(
            itr=itr,
            policy=self.policy,
            baseline=self.baseline,
            env=self.env,
        )
| 4,777 | 33.128571 | 90 | py |
rllab | rllab-master/rllab/algos/ddpg.py | from rllab.algos.base import RLAlgorithm
from rllab.misc.overrides import overrides
from rllab.misc import special
from rllab.misc import ext
from rllab.sampler import parallel_sampler
from rllab.plotter import plotter
from functools import partial
import rllab.misc.logger as logger
import theano.tensor as TT
import pickle as pickle
import numpy as np
import pyprind
import lasagne
def parse_update_method(update_method, **kwargs):
    """Resolve an update-method name to a lasagne update function.

    :param update_method: either ``'adam'`` or ``'sgd'``
    :param kwargs: optimizer options (e.g. learning_rate); None-valued
        entries are dropped by ``ext.compact`` before binding.
    :return: a ``functools.partial`` wrapping the chosen lasagne update rule
    :raises NotImplementedError: for any unrecognized method name
    """
    if update_method == 'adam':
        return partial(lasagne.updates.adam, **ext.compact(kwargs))
    elif update_method == 'sgd':
        return partial(lasagne.updates.sgd, **ext.compact(kwargs))
    else:
        # Include the offending name instead of raising a bare error.
        raise NotImplementedError("unknown update method: %s" % update_method)
class SimpleReplayPool(object):
    """Fixed-capacity FIFO replay pool backed by flat numpy ring buffers.

    Observations, actions, rewards and terminal flags are stored in parallel
    circular arrays.  Once the pool is full, each new sample overwrites the
    oldest one (at ``_bottom``).
    """

    def __init__(
            self, max_pool_size, observation_dim, action_dim):
        self._observation_dim = observation_dim
        self._action_dim = action_dim
        self._max_pool_size = max_pool_size
        self._observations = np.zeros(
            (max_pool_size, observation_dim),
        )
        self._actions = np.zeros(
            (max_pool_size, action_dim),
        )
        self._rewards = np.zeros(max_pool_size)
        self._terminals = np.zeros(max_pool_size, dtype='uint8')
        # _bottom: index of the oldest sample; _top: next write position.
        self._bottom = 0
        self._top = 0
        self._size = 0

    def add_sample(self, observation, action, reward, terminal):
        """Append one transition, evicting the oldest sample when full."""
        self._observations[self._top] = observation
        self._actions[self._top] = action
        self._rewards[self._top] = reward
        self._terminals[self._top] = terminal
        self._top = (self._top + 1) % self._max_pool_size
        if self._size >= self._max_pool_size:
            self._bottom = (self._bottom + 1) % self._max_pool_size
        else:
            self._size += 1

    def random_batch(self, batch_size):
        """Sample ``batch_size`` transitions uniformly at random.

        Each returned entry pairs a state with the state stored in the
        following slot.  The most recently added sample is never selected,
        because the slot after it holds the oldest entry (or uninitialized
        data), not its true successor state.
        """
        assert self._size > batch_size
        indices = np.zeros(batch_size, dtype='uint64')
        transition_indices = np.zeros(batch_size, dtype='uint64')
        count = 0
        # Index of the newest sample, whose "next state" slot is invalid.
        newest = (self._top - 1) % self._max_pool_size
        while count < batch_size:
            index = np.random.randint(
                self._bottom, self._bottom + self._size) % self._max_pool_size
            # Bug fix: the previous check (index == self._size - 1) only
            # rejected the newest sample before the ring buffer wrapped;
            # once full, it let invalid newest->oldest transitions through
            # whenever _top != 0.
            if index == newest:
                continue
            transition_index = (index + 1) % self._max_pool_size
            indices[count] = index
            transition_indices[count] = transition_index
            count += 1
        return dict(
            observations=self._observations[indices],
            actions=self._actions[indices],
            rewards=self._rewards[indices],
            terminals=self._terminals[indices],
            next_observations=self._observations[transition_indices]
        )

    @property
    def size(self):
        """Number of valid samples currently stored."""
        return self._size
class DDPG(RLAlgorithm):
"""
Deep Deterministic Policy Gradient.
"""
def __init__(
self,
env,
policy,
qf,
es,
batch_size=32,
n_epochs=200,
epoch_length=1000,
min_pool_size=10000,
replay_pool_size=1000000,
discount=0.99,
max_path_length=250,
qf_weight_decay=0.,
qf_update_method='adam',
qf_learning_rate=1e-3,
policy_weight_decay=0,
policy_update_method='adam',
policy_learning_rate=1e-4,
eval_samples=10000,
soft_target=True,
soft_target_tau=0.001,
n_updates_per_sample=1,
scale_reward=1.0,
include_horizon_terminal_transitions=False,
plot=False,
pause_for_plot=False):
"""
:param env: Environment
:param policy: Policy
:param qf: Q function
:param es: Exploration strategy
:param batch_size: Number of samples for each minibatch.
:param n_epochs: Number of epochs. Policy will be evaluated after each epoch.
:param epoch_length: How many timesteps for each epoch.
:param min_pool_size: Minimum size of the pool to start training.
:param replay_pool_size: Size of the experience replay pool.
:param discount: Discount factor for the cumulative return.
:param max_path_length: Discount factor for the cumulative return.
:param qf_weight_decay: Weight decay factor for parameters of the Q function.
:param qf_update_method: Online optimization method for training Q function.
:param qf_learning_rate: Learning rate for training Q function.
:param policy_weight_decay: Weight decay factor for parameters of the policy.
:param policy_update_method: Online optimization method for training the policy.
:param policy_learning_rate: Learning rate for training the policy.
:param eval_samples: Number of samples (timesteps) for evaluating the policy.
:param soft_target_tau: Interpolation parameter for doing the soft target update.
:param n_updates_per_sample: Number of Q function and policy updates per new sample obtained
:param scale_reward: The scaling factor applied to the rewards when training
:param include_horizon_terminal_transitions: whether to include transitions with terminal=True because the
horizon was reached. This might make the Q value back up less stable for certain tasks.
:param plot: Whether to visualize the policy performance after each eval_interval.
:param pause_for_plot: Whether to pause before continuing when plotting.
:return:
"""
self.env = env
self.policy = policy
self.qf = qf
self.es = es
self.batch_size = batch_size
self.n_epochs = n_epochs
self.epoch_length = epoch_length
self.min_pool_size = min_pool_size
self.replay_pool_size = replay_pool_size
self.discount = discount
self.max_path_length = max_path_length
self.qf_weight_decay = qf_weight_decay
self.qf_update_method = \
parse_update_method(
qf_update_method,
learning_rate=qf_learning_rate,
)
self.qf_learning_rate = qf_learning_rate
self.policy_weight_decay = policy_weight_decay
self.policy_update_method = \
parse_update_method(
policy_update_method,
learning_rate=policy_learning_rate,
)
self.policy_learning_rate = policy_learning_rate
self.eval_samples = eval_samples
self.soft_target_tau = soft_target_tau
self.n_updates_per_sample = n_updates_per_sample
self.include_horizon_terminal_transitions = include_horizon_terminal_transitions
self.plot = plot
self.pause_for_plot = pause_for_plot
self.qf_loss_averages = []
self.policy_surr_averages = []
self.q_averages = []
self.y_averages = []
self.paths = []
self.es_path_returns = []
self.paths_samples_cnt = 0
self.scale_reward = scale_reward
self.opt_info = None
def start_worker(self):
parallel_sampler.populate_task(self.env, self.policy)
if self.plot:
plotter.init_plot(self.env, self.policy)
    @overrides
    def train(self):
        """Main DDPG training loop.

        Alternates between taking single exploration steps in the environment
        and, once the replay pool holds at least ``min_pool_size`` samples,
        performing ``n_updates_per_sample`` gradient updates on the Q-function
        and policy per step. Evaluation, snapshotting and (optional) plotting
        happen once per epoch.
        """
        # This seems like a rather sequential method
        pool = SimpleReplayPool(
            max_pool_size=self.replay_pool_size,
            observation_dim=self.env.observation_space.flat_dim,
            action_dim=self.env.action_space.flat_dim,
        )
        self.start_worker()
        self.init_opt()
        itr = 0
        path_length = 0
        path_return = 0
        terminal = False
        observation = self.env.reset()
        # Deep copy used as the behavior policy; it is re-synced with the
        # trained policy only after each round of updates below.
        sample_policy = pickle.loads(pickle.dumps(self.policy))
        for epoch in range(self.n_epochs):
            logger.push_prefix('epoch #%d | ' % epoch)
            logger.log("Training started")
            for epoch_itr in pyprind.prog_bar(range(self.epoch_length)):
                # Execute policy
                if terminal:  # or path_length > self.max_path_length:
                    # Note that if the last time step ends an episode, the very
                    # last state and observation will be ignored and not added
                    # to the replay pool
                    observation = self.env.reset()
                    self.es.reset()
                    sample_policy.reset()
                    self.es_path_returns.append(path_return)
                    path_length = 0
                    path_return = 0
                action = self.es.get_action(itr, observation, policy=sample_policy)  # qf=qf)
                next_observation, reward, terminal, _ = self.env.step(action)
                path_length += 1
                path_return += reward
                if not terminal and path_length >= self.max_path_length:
                    # Horizon reached without a true terminal state.
                    terminal = True
                    # only include the terminal transition in this case if the flag was set
                    if self.include_horizon_terminal_transitions:
                        pool.add_sample(observation, action, reward * self.scale_reward, terminal)
                else:
                    pool.add_sample(observation, action, reward * self.scale_reward, terminal)
                observation = next_observation
                if pool.size >= self.min_pool_size:
                    for update_itr in range(self.n_updates_per_sample):
                        # Train policy
                        batch = pool.random_batch(self.batch_size)
                        self.do_training(itr, batch)
                    # Sync the behavior policy with the freshly updated one.
                    sample_policy.set_param_values(self.policy.get_param_values())
                itr += 1
            logger.log("Training finished")
            if pool.size >= self.min_pool_size:
                self.evaluate(epoch, pool)
                params = self.get_epoch_snapshot(epoch)
                logger.save_itr_params(epoch, params)
            logger.dump_tabular(with_prefix=False)
            logger.pop_prefix()
            if self.plot:
                self.update_plot()
                if self.pause_for_plot:
                    input("Plotting evaluation run: Press Enter to "
                          "continue...")
        self.env.terminate()
        self.policy.terminate()
    def init_opt(self):
        """Compile the Theano training functions and build the target networks.

        Creates deep-copied "target" versions of the policy and Q-function
        (soft-updated in ``do_training``), a function that regresses the
        Q-function onto externally computed y targets, and a function that
        follows the deterministic policy gradient. All results are stored in
        ``self.opt_info``.
        """
        # First, create "target" policy and Q functions
        target_policy = pickle.loads(pickle.dumps(self.policy))
        target_qf = pickle.loads(pickle.dumps(self.qf))
        # y need to be computed first
        obs = self.env.observation_space.new_tensor_variable(
            'obs',
            extra_dims=1,
        )
        # The yi values are computed separately as above and then passed to
        # the training functions below
        action = self.env.action_space.new_tensor_variable(
            'action',
            extra_dims=1,
        )
        yvar = TT.vector('ys')
        # L2 weight decay over the Q-function's regularizable parameters.
        qf_weight_decay_term = 0.5 * self.qf_weight_decay * \
            sum([TT.sum(TT.square(param)) for param in
                 self.qf.get_params(regularizable=True)])
        qval = self.qf.get_qval_sym(obs, action)
        # Bellman regression loss against the externally supplied y targets.
        qf_loss = TT.mean(TT.square(yvar - qval))
        qf_reg_loss = qf_loss + qf_weight_decay_term
        policy_weight_decay_term = 0.5 * self.policy_weight_decay * \
            sum([TT.sum(TT.square(param))
                 for param in self.policy.get_params(regularizable=True)])
        # Deterministic policy gradient objective: maximize Q(s, pi(s)),
        # expressed as a minimization via the negated mean.
        policy_qval = self.qf.get_qval_sym(
            obs, self.policy.get_action_sym(obs),
            deterministic=True
        )
        policy_surr = -TT.mean(policy_qval)
        policy_reg_surr = policy_surr + policy_weight_decay_term
        qf_updates = self.qf_update_method(
            qf_reg_loss, self.qf.get_params(trainable=True))
        policy_updates = self.policy_update_method(
            policy_reg_surr, self.policy.get_params(trainable=True))
        f_train_qf = ext.compile_function(
            inputs=[yvar, obs, action],
            outputs=[qf_loss, qval],
            updates=qf_updates
        )
        f_train_policy = ext.compile_function(
            inputs=[obs],
            outputs=policy_surr,
            updates=policy_updates
        )
        self.opt_info = dict(
            f_train_qf=f_train_qf,
            f_train_policy=f_train_policy,
            target_qf=target_qf,
            target_policy=target_policy,
        )
    def do_training(self, itr, batch):
        """Run one Q-function and one policy update on a replay batch.

        Computes bootstrapped y targets from the *target* networks, fits the
        Q-function to them, ascends the deterministic policy gradient, then
        soft-updates both target networks toward the trained ones.
        """
        obs, actions, rewards, next_obs, terminals = ext.extract(
            batch,
            "observations", "actions", "rewards", "next_observations",
            "terminals"
        )
        # compute the on-policy y values
        target_qf = self.opt_info["target_qf"]
        target_policy = self.opt_info["target_policy"]
        next_actions, _ = target_policy.get_actions(next_obs)
        next_qvals = target_qf.get_qval(next_obs, next_actions)
        # Bootstrap targets; terminal transitions contribute the reward only.
        ys = rewards + (1. - terminals) * self.discount * next_qvals
        f_train_qf = self.opt_info["f_train_qf"]
        f_train_policy = self.opt_info["f_train_policy"]
        qf_loss, qval = f_train_qf(ys, obs, actions)
        policy_surr = f_train_policy(obs)
        # Soft (Polyak) target updates with interpolation factor tau.
        target_policy.set_param_values(
            target_policy.get_param_values() * (1.0 - self.soft_target_tau) +
            self.policy.get_param_values() * self.soft_target_tau)
        target_qf.set_param_values(
            target_qf.get_param_values() * (1.0 - self.soft_target_tau) +
            self.qf.get_param_values() * self.soft_target_tau)
        # Accumulate statistics consumed (and reset) by evaluate().
        self.qf_loss_averages.append(qf_loss)
        self.policy_surr_averages.append(policy_surr)
        self.q_averages.append(qval)
        self.y_averages.append(ys)
    def evaluate(self, epoch, pool):
        """Roll out the current policy and log statistics for this epoch.

        Samples ``eval_samples`` timesteps with the trained policy, logs
        return/Q/loss statistics accumulated by ``do_training`` since the
        previous evaluation, and resets those accumulators.
        """
        logger.log("Collecting samples for evaluation")
        paths = parallel_sampler.sample_paths(
            policy_params=self.policy.get_param_values(),
            max_samples=self.eval_samples,
            max_path_length=self.max_path_length,
        )
        average_discounted_return = np.mean(
            [special.discount_return(path["rewards"], self.discount) for path in paths]
        )
        returns = [sum(path["rewards"]) for path in paths]
        all_qs = np.concatenate(self.q_averages)
        all_ys = np.concatenate(self.y_averages)
        average_q_loss = np.mean(self.qf_loss_averages)
        average_policy_surr = np.mean(self.policy_surr_averages)
        # Mean squared action magnitude, a rough saturation indicator.
        average_action = np.mean(np.square(np.concatenate(
            [path["actions"] for path in paths]
        )))
        policy_reg_param_norm = np.linalg.norm(
            self.policy.get_param_values(regularizable=True)
        )
        qfun_reg_param_norm = np.linalg.norm(
            self.qf.get_param_values(regularizable=True)
        )
        logger.record_tabular('Epoch', epoch)
        logger.record_tabular('AverageReturn',
                              np.mean(returns))
        logger.record_tabular('StdReturn',
                              np.std(returns))
        logger.record_tabular('MaxReturn',
                              np.max(returns))
        logger.record_tabular('MinReturn',
                              np.min(returns))
        # Returns collected by the exploration policy during training steps.
        if len(self.es_path_returns) > 0:
            logger.record_tabular('AverageEsReturn',
                                  np.mean(self.es_path_returns))
            logger.record_tabular('StdEsReturn',
                                  np.std(self.es_path_returns))
            logger.record_tabular('MaxEsReturn',
                                  np.max(self.es_path_returns))
            logger.record_tabular('MinEsReturn',
                                  np.min(self.es_path_returns))
        logger.record_tabular('AverageDiscountedReturn',
                              average_discounted_return)
        logger.record_tabular('AverageQLoss', average_q_loss)
        logger.record_tabular('AveragePolicySurr', average_policy_surr)
        logger.record_tabular('AverageQ', np.mean(all_qs))
        logger.record_tabular('AverageAbsQ', np.mean(np.abs(all_qs)))
        logger.record_tabular('AverageY', np.mean(all_ys))
        logger.record_tabular('AverageAbsY', np.mean(np.abs(all_ys)))
        logger.record_tabular('AverageAbsQYDiff',
                              np.mean(np.abs(all_qs - all_ys)))
        logger.record_tabular('AverageAction', average_action)
        logger.record_tabular('PolicyRegParamNorm',
                              policy_reg_param_norm)
        logger.record_tabular('QFunRegParamNorm',
                              qfun_reg_param_norm)
        self.env.log_diagnostics(paths)
        self.policy.log_diagnostics(paths)
        # Reset the per-epoch accumulators filled by do_training().
        self.qf_loss_averages = []
        self.policy_surr_averages = []
        self.q_averages = []
        self.y_averages = []
        self.es_path_returns = []
def update_plot(self):
if self.plot:
plotter.update_plot(self.policy, self.max_path_length)
def get_epoch_snapshot(self, epoch):
return dict(
env=self.env,
epoch=epoch,
qf=self.qf,
policy=self.policy,
target_qf=self.opt_info["target_qf"],
target_policy=self.opt_info["target_policy"],
es=self.es,
)
| 17,620 | 37.642544 | 114 | py |
rllab | rllab-master/rllab/algos/erwr.py | from rllab.algos.vpg import VPG
from rllab.optimizers.lbfgs_optimizer import LbfgsOptimizer
from rllab.core.serializable import Serializable
class ERWR(VPG, Serializable):
    """
    Episodic Reward Weighted Regression [1]_

    Notes
    -----
    This does not implement the original RwR [2]_ that deals with "immediate reward problems" since
    it doesn't find solutions that optimize for temporally delayed rewards.

    .. [1] Kober, Jens, and Jan R. Peters. "Policy search for motor primitives in robotics." Advances in neural information processing systems. 2009.
    .. [2] Peters, Jan, and Stefan Schaal. "Using reward-weighted regression for reinforcement learning of task space control." Approximate Dynamic Programming and Reinforcement Learning, 2007. ADPRL 2007. IEEE International Symposium on. IEEE, 2007.
    """

    def __init__(
            self,
            optimizer=None,
            optimizer_args=None,
            positive_adv=None,
            **kwargs):
        # Snapshot the constructor arguments for serialization before any
        # local rebinding happens below.
        Serializable.quick_init(self, locals())
        if optimizer is None:
            optimizer = LbfgsOptimizer(
                **(optimizer_args if optimizer_args is not None else {}))
        # ERWR defaults to strictly positive advantages (reward weighting).
        if positive_adv is None:
            positive_adv = True
        super(ERWR, self).__init__(
            optimizer=optimizer,
            positive_adv=positive_adv,
            **kwargs
        )
| 1,374 | 37.194444 | 250 | py |
rllab | rllab-master/rllab/algos/cma_es.py | from rllab.algos.base import RLAlgorithm
import theano.tensor as TT
import numpy as np
from rllab.misc import ext
from rllab.misc.special import discount_cumsum
from rllab.sampler import parallel_sampler, stateful_pool
from rllab.sampler.utils import rollout
from rllab.core.serializable import Serializable
import rllab.misc.logger as logger
import rllab.plotter as plotter
from . import cma_es_lib
def sample_return(G, params, max_path_length, discount):
    """Run one rollout with the given flat policy parameters on a worker.

    Attaches the per-step discounted returns and the total undiscounted
    return to the resulting path dict.
    """
    # Of course we make the strong assumption that there is no race condition
    # on the worker-local policy.
    G.policy.set_param_values(params)
    traj = rollout(
        G.env,
        G.policy,
        max_path_length,
    )
    traj["returns"] = discount_cumsum(traj["rewards"], discount)
    traj["undiscounted_return"] = sum(traj["rewards"])
    return traj
class CMAES(RLAlgorithm, Serializable):
    """CMA-ES policy search: optimizes the flattened policy parameter vector
    by minimizing the negated discounted return of single rollouts."""

    def __init__(
            self,
            env,
            policy,
            n_itr=500,
            max_path_length=500,
            discount=0.99,
            sigma0=1.,
            batch_size=None,
            plot=False,
            **kwargs
    ):
        """
        :param env: Environment to optimize in.
        :param policy: Policy whose flat parameter vector is searched over.
        :param n_itr: Number of iterations.
        :param max_path_length: Maximum length of a single rollout.
        :param batch_size: # of samples from trajs from param distribution, when this
        is set, n_samples is ignored
        :param discount: Discount.
        :param plot: Plot evaluation run after each iteration.
        :param sigma0: Initial std for param dist
        :return:
        """
        Serializable.quick_init(self, locals())
        self.env = env
        self.policy = policy
        self.plot = plot
        self.sigma0 = sigma0
        self.discount = discount
        self.max_path_length = max_path_length
        self.n_itr = n_itr
        self.batch_size = batch_size

    def train(self):
        """Run CMA-ES for up to ``n_itr`` iterations (or until it converges)."""
        cur_std = self.sigma0
        cur_mean = self.policy.get_param_values()
        es = cma_es_lib.CMAEvolutionStrategy(
            cur_mean, cur_std)
        parallel_sampler.populate_task(self.env, self.policy)
        if self.plot:
            plotter.init_plot(self.env, self.policy)
        itr = 0
        while itr < self.n_itr and not es.stop():
            if self.batch_size is None:
                # Sample one full population from the multivariate normal
                # distribution and evaluate each candidate with one rollout.
                xs = es.ask()
                xs = np.asarray(xs)
                infos = (
                    stateful_pool.singleton_pool.run_map(sample_return, [(x, self.max_path_length,
                                                                          self.discount) for x in xs]))
            else:
                # Keep requesting small candidate batches until the collected
                # number of timesteps reaches ``batch_size``.
                cum_len = 0
                infos = []
                xss = []
                done = False
                while not done:
                    sbs = stateful_pool.singleton_pool.n_parallel * 2
                    # Sample sbs candidates from the search distribution.
                    xs = es.ask(sbs)
                    xs = np.asarray(xs)
                    xss.append(xs)
                    sinfos = stateful_pool.singleton_pool.run_map(
                        sample_return, [(x, self.max_path_length, self.discount) for x in xs])
                    for info in sinfos:
                        infos.append(info)
                        cum_len += len(info['returns'])
                        if cum_len >= self.batch_size:
                            xs = np.concatenate(xss)
                            done = True
                            break
            # Evaluate fitness of samples (negative as it is minimization
            # problem).
            fs = - np.array([info['returns'][0] for info in infos])
            # When batching, you could have generated too many samples compared
            # to the actual evaluations. So we cut it off in this case.
            xs = xs[:len(fs)]
            # Update CMA-ES params based on sample fitness.
            es.tell(xs, fs)
            logger.push_prefix('itr #%d | ' % itr)
            logger.record_tabular('Iteration', itr)
            logger.record_tabular('CurStdMean', np.mean(cur_std))
            undiscounted_returns = np.array(
                [info['undiscounted_return'] for info in infos])
            logger.record_tabular('AverageReturn',
                                  np.mean(undiscounted_returns))
            # Fixed: this previously logged np.mean again instead of the std.
            logger.record_tabular('StdReturn',
                                  np.std(undiscounted_returns))
            logger.record_tabular('MaxReturn',
                                  np.max(undiscounted_returns))
            logger.record_tabular('MinReturn',
                                  np.min(undiscounted_returns))
            # NOTE(review): fs holds *negated* discounted returns, so this logs
            # the negative of the average discounted return; kept as-is to
            # preserve the existing log semantics.
            logger.record_tabular('AverageDiscountedReturn',
                                  np.mean(fs))
            logger.record_tabular('AvgTrajLen',
                                  np.mean([len(info['returns']) for info in infos]))
            self.env.log_diagnostics(infos)
            self.policy.log_diagnostics(infos)
            logger.save_itr_params(itr, dict(
                itr=itr,
                policy=self.policy,
                env=self.env,
            ))
            logger.dump_tabular(with_prefix=False)
            if self.plot:
                plotter.update_plot(self.policy, self.max_path_length)
            logger.pop_prefix()
            # Update iteration.
            itr += 1
        # Set final params: es.result()[0] is the best evaluated solution.
        self.policy.set_param_values(es.result()[0])
        parallel_sampler.terminate_task()
| 5,759 | 35.923077 | 103 | py |
rllab | rllab-master/rllab/algos/ppo.py | from rllab.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from rllab.algos.npo import NPO
from rllab.core.serializable import Serializable
class PPO(NPO, Serializable):
    """
    Penalized Policy Optimization.
    """

    def __init__(
            self,
            optimizer=None,
            optimizer_args=None,
            **kwargs):
        # Capture constructor arguments for serialization before rebinding.
        Serializable.quick_init(self, locals())
        if optimizer is None:
            optimizer = PenaltyLbfgsOptimizer(
                **(optimizer_args if optimizer_args is not None else {}))
        super(PPO, self).__init__(optimizer=optimizer, **kwargs)
| 646 | 28.409091 | 74 | py |
rllab | rllab-master/rllab/algos/nop.py | from rllab.algos.batch_polopt import BatchPolopt
from rllab.misc.overrides import overrides
class NOP(BatchPolopt):
    """
    NOP (no optimization performed) policy search algorithm
    """

    def __init__(self, **kwargs):
        super(NOP, self).__init__(**kwargs)

    @overrides
    def init_opt(self):
        # Nothing to set up: this algorithm never optimizes the policy.
        pass

    @overrides
    def optimize_policy(self, itr, samples_data):
        # Intentionally a no-op; the policy parameters are left untouched.
        pass

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        # No algorithm state worth snapshotting.
        return {}
| 519 | 19 | 59 | py |
rllab | rllab-master/rllab/algos/tnpg.py | from rllab.algos.npo import NPO
from rllab.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer
from rllab.misc import ext
class TNPG(NPO):
    """
    Truncated Natural Policy Gradient.
    """

    def __init__(
            self,
            optimizer=None,
            optimizer_args=None,
            **kwargs):
        if optimizer is None:
            # A single backtracking step truncates the line search, which is
            # what distinguishes TNPG from a full TRPO-style search; caller
            # supplied args override this default.
            merged_args = dict(max_backtracks=1)
            if optimizer_args is not None:
                merged_args.update(optimizer_args)
            optimizer = ConjugateGradientOptimizer(**merged_args)
        super(TNPG, self).__init__(optimizer=optimizer, **kwargs)
| 727 | 29.333333 | 84 | py |
rllab | rllab-master/rllab/algos/trpo.py | from rllab.algos.npo import NPO
from rllab.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer
from rllab.core.serializable import Serializable
class TRPO(NPO):
    """
    Trust Region Policy Optimization
    """

    def __init__(
            self,
            optimizer=None,
            optimizer_args=None,
            **kwargs):
        if optimizer is None:
            optimizer = ConjugateGradientOptimizer(
                **(optimizer_args if optimizer_args is not None else {}))
        super(TRPO, self).__init__(optimizer=optimizer, **kwargs)
| 603 | 27.761905 | 84 | py |
rllab | rllab-master/rllab/algos/util.py | import numpy as np
import time
from rllab.core.serializable import Serializable
from rllab.misc.ext import extract
def center_advantages(advantages):
    """Normalize advantages to zero mean and (approximately) unit std.

    The 1e-8 term guards against division by zero for constant advantages.
    """
    mean = np.mean(advantages)
    std = advantages.std() + 1e-8
    return (advantages - mean) / std
def shift_advantages_to_positive(advantages):
    """Shift advantages so the minimum becomes a small positive constant (1e-8)."""
    lowest = np.min(advantages)
    return (advantages - lowest) + 1e-8
def sign(x):
    """Elementwise sign with the convention sign(0) == 1 (never returns 0)."""
    return (x >= 0) * 1. - (x < 0) * 1.
class ReplayPool(Serializable):
    """
    A utility class for experience replay.
    The code is adapted from https://github.com/spragunr/deep_q_rl

    Samples are stored in fixed-size circular (ring) buffers indexed by
    ``bottom`` (oldest sample) and ``top`` (next write position).
    """

    def __init__(
            self,
            observation_shape,
            action_dim,
            max_steps,
            observation_dtype=np.float32,
            action_dtype=np.float32,
            concat_observations=False,
            concat_length=1,
            rng=None):
        """Construct a ReplayPool.

        Arguments:
            observation_shape - tuple indicating the shape of the observation
            action_dim - dimension of the action
            max_steps - capacity of the replay pool
            observation_dtype - dtype used to store observations
            action_dtype - dtype used to store actions
            concat_observations - whether to concat the past few observations
                as a single one, so as to ensure the Markov property
            concat_length - length of the concatenation
            rng - optional numpy RandomState used for batch sampling
        """
        self.observation_shape = observation_shape
        self.action_dim = action_dim
        self.max_steps = max_steps
        # Ring buffers for each stored quantity.
        self.observations = np.zeros(
            (max_steps,) + observation_shape, dtype=observation_dtype)
        self.actions = np.zeros((max_steps, action_dim), dtype=action_dtype)
        self.rewards = np.zeros((max_steps,), dtype=np.float32)
        self.terminals = np.zeros((max_steps,), dtype='bool')
        # Lazily allocated on the first add_sample() call that passes `extra`.
        self.extras = None
        self.concat_observations = concat_observations
        self.concat_length = concat_length
        self.observation_dtype = observation_dtype
        self.action_dtype = action_dtype
        if rng:
            self.rng = rng
        else:
            self.rng = np.random.RandomState()
        if not concat_observations:
            assert concat_length == 1, \
                "concat_length must be set to 1 if not concatenating " \
                "observations"
        # `bottom` is the oldest sample, `top` the next write position; both
        # wrap around max_steps.
        self.bottom = 0
        self.top = 0
        self.size = 0
        # Serializable records the constructor arguments for pickling.
        super(ReplayPool, self).__init__(
            self, observation_shape, action_dim, max_steps, observation_dtype,
            action_dtype, concat_observations, concat_length, rng
        )

    def __getstate__(self):
        """Extend the Serializable state with the pool's buffers and indices."""
        d = super(ReplayPool, self).__getstate__()
        d["bottom"] = self.bottom
        d["top"] = self.top
        d["size"] = self.size
        d["observations"] = self.observations
        d["actions"] = self.actions
        d["rewards"] = self.rewards
        d["terminals"] = self.terminals
        d["extras"] = self.extras
        d["rng"] = self.rng
        return d

    def __setstate__(self, d):
        """Restore the pool's buffers and indices saved by __getstate__."""
        super(ReplayPool, self).__setstate__(d)
        self.bottom, self.top, self.size, self.observations, self.actions, \
            self.rewards, self.terminals, self.extras, self.rng = extract(
                d,
                "bottom", "top", "size", "observations", "actions", "rewards",
                "terminals", "extras", "rng"
            )

    def add_sample(self, observation, action, reward, terminal, extra=None):
        """Add a time step record.

        Arguments:
            observation -- current or observation
            action -- action chosen by the agent
            reward -- reward received after taking the action
            terminal -- boolean indicating whether the episode ended after this
            time step
            extra -- optional extra per-step array; must be supplied either
            always or never for a given pool
        """
        self.observations[self.top] = observation
        self.actions[self.top] = action
        self.rewards[self.top] = reward
        self.terminals[self.top] = terminal
        # self.horizon_terminals[self.top] = horizon_terminal
        if extra is not None:
            if self.extras is None:
                # Allocate the extras buffer on first use.
                assert self.size == 0, "extra must be consistent"
                self.extras = np.zeros(
                    (self.max_steps,) + extra.shape,
                    dtype=extra.dtype
                )
            self.extras[self.top] = extra
        else:
            assert self.extras is None
        # Advance the ring-buffer pointers, evicting the oldest sample once
        # the pool is full.
        if self.size == self.max_steps:
            self.bottom = (self.bottom + 1) % self.max_steps
        else:
            self.size += 1
        self.top = (self.top + 1) % self.max_steps

    def __len__(self):
        """Return an approximate count of stored state transitions."""
        # TODO: Properly account for indices which can't be used, as in
        # random_batch's check.
        return max(0, self.size - self.concat_length)

    def last_concat_state(self):
        """
        Return the most recent sample (concatenated observations if needed).
        """
        if self.concat_observations:
            indexes = np.arange(self.top - self.concat_length, self.top)
            return self.observations.take(indexes, axis=0, mode='wrap')
        else:
            return self.observations[self.top - 1]

    def concat_state(self, state):
        """Return a concatenated state, using the last concat_length -
        1, plus state.
        """
        if self.concat_observations:
            indexes = np.arange(self.top - self.concat_length + 1, self.top)
            concat_state = np.empty(
                (self.concat_length,) + self.observation_shape,
                # Fixed: `floatX` was an undefined name here (NameError at
                # runtime); use the pool's observation dtype instead.
                dtype=self.observation_dtype
            )
            concat_state[0:self.concat_length - 1] = \
                self.observations.take(indexes, axis=0, mode='wrap')
            concat_state[-1] = state
            return concat_state
        else:
            return state

    def random_batch(self, batch_size):
        """
        Return corresponding observations, actions, rewards, terminal status,
        and next_observations for batch_size randomly chosen state transitions.
        """
        # Allocate the response.
        observations = np.zeros(
            (batch_size, self.concat_length) + self.observation_shape,
            dtype=self.observation_dtype
        )
        actions = np.zeros(
            (batch_size, self.action_dim),
            dtype=self.action_dtype
        )
        # Fixed: `floatX` was an undefined name (NameError at runtime);
        # rewards are stored as float32 (see __init__), so sample as float32.
        rewards = np.zeros((batch_size,), dtype=np.float32)
        terminals = np.zeros((batch_size,), dtype='bool')
        if self.extras is not None:
            extras = np.zeros(
                (batch_size,) + self.extras.shape[1:],
                dtype=self.extras.dtype
            )
            next_extras = np.zeros(
                (batch_size,) + self.extras.shape[1:],
                dtype=self.extras.dtype
            )
        else:
            extras = None
            next_extras = None
        next_observations = np.zeros(
            (batch_size, self.concat_length) + self.observation_shape,
            dtype=self.observation_dtype
        )
        # NOTE(review): when concat_length > 1, the `next_actions` /
        # `next_extras` assignments below take `concat_length` rows but the
        # destination slot holds a single row; this looks shape-inconsistent
        # for concat_length > 1 — confirm against callers.
        next_actions = np.zeros(
            (batch_size, self.action_dim),
            dtype=self.action_dtype
        )
        count = 0
        while count < batch_size:
            # Randomly choose a time step from the replay memory.
            index = self.rng.randint(
                self.bottom,
                self.bottom + self.size - self.concat_length
            )
            initial_indices = np.arange(index, index + self.concat_length)
            transition_indices = initial_indices + 1
            end_index = index + self.concat_length - 1
            # Check that the initial state corresponds entirely to a
            # single episode, meaning none but the last frame may be
            # terminal. If the last frame of the initial state is
            # terminal, then the last frame of the transitioned state
            # will actually be the first frame of a new episode, which
            # the Q learner recognizes and handles correctly during
            # training by zeroing the discounted future reward estimate.
            if np.any(self.terminals.take(initial_indices[0:-1], mode='wrap')):
                continue
            # do not pick samples which terminated because of horizon
            # if np.any(self.horizon_terminals.take(initial_indices[0:-1],
            # mode='wrap')) or self.horizon_terminals[end_index]:
            # continue
            # Add the state transition to the response.
            observations[count] = self.observations.take(
                initial_indices, axis=0, mode='wrap')
            actions[count] = self.actions.take(end_index, mode='wrap')
            rewards[count] = self.rewards.take(end_index, mode='wrap')
            terminals[count] = self.terminals.take(end_index, mode='wrap')
            if self.extras is not None:
                extras[count] = self.extras.take(
                    end_index, axis=0, mode='wrap')
                next_extras[count] = self.extras.take(
                    transition_indices, axis=0, mode='wrap')
            next_observations[count] = self.observations.take(
                transition_indices, axis=0, mode='wrap')
            next_actions[count] = self.actions.take(
                transition_indices, axis=0, mode='wrap')
            count += 1
        if not self.concat_observations:
            # If we're not concatenating observations, we should squeeze the
            # second dimension in observations and next_observations
            observations = np.squeeze(observations, axis=1)
            next_observations = np.squeeze(next_observations, axis=1)
        return dict(
            observations=observations,
            actions=actions,
            rewards=rewards,
            next_observations=next_observations,
            next_actions=next_actions,
            terminals=terminals,
            extras=extras,
            next_extras=next_extras,
        )
# TESTING CODE BELOW THIS POINT...
def simple_tests():
    """Smoke-test ReplayPool: add random samples, print internal state,
    and draw one random batch."""
    np.random.seed(222)
    dataset = ReplayPool(
        observation_shape=(3, 2),
        action_dim=1,
        max_steps=6,
        concat_observations=True,
        concat_length=4
    )
    for _ in range(10):
        img = np.random.randint(0, 256, size=(3, 2))
        action = np.random.randint(16)
        reward = np.random.random()
        terminal = False
        if np.random.random() < .05:
            terminal = True
        print('img', img)
        dataset.add_sample(img, action, reward, terminal)
        print("S", dataset.observations)
        print("A", dataset.actions)
        print("R", dataset.rewards)
        # Fixed: the attribute is `terminals` (plural); `dataset.terminal`
        # raised AttributeError.
        print("T", dataset.terminals)
        print("SIZE", dataset.size)
        print()
    print("LAST CONCAT STATE", dataset.last_concat_state())
    print()
    print('BATCH', dataset.random_batch(2))
def speed_tests():
    """Rough throughput benchmark for add_sample() and random_batch()."""
    pool = ReplayPool(
        observation_shape=(80, 80),
        action_dim=1,
        max_steps=20000,
        concat_observations=True,
        concat_length=4,
    )
    img = np.random.randint(0, 256, size=(80, 80))
    action = np.random.randint(16)
    reward = np.random.random()
    t0 = time.time()
    for _ in range(100000):
        is_terminal = np.random.random() < .05
        pool.add_sample(img, action, reward, is_terminal)
    print("samples per second: ", 100000 / (time.time() - t0))
    t0 = time.time()
    for _ in range(200):
        pool.random_batch(32)
    print("batches per second: ", 200 / (time.time() - t0))
    print(pool.last_concat_state())
def trivial_tests():
    """Minimal end-to-end check: 3-slot pool with 2-frame concatenation."""
    pool = ReplayPool(
        observation_shape=(1, 2),
        action_dim=1,
        max_steps=3,
        concat_observations=True,
        concat_length=2
    )
    frames = [np.array([[k, k]], dtype='uint8') for k in (1, 2, 3)]
    pool.add_sample(frames[0], 1, 1, False)
    pool.add_sample(frames[1], 2, 2, False)
    pool.add_sample(frames[2], 2, 2, True)
    print("last", pool.last_concat_state())
    print("random", pool.random_batch(1))
def max_size_tests():
    """Check that a small (wrapping) pool and a large pool agree on the most
    recent concatenated state after identical insertions."""
    seed = 42
    small_pool = ReplayPool(
        observation_shape=(4, 3),
        action_dim=1,
        max_steps=10,
        concat_observations=True,
        concat_length=4,
        rng=np.random.RandomState(seed)
    )
    large_pool = ReplayPool(
        observation_shape=(4, 3),
        action_dim=1,
        max_steps=1000,
        concat_observations=True,
        concat_length=4,
        rng=np.random.RandomState(seed)
    )
    for _ in range(100):
        img = np.random.randint(0, 256, size=(4, 3))
        action = np.random.randint(16)
        reward = np.random.random()
        is_terminal = np.random.random() < .05
        small_pool.add_sample(img, action, reward, is_terminal)
        large_pool.add_sample(img, action, reward, is_terminal)
    np.testing.assert_array_almost_equal(small_pool.last_concat_state(),
                                         large_pool.last_concat_state())
    print("passed")
def test_memory_usage_ok():
    """Long-running memory soak test (requires the `memory_profiler` package);
    periodically prints elapsed time and process memory usage."""
    import memory_profiler
    pool = ReplayPool(
        observation_shape=(80, 80),
        action_dim=1,
        max_steps=100000,
        concat_observations=True,
        concat_length=4
    )
    last = time.time()
    for i in range(1000000000):
        if (i % 100000) == 0:
            print(i)
        pool.add_sample(np.random.random((80, 80)), 1, 1, False)
        if i > 200000:
            pool.random_batch(32)
        if (i % 10007) == 0:
            print(time.time() - last)
            mem_usage = memory_profiler.memory_usage(-1)
            print(len(pool), mem_usage)
            last = time.time()
def main():
    """Entry point: exercise the ReplayPool benchmarks and sanity tests."""
    for test_fn in (speed_tests, max_size_tests, simple_tests):
        test_fn()
    # test_memory_usage_ok() is excluded: it runs (effectively) forever.
# Allow running this module's benchmarks/tests directly as a script.
if __name__ == "__main__":
    main()
| 13,903 | 32.829684 | 79 | py |
rllab | rllab-master/rllab/algos/cma_es_lib.py | """Module cma implements the CMA-ES (Covariance Matrix Adaptation
Evolution Strategy).
CMA-ES is a stochastic optimizer for robust non-linear non-convex
derivative- and function-value-free numerical optimization.
This implementation can be used with Python versions >= 2.6, namely
2.6, 2.7, 3.3, 3.4.
CMA-ES searches for a minimizer (a solution x in :math:`R^n`) of an
objective function f (cost function), such that f(x) is minimal.
Regarding f, only a passably reliable ranking of the candidate
solutions in each iteration is necessary. Neither the function values
itself, nor the gradient of f need to be available or do matter (like
in the downhill simplex Nelder-Mead algorithm). Some termination
criteria however depend on actual f-values.
Two interfaces are provided:
- function `fmin(func, x0, sigma0,...)`
runs a complete minimization
of the objective function func with CMA-ES.
- class `CMAEvolutionStrategy`
allows for minimization such that the control of the iteration
loop remains with the user.
Used packages:
- unavoidable: `numpy` (see `barecmaes2.py` if `numpy` is not
available),
- avoidable with small changes: `time`, `sys`
- optional: `matplotlib.pyplot` (for `plot` etc., highly
recommended), `pprint` (pretty print), `pickle` (in class
`Sections`), `doctest`, `inspect`, `pygsl` (never by default)
Install
-------
The file ``cma.py`` only needs to be visible in the python path (e.g. in
the current working directory).
The preferred way of (system-wide) installation is calling
pip install cma
from the command line.
The ``cma.py`` file can also be installed from the
system shell terminal command line by::
python cma.py --install
which solely calls the ``setup`` function from the standard
``distutils.core`` package for installation. If the ``setup.py``
file is been provided with ``cma.py``, the standard call is
python setup.py cma
Both calls need to see ``cma.py`` in the current working directory and
might need to be preceded with ``sudo``.
To upgrade the currently installed version from the Python Package Index,
and also for first time installation, type in the system shell::
pip install --upgrade cma
Testing
-------
From the system shell::
python cma.py --test
or from the Python shell ``ipython``::
run cma.py --test
or from any python shell
import cma
cma.main('--test')
runs ``doctest.testmod(cma)`` showing only exceptions (and not the
tests that fail due to small differences in the output) and should
run without complaints in about between 20 and 100 seconds.
Example
-------
From a python shell::
import cma
help(cma) # "this" help message, use cma? in ipython
help(cma.fmin)
help(cma.CMAEvolutionStrategy)
help(cma.CMAOptions)
cma.CMAOptions('tol') # display 'tolerance' termination options
cma.CMAOptions('verb') # display verbosity options
res = cma.fmin(cma.Fcts.tablet, 15 * [1], 1)
res[0] # best evaluated solution
res[5] # mean solution, presumably better with noise
:See: `fmin()`, `CMAOptions`, `CMAEvolutionStrategy`
:Author: Nikolaus Hansen, 2008-2015
:Contributor: Petr Baudis, 2014
:License: BSD 3-Clause, see below.
"""
# The BSD 3-Clause License
# Copyright (c) 2014 Inria
# Author: Nikolaus Hansen, 2008-2015
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright and
# authors notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# and authors notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors nor the authors names may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# (note to self) for testing:
# pyflakes cma.py # finds bugs by static analysis
# pychecker --limit 60 cma.py # also executes, all 60 warnings checked
# or python ~/Downloads/pychecker-0.8.19/pychecker/checker.py cma.py
# python cma.py -t -quiet # executes implemented tests based on doctest
# python -3 cma.py --test 2> out2to3warnings.txt #
# to create a html documentation file:
# pydoc -w cma # edit the header (remove local pointers)
# epydoc cma.py # comes close to javadoc but does not find the
# # links of function references etc
# doxygen needs @package cma as first line in the module docstring
# some things like class attributes are not interpreted correctly
# sphinx: doc style of doc.python.org, could not make it work (yet)
# TODO: implement a (deep enough) copy-constructor for class
# CMAEvolutionStrategy to repeat the same step in different
# configurations for online-adaptation of meta parameters
# TODO: reconsider geno-pheno transformation. Can it be a completely
# separate module that operates inbetween optimizer and objective?
# Can we still propagate a repair of solutions to the optimizer?
# How about gradients (should be fine)?
# TODO: implement bipop in a separate algorithm as meta portfolio
# algorithm of IPOP and a local restart option to be implemented
# in fmin (e.g. option restart_mode in [IPOP, local])
# TODO: self.opts['mindx'] is checked without sigma_vec, which is wrong,
# TODO: project sigma_vec on the smallest eigenvector?
# TODO: class _CMAStopDict implementation looks way too complicated
# TODO: separate display and logging options, those CMAEvolutionStrategy
# instances don't use themselves (probably all?)
# TODO: disp method is implemented in CMAEvolutionStrategy and in
# CMADataLogger separately, OOOptimizer.disp_str should return a str
# which can be used uniformly? Only logger can disp a history.
# TODO: check scitools.easyviz and how big the adaptation would be
# TODO: split tell into a variable transformation part and the "pure"
# functionality
# usecase: es.tell_geno(X, [func(es.pheno(x)) for x in X])
# genotypic repair is not part of tell_geno
# TODO: copy_always optional parameter does not make much sense,
# as one can always copy the input argument first,
# however some calls are simpler
# TODO: generalize input logger in optimize() as after_iteration_handler
# (which is logger.add by default)? One difficulty is that
# the logger object is returned (not anymore when return of optimize
# is change). Another difficulty is the obscure usage of modulo
# for writing a final data line in optimize.
# TODO: separate initialize==reset_state from __init__
# TODO: introduce Ypos == diffC which makes the code more consistent and
# the active update "exact"?
# TODO: dynamically read "signals" from a file, see import ConfigParser
# or myproperties.py (to be called after tell())
#
# typical parameters in scipy.optimize: disp, xtol, ftol, maxiter, maxfun,
# callback=None
# maxfev, diag (A sequence of N positive entries that serve as
# scale factors for the variables.)
# full_output -- non-zero to return all optional outputs.
# If xtol < 0.0, xtol is set to sqrt(machine_precision)
# 'infot -- a dictionary of optional outputs with the keys:
# 'nfev': the number of function calls...
#
# see eg fmin_powell
# typical returns
# x, f, dictionary d
# (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag},
# <allvecs>)
#
# TODO: keep best ten solutions
# TODO: implement constraints handling
# TODO: extend function unitdoctest, or use unittest?
# TODO: apply style guide
# TODO: eigh(): thorough testing would not hurt
# changes:
# 15/01/20: larger condition numbers for C realized by using tf_pheno
# of GenoPheno attribute gp.
# 15/01/19: injection method, first implementation, short injections
# and long injections with good fitness need to be addressed yet
# 15/01/xx: prepare_injection_directions to simplify/centralize injected
# solutions from mirroring and TPA
# 14/12/26: bug fix in correlation_matrix computation if np.diag is a view
# 14/12/06: meta_parameters now only as annotations in ## comments
# 14/12/03: unified use of base class constructor call, now always
# super(ThisClass, self).__init__(args_for_base_class_constructor)
# 14/11/29: termination via "stop now" in file cmaes_signals.par
# 14/11/28: bug fix initialization of C took place before setting the
# seed. Now in some dimensions (e.g. 10) results are (still) not
# deterministic due to np.linalg.eigh, in some dimensions (<9, 12)
# they seem to be deterministic.
# 14/11/23: bipop option integration, contributed by Petr Baudis
# 14/09/30: initial_elitism option added to fmin
# 14/08/1x: developing fitness wrappers in FFWrappers class
# 14/08/xx: return value of OOOptimizer.optimize changed to self.
# CMAOptions now need to uniquely match an *initial substring*
# only (via method corrected_key).
# Bug fix in CMAEvolutionStrategy.stop: termination conditions
# are now recomputed iff check and self.countiter > 0.
# Doc corrected that self.gp.geno _is_ applied to x0
# Vast reorganization/modularization/improvements of plotting
# 14/08/01: bug fix to guaranty pos. def. in active CMA
# 14/06/04: gradient of f can now be used with fmin and/or ask
# 14/05/11: global rcParams['font.size'] not permanently changed anymore,
# a little nicer annotations for the plots
# 14/05/07: added method result_pretty to pretty print optimization result
# 14/05/06: associated show() everywhere with ion() which should solve the
# blocked terminal problem
# 14/05/05: all instances of "unicode" removed (was incompatible to 3.x)
# 14/05/05: replaced type(x) == y with isinstance(x, y), reorganized the
# comments before the code starts
# 14/05/xx: change the order of kwargs of OOOptimizer.optimize,
# remove prepare method in AdaptSigma classes, various changes/cleaning
# 14/03/01: bug fix BoundaryHandlerBase.has_bounds didn't check lower bounds correctly
# bug fix in BoundPenalty.repair len(bounds[0]) was used instead of len(bounds[1])
# bug fix in GenoPheno.pheno, where x was not copied when only boundary-repair was applied
# 14/02/27: bug fixed when BoundPenalty was combined with fixed variables.
# 13/xx/xx: step-size adaptation becomes a class derived from CMAAdaptSigmaBase,
# to make testing different adaptation rules (much) easier
# 12/12/14: separated CMAOptions and arguments to fmin
# 12/10/25: removed useless check_points from fmin interface
# 12/10/17: bug fix printing number of infeasible samples, moved not-in-use methods
# timesCroot and divCroot to the right class
# 12/10/16 (0.92.00): various changes commit: bug bound[0] -> bounds[0], more_to_write fixed,
# sigma_vec introduced, restart from elitist, trace normalization, max(mu,popsize/2)
# is used for weight calculation.
# 12/07/23: (bug:) BoundPenalty.update respects now genotype-phenotype transformation
# 12/07/21: convert value True for noisehandling into 1 making the output compatible
# 12/01/30: class Solution and more old stuff removed r3101
# 12/01/29: class Solution is depreciated, GenoPheno and SolutionDict do the job (v0.91.00, r3100)
# 12/01/06: CMA_eigenmethod option now takes a function (integer still works)
# 11/09/30: flat fitness termination checks also history length
# 11/09/30: elitist option (using method clip_or_fit_solutions)
# 11/09/xx: method clip_or_fit_solutions for check_points option for all sorts of
# injected or modified solutions and even reliable adaptive encoding
# 11/08/19: fixed: scaling and typical_x type clashes 1 vs array(1) vs ones(dim) vs dim * [1]
# 11/07/25: fixed: fmin wrote first and last line even with verb_log==0
# fixed: method settableOptionsList, also renamed to versatileOptions
# default seed depends on time now
# 11/07/xx (0.9.92): added: active CMA, selective mirrored sampling, noise/uncertainty handling
# fixed: output argument ordering in fmin, print now only used as function
# removed: parallel option in fmin
# 11/07/01: another try to get rid of the memory leak by replacing self.unrepaired = self[:]
# 11/07/01: major clean-up and reworking of abstract base classes and of the documentation,
# also the return value of fmin changed and attribute stop is now a method.
# 11/04/22: bug-fix: option fixed_variables in combination with scaling
# 11/04/21: stopdict is not a copy anymore
# 11/04/15: option fixed_variables implemented
# 11/03/23: bug-fix boundary update was computed even without boundaries
# 11/03/12: bug-fix of variable annotation in plots
# 11/02/05: work around a memory leak in numpy
# 11/02/05: plotting routines improved
# 10/10/17: cleaning up, now version 0.9.30
# 10/10/17: bug-fix: return values of fmin now use phenotyp (relevant
# if input scaling_of_variables is given)
# 08/10/01: option evalparallel introduced,
# bug-fix for scaling being a vector
# 08/09/26: option CMAseparable becomes CMA_diagonal
# 08/10/18: some names change, test functions go into a class
# 08/10/24: more refactorizing
# 10/03/09: upper bound exp(min(1,...)) for step-size control
# future is >= 3.0, this code has mainly been used with 2.6 & 2.7
# only necessary for python 2.5 and not in heavy use
# available from python 2.6, code should also work without
# from __future__ import collections.MutableMapping
# does not exist in future, otherwise Python 2.5 would work, since 0.91.01
import sys
if not sys.version.startswith('2'): # in python 3
xrange = range
raw_input = input
str = str
else:
input = raw_input # in py2, input(x) == eval(raw_input(x))
import time # not really essential
import collections
import numpy as np
# arange, cos, size, eye, inf, dot, floor, outer, zeros, linalg.eigh,
# sort, argsort, random, ones,...
from numpy import inf, array, dot, exp, log, sqrt, sum, isscalar, isfinite
# to access the built-in sum fct: ``__builtins__.sum`` or ``del sum``
# removes the imported sum and recovers the shadowed build-in
try:
from matplotlib import pyplot
savefig = pyplot.savefig # now we can use cma.savefig() etc
closefig = pyplot.close
def show():
# is_interactive = matplotlib.is_interactive()
pyplot.ion()
pyplot.show()
# if we call now matplotlib.interactive(True), the console is
# blocked
pyplot.ion() # prevents that execution stops after plotting
except:
pyplot = None
savefig = None
closefig = None
def show():
print('pyplot.show() is not available')
print('Could not import matplotlib.pyplot, therefore ``cma.plot()``" +'
' etc. is not available')
__author__ = 'Nikolaus Hansen'
__version__ = "1.1.06 $Revision: 4129 $ $Date: 2015-01-23 20:13:51 +0100 (Fri, 23 Jan 2015) $"
# $Source$ # according to PEP 8 style guides, but what is it good for?
# $Id: cma.py 4129 2015-01-23 19:13:51Z hansen $
# bash $: svn propset svn:keywords 'Date Revision Id' cma.py
__docformat__ = "reStructuredText" # this hides some comments entirely?
__all__ = (
'main',
'fmin',
'fcts',
'Fcts',
'felli',
'rotate',
'pprint',
'plot',
'disp',
'show',
'savefig',
'closefig',
'use_archives',
'is_feasible',
'unitdoctest',
'DerivedDictBase',
'SolutionDict',
'CMASolutionDict',
'BestSolution',
# 'BoundaryHandlerBase',
'BoundNone',
'BoundTransform',
'BoundPenalty',
# 'BoxConstraintsTransformationBase',
# 'BoxConstraintsLinQuadTransformation',
'GenoPheno',
'OOOptimizer',
'CMAEvolutionStrategy',
'CMAOptions',
'CMASolutionDict',
'CMAAdaptSigmaBase',
'CMAAdaptSigmaNone',
'CMAAdaptSigmaDistanceProportional',
'CMAAdaptSigmaCSA',
'CMAAdaptSigmaTPA',
'CMAAdaptSigmaMedianImprovement',
'BaseDataLogger',
'CMADataLogger',
'NoiseHandler',
'Sections',
'Misc',
'Mh',
'ElapsedTime',
'Rotation',
'fcts',
'FFWrappers',
)
use_archives = True # on False some unit tests fail
"""speed up for very large population size. `use_archives` prevents the
need for an inverse gp-transformation, relies on collections module,
not sure what happens if set to ``False``. """
class MetaParameters(object):
"""meta parameters are either "modifiable constants" or refer to
options from ``CMAOptions`` or are arguments to ``fmin`` or to the
``NoiseHandler`` class constructor.
Details
-------
This code contains a single class instance `meta_parameters`
Some interfaces rely on parameters being either `int` or
`float` only. More sophisticated choices are implemented via
``choice_value = {1: 'this', 2: 'or that'}[int_param_value]`` here.
CAVEAT
------
``meta_parameters`` should not be used to determine default
arguments, because these are assigned only once and for all during
module import.
"""
def __init__(self):
self.sigma0 = None ## [~0.01, ~10] # no default available
# learning rates and back-ward time horizons
self.CMA_cmean = 1.0 ## [~0.1, ~10] #
self.c1_multiplier = 1.0 ## [~1e-4, ~20] l
self.cmu_multiplier = 2.0 ## [~1e-4, ~30] l # zero means off
self.CMA_active = 1.0 ## [~1e-4, ~10] l # 0 means off, was CMA_activefac
self.cc_multiplier = 1.0 ## [~0.01, ~20] l
self.cs_multiplier = 1.0 ## [~0.01, ~10] l # learning rate for cs
self.CSA_dampfac = 1.0 ## [~0.01, ~10]
self.CMA_dampsvec_fac = None ## [~0.01, ~100] # def=np.Inf or 0.5, not clear whether this is a log parameter
self.CMA_dampsvec_fade = 0.1 ## [0, ~2]
# exponents for learning rates
self.c1_exponent = 2.0 ## [~1.25, 2]
self.cmu_exponent = 2.0 ## [~1.25, 2]
self.cact_exponent = 1.5 ## [~1.25, 2]
self.cc_exponent = 1.0 ## [~0.25, ~1.25]
self.cs_exponent = 1.0 ## [~0.25, ~1.75] # upper bound depends on CSA_clip_length_value
# selection related parameters
self.lambda_exponent = 0.0 ## [0, ~2.5] # usually <= 2, used by adding N**lambda_exponent to popsize-1
self.parent_fraction = 0.5 ## [0, 1] # default is weighted recombination
self.CMA_elitist = 0 ## [0, 2] i # a choice variable
self.CMA_mirrors = 0.0 ## [0, 0.5) # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used',
# sampling strategies
self.CMA_sample_on_sphere_surface = 0 ## [0, 1] i # boolean
self.mean_shift_line_samples = 0 ## [0, 1] i # boolean
self.pc_line_samples = 0 ## [0, 1] i # boolean
# step-size adapation related parameters
self.CSA_damp_mueff_exponent = 0.5 ## [~0.25, ~1.5] # zero would mean no dependency of damping on mueff, useful with CSA_disregard_length option',
self.CSA_disregard_length = 0 ## [0, 1] i
self.CSA_squared = 0 ## [0, 1] i
self.CSA_clip_length_value = None ## [0, ~20] # None reflects inf
# noise handling
self.noise_reeval_multiplier = 1.0 ## [0.2, 4] # usually 2 offspring are reevaluated
self.noise_choose_reeval = 1 ## [1, 3] i # which ones to reevaluate
self.noise_theta = 0.5 ## [~0.05, ~0.9]
self.noise_alphasigma = 2.0 ## [0, 10]
self.noise_alphaevals = 2.0 ## [0, 10]
self.noise_alphaevalsdown_exponent = -0.25 ## [-1.5, 0]
self.noise_aggregate = None ## [1, 2] i # None and 0 == default or user option choice, 1 == median, 2 == mean
# TODO: more noise handling options (maxreevals...)
# restarts
self.restarts = 0 ## [0, ~30] # but depends on popsize inc
self.restart_from_best = 0 ## [0, 1] i # bool
self.incpopsize = 2.0 ## [~1, ~5]
# termination conditions (for restarts)
self.maxiter_multiplier = 1.0 ## [~0.01, ~100] l
self.mindx = 0.0 ## [1e-17, ~1e-3] l #v minimal std in any direction, cave interference with tol*',
self.minstd = 0.0 ## [1e-17, ~1e-3] l #v minimal std in any coordinate direction, cave interference with tol*',
self.maxstd = None ## [~1, ~1e9] l #v maximal std in any coordinate direction, default is inf',
self.tolfacupx = 1e3 ## [~10, ~1e9] l #v termination when step-size increases by tolfacupx (diverges). That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0',
self.tolupsigma = 1e20 ## [~100, ~1e99] l #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C))) indicates "creeping behavior" with usually minor improvements',
self.tolx = 1e-11 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in x-changes',
self.tolfun = 1e-11 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in function value, quite useful',
self.tolfunhist = 1e-12 ## [1e-17, ~1e-3] l #v termination criterion: tolerance in function value history',
self.tolstagnation_multiplier = 1.0 ## [0.01, ~100] # ': 'int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations',
# abandoned:
# self.noise_change_sigma_exponent = 1.0 ## [0, 2]
# self.noise_epsilon = 1e-7 ## [0, ~1e-2] l #
# self.maxfevals = None ## [1, ~1e11] l # is not a performance parameter
# self.cc_mu_multiplier = 1 ## [0, ~10] # AKA alpha_cc
# self.lambda_log_multiplier = 3 ## [0, ~10]
# self.lambda_multiplier = 0 ## (0, ~10]
meta_parameters = MetaParameters()
# emptysets = ('', (), [], {})
# array([]) does not work but np.size(.) == 0
# here is the problem:
# bool(array([0])) is False
# bool(list(array([0]))) is True
# bool(list(array([0, 1]))) is True
# bool(array([0, 1])) raises ValueError
#
# "x in emptysets" cannot be well replaced by "not x"
# which is also True for array([]) and None, but also for 0 and False,
# and False for NaN, and an exception for array([0,1]), see also
# http://google-styleguide.googlecode.com/svn/trunk/pyguide.html#True/False_evaluations
# ____________________________________________________________
# ____________________________________________________________
#
def rglen(ar):
"""shortcut for the iterator ``xrange(len(ar))``"""
return range(len(ar))
def is_feasible(x, f):
"""default to check feasibility, see also ``cma_default_options``"""
return f is not None and f is not np.NaN
global_verbosity = 1
def _print_warning(msg, method_name=None, class_name=None, iteration=None,
verbose=None):
if verbose is None:
verbose = global_verbosity
if verbose > 0:
print('WARNING (module=' + __name__ +
(', class=' + str(class_name) if class_name else '') +
(', method=' + str(method_name) if method_name else '') +
(', iteration=' + str(iteration) if iteration else '') +
'): ', msg)
# ____________________________________________________________
# ____________________________________________________________
#
def unitdoctest():
"""is used to describe test cases and might in future become helpful
as an experimental tutorial as well. The main testing feature at the
moment is by doctest with ``cma._test()`` or conveniently by
``python cma.py --test``. With the ``--verbose`` option added, the
results will always slightly differ and many "failed" test cases
might be reported.
A simple first overall test:
>>> import cma
>>> res = cma.fmin(cma.fcts.elli, 3*[1], 1,
... {'CMA_diagonal':2, 'seed':1, 'verb_time':0})
(3_w,7)-CMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=1)
Covariance matrix is diagonal for 2 iterations (1/ccov=7.0)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 7 1.453161670768570e+04 1.2e+00 1.08e+00 1e+00 1e+00
2 14 3.281197961927601e+04 1.3e+00 1.22e+00 1e+00 2e+00
3 21 1.082851071704020e+04 1.3e+00 1.24e+00 1e+00 2e+00
100 700 8.544042012075362e+00 1.4e+02 3.18e-01 1e-03 2e-01
200 1400 5.691152415221861e-12 1.0e+03 3.82e-05 1e-09 1e-06
220 1540 3.890107746209078e-15 9.5e+02 4.56e-06 8e-11 7e-08
termination on tolfun : 1e-11
final/bestever f-value = 3.89010774621e-15 2.52273602735e-15
mean solution: [ -4.63614606e-08 -3.42761465e-10 1.59957987e-11]
std deviation: [ 6.96066282e-08 2.28704425e-09 7.63875911e-11]
Test on the Rosenbrock function with 3 restarts. The first trial only
finds the local optimum, which happens in about 20% of the cases.
>>> import cma
>>> res = cma.fmin(cma.fcts.rosen, 4*[-1], 1,
... options={'ftarget':1e-6, 'verb_time':0,
... 'verb_disp':500, 'seed':3},
... restarts=3)
(4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=3)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 8 4.875315645656848e+01 1.0e+00 8.43e-01 8e-01 8e-01
2 16 1.662319948123120e+02 1.1e+00 7.67e-01 7e-01 8e-01
3 24 6.747063604799602e+01 1.2e+00 7.08e-01 6e-01 7e-01
184 1472 3.701428610430019e+00 4.3e+01 9.41e-07 3e-08 5e-08
termination on tolfun : 1e-11
final/bestever f-value = 3.70142861043 3.70142861043
mean solution: [-0.77565922 0.61309336 0.38206284 0.14597202]
std deviation: [ 2.54211502e-08 3.88803698e-08 4.74481641e-08 3.64398108e-08]
(8_w,16)-CMA-ES (mu_w=4.8,w_1=32%) in dimension 4 (seed=4)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 1489 2.011376859371495e+02 1.0e+00 8.90e-01 8e-01 9e-01
2 1505 4.157106647905128e+01 1.1e+00 8.02e-01 7e-01 7e-01
3 1521 3.548184889359060e+01 1.1e+00 1.02e+00 8e-01 1e+00
111 3249 6.831867555502181e-07 5.1e+01 2.62e-02 2e-04 2e-03
termination on ftarget : 1e-06
final/bestever f-value = 6.8318675555e-07 1.18576673231e-07
mean solution: [ 0.99997004 0.99993938 0.99984868 0.99969505]
std deviation: [ 0.00018973 0.00038006 0.00076479 0.00151402]
>>> assert res[1] <= 1e-6
Notice the different termination conditions. Termination on the target
function value ftarget prevents further restarts.
Test of scaling_of_variables option
>>> import cma
>>> opts = cma.CMAOptions()
>>> opts['seed'] = 456
>>> opts['verb_disp'] = 0
>>> opts['CMA_active'] = 1
>>> # rescaling of third variable: for searching in roughly
>>> # x0 plus/minus 1e3*sigma0 (instead of plus/minus sigma0)
>>> opts['scaling_of_variables'] = [1, 1, 1e3, 1]
>>> res = cma.fmin(cma.fcts.rosen, 4 * [0.1], 0.1, opts)
termination on tolfun : 1e-11
final/bestever f-value = 2.68096173031e-14 1.09714829146e-14
mean solution: [ 1.00000001 1.00000002 1.00000004 1.00000007]
std deviation: [ 3.00466854e-08 5.88400826e-08 1.18482371e-07 2.34837383e-07]
The printed std deviations reflect the actual value in the parameters
of the function (not the one in the internal representation which
can be different).
Test of CMA_stds scaling option.
>>> import cma
>>> opts = cma.CMAOptions()
>>> s = 5 * [1]
>>> s[0] = 1e3
>>> opts.set('CMA_stds', s)
>>> opts.set('verb_disp', 0)
>>> res = cma.fmin(cma.fcts.cigar, 5 * [0.1], 0.1, opts)
>>> assert res[1] < 1800
:See: cma.main(), cma._test()
"""
pass
class _BlancClass(object):
"""blanc container class for having a collection of attributes,
that might/should at some point become a more tailored class"""
if use_archives:
class DerivedDictBase(collections.MutableMapping):
"""for conveniently adding "features" to a dictionary. The actual
dictionary is in ``self.data``. Copy-paste
and modify setitem, getitem, and delitem, if necessary.
Details: This is the clean way to subclass build-in dict.
"""
def __init__(self, *args, **kwargs):
# collections.MutableMapping.__init__(self)
super(DerivedDictBase, self).__init__()
# super(SolutionDict, self).__init__() # the same
self.data = dict()
self.data.update(dict(*args, **kwargs))
def __len__(self):
return len(self.data)
def __contains__(self, key):
return key in self.data
def __iter__(self):
return iter(self.data)
def __setitem__(self, key, value):
"""defines self[key] = value"""
self.data[key] = value
def __getitem__(self, key):
"""defines self[key]"""
return self.data[key]
def __delitem__(self, key):
del self.data[key]
class SolutionDict(DerivedDictBase):
"""dictionary with computation of an hash key.
The hash key is generated from the inserted solution and a stack of
previously inserted same solutions is provided. Each entry is meant
to store additional information related to the solution.
>>> import cma, numpy as np
>>> d = cma.SolutionDict()
>>> x = np.array([1,2,4])
>>> d[x] = {'f': sum(x**2), 'iteration': 1}
>>> assert d[x]['iteration'] == 1
>>> assert d.get(x) == (d[x] if d.key(x) in d.keys() else None)
TODO: data_with_same_key behaves like a stack (see setitem and
delitem), but rather should behave like a queue?! A queue is less
consistent with the operation self[key] = ..., if
self.data_with_same_key[key] is not empty.
TODO: iteration key is used to clean up without error management
"""
def __init__(self, *args, **kwargs):
# DerivedDictBase.__init__(self, *args, **kwargs)
super(SolutionDict, self).__init__(*args, **kwargs)
self.data_with_same_key = {}
self.last_iteration = 0
def key(self, x):
try:
return tuple(x)
# using sum(x) is slower, using x[0] is slightly faster
except TypeError:
return x
def __setitem__(self, key, value):
"""defines self[key] = value"""
key = self.key(key)
if key in self.data_with_same_key:
self.data_with_same_key[key] += [self.data[key]]
elif key in self.data:
self.data_with_same_key[key] = [self.data[key]]
self.data[key] = value
def __getitem__(self, key): # 50% of time of
"""defines self[key]"""
return self.data[self.key(key)]
def __delitem__(self, key):
"""remove only most current key-entry"""
key = self.key(key)
if key in self.data_with_same_key:
if len(self.data_with_same_key[key]) == 1:
self.data[key] = self.data_with_same_key.pop(key)[0]
else:
self.data[key] = self.data_with_same_key[key].pop(-1)
else:
del self.data[key]
def truncate(self, max_len, min_iter):
if len(self) > max_len:
for k in list(self.keys()):
if self[k]['iteration'] < min_iter:
del self[k]
# deletes one item with k as key, better delete all?
class CMASolutionDict(SolutionDict):
def __init__(self, *args, **kwargs):
# SolutionDict.__init__(self, *args, **kwargs)
super(CMASolutionDict, self).__init__(*args, **kwargs)
self.last_solution_index = 0
# TODO: insert takes 30% of the overall CPU time, mostly in def key()
# with about 15% of the overall CPU time
def insert(self, key, geno=None, iteration=None, fitness=None, value=None):
"""insert an entry with key ``key`` and value
``value if value is not None else {'geno':key}`` and
``self[key]['kwarg'] = kwarg if kwarg is not None`` for the further kwargs.
"""
# archive returned solutions, first clean up archive
if iteration is not None and iteration > self.last_iteration and (iteration % 10) < 1:
self.truncate(300, iteration - 3)
elif value is not None and value.get('iteration'):
iteration = value['iteration']
if (iteration % 10) < 1:
self.truncate(300, iteration - 3)
self.last_solution_index += 1
if value is not None:
try:
iteration = value['iteration']
except:
pass
if iteration is not None:
if iteration > self.last_iteration:
self.last_solution_index = 0
self.last_iteration = iteration
else:
iteration = self.last_iteration + 0.5 # a hack to get a somewhat reasonable value
if value is not None:
self[key] = value
else:
self[key] = {'pheno': key}
if geno is not None:
self[key]['geno'] = geno
if iteration is not None:
self[key]['iteration'] = iteration
if fitness is not None:
self[key]['fitness'] = fitness
return self[key]
if not use_archives:
class CMASolutionDict(dict):
"""a hack to get most code examples running"""
def insert(self, *args, **kwargs):
pass
def get(self, key):
return None
def __getitem__(self, key):
return None
def __setitem__(self, key, value):
pass
class BestSolution(object):
"""container to keep track of the best solution seen"""
def __init__(self, x=None, f=np.inf, evals=None):
"""initialize the best solution with `x`, `f`, and `evals`.
Better solutions have smaller `f`-values.
"""
self.x = x
self.x_geno = None
self.f = f if f is not None and f is not np.nan else np.inf
self.evals = evals
self.evalsall = evals
self.last = _BlancClass()
self.last.x = x
self.last.f = f
def update(self, arx, xarchive=None, arf=None, evals=None):
"""checks for better solutions in list `arx`.
Based on the smallest corresponding value in `arf`,
alternatively, `update` may be called with a `BestSolution`
instance like ``update(another_best_solution)`` in which case
the better solution becomes the current best.
`xarchive` is used to retrieve the genotype of a solution.
"""
if isinstance(arx, BestSolution):
if self.evalsall is None:
self.evalsall = arx.evalsall
elif arx.evalsall is not None:
self.evalsall = max((self.evalsall, arx.evalsall))
if arx.f is not None and arx.f < np.inf:
self.update([arx.x], xarchive, [arx.f], arx.evals)
return self
assert arf is not None
# find failsave minimum
minidx = np.nanargmin(arf)
if minidx is np.nan:
return
minarf = arf[minidx]
# minarf = reduce(lambda x, y: y if y and y is not np.nan
# and y < x else x, arf, np.inf)
if minarf < np.inf and (minarf < self.f or self.f is None):
self.x, self.f = arx[minidx], arf[minidx]
if xarchive is not None and xarchive.get(self.x) is not None:
self.x_geno = xarchive[self.x].get('geno')
else:
self.x_geno = None
self.evals = None if not evals else evals - len(arf) + minidx + 1
self.evalsall = evals
elif evals:
self.evalsall = evals
self.last.x = arx[minidx]
self.last.f = minarf
def get(self):
"""return ``(x, f, evals)`` """
return self.x, self.f, self.evals # , self.x_geno
# ____________________________________________________________
# ____________________________________________________________
#
class BoundaryHandlerBase(object):
    """Base class for boundary handling.

    Normalizes and stores ``bounds`` and provides generic queries
    (`get_bounds`, `has_bounds`, `is_in_bounds`), a projection `repair`,
    and a penalty interface via `__call__` (zero by default).
    """
    def __init__(self, bounds):
        """bounds are not copied, but possibly modified and
        put into a normalized form: ``bounds`` can be ``None``
        or ``[lb, ub]`` where ``lb`` and ``ub`` are
        either None or a vector (which can have ``None`` entries).
        Generally, the last entry is recycled to compute bounds
        for any dimension.
        """
        if not bounds:
            self.bounds = None
        else:
            l = [None, None]  # figure out lengths
            for i in [0, 1]:
                try:
                    l[i] = len(bounds[i])
                except TypeError:
                    bounds[i] = [bounds[i]]  # a scalar becomes a 1-vector
                    l[i] = 1
                # a side consisting only of None/non-finite entries carries
                # no information and is dropped
                if all([bounds[i][j] is None or not isfinite(bounds[i][j])
                        for j in rglen(bounds[i])]):
                    bounds[i] = None
                # lower bound +inf or upper bound -inf leaves no feasible point
                if bounds[i] is not None and any([bounds[i][j] == (-1)**i * np.inf
                                                  for j in rglen(bounds[i])]):
                    raise ValueError('lower/upper is +inf/-inf and ' +
                                     'therefore no finite feasible solution is available')
            self.bounds = bounds
    def __call__(self, solutions, *args, **kwargs):
        """return penalty or list of penalties, by default zero(s).
        This interface seems too specifically tailored to the derived
        BoundPenalty class, it should maybe change.
        """
        if isscalar(solutions[0]):
            return 0.0
        else:
            return len(solutions) * [0.0]
    def update(self, *args, **kwargs):
        """end-of-iteration hook; a no-op in the base class"""
        return self
    def repair(self, x, copy_if_changed=True, copy_always=False):
        """projects infeasible values on the domain bound, might be
        overwritten by derived class.

        Returns the (possibly copied) repaired vector.
        """
        if copy_always:
            x = array(x, copy=True)
            copy = False
        else:
            copy = copy_if_changed
        if self.bounds is None:
            return x
        for ib in [0, 1]:
            if self.bounds[ib] is None:
                continue
            for i in rglen(x):
                # the last bound entry is recycled for higher dimensions
                idx = min([i, len(self.bounds[ib]) - 1])
                if self.bounds[ib][idx] is not None and \
                        (-1)**ib * x[i] < (-1)**ib * self.bounds[ib][idx]:
                    if copy:  # copy at most once, right before the first change
                        x = array(x, copy=True)
                        copy = False
                    x[i] = self.bounds[ib][idx]
        # bug fix: the repaired vector was not returned (implicit None),
        # while all callers and all overriding implementations expect it
        return x
    def inverse(self, y, copy_if_changed=True, copy_always=False):
        """inverse of `repair`; the identity in the base class"""
        return y if not copy_always else array(y, copy=True)
    def get_bounds(self, which, dimension):
        """``get_bounds('lower', 8)`` returns the lower bounds in 8-D"""
        if which == 'lower' or which == 0:
            return self._get_bounds(0, dimension)
        elif which == 'upper' or which == 1:
            return self._get_bounds(1, dimension)
        else:
            raise ValueError("argument which must be 'lower' or 'upper'")
    def _get_bounds(self, ib, dimension):
        """ib == 0/1 means lower/upper bound, return a vector of length
        `dimension` (``None`` entries become +-inf)"""
        sign_ = 2 * ib - 1
        assert sign_**2 == 1
        if self.bounds is None or self.bounds[ib] is None:
            return array(dimension * [sign_ * np.Inf])
        res = []
        for i in range(dimension):
            res.append(self.bounds[ib][min([i, len(self.bounds[ib]) - 1])])
            if res[-1] is None:
                res[-1] = sign_ * np.Inf
        return array(res)
    def has_bounds(self):
        """return True, if any variable is bounded"""
        bounds = self.bounds
        if bounds in (None, [None, None]):
            return False
        for ib, bound in enumerate(bounds):
            if bound is not None:
                sign_ = 2 * ib - 1
                for bound_i in bound:
                    if bound_i is not None and sign_ * bound_i < np.inf:
                        return True
        return False
    def is_in_bounds(self, x):
        """return True iff every component of ``x`` respects its bound"""
        if self.bounds is None:
            return True
        for ib in [0, 1]:
            if self.bounds[ib] is None:
                continue
            for i in rglen(x):
                idx = min([i, len(self.bounds[ib]) - 1])
                if self.bounds[ib][idx] is not None and \
                        (-1)**ib * x[i] < (-1)**ib * self.bounds[ib][idx]:
                    return False
        return True
    def to_dim_times_two(self, bounds):
        """return boundaries in format ``[[lb0, ub0], [lb1, ub1], ...]``,
        as used by ``BoxConstraints...`` class.
        """
        if not bounds:
            b = [[None, None]]
        else:
            l = [None, None]  # figure out lengths
            for i in [0, 1]:
                try:
                    l[i] = len(bounds[i])
                except TypeError:
                    bounds[i] = [bounds[i]]
                    l[i] = 1
            b = []  # bounds in different format
            try:
                for i in range(max(l)):
                    b.append([bounds[0][i] if i < l[0] else None,
                              bounds[1][i] if i < l[1] else None])
            except (TypeError, IndexError):
                # fixed message typo: "scalar_of_vector" -> "scalar_or_vector"
                print("boundaries must be provided in the form " +
                      "[scalar_or_vector, scalar_or_vector]")
                raise
        return b
# ____________________________________________________________
# ____________________________________________________________
#
class BoundNone(BoundaryHandlerBase):
    """Trivial boundary handler: there are no bounds, every point is feasible."""
    def __init__(self, bounds=None):
        # only ``None`` is a legal argument here
        if bounds is not None:
            raise ValueError()
        super(BoundNone, self).__init__(None)
    def is_in_bounds(self, x):
        """every solution is feasible"""
        return True
# ____________________________________________________________
# ____________________________________________________________
#
class BoundTransform(BoundaryHandlerBase):
    """Boundary handler that maps solutions into the feasible domain via a
    smooth, piecewise linear and quadratic transformation.

    The heavy lifting is delegated to a
    ``BoxConstraintsLinQuadTransformation`` instance stored in
    ``self.bounds_tf``; this class only adapts the bounds format and the
    copy semantics.

    :See: ``BoxConstraintsLinQuadTransformation`` for the transformation
        details and ``BoundaryHandlerBase`` for the interface.
    """
    def __init__(self, bounds=None):
        """``bounds`` is ``None`` or ``[lb, ub]``, where ``lb`` and ``ub``
        are each ``None``, a scalar, or a sequence of appropriate size
        (the last entry is recycled for remaining dimensions).
        """
        super(BoundTransform, self).__init__(bounds)
        self.bounds_tf = BoxConstraintsLinQuadTransformation(
            self.to_dim_times_two(bounds))
    def _is_unbounded(self):
        # True iff no bound information is present in either direction
        return self.bounds is None or (self.bounds[0] is None and
                                       self.bounds[1] is None)
    def repair(self, x, copy_if_changed=True, copy_always=False):
        """transforms ``x`` into the bounded domain.
        ``copy_always`` option might disappear.
        """
        may_copy = copy_if_changed
        if copy_always:
            x = array(x, copy=True)
            may_copy = False
        if self._is_unbounded():
            return x
        return self.bounds_tf(x, may_copy)
    def transform(self, x):
        """alias for `repair`"""
        return self.repair(x)
    def inverse(self, x, copy_if_changed=True, copy_always=False):
        """inverse transform of ``x`` from the bounded domain.
        """
        may_copy = copy_if_changed
        if copy_always:
            x = array(x, copy=True)
            may_copy = False
        if self._is_unbounded():
            return x
        return self.bounds_tf.inverse(x, may_copy)
# ____________________________________________________________
# ____________________________________________________________
#
class BoundPenalty(BoundaryHandlerBase):
    """Computes the boundary penalty. Must be updated each iteration,
    using the `update` method.
    Details
    -------
    The penalty computes like ``sum(w[i] * (x[i]-xfeas[i])**2)``,
    where `xfeas` is the closest feasible (in-bounds) solution from `x`.
    The weight `w[i]` should be updated during each iteration using
    the update method.
    Example:
    >>> import cma
    >>> cma.fmin(cma.felli, 6 * [1], 1,
    ...          {
    ...          'boundary_handling': 'BoundPenalty',
    ...          'bounds': [-1, 1],
    ...          'fixed_variables': {0: 0.012, 2:0.234}
    ...          })
    Reference: Hansen et al 2009, A Method for Handling Uncertainty...
    IEEE TEC, with addendum, see
    http://www.lri.fr/~hansen/TEC2009online.pdf
    """
    def __init__(self, bounds=None):
        """Argument bounds can be `None` or ``bounds[0]`` and ``bounds[1]``
        are lower and upper domain boundaries, each is either `None` or
        a scalar or a list or array of appropriate size.
        """
        # #
        # bounds attribute reminds the domain boundary values
        # BoundaryHandlerBase.__init__(self, bounds)
        super(BoundPenalty, self).__init__(bounds)
        # penalty weight(s); starts as a scalar and becomes a vector of
        # per-coordinate weights once `update` has initialized it
        self.gamma = 1 # a very crude assumption
        self.weights_initialized = False # gamma becomes a vector after initialization
        self.hist = [] # delta-f history
    def repair(self, x, copy_if_changed=True, copy_always=False):
        """sets out-of-bounds components of ``x`` on the bounds.
        """
        # TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound
        # remark: np.max([bounds[0], x]) is about 40 times slower than max((bounds[0], x))
        copy = copy_if_changed
        if copy_always:
            x = array(x, copy=True)
        bounds = self.bounds
        if bounds not in (None, [None, None], (None, None)): # solely for efficiency
            # copy at most once, before clipping in place
            x = array(x, copy=True) if copy and not copy_always else x
            if bounds[0] is not None:
                if isscalar(bounds[0]):
                    # one scalar lower bound for all coordinates
                    for i in rglen(x):
                        x[i] = max((bounds[0], x[i]))
                else:
                    # per-coordinate lower bounds; last entry is recycled
                    for i in rglen(x):
                        j = min([i, len(bounds[0]) - 1])
                        if bounds[0][j] is not None:
                            x[i] = max((bounds[0][j], x[i]))
            if bounds[1] is not None:
                if isscalar(bounds[1]):
                    for i in rglen(x):
                        x[i] = min((bounds[1], x[i]))
                else:
                    for i in rglen(x):
                        j = min((i, len(bounds[1]) - 1))
                        if bounds[1][j] is not None:
                            x[i] = min((bounds[1][j], x[i]))
        return x
    # ____________________________________________________________
    #
    def __call__(self, x, archive, gp):
        """returns the boundary violation penalty for `x` ,where `x` is a
        single solution or a list or array of solutions.

        ``archive`` maps phenotypes to their genotypes; ``gp`` is the
        `GenoPheno` transformation used to recover the unrepaired phenotype.
        """
        if x in (None, (), []):
            return x
        if self.bounds in (None, [None, None], (None, None)):
            return 0.0 if isscalar(x[0]) else [0.0] * len(x) # no penalty
        x_is_single_vector = isscalar(x[0])
        x = [x] if x_is_single_vector else x
        # add fixed variables to self.gamma
        try:
            gamma = list(self.gamma) # fails if self.gamma is a scalar
            # fixed variables get zero weight, i.e. no penalty contribution
            for i in sorted(gp.fixed_values): # fails if fixed_values is None
                gamma.insert(i, 0.0)
            gamma = array(gamma, copy=False)
        except TypeError:
            gamma = self.gamma
        pen = []
        for xi in x:
            # CAVE: this does not work with already repaired values!!
            # CPU(N,lam,iter=20,200,100)?: 3s of 10s, array(xi): 1s
            # remark: one deep copy can be prevented by xold = xi first
            xpheno = gp.pheno(archive[xi]['geno'])
            # necessary, because xi was repaired to be in bounds
            xinbounds = self.repair(xpheno)
            # could be omitted (with unpredictable effect in case of external repair)
            fac = 1 # exp(0.1 * (log(self.scal) - np.mean(self.scal)))
            # weighted squared distance to the feasible domain, averaged over N
            pen.append(sum(gamma * ((xinbounds - xpheno) / fac)**2) / len(xi))
        return pen[0] if x_is_single_vector else pen
    # ____________________________________________________________
    #
    def feasible_ratio(self, solutions):
        """counts for each coordinate the number of feasible values in
        ``solutions`` and returns an array of length ``len(solutions[0])``
        with the ratios.
        `solutions` is a list or array of repaired ``Solution``
        instances,
        """
        raise NotImplementedError('Solution class disappeared')
        # NOTE(review): the code below is unreachable dead code, kept from
        # a removed Solution-class based implementation
        count = np.zeros(len(solutions[0]))
        for x in solutions:
            count += x.unrepaired == x
        return count / float(len(solutions))
    # ____________________________________________________________
    #
    def update(self, function_values, es):
        """updates the weights for computing a boundary penalty.
        Arguments
        ---------
        `function_values`
            all function values of recent population of solutions
        `es`
            `CMAEvolutionStrategy` object instance, in particular
            mean and variances and the methods from the attribute
            `gp` of type `GenoPheno` are used.
        """
        if self.bounds is None or (self.bounds[0] is None and
                                   self.bounds[1] is None):
            return self
        N = es.N
        # ## prepare
        # compute varis = sigma**2 * C_ii
        varis = es.sigma**2 * array(N * [es.C] if isscalar(es.C) else (# scalar case
                                    es.C if isscalar(es.C[0]) else # diagonal matrix case
                                    [es.C[i][i] for i in range(N)])) # full matrix case
        # relative violation in geno-space
        dmean = (es.mean - es.gp.geno(self.repair(es.gp.pheno(es.mean)))) / varis**0.5
        # ## Store/update a history of delta fitness value
        fvals = sorted(function_values)
        l = 1 + len(fvals)
        val = fvals[3 * l // 4] - fvals[l // 4] # exact interquartile range apart interpolation
        val = val / np.mean(varis) # new: val is normalized with sigma of the same iteration
        # insert val in history
        if isfinite(val) and val > 0:
            self.hist.insert(0, val)
        elif val == inf and len(self.hist) > 1:
            # repeat the largest seen value instead of storing inf
            self.hist.insert(0, max(self.hist))
        else:
            pass # ignore 0 or nan values
        # bounded history length, growing slowly with dimension
        if len(self.hist) > 20 + (3 * N) / es.popsize:
            self.hist.pop()
        # ## prepare
        dfit = np.median(self.hist) # median interquartile range
        damp = min(1, es.sp.mueff / 10. / N)
        # ## set/update weights
        # Throw initialization error
        if len(self.hist) == 0:
            raise _Error('wrongful initialization, no feasible solution sampled. ' +
                'Reasons can be mistakenly set bounds (lower bound not smaller than upper bound) or a too large initial sigma0 or... ' +
                'See description of argument func in help(cma.fmin) or an example handling infeasible solutions in help(cma.CMAEvolutionStrategy). ')
        # initialize weights
        if dmean.any() and (not self.weights_initialized or es.countiter == 2): # TODO
            self.gamma = array(N * [2 * dfit]) ## BUGBUGzzzz: N should be phenotypic (bounds are in phenotype), but is genotypic
            self.weights_initialized = True
        # update weights gamma
        if self.weights_initialized:
            edist = array(abs(dmean) - 3 * max(1, N**0.5 / es.sp.mueff))
            if 1 < 3: # this is better, around a factor of two
                # increase single weights possibly with a faster rate than they can decrease
                # value unit of edst is std dev, 3==random walk of 9 steps
                self.gamma *= exp((edist > 0) * np.tanh(edist / 3) / 2.)**damp
                # decrease all weights up to the same level to avoid single extremely small weights
                # use a constant factor for pseudo-keeping invariance
                self.gamma[self.gamma > 5 * dfit] *= exp(-1. / 3)**damp
                # self.gamma[idx] *= exp(5*dfit/self.gamma[idx] - 1)**(damp/3)
        es.more_to_write += list(self.gamma) if self.weights_initialized else N * [1.0]
        # ## return penalty
        # es.more_to_write = self.gamma if not isscalar(self.gamma) else N*[1]
        return self # bound penalty values
# ____________________________________________________________
# ____________________________________________________________
#
class BoxConstraintsTransformationBase(object):
    """Implements a transformation into boundaries and is used for
    boundary handling::
        tf = BoxConstraintsTransformationAnyDerivedClass([[1, 4]])
        x = [3, 2, 4.4]
        y = tf(x)  # "repaired" solution
        print(tf([2.5]))  # middle value is never changed
        [2.5]
    :See: ``BoundaryHandler``
    """
    def __init__(self, bounds):
        """``bounds`` must be a sequence of ``[lb, ub]`` pairs; the last
        pair is recycled for all remaining dimensions.

        Raises `ValueError` for any other input shape.
        """
        try:
            if len(bounds[0]) != 2:
                raise ValueError
        # narrowed from a bare ``except:``: len() on a scalar raises
        # TypeError, an empty/None ``bounds`` raises IndexError/TypeError
        except (TypeError, IndexError, ValueError):
            raise ValueError(' bounds must be either [[lb0, ub0]] or [[lb0, ub0], [lb1, ub1],...], \n where in both cases the last entry is reused for all remaining dimensions')
        self.bounds = bounds
        self.initialize()
    def initialize(self):
        """initialize in base class"""
        self._lb = [b[0] for b in self.bounds]  # can be done more efficiently?
        self._ub = [b[1] for b in self.bounds]
    def _lowerupperval(self, a, b, c):
        # clamp ``c`` into [max(a), min(b)]
        return np.max([np.max(a), np.min([np.min(b), c])])
    def bounds_i(self, i):
        """return ``[ith_lower_bound, ith_upper_bound]``"""
        return self.bounds[self._index(i)]
    def __call__(self, solution_in_genotype):
        """apply the per-coordinate transformation to a full solution"""
        res = [self._transform_i(x, i) for i, x in enumerate(solution_in_genotype)]
        return res
    transform = __call__
    def inverse(self, solution_in_phenotype, copy_if_changed=True, copy_always=True):
        """apply the per-coordinate inverse transformation"""
        return [self._inverse_i(y, i) for i, y in enumerate(solution_in_phenotype)]
    def _index(self, i):
        # recycle the last bounds pair for dimensions beyond len(bounds)
        return min((i, len(self.bounds) - 1))
    def _transform_i(self, x, i):
        raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
    def _inverse_i(self, y, i):
        raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
    def shift_or_mirror_into_invertible_domain(self, solution_genotype):
        """return the reference solution that has the same ``box_constraints_transformation(solution)``
        value, i.e. ``tf.shift_or_mirror_into_invertible_domain(x) = tf.inverse(tf.transform(x))``.
        This is an idempotent mapping (leading to the same result independent how often it is
        repeatedly applied).
        """
        # removed an unreachable ``raise NotImplementedError`` that
        # followed this return statement
        return self.inverse(self(solution_genotype))
class _BoxConstraintsTransformationTemplate(BoxConstraintsTransformationBase):
    """copy/paste this template to implement a new boundary handling transformation"""
    def __init__(self, bounds):
        # BoxConstraintsTransformationBase.__init__(self, bounds)
        super(_BoxConstraintsTransformationTemplate, self).__init__(bounds)
    def initialize(self):
        # delegate; called from the base-class __init__
        BoxConstraintsTransformationBase.initialize(self)  # likely to be removed
    def _transform_i(self, x, i):
        # implement the forward transformation of coordinate i here
        raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
    def _inverse_i(self, y, i):
        # implement the inverse transformation of coordinate i here
        raise NotImplementedError('this is an abstract method that should be implemented in the derived class')
    # prepend the base-class docstring; the right-hand ``__doc__`` resolves
    # to the string literal at the top of this class body (class-body scope)
    __doc__ = BoxConstraintsTransformationBase.__doc__ + __doc__
class BoxConstraintsLinQuadTransformation(BoxConstraintsTransformationBase):
    """implements a bijective, monotonous transformation between [lb - al, ub + au]
    and [lb, ub] which is the identity (and therefore linear) in [lb + al, ub - au]
    (typically about 90% of the interval) and quadratic in [lb - 3*al, lb + al]
    and in [ub - au, ub + 3*au]. The transformation is periodically
    expanded beyond the limits (somewhat resembling the shape sin(x-pi/2))
    with a period of ``2 * (ub - lb + al + au)``.
    Details
    =======
    Partly due to numerical considerations depend the values ``al`` and ``au``
    on ``abs(lb)`` and ``abs(ub)`` which makes the transformation non-translation
    invariant. In contrast to sin(.), the transformation is robust to "arbitrary"
    values for boundaries, e.g. a lower bound of ``-1e99`` or ``np.Inf`` or
    ``None``.
    Examples
    ========
    Example to use with cma:
    >>> import cma
    >>> # only the first variable has an upper bound
    >>> tf = cma.BoxConstraintsLinQuadTransformation([[1,2], [1,None]]) # second==last pair is re-cycled
    >>> cma.fmin(cma.felli, 9 * [2], 1, {'transformation': [tf.transform, tf.inverse], 'verb_disp': 0})
    >>> # ...or...
    >>> es = cma.CMAEvolutionStrategy(9 * [2], 1)
    >>> while not es.stop():
    ...     X = es.ask()
    ...     f = [cma.felli(tf(x)) for x in X]  # tf(x) == tf.transform(x)
    ...     es.tell(X, f)
    Example of the internal workings:
    >>> import cma
    >>> tf = cma.BoxConstraintsLinQuadTransformation([[1,2], [1,11], [1,11]])
    >>> tf.bounds
    [[1, 2], [1, 11], [1, 11]]
    >>> tf([1.5, 1.5, 1.5])
    [1.5, 1.5, 1.5]
    >>> tf([1.52, -2.2, -0.2, 2, 4, 10.4])
    [1.52, 4.0, 2.0, 2.0, 4.0, 10.4]
    >>> res = np.round(tf._au, 2)
    >>> assert list(res[:4]) == [ 0.15, 0.6, 0.6, 0.6]
    >>> res = [round(x, 2) for x in tf.shift_or_mirror_into_invertible_domain([1.52, -12.2, -0.2, 2, 4, 10.4])]
    >>> assert res == [1.52, 9.2, 2.0, 2.0, 4.0, 10.4]
    >>> tmp = tf([1])  # call with lower dimension
    """
    def __init__(self, bounds):
        """``x`` is defined in ``[lb - 3*al, ub + au + r - 2*al]`` with ``r = ub - lb + al + au``,
        and ``x == transformation(x)`` in ``[lb + al, ub - au]``.
        ``beta*x - alphal = beta*x - alphau`` is then defined in ``[lb, ub]``,
        ``alphal`` and ``alphau`` represent the same value, but respectively numerically
        better suited for values close to lb and ub.
        """
        # the base-class __init__ validates bounds and calls initialize()
        super(BoxConstraintsLinQuadTransformation, self).__init__(bounds)
        # super().__init__(bounds)  # only available since Python 3.x
        # super(BB, self).__init__(bounds)  # is supposed to call initialize
    def initialize(self, length=None):
        """see ``__init__``; precompute per-coordinate ``_lb``, ``_ub``,
        ``_al``, ``_au`` arrays of the given ``length``"""
        if length is None:
            length = len(self.bounds)
        max_i = min((len(self.bounds) - 1, length - 1))
        self._lb = array([self.bounds[min((i, max_i))][0]
                          if self.bounds[min((i, max_i))][0] is not None
                          else -np.Inf
                          for i in range(length)], copy=False)
        self._ub = array([self.bounds[min((i, max_i))][1]
                          if self.bounds[min((i, max_i))][1] is not None
                          else np.Inf
                          for i in range(length)], copy=False)
        lb = self._lb
        ub = self._ub
        # define added values for lower and upper bound
        self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
                          if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
        self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
                          if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
    def __call__(self, solution_genotype, copy_if_changed=True, copy_always=False):
        # about four times faster version of array([self._transform_i(x, i) for i, x in enumerate(solution_genotype)])
        # still, this makes a typical run on a test function two times slower, but there might be one too many copies
        # during the transformations in gp
        if len(self._lb) != len(solution_genotype):
            self.initialize(len(solution_genotype))
        lb = self._lb
        ub = self._ub
        al = self._al
        au = self._au
        if copy_always or not isinstance(solution_genotype[0], float):
            # transformed value is likely to be a float
            y = np.array(solution_genotype, copy=True, dtype=float)
            # if solution_genotype is not a float, copy value is disregarded
            copy = False
        else:
            y = solution_genotype
            copy = copy_if_changed
        # shift points that are outside one period into the invertible domain
        idx = (y < lb - 2 * al - (ub - lb) / 2.0) | (y > ub + 2 * au + (ub - lb) / 2.0)
        if idx.any():
            r = 2 * (ub[idx] - lb[idx] + al[idx] + au[idx])  # period
            s = lb[idx] - 2 * al[idx] - (ub[idx] - lb[idx]) / 2.0  # start
            if copy:
                y = np.array(y, copy=True)
                copy = False
            y[idx] -= r * ((y[idx] - s) // r)  # shift
        # mirror at the upper, then the lower invertibility limit
        idx = y > ub + au
        if idx.any():
            if copy:
                y = np.array(y, copy=True)
                copy = False
            y[idx] -= 2 * (y[idx] - ub[idx] - au[idx])
        idx = y < lb - al
        if idx.any():
            if copy:
                y = np.array(y, copy=True)
                copy = False
            y[idx] += 2 * (lb[idx] - al[idx] - y[idx])
        # quadratic pieces near the two bounds
        idx = y < lb + al
        if idx.any():
            if copy:
                y = np.array(y, copy=True)
                copy = False
            y[idx] = lb[idx] + (y[idx] - (lb[idx] - al[idx]))**2 / 4 / al[idx]
        idx = y > ub - au
        if idx.any():
            if copy:
                y = np.array(y, copy=True)
                copy = False
            y[idx] = ub[idx] - (y[idx] - (ub[idx] + au[idx]))**2 / 4 / au[idx]
        # assert Mh.vequals_approximately(y, BoxConstraintsTransformationBase.__call__(self, solution_genotype))
        return y
    # bug fix: was ``__call__.doc = ...`` which only set an unused custom
    # attribute; the intent is to inherit the base-class documentation
    __call__.__doc__ = BoxConstraintsTransformationBase.__doc__
    transform = __call__
    def idx_infeasible(self, solution_genotype):
        """return indices of "infeasible" variables, that is,
        variables that do not directly map into the feasible domain such that
        ``tf.inverse(tf(x)) == x``.
        """
        res = [i for i, x in enumerate(solution_genotype)
               if not self.is_feasible_i(x, i)]
        return res
    def is_feasible_i(self, x, i):
        """return True if value ``x`` is in the invertible domain of
        variable ``i``
        """
        lb = self._lb[self._index(i)]
        ub = self._ub[self._index(i)]
        al = self._al[self._index(i)]
        au = self._au[self._index(i)]
        return lb - al < x < ub + au
    def is_loosely_feasible_i(self, x, i):
        """never used"""
        lb = self._lb[self._index(i)]
        ub = self._ub[self._index(i)]
        al = self._al[self._index(i)]
        au = self._au[self._index(i)]
        return lb - 2 * al - (ub - lb) / 2.0 <= x <= ub + 2 * au + (ub - lb) / 2.0
    def shift_or_mirror_into_invertible_domain(self, solution_genotype,
                                               copy=False):
        """Details: input ``solution_genotype`` is changed. The domain is
        [lb - al, ub + au] and in [lb - 2*al - (ub - lb) / 2, lb - al]
        mirroring is applied.
        """
        assert solution_genotype is not None
        if copy:
            y = [val for val in solution_genotype]
        else:
            y = solution_genotype
        if isinstance(y, np.ndarray) and not isinstance(y[0], float):
            y = array(y, dtype=float)
        for i in rglen(y):
            lb = self._lb[self._index(i)]
            ub = self._ub[self._index(i)]
            al = self._al[self._index(i)]
            au = self._au[self._index(i)]
            # x is far from the boundary, compared to ub - lb
            if y[i] < lb - 2 * al - (ub - lb) / 2.0 or y[i] > ub + 2 * au + (ub - lb) / 2.0:
                r = 2 * (ub - lb + al + au)  # period
                s = lb - 2 * al - (ub - lb) / 2.0  # start
                y[i] -= r * ((y[i] - s) // r)  # shift
            if y[i] > ub + au:
                y[i] -= 2 * (y[i] - ub - au)
            if y[i] < lb - al:
                y[i] += 2 * (lb - al - y[i])
        return y
    shift_or_mirror_into_invertible_domain.__doc__ = BoxConstraintsTransformationBase.shift_or_mirror_into_invertible_domain.__doc__ + shift_or_mirror_into_invertible_domain.__doc__
    def _shift_or_mirror_into_invertible_i(self, x, i):
        """shift into the invertible domain [lb - ab, ub + au], mirror close to
        boundaries in order to get a smooth transformation everywhere
        """
        assert x is not None
        lb = self._lb[self._index(i)]
        ub = self._ub[self._index(i)]
        al = self._al[self._index(i)]
        au = self._au[self._index(i)]
        # x is far from the boundary, compared to ub - lb
        if x < lb - 2 * al - (ub - lb) / 2.0 or x > ub + 2 * au + (ub - lb) / 2.0:
            r = 2 * (ub - lb + al + au)  # period
            s = lb - 2 * al - (ub - lb) / 2.0  # start
            x -= r * ((x - s) // r)  # shift
        if x > ub + au:
            x -= 2 * (x - ub - au)
        if x < lb - al:
            x += 2 * (lb - al - x)
        return x
    def _transform_i(self, x, i):
        """return transform of x in component i"""
        x = self._shift_or_mirror_into_invertible_i(x, i)
        lb = self._lb[self._index(i)]
        ub = self._ub[self._index(i)]
        al = self._al[self._index(i)]
        au = self._au[self._index(i)]
        if x < lb + al:
            return lb + (x - (lb - al))**2 / 4 / al
        elif x < ub - au:
            return x
        elif x < ub + 3 * au:
            return ub - (x - (ub + au))**2 / 4 / au
        else:
            assert False  # shift removes this case
            # fallback kept for runs with assertions disabled (python -O)
            return ub + au - (x - (ub + au))
    def _inverse_i(self, y, i):
        """return inverse of y in component i"""
        lb = self._lb[self._index(i)]
        ub = self._ub[self._index(i)]
        al = self._al[self._index(i)]
        au = self._au[self._index(i)]
        if 1 < 3:
            if not lb <= y <= ub:
                raise ValueError('argument of inverse must be within the given bounds')
        if y < lb + al:
            return (lb - al) + 2 * (al * (y - lb))**0.5
        elif y < ub - au:
            return y
        else:
            return (ub + au) - 2 * (au * (ub - y))**0.5
class GenoPheno(object):
    """Genotype-phenotype transformation.
    Method `pheno` provides the transformation from geno- to phenotype,
    that is from the internal representation to the representation used
    in the objective function. Method `geno` provides the "inverse" pheno-
    to genotype transformation. The geno-phenotype transformation comprises,
    in this order:
    - insert fixed variables (with the phenotypic and therefore quite
      possibly "wrong" values)
    - affine linear transformation (first scaling then shift)
    - user-defined transformation
    - repair (e.g. into feasible domain due to boundaries)
    - assign fixed variables their original phenotypic value
    By default all transformations are the identity. The repair is only applied,
    if the transformation is given as argument to the method `pheno`.
    ``geno`` is only necessary, if solutions have been injected.
    """
    def __init__(self, dim, scaling=None, typical_x=None,
                 fixed_values=None, tf=None):
        """return `GenoPheno` instance with phenotypic dimension `dim`.
        Keyword Arguments
        -----------------
        `scaling`
            the diagonal of a scaling transformation matrix, multipliers
            in the genotyp-phenotyp transformation, see `typical_x`
        `typical_x`
            ``pheno = scaling*geno + typical_x``
        `fixed_values`
            a dictionary of variable indices and values, like ``{0:2.0, 2:1.1}``,
            that are not subject to change, negative indices are ignored
            (they act like incommenting the index), values are phenotypic
            values.
        `tf`
            list of two user-defined transformation functions, or `None`.
            ``tf[0]`` is a function that transforms the internal representation
            as used by the optimizer into a solution as used by the
            objective function. ``tf[1]`` does the back-transformation.
            For example::
                tf_0 = lambda x: [xi**2 for xi in x]
                tf_1 = lambda x: [abs(xi)**0.5 for xi in x]
            or "equivalently" without the `lambda` construct::
                def tf_0(x):
                    return [xi**2 for xi in x]
                def tf_1(x):
                    return [abs(xi)**0.5 for xi in x]
            ``tf=[tf_0, tf_1]`` is a reasonable way to guaranty that only positive
            values are used in the objective function.
        Details
        -------
        If ``tf_0`` is not the identity and ``tf_1`` is ommitted,
        the genotype of ``x0`` cannot be computed consistently and
        "injection" of phenotypic solutions is likely to lead to
        unexpected results.
        """
        self.N = dim
        self.fixed_values = fixed_values
        if tf is not None:
            self.tf_pheno = tf[0]
            self.tf_geno = tf[1]  # TODO: should not necessarily be needed
            # r = np.random.randn(dim)
            # assert all(tf[0](tf[1](r)) - r < 1e-7)
            # r = np.random.randn(dim)
            # assert all(tf[0](tf[1](r)) - r > -1e-7)
            _print_warning("in class GenoPheno: user defined transformations have not been tested thoroughly")
        else:
            self.tf_geno = None
            self.tf_pheno = None
        if fixed_values:
            if not isinstance(fixed_values, dict):
                raise _Error("fixed_values must be a dictionary {index:value,...}")
            if max(fixed_values.keys()) >= dim:
                raise _Error("max(fixed_values.keys()) = " + str(max(fixed_values.keys())) +
                             " >= dim=N=" + str(dim) + " is not a feasible index")
            # convenience commenting functionality: drop negative keys
            for k in list(fixed_values.keys()):
                if k < 0:
                    fixed_values.pop(k)
        def vec_is_default(vec, default_val=0):
            """return True if `vec` has the value `default_val`,
            None or [None] are also recognized as default
            """
            # TODO: rather let default_val be a list of default values,
            # cave comparison of arrays
            try:
                if len(vec) == 1:
                    vec = vec[0]  # [None] becomes None and is always default
            except TypeError:
                pass  # vec is a scalar
            if vec is None or all(vec == default_val):
                return True
            if all([val is None or val == default_val for val in vec]):
                return True
            return False
        # self.scales is either the int sentinel 1 (default / no scaling)
        # or a 1-D numpy array; likewise self.typical_x with sentinel 0
        self.scales = array(scaling) if scaling is not None else None
        if vec_is_default(self.scales, 1):
            self.scales = 1  # CAVE: 1 is not array(1)
        elif self.scales.shape != () and len(self.scales) != self.N:
            # fix: was ``shape is not ()`` — identity comparison with a
            # tuple literal is implementation-dependent
            raise _Error('len(scales) == ' + str(len(self.scales)) +
                         ' does not match dimension N == ' + str(self.N))
        self.typical_x = array(typical_x) if typical_x is not None else None
        if vec_is_default(self.typical_x, 0):
            self.typical_x = 0
        elif self.typical_x.shape != () and len(self.typical_x) != self.N:
            raise _Error('len(typical_x) == ' + str(len(self.typical_x)) +
                         ' does not match dimension N == ' + str(self.N))
        # the transformation is the identity iff scaling and shift kept
        # their integer sentinel defaults (``is 1`` / ``is 0`` literal
        # identity checks replaced by equivalent isinstance tests)
        if (not isinstance(self.scales, np.ndarray) and
                not isinstance(self.typical_x, np.ndarray) and
                self.fixed_values is None and
                self.tf_pheno is None):
            self.isidentity = True
        else:
            self.isidentity = False
        if self.tf_pheno is None:
            self.islinear = True
        else:
            self.islinear = False
    def pheno(self, x, into_bounds=None, copy=True, copy_always=False,
              archive=None, iteration=None):
        """maps the genotypic input argument into the phenotypic space,
        see help for class `GenoPheno`
        Details
        -------
        If ``copy``, values from ``x`` are copied if changed under the transformation.
        """
        # TODO: copy_always seems superfluous, as it could be done in the calling code
        input_type = type(x)
        if into_bounds is None:
            into_bounds = (lambda x, copy=False:
                           x if not copy else array(x, copy=copy))
        if copy_always and not copy:
            raise ValueError('arguments copy_always=' + str(copy_always) +
                             ' and copy=' + str(copy) + ' have inconsistent values')
        if copy_always:
            x = array(x, copy=True)
            copy = False
        if self.isidentity:
            y = into_bounds(x)  # was into_bounds(x, False) before (bug before v0.96.22)
        else:
            if self.fixed_values is None:
                y = array(x, copy=copy)  # make a copy, in case
            else:  # expand with fixed values
                y = list(x)  # is a copy
                for i in sorted(self.fixed_values.keys()):
                    y.insert(i, self.fixed_values[i])
                y = array(y, copy=False)
            copy = False
            if isinstance(self.scales, np.ndarray):  # just for efficiency
                y *= self.scales
            if isinstance(self.typical_x, np.ndarray):
                y += self.typical_x
            if self.tf_pheno is not None:
                y = array(self.tf_pheno(y), copy=False)
            y = into_bounds(y, copy)  # copy is False
            if self.fixed_values is not None:
                # re-assign the exact phenotypic values after scaling/shift
                for i, k in list(self.fixed_values.items()):
                    y[i] = k
        if input_type is np.ndarray:
            y = array(y, copy=False)
        if archive is not None:
            archive.insert(y, geno=x, iteration=iteration)
        return y
    def geno(self, y, from_bounds=None,
             copy_if_changed=True, copy_always=False,
             repair=None, archive=None):
        """maps the phenotypic input argument into the genotypic space,
        that is, computes essentially the inverse of ``pheno``.
        By default a copy is made only to prevent to modify ``y``.
        The inverse of the user-defined transformation (if any)
        is only needed if external solutions are injected, it is not
        applied to the initial solution x0.
        Details
        =======
        ``geno`` searches first in ``archive`` for the genotype of
        ``y`` and returns the found value, typically unrepaired.
        Otherwise, first ``from_bounds`` is applied, to revert a
        projection into the bound domain (if necessary) and ``pheno``
        is reverted. ``repair`` is applied last, and is usually the
        method ``CMAEvolutionStrategy.repair_genotype`` that limits the
        Mahalanobis norm of ``geno(y) - mean``.
        """
        if from_bounds is None:
            from_bounds = lambda x, copy=False: x  # not change, no copy
        if archive is not None:
            try:
                x = archive[y]['geno']
            except (KeyError, TypeError):
                x = None
            if x is not None:
                if archive[y]['iteration'] < archive.last_iteration \
                        and repair is not None:
                    x = repair(x, copy_if_changed=copy_always)
                return x
        input_type = type(y)
        x = y
        if copy_always:
            x = array(y, copy=True)
            copy = False
        else:
            copy = copy_if_changed
        x = from_bounds(x, copy)
        if self.isidentity:
            if repair is not None:
                x = repair(x, copy)
            return x
        if copy:  # could be improved?
            x = array(x, copy=True)
            copy = False
        # user-defined transformation
        if self.tf_geno is not None:
            x = array(self.tf_geno(x), copy=False)
        elif self.tf_pheno is not None:
            raise ValueError('t1 of options transformation was not defined but is needed as being the inverse of t0')
        # affine-linear transformation: shift and scaling
        if isinstance(self.typical_x, np.ndarray):
            x -= self.typical_x
        if isinstance(self.scales, np.ndarray):  # just for efficiency
            x /= self.scales
        # kick out fixed_values
        if self.fixed_values is not None:
            # keeping the transformed values does not help much
            # therefore it is omitted
            if 1 < 3:
                keys = sorted(self.fixed_values.keys())
                x = array([x[i] for i in range(len(x)) if i not in keys],
                          copy=False)
        # repair injected solutions
        if repair is not None:
            x = repair(x, copy)
        if input_type is np.ndarray:
            x = array(x, copy=False)
        return x
# ____________________________________________________________
# ____________________________________________________________
# check out built-in package abc: class ABCMeta, abstractmethod, abstractproperty...
# see http://docs.python.org/whatsnew/2.6.html PEP 3119 abstract base classes
#
class OOOptimizer(object):
    """"abstract" base class for an Object Oriented Optimizer interface.

    Relevant methods are `__init__`, `ask`, `tell`, `stop`, `result`,
    and `optimize`. Only `optimize` is fully implemented in this base
    class.

    Examples
    --------
    All examples minimize the function `elli`, the output is not shown.
    (A preferred environment to execute all examples is ``ipython`` in
    ``%pylab`` mode.)

    First we need::

        from cma import CMAEvolutionStrategy
        # CMAEvolutionStrategy derives from the OOOptimizer class
        felli = lambda x: sum(1e3**((i-1.)/(len(x)-1.)*x[i])**2 for i in range(len(x)))

    The shortest example uses the inherited method
    `OOOptimizer.optimize()`::

        es = CMAEvolutionStrategy(8 * [0.1], 0.5).optimize(felli)

    The input parameters to `CMAEvolutionStrategy` are specific to this
    inherited class. The remaining functionality is based on interface
    defined by `OOOptimizer`. We might have a look at the result::

        print(es.result()[0])  # best solution and
        print(es.result()[1])  # its function value

    In order to display more exciting output we do::

        es.logger.plot()  # if matplotlib is available

    Virtually the same example can be written with an explicit loop
    instead of using `optimize()`. This gives the necessary insight into
    the `OOOptimizer` class interface and entire control over the
    iteration loop::

        optim = CMAEvolutionStrategy(9 * [0.5], 0.3)
        # a new CMAEvolutionStrategy instance

        # this loop resembles optimize()
        while not optim.stop():  # iterate
            X = optim.ask()      # get candidate solutions
            f = [felli(x) for x in X]  # evaluate solutions
            #  in case do something else that needs to be done
            optim.tell(X, f)     # do all the real "update" work
            optim.disp(20)       # display info every 20th iteration
            optim.logger.add()   # log another "data line"

        # final output
        print('termination by', optim.stop())
        print('best f-value =', optim.result()[1])
        print('best solution =', optim.result()[0])
        optim.logger.plot()  # if matplotlib is available

    Details
    -------
    Most of the work is done in the method `tell(...)`. The method
    `result()` returns more useful output.

    """
    def __init__(self, xstart, **more_args):
        """``xstart`` is a mandatory argument"""
        self.xstart = xstart
        self.more_args = more_args
        # subclasses finish state setup in initialize(); calling it here
        # allows a later re-initialization to reuse the same code path
        self.initialize()
    def initialize(self):
        """(re-)set to the initial state"""
        # generic state is set first, then the subclass must take over
        self.countiter = 0
        self.xcurrent = self.xstart[:]
        raise NotImplementedError('method initialize() must be implemented in derived class')
    def ask(self, gradf=None, **more_args):
        """abstract method, AKA "get" or "sample_distribution", deliver
        new candidate solution(s), a list of "vectors"
        """
        raise NotImplementedError('method ask() must be implemented in derived class')
    def tell(self, solutions, function_values):
        """abstract method, AKA "update", pass f-values and prepare for
        next iteration
        """
        self.countiter += 1
        raise NotImplementedError('method tell() must be implemented in derived class')
    def stop(self):
        """abstract method, return satisfied termination conditions in
        a dictionary like ``{'termination reason': value, ...}``,
        for example ``{'tolfun': 1e-12}``, or the empty dictionary ``{}``.
        The implementation of `stop()` should prevent an infinite
        loop.
        """
        raise NotImplementedError('method stop() is not implemented')
    def disp(self, modulo=None):
        """abstract method, display some iteration infos if
        ``self.iteration_counter % modulo == 0``
        """
        pass  # raise NotImplementedError('method disp() is not implemented')
    def result(self):
        """abstract method, return ``(x, f(x), ...)``, that is, the
        minimizer, its function value, ...
        """
        raise NotImplementedError('method result() is not implemented')

    # previous ordering:
    #    def optimize(self, objectivefct,
    #                 logger=None, verb_disp=20,
    #                 iterations=None, min_iterations=1,
    #                 call_back=None):
    def optimize(self, objective_fct, iterations=None, min_iterations=1,
                 args=(), verb_disp=None, logger=None, call_back=None):
        """find minimizer of `objective_fct`.

        CAVEAT: the return value for `optimize` has changed to ``self``.

        Arguments
        ---------

            `objective_fct`
                function be to minimized
            `iterations`
                number of (maximal) iterations, while ``not self.stop()``
            `min_iterations`
                minimal number of iterations, even if ``not self.stop()``
            `args`
                arguments passed to `objective_fct`
            `verb_disp`
                print to screen every `verb_disp` iteration, if ``None``
                the value from ``self.logger`` is "inherited", if
                available.
            ``logger``
                a `BaseDataLogger` instance, which must be compatible
                with the type of ``self``.
            ``call_back``
                call back function called like ``call_back(self)`` or
                a list of call back functions.

        ``return self``, that is, the `OOOptimizer` instance.

        Example
        -------
        >>> import cma
        >>> es = cma.CMAEvolutionStrategy(7 * [0.1], 0.5
        ...              ).optimize(cma.fcts.rosen, verb_disp=100)
        (4_w,9)-CMA-ES (mu_w=2.8,w_1=49%) in dimension 7 (seed=630721393)
        Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
            1       9 3.163954777181882e+01 1.0e+00 4.12e-01  4e-01  4e-01 0:0.0
            2      18 3.299006223906629e+01 1.0e+00 3.60e-01  3e-01  4e-01 0:0.0
            3      27 1.389129389866704e+01 1.1e+00 3.18e-01  3e-01  3e-01 0:0.0
          100     900 2.494847340045985e+00 8.6e+00 5.03e-02  2e-02  5e-02 0:0.3
          200    1800 3.428234862999135e-01 1.7e+01 3.77e-02  6e-03  3e-02 0:0.5
          300    2700 3.216640032470860e-04 5.6e+01 6.62e-03  4e-04  9e-03 0:0.8
          400    3600 6.155215286199821e-12 6.6e+01 7.44e-06  1e-07  4e-06 0:1.1
          438    3942 1.187372505161762e-14 6.0e+01 3.27e-07  4e-09  9e-08 0:1.2
          438    3942 1.187372505161762e-14 6.0e+01 3.27e-07  4e-09  9e-08 0:1.2
        ('termination by', {'tolfun': 1e-11})
        ('best f-value =', 1.1189867885201275e-14)
        ('solution =', array([ 1.        ,  1.        ,  1.        ,  0.99999999,  0.99999998,
                0.99999996,  0.99999992]))
        >>> print(es.result()[0])
        array([ 1.          1.          1.          0.99999999  0.99999998  0.99999996
                0.99999992])
        """
        assert iterations is None or min_iterations <= iterations
        # attach the logger to self on first use; a logger passed in as an
        # argument takes precedence over a previously attached one
        if not hasattr(self, 'logger'):
            self.logger = logger
        logger = self.logger = logger or self.logger
        # normalize call_back to a list (a single callable or None is allowed)
        if not isinstance(call_back, list):
            call_back = [call_back]
        citer = 0
        while not self.stop() or citer < min_iterations:
            if iterations is not None and citer >= iterations:
                # NOTE(review): returns result() here although the CAVEAT in
                # the docstring says optimize now returns ``self`` -- confirm
                # whether this early-exit path was intentionally left unchanged
                return self.result()
            citer += 1
            X = self.ask()  # deliver candidate solutions
            fitvals = [objective_fct(x, *args) for x in X]
            self.tell(X, fitvals)  # all the work is done here
            self.disp(verb_disp)
            for f in call_back:
                # short-circuit idiom: call f only if it is not None
                f is None or f(self)
            # conditional expression used as a statement: log only if a logger exists
            logger.add(self) if logger else None

        # signal logger that we left the loop
        # TODO: this is very ugly, because it assumes modulo keyword
        # argument *and* modulo attribute to be available
        try:
            logger.add(self, modulo=bool(logger.modulo)) if logger else None
        except TypeError:
            print(' suppressing the final call of the logger in ' +
                  'OOOptimizer.optimize (modulo keyword parameter not ' +
                  'available)')
        except AttributeError:
            print(' suppressing the final call of the logger in ' +
                  'OOOptimizer.optimize (modulo attribute not ' +
                  'available)')
        if verb_disp:
            self.disp(1)
        if verb_disp in (1, True):
            print('termination by', self.stop())
            print('best f-value =', self.result()[1])
            print('solution =', self.result()[0])

        return self
        # was: return self.result() + (self.stop(), self, logger)
# module-level flag; presumably gates experimental code paths elsewhere in
# the file -- NOTE(review): confirm where it is read before relying on it
_experimental = False
class CMAAdaptSigmaBase(object):
    """step-size adaptation base class, implementing hsig functionality
    via an isotropic evolution path.

    Subclasses implement `update` to change ``es.sigma``; this base class
    maintains the isotropic path ``self.ps`` and provides `hsig`.
    """
    def __init__(self, *args, **kwargs):
        # parameters are set lazily in initialize_base(), once dimension
        # and mueff are available from the strategy instance
        self.is_initialized_base = False
        # iteration stamp, prevents updating ps twice in the same iteration
        self._ps_updated_iteration = -1
    def initialize_base(self, es):
        """set parameters and state variable based on dimension,
        mueff and possibly further options.
        """
        ## meta_parameters.cs_exponent == 1.0
        b = 1.0
        ## meta_parameters.cs_multiplier == 1.0
        # cumulation time constant for the isotropic evolution path
        self.cs = 1.0 * (es.sp.mueff + 2)**b / (es.N**b + (es.sp.mueff + 3)**b)
        self.ps = np.zeros(es.N)
        self.is_initialized_base = True
        return self
    def _update_ps(self, es):
        """update the isotropic evolution path

        :type es: CMAEvolutionStrategy
        """
        if not self.is_initialized_base:
            self.initialize_base(es)
        if self._ps_updated_iteration == es.countiter:
            return  # already updated in this iteration
        if es.countiter <= es.itereigenupdated:
            # es.B and es.D must/should be those from the last iteration
            # (together with the condition above this asserts equality)
            assert es.countiter >= es.itereigenupdated
            _print_warning('distribution transformation (B and D) have been updated before ps could be computed',
                           '_update_ps', 'CMAAdaptSigmaBase')
        # whitened mean shift: B diag(1/D) B^T == C^(-1/2) applied to
        # (mean - mean_old), normalized by the coordinate-wise sigma_vec
        z = dot(es.B, (1. / es.D) * dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec))
        z *= es.sp.mueff**0.5 / es.sigma / es.sp.cmean
        # exponential smoothing with normalization sqrt(cs * (2 - cs))
        self.ps = (1 - self.cs) * self.ps + sqrt(self.cs * (2 - self.cs)) * z
        self._ps_updated_iteration = es.countiter
    def hsig(self, es):
        """return "OK-signal" for rank-one update, `True` (OK) or `False`
        (stall rank-one update), based on the length of an evolution path
        """
        self._update_ps(es)
        if self.ps is None:
            return True
        # normalize by the expectation correction for a path started at zero
        squared_sum = sum(self.ps**2) / (1 - (1 - self.cs)**(2 * es.countiter))
        # correction with self.countiter seems not necessary,
        # as pc also starts with zero
        return squared_sum / es.N - 1 < 1 + 4. / (es.N + 1)
    def update(self, es, **kwargs):
        """update ``es.sigma``"""
        self._update_ps(es)
        raise NotImplementedError('must be implemented in a derived class')
class CMAAdaptSigmaNone(CMAAdaptSigmaBase):
    """dummy step-size adaptation: ``es.sigma`` is never changed."""
    def update(self, es, **kwargs):
        """no update, ``es.sigma`` remains constant.

        :param es: ``CMAEvolutionStrategy`` class instance
        :param kwargs: whatever else is needed to update ``es.sigma``
        """
        return None  # deliberate no-op
class CMAAdaptSigmaDistanceProportional(CMAAdaptSigmaBase):
    """artificial setting of ``sigma`` for test purposes, e.g.
    to simulate optimal progress rates.
    """
    def __init__(self, coefficient=1.2):
        # base class provides method hsig()
        super(CMAAdaptSigmaDistanceProportional, self).__init__()
        self.coefficient = coefficient
        self.is_initialized = True
    def update(self, es, **kwargs):
        # set sigma proportional to the distance of the mean to the
        # optimum (assumed at zero), scaled by mueff / (N * cmean)
        mean_norm = sum(es.mean**2)**0.5
        es.sigma = self.coefficient * es.sp.mueff * mean_norm / es.N / es.sp.cmean
class CMAAdaptSigmaCSA(CMAAdaptSigmaBase):
    """cumulative step-size adaptation (CSA) via the length of an
    isotropic evolution path, compared against its expectation ``chiN``.
    """
    def __init__(self):
        """postpone initialization to a method call where dimension and mueff should be known.

        """
        self.is_initialized = False
    def initialize(self, es):
        """set parameters and state variable based on dimension,
        mueff and possibly further options.

        """
        self.disregard_length_setting = True if es.opts['CSA_disregard_length'] else False
        # normalize option CSA_clip_length_value into a sorted two-element
        # list [lower, upper]; the option value is mutated in place in es.opts
        if es.opts['CSA_clip_length_value'] is not None:
            try:
                if len(es.opts['CSA_clip_length_value']) == 0:
                    es.opts['CSA_clip_length_value'] = [-np.Inf, np.Inf]
                elif len(es.opts['CSA_clip_length_value']) == 1:
                    es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value'][0]]
                elif len(es.opts['CSA_clip_length_value']) == 2:
                    es.opts['CSA_clip_length_value'] = np.sort(es.opts['CSA_clip_length_value'])
                else:
                    raise ValueError('option CSA_clip_length_value should be a number of len(.) in [1,2]')
            except TypeError:  # len(...) failed, a scalar was given
                es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value']]
            es.opts['CSA_clip_length_value'] = list(np.sort(es.opts['CSA_clip_length_value']))
            if es.opts['CSA_clip_length_value'][0] > 0 or es.opts['CSA_clip_length_value'][1] < 0:
                raise ValueError('option CSA_clip_length_value must be a single positive or a negative and a positive number')
        ## meta_parameters.cs_exponent == 1.0
        b = 1.0
        ## meta_parameters.cs_multiplier == 1.0
        # cumulation constant; with b == 1 this is (mueff+2)/(N+mueff+3)
        self.cs = 1.0 * (es.sp.mueff + 2)**b / (es.N + (es.sp.mueff + 3)**b)
        # damping: the mirrored-sampling and large-mueff terms increase damping
        self.damps = es.opts['CSA_dampfac'] * (0.5 +
                                               0.5 * min([1, (es.sp.lam_mirr / (0.159 * es.sp.popsize) - 1)**2])**1 +
                                               2 * max([0, ((es.sp.mueff - 1) / (es.N + 1))**es.opts['CSA_damp_mueff_exponent'] - 1]) +
                                               self.cs
                                               )
        self.max_delta_log_sigma = 1  # in symmetric use (strict lower bound is -cs/damps anyway)

        if self.disregard_length_setting:
            # clip the realized path length to its expectation and use
            # alternative (square-root) cumulation parameters
            es.opts['CSA_clip_length_value'] = [0, 0]
            ## meta_parameters.cs_exponent == 1.0
            b = 1.0 * 0.5
            ## meta_parameters.cs_multiplier == 1.0
            self.cs = 1.0 * (es.sp.mueff + 1)**b / (es.N**b + 2 * es.sp.mueff**b)
            self.damps = es.opts['CSA_dampfac'] * 1  # * (1.1 - 1/(es.N+1)**0.5)
        if es.opts['verbose'] > 1 or self.disregard_length_setting or 11 < 3:
            print('SigmaCSA Parameters')
            for k, v in list(self.__dict__.items()):
                print('    ', k, ':', v)
        self.ps = np.zeros(es.N)
        self._ps_updated_iteration = -1
        self.is_initialized = True
    def _update_ps(self, es):
        # overrides the base class version to support length clipping
        if not self.is_initialized:
            self.initialize(es)
        if self._ps_updated_iteration == es.countiter:
            return  # already updated in this iteration
        # whitened, normalized mean shift, as in the base class
        z = dot(es.B, (1. / es.D) * dot(es.B.T, (es.mean - es.mean_old) / es.sigma_vec))
        z *= es.sp.mueff**0.5 / es.sigma / es.sp.cmean
        # optionally clip the length of z around its expectation ~ sqrt(N)
        if es.opts['CSA_clip_length_value'] is not None:
            vals = es.opts['CSA_clip_length_value']
            min_len = es.N**0.5 + vals[0] * es.N / (es.N + 2)
            max_len = es.N**0.5 + vals[1] * es.N / (es.N + 2)
            act_len = sum(z**2)**0.5
            new_len = Mh.minmax(act_len, min_len, max_len)
            if new_len != act_len:
                z *= new_len / act_len
                # z *= (es.N / sum(z**2))**0.5  # ==> sum(z**2) == es.N
                # z *= es.const.chiN / sum(z**2)**0.5
        self.ps = (1 - self.cs) * self.ps + sqrt(self.cs * (2 - self.cs)) * z
        self._ps_updated_iteration = es.countiter
    def update(self, es, **kwargs):
        self._update_ps(es)  # caveat: if es.B or es.D are already updated and ps is not, this goes wrong!
        if es.opts['CSA_squared']:
            s = (sum(self.ps**2) / es.N - 1) / 2
            # sum(self.ps**2) / es.N has mean 1 and std sqrt(2/N) and is skewed
            # divided by 2 to have the derivative d/dx (x**2 / N - 1) for x**2=N equal to 1
        else:
            s = sum(self.ps**2)**0.5 / es.const.chiN - 1
        if es.opts['vv'] == 'pc for ps':
            # experimental variant: use the anisotropic path pc instead of ps
            s = sum((es.D**-1 * dot(es.B.T, es.pc))**2)**0.5 / es.const.chiN - 1
            s = (sum((es.D**-1 * dot(es.B.T, es.pc))**2) / es.N - 1) / 2
        s *= self.cs / self.damps
        # clip the log-sigma change to +- max_delta_log_sigma
        s_clipped = Mh.minmax(s, -self.max_delta_log_sigma, self.max_delta_log_sigma)
        es.sigma *= np.exp(s_clipped)
        # "error" handling
        if s_clipped != s:
            _print_warning('sigma change exp(' + str(s) + ') = ' + str(np.exp(s)) +
                           ' clipped to exp(+-' + str(self.max_delta_log_sigma) + ')',
                           'update',
                           'CMAAdaptSigmaCSA',
                           es.countiter, es.opts['verbose'])
class CMAAdaptSigmaMedianImprovement(CMAAdaptSigmaBase):
    """Compares median fitness against a fitness percentile of the previous iteration,
    see Ait ElHara et al, GECCO 2013.

    """
    def __init__(self):
        # CMAAdaptSigmaBase.__init__(self)
        super(CMAAdaptSigmaMedianImprovement, self).__init__()  # base class provides method hsig()
    def initialize(self, es):
        # fraction of "selected" individuals in the population
        r = es.sp.mueff / es.popsize
        # the percentile index of the previous population to compare with;
        # the second assignment immediately overrides the first (kept as TODO)
        self.index_to_compare = 0.5 * (r**0.5 + 2.0 * (1 - r**0.5) / log(es.N + 9)**2) * (es.popsize)  # TODO
        self.index_to_compare = (0.30 if not es.opts['vv']
                                 else es.opts['vv']) * es.popsize  # TODO
        self.damp = 2 - 2 / es.N  # sign-rule: 2
        self.c = 0.3  # sign-rule needs <= 0.3
        self.s = 0  # averaged statistics, usually between -1 and +1
    def update(self, es, **kwargs):
        if es.countiter < 2:
            # first call: only initialize and record the current fitness list
            self.initialize(es)
            self.fit = es.fit.fit
        else:
            # NOTE(review): ft1, ft2, ftt1, ftt2 are computed but never used below
            ft1, ft2 = self.fit[int(self.index_to_compare)], self.fit[int(np.ceil(self.index_to_compare))]
            ftt1, ftt2 = es.fit.fit[(es.popsize - 1) // 2], es.fit.fit[int(np.ceil((es.popsize - 1) / 2))]
            # fractional part of the comparison index, used to interpolate
            # between the two neighboring percentile positions
            pt2 = self.index_to_compare - int(self.index_to_compare)
            # ptt2 = (es.popsize - 1) / 2 - (es.popsize - 1) // 2  # not in use
            s = 0
            if 1 < 3:
                # count how many current f-values beat the reference
                # percentile of the previous iteration, interpolated via pt2
                s += pt2 * sum(es.fit.fit <= self.fit[int(np.ceil(self.index_to_compare))])
                s += (1 - pt2) * sum(es.fit.fit < self.fit[int(self.index_to_compare)])
                # center and rescale: the count range was popsize, is now [-1, 1]
                s -= es.popsize / 2.
                s *= 2. / es.popsize
            # exponential smoothing of the statistic, then apply to sigma
            self.s = (1 - self.c) * self.s + self.c * s
            es.sigma *= exp(self.s / self.damp)
            # es.more_to_write.append(10**(self.s))
            # es.more_to_write.append(10**((2 / es.popsize) * (sum(es.fit.fit < self.fit[int(self.index_to_compare)]) - (es.popsize + 1) / 2)))
            # # es.more_to_write.append(10**(self.index_to_compare - sum(self.fit <= es.fit.fit[es.popsize // 2])))
            # # es.more_to_write.append(10**(np.sign(self.fit[int(self.index_to_compare)] - es.fit.fit[es.popsize // 2])))
            self.fit = es.fit.fit
class CMAAdaptSigmaTPA(CMAAdaptSigmaBase):
    """two point adaptation for step-size sigma. Relies on a specific
    sampling of the first two offspring, whose objective function
    value ranks are used to decide on the step-size change.

    Example
    =======

    >>> import cma
    >>> cma.CMAOptions('adapt').pprint()
    >>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.1, {'AdaptSigma': cma.CMAAdaptSigmaTPA, 'ftarget': 1e-8})
    >>> es.optimize(cma.fcts.rosen)
    >>> assert 'ftarget' in es.stop()
    >>> assert es.result()[1] <= 1e-8
    >>> assert es.result()[2] < 6500  # typically < 5500

    References: loosely based on Hansen 2008, CMA-ES with Two-Point
    Step-Size Adaptation, more tightly based on an upcoming paper by
    Hansen et al.

    """
    def __init__(self, dimension=None, opts=None):
        """store `dimension` and `opts`; parameter setup is deferred to
        `initialize`, called lazily once the dimension is reliably known.
        """
        super(CMAAdaptSigmaTPA, self).__init__()  # base class provides method hsig()
        # CMAAdaptSigmaBase.__init__(self)
        self.initialized = False
        self.dimension = dimension
        self.opts = opts
    def initialize(self, N=None, opts=None):
        """set damping and cumulation parameters from dimension `N` and
        the options dict-like `opts` (keys ``'CSA_dampfac'``, ``'vv'``).
        """
        if N is None:
            N = self.dimension
        if opts is None:
            opts = self.opts
        try:
            damp_fac = opts['CSA_dampfac']  # should be renamed to sigma_adapt_dampfac or something
        except (TypeError, KeyError):
            damp_fac = 1

        self.sp = _BlancClass()  # just a container to have sp.name instead of sp['name'] to access parameters
        try:
            # bugfix: was ``damp_fac * eval('N')**0.5`` -- the ``eval`` was a
            # no-op on the local variable N and obscured the fallback path
            self.sp.damp = damp_fac * N**0.5  # why do we need 10 <-> exp(1/10) == 1.1? 2 should be fine!?
            # self.sp.damp = damp_fac * (4 - 3.6/N**0.5)
        except (TypeError, ValueError):
            # bugfix: was a bare ``except:`` that also swallowed
            # KeyboardInterrupt/SystemExit; TypeError covers N is None
            # (dimension unknown) or a non-numeric damp_fac
            self.sp.damp = 4  # - 3.6 / N**0.5  # should become new default
            _print_warning("dimension not known, damping set to 4",
                           'initialize', 'CMAAdaptSigmaTPA')
        try:
            # developer override via the 'vv' option
            if opts['vv'][0] == 'TPA_damp':
                self.sp.damp = opts['vv'][1]
                print('damp set to %d' % self.sp.damp)
        except TypeError:  # opts is None or opts['vv'] is not subscriptable
            pass

        self.sp.dampup = 0.5**0.0 * 1.0 * self.sp.damp  # 0.5 fails to converge on the Rastrigin function
        self.sp.dampdown = 2.0**0.0 * self.sp.damp
        if self.sp.dampup != self.sp.dampdown:
            print('TPA damping is asymmetric')
        self.sp.c = 0.3  # rank difference is asymmetric and therefore the switch from increase to decrease takes too long
        self.sp.z_exponent = 0.5  # sign(z) * abs(z)**z_exponent, 0.5 seems better with larger popsize, 1 was default
        self.sp.sigma_fac = 1.0  # (obsolete) 0.5 feels better, but no evidence whether it is
        self.sp.relative_to_delta_mean = True  # (obsolete)
        self.s = 0  # the state variable
        self.last = None
        self.initialized = True
        return self
    def update(self, es, function_values, **kwargs):
        """the first and second value in ``function_values``
        must reflect two mirrored solutions sampled
        in direction / in opposite direction of
        the previous mean shift, respectively.

        """
        # TODO: on the linear function, the two mirrored samples lead
        # to a sharp increase of condition of the covariance matrix.
        # They should not be used to update the covariance matrix,
        # if the step-size increases quickly. This should be fine with
        # negative updates though.
        if not self.initialized:
            self.initialize(es.N, es.opts)
        # use the ranking difference of the mirrors for adaptation
        # damp = 5 should be fine
        # (cleanup: removed a dead ``if 1 < 3:`` scaffold around this code)
        z = np.where(es.fit.idx == 1)[0][0] - np.where(es.fit.idx == 0)[0][0]
        z /= es.popsize - 1  # z in [-1, 1]
        self.s = (1 - self.sp.c) * self.s + self.sp.c * np.sign(z) * np.abs(z)**self.sp.z_exponent
        if self.s > 0:
            es.sigma *= exp(self.s / self.sp.dampup)
        else:
            es.sigma *= exp(self.s / self.sp.dampdown)
        # es.more_to_write.append(10**z)
# module-level flag; presumably selects the newer solution-injection code
# path in the strategy -- NOTE(review): confirm where it is read
new_injections = True
# ____________________________________________________________
# ____________________________________________________________
#
class CMAEvolutionStrategy(OOOptimizer):
"""CMA-ES stochastic optimizer class with ask-and-tell interface.
Calling Sequences
=================
es = CMAEvolutionStrategy(x0, sigma0)
es = CMAEvolutionStrategy(x0, sigma0, opts)
es = CMAEvolutionStrategy(x0, sigma0).optimize(objective_fct)
res = CMAEvolutionStrategy(x0, sigma0,
opts).optimize(objective_fct).result()
Arguments
=========
`x0`
initial solution, starting point. `x0` is given as "phenotype"
which means, if::
opts = {'transformation': [transform, inverse]}
is given and ``inverse is None``, the initial mean is not
consistent with `x0` in that ``transform(mean)`` does not
equal to `x0` unless ``transform(mean)`` equals ``mean``.
`sigma0`
initial standard deviation. The problem variables should
have been scaled, such that a single standard deviation
on all variables is useful and the optimum is expected to
lie within about `x0` +- ``3*sigma0``. See also options
`scaling_of_variables`. Often one wants to check for
solutions close to the initial point. This allows,
for example, for an easier check of consistency of the
objective function and its interfacing with the optimizer.
In this case, a much smaller `sigma0` is advisable.
`opts`
options, a dictionary with optional settings,
see class `CMAOptions`.
Main interface / usage
======================
The interface is inherited from the generic `OOOptimizer`
class (see also there). An object instance is generated from
es = cma.CMAEvolutionStrategy(8 * [0.5], 0.2)
The least verbose interface is via the optimize method::
es.optimize(objective_func)
res = es.result()
More verbosely, the optimization is done using the
methods ``stop``, ``ask``, and ``tell``::
while not es.stop():
solutions = es.ask()
es.tell(solutions, [cma.fcts.rosen(s) for s in solutions])
es.disp()
es.result_pretty()
where ``ask`` delivers new candidate solutions and ``tell`` updates
the ``optim`` instance by passing the respective function values
(the objective function ``cma.fcts.rosen`` can be replaced by any
properly defined objective function, see ``cma.fcts`` for more
examples).
To change an option, for example a termination condition to
continue the optimization, call
es.opts.set({'tolfacupx': 1e4})
The class `CMAEvolutionStrategy` also provides::
(solutions, func_values) = es.ask_and_eval(objective_func)
and an entire optimization can also be written like::
while not es.stop():
es.tell(*es.ask_and_eval(objective_func))
Besides for termination criteria, in CMA-ES only the ranks of the
`func_values` are relevant.
Attributes and Properties
=========================
- `inputargs` -- passed input arguments
- `inopts` -- passed options
- `opts` -- actually used options, some of them can be changed any
time via ``opts.set``, see class `CMAOptions`
- `popsize` -- population size lambda, number of candidate
solutions returned by `ask()`
- `logger` -- a `CMADataLogger` instance utilized by `optimize`
Examples
========
Super-short example, with output shown:
>>> import cma
>>> # construct an object instance in 4-D, sigma0=1:
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'seed':234})
(4_w,8)-CMA-ES (mu_w=2.6,w_1=52%) in dimension 4 (seed=234)
>>>
>>> # optimize the ellipsoid function
>>> es.optimize(cma.fcts.elli, verb_disp=1)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 8 2.093015112685775e+04 1.0e+00 9.27e-01 9e-01 9e-01 0:0.0
2 16 4.964814235917688e+04 1.1e+00 9.54e-01 9e-01 1e+00 0:0.0
3 24 2.876682459926845e+05 1.2e+00 1.02e+00 9e-01 1e+00 0:0.0
100 800 6.809045875281943e-01 1.3e+02 1.41e-02 1e-04 1e-02 0:0.2
200 1600 2.473662150861846e-10 8.0e+02 3.08e-05 1e-08 8e-06 0:0.5
233 1864 2.766344961865341e-14 8.6e+02 7.99e-07 8e-11 7e-08 0:0.6
>>>
>>> cma.pprint(es.result())
(array([ -1.98546755e-09, -1.10214235e-09, 6.43822409e-11,
-1.68621326e-11]),
4.5119610261406537e-16,
1666,
1672,
209,
array([ -9.13545269e-09, -1.45520541e-09, -6.47755631e-11,
-1.00643523e-11]),
array([ 3.20258681e-08, 3.15614974e-09, 2.75282215e-10,
3.27482983e-11]))
>>> assert es.result()[1] < 1e-9
>>> help(es.result)
Help on method result in module cma:
result(self) method of cma.CMAEvolutionStrategy instance
return ``(xbest, f(xbest), evaluations_xbest, evaluations, iterations, pheno(xmean), effective_stds)``
The optimization loop can also be written explicitly.
>>> import cma
>>> es = cma.CMAEvolutionStrategy(4 * [1], 1)
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [cma.fcts.elli(x) for x in X])
... es.disp()
<output omitted>
achieving the same result as above.
An example with lower bounds (at zero) and handling infeasible
solutions:
>>> import cma
>>> import numpy as np
>>> es = cma.CMAEvolutionStrategy(10 * [0.2], 0.5, {'bounds': [0, np.inf]})
>>> while not es.stop():
... fit, X = [], []
... while len(X) < es.popsize:
... curr_fit = None
... while curr_fit in (None, np.NaN):
... x = es.ask(1)[0]
... curr_fit = cma.fcts.somenan(x, cma.fcts.elli) # might return np.NaN
... X.append(x)
... fit.append(curr_fit)
... es.tell(X, fit)
... es.logger.add()
... es.disp()
<output omitted>
>>>
>>> assert es.result()[1] < 1e-9
>>> assert es.result()[2] < 9000 # by internal termination
>>> # es.logger.plot() # will plot data
>>> # cma.show() # display plot window
An example with user-defined transformation, in this case to realize
a lower bound of 2.
>>> es = cma.CMAEvolutionStrategy(5 * [3], 1,
... {"transformation": [lambda x: x**2+2, None]})
>>> es.optimize(cma.fcts.rosen)
<output omitted>
>>> assert cma.fcts.rosen(es.result()[0]) < 1e-6 + 5.530760944396627e+02
>>> assert es.result()[2] < 3300
The inverse transformation is (only) necessary if the `BoundPenalty`
boundary handler is used at the same time.
The ``CMAEvolutionStrategy`` class also provides a default logger
(cave: files are overwritten when the logger is used with the same
filename prefix):
>>> import cma
>>> es = cma.CMAEvolutionStrategy(4 * [0.2], 0.5, {'verb_disp': 0})
>>> es.logger.disp_header() # to understand the print of disp
Iterat Nfevals function value axis ratio maxstd minstd
>>> while not es.stop():
... X = es.ask()
... es.tell(X, [cma.fcts.sphere(x) for x in X])
... es.logger.add() # log current iteration
... es.logger.disp([-1]) # display info for last iteration
1 8 2.72769793021748e+03 1.0e+00 4.05e-01 3.99e-01
2 16 6.58755537926063e+03 1.1e+00 4.00e-01 3.39e-01
<output ommitted>
193 1544 3.15195320957214e-15 1.2e+03 3.70e-08 3.45e-11
>>> es.logger.disp_header()
Iterat Nfevals function value axis ratio maxstd minstd
>>> # es.logger.plot() # will make a plot
Example implementing restarts with increasing popsize (IPOP), output
is not displayed:
>>> import cma, numpy as np
>>>
>>> # restart with increasing population size (IPOP)
>>> bestever = cma.BestSolution()
>>> for lam in 10 * 2**np.arange(8): # 10, 20, 40, 80, ..., 10 * 2**7
... es = cma.CMAEvolutionStrategy('6 - 8 * np.random.rand(9)', # 9-D
... 5, # initial std sigma0
... {'popsize': lam, # options
... 'verb_append': bestever.evalsall})
... logger = cma.CMADataLogger().register(es, append=bestever.evalsall)
... while not es.stop():
... X = es.ask() # get list of new solutions
... fit = [cma.fcts.rastrigin(x) for x in X] # evaluate each solution
... es.tell(X, fit) # besides for termination only the ranking in fit is used
...
... # display some output
... logger.add() # add a "data point" to the log, writing in files
... es.disp() # uses option verb_disp with default 100
...
... print('termination:', es.stop())
... cma.pprint(es.best.__dict__)
...
... bestever.update(es.best)
...
... # show a plot
... # logger.plot();
... if bestever.f < 1e-8: # global optimum was hit
... break
<output omitted>
>>> assert es.result()[1] < 1e-8
On the Rastrigin function, usually after five restarts the global
optimum is located.
Using the ``multiprocessing`` module, we can evaluate the function in
parallel with a simple modification of the example (however
multiprocessing seems not always reliable)::
try:
import multiprocessing as mp
import cma
es = cma.CMAEvolutionStrategy(22 * [0.0], 1.0, {'maxiter':10})
pool = mp.Pool(es.popsize)
while not es.stop():
X = es.ask()
f_values = pool.map_async(cma.felli, X).get()
# use chunksize parameter as es.popsize/len(pool)?
es.tell(X, f_values)
es.disp()
es.logger.add()
except ImportError:
pass
The final example shows how to resume:
>>> import cma, pickle
>>>
>>> es = cma.CMAEvolutionStrategy(12 * [0.1], # a new instance, 12-D
... 0.5) # initial std sigma0
>>> es.optimize(cma.fcts.rosen, iterations=100)
>>> pickle.dump(es, open('saved-cma-object.pkl', 'wb'))
>>> print('saved')
>>> del es # let's start fresh
>>>
>>> es = pickle.load(open('saved-cma-object.pkl', 'rb'))
>>> print('resumed')
>>> es.optimize(cma.fcts.rosen, verb_disp=200)
>>> assert es.result()[2] < 15000
>>> cma.pprint(es.result())
Details
=======
The following two enhancements are implemented, the latter is turned
on by default only for very small population size.
*Active CMA* is implemented with option ``CMA_active`` and
conducts an update of the covariance matrix with negative weights.
The negative update is implemented, such that positive definiteness
is guarantied. The update is applied after the default update and
only before the covariance matrix is decomposed, which limits the
additional computational burden to be at most a factor of three
(typically smaller). A typical speed up factor (number of
f-evaluations) is between 1.1 and two.
References: Jastrebski and Arnold, CEC 2006, Glasmachers et al, GECCO 2010.
*Selective mirroring* is implemented with option ``CMA_mirrors``
in the method ``get_mirror()``. Only the method `ask_and_eval()`
(used by `fmin`) will then sample selectively mirrored vectors. In
selective mirroring, only the worst solutions are mirrored. With
the default small number of mirrors, *pairwise selection* (where at
most one of the two mirrors contribute to the update of the
distribution mean) is implicitly guarantied under selective
mirroring and therefore not explicitly implemented.
References: Brockhoff et al, PPSN 2010, Auger et al, GECCO 2011.
:See: `fmin()`, `OOOptimizer`, `CMAOptions`, `plot()`, `ask()`,
`tell()`, `ask_and_eval()`
"""
    @property  # read only attribute decorator for a method
    def popsize(self):
        """number of samples by default returned by `ask()`

        (lambda in CMA terminology, taken from the strategy
        parameters ``self.sp``)
        """
        return self.sp.popsize
    @popsize.setter
    def popsize(self, p):
        """popsize cannot be set (this might change in future)

        changing lambda mid-run would require re-deriving the dependent
        strategy parameters stored in ``self.sp``
        """
        raise _Error("popsize cannot be changed")
    def stop(self, check=True):
        """return a dictionary with the termination status.
        With ``check==False``, the termination conditions are not checked
        and the status might not reflect the current situation.

        """
        # the string comparison skips the case where the option still holds
        # a plain string (placeholder) instead of a callable; a real callable
        # never compares equal to its str() representation
        if (check and self.countiter > 0 and self.opts['termination_callback'] and
                self.opts['termination_callback'] != str(self.opts['termination_callback'])):
            self.callbackstop = self.opts['termination_callback'](self)

        return self._stopdict(self, check)  # update the stopdict and return a Dict
def copy_constructor(self, es):
raise NotImplementedError("")
def __init__(self, x0, sigma0, inopts={}):
"""see class `CMAEvolutionStrategy`
"""
if isinstance(x0, CMAEvolutionStrategy):
self.copy_constructor(x0)
return
self.inputargs = dict(locals()) # for the record
del self.inputargs['self'] # otherwise the instance self has a cyclic reference
self.inopts = inopts
opts = CMAOptions(inopts).complement() # CMAOptions() == fmin([],[]) == defaultOptions()
global_verbosity = opts.eval('verbose')
if global_verbosity < -8:
opts['verb_disp'] = 0
opts['verb_log'] = 0
opts['verb_plot'] = 0
if 'noise_handling' in opts and opts.eval('noise_handling'):
raise ValueError('noise_handling not available with class CMAEvolutionStrategy, use function fmin')
if 'restarts' in opts and opts.eval('restarts'):
raise ValueError('restarts not available with class CMAEvolutionStrategy, use function fmin')
self._set_x0(x0) # manage weird shapes, set self.x0
self.N_pheno = len(self.x0)
self.sigma0 = sigma0
if isinstance(sigma0, str):
# TODO: no real need here (do rather in fmin)
self.sigma0 = eval(sigma0) # like '1./N' or 'np.random.rand(1)[0]+1e-2'
if np.size(self.sigma0) != 1 or np.shape(self.sigma0):
raise _Error('input argument sigma0 must be (or evaluate to) a scalar')
self.sigma = self.sigma0 # goes to inialize
# extract/expand options
N = self.N_pheno
assert isinstance(opts['fixed_variables'], (str, dict)) \
or opts['fixed_variables'] is None
# TODO: in case of a string we need to eval the fixed_variables
if isinstance(opts['fixed_variables'], dict):
N = self.N_pheno - len(opts['fixed_variables'])
opts.evalall(locals()) # using only N
self.opts = opts
self.randn = opts['randn']
self.gp = GenoPheno(self.N_pheno, opts['scaling_of_variables'], opts['typical_x'],
opts['fixed_variables'], opts['transformation'])
self.boundary_handler = opts.eval('boundary_handling')(opts.eval('bounds'))
if not self.boundary_handler.has_bounds():
self.boundary_handler = BoundNone() # just a little faster and well defined
elif not self.boundary_handler.is_in_bounds(self.x0):
if opts['verbose'] >= 0:
_print_warning('initial solution is out of the domain boundaries:')
print(' x0 = ' + str(self.gp.pheno(self.x0)))
print(' ldom = ' + str(self.boundary_handler.bounds[0]))
print(' udom = ' + str(self.boundary_handler.bounds[1]))
# set self.mean to geno(x0)
tf_geno_backup = self.gp.tf_geno
if self.gp.tf_pheno and self.gp.tf_geno is None:
self.gp.tf_geno = lambda x: x # a hack to avoid an exception
_print_warning("""
computed initial point is likely to be wrong, because
no inverse was found of user provided phenotype
transformation""")
self.mean = self.gp.geno(self.x0,
from_bounds=self.boundary_handler.inverse,
copy_always=True)
self.gp.tf_geno = tf_geno_backup
# without copy_always interface:
# self.mean = self.gp.geno(array(self.x0, copy=True), copy_if_changed=False)
self.N = len(self.mean)
assert N == self.N
self.fmean = np.NaN # TODO name should change? prints nan in output files (OK with matlab&octave)
self.fmean_noise_free = 0. # for output only
self.adapt_sigma = opts['AdaptSigma']
if self.adapt_sigma is False:
self.adapt_sigma = CMAAdaptSigmaNone
self.adapt_sigma = self.adapt_sigma() # class instance
self.sp = _CMAParameters(N, opts)
self.sp0 = self.sp # looks useless, as it is not a copy
# initialization of state variables
self.countiter = 0
self.countevals = max((0, opts['verb_append'])) \
if not isinstance(opts['verb_append'], bool) else 0
self.pc = np.zeros(N)
self.pc_neg = np.zeros(N)
def eval_scaling_vector(in_):
res = 1
if np.all(in_):
res = array(in_, dtype=float)
if np.size(res) not in (1, N):
raise ValueError("""CMA_stds option must have dimension %d
instead of %d""" %
(str(N), np.size(res)))
return res
self.sigma_vec = eval_scaling_vector(self.opts['CMA_stds'])
if isfinite(self.opts['CMA_dampsvec_fac']):
self.sigma_vec *= np.ones(N) # make sure to get a vector
self.sigma_vec0 = self.sigma_vec if isscalar(self.sigma_vec) \
else self.sigma_vec.copy()
stds = eval_scaling_vector(self.opts['CMA_teststds'])
if self.opts['CMA_diagonal']: # is True or > 0
# linear time and space complexity
self.B = array(1) # fine for np.dot(self.B, .) and self.B.T
self.C = stds**2 * np.ones(N) # in case stds == 1
self.dC = self.C
else:
self.B = np.eye(N) # identity(N)
# prevent equal eigenvals, a hack for np.linalg:
# self.C = np.diag(stds**2 * exp(1e-4 * np.random.rand(N)))
self.C = np.diag(stds**2 * exp((1e-4 / N) * np.arange(N)))
self.dC = np.diag(self.C).copy()
self._Yneg = np.zeros((N, N))
self.D = self.dC**0.5 # we assume that C is diagonal
# self.gp.pheno adds fixed variables
relative_stds = ((self.gp.pheno(self.mean + self.sigma * self.sigma_vec * self.D)
- self.gp.pheno(self.mean - self.sigma * self.sigma_vec * self.D)) / 2.0
/ (self.boundary_handler.get_bounds('upper', self.N_pheno)
- self.boundary_handler.get_bounds('lower', self.N_pheno)))
if np.any(relative_stds > 1):
raise ValueError('initial standard deviations larger than the bounded domain size in variables '
+ str(np.where(relative_stds > 1)[0]))
self._flgtelldone = True
self.itereigenupdated = self.countiter
self.count_eigen = 0
self.noiseS = 0 # noise "signal"
self.hsiglist = []
if not opts['seed']:
np.random.seed()
six_decimals = (time.time() - 1e6 * (time.time() // 1e6))
opts['seed'] = 1e5 * np.random.rand() + six_decimals + 1e5 * (time.time() % 1)
opts['seed'] = int(opts['seed'])
np.random.seed(opts['seed']) # CAVEAT: this only seeds np.random
self.sent_solutions = CMASolutionDict()
self.archive = CMASolutionDict()
self.best = BestSolution()
self.const = _BlancClass()
self.const.chiN = N**0.5 * (1 - 1. / (4.*N) + 1. / (21.*N**2)) # expectation of norm(randn(N,1))
self.logger = CMADataLogger(opts['verb_filenameprefix'], modulo=opts['verb_log']).register(self)
# attribute for stopping criteria in function stop
self._stopdict = _CMAStopDict()
self.callbackstop = 0
self.fit = _BlancClass()
self.fit.fit = [] # not really necessary
self.fit.hist = [] # short history of best
self.fit.histbest = [] # long history of best
self.fit.histmedian = [] # long history of median
self.more_to_write = [] # [1, 1, 1, 1] # N*[1] # needed when writing takes place before setting
# say hello
if opts['verb_disp'] > 0 and opts['verbose'] >= 0:
sweighted = '_w' if self.sp.mu > 1 else ''
smirr = 'mirr%d' % (self.sp.lam_mirr) if self.sp.lam_mirr else ''
print('(%d' % (self.sp.mu) + sweighted + ',%d' % (self.sp.popsize) + smirr +
')-' + ('a' if opts['CMA_active'] else '') + 'CMA-ES' +
' (mu_w=%2.1f,w_1=%d%%)' % (self.sp.mueff, int(100 * self.sp.weights[0])) +
' in dimension %d (seed=%d, %s)' % (N, opts['seed'], time.asctime())) # + func.__name__
if opts['CMA_diagonal'] and self.sp.CMA_on:
s = ''
if opts['CMA_diagonal'] is not True:
s = ' for '
if opts['CMA_diagonal'] < np.inf:
s += str(int(opts['CMA_diagonal']))
else:
s += str(np.floor(opts['CMA_diagonal']))
s += ' iterations'
s += ' (1/ccov=' + str(round(1. / (self.sp.c1 + self.sp.cmu))) + ')'
print(' Covariance matrix is diagonal' + s)
def _set_x0(self, x0):
if x0 == str(x0):
x0 = eval(x0)
self.x0 = array(x0) # should not have column or row, is just 1-D
if self.x0.ndim == 2:
if self.opts.eval('verbose') >= 0:
_print_warning('input x0 should be a list or 1-D array, trying to flatten ' +
str(self.x0.shape) + '-array')
if self.x0.shape[0] == 1:
self.x0 = self.x0[0]
elif self.x0.shape[1] == 1:
self.x0 = array([x[0] for x in self.x0])
if self.x0.ndim != 1:
raise _Error('x0 must be 1-D array')
if len(self.x0) <= 1:
raise _Error('optimization in 1-D is not supported (code was never tested)')
self.x0.resize(self.x0.shape[0]) # 1-D array, not really necessary?!
# ____________________________________________________________
# ____________________________________________________________
def ask(self, number=None, xmean=None, sigma_fac=1,
gradf=None, args=()):
"""get new candidate solutions, sampled from a multi-variate
normal distribution and transformed to f-representation
(phenotype) to be evaluated.
Arguments
---------
`number`
number of returned solutions, by default the
population size ``popsize`` (AKA ``lambda``).
`xmean`
distribution mean, phenotyp?
`sigma_fac`
multiplier for internal sample width (standard
deviation)
`gradf`
gradient, ``len(gradf(x)) == len(x)``, if
``gradf is not None`` the third solution in the
returned list is "sampled" in supposedly Newton
direction ``dot(C, gradf(xmean, *args))``.
`args`
additional arguments passed to gradf
Return
------
A list of N-dimensional candidate solutions to be evaluated
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy([0,0,0,0], 0.3)
>>> while not es.stop() and es.best.f > 1e-6: # my_desired_target_f_value
... X = es.ask() # get list of new solutions
... fit = [cma.fcts.rosen(x) for x in X] # call function rosen with each solution
... es.tell(X, fit) # feed values
:See: `ask_and_eval`, `ask_geno`, `tell`
"""
pop_geno = self.ask_geno(number, xmean, sigma_fac)
# N,lambda=20,200: overall CPU 7s vs 5s == 40% overhead, even without bounds!
# new data: 11.5s vs 9.5s == 20%
# TODO: check here, whether this is necessary?
# return [self.gp.pheno(x, copy=False, into_bounds=self.boundary_handler.repair) for x in pop] # probably fine
# return [Solution(self.gp.pheno(x, copy=False), copy=False) for x in pop] # here comes the memory leak, now solved
pop_pheno = [self.gp.pheno(x, copy=True, into_bounds=self.boundary_handler.repair) for x in pop_geno]
if gradf is not None:
# see Hansen (2011), Injecting external solutions into CMA-ES
if not self.gp.islinear:
_print_warning("""
using the gradient (option ``gradf``) with a non-linear
coordinate-wise transformation (option ``transformation``)
has never been tested.""")
# TODO: check this out
def grad_numerical_of_coordinate_map(x, map, epsilon=None):
"""map is a coordinate-wise independent map, return
the estimated diagonal of the Jacobian.
"""
eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
return (list(map(x + eps)) - list(map(x - eps))) / (2 * eps)
def grad_numerical_sym(x, func, epsilon=None):
"""return symmetric numerical gradient of func : R^n -> R.
"""
eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
grad = np.zeros(len(x))
ei = np.zeros(len(x)) # float is 1.6 times faster than int
for i in rglen(x):
ei[i] = eps[i]
grad[i] = (func(x + ei) - func(x - ei)) / (2*eps[i])
ei[i] = 0
return grad
try:
if self.last_iteration_with_gradient == self.countiter:
_print_warning('gradient is used several times in ' +
'this iteration', iteration=self.countiter)
self.last_iteration_with_gradient = self.countiter
except AttributeError:
pass
index_for_gradient = min((2, len(pop_pheno)-1))
xmean = self.mean if xmean is None else xmean
xpheno = self.gp.pheno(xmean, copy=True,
into_bounds=self.boundary_handler.repair)
grad_at_mean = gradf(xpheno, *args)
# lift gradient into geno-space
if not self.gp.isidentity or (self.boundary_handler is not None
and self.boundary_handler.has_bounds()):
boundary_repair = None
gradpen = 0
if isinstance(self.boundary_handler, BoundTransform):
boundary_repair = self.boundary_handler.repair
elif isinstance(self.boundary_handler, BoundPenalty):
fpenalty = lambda x: self.boundary_handler.__call__(
x, SolutionDict({tuple(x): {'geno': x}}), self.gp)
gradpen = grad_numerical_sym(
xmean, fpenalty)
elif self.boundary_handler is None or \
isinstance(self.boundary_handler, BoundNone):
pass
else:
raise NotImplementedError(
"unknown boundary handling method" +
str(self.boundary_handler) +
" when using gradf")
gradgp = grad_numerical_of_coordinate_map(
xmean,
lambda x: self.gp.pheno(x, copy=True,
into_bounds=boundary_repair))
grad_at_mean = grad_at_mean * gradgp + gradpen
# TODO: frozen variables brake the code (e.g. at grad of map)
if len(grad_at_mean) != self.N and self.opts['fixed_variables']:
NotImplementedError("""
gradient with fixed variables is not yet implemented""")
v = self.D * dot(self.B.T, self.sigma_vec * grad_at_mean)
# newton_direction = sv * B * D * D * B^T * sv * gradient = sv * B * D * v
# v = D^-1 * B^T * sv^-1 * newton_direction = D * B^T * sv * gradient
q = sum(v**2)
if q:
# Newton direction
pop_geno[index_for_gradient] = xmean - self.sigma \
* (self.N / q)**0.5 \
* (self.sigma_vec * dot(self.B, self.D * v))
else:
pop_geno[index_for_gradient] = xmean
_print_warning('gradient zero observed',
iteration=self.countiter)
pop_pheno[index_for_gradient] = self.gp.pheno(
pop_geno[index_for_gradient], copy=True,
into_bounds=self.boundary_handler.repair)
# insert solutions, this could also (better?) be done in self.gp.pheno
for i in rglen((pop_geno)):
self.sent_solutions.insert(pop_pheno[i], geno=pop_geno[i], iteration=self.countiter)
return pop_pheno
# ____________________________________________________________
# ____________________________________________________________
def ask_geno(self, number=None, xmean=None, sigma_fac=1):
"""get new candidate solutions in genotyp, sampled from a
multi-variate normal distribution.
Arguments are
`number`
number of returned solutions, by default the
population size `popsize` (AKA lambda).
`xmean`
distribution mean
`sigma_fac`
multiplier for internal sample width (standard
deviation)
`ask_geno` returns a list of N-dimensional candidate solutions
in genotyp representation and is called by `ask`.
Details: updates the sample distribution and might change
the geno-pheno transformation during this update.
:See: `ask`, `ask_and_eval`
"""
if number is None or number < 1:
number = self.sp.popsize
# update distribution, might change self.mean
if self.sp.CMA_on and (
(self.opts['updatecovwait'] is None and
self.countiter >=
self.itereigenupdated + 1. / (self.sp.c1 + self.sp.cmu) / self.N / 10
) or
(self.opts['updatecovwait'] is not None and
self.countiter > self.itereigenupdated + self.opts['updatecovwait']
) or
(self.sp.neg.cmuexp * (self.countiter - self.itereigenupdated) > 0.5
) # TODO (minor): not sure whether this is "the right" criterion
):
self.updateBD()
if xmean is None:
xmean = self.mean
else:
try:
xmean = self.archive[xmean]['geno']
# noise handling after call of tell
except KeyError:
try:
xmean = self.sent_solutions[xmean]['geno']
# noise handling before calling tell
except KeyError:
pass
if self.countiter == 0:
self.tic = time.clock() # backward compatible
self.elapsed_time = ElapsedTime()
sigma = sigma_fac * self.sigma
# update parameters for sampling the distribution
# fac 0 1 10
# 150-D cigar:
# 50749 50464 50787
# 200-D elli: == 6.9
# 99900 101160
# 100995 103275 == 2% loss
# 100-D elli: == 6.9
# 363052 369325 < 2% loss
# 365075 365755
# sample distribution
if self._flgtelldone: # could be done in tell()!?
self._flgtelldone = False
self.ary = []
# check injections from pop_injection_directions
arinj = []
if hasattr(self, 'pop_injection_directions'):
if self.countiter < 4 and \
len(self.pop_injection_directions) > self.popsize - 2:
_print_warning(' %d special injected samples with popsize %d, '
% (len(self.pop_injection_directions), self.popsize)
+ "popsize %d will be used" % (len(self.pop_injection_directions) + 2)
+ (" and the warning is suppressed in the following" if self.countiter == 3 else ""))
while self.pop_injection_directions:
y = self.pop_injection_directions.pop(0)
if self.opts['CMA_sample_on_sphere_surface']:
y *= (self.N**0.5 if self.opts['CSA_squared'] else
self.const.chiN) / self.mahalanobis_norm(y)
arinj.append(y)
else:
y *= self.random_rescaling_factor_to_mahalanobis_size(y) / self.sigma
arinj.append(y)
# each row is a solution
# the 1 is a small safeguard which needs to be removed to implement "pure" adaptive encoding
arz = self.randn((max([1, (number - len(arinj))]), self.N))
if self.opts['CMA_sample_on_sphere_surface']: # normalize the length to chiN
for i in rglen((arz)):
ss = sum(arz[i]**2)
if 1 < 3 or ss > self.N + 10.1:
arz[i] *= (self.N**0.5 if self.opts['CSA_squared']
else self.const.chiN) / ss**0.5
# or to average
# arz *= 1 * self.const.chiN / np.mean([sum(z**2)**0.5 for z in arz])
# fac = np.mean(sum(arz**2, 1)**0.5)
# print fac
# arz *= self.const.chiN / fac
# compute ary from arz
if len(arz): # should always be true
# apply unconditional mirroring, is pretty obsolete
if new_injections and self.sp.lam_mirr and self.opts['CMA_mirrormethod'] == 0:
for i in range(self.sp.lam_mirr):
if 2 * (i + 1) > len(arz):
if self.countiter < 4:
_print_warning("fewer mirrors generated than given in parameter setting (%d<%d)"
% (i, self.sp.lam_mirr))
break
arz[-1 - 2 * i] = -arz[-2 - 2 * i]
ary = self.sigma_vec * np.dot(self.B, (self.D * arz).T).T
if len(arinj):
ary = np.vstack((arinj, ary))
else:
ary = array(arinj)
# TODO: subject to removal in future
if not new_injections and number > 2 and self.countiter > 2:
if (isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) or
self.opts['mean_shift_line_samples'] or
self.opts['pc_line_samples']):
ys = []
if self.opts['pc_line_samples']:
ys.append(self.pc[:]) # now TPA is with pc_line_samples
if self.opts['mean_shift_line_samples']:
ys.append(self.mean - self.mean_old)
if not len(ys):
ys.append(self.mean - self.mean_old)
# assign a mirrored pair from each element of ys into ary
for i, y in enumerate(ys):
if len(arz) > 2 * i + 1: # at least two more samples
assert y is not self.pc
# y *= sum(self.randn(self.N)**2)**0.5 / self.mahalanobis_norm(y)
y *= self.random_rescaling_factor_to_mahalanobis_size(y)
# TODO: rescale y depending on some parameter?
ary[2*i] = y / self.sigma
ary[2*i + 1] = y / -self.sigma
else:
_print_warning('line samples omitted due to small popsize',
method_name='ask_geno', iteration=self.countiter)
# print(xmean[0])
pop = xmean + sigma * ary
self.evaluations_per_f_value = 1
self.ary = ary
return pop
def random_rescale_to_mahalanobis(self, x):
"""change `x` like for injection, all on genotypic level"""
x -= self.mean
if any(x):
x *= sum(self.randn(len(x))**2)**0.5 / self.mahalanobis_norm(x)
x += self.mean
return x
def random_rescaling_factor_to_mahalanobis_size(self, y):
"""``self.mean + self.random_rescaling_factor_to_mahalanobis_size(y)``
is guarantied to appear like from the sample distribution.
"""
if len(y) != self.N:
raise ValueError('len(y)=%d != %d=dimension' % (len(y), self.N))
if not any(y):
_print_warning("input was all-zeros, which is probably a bug",
"random_rescaling_factor_to_mahalanobis_size",
iteration=self.countiter)
return 1.0
return sum(self.randn(len(y))**2)**0.5 / self.mahalanobis_norm(y)
def get_mirror(self, x, preserve_length=False):
"""return ``pheno(self.mean - (geno(x) - self.mean))``.
>>> import cma
>>> es = cma.CMAEvolutionStrategy(cma.np.random.randn(3), 1)
>>> x = cma.np.random.randn(3)
>>> assert cma.Mh.vequals_approximately(es.mean - (x - es.mean), es.get_mirror(x, preserve_length=True))
>>> x = es.ask(1)[0]
>>> vals = (es.get_mirror(x) - es.mean) / (x - es.mean)
>>> assert cma.Mh.equals_approximately(sum(vals), len(vals) * vals[0])
TODO: this implementation is yet experimental.
TODO: this implementation includes geno-pheno transformation,
however in general GP-transformation should be separated from
specific code.
Selectively mirrored sampling improves to a moderate extend but
overadditively with active CMA for quite understandable reasons.
Optimal number of mirrors are suprisingly small: 1,2,3 for
maxlam=7,13,20 where 3,6,10 are the respective maximal possible
mirrors that must be clearly suboptimal.
"""
try:
dx = self.sent_solutions[x]['geno'] - self.mean
except: # can only happen with injected solutions?!
dx = self.gp.geno(x, from_bounds=self.boundary_handler.inverse,
copy_if_changed=True) - self.mean
if not preserve_length:
# dx *= sum(self.randn(self.N)**2)**0.5 / self.mahalanobis_norm(dx)
dx *= self.random_rescaling_factor_to_mahalanobis_size(dx)
x = self.mean - dx
y = self.gp.pheno(x, into_bounds=self.boundary_handler.repair)
# old measure: costs 25% in CPU performance with N,lambda=20,200
self.sent_solutions.insert(y, geno=x, iteration=self.countiter)
return y
def _mirror_penalized(self, f_values, idx):
"""obsolete and subject to removal (TODO),
return modified f-values such that for each mirror one becomes worst.
This function is useless when selective mirroring is applied with no
more than (lambda-mu)/2 solutions.
Mirrors are leading and trailing values in ``f_values``.
"""
assert len(f_values) >= 2 * len(idx)
m = np.max(np.abs(f_values))
for i in len(idx):
if f_values[idx[i]] > f_values[-1 - i]:
f_values[idx[i]] += m
else:
f_values[-1 - i] += m
return f_values
def _mirror_idx_cov(self, f_values, idx1): # will most likely be removed
"""obsolete and subject to removal (TODO),
return indices for negative ("active") update of the covariance matrix
assuming that ``f_values[idx1[i]]`` and ``f_values[-1-i]`` are
the corresponding mirrored values
computes the index of the worse solution sorted by the f-value of the
better solution.
TODO: when the actual mirror was rejected, it is better
to return idx1 instead of idx2.
Remark: this function might not be necessary at all: if the worst solution
is the best mirrored, the covariance matrix updates cancel (cave: weights
and learning rates), which seems what is desirable. If the mirror is bad,
as strong negative update is made, again what is desirable.
And the fitness--step-length correlation is in part addressed by
using flat weights.
"""
idx2 = np.arange(len(f_values) - 1, len(f_values) - 1 - len(idx1), -1)
f = []
for i in rglen((idx1)):
f.append(min((f_values[idx1[i]], f_values[idx2[i]])))
# idx.append(idx1[i] if f_values[idx1[i]] > f_values[idx2[i]] else idx2[i])
return idx2[np.argsort(f)][-1::-1]
def eval_mean(self, func, args=()):
"""evaluate the distribution mean, this is not (yet) effective
in terms of termination or display"""
self.fmean = func(self.mean, *args)
return self.fmean
# ____________________________________________________________
# ____________________________________________________________
#
def ask_and_eval(self, func, args=(), gradf=None, number=None, xmean=None, sigma_fac=1,
evaluations=1, aggregation=np.median, kappa=1):
"""samples `number` solutions and evaluates them on `func`, where
each solution `s` is resampled until ``self.is_feasible(s, func(s)) is True``.
Arguments
---------
`func`
objective function, ``func(x)`` returns a scalar
`args`
additional parameters for `func`
`gradf`
gradient of objective function, ``g = gradf(x, *args)``
must satisfy ``len(g) == len(x)``
`number`
number of solutions to be sampled, by default
population size ``popsize`` (AKA lambda)
`xmean`
mean for sampling the solutions, by default ``self.mean``.
`sigma_fac`
multiplier for sampling width, standard deviation, for example
to get a small perturbation of solution `xmean`
`evaluations`
number of evaluations for each sampled solution
`aggregation`
function that aggregates `evaluations` values to
as single value.
`kappa`
multiplier used for the evaluation of the solutions, in
that ``func(m + kappa*(x - m))`` is the f-value for x.
Return
------
``(X, fit)``, where
X -- list of solutions
fit -- list of respective function values
Details
-------
While ``not self.is_feasible(x, func(x))``new solutions are sampled. By
default ``self.is_feasible == cma.feasible == lambda x, f: f not in (None, np.NaN)``.
The argument to `func` can be freely modified within `func`.
Depending on the ``CMA_mirrors`` option, some solutions are not sampled
independently but as mirrors of other bad solutions. This is a simple
derandomization that can save 10-30% of the evaluations in particular
with small populations, for example on the cigar function.
Example
-------
>>> import cma
>>> x0, sigma0 = 8*[10], 1 # 8-D
>>> es = cma.CMAEvolutionStrategy(x0, sigma0)
>>> while not es.stop():
... X, fit = es.ask_and_eval(cma.fcts.elli) # handles NaN with resampling
... es.tell(X, fit) # pass on fitness values
... es.disp(20) # print every 20-th iteration
>>> print('terminated on ' + str(es.stop()))
<output omitted>
A single iteration step can be expressed in one line, such that
an entire optimization after initialization becomes
::
while not es.stop():
es.tell(*es.ask_and_eval(cma.fcts.elli))
"""
# initialize
popsize = self.sp.popsize
if number is not None:
popsize = number
selective_mirroring = self.opts['CMA_mirrormethod'] > 0
nmirrors = self.sp.lam_mirr
if popsize != self.sp.popsize:
nmirrors = Mh.sround(popsize * self.sp.lam_mirr / self.sp.popsize)
# TODO: now selective mirroring might be impaired
assert new_injections or self.opts['CMA_mirrormethod'] < 2
if new_injections and self.opts['CMA_mirrormethod'] != 1: # otherwise mirrors are done elsewhere
nmirrors = 0
assert nmirrors <= popsize // 2
self.mirrors_idx = np.arange(nmirrors) # might never be used
self.mirrors_rejected_idx = [] # might never be used
is_feasible = self.opts['is_feasible']
# do the work
fit = [] # or np.NaN * np.empty(number)
X_first = self.ask(popsize, xmean=xmean, gradf=gradf, args=args)
if xmean is None:
xmean = self.mean # might have changed in self.ask
X = []
for k in range(int(popsize)):
x, f = X_first.pop(0), None
rejected = -1
while rejected < 0 or not is_feasible(x, f): # rejection sampling
rejected += 1
if rejected: # resample
x = self.ask(1, xmean, sigma_fac)[0]
elif k >= popsize - nmirrors: # mirrored sample
if k == popsize - nmirrors and selective_mirroring:
self.mirrors_idx = np.argsort(fit)[-1:-1 - nmirrors:-1]
x = self.get_mirror(X[self.mirrors_idx[popsize - 1 - k]])
if rejected == 1 and k >= popsize - nmirrors:
self.mirrors_rejected_idx.append(k)
# contraints handling test hardwired ccccccccccc
length_normalizer = 1
# zzzzzzzzzzzzzzzzzzzzzzzzz
f = func(x, *args) if kappa == 1 else \
func(xmean + kappa * length_normalizer * (x - xmean),
*args)
if is_feasible(x, f) and evaluations > 1:
f = aggregation([f] + [(func(x, *args) if kappa == 1 else
func(xmean + kappa * length_normalizer * (x - xmean), *args))
for _i in range(int(evaluations - 1))])
if rejected + 1 % 1000 == 0:
print(' %d solutions rejected (f-value NaN or None) at iteration %d' %
(rejected, self.countiter))
fit.append(f)
X.append(x)
self.evaluations_per_f_value = int(evaluations)
return X, fit
def prepare_injection_directions(self):
"""provide genotypic directions for TPA and selective mirroring,
with no specific length normalization, to be used in the
coming iteration.
Details:
This method is called in the end of `tell`. The result is
assigned to ``self.pop_injection_directions`` and used in
`ask_geno`.
TODO: should be rather appended?
"""
# self.pop_injection_directions is supposed to be empty here
if hasattr(self, 'pop_injection_directions') and self.pop_injection_directions:
ValueError("Looks like a bug in calling order/logics")
ary = []
if (isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) or
self.opts['mean_shift_line_samples']):
ary.append(self.mean - self.mean_old)
ary.append(self.mean_old - self.mean) # another copy!
if ary[-1][0] == 0.0:
_print_warning('zero mean shift encountered which ',
'prepare_injection_directions',
'CMAEvolutionStrategy', self.countiter)
if self.opts['pc_line_samples']: # caveat: before, two samples were used
ary.append(self.pc.copy())
if self.sp.lam_mirr and self.opts['CMA_mirrormethod'] == 2:
if self.pop_sorted is None:
_print_warning('pop_sorted attribute not found, mirrors obmitted',
'prepare_injection_directions',
iteration=self.countiter)
else:
ary += self.get_selective_mirrors()
self.pop_injection_directions = ary
return ary
def get_selective_mirrors(self, number=None, pop_sorted=None):
"""get mirror genotypic directions of the `number` worst
solution, based on ``pop_sorted`` attribute (from last
iteration).
Details:
Takes the last ``number=sp.lam_mirr`` entries in
``pop_sorted=self.pop_sorted`` as solutions to be mirrored.
"""
if pop_sorted is None:
if hasattr(self, 'pop_sorted'):
pop_sorted = self.pop_sorted
else:
return None
if number is None:
number = self.sp.lam_mirr
res = []
for i in range(1, number + 1):
res.append(self.mean_old - pop_sorted[-i])
return res
# ____________________________________________________________
    def tell(self, solutions, function_values, check_points=None,
             copy=False):
        """pass objective function values to prepare for next
        iteration. This core procedure of the CMA-ES algorithm updates
        all state variables, in particular the two evolution paths, the
        distribution mean, the covariance matrix and a step-size.

        Arguments
        ---------
            `solutions`
                list or array of candidate solution points (of
                type `numpy.ndarray`), most presumably before
                delivered by method `ask()` or `ask_and_eval()`.
            `function_values`
                list or array of objective function values
                corresponding to the respective points. Beside for termination
                decisions, only the ranking of values in `function_values`
                is used.
            `check_points`
                If ``check_points is None``, only solutions that are not generated
                by `ask()` are possibly clipped (recommended). ``False`` does not clip
                any solution (not recommended).
                If ``True``, clips solutions that realize long steps (i.e. also
                those that are unlikely to be generated with `ask()`). `check_points`
                can be a list of indices to be checked in solutions.
            `copy`
                ``solutions`` can be modified in this routine, if ``copy is False``

        Details
        -------
        `tell()` updates the parameters of the multivariate
        normal search distribution, namely covariance matrix and
        step-size and updates also the attributes ``countiter`` and
        ``countevals``. To check the points for consistency is quadratic
        in the dimension (like sampling points).

        Bugs
        ----
        The effect of changing the solutions delivered by `ask()`
        depends on whether boundary handling is applied. With boundary
        handling, modifications are disregarded. This is necessary to
        apply the default boundary handling that uses unrepaired
        solutions but might change in future.

        Example
        -------
        ::

            import cma
            func = cma.fcts.elli  # choose objective function
            es = cma.CMAEvolutionStrategy(cma.np.random.rand(10), 1)
            while not es.stop():
                X = es.ask()
                es.tell(X, [func(x) for x in X])
            es.result()  # where the result can be found

        :See: class `CMAEvolutionStrategy`, `ask()`, `ask_and_eval()`, `fmin()`
        """
        # --- input validation ---
        if self._flgtelldone:
            raise _Error('tell should only be called once per iteration')
        lam = len(solutions)
        if lam != array(function_values).shape[0]:
            raise _Error('for each candidate solution '
                         + 'a function value must be provided')
        if lam + self.sp.lam_mirr < 3:
            raise _Error('population size ' + str(lam) + ' is too small when option CMA_mirrors * popsize < 0.5')
        if not isscalar(function_values[0]):
            # tolerate one level of nesting: take the first entry of each
            if isscalar(function_values[0][0]):
                if self.countiter <= 1:
                    _print_warning('function values are not a list of scalars (further warnings are suppressed)')
                function_values = [val[0] for val in function_values]
            else:
                raise _Error('objective function values must be a list of scalars')

        # ## prepare
        N = self.N
        sp = self.sp
        if lam < sp.mu:  # rather decrease cmean instead of having mu > lambda//2
            raise _Error('not enough solutions passed to function tell (mu>lambda)')
        self.countiter += 1  # >= 1 now
        self.countevals += sp.popsize * self.evaluations_per_f_value
        self.best.update(solutions, self.sent_solutions, function_values, self.countevals)
        # flg_diagonal: still in the (cheap) diagonal-only phase of C?
        flg_diagonal = self.opts['CMA_diagonal'] is True \
            or self.countiter <= self.opts['CMA_diagonal']
        if not flg_diagonal and len(self.C.shape) == 1:  # C was diagonal ie 1-D
            # enter non-separable phase (no easy return from here)
            self.C = np.diag(self.C)
            if 1 < 3:
                self.B = np.eye(N)  # identity(N)
                idx = np.argsort(self.D)
                self.D = self.D[idx]
                self.B = self.B[:, idx]
            self._Yneg = np.zeros((N, N))

        # ## manage fitness
        fit = self.fit  # make short cut
        # CPU for N,lam=20,200: this takes 10s vs 7s
        # boundary penalties are added to the raw f-values before ranking
        fit.bndpen = self.boundary_handler.update(function_values, self)(solutions, self.sent_solutions, self.gp)
        # for testing:
        # fit.bndpen = self.boundary_handler.update(function_values, self)([s.unrepaired for s in solutions])
        fit.idx = np.argsort(array(fit.bndpen) + array(function_values))
        fit.fit = array(function_values, copy=False)[fit.idx]

        # update output data TODO: this is obsolete!? However: need communicate current best x-value?
        # old: out['recent_x'] = self.gp.pheno(pop[0])
        # self.out['recent_x'] = array(solutions[fit.idx[0]])  # TODO: change in a data structure(?) and use current as identify
        # self.out['recent_f'] = fit.fit[0]

        # fitness histories (short and long, used by stopping criteria)
        fit.hist.insert(0, fit.fit[0])
        # if len(self.fit.histbest) < 120+30*N/sp.popsize or  # does not help, as tablet in the beginning is the critical counter-case
        if ((self.countiter % 5) == 0):  # 20 percent of 1e5 gen.
            fit.histbest.insert(0, fit.fit[0])
            fit.histmedian.insert(0, np.median(fit.fit) if len(fit.fit) < 21
                                  else fit.fit[self.popsize // 2])
        if len(fit.histbest) > 2e4:  # 10 + 30*N/sp.popsize:
            fit.histbest.pop()
            fit.histmedian.pop()
        if len(fit.hist) > 10 + 30 * N / sp.popsize:
            fit.hist.pop()

        # TODO: clean up inconsistency when an unrepaired solution is available and used
        # now get the genotypes
        pop = self.pop_sorted = []  # create pop from input argument solutions
        for k, s in enumerate(solutions):  # use phenotype before Solution.repair()
            if 1 < 3:
                pop += [self.gp.geno(s,
                                     from_bounds=self.boundary_handler.inverse,
                                     repair=(self.repair_genotype if check_points not in (False, 0, [], ()) else None),
                                     archive=self.sent_solutions)]  # takes genotype from sent_solutions, if available
                try:
                    self.archive.insert(s, value=self.sent_solutions.pop(s), fitness=function_values[k])
                    # self.sent_solutions.pop(s)
                except KeyError:
                    pass
        # check that TPA mirrors are available, TODO: move to TPA class?
        if isinstance(self.adapt_sigma, CMAAdaptSigmaTPA) and self.countiter > 3 and not (self.countiter % 3):
            # sanity check: the first two solutions should be colinear
            # with the mean shift; compare ratios in a random coordinate
            dm = self.mean[0] - self.mean_old[0]
            dx0 = pop[0][0] - self.mean_old[0]
            dx1 = pop[1][0] - self.mean_old[0]
            for i in np.random.randint(1, self.N, 1):
                try:
                    if not Mh.equals_approximately(
                            (self.mean[i] - self.mean_old[i])
                            / (pop[0][i] - self.mean_old[i]),
                            dm / dx0, 1e-8) or \
                            not Mh.equals_approximately(
                                (self.mean[i] - self.mean_old[i])
                                / (pop[1][i] - self.mean_old[i]),
                                dm / dx1, 1e-8):
                        _print_warning('TPA error with mirrored samples', 'tell',
                                       'CMAEvolutionStrategy', self.countiter)
                except ZeroDivisionError:
                    _print_warning('zero division encountered in TPA check\n which should be very rare and is likely a bug',
                                   'tell', 'CMAEvolutionStrategy', self.countiter)
        try:
            # NOTE(review): moldold is assigned but never read here;
            # bare except kept byte-identical (first-iteration case where
            # mean_old does not exist yet)
            moldold = self.mean_old
        except:
            pass
        self.mean_old = self.mean
        mold = self.mean_old  # just an alias

        # check and normalize each x - m
        # check_points is a flag (None is default: check non-known solutions) or an index list
        # should also a number possible (first check_points points)?
        if check_points not in (None, False, 0, [], ()):  # useful in case of injected solutions and/or adaptive encoding, however is automatic with use_sent_solutions
            try:
                # a sequence of indices was passed
                if len(check_points):
                    idx = check_points
            except:
                # check_points was a (true) flag: check all
                idx = range(sp.popsize)
            for k in idx:
                self.repair_genotype(pop[k])

        # only arrays can be multiple indexed
        pop = array(pop, copy=False)

        # sort pop by (penalized) fitness
        pop = pop[fit.idx]

        # prepend best-ever solution to population, in case
        if self.opts['CMA_elitist'] and self.best.f < fit.fit[0]:
            if self.best.x_geno is not None:
                xp = [self.best.x_geno]
                # xp = [self.best.xdict['geno']]
                # xp = [self.gp.geno(self.best.x[:])]  # TODO: remove
                # print self.mahalanobis_norm(xp[0]-self.mean)
            else:
                xp = [self.gp.geno(array(self.best.x, copy=True),
                                   self.boundary_handler.inverse,
                                   copy_if_changed=False)]
                print('genotype for elitist not found')
            self.clip_or_fit_solutions(xp, [0])
            pop = array([xp[0]] + list(pop))
        elif self.opts['CMA_elitist'] == 'initial':  # current solution was better
            self.opts['CMA_elitist'] = False
        self.pop_sorted = pop
        # compute new mean: weighted recombination of the mu best
        self.mean = mold + self.sp.cmean * \
            (sum(sp.weights * pop[0:sp.mu].T, 1) - mold)

        # check Delta m (this is not default, but could become at some point)
        # CAVE: upper_length=sqrt(2)+2 is too restrictive, test upper_length = sqrt(2*N) thoroughly.
        # replaced by repair_geno?
        # simple test case injecting self.mean:
        # self.mean = 1e-4 * self.sigma * np.random.randn(N)
        if 1 < 3:
            cmean = self.sp.cmean

        # zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
        # get learning rate constants
        cc, c1, cmu = sp.cc, sp.c1, sp.cmu
        if flg_diagonal:
            cc, c1, cmu = sp.cc_sep, sp.c1_sep, sp.cmu_sep

        # now the real work can start
        hsig = self.adapt_sigma.hsig(self)  # ps update must be done here in separable case
        # hsig = sum(self.ps**2) / self.N < 2 + 4./(N+1)
        # adjust missing variance due to hsig, in 4-D with damps=1e99 and sig0 small
        # hsig leads to premature convergence of C otherwise
        # hsiga = (1-hsig**2) * c1 * cc * (2-cc)  # to be removed in future
        c1a = c1 - (1 - hsig**2) * c1 * cc * (2 - cc)  # adjust for variance loss
        # evolution path update for the rank-one covariance term
        self.pc = (1 - cc) * self.pc + \
            hsig * (sqrt(cc * (2 - cc) * sp.mueff) / self.sigma / cmean) * \
            (self.mean - mold) / self.sigma_vec

        # covariance matrix adaptation/udpate
        if sp.CMA_on:
            # assert sp.c1 + sp.cmu < sp.mueff / N  # ??
            assert c1 + cmu <= 1
            # default full matrix case
            if not flg_diagonal:
                # rank-mu update from the mu best steps
                Y = (pop[0:sp.mu] - mold) / (self.sigma * self.sigma_vec)
                Y = dot((cmu * sp.weights) * Y.T, Y)  # learning rate integrated
                if self.sp.neg.cmuexp:
                    # active CMA: accumulate a negative update from the
                    # worst solutions into _Yneg (applied in updateBD)
                    tmp = (pop[-sp.neg.mu:] - mold) / (self.sigma * self.sigma_vec)
                    # normalize to constant length (seems preferable in several aspects)
                    for i in range(tmp.shape[0]):
                        tmp[i, :] *= N**0.5 / self.mahalanobis_norm(
                            self.sigma_vec * tmp[i, :]) / self.sigma
                    self._Yneg *= 1 - self.sp.neg.cmuexp  # for some reason necessary?
                    self._Yneg += dot(sp.neg.weights * tmp.T, tmp) - self.C
                    # self.update_exponential(dot(sp.neg.weights * tmp.T, tmp) - 1 * self.C, -1*self.sp.neg.cmuexp)
                self.C *= 1 - c1a - cmu
                self.C += np.outer(c1 * self.pc, self.pc) + Y
                self.dC[:] = np.diag(self.C)  # for output and termination checking
            else:  # separable/diagonal linear case
                assert(c1 + cmu <= 1)
                Z = np.zeros(N)
                for k in range(sp.mu):
                    z = (pop[k] - mold) / (self.sigma * self.sigma_vec)  # TODO see above
                    Z += sp.weights[k] * z * z  # is 1-D
                self.C = (1 - c1a - cmu) * self.C + c1 * self.pc * self.pc + cmu * Z
                # TODO: self.C *= exp(cmuneg * (N - dot(sp.neg.weights, **2)
                self.dC = self.C
                self.D = sqrt(self.C)  # C is a 1-D array, this is why adapt_sigma needs to prepare before
                self.itereigenupdated = self.countiter

        # idx = self._mirror_idx_cov()  # take half of mirrored vectors for negative update

        # step-size adaptation, adapt sigma
        # in case of TPA, function_values[0] and [1] must reflect samples colinear to xmean - xmean_old
        self.adapt_sigma.update(self, function_values=function_values)

        # clamp effective coordinate-wise std into [minstd, maxstd]
        if self.sigma * min(self.sigma_vec * self.dC**0.5) < self.opts['minstd']:
            self.sigma = self.opts['minstd'] / min(self.sigma_vec * self.dC**0.5)
        if self.sigma * max(self.sigma_vec * self.dC**0.5) > self.opts['maxstd']:
            self.sigma = self.opts['maxstd'] / max(self.sigma_vec * self.dC**0.5)
        # g = self.countiter
        # N = self.N
        # mindx = eval(self.opts['mindx'])
        #        if isinstance(self.opts['mindx'], basestring) else self.opts['mindx']
        if self.sigma * min(self.D) < self.opts['mindx']:  # TODO: sigma_vec is missing here
            self.sigma = self.opts['mindx'] / min(self.D)

        if self.sigma > 1e9 * self.sigma0:
            # renormalize to avoid numerical blow-up of sigma
            alpha = self.sigma / max(self.D)
            self.multiplyC(alpha)
            self.sigma /= alpha**0.5
            self.opts['tolupsigma'] /= alpha**0.5  # to be compared with sigma

        # TODO increase sigma in case of a plateau?

        # Uncertainty noise measurement is done on an upper level

        # move mean into "feasible preimage", leads to weird behavior on
        # 40-D tablet with bound 0.1, not quite explained (constant
        # dragging is problematic, but why doesn't it settle), still a bug?
        if new_injections:
            self.pop_injection_directions = self.prepare_injection_directions()
        self.pop_sorted = []  # remove this in case pop is still needed
        self._flgtelldone = True
    # end tell()
def inject(self, solutions):
    """inject a genotypic solution. The solution is used as direction
    relative to the distribution mean to compute a new candidate
    solution returned in method `ask_geno` which in turn is used in
    method `ask`.

    Each injected ``solution`` must have length ``self.N``; otherwise
    a `ValueError` is raised. The stored quantity is the *direction*
    ``solution - self.mean``, appended to ``self.pop_injection_directions``.

    >>> import cma
    >>> es = cma.CMAEvolutionStrategy(4 * [1], 2)
    >>> while not es.stop():
    ...     es.inject([4 * [0.0]])
    ...     X = es.ask()
    ...     break
    >>> assert X[0][0] == X[0][1]

    """
    if not hasattr(self, 'pop_injection_directions'):
        self.pop_injection_directions = []
    for solution in solutions:
        if len(solution) != self.N:
            raise ValueError('method `inject` needs a list or array'
                             + (' each el with dimension (`len`) %d' % self.N))
        # BUGFIX: was ``array(solution, copy=False, dtype=float)``,
        # which raises ValueError under NumPy >= 2.0 whenever a copy
        # is unavoidable (e.g. for a plain list input); np.asarray
        # has the intended copy-only-if-needed semantics
        self.pop_injection_directions.append(
            np.asarray(solution, dtype=float) - self.mean)
def result(self):
    """Return the result tuple::

        (xbest, f(xbest), evaluations_xbest, evaluations, iterations,
         pheno(xmean), effective_stds)

    """
    # TODO: how about xcurrent?
    best_tuple = self.best.get()
    phenotypic_mean = self.gp.pheno(self.mean)
    effective_stds = (self.gp.scales * self.sigma
                      * self.sigma_vec * self.dC**0.5)
    return best_tuple + (self.countevals, self.countiter,
                         phenotypic_mean, effective_stds)
def result_pretty(self, number_of_runs=0, time_str=None,
                  fbestever=None):
    """pretty print result.

    Prints the active termination conditions, the final and overall
    best f-values, and the (abbreviated for N >= 9) incumbent solution
    and coordinate-wise standard deviations.

    Returns ``self.result()``
    """
    if fbestever is None:
        fbestever = self.best.f
    # " after N restart(s)" suffix appended to each termination line
    s = (' after %i restart' + ('s' if number_of_runs > 1 else '')) \
        % number_of_runs if number_of_runs else ''
    for k, v in list(self.stop().items()):
        print('termination on %s=%s%s' % (k, str(v), s +
              (' (%s)' % time_str if time_str else '')))
    print('final/bestever f-value = %e %e' % (self.best.last.f,
                                              fbestever))
    if self.N < 9:
        # small dimension: print the full vectors
        print('incumbent solution: ' + str(list(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair))))
        print('std deviation: ' + str(list(self.sigma * self.sigma_vec * sqrt(self.dC) * self.gp.scales)))
    else:
        # larger dimension: print only the first 8 entries, drop the
        # closing bracket and append an ellipsis marker
        print('incumbent solution: %s ...]' % (str(self.gp.pheno(self.mean, into_bounds=self.boundary_handler.repair)[:8])[:-1]))
        print('std deviations: %s ...]' % (str((self.sigma * self.sigma_vec * sqrt(self.dC) * self.gp.scales)[:8])[:-1]))
    return self.result()
def clip_or_fit_solutions(self, pop, idx):
    """Repair the solutions ``pop[k] for k in idx`` in place via
    `repair_genotype`, so they fit the sample distribution; this
    interface will probably change.

    In particular the frequency of long vectors appearing in
    ``pop[idx] - self.mean`` is limited.
    """
    for candidate in (pop[k] for k in idx):
        self.repair_genotype(candidate)
def repair_genotype(self, x, copy_if_changed=False):
    """make sure that solutions fit to the sample distribution, this interface will probably change.

    In particular the frequency of x - self.mean being long is limited:
    when the Mahalanobis distance of `x` from the mean exceeds an
    upper bound of roughly ``sqrt(N) + 2``, `x` is shrunk towards the
    mean onto that bound.

    If ``copy_if_changed``, a rescaled copy is returned and the input
    is left untouched; otherwise `x` is modified in place (only when
    repair is needed). Returns the (possibly repaired) `x`.
    """
    x = array(x, copy=False)
    mold = array(self.mean, copy=False)
    if 1 < 3:  # hard clip at upper_length
        # maximal admissible Mahalanobis distance from the mean
        upper_length = self.N**0.5 + 2 * self.N / (self.N + 2)  # should become an Option, but how? e.g. [0, 2, 2]
        fac = self.mahalanobis_norm(x - mold) / upper_length
        if fac > 1:  # too long: rescale the difference vector by 1/fac
            if copy_if_changed:
                x = (x - mold) / fac + mold
            else:  # should be 25% faster:
                x -= mold
                x /= fac
                x += mold
            # print self.countiter, k, fac, self.mahalanobis_norm(pop[k] - mold)
            # adapt also sigma: which are the trust-worthy/injected solutions?
    else:
        # dead branch (``1 < 3`` above is always true), kept for
        # reference: smooth tail-based repair via a CheckTail helper
        if 'checktail' not in self.__dict__:  # hasattr(self, 'checktail')
            raise NotImplementedError
            # from check_tail_smooth import CheckTail  # for the time being
            # self.checktail = CheckTail()
            # print('untested feature checktail is on')
        fac = self.checktail.addchin(self.mahalanobis_norm(x - mold))
        if fac < 1:
            x = fac * (x - mold) + mold
    return x
def decompose_C(self):
    """eigen-decompose self.C and update self.dC, self.C, self.B.

    After the call, ``self.D`` holds the square roots of the
    eigenvalues sorted in ascending order and the columns of
    ``self.B`` are the corresponding eigenvectors; ``self.dC`` is a
    copy of the diagonal of C. Raises ValueError when C is not
    positive definite.

    Known bugs: this might give a runtime error with
    CMA_diagonal / separable option on.
    """
    if self.opts['CMA_diagonal']:
        _print_warning("this might fail with CMA_diagonal option on",
                       iteration=self.countiter)
        print(self.opts['CMA_diagonal'])
    # print(' %.19e' % self.C[0][0])
    self.C = (self.C + self.C.T) / 2  # enforce exact symmetry first
    self.dC = np.diag(self.C).copy()
    self.D, self.B = self.opts['CMA_eigenmethod'](self.C)
    # self.B = np.round(self.B, 10)
    # for i in rglen(self.D):
    #     d = self.D[i]
    #     oom = np.round(np.log10(d))
    #     self.D[i] = 10**oom * np.round(d / 10**oom, 10)
    # print(' %.19e' % self.C[0][0])
    # print(' %.19e' % self.D[0])
    if any(self.D <= 0):
        _print_warning("ERROR", iteration=self.countiter)
        raise ValueError("covariance matrix was not positive definite," +
                         " this must be considered as a bug")
    self.D = self.D**0.5  # eigenvalues -> standard deviations
    assert all(isfinite(self.D))
    idx = np.argsort(self.D)
    self.D = self.D[idx]
    self.B = self.B[:, idx]  # self.B[i] is a row, columns self.B[:,i] are eigenvectors
    self.count_eigen += 1
def updateBD(self):
    """update internal variables for sampling the distribution with the
    current covariance matrix C. This method is O(N^3), if C is not diagonal.

    Performs, in order: the (clipped) negative/"active" covariance
    update, optional trace normalization, a coordinate rescaling
    safeguard when the coordinate-wise condition exceeds 1e8, the
    eigendecomposition via `decompose_C`, and a last-resort
    geno-pheno transformation when the condition number of C
    exceeds 1e6.
    """
    # itereigenupdated is always up-to-date in the diagonal case
    # just double check here
    if self.itereigenupdated == self.countiter:
        return
    if self.opts['CMA_diagonal'] >= self.countiter:
        _print_warning("updateBD called in CMA_diagonal mode, " +
                       "this should be considered a bug", "updateBD",
                       iteration=self.countiter)
    # C has already positive updates, here come the additional negative updates
    if self.sp.neg.cmuexp:
        # how much C may have shrunk since the last decomposition
        C_shrunken = (1 - self.sp.cmu - self.sp.c1)**(self.countiter - self.itereigenupdated)
        clip_fac = 0.60  # 0.9 is sufficient to prevent degeneration in small dimension
        if hasattr(self.opts['vv'], '__getitem__') and self.opts['vv'][0] == 'sweep_ccov_neg':
            clip_fac = 0.98
        if (self.countiter - self.itereigenupdated) * self.sp.neg.cmuexp * self.N \
                < clip_fac * C_shrunken:
            # pos.def. guarantied, because vectors are normalized
            self.C -= self.sp.neg.cmuexp * self._Yneg
        else:
            # cheap bound failed: decompose and clip the update explicitly
            max_warns = 1
            try:
                self._updateBD_warnings += 1
            except AttributeError:
                self._updateBD_warnings = 1
            if self.opts['verbose'] > 1 and \
                    self._updateBD_warnings <= max_warns:
                _print_warning('doing two additional eigen' +
                               'decompositions to guarantee pos.def.',
                               'updateBD', 'CMAEvolutionStrategy')
                if self._updateBD_warnings == max_warns:
                    _print_warning('further warnings are surpressed',
                                   'updateBD')
            self.decompose_C()
            # C^(-1/2), symmetrized against numerical asymmetry
            _tmp_inverse_root_C = dot(self.B / self.D, self.B.T)
            _tmp_inverse_root_C = (_tmp_inverse_root_C + _tmp_inverse_root_C.T) / 2
            Zneg = dot(dot(_tmp_inverse_root_C, self._Yneg), _tmp_inverse_root_C)
            eigvals, eigvecs = self.opts['CMA_eigenmethod'](Zneg)
            self.count_eigen += 1
            if max(eigvals) * self.sp.neg.cmuexp <= clip_fac:
                self.C -= self.sp.neg.cmuexp * self._Yneg
            elif 1 < 3:
                # clip the learning rate so C stays positive definite
                self.C -= (clip_fac / max(eigvals)) * self._Yneg
                _print_warning(
                    'clipped learning rate for negative weights, ' +
                    'maximal eigenvalue = %f, maxeig * ccov = %f > %f'
                    % (max(eigvals), max(eigvals) * self.sp.neg.cmuexp, clip_fac),
                    iteration=self.countiter)
                if 1 < 3:  # let's check
                    eigvals, eigvecs = self.opts['CMA_eigenmethod'](self.C)
                    self.count_eigen += 1
                    print('new min eigenval = %e, old = %e'
                          % (min(eigvals), min(self.D)**2))
                    if min(eigvals) > 0:
                        print('new cond = %e, old = %e'
                              % (max(eigvals) / min(eigvals),
                                 (max(self.D) / min(self.D))**2))
            else:  # guaranties pos.def. unconditionally
                # dead branch: the ``elif 1 < 3`` above always matches first
                _print_warning('exponential update for negative weights (internally more expensive)',
                               iteration=self.countiter)
                self.update_exponential(self._Yneg, -self.sp.neg.cmuexp)
                # self.C = self.Ypos + Cs * Mh.expms(-self.sp.neg.cmuexp*Csi*self.Yneg*Csi) * Cs
        # Yneg = self.Yneg  # for temporary debugging, can be removed
        self._Yneg = np.zeros((self.N, self.N))
    if hasattr(self.opts['vv'], '__getitem__') and self.opts['vv'][0].startswith('sweep_ccov'):
        self.opts['CMA_const_trace'] = True
    if self.opts['CMA_const_trace'] in (True, 1, 2):  # normalize trace of C
        if self.opts['CMA_const_trace'] == 2:
            s = np.exp(2 * np.mean(np.log(self.D)))  # or geom average of dC?
        else:
            s = np.mean(np.diag(self.C))
        self.C /= s
    dC = np.diag(self.C)
    if max(dC) / min(dC) > 1e8:
        # allows for much larger condition numbers, if axis-parallel
        self.sigma_vec *= np.diag(self.C)**0.5
        self.C = self.correlation_matrix()
        _print_warning('condition in coordinate system exceeded 1e8' +
                       ', rescaled to 1')
    # self.C = np.triu(self.C) + np.triu(self.C,1).T  # should work as well
    # self.D, self.B = eigh(self.C)  # hermitian, ie symmetric C is assumed
    self.decompose_C()
    # assert(sum(self.D-DD) < 1e-6)
    # assert(sum(sum(np.dot(BB, BB.T)-np.eye(self.N))) < 1e-6)
    # assert(sum(sum(np.dot(BB * DD, BB.T) - self.C)) < 1e-6)
    # assert(all(self.B[self.countiter % self.N] == self.B[self.countiter % self.N,:]))
    # qqqqqqqqqq
    # is O(N^3)
    # assert(sum(abs(self.C - np.dot(self.D * self.B, self.B.T))) < N**2*1e-11)
    if 1 < 3 and max(self.D) / min(self.D) > 1e6 and self.gp.isidentity:
        # last resort: absorb the badly conditioned C into the
        # geno-pheno transformation and reset C to the identity
        # TODO: allow to do this again
        # dmean_prev = dot(self.B, (1. / self.D) * dot(self.B.T, (self.mean - 0*self.mean_old) / self.sigma_vec))
        self.gp._tf_matrix = (self.sigma_vec * dot(self.B * self.D, self.B.T).T).T
        self.gp._tf_matrix_inv = (dot(self.B / self.D, self.B.T).T / self.sigma_vec).T
        self.gp.tf_pheno = lambda x: dot(self.gp._tf_matrix, x)
        self.gp.tf_geno = lambda x: dot(self.gp._tf_matrix_inv, x)  # not really necessary
        self.gp.isidentity = False
        assert self.mean is not self.mean_old
        self.mean = self.gp.geno(self.mean)  # same as tf_geno
        self.mean_old = self.gp.geno(self.mean_old)  # not needed?
        self.pc = self.gp.geno(self.pc)
        self.D[:] = 1.0
        self.B = np.eye(self.N)
        self.C = np.eye(self.N)
        self.dC[:] = 1.0
        self.sigma_vec = 1
        # dmean_now = dot(self.B, (1. / self.D) * dot(self.B.T, (self.mean - 0*self.mean_old) / self.sigma_vec))
        # assert Mh.vequals_approximately(dmean_now, dmean_prev)
        _print_warning('\n geno-pheno transformation introduced based on current C,\n injected solutions become "invalid" in this iteration',
                       'updateBD', 'CMAEvolutionStrategy', self.countiter)
    self.itereigenupdated = self.countiter
def multiplyC(self, alpha):
    """Multiply C with the scalar ``alpha`` and keep the derived
    quantities ``dC`` and ``D`` consistent with the scaled C."""
    sqrt_alpha = alpha**0.5
    self.C *= alpha
    # dC may be the very same array object as C (diagonal/separable
    # mode); in that case it has already been scaled above
    if self.dC is not self.C:
        self.dC *= alpha
    self.D *= sqrt_alpha
def update_exponential(self, Z, eta, BDpair=None):
    """exponential update of C that guarantees positive definiteness, that is,
    instead of the assignment ``C = C + eta * Z``,
    we have ``C = C**.5 * exp(eta * C**-.5 * Z * C**-.5) * C**.5``.

    Parameter `Z` should have expectation zero, e.g. sum(w[i] * z[i] * z[i].T) - C
    if E z z.T = C.

    Parameter `eta` is the learning rate, for ``eta == 0`` nothing is updated.

    This function conducts two eigendecompositions, assuming that
    B and D are not up to date, unless `BDpair` is given. Given BDpair,
    B is the eigensystem and D is the vector of sqrt(eigenvalues), one
    eigendecomposition is omitted.

    Reference: Glasmachers et al 2010, Exponential Natural Evolution Strategies
    """
    if eta == 0:
        return
    if BDpair:
        B, D = BDpair
    else:
        D, B = self.opts['CMA_eigenmethod'](self.C)
        self.count_eigen += 1
        D **= 0.5  # eigenvalues -> standard deviations (BDpair already provides these)
    Cs = dot(B, (B * D).T)  # square root of C
    Csi = dot(B, (B / D).T)  # square root of inverse of C
    self.C = dot(Cs, dot(Mh.expms(eta * dot(Csi, dot(Z, Csi)),
                self.opts['CMA_eigenmethod']), Cs))
    self.count_eigen += 1  # the decomposition performed inside Mh.expms
# ____________________________________________________________
# ____________________________________________________________
def feedForResume(self, X, function_values):
    """Given all "previous" candidate solutions and their respective
    function values, the state of a `CMAEvolutionStrategy` object
    can be reconstructed from this history. This is the purpose of
    function `feedForResume`.

    Arguments
    ---------
    `X`
        (all) solution points in chronological order, phenotypic
        representation. The number of points must be a multiple
        of popsize.
    `function_values`
        respective objective function values

    Details
    -------
    `feedForResume` can be called repeatedly with only parts of
    the history. The part must have the length of a multiple
    of the population size.

    `feedForResume` feeds the history in popsize-chunks into `tell`.
    The state of the random number generator might not be
    reconstructed, but this would be only relevant for the future.

    Example
    -------
    ::

        import cma

        # prepare
        (x0, sigma0) = ...  # initial values from previous trial
        X = ...  # list of generated solutions from a previous trial
        f = ...  # respective list of f-values

        # resume
        es = cma.CMAEvolutionStrategy(x0, sigma0)
        es.feedForResume(X, f)

        # continue with func as objective function
        while not es.stop():
            X = es.ask()
            es.tell(X, [func(x) for x in X])

    Credits to Dirk Bueche and Fabrice Marchal for the feeding idea.

    :See: class `CMAEvolutionStrategy` for a simple dump/load to resume
    """
    if self.countiter > 0:
        _print_warning('feed should generally be used with a new object instance')
    if len(X) != len(function_values):
        raise _Error('number of solutions ' + str(len(X)) +
                     ' and number function values ' +
                     str(len(function_values)) + ' must not differ')
    popsize = self.sp.popsize
    if (len(X) % popsize) != 0:
        raise _Error('number of solutions ' + str(len(X)) +
                     ' must be a multiple of popsize (lambda) ' +
                     str(popsize))
    # BUGFIX: was ``for i in rglen((X) / popsize)``, which attempts to
    # divide a list by an int and raises TypeError; iterate over the
    # number of popsize-sized chunks instead
    for i in range(len(X) // popsize):
        # feed in chunks of size popsize
        self.ask()  # a fake ask, mainly for a conditioned calling of
                    # updateBD and secondary to get possibly the same
                    # random state
        self.tell(X[i * popsize:(i + 1) * popsize],
                  function_values[i * popsize:(i + 1) * popsize])
# ____________________________________________________________
# ____________________________________________________________
def readProperties(self):
    """Read dynamic parameters from a property file (not implemented)."""
    message = 'not yet implemented'
    print(message)
# ____________________________________________________________
# ____________________________________________________________
def correlation_matrix(self):
    """Return the correlation matrix derived from ``self.C``, or
    ``None`` when C is kept as a 1-D (diagonal) array."""
    if self.C.ndim <= 1:  # diagonal storage has no off-diagonal terms
        return None
    corr = self.C.copy()
    for j in range(corr.shape[0]):
        # corr[j, j] is untouched by earlier iterations, so this is
        # the original diagonal entry of C
        scale = corr[j, j]**0.5
        corr[:, j] /= scale
        corr[j, :] /= scale
    # symmetrize against floating-point asymmetry
    return (corr + corr.T) / 2.0
def mahalanobis_norm(self, dx):
    """Compute the Mahalanobis norm induced by the adapted sample
    distribution, covariance matrix ``C`` times ``sigma**2``,
    including ``sigma_vec``.

    Argument
    --------
    A *genotype* difference `dx`.

    The expected Mahalanobis distance to the sample mean is about
    ``sqrt(dimension)``; sampled points have a typical distance of
    ``sqrt(2 * N)`` from each other. With ``C = I`` and
    ``sigma = sigma_vec = 1`` this is the Euclidean norm of `dx`.
    """
    # rotate into the eigenbasis and whiten by the axis lengths D
    whitened = self.D**-1. * np.dot(self.B.T, dx / self.sigma_vec)
    return sqrt(sum(whitened**2)) / self.sigma
def _metric_when_multiplied_with_sig_vec(self, sig):
"""return D^-1 B^T diag(sig) B D as a measure for
C^-1/2 diag(sig) C^1/2
:param sig: a vector "used" as diagonal matrix
:return:
"""
return dot((self.B * self.D**-1.).T * sig, self.B * self.D)
def disp_annotation(self):
    """Print the single-line column header used by `disp()`."""
    header = 'Iterat #Fevals function value axis ratio sigma min&max std t[m:s]'
    print(header)
    sys.stdout.flush()
def disp(self, modulo=None):  # TODO: rather assign opt['verb_disp'] as default?
    """prints some single-line infos according to `disp_annotation()`,
    if ``iteration_counter % modulo == 0``

    `modulo` defaults to the ``verb_disp`` option; a falsy `modulo`
    suppresses all output. Returns ``self``.
    """
    if modulo is None:
        modulo = self.opts['verb_disp']
    # console display
    if modulo:
        # re-print the column header every 10th displayed line
        if (self.countiter - 1) % (10 * modulo) < 1:
            self.disp_annotation()
        # always display the first 3 iterations and on termination
        if self.countiter > 0 and (self.stop() or self.countiter < 4
                or self.countiter % modulo < 1):
            if self.opts['verb_time']:
                toc = self.elapsed_time()
                stime = str(int(toc // 60)) + ':' + str(round(toc % 60, 1))
            else:
                stime = ''
            print(' '.join((repr(self.countiter).rjust(5),
                            repr(self.countevals).rjust(6),
                            '%.15e' % (min(self.fit.fit)),
                            '%4.1e' % (self.D.max() / self.D.min()),
                            '%6.2e' % self.sigma,
                            '%6.0e' % (self.sigma * min(self.sigma_vec * sqrt(self.dC))),
                            '%6.0e' % (self.sigma * max(self.sigma_vec * sqrt(self.dC))),
                            stime)))
            # if self.countiter < 4:
            sys.stdout.flush()
    return self
def plot(self):
    """Plot the collected run data via ``self.logger`` and return
    ``self``.

    Plotting failures are reported as warnings instead of being
    raised, so a broken plotting backend never aborts an optimization
    session.
    """
    try:
        self.logger.plot()
    except AttributeError:
        _print_warning('plotting failed, no logger attribute found')
    # BUGFIX: was a bare ``except:``, which would also swallow
    # SystemExit and KeyboardInterrupt; catch only real errors
    except Exception:
        _print_warning(('plotting failed with:', sys.exc_info()[0]),
                       'plot', 'CMAEvolutionStrategy')
    return self
# Default option values for `CMAEvolutionStrategy`, surfaced to users
# through the `CMAOptions` class below. Each value is a string of the
# form "default # description"; the value part is evaluated lazily with
# N and popsize in scope, unless the key contains "filename". A ' #v '
# marker in the value tags the option as versatile (settable any time).
cma_default_options = {
    # the follow string arguments are evaluated if they do not contain "filename"
    'AdaptSigma': 'CMAAdaptSigmaCSA # or any other CMAAdaptSigmaBase class e.g. CMAAdaptSigmaTPA',
    'CMA_active': 'True # negative update, conducted after the original update',
    # 'CMA_activefac': '1 # learning rate multiplier for active update',
    'CMA_cmean': '1 # learning rate for the mean value',
    'CMA_const_trace': 'False # normalize trace, value CMA_const_trace=2 normalizes sum log eigenvalues to zero',
    'CMA_diagonal': '0*100*N/sqrt(popsize) # nb of iterations with diagonal covariance matrix, True for always', # TODO 4/ccov_separable?
    'CMA_eigenmethod': 'np.linalg.eigh # 0=numpy-s eigh, -1=pygsl, otherwise cma.Misc.eig (slower)',
    'CMA_elitist': 'False #v or "initial" or True, elitism likely impairs global search performance',
    'CMA_mirrors': 'popsize < 6 # values <0.5 are interpreted as fraction, values >1 as numbers (rounded), otherwise about 0.16 is used',
    'CMA_mirrormethod': '1 # 0=unconditional, 1=selective, 2==experimental',
    'CMA_mu': 'None # parents selection parameter, default is popsize // 2',
    'CMA_on': 'True # False or 0 for no adaptation of the covariance matrix',
    'CMA_sample_on_sphere_surface': 'False #v all mutation vectors have the same length',
    'CMA_rankmu': 'True # False or 0 for omitting rank-mu update of covariance matrix',
    'CMA_rankmualpha': '0.3 # factor of rank-mu update if mu=1, subject to removal, default might change to 0.0',
    'CMA_dampsvec_fac': 'np.Inf # tentative and subject to changes, 0.5 would be a "default" damping for sigma vector update',
    'CMA_dampsvec_fade': '0.1 # tentative fading out parameter for sigma vector update',
    'CMA_teststds': 'None # factors for non-isotropic initial distr. of C, mainly for test purpose, see CMA_stds for production',
    'CMA_stds': 'None # multipliers for sigma0 in each coordinate, not represented in C, makes scaling_of_variables obsolete',
    # 'CMA_AII': 'False # not yet tested',
    'CSA_dampfac': '1 #v positive multiplier for step-size damping, 0.3 is close to optimal on the sphere',
    'CSA_damp_mueff_exponent': '0.5 # zero would mean no dependency of damping on mueff, useful with CSA_disregard_length option',
    'CSA_disregard_length': 'False #v True is untested',
    'CSA_clip_length_value': 'None #v untested, [0, 0] means disregarding length completely',
    'CSA_squared': 'False #v use squared length for sigma-adaptation ',
    'boundary_handling': 'BoundTransform # or BoundPenalty, unused when ``bounds in (None, [None, None])``',
    'bounds': '[None, None] # lower (=bounds[0]) and upper domain boundaries, each a scalar or a list/vector',
    # , eval_parallel2': 'not in use {"processes": None, "timeout": 12, "is_feasible": lambda x: True} # distributes function calls to processes processes'
    'fixed_variables': 'None # dictionary with index-value pairs like {0:1.1, 2:0.1} that are not optimized',
    'ftarget': '-inf #v target function value, minimization',
    'is_feasible': 'is_feasible #v a function that computes feasibility, by default lambda x, f: f not in (None, np.NaN)',
    'maxfevals': 'inf #v maximum number of function evaluations',
    'maxiter': '100 + 50 * (N+3)**2 // popsize**0.5 #v maximum number of iterations',
    'mean_shift_line_samples': 'False #v sample two new solutions colinear to previous mean shift',
    'mindx': '0 #v minimal std in any direction, cave interference with tol*',
    'minstd': '0 #v minimal std in any coordinate direction, cave interference with tol*',
    'maxstd': 'inf #v maximal std in any coordinate direction',
    'pc_line_samples': 'False #v two line samples along the evolution path pc',
    'popsize': '4+int(3*log(N)) # population size, AKA lambda, number of new solution per iteration',
    'randn': 'np.random.standard_normal #v randn((lam, N)) must return an np.array of shape (lam, N)',
    'scaling_of_variables': 'None # (rather use CMA_stds) scale for each variable, sigma0 is interpreted w.r.t. this scale, in that effective_sigma0 = sigma0*scaling. Internally the variables are divided by scaling_of_variables and sigma is unchanged, default is np.ones(N)',
    'seed': 'None # random number seed',
    'signals_filename': 'cmaes_signals.par # read from this file, e.g. "stop now"',
    'termination_callback': 'None #v a function returning True for termination, called after each iteration step and could be abused for side effects',
    'tolfacupx': '1e3 #v termination when step-size increases by tolfacupx (diverges). That is, the initial step-size was chosen far too small and better solutions were found far away from the initial solution x0',
    'tolupsigma': '1e20 #v sigma/sigma0 > tolupsigma * max(sqrt(eivenvals(C))) indicates "creeping behavior" with usually minor improvements',
    'tolfun': '1e-11 #v termination criterion: tolerance in function value, quite useful',
    'tolfunhist': '1e-12 #v termination criterion: tolerance in function value history',
    'tolstagnation': 'int(100 + 100 * N**1.5 / popsize) #v termination if no improvement over tolstagnation iterations',
    'tolx': '1e-11 #v termination criterion: tolerance in x-changes',
    'transformation': 'None # [t0, t1] are two mappings, t0 transforms solutions from CMA-representation to f-representation (tf_pheno), t1 is the (optional) back transformation, see class GenoPheno',
    'typical_x': 'None # used with scaling_of_variables',
    'updatecovwait': 'None #v number of iterations without distribution update, name is subject to future changes', # TODO: rename: iterwaitupdatedistribution?
    'verbose': '1 #v verbosity e.v. of initial/final message, -1 is very quiet, -9 maximally quiet, not yet fully implemented',
    'verb_append': '0 # initial evaluation counter, if append, do not overwrite output files',
    'verb_disp': '100 #v verbosity: display console output every verb_disp iteration',
    'verb_filenameprefix': 'outcmaes # output filenames prefix',
    'verb_log': '1 #v verbosity: write data to files every verb_log iteration, writing can be time critical on fast to evaluate functions',
    'verb_plot': '0 #v in fmin(): plot() is called every verb_plot iteration',
    'verb_time': 'True #v output timings on console',
    'vv': '0 #? versatile variable for hacking purposes, value found in self.opts["vv"]'
}
class CMAOptions(dict):
"""``CMAOptions()`` returns a dictionary with the available options
and their default values for class ``CMAEvolutionStrategy``.
``CMAOptions('pop')`` returns a subset of recognized options that
contain 'pop' in there keyword name or (default) value or description.
``CMAOptions(opts)`` returns the subset of recognized options in
``dict(opts)``.
Option values can be "written" in a string and, when passed to fmin
or CMAEvolutionStrategy, are evaluated using "N" and "popsize" as
known values for dimension and population size (sample size, number
of new solutions per iteration). All default option values are such
a string.
Details
-------
``CMAOptions`` entries starting with ``tol`` are termination
"tolerances".
For `tolstagnation`, the median over the first and the second half
of at least `tolstagnation` iterations are compared for both, the
per-iteration best and per-iteration median function value.
Example
-------
::
import cma
cma.CMAOptions('tol')
is a shortcut for cma.CMAOptions().match('tol') that returns all options
that contain 'tol' in their name or description.
To set an option
import cma
opts = cma.CMAOptions()
opts.set('tolfun', 1e-12)
opts['tolx'] = 1e-11
:See: `fmin`(), `CMAEvolutionStrategy`, `_CMAParameters`
"""
# @classmethod # self is the class, not the instance
# @property
# def default(self):
# """returns all options with defaults"""
# return fmin([],[])
@staticmethod
def defaults():
"""return a dictionary with default option values and description"""
return dict((str(k), str(v)) for k, v in list(cma_default_options.items()))
# getting rid of the u of u"name" by str(u"name")
# return dict(cma_default_options)
@staticmethod
def versatile_options():
"""return list of options that can be changed at any time (not
only be initialized), however the list might not be entirely up
to date.
The string ' #v ' in the default value indicates a 'versatile'
option that can be changed any time.
"""
return tuple(sorted(i[0] for i in list(CMAOptions.defaults().items()) if i[1].find(' #v ') > 0))
def check(self, options=None):
"""check for ambiguous keys and move attributes into dict"""
self.check_values(options)
self.check_attributes(options)
self.check_values(options)
return self
def check_values(self, options=None):
corrected_key = CMAOptions().corrected_key # caveat: infinite recursion
validated_keys = []
original_keys = []
if options is None:
options = self
for key in options:
correct_key = corrected_key(key)
if correct_key is None:
raise ValueError("""%s is not a valid option""" % key)
if correct_key in validated_keys:
if key == correct_key:
key = original_keys[validated_keys.index(key)]
raise ValueError("%s was not a unique key for %s option"
% (key, correct_key))
validated_keys.append(correct_key)
original_keys.append(key)
return options
def check_attributes(self, opts=None):
"""check for attributes and moves them into the dictionary"""
if opts is None:
opts = self
if 1 < 3:
# the problem with merge is that ``opts['ftarget'] = new_value``
# would be overwritten by the old ``opts.ftarget``.
# The solution here is to empty opts.__dict__ after the merge
if hasattr(opts, '__dict__'):
for key in list(opts.__dict__):
if key in self._attributes:
continue
_print_warning(
"""
An option attribute has been merged into the dictionary,
thereby possibly overwriting the dictionary value, and the
attribute has been removed. Assign options with
``opts['%s'] = value`` # dictionary assignment
or use
``opts.set('%s', value) # here isinstance(opts, CMAOptions)
instead of
``opts.%s = value`` # attribute assignment
""" % (key, key, key), 'check', 'CMAOptions')
opts[key] = opts.__dict__[key] # getattr(opts, key)
delattr(opts, key) # is that cosher?
# delattr is necessary to prevent that the attribute
# overwrites the dict entry later again
return opts
@staticmethod
def merge(self, dict_=None):
"""not is use so far, see check()"""
if dict_ is None and hasattr(self, '__dict__'):
dict_ = self.__dict__
# doesn't work anymore as we have _lock attribute
if dict_ is None:
return self
self.update(dict_)
return self
def __init__(self, s=None, unchecked=False):
"""return an `CMAOptions` instance, either with the default
options, if ``s is None``, or with all options whose name or
description contains `s`, if `s` is a string (case is
disregarded), or with entries from dictionary `s` as options,
not complemented with default options or settings
Returns: see above.
"""
# if not CMAOptions.defaults: # this is different from self.defaults!!!
# CMAOptions.defaults = fmin([],[])
if s is None:
super(CMAOptions, self).__init__(CMAOptions.defaults()) # dict.__init__(self, CMAOptions.defaults()) should be the same
# self = CMAOptions.defaults()
elif isinstance(s, str):
super(CMAOptions, self).__init__(CMAOptions().match(s))
# we could return here
else:
super(CMAOptions, self).__init__(s)
if not unchecked and s is not None:
self.check() # caveat: infinite recursion
for key in list(self.keys()):
correct_key = self.corrected_key(key)
if correct_key not in CMAOptions.defaults():
_print_warning('invalid key ``' + str(key) +
'`` removed', '__init__', 'CMAOptions')
self.pop(key)
elif key != correct_key:
self[correct_key] = self.pop(key)
# self.evaluated = False # would become an option entry
self._lock_setting = False
self._attributes = self.__dict__.copy() # are not valid keys
self._attributes['_attributes'] = len(self._attributes)
def init(self, dict_or_str, val=None, warn=True):
"""initialize one or several options.
Arguments
---------
`dict_or_str`
a dictionary if ``val is None``, otherwise a key.
If `val` is provided `dict_or_str` must be a valid key.
`val`
value for key
Details
-------
Only known keys are accepted. Known keys are in `CMAOptions.defaults()`
"""
# dic = dict_or_key if val is None else {dict_or_key:val}
self.check(dict_or_str)
dic = dict_or_str
if val is not None:
dic = {dict_or_str:val}
for key, val in list(dic.items()):
key = self.corrected_key(key)
if key not in CMAOptions.defaults():
# TODO: find a better solution?
if warn:
print('Warning in cma.CMAOptions.init(): key ' +
str(key) + ' ignored')
else:
self[key] = val
return self
def set(self, dic, val=None, force=False):
"""set can assign versatile options from
`CMAOptions.versatile_options()` with a new value, use `init()`
for the others.
Arguments
---------
`dic`
either a dictionary or a key. In the latter
case, `val` must be provided
`val`
value for `key`, approximate match is sufficient
`force`
force setting of non-versatile options, use with caution
This method will be most probably used with the ``opts`` attribute of
a `CMAEvolutionStrategy` instance.
"""
if val is not None: # dic is a key in this case
dic = {dic:val} # compose a dictionary
for key_original, val in list(dict(dic).items()):
key = self.corrected_key(key_original)
if not self._lock_setting or \
key in CMAOptions.versatile_options():
self[key] = val
else:
_print_warning('key ' + str(key_original) +
' ignored (not recognized as versatile)',
'set', 'CMAOptions')
return self # to allow o = CMAOptions(o).set(new)
def complement(self):
"""add all missing options with their default values"""
# add meta-parameters, given options have priority
self.check()
for key in CMAOptions.defaults():
if key not in self:
self[key] = CMAOptions.defaults()[key]
return self
def settable(self):
"""return the subset of those options that are settable at any
time.
Settable options are in `versatile_options()`, but the
list might be incomplete.
"""
return CMAOptions([i for i in list(self.items())
if i[0] in CMAOptions.versatile_options()])
def __call__(self, key, default=None, loc=None):
"""evaluate and return the value of option `key` on the fly, or
returns those options whose name or description contains `key`,
case disregarded.
Details
-------
Keys that contain `filename` are not evaluated.
For ``loc==None``, `self` is used as environment
but this does not define ``N``.
:See: `eval()`, `evalall()`
"""
try:
val = self[key]
except:
return self.match(key)
if loc is None:
loc = self # TODO: this hack is not so useful: popsize could be there, but N is missing
try:
if isinstance(val, str):
val = val.split('#')[0].strip() # remove comments
if isinstance(val, str) and \
key.find('filename') < 0:
# and key.find('mindx') < 0:
val = eval(val, globals(), loc)
# invoke default
# TODO: val in ... fails with array type, because it is applied element wise!
# elif val in (None,(),[],{}) and default is not None:
elif val is None and default is not None:
val = eval(str(default), globals(), loc)
except:
pass # slighly optimistic: the previous is bug-free
return val
def corrected_key(self, key):
"""return the matching valid key, if ``key.lower()`` is a unique
starting sequence to identify the valid key, ``else None``
"""
matching_keys = []
for allowed_key in CMAOptions.defaults():
if allowed_key.lower() == key.lower():
return allowed_key
if allowed_key.lower().startswith(key.lower()):
matching_keys.append(allowed_key)
return matching_keys[0] if len(matching_keys) == 1 else None
def eval(self, key, default=None, loc=None, correct_key=True):
"""Evaluates and sets the specified option value in
environment `loc`. Many options need ``N`` to be defined in
`loc`, some need `popsize`.
Details
-------
Keys that contain 'filename' are not evaluated.
For `loc` is None, the self-dict is used as environment
:See: `evalall()`, `__call__`
"""
# TODO: try: loc['dim'] = loc['N'] etc
if correct_key:
# in_key = key # for debugging only
key = self.corrected_key(key)
self[key] = self(key, default, loc)
return self[key]
def evalall(self, loc=None, defaults=None):
"""Evaluates all option values in environment `loc`.
:See: `eval()`
"""
self.check()
if defaults is None:
defaults = cma_default_options
# TODO: this needs rather the parameter N instead of loc
if 'N' in loc: # TODO: __init__ of CMA can be simplified
popsize = self('popsize', defaults['popsize'], loc)
for k in list(self.keys()):
k = self.corrected_key(k)
self.eval(k, defaults[k],
{'N':loc['N'], 'popsize':popsize})
self._lock_setting = True
return self
def match(self, s=''):
    """return all options that match, in the name or the description,
    with string `s`, case is disregarded.

    Example: ``cma.CMAOptions().match('verb')`` returns the verbosity
    options.
    """
    needle = s.lower()
    found = {}
    for key in sorted(self):
        entry = str(key) + "='" + str(self[key]) + "'"
        if needle in entry.lower():
            found[key] = self[key]
    return CMAOptions(found, unchecked=True)
def pp(self):
    """alias for `pprint` (resolved as a global name at call time)"""
    pprint(self)

def pprint(self, linebreak=80):
    """print one ``key='value'`` entry per option, breaking long
    entries at whitespace so that no printed line reaches
    ``linebreak`` characters; every chunk gets a leading blank and
    continuation chunks are indented by one additional blank
    """
    for entry in sorted(self.items()):
        words = (str(entry[0]) + "='" + str(entry[1]) + "'").split(' ')
        line = ''  # first chunk starts at the left margin
        while words:
            while words and len(line) + len(words[0]) < linebreak:
                line += ' ' + words.pop(0)
            print(line)
            line = ' '  # indent for subsequent chunks
print_ = pprint  # Python style to prevent clash with keywords
printme = pprint
# ____________________________________________________________
# ____________________________________________________________
class _CMAStopDict(dict):
    """keep and update a termination condition dictionary, which is
    "usually" empty and returned by `CMAEvolutionStrategy.stop()`.

    The class methods entirely depend on `CMAEvolutionStrategy` class
    attributes.

    Details
    -------
    This class is not relevant for the end-user and could be a nested
    class, but nested classes cannot be serialized.

    Example
    -------
    >>> import cma
    >>> es = cma.CMAEvolutionStrategy(4 * [1], 1, {'verbose':-1})
    >>> print(es.stop())
    {}
    >>> es.optimize(cma.fcts.sphere, verb_disp=0)
    >>> print(es.stop())
    {'tolfun': 1e-11}

    :See: `OOOptimizer.stop()`, `CMAEvolutionStrategy.stop()`
    """
    def __init__(self, d={}):
        # NOTE(review): the mutable default ``d={}`` is only read,
        # never mutated, so it is harmless here.
        update = isinstance(d, CMAEvolutionStrategy)
        super(_CMAStopDict, self).__init__({} if update else d)
        self._stoplist = []  # to keep multiple entries
        self.lastiter = 0  # probably not necessary
        if isinstance(d, _CMAStopDict):  # inherit
            self._stoplist = d._stoplist  # multiple entries
            self.lastiter = d.lastiter  # probably not necessary
        if update:
            self._update(d)

    def __call__(self, es=None, check=True):
        """update and return the termination conditions dictionary

        ``es`` defaults to the optimizer last seen by `_update`.
        NOTE(review): if `_update` was never called, ``self.es``
        does not exist yet and this raises `AttributeError` instead
        of the intended `ValueError` -- confirm callers always
        update first.
        """
        if not check:
            return self
        if es is None and self.es is None:
            raise ValueError('termination conditions need an optimizer to act upon')
        self._update(es)
        return self

    def _update(self, es):
        """Test termination criteria and update dictionary

        Each satisfied criterion is recorded via `_addstop`; the
        dictionary is rebuilt from scratch on every call.
        """
        if es is None:
            es = self.es
        assert es is not None
        if es.countiter == 0:  # in this case termination tests fail
            self.__init__()
            return self
        self.lastiter = es.countiter
        self.es = es
        self.clear()  # compute conditions from scratch
        N = es.N
        opts = es.opts
        self.opts = opts  # a hack to get _addstop going
        # fitness: generic criterion, user defined w/o default
        self._addstop('ftarget',
                      es.best.f < opts['ftarget'])
        # maxiter, maxfevals: generic criteria
        self._addstop('maxfevals',
                      es.countevals - 1 >= opts['maxfevals'])
        self._addstop('maxiter',
                      ## meta_parameters.maxiter_multiplier == 1.0
                      es.countiter >= 1.0 * opts['maxiter'])
        # tolx, tolfacupx: generic criteria
        # tolfun, tolfunhist (CEC:tolfun includes hist)
        self._addstop('tolx',
                      all([es.sigma * xi < opts['tolx'] for xi in es.sigma_vec * es.pc]) and
                      all([es.sigma * xi < opts['tolx'] for xi in es.sigma_vec * sqrt(es.dC)]))
        self._addstop('tolfacupx',
                      any(es.sigma * es.sigma_vec * sqrt(es.dC) >
                          es.sigma0 * es.sigma_vec0 * opts['tolfacupx']))
        self._addstop('tolfun',
                      es.fit.fit[-1] - es.fit.fit[0] < opts['tolfun'] and
                      max(es.fit.hist) - min(es.fit.hist) < opts['tolfun'])
        self._addstop('tolfunhist',
                      len(es.fit.hist) > 9 and
                      max(es.fit.hist) - min(es.fit.hist) < opts['tolfunhist'])
        # worst seen false positive: table N=80,lam=80, getting worse for fevals=35e3 \approx 50 * N**1.5
        # but the median is not so much getting worse
        # / 5 reflects the sparsity of histbest/median
        # / 2 reflects the left and right part to be compared
        ## meta_parameters.tolstagnation_multiplier == 1.0
        l = int(max(( 1.0 * opts['tolstagnation'] / 5. / 2, len(es.fit.histbest) / 10)))
        # TODO: why max(..., len(histbest)/10) ???
        # TODO: the problem in the beginning is only with best ==> ???
        # equality should handle flat fitness
        self._addstop('tolstagnation',  # leads sometimes early stop on ftablet, fcigtab, N>=50?
                      1 < 3 and opts['tolstagnation'] and es.countiter > N * (5 + 100 / es.popsize) and
                      len(es.fit.histbest) > 100 and 2 * l < len(es.fit.histbest) and
                      np.median(es.fit.histmedian[:l]) >= np.median(es.fit.histmedian[l:2 * l]) and
                      np.median(es.fit.histbest[:l]) >= np.median(es.fit.histbest[l:2 * l]))
        # iiinteger: stagnation termination can prevent to find the optimum
        self._addstop('tolupsigma', opts['tolupsigma'] and
                      es.sigma / np.max(es.D) > es.sigma0 * opts['tolupsigma'])
        if 1 < 3:
            # non-user defined, method specific
            # noeffectaxis (CEC: 0.1sigma), noeffectcoord (CEC:0.2sigma), conditioncov
            idx = np.where(es.mean == es.mean + 0.2 * es.sigma *
                           es.sigma_vec * es.dC**0.5)[0]
            self._addstop('noeffectcoord', any(idx), idx)
            # any([es.mean[i] == es.mean[i] + 0.2 * es.sigma *
            #         (es.sigma_vec if isscalar(es.sigma_vec) else es.sigma_vec[i]) *
            #         sqrt(es.dC[i])
            #         for i in xrange(N)])
            # )
            if opts['CMA_diagonal'] is not True and es.countiter > opts['CMA_diagonal']:
                i = es.countiter % N
                self._addstop('noeffectaxis',
                              sum(es.mean == es.mean + 0.1 * es.sigma * es.D[i] * es.B[:, i]) == N)
            self._addstop('conditioncov',
                          es.D[-1] > 1e7 * es.D[0], 1e14)  # TODO
            self._addstop('callback', es.callbackstop)  # termination_callback
        try:
            # allow a termination request through an external signals file
            with open(self.opts['signals_filename'], 'r') as f:
                for line in f.readlines():
                    words = line.split()
                    if len(words) < 2 or words[0].startswith(('#', '%')):
                        continue
                    if words[0] == 'stop' and words[1] == 'now':
                        if len(words) > 2 and not words[2].startswith(
                                self.opts['verb_filenameprefix']):
                            continue
                        self._addstop('file_signal', True, "stop now")
                        break
        except IOError:
            pass  # a missing signals file is the normal case
        if len(self):
            self._addstop('flat fitness: please (re)consider how to compute the fitness more elaborate',
                          len(es.fit.hist) > 9 and
                          max(es.fit.hist) == min(es.fit.hist))
        return self

    def _addstop(self, key, cond, val=None):
        """record termination reason ``key`` if ``cond`` is true;
        ``val`` defaults to the corresponding option value"""
        if cond:
            self.stoplist.append(key)  # can have the same key twice
            self[key] = val if val is not None \
                else self.opts.get(key, None)

    def clear(self):
        """empty the dictionary and reset the list of reasons"""
        for k in list(self):
            self.pop(k)
        # NOTE(review): ``stoplist`` here is distinct from the
        # ``_stoplist`` set in __init__; ``clear`` runs before any
        # `_addstop`, so the attribute always exists when appended to.
        self.stoplist = []
# ____________________________________________________________
# ____________________________________________________________
class _CMAParameters(object):
    """strategy parameters like population size and learning rates.

    Note:
        contrary to `CMAOptions`, `_CMAParameters` is not (yet) part of the
        "user-interface" and subject to future changes (it might become
        a `collections.namedtuple`)

    Example
    -------
    >>> import cma
    >>> es = cma.CMAEvolutionStrategy(20 * [0.1], 1)
    (6_w,12)-CMA-ES (mu_w=3.7,w_1=40%) in dimension 20 (seed=504519190) # the seed is "random" by default
    >>>
    >>> type(es.sp)  # sp contains the strategy parameters
    <class 'cma._CMAParameters'>
    >>>
    >>> es.sp.disp()
    {'CMA_on': True,
     'N': 20,
     'c1': 0.004181139918745593,
     'c1_sep': 0.034327992810300939,
     'cc': 0.17176721127681213,
     'cc_sep': 0.25259494835857677,
     'cmean': 1.0,
     'cmu': 0.0085149624979034746,
     'cmu_sep': 0.057796356229390715,
     'cs': 0.21434997799189287,
     'damps': 1.2143499779918929,
     'mu': 6,
     'mu_f': 6.0,
     'mueff': 3.7294589343030671,
     'popsize': 12,
     'rankmualpha': 0.3,
     'weights': array([ 0.40240294,  0.25338908,  0.16622156,  0.10437523,  0.05640348,
            0.01720771])}
    >>>
    >> es.sp == cma._CMAParameters(20, 12, cma.CMAOptions().evalall({'N': 20}))
    True

    :See: `CMAOptions`, `CMAEvolutionStrategy`
    """
    def __init__(self, N, opts, ccovfac=1, verbose=True):
        """Compute strategy parameters, mainly depending on
        dimension and population size, by calling `set`
        """
        self.N = N
        if ccovfac == 1:
            ccovfac = opts['CMA_on']  # that's a hack
        self.popsize = None  # declaring the attribute, not necessary though
        self.set(opts, ccovfac=ccovfac, verbose=verbose)

    def set(self, opts, popsize=None, ccovfac=1, verbose=True):
        """Compute strategy parameters as a function
        of dimension and population size """
        alpha_cc = 1.0  # cc-correction for mueff, was zero before

        def conedf(df, mu, N):
            """used for computing separable learning rate"""
            return 1. / (df + 2.*sqrt(df) + float(mu) / N)

        def cmudf(df, mu, alphamu):
            """used for computing separable learning rate"""
            return (alphamu + mu - 2. + 1. / mu) / (df + 4.*sqrt(df) + mu / 2.)

        sp = self
        N = sp.N
        if popsize:
            opts.evalall({'N':N, 'popsize':popsize})
        else:
            popsize = opts.evalall({'N':N})['popsize']  # the default popsize is computed in CMAOptions()
        ## meta_parameters.lambda_exponent == 0.0
        popsize = int(popsize + N** 0.0 - 1)
        sp.popsize = popsize
        # number of mirrored samples, see option CMA_mirrors
        if opts['CMA_mirrors'] < 0.5:
            sp.lam_mirr = int(0.5 + opts['CMA_mirrors'] * popsize)
        elif opts['CMA_mirrors'] > 1:
            sp.lam_mirr = int(0.5 + opts['CMA_mirrors'])
        else:
            sp.lam_mirr = int(0.5 + 0.16 * min((popsize, 2 * N + 2)) + 0.29)  # 0.158650... * popsize is optimal
            # lam = arange(2,22)
            # mirr = 0.16 + 0.29/lam
            # print(lam); print([int(0.5 + l) for l in mirr*lam])
            # [ 2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21]
            # [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4]
        ## meta_parameters.parent_fraction == 0.5
        sp.mu_f = 0.5 * sp.popsize  # float value of mu
        if opts['CMA_mu'] is not None:
            sp.mu_f = opts['CMA_mu']
        sp.mu = int(sp.mu_f + 0.499999)  # round down for x.5
        sp.mu = max((sp.mu, 1))
        # in principle we have mu_opt = popsize/2 + lam_mirr/2,
        # which means in particular weights should only be negative for q > 0.5+mirr_frac/2
        if sp.mu > sp.popsize - 2 * sp.lam_mirr + 1:
            _print_warning("pairwise selection is not implemented, therefore " +
                           " mu = %d > %d = %d - 2*%d + 1 = popsize - 2*mirr + 1 can produce a bias" % (
                               sp.mu, sp.popsize - 2 * sp.lam_mirr + 1, sp.popsize, sp.lam_mirr))
        if sp.lam_mirr > sp.popsize // 2:
            raise _Error("fraction of mirrors in the population as read from option CMA_mirrors cannot be larger 0.5, " +
                         "theoretically optimal is 0.159")
        # recombination weights: log-linear, normalized to sum to one
        sp.weights = log(max([sp.mu, sp.popsize / 2.0]) + 0.5) - log(1 + np.arange(sp.mu))
        sp.weights /= sum(sp.weights)
        sp.mueff = 1 / sum(sp.weights**2)  # variance-effective selection mass
        # cs: presumably the step-size path learning rate used by CSA
        # TODO: this will disappear, as it is done in class CMAAdaptSigmaCSA
        ## meta_parameters.cs_exponent == 1.0
        b = 1.0
        ## meta_parameters.cs_multiplier == 1.0
        sp.cs = 1.0 * (sp.mueff + 2)**b / (N + (sp.mueff + 3)**b)  # TODO: this doesn't change dependency of dimension
        # sp.cs = (sp.mueff + 2) / (N + 1.5*sp.mueff + 1)
        ## meta_parameters.cc_exponent == 1.0
        b = 1.0
        ## meta_parameters.cc_multiplier == 1.0
        # cc: evolution-path (pc) cumulation factor
        sp.cc = 1.0 * \
                (4 + alpha_cc * sp.mueff / N)**b / \
                (N**b + (4 + alpha_cc * 2 * sp.mueff / N)**b)
        sp.cc_sep = (1 + 1 / N + alpha_cc * sp.mueff / N) / (N**0.5 + 1 / N + alpha_cc * 2 * sp.mueff / N)  # \not\gg\cc
        if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov1':
            ## meta_parameters.cc_multiplier == 1.0
            sp.cc = 1.0 * (4 + sp.mueff / N)**0.5 / ((N + 4)**0.5 + (2 * sp.mueff / N)**0.5)
        sp.rankmualpha = opts['CMA_rankmualpha']
        # sp.rankmualpha = _evalOption(opts['CMA_rankmualpha'], 0.3)
        # c1: rank-one covariance update learning rate
        ## meta_parameters.c1_multiplier == 1.0
        sp.c1 = ( 1.0 * ccovfac * min(1, sp.popsize / 6) *
                 ## meta_parameters.c1_exponent == 2.0
                 2 / ((N + 1.3)** 2.0 + sp.mueff))
        # 1/0
        sp.c1_sep = ccovfac * conedf(N, sp.mueff, N)
        if opts['CMA_rankmu'] != 0:  # also empty
            # cmu: rank-mu covariance update learning rate
            ## meta_parameters.cmu_multiplier == 2.0
            alphacov, mu = 2.0 , sp.mueff
            sp.cmu = min(1 - sp.c1, ccovfac * alphacov *
                         ## meta_parameters.cmu_exponent == 2.0
                         (sp.rankmualpha + mu - 2 + 1 / mu) / ((N + 2)** 2.0 + alphacov * mu / 2))
            if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov':
                sp.cmu = opts['vv'][1]
            sp.cmu_sep = min(1 - sp.c1_sep, ccovfac * cmudf(N, sp.mueff, sp.rankmualpha))
        else:
            sp.cmu = sp.cmu_sep = 0
        if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov1':
            sp.c1 = opts['vv'][1]
        # negative ("active") covariance update parameters
        sp.neg = _BlancClass()
        if opts['CMA_active'] and opts['CMA_on']:
            # in principle we have mu_opt = popsize/2 + lam_mirr/2,
            # which means in particular weights should only be negative for q > 0.5+mirr_frac/2
            if 1 < 3:  # seems most natural: continuation of log(lambda/2) - log(k) qqqqqqqqqqqqqqqqqqqqqqqqqq
                sp.neg.mu_f = popsize // 2  # not sure anymore what this is good for
                sp.neg.weights = array([log(k) - log(popsize/2 + 1/2) for k in np.arange(np.ceil(popsize/2 + 1.1/2), popsize + .1)])
            sp.neg.mu = len(sp.neg.weights)
            sp.neg.weights /= sum(sp.neg.weights)
            sp.neg.mueff = 1 / sum(sp.neg.weights**2)
            ## meta_parameters.cact_exponent == 1.5
            sp.neg.cmuexp = opts['CMA_active'] * 0.3 * sp.neg.mueff / ((N + 2)** 1.5 + 1.0 * sp.neg.mueff)
            if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov_neg':
                sp.neg.cmuexp = opts['vv'][1]
            # reasoning on learning rate cmuexp: with sum |w| == 1 and
            #   length-normalized vectors in the update, the residual
            #   variance in any direction exceeds exp(-N*cmuexp)
            assert sp.neg.mu >= sp.lam_mirr  # not really necessary
            # sp.neg.minresidualvariance = 0.66  # not it use, keep at least 0.66 in all directions, small popsize is most critical
        else:
            sp.neg.cmuexp = 0
        sp.CMA_on = sp.c1 + sp.cmu > 0
        # print(sp.c1_sep / sp.cc_sep)
        if not opts['CMA_on'] and opts['CMA_on'] not in (None, [], (), ''):
            sp.CMA_on = False
            # sp.c1 = sp.cmu = sp.c1_sep = sp.cmu_sep = 0
        mueff_exponent = 0.5
        if 1 < 3:
            mueff_exponent = opts['CSA_damp_mueff_exponent']
        # damps: step-size damping factor
        # TODO: this will disappear, as it is done in class CMAAdaptSigmaCSA
        sp.damps = opts['CSA_dampfac'] * (0.5 +
                                          0.5 * min([1, (sp.lam_mirr / (0.159 * sp.popsize) - 1)**2])**1 +
                                          2 * max([0, ((sp.mueff - 1) / (N + 1))**mueff_exponent - 1]) + sp.cs
                                          )
        sp.cmean = float(opts['CMA_cmean'])  # learning rate for the mean
        # sp.kappa = 1  # 4-D, lam=16, rank1, kappa < 4 does not influence convergence rate
        #               # in larger dim it does, 15-D with defaults, kappa=8 factor 2
        if verbose:
            if not sp.CMA_on:
                print('covariance matrix adaptation turned off')
            if opts['CMA_mu'] != None:
                print('mu = %f' % (sp.mu_f))
        # return self  # the constructor returns itself

    def disp(self):
        """pretty-print all strategy parameters (the instance dict)"""
        pprint(self.__dict__)
def fmin(objective_function, x0, sigma0,
         options=None,
         args=(),
         gradf=None,
         restarts=0,
         restart_from_best='False',
         incpopsize=2,
         eval_initial_x=False,
         noise_handler=None,
         noise_change_sigma_exponent=1,
         noise_kappa_exponent=0,  # TODO: add max kappa value as parameter
         bipop=False):
    """functional interface to the stochastic optimizer CMA-ES
for non-convex function minimization.
Calling Sequences
=================
``fmin(objective_function, x0, sigma0)``
minimizes `objective_function` starting at `x0` and with standard deviation
`sigma0` (step-size)
``fmin(objective_function, x0, sigma0, options={'ftarget': 1e-5})``
minimizes `objective_function` up to target function value 1e-5, which
is typically useful for benchmarking.
``fmin(objective_function, x0, sigma0, args=('f',))``
minimizes `objective_function` called with an additional argument ``'f'``.
``fmin(objective_function, x0, sigma0, options={'ftarget':1e-5, 'popsize':40})``
uses additional options ``ftarget`` and ``popsize``
``fmin(objective_function, esobj, None, options={'maxfevals': 1e5})``
uses the `CMAEvolutionStrategy` object instance `esobj` to optimize
`objective_function`, similar to `esobj.optimize()`.
Arguments
=========
`objective_function`
function to be minimized. Called as ``objective_function(x,
*args)``. `x` is a one-dimensional `numpy.ndarray`.
`objective_function` can return `numpy.NaN`,
which is interpreted as outright rejection of solution `x`
and invokes an immediate resampling and (re-)evaluation
of a new solution not counting as function evaluation.
`x0`
list or `numpy.ndarray`, initial guess of minimum solution
before the application of the geno-phenotype transformation
according to the ``transformation`` option. It can also be
a string holding a Python expression that is evaluated
to yield the initial guess - this is important in case
restarts are performed so that they start from different
places. Otherwise `x0` can also be a `cma.CMAEvolutionStrategy`
object instance, in that case `sigma0` can be ``None``.
`sigma0`
scalar, initial standard deviation in each coordinate.
`sigma0` should be about 1/4th of the search domain width
(where the optimum is to be expected). The variables in
`objective_function` should be scaled such that they
presumably have similar sensitivity.
See also option `scaling_of_variables`.
`options`
a dictionary with additional options passed to the constructor
of class ``CMAEvolutionStrategy``, see ``cma.CMAOptions()``
for a list of available options.
``args=()``
arguments to be used to call the `objective_function`
``gradf``
gradient of f, where ``len(gradf(x, *args)) == len(x)``.
`gradf` is called once in each iteration if
``gradf is not None``.
``restarts=0``
number of restarts with increasing population size, see also
parameter `incpopsize`, implementing the IPOP-CMA-ES restart
strategy, see also parameter `bipop`; to restart from
different points (recommended), pass `x0` as a string.
``restart_from_best=False``
which point to restart from
``incpopsize=2``
multiplier for increasing the population size `popsize` before
each restart
``eval_initial_x=None``
evaluate initial solution, for `None` only with elitist option
``noise_handler=None``
a ``NoiseHandler`` instance or ``None``, a simple usecase is
``cma.fmin(f, 6 * [1], 1, noise_handler=cma.NoiseHandler(6))``
see ``help(cma.NoiseHandler)``.
``noise_change_sigma_exponent=1``
exponent for sigma increment for additional noise treatment
``noise_evaluations_as_kappa``
instead of applying reevaluations, the "number of evaluations"
is (ab)used as scaling factor kappa (experimental).
``bipop``
if True, run as BIPOP-CMA-ES; BIPOP is a special restart
strategy switching between two population sizings - small
(like the default CMA, but with more focused search) and
large (progressively increased as in IPOP). This makes the
algorithm perform well both on functions with many regularly
or irregularly arranged local optima (the latter by frequently
restarting with small populations). For the `bipop` parameter
to actually take effect, also select non-zero number of
(IPOP) restarts; the recommended setting is ``restarts<=9``
and `x0` passed as a string. Note that small-population
restarts do not count into the total restart count.
Optional Arguments
==================
All values in the `options` dictionary are evaluated if they are of
type `str`, besides `verb_filenameprefix`, see class `CMAOptions` for
details. The full list is available via ``cma.CMAOptions()``.
>>> import cma
>>> cma.CMAOptions()
Subsets of options can be displayed, for example like
``cma.CMAOptions('tol')``, or ``cma.CMAOptions('bound')``,
see also class `CMAOptions`.
Return
======
Return the list provided by `CMAEvolutionStrategy.result()` appended
with termination conditions, an `OOOptimizer` and a `BaseDataLogger`::
res = es.result() + (es.stop(), es, logger)
where
- ``res[0]`` (``xopt``) -- best evaluated solution
- ``res[1]`` (``fopt``) -- respective function value
- ``res[2]`` (``evalsopt``) -- respective number of function evaluations
- ``res[3]`` (``evals``) -- number of overall conducted objective function evaluations
- ``res[4]`` (``iterations``) -- number of overall conducted iterations
- ``res[5]`` (``xmean``) -- mean of the final sample distribution
- ``res[6]`` (``stds``) -- effective stds of the final sample distribution
- ``res[-3]`` (``stop``) -- termination condition(s) in a dictionary
- ``res[-2]`` (``cmaes``) -- class `CMAEvolutionStrategy` instance
- ``res[-1]`` (``logger``) -- class `CMADataLogger` instance
Details
=======
This function is an interface to the class `CMAEvolutionStrategy`. The
latter class should be used when full control over the iteration loop
of the optimizer is desired.
Examples
========
The following example calls `fmin` optimizing the Rosenbrock function
in 10-D with initial solution 0.1 and initial step-size 0.5. The
options are specified for the usage with the `doctest` module.
>>> import cma
>>> # cma.CMAOptions()  # returns all possible options
>>> options = {'CMA_diagonal':100, 'seed':1234, 'verb_time':0}
>>>
>>> res = cma.fmin(cma.fcts.rosen, [0.1] * 10, 0.5, options)
(5_w,10)-CMA-ES (mu_w=3.2,w_1=45%) in dimension 10 (seed=1234)
Covariance matrix is diagonal for 10 iterations (1/ccov=29.0)
Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
    1      10 1.264232686260072e+02 1.1e+00 4.40e-01  4e-01  4e-01
    2      20 1.023929748193649e+02 1.1e+00 4.00e-01  4e-01  4e-01
    3      30 1.214724267489674e+02 1.2e+00 3.70e-01  3e-01  4e-01
  100    1000 6.366683525319511e+00 6.2e+00 2.49e-02  9e-03  3e-02
  200    2000 3.347312410388666e+00 1.2e+01 4.52e-02  8e-03  4e-02
  300    3000 1.027509686232270e+00 1.3e+01 2.85e-02  5e-03  2e-02
  400    4000 1.279649321170636e-01 2.3e+01 3.53e-02  3e-03  3e-02
  500    5000 4.302636076186532e-04 4.6e+01 4.78e-03  3e-04  5e-03
  600    6000 6.943669235595049e-11 5.1e+01 5.41e-06  1e-07  4e-06
  650    6500 5.557961334063003e-14 5.4e+01 1.88e-07  4e-09  1e-07
termination on tolfun : 1e-11
final/bestever f-value = 5.55796133406e-14 2.62435631419e-14
mean solution:  [ 1.          1.00000001  1.          1.
    1.          1.00000001  1.00000002  1.00000003 ...]
std deviation: [ 3.9193387e-09  3.7792732e-09  4.0062285e-09  4.6605925e-09
    5.4966188e-09   7.4377745e-09   1.3797207e-08   2.6020765e-08 ...]
>>>
>>> print('best solutions fitness = %f' % (res[1]))
best solutions fitness = 2.62435631419e-14
>>> assert res[1] < 1e-12
The above call is pretty much equivalent with the slightly more
verbose call::
es = cma.CMAEvolutionStrategy([0.1] * 10, 0.5,
options=options).optimize(cma.fcts.rosen)
The following example calls `fmin` optimizing the Rastrigin function
in 3-D with random initial solution in [-2,2], initial step-size 0.5
and the BIPOP restart strategy (that progressively increases population).
The options are specified for the usage with the `doctest` module.
>>> import cma
>>> # cma.CMAOptions()  # returns all possible options
>>> options = {'seed':12345, 'verb_time':0, 'ftarget': 1e-8}
>>>
>>> res = cma.fmin(cma.fcts.rastrigin, '2. * np.random.rand(3) - 1', 0.5,
...                options, restarts=9, bipop=True)
(3_w,7)-aCMA-ES (mu_w=2.3,w_1=58%) in dimension 3 (seed=12345)
Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
    1       7 1.633489455763566e+01 1.0e+00 4.35e-01  4e-01  4e-01
    2      14 9.762462950258016e+00 1.2e+00 4.12e-01  4e-01  4e-01
    3      21 2.461107851413725e+01 1.4e+00 3.78e-01  3e-01  4e-01
  100     700 9.949590571272680e-01 1.7e+00 5.07e-05  3e-07  5e-07
  123     861 9.949590570932969e-01 1.3e+00 3.93e-06  9e-09  1e-08
termination on tolfun=1e-11
final/bestever f-value = 9.949591e-01 9.949591e-01
mean solution: [  9.94958638e-01  -7.19265205e-10   2.09294450e-10]
std deviation: [  8.71497860e-09   8.58994807e-09   9.85585654e-09]
[...]
(4_w,9)-aCMA-ES (mu_w=2.8,w_1=49%) in dimension 3 (seed=12349)
Iterat #Fevals   function value     axis ratio  sigma   minstd maxstd min:sec
    1  5342.0 2.114883315350800e+01 1.0e+00 3.42e-02  3e-02  4e-02
    2  5351.0 1.810102940125502e+01 1.4e+00 3.79e-02  3e-02  4e-02
    3  5360.0 1.340222457448063e+01 1.4e+00 4.58e-02  4e-02  6e-02
   50  5783.0 8.631491965616078e-09 1.6e+00 2.01e-04  8e-06  1e-05
termination on ftarget=1e-08 after 4 restarts
final/bestever f-value = 8.316963e-09 8.316963e-09
mean solution: [ -3.10652459e-06   2.77935436e-06  -4.95444519e-06]
std deviation: [  1.02825265e-05   8.08348144e-06   8.47256408e-06]
In either case, the method::
cma.plot();
(based on `matplotlib.pyplot`) produces a plot of the run and, if
necessary::
cma.show()
shows the plot in a window. Finally::
cma.savefig('myfirstrun')  # savefig from matplotlib.pyplot
will save the figure in a png.
We can use the gradient like
>>> import cma
>>> res = cma.fmin(cma.fcts.rosen, np.zeros(10), 0.1,
...             options = {'ftarget':1e-8,},
...             gradf=cma.fcts.grad_rosen,
...         )
>>> assert cma.fcts.rosen(res[0]) < 1e-8
>>> assert res[2] < 3600  # 1% are > 3300
>>> assert res[3] < 3600  # 1% are > 3300
:See: `CMAEvolutionStrategy`, `OOOptimizer.optimize(), `plot()`,
`CMAOptions`, `scipy.optimize.fmin()`
    """  # style guides say there should be the above empty line
    # ``if 1 < 3`` is a scaffold kept from a (currently disabled)
    # try/except KeyboardInterrupt; the ``else`` branch below is dead code.
    if 1 < 3:  # try: # pass on KeyboardInterrupt
        if not objective_function:  # cma.fmin(0, 0, 0)
            return CMAOptions()  # these opts are by definition valid
        fmin_options = locals().copy()  # archive original options
        del fmin_options['objective_function']
        del fmin_options['x0']
        del fmin_options['sigma0']
        del fmin_options['options']
        del fmin_options['args']
        if options is None:
            options = cma_default_options
        CMAOptions().check_attributes(options)  # might modify options
        # checked that no options.ftarget =
        opts = CMAOptions(options.copy()).complement()
        # BIPOP-related variables:
        runs_with_small = 0
        small_i = []
        large_i = []
        popsize0 = None  # to be evaluated after the first iteration
        maxiter0 = None  # to be evaluated after the first iteration
        base_evals = 0
        irun = 0
        best = BestSolution()
        while True:  # restart loop
            sigma_factor = 1
            # Adjust the population according to BIPOP after a restart.
            if not bipop:
                # BIPOP not in use, simply double the previous population
                # on restart.
                if irun > 0:
                    popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
                    opts['popsize'] = popsize0 * popsize_multiplier
            elif irun == 0:
                # Initial run is with "normal" population size; it is
                # the large population before first doubling, but its
                # budget accounting is the same as in case of small
                # population.
                poptype = 'small'
            elif sum(small_i) < sum(large_i):
                # An interweaved run with small population size
                poptype = 'small'
                runs_with_small += 1  # _Before_ it's used in popsize_lastlarge
                sigma_factor = 0.01 ** np.random.uniform()  # Local search
                popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
                opts['popsize'] = np.floor(popsize0 * popsize_multiplier ** (np.random.uniform() ** 2))
                opts['maxiter'] = min(maxiter0, 0.5 * sum(large_i) / opts['popsize'])
                # print('small basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))
            else:
                # A run with large population size; the population
                # doubling is implicit with incpopsize.
                poptype = 'large'
                popsize_multiplier = fmin_options['incpopsize'] ** (irun - runs_with_small)
                opts['popsize'] = popsize0 * popsize_multiplier
                opts['maxiter'] = maxiter0
                # print('large basemul %s --> %s; maxiter %s' % (popsize_multiplier, opts['popsize'], opts['maxiter']))
            # recover from a CMA object
            if irun == 0 and isinstance(x0, CMAEvolutionStrategy):
                es = x0
                x0 = es.inputargs['x0']  # for the next restarts
                if isscalar(sigma0) and isfinite(sigma0) and sigma0 > 0:
                    es.sigma = sigma0
                # debatable whether this makes sense:
                sigma0 = es.inputargs['sigma0']  # for the next restarts
                if options is not None:
                    es.opts.set(options)
                # ignore further input args and keep original options
            else:  # default case
                # NOTE(review): builtin `eval` on the user-supplied
                # ``restart_from_best`` option string below
                if irun and eval(str(fmin_options['restart_from_best'])):
                    print_warning('CAVE: restart_from_best is often not useful',
                                  verbose=opts['verbose'])
                    es = CMAEvolutionStrategy(best.x, sigma_factor * sigma0, opts)
                else:
                    es = CMAEvolutionStrategy(x0, sigma_factor * sigma0, opts)
                if eval_initial_x or es.opts['CMA_elitist'] == 'initial' \
                        or (es.opts['CMA_elitist'] and eval_initial_x is None):
                    x = es.gp.pheno(es.mean,
                                    into_bounds=es.boundary_handler.repair,
                                    archive=es.sent_solutions)
                    es.best.update([x], es.sent_solutions,
                                   [objective_function(x, *args)], 1)
                    es.countevals += 1
            opts = es.opts  # processed options, unambiguous
            # a hack:
            fmin_opts = CMAOptions(fmin_options.copy(), unchecked=True)
            for k in fmin_opts:
                # locals() cannot be modified directly, exec won't work
                # in 3.x, therefore
                fmin_opts.eval(k, loc={'N': es.N,
                                       'popsize': opts['popsize']},
                               correct_key=False)
            append = opts['verb_append'] or es.countiter > 0 or irun > 0
            # es.logger is "the same" logger, because the "identity"
            # is only determined by the `filenameprefix`
            logger = CMADataLogger(opts['verb_filenameprefix'],
                                   opts['verb_log'])
            logger.register(es, append).add()  # no fitness values here
            es.logger = logger
            if noise_handler:
                noisehandler = noise_handler
                noise_handling = True
                if fmin_opts['noise_change_sigma_exponent'] > 0:
                    es.opts['tolfacupx'] = inf
            else:
                noisehandler = NoiseHandler(es.N, 0)
                noise_handling = False
            es.noise_handler = noisehandler
            # the problem: this assumes that good solutions cannot take longer than bad ones:
            # with EvalInParallel(objective_function, 2, is_feasible=opts['is_feasible']) as eval_in_parallel:
            if 1 < 3:
                while not es.stop():  # iteration loop
                    # X, fit = eval_in_parallel(lambda: es.ask(1)[0], es.popsize, args, repetitions=noisehandler.evaluations-1)
                    X, fit = es.ask_and_eval(objective_function, args, gradf=gradf,
                                             evaluations=noisehandler.evaluations,
                                             aggregation=np.median)  # treats NaN with resampling
                    # TODO: check args and in case use args=(noisehandler.evaluations, )
                    es.tell(X, fit)  # prepare for next iteration
                    if noise_handling:  # it would be better to also use these f-evaluations in tell
                        es.sigma *= noisehandler(X, fit, objective_function, es.ask,
                                                 args=args)**fmin_opts['noise_change_sigma_exponent']
                        es.countevals += noisehandler.evaluations_just_done  # TODO: this is a hack, not important though
                        # es.more_to_write.append(noisehandler.evaluations_just_done)
                        if noisehandler.maxevals > noisehandler.minevals:
                            es.more_to_write.append(noisehandler.get_evaluations())
                        if 1 < 3:
                            es.sp.cmean *= exp(-noise_kappa_exponent * np.tanh(noisehandler.noiseS))
                            if es.sp.cmean > 1:
                                es.sp.cmean = 1
                    es.disp()
                    logger.add(# more_data=[noisehandler.evaluations, 10**noisehandler.noiseS] if noise_handling else [],
                               modulo=1 if es.stop() and logger.modulo else None)
                    if (opts['verb_log'] and opts['verb_plot'] and
                            (es.countiter % max(opts['verb_plot'], opts['verb_log']) == 0 or es.stop())):
                        logger.plot(324)
            # end while not es.stop
            mean_pheno = es.gp.pheno(es.mean, into_bounds=es.boundary_handler.repair, archive=es.sent_solutions)
            fmean = objective_function(mean_pheno, *args)
            es.countevals += 1
            es.best.update([mean_pheno], es.sent_solutions, [fmean], es.countevals)
            best.update(es.best, es.sent_solutions)  # in restarted case
            # es.best.update(best)
            this_evals = es.countevals - base_evals
            base_evals = es.countevals
            # BIPOP stats update
            if irun == 0:
                popsize0 = opts['popsize']
                maxiter0 = opts['maxiter']
                # XXX: This might be a bug? Reproduced from Matlab
                # small_i.append(this_evals)
            if bipop:
                if poptype == 'small':
                    small_i.append(this_evals)
                else:  # poptype == 'large'
                    large_i.append(this_evals)
            # final message
            if opts['verb_disp']:
                es.result_pretty(irun, time.asctime(time.localtime()),
                                 best.f)
            irun += 1
            # if irun > fmin_opts['restarts'] or 'ftarget' in es.stop() \
            # if irun > restarts or 'ftarget' in es.stop() \
            if irun - runs_with_small > fmin_opts['restarts'] or 'ftarget' in es.stop() \
                    or 'maxfevals' in es.stop(check=False):
                break
            opts['verb_append'] = es.countevals
            opts['popsize'] = fmin_opts['incpopsize'] * es.sp.popsize  # TODO: use rather options?
            opts['seed'] += 1
        # while irun
        # es.out['best'] = best  # TODO: this is a rather suboptimal type for inspection in the shell
        if 1 < 3:
            if irun:
                es.best.update(best)
                # TODO: there should be a better way to communicate the overall best
            return es.result() + (es.stop(), es, logger)
        else:  # previously: to be removed
            return (best.x.copy(), best.f, es.countevals,
                    dict((('stopdict', _CMAStopDict(es._stopdict))
                          , ('mean', es.gp.pheno(es.mean))
                          , ('std', es.sigma * es.sigma_vec * sqrt(es.dC) * es.gp.scales)
                          , ('out', es.out)
                          , ('opts', es.opts)  # last state of options
                          , ('cma', es)
                          , ('inputargs', es.inputargs)
                          ))
                    )
        # TODO refine output, can #args be flexible?
        # is this well usable as it is now?
    else:  # except KeyboardInterrupt:  # Exception, e:
        if eval(str(options['verb_disp'])) > 0:
            print(' in/outcomment ``raise`` in last line of cma.fmin to prevent/restore KeyboardInterrupt exception')
        raise  # cave: swallowing this exception can silently mess up experiments, if ctrl-C is hit
# _____________________________________________________________________
# _____________________________________________________________________
#
class BaseDataLogger(object):
    """"abstract" base class for a data logger that can be used with an `OOOptimizer`

    Subclasses are expected to override `add`, `disp`, `plot` and
    `data`; `register` may be inherited as-is.

    Details: attribute `modulo` is used in ``OOOptimizer.optimize``
    """
    def add(self, optim=None, more_data=None):
        """abstract method, add a "data point" from the state of `optim` into the
        logger, the argument `optim` can be omitted if it was `register()`-ed before,
        acts like an event handler

        ``more_data`` is an optional sequence of additional values to
        log.  Fix: the default was the mutable ``[]`` (a shared list
        across all calls); ``None`` avoids the classic mutable-default
        pitfall while staying call-compatible, since this base
        implementation never touches the argument.
        """
        raise NotImplementedError
    def register(self, optim):
        """abstract method, register an optimizer `optim`, only needed if `add()` is
        called without a value for the `optim` argument"""
        self.optim = optim
    def disp(self):
        """display some data trace (not implemented)"""
        print('method BaseDataLogger.disp() not implemented, to be done in subclass ' + str(type(self)))
    def plot(self):
        """plot data (not implemented)"""
        print('method BaseDataLogger.plot() is not implemented, to be done in subclass ' + str(type(self)))
    def data(self):
        """return logged data in a dictionary (not implemented)"""
        print('method BaseDataLogger.data() is not implemented, to be done in subclass ' + str(type(self)))
# _____________________________________________________________________
# _____________________________________________________________________
#
class CMADataLogger(BaseDataLogger):
"""data logger for class `CMAEvolutionStrategy`. The logger is
identified by its name prefix and (over-)writes or reads according
data files. Therefore, the logger must be considered as *global* variable
with unpredictable side effects, if two loggers with the same name
and on the same working folder are used at the same time.
Examples
========
::
import cma
es = cma.CMAEvolutionStrategy(...)
logger = cma.CMADataLogger().register(es)
while not es.stop():
...
logger.add() # add can also take an argument
logger.plot() # or a short cut can be used:
cma.plot() # plot data from logger with default name
logger2 = cma.CMADataLogger('just_another_filename_prefix').load()
logger2.plot()
logger2.disp()
::
import cma
from matplotlib.pylab import *
res = cma.fmin(cma.Fcts.sphere, rand(10), 1e-0)
logger = res[-1] # the CMADataLogger
logger.load() # by "default" data are on disk
semilogy(logger.f[:,0], logger.f[:,5]) # plot f versus iteration, see file header
show()
Details
=======
After loading data, the logger has the attributes `xmean`, `xrecent`,
`std`, `f`, `D` and `corrspec` corresponding to ``xmean``,
``xrecentbest``, ``stddev``, ``fit``, ``axlen`` and ``axlencorr``
filename trails.
:See: `disp()`, `plot()`
"""
default_prefix = 'outcmaes'
# names = ('axlen','fit','stddev','xmean','xrecentbest')
# key_names_with_annotation = ('std', 'xmean', 'xrecent')
    def __init__(self, name_prefix=default_prefix, modulo=1, append=False):
        """initialize logging of data from a `CMAEvolutionStrategy`
        instance, default ``modulo=1`` means logging with each call

        :param name_prefix: filename prefix for the six ``*.dat`` files;
            a `CMAEvolutionStrategy` instance may also be passed, in which
            case its ``verb_filenameprefix`` option is used
        :param modulo: record only every `modulo`-th call of `add`
        :param append: if True, do not overwrite previously logged files
        """
        # super(CMAData, self).__init__({'iter':[], 'stds':[], 'D':[],
        #        'sig':[], 'fit':[], 'xm':[]})
        # class properties:
        self.name_prefix = name_prefix if name_prefix \
            else CMADataLogger.default_prefix
        # convenience: accept an evolution strategy and use its configured prefix
        if isinstance(self.name_prefix, CMAEvolutionStrategy):
            self.name_prefix = self.name_prefix.opts.eval(
                'verb_filenameprefix')
        self.file_names = ('axlen', 'axlencorr', 'fit', 'stddev', 'xmean',
                'xrecentbest')
        """used in load, however hard-coded in add"""
        self.key_names = ('D', 'corrspec', 'f', 'std', 'xmean', 'xrecent')
        """used in load, however hard-coded in plot"""
        self._key_names_with_annotation = ('std', 'xmean', 'xrecent')
        """used in load to add one data row to be modified in plot"""
        self.modulo = modulo
        """how often to record data, allows calling `add` without args"""
        self.append = append
        """append to previous data"""
        self.counter = 0
        """number of calls to `add`"""
        self.last_iteration = 0
        self.registered = False
        self.last_correlation_spectrum = None
        self._eigen_counter = 1  # reduce costs
def data(self):
"""return dictionary with data.
If data entries are None or incomplete, consider calling
``.load().data()`` to (re-)load the data from files first.
"""
d = {}
for name in self.key_names:
d[name] = self.__dict__.get(name, None)
return d
def register(self, es, append=None, modulo=None):
"""register a `CMAEvolutionStrategy` instance for logging,
``append=True`` appends to previous data logged under the same name,
by default previous data are overwritten.
"""
if not isinstance(es, CMAEvolutionStrategy):
raise TypeError("only class CMAEvolutionStrategy can be " +
"registered for logging")
self.es = es
if append is not None:
self.append = append
if modulo is not None:
self.modulo = modulo
self.registered = True
return self
    def initialize(self, modulo=None):
        """reset logger, overwrite original files, `modulo`: log only every modulo call

        Writes one header line into each of the six output files; the
        header documents the column layout expected by `load`/`plot`.
        Requires a previous `register` call. Returns ``self``.
        """
        if modulo is not None:
            self.modulo = modulo
        try:
            es = self.es  # must have been registered
        except AttributeError:
            pass  # TODO: revise usage of es... that this can pass
            raise _Error('call register() before initialize()')
        self.counter = 0  # number of calls of add
        self.last_iteration = 0  # some lines are only written if iteration>last_iteration
        # write headers for output
        fn = self.name_prefix + 'fit.dat'
        strseedtime = 'seed=%d, %s' % (es.opts['seed'], time.asctime())
        try:
            with open(fn, 'w') as f:
                f.write('% # columns="iteration, evaluation, sigma, axis ratio, ' +
                        'bestever, best, median, worst objective function value, ' +
                        'further objective values of best", ' +
                        strseedtime +
                        # strftime("%Y/%m/%d %H:%M:%S", localtime()) + # just asctime() would do
                        '\n')
        except (IOError, OSError):
            print('could not open file ' + fn)
        fn = self.name_prefix + 'axlen.dat'
        try:
            with open(fn, 'w') as f:
                f.write('%  columns="iteration, evaluation, sigma, ' +
                        'max axis length, ' +
                        ' min axis length, all principle axes lengths ' +
                        ' (sorted square roots of eigenvalues of C)", ' +
                        strseedtime +
                        '\n')
        except (IOError, OSError):
            print('could not open/write file ' + fn)
        fn = self.name_prefix + 'axlencorr.dat'
        try:
            with open(fn, 'w') as f:
                f.write('%  columns="iteration, evaluation, min max(neg(.)) min(pos(.))' +
                        ' max correlation, correlation matrix principle axes lengths ' +
                        ' (sorted square roots of eigenvalues of correlation matrix)", ' +
                        strseedtime +
                        '\n')
        except (IOError, OSError):
            print('could not open file ' + fn)
        fn = self.name_prefix + 'stddev.dat'
        try:
            with open(fn, 'w') as f:
                f.write('% # columns=["iteration, evaluation, sigma, void, void, ' +
                        ' stds==sigma*sqrt(diag(C))", ' +
                        strseedtime +
                        '\n')
        except (IOError, OSError):
            print('could not open file ' + fn)
        fn = self.name_prefix + 'xmean.dat'
        try:
            with open(fn, 'w') as f:
                f.write('% # columns="iteration, evaluation, void, void, void, xmean", ' +
                        strseedtime)
                # record the genotype-phenotype transformation parameters as well
                f.write(' # scaling_of_variables: ')
                if np.size(es.gp.scales) > 1:
                    f.write(' '.join(map(str, es.gp.scales)))
                else:
                    f.write(str(es.gp.scales))
                f.write(', typical_x: ')
                if np.size(es.gp.typical_x) > 1:
                    f.write(' '.join(map(str, es.gp.typical_x)))
                else:
                    f.write(str(es.gp.typical_x))
                f.write('\n')
        except (IOError, OSError):
            print('could not open/write file ' + fn)
        fn = self.name_prefix + 'xrecentbest.dat'
        try:
            with open(fn, 'w') as f:
                f.write('% # iter+eval+sigma+0+fitness+xbest, ' +
                        strseedtime +
                        '\n')
        except (IOError, OSError):
            print('could not open/write file ' + fn)
        return self
# end def __init__
    def load(self, filenameprefix=None):
        """load (or reload) data from output files, `load()` is called in
        `plot()` and `disp()`.

        Argument `filenameprefix` is the filename prefix of data to be
        loaded (six files), by default ``'outcmaes'``.

        Return self with (added) attributes `xrecent`, `xmean`,
        `f`, `D`, `std`, 'corrspec'
        """
        if not filenameprefix:
            filenameprefix = self.name_prefix
        assert len(self.file_names) == len(self.key_names)
        for i in rglen((self.file_names)):
            fn = filenameprefix + self.file_names[i] + '.dat'
            try:
                # parse the whitespace-separated numbers into a list of rows
                self.__dict__[self.key_names[i]] = _fileToMatrix(fn)
            except:
                _print_warning('reading from file "' + fn + '" failed',
                               'load', 'CMADataLogger')
            try:
                if self.key_names[i] in self._key_names_with_annotation:
                    # copy last row to later fill in annotation position for display
                    self.__dict__[self.key_names[i]].append(
                        self.__dict__[self.key_names[i]][-1])
                self.__dict__[self.key_names[i]] = \
                    array(self.__dict__[self.key_names[i]], copy=False)
            except:
                _print_warning('no data for %s' % fn, 'load',
                               'CMADataLogger')
        return self
    def add(self, es=None, more_data=[], modulo=None):
        """append some logging data from `CMAEvolutionStrategy` class instance `es`,
        if ``number_of_times_called % modulo`` equals to zero, never if ``modulo==0``.

        The sequence ``more_data`` must always have the same length.

        When used for a different optimizer class, this function can be
        (easily?) adapted by changing the assignments under INTERFACE
        in the implemention.
        """
        # honor the logging period: skip calls that do not match `modulo`
        mod = modulo if modulo is not None else self.modulo
        self.counter += 1
        if mod == 0 or (self.counter > 3 and (self.counter - 1) % mod):
            return
        if es is None:
            try:
                es = self.es  # must have been registered
            except AttributeError :
                raise _Error('call `add` with argument `es` or ``register(es)`` before ``add()``')
        elif not self.registered:
            self.register(es)
        if 1 < 3:
            if self.counter == 1 and not self.append and self.modulo != 0:
                self.initialize()  # write file headers
                self.counter = 1
        # --- INTERFACE, can be changed if necessary ---
        if not isinstance(es, CMAEvolutionStrategy):  # not necessary
            _print_warning('type CMAEvolutionStrategy expected, found '
                           + str(type(es)), 'add', 'CMADataLogger')
        evals = es.countevals
        iteration = es.countiter
        eigen_decompositions = es.count_eigen
        sigma = es.sigma
        axratio = es.D.max() / es.D.min()
        xmean = es.mean  # TODO: should be optionally phenotype?
        fmean_noise_free = es.fmean_noise_free
        fmean = es.fmean
        # TODO: find a different way to communicate current x and f?
        try:
            besteverf = es.best.f
            bestf = es.fit.fit[0]
            worstf = es.fit.fit[-1]
            medianf = es.fit.fit[es.sp.popsize // 2]
        except:
            if iteration > 0:  # first call without f-values is OK
                raise
        try:
            xrecent = es.best.last.x
        except:
            xrecent = None
        maxD = es.D.max()
        minD = es.D.min()
        diagD = es.D
        diagC = es.sigma * es.sigma_vec * sqrt(es.dC)
        more_to_write = es.more_to_write
        es.more_to_write = []
        # --- end interface ---
        try:
            # fit
            if iteration > self.last_iteration:
                fn = self.name_prefix + 'fit.dat'
                with open(fn, 'a') as f:
                    f.write(str(iteration) + ' '
                            + str(evals) + ' '
                            + str(sigma) + ' '
                            + str(axratio) + ' '
                            + str(besteverf) + ' '
                            + '%.16e' % bestf + ' '
                            + str(medianf) + ' '
                            + str(worstf) + ' '
                            # + str(es.sp.popsize) + ' '
                            # + str(10**es.noiseS) + ' '
                            # + str(es.sp.cmean) + ' '
                            + ' '.join(str(i) for i in more_to_write) + ' '
                            + ' '.join(str(i) for i in more_data) + ' '
                            + '\n')
            # axlen
            fn = self.name_prefix + 'axlen.dat'
            if 1 < 3:
                with open(fn, 'a') as f:  # does not rely on reference counting
                    f.write(str(iteration) + ' '
                            + str(evals) + ' '
                            + str(sigma) + ' '
                            + str(maxD) + ' '
                            + str(minD) + ' '
                            + ' '.join(map(str, diagD))
                            + '\n')
            # correlation matrix eigenvalues
            if 1 < 3:
                fn = self.name_prefix + 'axlencorr.dat'
                c = es.correlation_matrix()
                if c is not None:
                    # accept at most 50% internal loss
                    if self._eigen_counter < eigen_decompositions / 2:
                        self.last_correlation_spectrum = \
                            sorted(es.opts['CMA_eigenmethod'](c)[0]**0.5)
                        self._eigen_counter += 1
                    if self.last_correlation_spectrum is None:
                        self.last_correlation_spectrum = len(diagD) * [1]
                    c = c[c < 1 - 1e-14]  # remove diagonal elements
                    c[c > 1 - 1e-14] = 1 - 1e-14
                    c[c < -1 + 1e-14] = -1 + 1e-14
                    c_min = np.min(c)
                    c_max = np.max(c)
                    if np.min(abs(c)) == 0:
                        c_medminus = 0  # thereby zero "is negative"
                        c_medplus = 0  # thereby zero "is positive"
                    else:
                        c_medminus = c[np.argmin(1/c)]  # c is flat
                        c_medplus = c[np.argmax(1/c)]  # c is flat
                    with open(fn, 'a') as f:
                        f.write(str(iteration) + ' '
                                + str(evals) + ' '
                                + str(c_min) + ' '
                                + str(c_medminus) + ' '  # the one closest to 0
                                + str(c_medplus) + ' '  # the one closest to 0
                                + str(c_max) + ' '
                                + ' '.join(map(str,
                                        self.last_correlation_spectrum))
                                + '\n')
            # stddev
            fn = self.name_prefix + 'stddev.dat'
            with open(fn, 'a') as f:
                f.write(str(iteration) + ' '
                        + str(evals) + ' '
                        + str(sigma) + ' '
                        + '0 0 '
                        + ' '.join(map(str, diagC))
                        + '\n')
            # xmean
            fn = self.name_prefix + 'xmean.dat'
            with open(fn, 'a') as f:
                f.write(str(iteration) + ' '
                        + str(evals) + ' '
                        # + str(sigma) + ' '
                        + '0 '
                        + str(fmean_noise_free) + ' '
                        + str(fmean) + ' '  # TODO: this does not make sense
                        # TODO should be optional the phenotyp?
                        + ' '.join(map(str, xmean))
                        + '\n')
            # xrecent
            fn = self.name_prefix + 'xrecentbest.dat'
            if iteration > 0 and xrecent is not None:
                with open(fn, 'a') as f:
                    f.write(str(iteration) + ' '
                            + str(evals) + ' '
                            + str(sigma) + ' '
                            + '0 '
                            + str(bestf) + ' '
                            + ' '.join(map(str, xrecent))
                            + '\n')
        except (IOError, OSError):
            # warn only early on; later write failures are silently ignored
            if iteration <= 1:
                _print_warning(('could not open/write file %s: ' % fn,
                                sys.exc_info()))
        self.last_iteration = iteration
    def closefig(self):
        """close the figure last used by `plot()`/`plot_all()`"""
        pyplot.close(self.fighandle)
def save_to(self, nameprefix, switch=False):
"""saves logger data to a different set of files, for
``switch=True`` also the loggers name prefix is switched to
the new value
"""
if not nameprefix or not isinstance(nameprefix, str):
raise _Error('filename prefix must be a nonempty string')
if nameprefix == self.default_prefix:
raise _Error('cannot save to default name "' + nameprefix + '...", chose another name')
if nameprefix == self.name_prefix:
return
for name in self.file_names:
open(nameprefix + name + '.dat', 'w').write(open(self.name_prefix + name + '.dat').read())
if switch:
self.name_prefix = nameprefix
def select_data(self, iteration_indices):
"""keep only data of `iteration_indices`"""
dat = self
iteridx = iteration_indices
dat.f = dat.f[np.where([x in iteridx for x in dat.f[:, 0]])[0], :]
dat.D = dat.D[np.where([x in iteridx for x in dat.D[:, 0]])[0], :]
try:
iteridx = list(iteridx)
iteridx.append(iteridx[-1]) # last entry is artificial
except:
pass
dat.std = dat.std[np.where([x in iteridx
for x in dat.std[:, 0]])[0], :]
dat.xmean = dat.xmean[np.where([x in iteridx
for x in dat.xmean[:, 0]])[0], :]
try:
dat.xrecent = dat.x[np.where([x in iteridx for x in
dat.xrecent[:, 0]])[0], :]
except AttributeError:
pass
try:
dat.corrspec = dat.x[np.where([x in iteridx for x in
dat.corrspec[:, 0]])[0], :]
except AttributeError:
pass
def plot(self, fig=None, iabscissa=1, iteridx=None,
plot_mean=False, # was: plot_mean=True
foffset=1e-19, x_opt=None, fontsize=9):
"""plot data from a `CMADataLogger` (using the files written
by the logger).
Arguments
---------
`fig`
figure number, by default 325
`iabscissa`
``0==plot`` versus iteration count,
``1==plot`` versus function evaluation number
`iteridx`
iteration indices to plot
Return `CMADataLogger` itself.
Examples
--------
::
import cma
logger = cma.CMADataLogger() # with default name
# try to plot the "default logging" data (e.g.
# from previous fmin calls, which is essentially what
# also cma.plot() does)
logger.plot()
cma.savefig('fig325.png') # save current figure
logger.closefig()
Dependencies: matlabplotlib/pyplot.
"""
try:
# pyplot: prodedural interface for matplotlib
from matplotlib.pyplot import figure, subplot, hold, gcf
except ImportError:
ImportError('could not find matplotlib.pyplot module, function plot() is not available')
return
if fig is None:
fig = 325
if iabscissa not in (0, 1):
iabscissa = 1
self.load() # better load only conditionally?
dat = self
dat.x = dat.xmean # this is the genotyp
if not plot_mean:
if len(dat.x) < 2:
print('not enough data to plot recent x')
else:
dat.x = dat.xrecent
# index out some data
if iteridx is not None:
self.select_data(iteridx)
if len(dat.f) <= 1:
print('nothing to plot')
return
# not in use anymore, see formatter above
# xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))
# dfit(dfit<1e-98) = NaN;
# TODO: if abscissa==0 plot in chunks, ie loop over subsets where
# dat.f[:,0]==countiter is monotonous
figure(fig)
self._enter_plotting(fontsize)
self.fighandle = gcf() # fighandle.number
subplot(2, 2, 1)
self.plot_divers(iabscissa, foffset)
pyplot.xlabel('')
# Scaling
subplot(2, 2, 3)
self.plot_axes_scaling(iabscissa)
# spectrum of correlation matrix
figure(fig)
subplot(2, 2, 2)
if plot_mean:
self.plot_mean(iabscissa, x_opt)
else:
self.plot_xrecent(iabscissa, x_opt)
pyplot.xlabel('')
# pyplot.xticks(xticklocs)
# standard deviations
subplot(2, 2, 4)
self.plot_stds(iabscissa)
self._finalize_plotting()
return self
def plot_all(self, fig=None, iabscissa=1, iteridx=None,
foffset=1e-19, x_opt=None, fontsize=9):
"""
plot data from a `CMADataLogger` (using the files written by the logger).
Arguments
---------
`fig`
figure number, by default 425
`iabscissa`
``0==plot`` versus iteration count,
``1==plot`` versus function evaluation number
`iteridx`
iteration indices to plot
Return `CMADataLogger` itself.
Examples
--------
::
import cma
logger = cma.CMADataLogger() # with default name
# try to plot the "default logging" data (e.g.
# from previous fmin calls, which is essentially what
# also cma.plot() does)
logger.plot_all()
cma.savefig('fig425.png') # save current figure
logger.closefig()
Dependencies: matlabplotlib/pyplot.
"""
try:
# pyplot: prodedural interface for matplotlib
from matplotlib.pyplot import figure, subplot, gcf
except ImportError:
ImportError('could not find matplotlib.pyplot module, function plot() is not available')
return
if fig is None:
fig = 426
if iabscissa not in (0, 1):
iabscissa = 1
self.load()
dat = self
# index out some data
if iteridx is not None:
self.select_data(iteridx)
if len(dat.f) == 0:
print('nothing to plot')
return
# not in use anymore, see formatter above
# xticklocs = np.arange(5) * np.round(minxend/4., -int(np.log10(minxend/4.)))
# dfit(dfit<1e-98) = NaN;
# TODO: if abscissa==0 plot in chunks, ie loop over subsets where dat.f[:,0]==countiter is monotonous
figure(fig)
self._enter_plotting(fontsize)
self.fighandle = gcf() # fighandle.number
if 1 < 3:
subplot(2, 3, 1)
self.plot_divers(iabscissa, foffset)
pyplot.xlabel('')
# standard deviations
subplot(2, 3, 4)
self.plot_stds(iabscissa)
# Scaling
subplot(2, 3, 2)
self.plot_axes_scaling(iabscissa)
pyplot.xlabel('')
# spectrum of correlation matrix
subplot(2, 3, 5)
self.plot_correlations(iabscissa)
# x-vectors
subplot(2, 3, 3)
self.plot_xrecent(iabscissa, x_opt)
pyplot.xlabel('')
subplot(2, 3, 6)
self.plot_mean(iabscissa, x_opt)
self._finalize_plotting()
return self
    def plot_axes_scaling(self, iabscissa=1):
        """plot the sorted principle axes lengths (sqrt of eigenvalues of C)
        on a log scale, loading data on demand; return ``self``"""
        if not hasattr(self, 'D'):
            self.load()
        dat = self
        self._enter_plotting()
        pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
        pyplot.hold(True)
        pyplot.grid(True)
        ax = array(pyplot.axis())
        # ax[1] = max(minxend, ax[1])
        pyplot.axis(ax)
        pyplot.title('Principle Axes Lengths')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self
    def plot_stds(self, iabscissa=1):
        """plot the coordinate-wise standard deviations divided by sigma;
        mutates ``self.std`` in place (divides out sigma) and uses the
        artificial last data row for per-coordinate annotations;
        return ``self``"""
        if not hasattr(self, 'std'):
            self.load()
        dat = self
        self._enter_plotting()
        # remove sigma from stds (graphs become much better readible)
        dat.std[:, 5:] = np.transpose(dat.std[:, 5:].T / dat.std[:, 2].T)
        # ax = array(pyplot.axis())
        # ax[1] = max(minxend, ax[1])
        # axis(ax)
        if 1 < 2 and dat.std.shape[1] < 100:
            # use fake last entry in x and std for line extension-annotation
            minxend = int(1.06 * dat.std[-2, iabscissa])
            # minxend = int(1.06 * dat.x[-2, iabscissa])
            dat.std[-1, iabscissa] = minxend  # TODO: should be ax[1]
            idx = np.argsort(dat.std[-2, 5:])
            idx2 = np.argsort(idx)
            dat.std[-1, 5 + idx] = np.logspace(np.log10(np.min(dat.std[:, 5:])),
                            np.log10(np.max(dat.std[:, 5:])), dat.std.shape[1] - 5)
            dat.std[-1, iabscissa] = minxend  # TODO: should be ax[1]
            pyplot.semilogy(dat.std[:, iabscissa], dat.std[:, 5:], '-')
            pyplot.hold(True)
            ax = array(pyplot.axis())
            yy = np.logspace(np.log10(ax[2]), np.log10(ax[3]), dat.std.shape[1] - 5)
            # yyl = np.sort(dat.std[-1,5:])
            idx = np.argsort(dat.std[-1, 5:])
            idx2 = np.argsort(idx)
            # plot(np.dot(dat.std[-2, iabscissa],[1,1]), array([ax[2]+1e-6, ax[3]-1e-6]), 'k-') # vertical separator
            # vertical separator
            pyplot.plot(np.dot(dat.std[-2, iabscissa], [1, 1]),
                        array([ax[2] + 1e-6, ax[3] - 1e-6]),
                        # array([np.min(dat.std[:, 5:]), np.max(dat.std[:, 5:])]),
                        'k-')
            pyplot.hold(True)
            # plot([dat.std[-1, iabscissa], ax[1]], [dat.std[-1,5:], yy[idx2]], 'k-') # line from last data point
            for i in rglen((idx)):
                # text(ax[1], yy[i], ' '+str(idx[i]))
                pyplot.text(dat.std[-1, iabscissa], dat.std[-1, 5 + i], ' ' + str(i))
        else:
            pyplot.semilogy(dat.std[:, iabscissa], dat.std[:, 5:], '-')
            pyplot.hold(True)
        pyplot.grid(True)
        pyplot.title(r'Standard Deviations $\times$ $\sigma^{-1}$ in All Coordinates')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self
def plot_mean(self, iabscissa=1, x_opt=None, annotations=None):
if not hasattr(self, 'xmean'):
self.load()
self.x = self.xmean
self._plot_x(iabscissa, x_opt, 'mean', annotations=annotations)
self._xlabel(iabscissa)
return self
def plot_xrecent(self, iabscissa=1, x_opt=None, annotations=None):
if not hasattr(self, 'xrecent'):
self.load()
self.x = self.xrecent
self._plot_x(iabscissa, x_opt, 'curr best', annotations=annotations)
self._xlabel(iabscissa)
return self
    def plot_correlations(self, iabscissa=1):
        """spectrum of correlation matrix and largest correlation

        Plots the sorted eigenvalue roots of the correlation matrix
        (columns 6+) and the extreme/closest-to-zero correlations
        (columns 2..5) as logged in the ``axlencorr`` file.
        """
        if not hasattr(self, 'corrspec'):
            self.load()
        if len(self.corrspec) < 2:
            return self
        x = self.corrspec[:, iabscissa]
        y = self.corrspec[:, 6:]  # principle axes
        ys = self.corrspec[:, :6]  # "special" values
        from matplotlib.pyplot import semilogy, hold, text, grid, axis, title
        self._enter_plotting()
        semilogy(x, y, '-c')
        hold(True)
        semilogy(x[:], np.max(y, 1) / np.min(y, 1), '-r')
        text(x[-1], np.max(y[-1, :]) / np.min(y[-1, :]), 'axis ratio')
        if ys is not None:
            # shift by +/-1 so the correlation curves fit a log-scale plot
            semilogy(x, 1 + ys[:, 2], '-b')
            text(x[-1], 1 + ys[-1, 2], '1 + min(corr)')
            semilogy(x, 1 - ys[:, 5], '-b')
            text(x[-1], 1 - ys[-1, 5], '1 - max(corr)')
            semilogy(x[:], 1 + ys[:, 3], '-k')
            text(x[-1], 1 + ys[-1, 3], '1 + max(neg corr)')
            semilogy(x[:], 1 - ys[:, 4], '-k')
            text(x[-1], 1 - ys[-1, 4], '1 - min(pos corr)')
        grid(True)
        ax = array(axis())
        # ax[1] = max(minxend, ax[1])
        axis(ax)
        title('Spectrum (roots) of correlation matrix')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self
    def plot_divers(self, iabscissa=1, foffset=1e-19):
        """plot fitness, sigma, axis ratio...

        :param iabscissa: 0 means vs evaluations, 1 means vs iterations
        :param foffset: added to f-value

        :See: `plot()`
        """
        from matplotlib.pyplot import semilogy, hold, grid, \
            axis, title, text
        fontsize = pyplot.rcParams['font.size']
        if not hasattr(self, 'f'):
            self.load()
        dat = self
        minfit = min(dat.f[:, 5])
        dfit = dat.f[:, 5] - minfit  # why not using idx?
        dfit[dfit < 1e-98] = np.NaN
        self._enter_plotting()
        if dat.f.shape[1] > 7:
            # semilogy(dat.f[:, iabscissa], abs(dat.f[:,[6, 7, 10, 12]])+foffset,'-k')
            semilogy(dat.f[:, iabscissa], abs(dat.f[:, [6, 7]]) + foffset, '-k')
            hold(True)
        # (larger indices): additional fitness data, for example constraints values
        if dat.f.shape[1] > 8:
            # dd = abs(dat.f[:,7:]) + 10*foffset
            # dd = np.where(dat.f[:,7:]==0, np.NaN, dd) # cannot be
            semilogy(dat.f[:, iabscissa], np.abs(dat.f[:, 8:]) + 10 * foffset, 'y')
            hold(True)
        idx = np.where(dat.f[:, 5] > 1e-98)[0]  # positive values
        semilogy(dat.f[idx, iabscissa], dat.f[idx, 5] + foffset, '.b')
        hold(True)
        grid(True)
        semilogy(dat.f[:, iabscissa], abs(dat.f[:, 5]) + foffset, '-b')
        text(dat.f[-1, iabscissa], abs(dat.f[-1, 5]) + foffset,
             r'$|f_\mathsf{best}|$', fontsize=fontsize + 2)
        # negative f-values, dots
        sgn = np.sign(dat.f[:, 5])
        sgn[np.abs(dat.f[:, 5]) < 1e-98] = 0
        idx = np.where(sgn < 0)[0]
        semilogy(dat.f[idx, iabscissa], abs(dat.f[idx, 5]) + foffset,
                 '.m')  # , markersize=5
        # lines between negative f-values
        dsgn = np.diff(sgn)
        start_idx = 1 + np.where((dsgn < 0) * (sgn[1:] < 0))[0]
        stop_idx = 1 + np.where(dsgn > 0)[0]
        if sgn[0] < 0:
            start_idx = np.concatenate(([0], start_idx))
        # connect each run of negative f-values with a magenta line
        for istart in start_idx:
            istop = stop_idx[stop_idx > istart]
            istop = istop[0] if len(istop) else 0
            idx = range(istart, istop if istop else dat.f.shape[0])
            if len(idx) > 1:
                semilogy(dat.f[idx, iabscissa], abs(dat.f[idx, 5]) + foffset,
                         'm')  # , markersize=5
            # lines between positive and negative f-values
            # TODO: the following might plot values very close to zero
            if istart > 0:  # line to the left of istart
                semilogy(dat.f[istart-1:istart+1, iabscissa],
                         abs(dat.f[istart-1:istart+1, 5]) +
                         foffset, '--m')
            if istop:  # line to the left of istop
                semilogy(dat.f[istop-1:istop+1, iabscissa],
                         abs(dat.f[istop-1:istop+1, 5]) +
                         foffset, '--m')
                # mark the respective first positive values
                semilogy(dat.f[istop, iabscissa], abs(dat.f[istop, 5]) +
                         foffset, '.b', markersize=7)
            # mark the respective first negative values
            semilogy(dat.f[istart, iabscissa], abs(dat.f[istart, 5]) +
                     foffset, '.r', markersize=7)
        # standard deviations std
        semilogy(dat.std[:-1, iabscissa],
                 np.vstack([list(map(max, dat.std[:-1, 5:])),
                            list(map(min, dat.std[:-1, 5:]))]).T,
                 '-m', linewidth=2)
        text(dat.std[-2, iabscissa], max(dat.std[-2, 5:]), 'max std',
             fontsize=fontsize)
        text(dat.std[-2, iabscissa], min(dat.std[-2, 5:]), 'min std',
             fontsize=fontsize)
        # delta-fitness in cyan
        idx = isfinite(dfit)
        if 1 < 3:
            idx_nan = np.where(idx == False)[0]  # gaps
            if not len(idx_nan):  # should never happen
                semilogy(dat.f[:, iabscissa][idx], dfit[idx], '-c')
            else:
                # plot each contiguous finite segment separately
                i_start = 0
                for i_end in idx_nan:
                    if i_end > i_start:
                        semilogy(dat.f[:, iabscissa][i_start:i_end],
                                 dfit[i_start:i_end], '-c')
                    i_start = i_end + 1
                if len(dfit) > idx_nan[-1] + 1:
                    semilogy(dat.f[:, iabscissa][idx_nan[-1]+1:],
                             dfit[idx_nan[-1]+1:], '-c')
        text(dat.f[idx, iabscissa][-1], dfit[idx][-1],
             r'$f_\mathsf{best} - \min(f)$', fontsize=fontsize + 2)
        # overall minimum
        i = np.argmin(dat.f[:, 5])
        semilogy(dat.f[i, iabscissa], np.abs(dat.f[i, 5]), 'ro',
                 markersize=9)
        semilogy(dat.f[i, iabscissa], dfit[idx][np.argmin(dfit[idx])]
                 + 1e-98, 'ro', markersize=9)
        # semilogy(dat.f[-1, iabscissa]*np.ones(2), dat.f[-1,4]*np.ones(2), 'rd')
        # AR and sigma
        semilogy(dat.f[:, iabscissa], dat.f[:, 3], '-r')  # AR
        semilogy(dat.f[:, iabscissa], dat.f[:, 2], '-g')  # sigma
        text(dat.f[-1, iabscissa], dat.f[-1, 3], r'axis ratio',
             fontsize=fontsize)
        text(dat.f[-1, iabscissa], dat.f[-1, 2] / 1.5, r'$\sigma$',
             fontsize=fontsize+3)
        ax = array(axis())
        # ax[1] = max(minxend, ax[1])
        axis(ax)
        text(ax[0] + 0.01, ax[2],  # 10**(log10(ax[2])+0.05*(log10(ax[3])-log10(ax[2]))),
             '.min($f$)=' + repr(minfit))
             #'.f_recent=' + repr(dat.f[-1, 5]))
        # title('abs(f) (blue), f-min(f) (cyan), Sigma (green), Axis Ratio (red)')
        # title(r'blue:$\mathrm{abs}(f)$, cyan:$f - \min(f)$, green:$\sigma$, red:axis ratio',
        #       fontsize=fontsize - 0.0)
        title(r'$|f_{\mathrm{best},\mathrm{med},\mathrm{worst}}|$, $f - \min(f)$, $\sigma$, axis ratio')
        # if __name__ != 'cma':  # should be handled by the caller
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self
    def _enter_plotting(self, fontsize=9):
        """assumes that a figure is open; switch pyplot to non-interactive
        mode and set the font size, remembering the previous size so that
        `_finalize_plotting` can restore it"""
        # interactive_status = matplotlib.is_interactive()
        self.original_fontsize = pyplot.rcParams['font.size']
        pyplot.rcParams['font.size'] = fontsize
        pyplot.hold(False)  # opens a figure window, if non exists
        pyplot.ioff()
    def _finalize_plotting(self):
        """re-enable interactive mode, refresh the display and restore
        the font size saved by `_enter_plotting`"""
        pyplot.ion()
        pyplot.draw()  # update "screen"
        pyplot.show()  # show figure
        # matplotlib.interactive(interactive_status)
        pyplot.rcParams['font.size'] = self.original_fontsize
def _xlabel(self, iabscissa=1):
pyplot.xlabel('iterations' if iabscissa == 0
else 'function evaluations')
    def _plot_x(self, iabscissa=1, x_opt=None, remark=None,
                annotations=None):
        """If ``x_opt is not None`` the difference to x_opt is plotted
        in log scale

        Plots ``self.x`` (set by `plot_mean`/`plot_xrecent`); the
        artificial last data row is overwritten with annotation
        positions. `remark` is included in the plot title.
        """
        if not hasattr(self, 'x'):
            _print_warning('no x-attributed found, use methods ' +
                           'plot_xrecent or plot_mean', 'plot_x',
                           'CMADataLogger')
            return
        from matplotlib.pyplot import plot, semilogy, hold, text, grid, axis, title
        dat = self  # for convenience and historical reasons
        # modify fake last entry in x for line extension-annotation
        if dat.x.shape[1] < 100:
            minxend = int(1.06 * dat.x[-2, iabscissa])
            # write y-values for individual annotation into dat.x
            dat.x[-1, iabscissa] = minxend  # TODO: should be ax[1]
            if x_opt is None:
                idx = np.argsort(dat.x[-2, 5:])
                idx2 = np.argsort(idx)
                dat.x[-1, 5 + idx] = np.linspace(np.min(dat.x[:, 5:]),
                            np.max(dat.x[:, 5:]), dat.x.shape[1] - 5)
            else:  # y-axis is in log
                xdat = np.abs(dat.x[:, 5:] - np.array(x_opt, copy=False))
                idx = np.argsort(xdat[-2, :])
                idx2 = np.argsort(idx)
                xdat[-1, idx] = np.logspace(np.log10(np.min(abs(xdat[xdat!=0]))),
                                            np.log10(np.max(np.abs(xdat))),
                                            dat.x.shape[1] - 5)
        else:
            minxend = 0
        self._enter_plotting()
        if x_opt is not None:  # TODO: differentate neg and pos?
            semilogy(dat.x[:, iabscissa], abs(xdat), '-')
        else:
            plot(dat.x[:, iabscissa], dat.x[:, 5:], '-')
        hold(True)
        grid(True)
        ax = array(axis())
        # ax[1] = max(minxend, ax[1])
        axis(ax)
        ax[1] -= 1e-6  # to prevent last x-tick annotation, probably superfluous
        if dat.x.shape[1] < 100:
            yy = np.linspace(ax[2] + 1e-6, ax[3] - 1e-6, dat.x.shape[1] - 5)
            # yyl = np.sort(dat.x[-1,5:])
            if x_opt is not None:
                # semilogy([dat.x[-1, iabscissa], ax[1]], [abs(dat.x[-1, 5:]), yy[idx2]], 'k-') # line from last data point
                semilogy(np.dot(dat.x[-2, iabscissa], [1, 1]),
                         array([ax[2] * (1+1e-6), ax[3] / (1+1e-6)]), 'k-')
            else:
                # plot([dat.x[-1, iabscissa], ax[1]], [dat.x[-1,5:], yy[idx2]], 'k-') # line from last data point
                plot(np.dot(dat.x[-2, iabscissa], [1, 1]),
                     array([ax[2] + 1e-6, ax[3] - 1e-6]), 'k-')
            # plot(array([dat.x[-1, iabscissa], ax[1]]),
            #      reshape(array([dat.x[-1,5:], yy[idx2]]).flatten(), (2,4)), '-k')
            for i in rglen(idx):
                # TODOqqq: annotate phenotypic value!?
                # text(ax[1], yy[i], 'x(' + str(idx[i]) + ')=' + str(dat.x[-2,5+idx[i]]))
                text(dat.x[-1, iabscissa], dat.x[-1, 5 + i]
                     if x_opt is None else np.abs(xdat[-1, i]),
                     ('x(' + str(i) + ')=' if annotations is None
                      else str(i) + ':' + annotations[i] + "=")
                     + str(dat.x[-2, 5 + i]))
        i = 2  # find smallest i where iteration count differs (in case the same row appears twice)
        while i < len(dat.f) and dat.f[-i][0] == dat.f[-1][0]:
            i += 1
        title('Object Variables (' +
              (remark + ', ' if remark is not None else '') +
              str(dat.x.shape[1] - 5) + '-D, popsize~' +
              (str(int((dat.f[-1][1] - dat.f[-i][1]) / (dat.f[-1][0] - dat.f[-i][0])))
               if len(dat.f.T[0]) > 1 and dat.f[-1][0] > dat.f[-i][0] else 'NA')
              + ')')
        self._finalize_plotting()
def downsampling(self, factor=10, first=3, switch=True, verbose=True):
"""
rude downsampling of a `CMADataLogger` data file by `factor`,
keeping also the first `first` entries. This function is a
stump and subject to future changes. Return self.
Arguments
---------
- `factor` -- downsampling factor
- `first` -- keep first `first` entries
- `switch` -- switch the new logger to the downsampled logger
original_name+'down'
Details
-------
``self.name_prefix+'down'`` files are written
Example
-------
::
import cma
cma.downsampling() # takes outcmaes* files
cma.plot('outcmaesdown')
"""
newprefix = self.name_prefix + 'down'
for name in self.file_names:
f = open(newprefix + name + '.dat', 'w')
iline = 0
cwritten = 0
for line in open(self.name_prefix + name + '.dat'):
if iline < first or iline % factor == 0:
f.write(line)
cwritten += 1
iline += 1
f.close()
if verbose and iline > first:
print('%d' % (cwritten) + ' lines written in ' + newprefix + name + '.dat')
if switch:
self.name_prefix += 'down'
return self
# ____________________________________________________________
# ____________________________________________________________
#
    def disp(self, idx=100):  # r_[0:5,1e2:1e9:1e2,-10:0]):
        """displays selected data from (files written by) the class `CMADataLogger`.

        Arguments
        ---------
        `idx`
            indices corresponding to rows in the data file;
            if idx is a scalar (int), the first two, then every idx-th,
            and the last three rows are displayed. Too large index values are removed.

        Example
        -------
        >>> import cma, numpy as np
        >>> res = cma.fmin(cma.fcts.elli, 7 * [0.1], 1, {'verb_disp':1e9})  # generate data
        >>> assert res[1] < 1e-9
        >>> assert res[2] < 4400
        >>> l = cma.CMADataLogger()  # == res[-1], logger with default name, "points to" above data
        >>> l.disp([0,-1])  # first and last
        >>> l.disp(20)  # some first/last and every 20-th line
        >>> l.disp(np.r_[0:999999:100, -1])  # every 100-th and last
        >>> l.disp(np.r_[0, -10:0])  # first and ten last
        >>> cma.disp(l.name_prefix, np.r_[0::100, -10:])  # the same as l.disp(...)

        Details
        -------
        The data line with the best f-value is displayed as last line.

        :See: `disp()`
        """
        filenameprefix = self.name_prefix
        def printdatarow(dat, iteration):
            """print data of iteration i"""
            i = np.where(dat.f[:, 0] == iteration)[0][0]
            j = np.where(dat.std[:, 0] == iteration)[0][0]
            print('%5d' % (int(dat.f[i, 0])) + ' %6d' % (int(dat.f[i, 1])) + ' %.14e' % (dat.f[i, 5]) +
                  ' %5.1e' % (dat.f[i, 3]) +
                  ' %6.2e' % (max(dat.std[j, 5:])) + ' %6.2e' % min(dat.std[j, 5:]))
        dat = CMADataLogger(filenameprefix).load()
        ndata = dat.f.shape[0]
        # map index to iteration number, is difficult if not all iteration numbers exist
        # idx = idx[np.where(map(lambda x: x in dat.f[:,0], idx))[0]] # TODO: takes pretty long
        # otherwise:
        if idx is None:
            idx = 100
        if isscalar(idx):
            # idx = np.arange(0, ndata, idx)
            if idx:
                idx = np.r_[0, 1, idx:ndata - 3:idx, -3:0]
            else:
                idx = np.r_[0, 1, -3:0]
        idx = array(idx)
        # drop indices that lie outside the available data range
        idx = idx[idx < ndata]
        idx = idx[-idx <= ndata]
        iters = dat.f[idx, 0]
        idxbest = np.argmin(dat.f[:, 5])
        iterbest = dat.f[idxbest, 0]
        if len(iters) == 1:
            printdatarow(dat, iters[0])
        else:
            self.disp_header()
            for i in iters:
                printdatarow(dat, i)
            self.disp_header()
            printdatarow(dat, iterbest)
        sys.stdout.flush()
def disp_header(self):
heading = 'Iterat Nfevals function value axis ratio maxstd minstd'
print(heading)
# end class CMADataLogger
# ____________________________________________________________
# ____________________________________________________________
#
# module-level counter for matplotlib figure numbers; `plot()` below
# increments it when no explicit figure is requested
last_figure_number = 324
def plot(name=None, fig=None, abscissa=1, iteridx=None,
         plot_mean=False,
         foffset=1e-19, x_opt=None, fontsize=9):
    """Plot data from files written by a `CMADataLogger`.

    Calling ``cma.plot(name, **argsdict)`` is a convenience shortcut
    for ``cma.CMADataLogger(name).plot(**argsdict)``.

    Arguments
    ---------
    `name`
        logger name / filename prefix; `None` means the default
        'outcmaes'
    `fig`
        filename or figure number, or both as a tuple (any order)
    `abscissa`
        0 == plot versus iteration count,
        1 == plot versus function evaluation number
    `iteridx`
        iteration indices to plot

    Returns `None`.

    Examples
    --------
    ::

        cma.plot()  # the optimization might still be
                    # running in a different shell
        cma.savefig('fig325.png')
        cma.closefig()

        cdl = cma.CMADataLogger().downsampling().plot()
        # in case the file sizes are large

    Details
    -------
    Data written by codes in other languages (C, Java, Matlab, Scilab)
    share the same format and can be plotted just the same.

    :See: `CMADataLogger`, `CMADataLogger.plot()`
    """
    global last_figure_number
    if not fig:
        # no figure requested: advance the module-level counter and use it
        last_figure_number += 1
        fig = last_figure_number
    if isinstance(fig, (int, float)):
        # remember a numeric figure so the next default continues from it
        last_figure_number = fig
    logger = CMADataLogger(name)
    logger.plot(fig, abscissa, iteridx, plot_mean, foffset,
                x_opt, fontsize)
def disp(name=None, idx=None):
    """Display selected data lines written by a `CMADataLogger`.

    ``cma.disp(name, idx)`` is a shortcut for
    ``cma.CMADataLogger(name).disp(idx)``.

    Arguments
    ---------
    `name`
        logger name / filename prefix; `None` means the default
        ``'outcmaes'``
    `idx`
        indices corresponding to rows in the data file; by default
        the first five, then every 100-th, and the last 10 rows.
        Too large index values are removed.

    Examples
    --------
    ::

        import cma, numpy
        # assume some data are available from previous runs
        cma.disp(None, numpy.r_[0, -1])  # first and last
        cma.disp(None, numpy.r_[0:1e9:100, -1])  # every 100-th and last
        cma.disp(idx=numpy.r_[0, -10:0])  # first and ten last
        cma.disp(idx=numpy.r_[0:1e9:1e3, -10:0])

    :See: `CMADataLogger.disp()`
    """
    prefix = name if name else CMADataLogger.default_prefix
    return CMADataLogger(prefix).disp(idx)
# ____________________________________________________________
def _fileToMatrix(file_name):
"""rudimentary method to read in data from a file"""
# TODO: np.loadtxt() might be an alternative
# try:
if 1 < 3:
lres = []
for line in open(file_name, 'r').readlines():
if len(line) > 0 and line[0] not in ('%', '#'):
lres.append(list(map(float, line.split())))
res = lres
while res != [] and res[0] == []: # remove further leading empty lines
del res[0]
return res
# except:
print('could not read file ' + file_name)
# ____________________________________________________________
# ____________________________________________________________
class NoiseHandler(object):
    """Noise handling according to [Hansen et al 2009, A Method for
    Handling Uncertainty in Evolutionary Optimization...]

    The interface of this class is yet versatile and subject to changes.

    The noise handling follows closely [Hansen et al 2009] in the
    measurement part, but the implemented treatment is slightly
    different: for ``noiseS > 0``, ``evaluations`` (time) and sigma are
    increased by ``alpha``. For ``noiseS < 0``, ``evaluations`` (time)
    is decreased by ``alpha**(1/4)``.

    The (second) parameter ``evaluations`` defines the maximal number
    of evaluations for a single fitness computation. If it is a list,
    the smallest element defines the minimal number and if the list has
    three elements, the median value is the start value for
    ``evaluations``.

    ``NoiseHandler`` serves to control the noise via steps-size
    increase and number of re-evaluations, for example via ``fmin`` or
    with ``ask_and_eval()``.

    Examples
    --------
    Minimal example together with `fmin` on a non-noisy function:

    >>> import cma
    >>> cma.fmin(cma.felli, 7 * [1], 1, noise_handler=cma.NoiseHandler(7))

    in dimension 7 (which needs to be given twice). More verbose example
    in the optimization loop with a noisy function defined in ``func``:

    >>> import cma, numpy as np
    >>> func = lambda x: cma.fcts.sphere(x) * (1 + 4 * np.random.randn() / len(x))  # cma.Fcts.noisysphere
    >>> es = cma.CMAEvolutionStrategy(np.ones(10), 1)
    >>> nh = cma.NoiseHandler(es.N, maxevals=[1, 1, 30])
    >>> while not es.stop():
    ...     X, fit_vals = es.ask_and_eval(func, evaluations=nh.evaluations)
    ...     es.tell(X, fit_vals)  # prepare for next iteration
    ...     es.sigma *= nh(X, fit_vals, func, es.ask)  # see method __call__
    ...     es.countevals += nh.evaluations_just_done  # this is a hack, not important though
    ...     es.logger.add(more_data = [nh.evaluations, nh.noiseS])  # add a data point
    ...     es.disp()
    ...     # nh.maxevals = ... it might be useful to start with smaller values and then increase
    >>> print(es.stop())
    >>> print(es.result()[-2])  # take mean value, the best solution is totally off
    >>> assert sum(es.result()[-2]**2) < 1e-9
    >>> print(X[np.argmin(fit_vals)])  # not bad, but probably worse than the mean
    >>> # es.logger.plot()

    The command ``logger.plot()`` will plot the logged data.

    The noise options of `fmin()` control a `NoiseHandler` instance
    similar to this example. The command ``cma.CMAOptions('noise')``
    lists in effect the parameters of `__init__` apart from
    ``aggregate``.

    Details
    -------
    The parameters reevals, theta, c_s, and alpha_t are set differently
    than in the original publication, see method `__init__()`. For a
    very small population size, say popsize <= 5, the measurement
    technique based on rank changes is likely to fail.

    Missing Features
    ----------------
    In case no noise is found, ``self.lam_reeval`` should be adaptive
    and get at least as low as 1 (however the possible savings from this
    are rather limited). Another option might be to decide during the
    first call by a quantitative analysis of fitness values whether
    ``lam_reeval`` is set to zero. More generally, an automatic noise
    mode detection might also set the covariance matrix learning rates
    to smaller values.

    :See: `fmin()`, `CMAEvolutionStrategy.ask_and_eval()`
    """
    # TODO: for const additive noise a better version might be with alphasigma also used for sigma-increment,
    # while all other variance changing sources are removed (because they are intrinsically biased). Then
    # using kappa to get convergence (with unit sphere samples): noiseS=0 leads to a certain kappa increasing rate?
    def __init__(self, N, maxevals=[1, 1, 1], aggregate=np.median,
                 reevals=None, epsilon=1e-7, parallel=False):
        """parameters are

        `N`
            dimension, (only) necessary to adjust the internal
            "alpha"-parameters
        `maxevals`
            maximal value for ``self.evaluations``, where
            ``self.evaluations`` function calls are aggregated for
            noise treatment. With ``maxevals == 0`` the noise
            handler is (temporarily) "switched off". If `maxevals`
            is a list, min value and (for >2 elements) median are
            used to define minimal and initial value of
            ``self.evaluations``. Choosing ``maxevals > 1`` is only
            reasonable, if also the original ``fit`` values (that
            are passed to `__call__`) are computed by aggregation of
            ``self.evaluations`` values (otherwise the values are
            not comparable), as it is done within `fmin()`.
        `aggregate`
            function to aggregate single f-values to a 'fitness', e.g.
            ``np.median``. ``None`` falls back to ``np.median``.
        `reevals`
            number of solutions to be reevaluated for noise
            measurement, can be a float, by default set to ``2 +
            popsize/20``, where ``popsize = len(fit)`` in
            ``__call__``. zero switches noise handling off.
        `epsilon`
            multiplier for perturbation of the reevaluated solutions
        `parallel`
            a single f-call with all resampled solutions

        :See: `fmin()`, `CMAOptions`, `CMAEvolutionStrategy.ask_and_eval()`
        """
        self.lam_reeval = reevals  # 2 + popsize/20, see method indices(), originally 2 + popsize/10
        self.epsilon = epsilon
        self.parallel = parallel
        ## meta_parameters.noise_theta == 0.5
        self.theta = 0.5  # 0.5  # originally 0.2
        self.cum = 0.3  # originally 1, 0.3 allows one disagreement of current point with resulting noiseS
        ## meta_parameters.noise_alphasigma == 2.0
        self.alphasigma = 1 + 2.0 / (N + 10)  # 2, unit sphere sampling: 1 + 1 / (N + 10)
        ## meta_parameters.noise_alphaevals == 2.0
        self.alphaevals = 1 + 2.0 / (N + 10)  # 2, originally 1.5
        ## meta_parameters.noise_alphaevalsdown_exponent == -0.25
        self.alphaevalsdown = self.alphaevals** -0.25  # originally 1/1.5
        # zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
        self.evaluations = 1  # to aggregate for a single f-evaluation
        self.minevals = 1
        self.maxevals = int(np.max(maxevals))
        if hasattr(maxevals, '__contains__'):  # i.e. can deal with ``in``
            if len(maxevals) > 1:
                self.minevals = min(maxevals)
                self.evaluations = self.minevals
            if len(maxevals) > 2:
                self.evaluations = np.median(maxevals)
        ## meta_parameters.noise_aggregate == None
        # BUGFIX: the original expression ``aggregate if not None else
        # {1: np.median, 2: np.mean}[None]`` always evaluated to
        # ``aggregate`` because ``not None`` is ``True``; with
        # ``aggregate=None`` it stored ``None`` and crashed later.
        # Fall back to the documented default ``np.median`` instead.
        self.f_aggregate = aggregate if aggregate is not None else np.median
        self.evaluations_just_done = 0  # actually conducted evals, only for documentation
        self.noiseS = 0

    def __call__(self, X, fit, func, ask=None, args=()):
        """proceed with noise measurement, set anew attributes ``evaluations``
        (proposed number of evaluations to "treat" noise) and ``evaluations_just_done``
        and return a factor for increasing sigma.

        Parameters
        ----------
        `X`
            a list/sequence/vector of solutions
        `fit`
            the respective list of function values
        `func`
            the objective function, ``fit[i]`` corresponds to ``func(X[i], *args)``
        `ask`
            a method to generate a new, slightly disturbed solution. The argument
            is (only) mandatory if ``epsilon`` is not zero, see `__init__()`.
        `args`
            optional additional arguments to `func`

        Details
        -------
        Calls the methods ``reeval()``, ``update_measure()`` and ``treat()`` in this order.
        ``self.evaluations`` is adapted within the method `treat()`.
        """
        self.evaluations_just_done = 0
        if not self.maxevals or self.lam_reeval == 0:
            return 1.0
        res = self.reeval(X, fit, func, ask, args)
        if not len(res):
            return 1.0
        self.update_measure()
        return self.treat()

    def get_evaluations(self):
        """return ``self.evaluations``, the number of evalutions to get a single fitness measurement"""
        return self.evaluations

    def treat(self):
        """adapt self.evaluations depending on the current measurement value
        and return ``sigma_fac in (1.0, self.alphasigma)``
        """
        if self.noiseS > 0:
            self.evaluations = min((self.evaluations * self.alphaevals, self.maxevals))
            return self.alphasigma
        else:
            self.evaluations = max((self.evaluations * self.alphaevalsdown, self.minevals))
            return 1.0  # / self.alphasigma

    def reeval(self, X, fit, func, ask, args=()):
        """store two fitness lists, `fit` and ``fitre`` reevaluating some
        solutions in `X`.
        ``self.evaluations`` evaluations are done for each reevaluated
        fitness value.
        See `__call__()`, where `reeval()` is called.
        """
        self.fit = list(fit)
        self.fitre = list(fit)
        self.idx = self.indices(fit)
        if not len(self.idx):
            return self.idx
        evals = int(self.evaluations) if self.f_aggregate else 1
        fagg = np.median if self.f_aggregate is None else self.f_aggregate
        for i in self.idx:
            X_i = X[i]
            if self.epsilon:
                if self.parallel:
                    self.fitre[i] = fagg(func(ask(evals, X_i, self.epsilon), *args))
                else:
                    self.fitre[i] = fagg([func(ask(1, X_i, self.epsilon)[0], *args)
                                          for _k in range(evals)])
            else:
                self.fitre[i] = fagg([func(X_i, *args) for _k in range(evals)])
        self.evaluations_just_done = evals * len(self.idx)
        return self.fit, self.fitre, self.idx

    def update_measure(self):
        """updated noise level measure using two fitness lists ``self.fit`` and
        ``self.fitre``, return ``self.noiseS, all_individual_measures``.

        Assumes that `self.idx` contains the indices where the fitness
        lists differ
        """
        lam = len(self.fit)
        idx = np.argsort(self.fit + self.fitre)
        ranks = np.argsort(idx).reshape((2, lam))
        rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
        # compute rank change limits using both ranks[0] and ranks[1]
        r = np.arange(1, 2 * lam)  # 2 * lam - 2 elements
        limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
                                    self.theta * 50) +
                         Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
                                    self.theta * 50))
                  for i in self.idx]
        # compute measurement
        # max: 1 rankchange in 2*lambda is always fine
        s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1)  # lives roughly in 0..2*lambda
        self.noiseS += self.cum * (np.mean(s) - self.noiseS)
        return self.noiseS, s

    def indices(self, fit):
        """return the set of indices to be reevaluated for noise
        measurement.

        Given the first values are the earliest, this is a useful policy also
        with a time changing objective.
        """
        ## meta_parameters.noise_reeval_multiplier == 1.0
        lam_reev = 1.0 * (self.lam_reeval if self.lam_reeval
                          else 2 + len(fit) / 20)
        lam_reev = int(lam_reev) + ((lam_reev % 1) > np.random.rand())
        ## meta_parameters.noise_choose_reeval == 1
        choice = 1
        if choice == 1:
            # take n_first first and reev - n_first best of the remaining
            n_first = lam_reev - lam_reev // 2
            sort_idx = np.argsort(array(fit, copy=False)[n_first:]) + n_first
            return np.array(list(range(0, n_first)) +
                            list(sort_idx[0:lam_reev - n_first]), copy=False)
        elif choice == 2:
            idx_sorted = np.argsort(array(fit, copy=False))
            # take lam_reev equally spaced, starting with best
            linsp = np.linspace(0, len(fit) - len(fit) / lam_reev, lam_reev)
            return idx_sorted[[int(i) for i in linsp]]
        # take the ``lam_reeval`` best from the first ``2 * lam_reeval + 2`` values.
        elif choice == 3:
            return np.argsort(array(fit, copy=False)[:2 * (lam_reev + 1)])[:lam_reev]
        else:
            raise ValueError('unrecognized choice value %d for noise reev'
                             % choice)
# ____________________________________________________________
# ____________________________________________________________
class Sections(object):
    """plot sections through an objective function.

    A first rational thing to do, when facing an (expensive)
    application. By default 6 points in each coordinate are evaluated.
    This class is still experimental.

    Examples
    --------
    >>> import cma, numpy as np
    >>> s = cma.Sections(cma.Fcts.rosen, np.zeros(3)).do(plot=False)
    >>> s.do(plot=False)  # evaluate the same points again, i.e. check for noise
    >> try:
    ...     s.plot()
    ... except:
    ...     print('plotting failed: matplotlib.pyplot package missing?')

    Details
    -------
    Data are saved after each function call during `do()`. The filename
    is attribute ``name`` and by default ``str(func)``, see `__init__()`.

    A random (orthogonal) basis can be generated with
    ``cma.Rotation()(np.eye(3))``.

    CAVEAT: The default name is unique in the function name, but it
    should be unique in all parameters of `__init__()` but `plot_cmd`
    and `load`. If, for example, a different basis is chosen, either
    the name must be changed or the ``.pkl`` file containing the
    previous data must first be renamed or deleted.

    ``s.res`` is a dictionary with an entry for each "coordinate" ``i``
    and with an entry ``'x'``, the middle point. Each entry ``i`` is
    again a dictionary with keys being different dx values and the
    value being a sequence of f-values. For example ``s.res[2][0.1] ==
    [0.01, 0.01]``, which is generated using the difference vector ``s
    .basis[2]`` like
    ``s.res[2][dx] += func(s.res['x'] + dx * s.basis[2])``.

    :See: `__init__()`
    """
    def __init__(self, func, x, args=(), basis=None, name=None,
                 plot_cmd=pyplot.plot if pyplot else None, load=True):
        """
        Parameters
        ----------
        `func`
            objective function
        `x`
            point in search space, middle point of the sections
        `args`
            arguments passed to `func`
        `basis`
            evaluated points are ``func(x + locations[j] * basis[i])
            for i in len(basis) for j in len(locations)``,
            see `do()`
        `name`
            filename where to save the result
        `plot_cmd`
            command used to plot the data, typically matplotlib pyplots `plot` or `semilogy`
        `load`
            load previous data from file ``str(func) + '.pkl'``
        """
        self.func = func
        self.args = args
        self.x = x
        self.name = name if name else str(func).replace(' ', '_').replace('>', '').replace('<', '')
        self.plot_cmd = plot_cmd  # or semilogy
        self.basis = np.eye(len(x)) if basis is None else basis
        try:
            self.load()
            if any(self.res['x'] != x):
                self.res = {}
                self.res['x'] = x  # TODO: res['x'] does not look perfect
            else:
                print(self.name + ' loaded')
        # best-effort load of previous data; fixed from a bare ``except:``
        # which also swallowed SystemExit and KeyboardInterrupt
        except Exception:
            self.res = {}
            self.res['x'] = x

    def do(self, repetitions=1, locations=np.arange(-0.5, 0.6, 0.2), plot=True):
        """generates, plots and saves function values ``func(y)``,
        where ``y`` is 'close' to `x` (see `__init__()`). The data are stored in
        the ``res`` attribute and the class instance is saved in a file
        with (the weired) name ``str(func)``.

        Parameters
        ----------
        `repetitions`
            for each point, only for noisy functions is >1 useful. For
            ``repetitions==0`` only already generated data are plotted.
        `locations`
            coordinated wise deviations from the middle point given in `__init__`
        """
        if not repetitions:
            self.plot()
            return
        res = self.res
        for i in range(len(self.basis)):  # i-th coordinate
            if i not in res:
                res[i] = {}
            # xx = np.array(self.x)
            # TODO: store res[i]['dx'] = self.basis[i] here?
            for dx in locations:
                xx = self.x + dx * self.basis[i]
                xkey = dx  # xx[i] if (self.basis == np.eye(len(self.basis))).all() else dx
                if xkey not in res[i]:
                    res[i][xkey] = []
                n = repetitions
                while n > 0:
                    n -= 1
                    res[i][xkey].append(self.func(xx, *self.args))
                    if plot:
                        self.plot()
                    self.save()
        return self

    def plot(self, plot_cmd=None, tf=lambda y: y):
        """plot the data we have, return ``self``"""
        if not plot_cmd:
            plot_cmd = self.plot_cmd
        colors = 'bgrcmyk'
        # NOTE(review): ``pyplot.hold`` was removed in matplotlib >= 3.0;
        # this code presumably targets an older matplotlib — confirm.
        pyplot.hold(False)
        res = self.res
        flatx, flatf = self.flattened()
        minf = np.inf
        for i in flatf:
            minf = min((minf, min(flatf[i])))
        addf = 1e-9 - minf if minf <= 1e-9 else 0
        for i in sorted(res.keys()):  # we plot not all values here
            if isinstance(i, int):
                color = colors[i % len(colors)]
                arx = sorted(res[i].keys())
                plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')
                pyplot.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)
                pyplot.hold(True)
                plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')
        pyplot.ylabel('f + ' + str(addf))
        pyplot.draw()
        pyplot.ion()
        pyplot.show()
        # raw_input('press return')
        return self

    def flattened(self):
        """return flattened data ``(x, f)`` such that for the sweep through
        coordinate ``i`` we have for data point ``j`` that ``f[i][j] == func(x[i][j])``
        """
        flatx = {}
        flatf = {}
        for i in self.res:
            if isinstance(i, int):
                flatx[i] = []
                flatf[i] = []
                for x in sorted(self.res[i]):
                    for d in sorted(self.res[i][x]):
                        flatx[i].append(x)
                        flatf[i].append(d)
        return flatx, flatf

    def save(self, name=None):
        """save to file"""
        import pickle
        name = name if name else self.name
        fun = self.func
        del self.func  # instance method produces error
        pickle.dump(self, open(name + '.pkl', "wb"))
        self.func = fun
        return self

    def load(self, name=None):
        """load from file"""
        import pickle
        name = name if name else self.name
        s = pickle.load(open(name + '.pkl', 'rb'))
        self.res = s.res  # disregard the class
        return self
#____________________________________________________________
#____________________________________________________________
class _Error(Exception):
"""generic exception of cma module"""
pass
# ____________________________________________________________
# ____________________________________________________________
#
class ElapsedTime(object):
    """CPU-time stopwatch with overflow handling.

    Historically based on ``time.clock``; since ``time.clock`` was
    removed in Python 3.8, falls back to its documented replacement
    ``time.process_time`` (also CPU time).

    Example:

    >>> clock = ElapsedTime()  # clock starts here
    >>> t1 = clock()  # get elapsed CPU time

    Details: 32-bit C overflows after int(2**32/1e6) == 4294s about 72 min
    """
    # prefer time.clock where it still exists (Python 2 semantics),
    # otherwise use time.process_time (Python 3.8+)
    _clock = staticmethod(getattr(time, 'clock', time.process_time))

    def __init__(self):
        self.tic0 = self._clock()
        self.tic = self.tic0
        self.lasttoc = self._clock()
        self.lastdiff = self._clock() - self.lasttoc
        self.time_to_add = 0
        self.messages = 0
    reset = __init__

    def __call__(self):
        """return CPU seconds elapsed since construction (or last `reset`)"""
        toc = self._clock()
        if toc - self.tic >= self.lasttoc - self.tic:
            self.lastdiff = toc - self.lasttoc
            self.lasttoc = toc
        else:  # overflow, reset self.tic
            if self.messages < 3:
                self.messages += 1
                print('  in cma.ElapsedTime: time measure overflow, last difference estimated from',
                      self.tic0, self.tic, self.lasttoc, toc, toc - self.lasttoc, self.lastdiff)
            self.time_to_add += self.lastdiff + self.lasttoc - self.tic
            self.tic = toc  # reset
            self.lasttoc = toc
        self.elapsedtime = toc - self.tic + self.time_to_add
        return self.elapsedtime
class Misc(object):
    """namespace for static helper functions: the math utilities in
    `Misc.MathHelperFunctions` (aliased ``Mh`` at module level) and the
    pure-Python eigendecomposition `Misc.eig` (a JAMA translation used
    as fallback for `numpy.linalg.eigh`).
    """
    # ____________________________________________________________
    # ____________________________________________________________
    #
    class MathHelperFunctions(object):
        """static convenience math helper functions, if the function name
        is preceded with an "a", a numpy array is returned
        """
        @staticmethod
        def aclamp(x, upper):
            return -Misc.MathHelperFunctions.apos(-x, -upper)
        @staticmethod
        def equals_approximately(a, b, eps=1e-12):
            if a < 0:
                a, b = -1 * a, -1 * b
            return (a - eps < b < a + eps) or ((1 - eps) * a < b < (1 + eps) * a)
        @staticmethod
        def vequals_approximately(a, b, eps=1e-12):
            a, b = array(a), array(b)
            idx = np.where(a < 0)[0]
            if len(idx):
                a[idx], b[idx] = -1 * a[idx], -1 * b[idx]
            return (np.all(a - eps < b) and np.all(b < a + eps)
                    ) or (np.all((1 - eps) * a < b) and np.all(b < (1 + eps) * a))
        @staticmethod
        def expms(A, eig=np.linalg.eigh):
            """matrix exponential for a symmetric matrix"""
            # TODO: check that this works reliably for low rank matrices
            # first: symmetrize A
            D, B = eig(A)
            return np.dot(B, (np.exp(D) * B).T)
        @staticmethod
        def amax(vec, vec_or_scalar):
            return array(Misc.MathHelperFunctions.max(vec, vec_or_scalar))
        @staticmethod
        def max(vec, vec_or_scalar):
            # element-wise maximum of a vector and a scalar or second vector
            b = vec_or_scalar
            if isscalar(b):
                m = [max(x, b) for x in vec]
            else:
                m = [max(vec[i], b[i]) for i in rglen((vec))]
            return m
        @staticmethod
        def minmax(val, min_val, max_val):
            assert min_val <= max_val
            return min((max_val, max((val, min_val))))
        @staticmethod
        def aminmax(val, min_val, max_val):
            return array([min((max_val, max((v, min_val)))) for v in val])
        @staticmethod
        def amin(vec_or_scalar, vec_or_scalar2):
            return array(Misc.MathHelperFunctions.min(vec_or_scalar, vec_or_scalar2))
        @staticmethod
        def min(a, b):
            # element-wise minimum; either argument may be a scalar
            iss = isscalar
            if iss(a) and iss(b):
                return min(a, b)
            if iss(a):
                a, b = b, a
            # now only b can be still a scalar
            if iss(b):
                return [min(x, b) for x in a]
            else:  # two non-scalars must have the same length
                return [min(a[i], b[i]) for i in rglen((a))]
        @staticmethod
        def norm(vec, expo=2):
            return sum(vec**expo)**(1 / expo)
        @staticmethod
        def apos(x, lower=0):
            """clips argument (scalar or array) from below at lower"""
            if lower == 0:
                return (x > 0) * x
            else:
                return lower + (x > lower) * (x - lower)
        @staticmethod
        def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):
            """``prctile(data, 50)`` returns the median, but p_vals can
            also be a sequence.

            Provides for small samples better values than matplotlib.mlab.prctile,
            however also slower.
            """
            ps = [p_vals] if isscalar(p_vals) else p_vals
            if not sorted_:
                data = sorted(data)
            n = len(data)
            d = []
            for p in ps:
                fi = p * n / 100 - 0.5
                if fi <= 0:  # maybe extrapolate?
                    d.append(data[0])
                elif fi >= n - 1:
                    d.append(data[-1])
                else:
                    i = int(fi)
                    d.append((i + 1 - fi) * data[i] + (fi - i) * data[i + 1])
            return d[0] if isscalar(p_vals) else d
        @staticmethod
        def sround(nb):  # TODO: to be vectorized
            """return stochastic round: floor(nb) + (rand()<remainder(nb))"""
            return nb // 1 + (np.random.rand(1)[0] < (nb % 1))
        @staticmethod
        def cauchy_with_variance_one():
            n = np.random.randn() / np.random.randn()
            while abs(n) > 1000:
                n = np.random.randn() / np.random.randn()
            return n / 25
        @staticmethod
        def standard_finite_cauchy(size=1):
            try:
                l = len(size)
            except TypeError:
                l = 0
            if l == 0:
                return array([Mh.cauchy_with_variance_one() for _i in range(size)])
            elif l == 1:
                return array([Mh.cauchy_with_variance_one() for _i in range(size[0])])
            elif l == 2:
                return array([[Mh.cauchy_with_variance_one() for _i in range(size[1])]
                              for _j in range(size[0])])
            else:
                raise _Error('len(size) cannot be large than two')
        @staticmethod
        def likelihood(x, m=None, Cinv=None, sigma=1, detC=None):
            """return likelihood of x for the normal density N(m, sigma**2 * Cinv**-1)"""
            # testing: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled)
            # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
            if m is None:
                dx = x
            else:
                dx = x - m  # array(x) - array(m)
            n = len(x)
            s2pi = (2 * np.pi)**(n / 2.)
            if Cinv is None:
                return exp(-sum(dx**2) / sigma**2 / 2) / s2pi / sigma**n
            if detC is None:
                detC = 1. / np.linalg.linalg.det(Cinv)
            return exp(-np.dot(dx, np.dot(Cinv, dx)) / sigma**2 / 2) / s2pi / abs(detC)**0.5 / sigma**n
        @staticmethod
        def loglikelihood(self, x, previous=False):
            """return log-likelihood of `x` regarding the current sample distribution"""
            # testing of original fct: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled)
            # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim
            # TODO: test this!!
            # c=cma.fmin...
            # c[3]['cma'].loglikelihood(...)
            if previous and hasattr(self, 'lastiter'):
                sigma = self.lastiter.sigma
                Crootinv = self.lastiter._Crootinv
                xmean = self.lastiter.mean
                D = self.lastiter.D
            elif previous and self.countiter > 1:
                raise _Error('no previous distribution parameters stored, check options importance_mixing')
            else:
                sigma = self.sigma
                Crootinv = self._Crootinv
                xmean = self.mean
                D = self.D
            dx = array(x) - xmean  # array(x) - array(m)
            n = self.N
            logs2pi = n * log(2 * np.pi) / 2.
            logdetC = 2 * sum(log(D))
            dx = np.dot(Crootinv, dx)
            res = -sum(dx**2) / sigma**2 / 2 - logs2pi - logdetC / 2 - n * log(sigma)
            if 1 < 3:  # testing
                s2pi = (2 * np.pi)**(n / 2.)
                detC = np.prod(D)**2
                res2 = -sum(dx**2) / sigma**2 / 2 - log(s2pi * abs(detC)**0.5 * sigma**n)
                assert res2 < res + 1e-8 or res2 > res - 1e-8
            return res
    # ____________________________________________________________
    # ____________________________________________________________
    #
    # C and B are arrays rather than matrices, because they are
    # addressed via B[i][j], matrices can only be addressed via B[i,j]
    # tred2(N, B, diagD, offdiag);
    # tql2(N, diagD, offdiag, B);
    # Symmetric Householder reduction to tridiagonal form, translated from JAMA package.
    @staticmethod
    def eig(C):
        """eigendecomposition of a symmetric matrix, much slower than
        `numpy.linalg.eigh`, return ``(EVals, Basis)``, the eigenvalues
        and an orthonormal basis of the corresponding eigenvectors, where

        ``Basis[i]``
            the i-th row of ``Basis``
        columns of ``Basis``, ``[Basis[j][i] for j in range(len(Basis))]``
            the i-th eigenvector with eigenvalue ``EVals[i]``
        """
        # class eig(object):
        #     def __call__(self, C):
        # Householder transformation of a symmetric matrix V into tridiagonal form.
        # -> n             : dimension
        # -> V             : symmetric nxn-matrix
        # <- V             : orthogonal transformation matrix:
        #                    tridiag matrix == V * V_in * V^t
        # <- d             : diagonal
        # <- e[0..n-1]     : off diagonal (elements 1..n-1)
        # Symmetric tridiagonal QL algorithm, iterative
        # Computes the eigensystem from a tridiagonal matrix in roughtly 3N^3 operations
        # -> n     : Dimension.
        # -> d     : Diagonale of tridiagonal matrix.
        # -> e[1..n-1] : off-diagonal, output from Householder
        # -> V     : matrix output von Householder
        # <- d     : eigenvalues
        # <- e     : garbage?
        # <- V     : basis of eigenvectors, according to d
        # tred2(N, B, diagD, offdiag); B=C on input
        # tql2(N, diagD, offdiag, B);
        # private void tred2 (int n, double V[][], double d[], double e[]) {
        def tred2 (n, V, d, e):
            #  This is derived from the Algol procedures tred2 by
            #  Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
            #  Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
            #  Fortran subroutine in EISPACK.
            num_opt = False  # factor 1.5 in 30-D
            for j in range(n):
                d[j] = V[n - 1][j]  # d is output argument
            # Householder reduction to tridiagonal form.
            for i in range(n - 1, 0, -1):
                # Scale to avoid under/overflow.
                h = 0.0
                if not num_opt:
                    scale = 0.0
                    for k in range(i):
                        scale = scale + abs(d[k])
                else:
                    scale = sum(abs(d[0:i]))
                if scale == 0.0:
                    e[i] = d[i - 1]
                    for j in range(i):
                        d[j] = V[i - 1][j]
                        V[i][j] = 0.0
                        V[j][i] = 0.0
                else:
                    # Generate Householder vector.
                    if not num_opt:
                        for k in range(i):
                            d[k] /= scale
                            h += d[k] * d[k]
                    else:
                        d[:i] /= scale
                        h = np.dot(d[:i], d[:i])
                    f = d[i - 1]
                    g = h**0.5
                    if f > 0:
                        g = -g
                    e[i] = scale * g
                    h = h - f * g
                    d[i - 1] = f - g
                    if not num_opt:
                        for j in range(i):
                            e[j] = 0.0
                    else:
                        e[:i] = 0.0
                    # Apply similarity transformation to remaining columns.
                    for j in range(i):
                        f = d[j]
                        V[j][i] = f
                        g = e[j] + V[j][j] * f
                        if not num_opt:
                            for k in range(j + 1, i):
                                g += V[k][j] * d[k]
                                e[k] += V[k][j] * f
                            e[j] = g
                        else:
                            e[j + 1:i] += V.T[j][j + 1:i] * f
                            e[j] = g + np.dot(V.T[j][j + 1:i], d[j + 1:i])
                    f = 0.0
                    if not num_opt:
                        for j in range(i):
                            e[j] /= h
                            f += e[j] * d[j]
                    else:
                        e[:i] /= h
                        f += np.dot(e[:i], d[:i])
                    hh = f / (h + h)
                    if not num_opt:
                        for j in range(i):
                            e[j] -= hh * d[j]
                    else:
                        e[:i] -= hh * d[:i]
                    for j in range(i):
                        f = d[j]
                        g = e[j]
                        if not num_opt:
                            for k in range(j, i):
                                V[k][j] -= (f * e[k] + g * d[k])
                        else:
                            V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])
                        d[j] = V[i - 1][j]
                        V[i][j] = 0.0
                d[i] = h
                # end for i--
            # Accumulate transformations.
            for i in range(n - 1):
                V[n - 1][i] = V[i][i]
                V[i][i] = 1.0
                h = d[i + 1]
                if h != 0.0:
                    if not num_opt:
                        for k in range(i + 1):
                            d[k] = V[k][i + 1] / h
                    else:
                        d[:i + 1] = V.T[i + 1][:i + 1] / h
                    for j in range(i + 1):
                        if not num_opt:
                            g = 0.0
                            for k in range(i + 1):
                                g += V[k][i + 1] * V[k][j]
                            for k in range(i + 1):
                                V[k][j] -= g * d[k]
                        else:
                            g = np.dot(V.T[i + 1][0:i + 1], V.T[j][0:i + 1])
                            V.T[j][:i + 1] -= g * d[:i + 1]
                if not num_opt:
                    for k in range(i + 1):
                        V[k][i + 1] = 0.0
                else:
                    V.T[i + 1][:i + 1] = 0.0
            if not num_opt:
                for j in range(n):
                    d[j] = V[n - 1][j]
                    V[n - 1][j] = 0.0
            else:
                d[:n] = V[n - 1][:n]
                V[n - 1][:n] = 0.0
            V[n - 1][n - 1] = 1.0
            e[0] = 0.0
        # Symmetric tridiagonal QL algorithm, taken from JAMA package.
        # private void tql2 (int n, double d[], double e[], double V[][]) {
        # needs roughly 3N^3 operations
        def tql2 (n, d, e, V):
            #  This is derived from the Algol procedures tql2, by
            #  Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
            #  Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
            #  Fortran subroutine in EISPACK.
            num_opt = False  # using vectors from numpy makes it faster
            if not num_opt:
                for i in range(1, n):  # (int i = 1; i < n; i++):
                    e[i - 1] = e[i]
            else:
                e[0:n - 1] = e[1:n]
            e[n - 1] = 0.0
            f = 0.0
            tst1 = 0.0
            eps = 2.0**-52.0
            for l in range(n):  # (int l = 0; l < n; l++) {
                # Find small subdiagonal element
                tst1 = max(tst1, abs(d[l]) + abs(e[l]))
                m = l
                while m < n:
                    if abs(e[m]) <= eps * tst1:
                        break
                    m += 1
                # If m == l, d[l] is an eigenvalue,
                # otherwise, iterate.
                if m > l:
                    iiter = 0
                    while 1:  # do {
                        iiter += 1  # (Could check iteration count here.)
                        # Compute implicit shift
                        g = d[l]
                        p = (d[l + 1] - g) / (2.0 * e[l])
                        r = (p**2 + 1)**0.5  # hypot(p,1.0)
                        if p < 0:
                            r = -r
                        d[l] = e[l] / (p + r)
                        d[l + 1] = e[l] * (p + r)
                        dl1 = d[l + 1]
                        h = g - d[l]
                        if not num_opt:
                            for i in range(l + 2, n):
                                d[i] -= h
                        else:
                            d[l + 2:n] -= h
                        f = f + h
                        # Implicit QL transformation.
                        p = d[m]
                        c = 1.0
                        c2 = c
                        c3 = c
                        el1 = e[l + 1]
                        s = 0.0
                        s2 = 0.0
                        # hh = V.T[0].copy() # only with num_opt
                        for i in range(m - 1, l - 1, -1):  # (int i = m-1; i >= l; i--) {
                            c3 = c2
                            c2 = c
                            s2 = s
                            g = c * e[i]
                            h = c * p
                            r = (p**2 + e[i]**2)**0.5  # hypot(p,e[i])
                            e[i + 1] = s * r
                            s = e[i] / r
                            c = p / r
                            p = c * d[i] - s * g
                            d[i + 1] = h + s * (c * g + s * d[i])
                            # Accumulate transformation.
                            if not num_opt:  # overall factor 3 in 30-D
                                for k in range(n):  # (int k = 0; k < n; k++) {
                                    h = V[k][i + 1]
                                    V[k][i + 1] = s * V[k][i] + c * h
                                    V[k][i] = c * V[k][i] - s * h
                            else:  # about 20% faster in 10-D
                                hh = V.T[i + 1].copy()
                                # hh[:] = V.T[i+1][:]
                                V.T[i + 1] = s * V.T[i] + c * hh
                                V.T[i] = c * V.T[i] - s * hh
                                # V.T[i] *= c
                                # V.T[i] -= s * hh
                        p = -s * s2 * c3 * el1 * e[l] / dl1
                        e[l] = s * p
                        d[l] = c * p
                        # Check for convergence.
                        if abs(e[l]) <= eps * tst1:
                            break
                    # } while (Math.abs(e[l]) > eps*tst1);
                d[l] = d[l] + f
                e[l] = 0.0
            # Sort eigenvalues and corresponding vectors.
        # tql2
        N = len(C[0])
        if 1 < 3:
            V = [[x[i] for i in range(N)] for x in C]  # copy each "row"
            d = N * [0.]
            e = N * [0.]
            tred2(N, V, d, e)
            tql2(N, d, e, V)
        return (array(d), array(V))
# convenience alias for the math-helper namespace used throughout the module
Mh = Misc.MathHelperFunctions
# if _experimental:
#     from new_stuff import *
def pprint(to_be_printed):
    """Nicely formatted print of ``to_be_printed``.

    Delegates to the standard-library ``pprint`` module when available;
    otherwise falls back to a hand-rolled rendering of dicts (one
    ``key: value`` pair per line) or a plain ``print``.
    """
    try:
        import pprint as pp
        # generate an instance PrettyPrinter
        # pp.PrettyPrinter().pprint(to_be_printed)
        pp.pprint(to_be_printed)
    except ImportError:
        if isinstance(to_be_printed, dict):
            print('{')
            for k, v in list(to_be_printed.items()):
                print("'" + k + "'" if isinstance(k, str) else k,
                      ': ',
                      # BUG FIX: the value was quoted based on
                      # isinstance(k, str) instead of isinstance(v, str)
                      "'" + v + "'" if isinstance(v, str) else v,
                      sep="")
            print('}')
        else:
            print('could not import pprint module, applying regular print')
            print(to_be_printed)
pp = pprint
class ConstRandnShift(object):
    """Callable that adds a fixed realization of ``stddev * randn(len(x))``
    to its argument vector ``x``.

    The realized shift is computed lazily per dimension and then cached,
    so repeated calls with same-length inputs see the same shift. With the
    default ``seed`` every instance realizes the identical shift; pass
    ``seed=None`` for a per-instance random shift. Used as the default
    transformation in ``FFWrapper.ShiftedFitness``.

    See: class ``FFWrapper.ShiftedFitness``
    """
    def __init__(self, stddev=3, seed=1):
        """With ``seed=None`` each instance realizes a different shift."""
        self.seed = seed
        self.stddev = stddev
        self._xopt = {}  # dimension -> cached shift vector
    def __call__(self, x):
        """Return the "shifted" vector ``x - shift``."""
        dim = len(x)
        if dim not in self._xopt:
            if self.seed is None:
                offset = np.random.randn(dim)
            else:
                # draw the shift reproducibly without disturbing the
                # global random stream: save, reseed, draw, restore
                saved_state = np.random.get_state()
                np.random.seed(self.seed)
                offset = np.random.randn(dim)
                np.random.set_state(saved_state)
            self._xopt[dim] = self.stddev * offset
        return array(x, copy=False) - self._xopt[dim]
    def get(self, dimension):
        """Return the shift applied to ``zeros(dimension)``, i.e. the
        negative of the cached offset for that dimension."""
        return self.__call__(np.zeros(dimension))
class Rotation(object):
    """Orthogonal linear transformation, one fixed matrix per dimension.

    By default each ``Rotation`` instance provides a different "random"
    but fixed rotation (pass ``seed`` for reproducibility across
    instances). The rotation matrix for a given dimension is built once
    via Gram-Schmidt orthonormalization of a Gaussian matrix and cached.
    Used to implement non-separable test functions, most conveniently via
    ``FFWrapper.RotatedFitness``.

    Calling ``R(R(x), inverse=1)`` recovers ``x``.

    See: `FFWrapper.RotatedFitness`
    """
    dicMatrices = {}  # class-level cache, kept for API compatibility
    def __init__(self, seed=None):
        """By default a random but fixed rotation, different per instance."""
        self.seed = seed
        # per-instance cache, otherwise instances would share bases
        self.dicMatrices = {}
    def __call__(self, x, inverse=False):  # function when calling an object
        """Rotate ``x`` with the cached matrix for its dimension
        (``self.dicMatrices[str(len(x))]``); ``inverse=True`` applies the
        transpose, i.e. the inverse rotation.
        """
        x = np.array(x, copy=False)
        dim = x.shape[0]  # can be an array or matrix
        key = str(dim)
        if key not in self.dicMatrices:  # create the basis once per dimension
            saved_state = np.random.get_state()
            if self.seed:
                np.random.seed(self.seed)
            else:
                np.random.seed()
            basis = np.random.randn(dim, dim)
            # Gram-Schmidt orthonormalization of the rows
            for i in range(dim):
                for j in range(0, i):
                    basis[i] -= np.dot(basis[i], basis[j]) * basis[j]
                basis[i] /= sum(basis[i]**2)**0.5
            self.dicMatrices[key] = basis
            np.random.set_state(saved_state)
        matrix = self.dicMatrices[key]
        return np.dot(matrix.T if inverse else matrix, x)
# Module-level default rotation: use ``rotate(x)`` to rotate x with a
# fixed (per-process) random orthogonal matrix, one per dimension.
rotate = Rotation()
# ____________________________________________________________
# ____________________________________________________________
#
class FFWrapper(object):
    """
    A collection of (yet experimental) classes to implement fitness
    transformations and wrappers. Aliased to `FF2` below.
    """
    class FitnessTransformation(object):
        """This class does nothing but serve as an interface template.
        Typical use-case::
            f = FitnessTransformation(f, parameters_if_needed)
        Subclasses may set ``x_transformation`` (applied to the argument)
        and/or ``f_transformation`` (applied to the result).
        See: class ``TransformSearchSpace``
        """
        def __init__(self, fitness_function, *args, **kwargs):
            """`fitness_function` must be callable (e.g. a function
            or a callable class instance)"""
            # the original fitness to be called
            self.inner_fitness = fitness_function
            # self.condition_number = ...
        def __call__(self, x, *args):
            """identity as default transformation"""
            if hasattr(self, 'x_transformation'):
                x = self.x_transformation(x)
            f = self.inner_fitness(x, *args)
            if hasattr(self, 'f_transformation'):
                f = self.f_transformation(f)
            return f
    class BookKeeping(FitnessTransformation):
        """a stump for experimenting with use-cases and possible
        extensions of book keeping
        use-case::
            f = BookKeeping(f)
            print(f.count_evaluations)
        """
        def __init__(self, callable=None):
            # NOTE: parameter name ``callable`` shadows the builtin;
            # kept for backward signature compatibility
            self.count_evaluations = 0
            self.inner_fitness = callable
        def __call__(self, *args):
            # assert len(args[0])  # x-vector
            self.count_evaluations += 1
            return self.inner_fitness(*args)
    class TransformSearchSpace(FitnessTransformation):
        """::
            f = TransformSearchSpace(f, ConstRandnShift())
        constructs the composed function f <- f o shift.
        Details: to some extent this is a nice shortcut for::
            f = lambda x, *args: f_in(ConstRandnShift()(x), *args)
        however the `lambda` definition depends on the value of
        ``f_in`` even after ``f`` has been assigned.
        See: `ShiftedFitness`, `RotatedFitness`
        """
        def __init__(self, fitness_function, transformation):
            """``TransformSearchSpace(f, s)(x) == f(s(x))``"""
            self.inner_fitness = fitness_function
            # akin to super(TransformSearchSpace, self).__init__(fitness_function)
            self.x_transformation = transformation  # used in base class __call__
    class ScaleCoordinates(TransformSearchSpace):
        """define a scaling of each variable
        """
        def __init__(self, fitness_function, multipliers=None):
            """
            :param fitness_function: a callable object
            :param multipliers: recycling is not implemented, i.e.
                the dimension must fit to the `fitness_function` argument
                when called
            """
            super(FFWrapper.ScaleCoordinates, self).__init__(
                fitness_function, self.transformation)
            self.multiplier = multipliers
            # BUG FIX: was hasattr(self.multiplier, 'len'), which is never
            # true (sequences expose '__len__', not 'len'), so the copy
            # below was unreachable
            if self.multiplier is not None and hasattr(self.multiplier, '__len__'):
                self.multiplier = array(self.multiplier, copy=True)
        def transformation(self, x, *args):
            # BUG FIX: ``self`` was missing from the signature; when the
            # bound method was invoked, the instance was bound to ``x``
            # and the body raised NameError on ``self``
            if self.multiplier is None:
                return array(x, copy=False)
            return self.multiplier * array(x, copy=False)
    class ShiftedFitness(TransformSearchSpace):
        """``f = cma.ShiftedFitness(cma.fcts.sphere)`` constructs a
        shifted sphere function, by default the shift is computed
        from class ``ConstRandnShift`` with std dev 3.
        """
        def __init__(self, f, shift=None):
            """``shift(x)`` must return a (stable) shift of x.
            Details: this class solely provides as default second
            argument to TransformSearchSpace a shift in search space.
            ``shift=lambda x: x`` would provide "no shift", ``None``
            expands to ``cma.ConstRandnShift()``.
            """
            self.inner_fitness = f
            self.x_transformation = shift if shift else ConstRandnShift()
            # alternatively we could have called super
    class RotatedFitness(TransformSearchSpace):
        """``f = cma.RotatedFitness(cma.fcts.elli)`` constructs a
        rotated ellipsoid function
        """
        def __init__(self, f, rotate=rotate):
            """``rotate(x)`` must return a (stable) rotation of x.
            Details: this class solely provides a default second
            argument to TransformSearchSpace, namely a search space
            rotation.
            """
            super(FFWrapper.RotatedFitness, self).__init__(f, rotate)
            # self.x_transformation = rotate
    class FixVariables(TransformSearchSpace):
        """fix variables to given values, thereby reducing the
        dimensionality of the preimage.
        The constructor takes ``index_value_pairs`` as dict or list of
        pairs as input and returns a function with smaller preimage space
        than `f`.
        Details: this might replace the fixed_variables option in
        CMAOptions in future, but hasn't been tested yet.
        """
        def __init__(self, f, index_value_pairs):
            """`f` is the fitness on the full-dimensional space"""
            super(FFWrapper.FixVariables, self).__init__(f, self.insert_variables)
            self.index_value_pairs = dict(index_value_pairs)
        def insert_variables(self, x):
            # expand the reduced vector x to full dimension, filling the
            # fixed indices with their prescribed values
            y = np.zeros(len(x) + len(self.index_value_pairs))
            assert len(y) > max(self.index_value_pairs)
            j = 0
            for i in range(len(y)):
                if i in self.index_value_pairs:
                    y[i] = self.index_value_pairs[i]
                else:
                    y[i] = x[j]
                    j += 1
            return y
    class SomeNaNFitness(FitnessTransformation):
        """return NaN with the given probability, otherwise the inner fitness"""
        def __init__(self, fitness_function, probability_of_nan=0.1):
            self.p = probability_of_nan
            self.inner_fitness = fitness_function
        def __call__(self, x, *args):
            if np.random.rand(1) <= self.p:
                return np.nan  # was np.NaN, removed in NumPy 2.0
            else:
                return self.inner_fitness(x, *args)
    class NoisyFitness(FitnessTransformation):
        """apply noise via f += rel_noise(dim) * f + abs_noise()"""
        def __init__(self, fitness_function,
                     rel_noise=lambda dim: 1.1 * np.random.randn() / dim,
                     abs_noise=lambda: 1.1 * np.random.randn()):
            self.rel_noise = rel_noise
            self.abs_noise = abs_noise
            self.inner_fitness = fitness_function
        def __call__(self, x, *args):
            f = self.inner_fitness(x, *args)
            if self.rel_noise:
                f += f * self.rel_noise(len(x))
                assert isscalar(f)
            if self.abs_noise:
                f += self.abs_noise()
            return f
    class GlueArguments(FitnessTransformation):
        """freeze trailing positional and keyword arguments, e.g.
        ``f = cma.FF2.GlueArguments(cma.fcts.elli, cond=1e4)``
        >>> import cma
        >>> f = cma.FF2.GlueArguments(cma.fcts.elli, cond=1e1)
        >>> f([1, 2])  # == 1**2 + 1e1 * 2**2
        41.0
        """
        def __init__(self, fitness_function, *args, **kwargs):
            self.inner_fitness = fitness_function
            self.args = args
            self.kwargs = kwargs
        def __call__(self, x, *args):
            return self.inner_fitness(array(x, copy=False),
                                      *(args + self.args), **self.kwargs)
    class UnknownFF(object):
        """search in [-10, 10] for the unknown (optimum)"""
        def __init__(self, seed=2):
            self.seed = seed
            self._x_opt_ = {}  # dimension -> cached optimum location
            self.rotate = Rotation(seed)
            self.count_evaluations = 0
        def _x_opt(self, dim):
            # reproducible optimum draw; global random state is restored
            rstate = np.random.get_state()
            np.random.seed(self.seed)
            x = self._x_opt_.setdefault(dim,
                                        0 * 3 * np.random.randn(dim))
            np.random.set_state(rstate)
            return x
        def typical_x(self, dim):
            off = self.rotate(np.floor(np.arange(0, 3, 3. / dim)) /
                              np.logspace(0, 1, dim), inverse=True)
            off[np.s_[3:]] += 0.005
            off[-1] *= 1e2
            off[0] /= 2.0e3 if off[0] > 0 else 1e3
            off[2] /= 3.01e4 if off[2] < 0 else 2e4
            return self._x_opt(dim) + off
        def __call__(self, x):
            self.count_evaluations += 1
            N = len(x)
            x = x - self._x_opt(N)
            x[-1] /= 1e2
            x[0] *= 2.0e3 if x[0] > 0 else 1e3
            x[2] *= 3.01e4 if x[2] < 0 else 2e4
            x = np.logspace(0, 1, N) * self.rotate(x)
            return 10 * N - np.e**2 + \
                sum(x**2 - 10 * np.cos(2 * np.pi * x))
FF2 = FFWrapper
class FitnessFunctions(object):
    """Versatile container for test objective functions.

    Methods take a candidate solution ``x`` (array-like) and return a
    scalar fitness, NaN marking an infeasible point; a few methods accept
    a list of candidates and return a list.
    """
    def __init__(self):
        self.counter = 0  # number of calls or any other practical use
    def rot(self, x, fun, rot=1, args=()):
        """returns ``fun(rotation(x), *args)``, ie. `fun` applied to a rotated argument"""
        if len(np.shape(array(x))) > 1:  # parallelized
            res = []
            for x in x:
                res.append(self.rot(x, fun, rot, args))
            return res
        if rot:
            return fun(rotate(x, *args))
        else:
            return fun(x)
    def somenan(self, x, fun, p=0.1):
        """returns sometimes np.nan, otherwise fun(x)"""
        if np.random.rand(1) < p:
            return np.nan  # was np.NaN, removed in NumPy 2.0
        else:
            return fun(x)
    def rand(self, x):
        """Random test objective function"""
        return np.random.random(1)[0]
    def linear(self, x):
        """linear function, decreasing in the first coordinate"""
        return -x[0]
    def lineard(self, x):
        """linear function with domain constraints (NaN when infeasible)"""
        if 1 < 3 and any(array(x) < 0):
            return np.nan
        if 1 < 3 and sum([ (10 + i) * x[i] for i in rglen(x)]) > 50e3:
            return np.nan
        return -sum(x)
    def sphere(self, x):
        """Sphere (squared norm) test objective function"""
        # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
        return sum((x + 0)**2)
    def grad_sphere(self, x, *args):
        return 2*array(x, copy=False)
    def grad_to_one(self, x, *args):
        return array(x, copy=False) - 1
    def sphere_pos(self, x):
        """Sphere (squared norm) test objective function"""
        # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
        c = 0.0
        if x[0] < c:
            return np.nan
        return -c**2 + sum((x + 0)**2)
    def spherewithoneconstraint(self, x):
        return sum((x + 0)**2) if x[0] > 1 else np.nan
    def elliwithoneconstraint(self, x, idx=[-1]):
        return self.ellirot(x) if all(array(x)[idx] > 1) else np.nan
    def spherewithnconstraints(self, x):
        return sum((x + 0)**2) if all(array(x) > 1) else np.nan
    # zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
    def noisysphere(self, x, noise=2.10e-9, cond=1.0, noise_offset=0.10):
        """noise=10 does not work with default popsize, noise handling does not help """
        return self.elli(x, cond=cond) * (1 + noise * np.random.randn() / len(x)) + noise_offset * np.random.rand()
    def spherew(self, x):
        """Sphere (squared norm) with sum x_i = 1 test objective function"""
        # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
        # s = sum(abs(x))
        # return sum((x/s+0)**2) - 1/len(x)
        # return sum((x/s)**2) - 1/len(x)
        return -0.01 * x[0] + abs(x[0])**-2 * sum(x[1:]**2)
    def partsphere(self, x):
        """Sphere (squared norm) test objective function on a random
        subset of coordinates"""
        self.counter += 1
        # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
        dim = len(x)
        x = array([x[i % dim] for i in range(2 * dim)])
        N = 8
        i = self.counter % dim
        # f = sum(x[i:i + N]**2)
        f = sum(x[np.random.randint(dim, size=N)]**2)
        return f
    def sectorsphere(self, x):
        """asymmetric Sphere (squared norm) test objective function"""
        return sum(x**2) + (1e6 - 1) * sum(x[x < 0]**2)
    def cornersphere(self, x):
        """Sphere (squared norm) test objective function constraint to the corner"""
        nconstr = len(x) - 0
        if any(x[:nconstr] < 1):
            return np.nan  # was np.NaN, removed in NumPy 2.0
        return sum(x**2) - nconstr
    def cornerelli(self, x):
        """ellipsoid constrained to the corner x >= 1"""
        if any(x < 1):
            return np.nan
        return self.elli(x) - self.elli(np.ones(len(x)))
    def cornerellirot(self, x):
        """rotated ellipsoid constrained to the corner x >= 1"""
        if any(x < 1):
            return np.nan
        return self.ellirot(x)
    def normalSkew(self, f):
        N = np.random.randn(1)[0]**2
        if N < 1:
            N = f * N  # diminish blow up lower part
        return N
    def noiseC(self, x, func=sphere, fac=10, expon=0.8):
        f = func(self, x)
        N = np.random.randn(1)[0] / np.random.randn(1)[0]
        return max(1e-19, f + (float(fac) / len(x)) * f**expon * N)
    def noise(self, x, func=sphere, fac=10, expon=1):
        f = func(self, x)
        # R = np.random.randn(1)[0]
        R = np.log10(f) + expon * abs(10 - np.log10(f)) * np.random.rand(1)[0]
        # sig = float(fac)/float(len(x))
        # R = log(f) + 0.5*log(f) * random.randn(1)[0]
        # return max(1e-19, f + sig * (f**np.log10(f)) * np.exp(R))
        # return max(1e-19, f * np.exp(sig * N / f**expon))
        # return max(1e-19, f * normalSkew(f**expon)**sig)
        return f + 10**R  # == f + f**(1+0.5*RN)
    def cigar(self, x, rot=0, cond=1e6, noise=0):
        """Cigar test objective function"""
        if rot:
            x = rotate(x)
        x = [x] if isscalar(x[0]) else x  # scalar into list
        f = [(x[0]**2 + cond * sum(x[1:]**2)) * np.exp(noise * np.random.randn(1)[0] / len(x)) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element-list into scalar
    def grad_cigar(self, x, *args):
        grad = 2 * 1e6 * np.array(x)
        grad[0] /= 1e6
        return grad
    def diagonal_cigar(self, x, cond=1e6):
        axis = np.ones(len(x)) / len(x)**0.5
        proj = dot(axis, x) * axis
        s = sum(proj**2)
        s += cond * sum((x - proj)**2)
        return s
    def tablet(self, x, rot=0):
        """Tablet test objective function"""
        if rot and rot is not fcts.tablet:
            x = rotate(x)
        x = [x] if isscalar(x[0]) else x  # scalar into list
        f = [1e6 * x[0]**2 + sum(x[1:]**2) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element-list into scalar
    def grad_tablet(self, x, *args):
        grad = 2 * np.array(x)
        grad[0] *= 1e6
        return grad
    def cigtab(self, y):
        """Cigtab test objective function"""
        X = [y] if isscalar(y[0]) else y
        f = [1e-4 * x[0]**2 + 1e4 * x[1]**2 + sum(x[2:]**2) for x in X]
        return f if len(f) > 1 else f[0]
    def twoaxes(self, y):
        """Cigtab test objective function"""
        X = [y] if isscalar(y[0]) else y
        N2 = len(X[0]) // 2
        f = [1e6 * sum(x[0:N2]**2) + sum(x[N2:]**2) for x in X]
        return f if len(f) > 1 else f[0]
    def ellirot(self, x):
        return fcts.elli(array(x), 1)
    def hyperelli(self, x):
        N = len(x)
        return sum((np.arange(1, N + 1) * x)**2)
    def halfelli(self, x):
        half = len(x) // 2
        felli = self.elli(x[:half])
        return felli + 1e-8 * sum(x[half:]**2)
    def elli(self, x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False):
        """Ellipsoid test objective function"""
        if not isscalar(x[0]):  # parallel evaluation
            return [self.elli(xi, rot) for xi in x]  # could save 20% overall
        if rot:
            x = rotate(x)
        N = len(x)
        if actuator_noise:
            x = x + actuator_noise * np.random.randn(N)
        ftrue = sum(cond**(np.arange(N) / (N - 1.)) * (x + xoffset)**2)
        alpha = 0.49 + 1. / N
        beta = 1
        felli = np.random.rand(1)[0]**beta * ftrue * \
            max(1, (10.**9 / (ftrue + 1e-99))**(alpha * np.random.rand(1)[0]))
        # felli = ftrue + 1*np.random.randn(1)[0] / (1e-30 +
        #                                            np.abs(np.random.randn(1)[0]))**0
        if both:
            return (felli, ftrue)
        else:
            # return felli  # possibly noisy value
            return ftrue  # + np.random.randn()
    def grad_elli(self, x, *args):
        cond = 1e6
        N = len(x)
        return 2 * cond**(np.arange(N) / (N - 1.)) * array(x, copy=False)
    def fun_as_arg(self, x, *args):
        """``fun_as_arg(x, fun, *more_args)`` calls ``fun(x, *more_args)``.
        Use case::
            fmin(cma.fun_as_arg, args=(fun,), gradf=grad_numerical)
        calls fun_as_args(x, args) and grad_numerical(x, fun, args=args)
        """
        fun = args[0]
        more_args = args[1:] if len(args) > 1 else ()
        return fun(x, *more_args)
    def grad_numerical(self, x, func, epsilon=None):
        """symmetric gradient"""
        eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
        grad = np.zeros(len(x))
        ei = np.zeros(len(x))  # float is 1.6 times faster than int
        for i in rglen(x):
            ei[i] = eps[i]
            grad[i] = (func(x + ei) - func(x - ei)) / (2*eps[i])
            ei[i] = 0
        return grad
    def elliconstraint(self, x, cfac=1e8, tough=True, cond=1e6):
        """ellipsoid test objective function with "constraints" """
        N = len(x)
        f = sum(cond**(np.arange(N)[-1::-1] / (N - 1)) * x**2)
        cvals = (x[0] + 1,
                 x[0] + 1 + 100 * x[1],
                 x[0] + 1 - 100 * x[1])
        if tough:
            f += cfac * sum(max(0, c) for c in cvals)
        else:
            f += cfac * sum(max(0, c + 1e-3)**2 for c in cvals)
        return f
    def rosen(self, x, alpha=1e2):
        """Rosenbrock test objective function"""
        x = [x] if isscalar(x[0]) else x  # scalar into list
        f = [sum(alpha * (x[:-1]**2 - x[1:])**2 + (1. - x[:-1])**2) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element-list into scalar
    def grad_rosen(self, x, *args):
        N = len(x)
        grad = np.zeros(N)
        grad[0] = 2 * (x[0] - 1) + 200 * (x[1] - x[0]**2) * -2 * x[0]
        i = np.arange(1, N - 1)
        grad[i] = 2 * (x[i] - 1) - 400 * (x[i+1] - x[i]**2) * x[i] + 200 * (x[i] - x[i-1]**2)
        grad[N-1] = 200 * (x[N-1] - x[N-2]**2)
        return grad
    def diffpow(self, x, rot=0):
        """Diffpow test objective function"""
        N = len(x)
        if rot:
            x = rotate(x)
        return sum(np.abs(x)**(2. + 4.*np.arange(N) / (N - 1.)))**0.5
    def rosenelli(self, x):
        """Rosenbrock on the first half, ellipsoid on the second half"""
        N = len(x)
        # BUG FIX: N / 2 is a float in Python 3 and raises TypeError
        # when used as a slice index; use integer division
        return self.rosen(x[:N // 2]) + self.elli(x[N // 2:], cond=1)
    def ridge(self, x, expo=2):
        x = [x] if isscalar(x[0]) else x  # scalar into list
        f = [x[0] + 100 * np.sum(x[1:]**2)**(expo / 2.) for x in x]
        return f if len(f) > 1 else f[0]  # 1-element-list into scalar
    def ridgecircle(self, x, expo=0.5):
        """happy cat by HG Beyer"""
        a = len(x)
        s = sum(x**2)
        return ((s - a)**2)**(expo / 2) + s / a + sum(x) / a
    def happycat(self, x, alpha=1. / 8):
        s = sum(x**2)
        return ((s - len(x))**2)**alpha + (s / 2 + sum(x)) / len(x) + 0.5
    def flat(self, x):
        """constant (flat) test function"""
        return 1
        # dead alternatives, kept for reference (were unreachable code):
        # return 1 if np.random.rand(1) < 0.9 else 1.1
        # return np.random.randint(1, 30)
    def branin(self, x):
        # in [0,15]**2
        y = x[1]
        x = x[0] + 5
        return (y - 5.1 * x**2 / 4 / np.pi**2 + 5 * x / np.pi - 6)**2 + 10 * (1 - 1 / 8 / np.pi) * np.cos(x) + 10 - 0.397887357729738160000
    def goldsteinprice(self, x):
        x1 = x[0]
        x2 = x[1]
        return (1 + (x1 + x2 + 1)**2 * (19 - 14 * x1 + 3 * x1**2 - 14 * x2 + 6 * x1 * x2 + 3 * x2**2)) * (
            30 + (2 * x1 - 3 * x2)**2 * (18 - 32 * x1 + 12 * x1**2 + 48 * x2 - 36 * x1 * x2 + 27 * x2**2)) - 3
    def griewank(self, x):
        # was in [-600 600]
        x = (600. / 5) * x
        return 1 - np.prod(np.cos(x / sqrt(1. + np.arange(len(x))))) + sum(x**2) / 4e3
    def rastrigin(self, x):
        """Rastrigin test objective function"""
        if not isscalar(x[0]):
            N = len(x[0])
            return [10 * N + sum(xi**2 - 10 * np.cos(2 * np.pi * xi)) for xi in x]
            # return 10*N + sum(x**2 - 10*np.cos(2*np.pi*x), axis=1)
        N = len(x)
        return 10 * N + sum(x**2 - 10 * np.cos(2 * np.pi * x))
    def schaffer(self, x):
        """ Schaffer function x0 in [-100..100]"""
        N = len(x)
        s = x[0:N - 1]**2 + x[1:N]**2
        return sum(s**0.25 * (np.sin(50 * s**0.1)**2 + 1))
    def schwefelelli(self, x):
        s = 0
        f = 0
        for i in rglen(x):
            s += x[i]
            f += s**2
        return f
    def schwefelmult(self, x, pen_fac=1e4):
        """multimodal Schwefel function with domain -500..500"""
        y = [x] if isscalar(x[0]) else x
        N = len(y[0])
        f = array([418.9829 * N - 1.27275661e-5 * N - sum(x * np.sin(np.abs(x)**0.5))
                   + pen_fac * sum((abs(x) > 500) * (abs(x) - 500)**2) for x in y])
        return f if len(f) > 1 else f[0]
    def optprob(self, x):
        n = np.arange(len(x)) + 1
        f = n * x * (1 - x)**(n - 1)
        return sum(1 - f)
    def lincon(self, x, theta=0.01):
        """ridge like linear function with one linear constraint"""
        if x[0] < 0:
            return np.nan  # was np.NaN, removed in NumPy 2.0
        return theta * x[1] + x[0]
    def rosen_nesterov(self, x, rho=100):
        """needs exponential number of steps in a non-increasing f-sequence.
        x_0 = (-1,1,...,1)
        See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock Function"
        """
        f = 0.25 * (x[0] - 1)**2
        f += rho * sum((x[1:] - 2 * x[:-1]**2 + 1)**2)
        return f
    def powel_singular(self, x):
        # ((8 * np.sin(7 * (x[i] - 0.9)**2)**2 ) + (6 * np.sin()))
        res = np.sum((x[i - 1] + 10 * x[i])**2 + 5 * (x[i + 1] - x[i + 2])**2 +
                     (x[i] - 2 * x[i + 1])**4 + 10 * (x[i - 1] - x[i + 2])**4
                     for i in range(1, len(x) - 2))
        return 1 + res
    def styblinski_tang(self, x):
        """in [-5, 5]
        """
        # x_opt = N * [-2.90353402], seems to have essentially
        # (only) 2**N local optima
        return (39.1661657037714171054273576010019 * len(x))**1 + \
            sum(x**4 - 16*x**2 + 5*x) / 2
    def trid(self, x):
        return sum((x-1)**2) - sum(x[:-1] * x[1:])
    def bukin(self, x):
        """Bukin function from Wikipedia, generalized simplistically from 2-D.
        http://en.wikipedia.org/wiki/Test_functions_for_optimization"""
        s = 0
        for k in range((1+len(x)) // 2):
            z = x[2 * k]
            y = x[min((2*k + 1, len(x)-1))]
            s += 100 * np.abs(y - 0.01 * z**2)**0.5 + 0.01 * np.abs(z + 10)
        return s
# Module-level singleton exposing all test objective functions.
fcts = FitnessFunctions()
Fcts = fcts  # for cross compatibility, as if the functions were static members of class Fcts
FF = fcts  # shortest alias
def felli(x):
    """unbound test function, needed to test multiprocessor"""
    weights = 1e6**(np.arange(len(x)) / (len(x) - 1))
    return sum(weights * np.array(x, copy=False)**2)
# ____________________________________________
# ____________________________________________________________
def _test(module=None):  # None is fine when called from inside the module
    """Run the doctests of ``module`` (default: current module) and print
    the summary."""
    import doctest
    results = doctest.testmod(module)
    print(results)
def process_doctest_output(stream=None):
    """Re-format doctest output read from ``stream`` (a filename, or None
    to read the files named in argv / stdin via ``fileinput``).

    A small line-oriented state machine: ``state`` is 1 while inside an
    'Expected:' section, 2 inside 'Got:', -1 after a 'Failed example:'
    header, -2 after 'Exception raised', 0 otherwise.

    NOTE(review): ``s1`` (expected) and ``s2`` (got) are accumulated but
    never returned or printed here — only ``s3`` is printed; presumably
    leftover from debugging, confirm before relying on them.
    """
    import fileinput
    s1 = ""  # collected 'Expected:' text
    s2 = ""  # collected 'Got:' text
    s3 = ""  # raw text of the current '***'-delimited section
    state = 0
    for line in fileinput.input(stream):  # takes argv as file or stdin
        if 1 < 3:
            s3 += line
            # print the accumulated section when an exception section ends
            if state < -1 and line.startswith('***'):
                print(s3)
            if line.startswith('***'):
                s3 = ""
        if state == -1:  # found a failed example line
            s1 += '\n\n*** Failed Example:' + line
            s2 += '\n\n\n'  # line
            # state = 0  # wait for 'Expected:' line
        if line.startswith('Expected:'):
            state = 1
            continue
        elif line.startswith('Got:'):
            state = 2
            continue
        elif line.startswith('***'):  # marks end of failed example
            state = 0
        elif line.startswith('Failed example:'):
            state = -1
        elif line.startswith('Exception raised'):
            state = -2
        # in effect more else:
        if state == 1:
            s1 += line + ''
        if state == 2:
            s2 += line + ''
# ____________________________________________________________
# ____________________________________________________________
#
def main(argv=None):
    """to install and/or test from the command line use::

        python cma.py [options | func dim sig0 [optkey optval][optkey optval]...]

    with options being

        ``--test`` (or ``-t``) to run the doctest, ``--test -v`` to get (much) verbosity.

        ``install`` to install cma.py (uses setup from distutils.core).

        ``--doc`` for more infos.

    Or start Python or (even better) ``ipython`` and::

        import cma
        cma.main('--test')
        help(cma)
        help(cma.fmin)
        res = fmin(cma.fcts.rosen, 10 * [0], 1)
        cma.plot()

    Examples
    ========
    Testing with the local python distribution from a command line
    in a folder where ``cma.py`` can be found::

        python cma.py --test

    And a single run on the Rosenbrock function::

        python cma.py rosen 10 1  # dimension initial_sigma
        python cma.py plot

    In the python shell::

        import cma
        cma.main('--test')

    """
    if argv is None:
        argv = sys.argv  # should have better been sys.argv[1:]
    else:
        if isinstance(argv, list):
            argv = ['python'] + argv  # see above
        else:
            argv = ['python'] + [argv]

    # uncomment for unit test
    # _test()
    # handle input arguments, getopt might be helpful ;-)
    if len(argv) >= 1:  # function and help
        if len(argv) == 1 or argv[1].startswith('-h') or argv[1].startswith('--help'):
            print(main.__doc__)
            fun = None
        elif argv[1].startswith('-t') or argv[1].startswith('--test'):
            import doctest
            if len(argv) > 2 and (argv[2].startswith('--v') or argv[2].startswith('-v')):  # verbose
                print('doctest for cma.py: due to different platforms and python versions')
                print('and in some cases due to a missing unique random seed')
                print('many examples will "fail". This is OK, if they give a similar')
                print('to the expected result and if no exception occurs. ')
                # if argv[1][2] == 'v':
                doctest.testmod(sys.modules[__name__], report=True)  # this is quite cool!
            else:  # was: if len(argv) > 2 and (argv[2].startswith('--qu') or argv[2].startswith('-q')):
                print('doctest for cma.py: launching...')  # not anymore: (it might be necessary to close the pop up window to finish)
                fn = '_cma_doctest_.txt'
                stdout = sys.stdout
                try:
                    # redirect the doctest chatter into a file,
                    # restore stdout even on failure
                    with open(fn, 'w') as f:
                        sys.stdout = f
                        clock = ElapsedTime()
                        doctest.testmod(sys.modules[__name__], report=True)  # this is quite cool!
                        t_elapsed = clock()
                finally:
                    sys.stdout = stdout
                process_doctest_output(fn)
                # clean up pickle files left behind by doctest runs;
                # best-effort, hence errors are ignored (narrowed from a
                # bare except to OSError)
                try:
                    import os
                    for name in os.listdir('.'):
                        if (name.startswith('bound_method_FitnessFunctions.rosen_of_cma.FitnessFunctions_object_at_')
                            and name.endswith('.pkl')):
                            os.remove(name)
                except OSError:
                    pass
                print('doctest for cma.py: finished (no other output should be seen after launching, more in file _cma_doctest_.txt)')
                print('  elapsed time [s]:', t_elapsed)
            return
        elif argv[1] == '--doc':
            print(__doc__)
            print(CMAEvolutionStrategy.__doc__)
            print(fmin.__doc__)
            fun = None
        elif argv[1] == '--fcts':
            print('List of valid function names:')
            print([d for d in dir(fcts) if not d.startswith('_')])
            fun = None
        elif argv[1] in ('install', '--install'):
            from distutils.core import setup
            setup(name="cma",
                  long_description=__doc__,
                  version=__version__.split()[0],
                  description="CMA-ES, Covariance Matrix Adaptation Evolution Strategy for non-linear numerical optimization in Python",
                  author="Nikolaus Hansen",
                  author_email="hansen at lri.fr",
                  maintainer="Nikolaus Hansen",
                  maintainer_email="hansen at lri.fr",
                  url="https://www.lri.fr/~hansen/cmaes_inmatlab.html#python",
                  license="BSD",
                  classifiers = [
                      "Intended Audience :: Science/Research",
                      "Intended Audience :: Education",
                      "Intended Audience :: Other Audience",
                      "Topic :: Scientific/Engineering",
                      "Topic :: Scientific/Engineering :: Mathematics",
                      "Topic :: Scientific/Engineering :: Artificial Intelligence",
                      "Operating System :: OS Independent",
                      "Programming Language :: Python :: 2.6",
                      "Programming Language :: Python :: 2.7",
                      "Programming Language :: Python :: 3",
                      "Development Status :: 4 - Beta",
                      "Environment :: Console",
                      "License :: OSI Approved :: BSD License",
                      # "License :: OSI Approved :: MIT License",
                  ],
                  keywords=["optimization", "CMA-ES", "cmaes"],
                  py_modules=["cma"],
                  requires=["numpy"],
                  )
            fun = None
        elif argv[1] in ('plot',):
            plot(name=argv[2] if len(argv) > 2 else None)
            input('press return')
            fun = None
        elif len(argv) > 3:
            # SECURITY NOTE: eval on command-line input; acceptable for a
            # developer tool, do not expose to untrusted callers
            fun = eval('fcts.' + argv[1])
        else:
            print('try -h option')
            fun = None

    if fun is not None:
        if len(argv) > 2:  # dimension
            x0 = np.ones(eval(argv[2]))
        if len(argv) > 3:  # sigma
            sig0 = eval(argv[3])
        opts = {}
        for i in range(5, len(argv), 2):
            opts[argv[i - 1]] = eval(argv[i])

    # run fmin
    if fun is not None:
        tic = time.time()
        fmin(fun, x0, sig0, opts)  # ftarget=1e-9, tolfacupx=1e9, verb_log=10)
        # plot()
        # print ' best function value ', res[2]['es'].best[1]
        # BUG FIX: the format string was passed to print as a separate
        # argument ("print('... %.2f', value)") instead of being applied
        print('elapsed time [s]: %.2f' % round(time.time() - tic, 2))
    elif not len(argv):  # unreachable in practice: argv always has >= 1 entry
        fmin(fcts.elli, np.ones(6) * 0.1, 0.1, {'ftarget':1e-9})
# ____________________________________________________________
# ____________________________________________________________
#
# mainly for testing purpose
# executed when called from an OS shell
if __name__ == "__main__":
# for i in xrange(1000): # how to find the memory leak
# main(["cma.py", "rastrigin", "10", "5", "popsize", "200", "maxfevals", "24999", "verb_log", "0"])
main() | 377,327 | 41.878182 | 276 | py |
rllab | rllab-master/rllab/algos/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/algos/cem.py | from itertools import chain, zip_longest
from rllab.algos.base import RLAlgorithm
import numpy as np
from rllab.misc.special import discount_cumsum
from rllab.sampler import parallel_sampler, stateful_pool
from rllab.sampler.utils import rollout
from rllab.core.serializable import Serializable
import rllab.misc.logger as logger
import rllab.plotter as plotter
def _get_stderr_lb(x):
    """Lower confidence bound: column-wise mean minus standard error of ``x``.

    ``x`` is a sequence of equal-length sequences; statistics are taken
    over the first axis (ddof=1 when more than one row is present).
    """
    mean = np.mean(x, 0)
    ddof = 1 if len(x) > 1 else 0
    sem = np.std(x, axis=0, ddof=ddof) / np.sqrt(len(x))
    return mean - sem
def _get_stderr_lb_varyinglens(x):
    """Element-wise mean minus standard error for sequences of varying
    length.

    Shorter sequences are padded with NaN via ``zip_longest``; NaNs are
    excluded from the per-position mean/std and from the sample count.
    """
    means, stds, counts = [], [], []
    for column in zip_longest(*x, fillvalue=np.nan):
        means.append(np.nanmean(column))
        valid = len(column) - np.sum(np.isnan(column))
        stds.append(np.nanstd(column, ddof=1 if valid > 1 else 0))
        counts.append(valid)
    return np.array(means) - np.array(stds) / np.sqrt(counts)
def _worker_rollout_policy(G, args):
    """Worker: sample one parameter vector from the current Gaussian
    search distribution, evaluate it with ``n_evals`` rollouts, and
    return the parameters with aggregated statistics.

    ``G`` holds the worker's shared env/policy; ``args`` is a dict with
    keys "sample_std", "cur_mean", "n_evals", "max_path_length",
    "discount" and "criterion".

    Returns ``((params, result_path), inc)`` where ``inc`` is the amount
    to add to the collection counter (samples or paths, per "criterion").
    """
    sample_std = args["sample_std"].flatten()
    cur_mean = args["cur_mean"].flatten()
    n_evals = args["n_evals"]
    K = len(cur_mean)
    # draw one candidate parameter vector ~ N(cur_mean, diag(sample_std**2))
    params = np.random.standard_normal(K) * sample_std + cur_mean
    G.policy.set_param_values(params)
    paths, returns, undiscounted_returns = [], [], []
    for _ in range(n_evals):
        path = rollout(G.env, G.policy, args["max_path_length"])
        path["returns"] = discount_cumsum(path["rewards"], args["discount"])
        path["undiscounted_return"] = sum(path["rewards"])
        paths.append(path)
        returns.append(path["returns"])
        undiscounted_returns.append(path["undiscounted_return"])
    result_path = {'full_paths':paths}
    # aggregate across the n_evals rollouts with a mean-minus-stderr
    # lower bound (pessimistic score for the sampled parameters)
    result_path['undiscounted_return'] = _get_stderr_lb(undiscounted_returns)
    result_path['returns'] = _get_stderr_lb_varyinglens(returns)
    # not letting n_evals count towards below cases since n_evals is multiple eval for single paramset
    if args["criterion"] == "samples":
        inc = len(path["rewards"])
    elif args["criterion"] == "paths":
        inc = 1
    else:
        raise NotImplementedError
    return (params, result_path), inc
class CEM(RLAlgorithm, Serializable):
    def __init__(
            self,
            env,
            policy,
            n_itr=500,
            max_path_length=500,
            discount=0.99,
            init_std=1.,
            n_samples=100,
            batch_size=None,
            best_frac=0.05,
            extra_std=1.,
            extra_decay_time=100,
            plot=False,
            n_evals=1,
            **kwargs
    ):
        """
        Cross-entropy method over the policy's flat parameter vector.

        :param env: Environment to collect rollouts from.
        :param policy: Policy whose parameter vector is optimized.
        :param n_itr: Number of iterations.
        :param max_path_length: Maximum length of a single rollout.
        :param batch_size: # of samples from trajs from param distribution, when this
        is set, n_samples is ignored
        :param discount: Discount.
        :param plot: Plot evaluation run after each iteration.
        :param init_std: Initial std for param distribution
        :param extra_std: Decaying std added to param distribution at each iteration
        :param extra_decay_time: Iterations that it takes to decay extra std
        :param n_samples: #of samples from param distribution
        :param best_frac: Best fraction of the sampled params
        :param n_evals: # of evals per sample from the param distr. returned score is mean - stderr of evals
        :return:
        """
        Serializable.quick_init(self, locals())
        self.env = env
        self.policy = policy
        self.batch_size = batch_size
        self.plot = plot
        self.extra_decay_time = extra_decay_time
        self.extra_std = extra_std
        self.best_frac = best_frac
        self.n_samples = n_samples
        self.init_std = init_std
        self.discount = discount
        self.max_path_length = max_path_length
        self.n_itr = n_itr
        self.n_evals = n_evals
def train(self):
parallel_sampler.populate_task(self.env, self.policy)
if self.plot:
plotter.init_plot(self.env, self.policy)
cur_std = self.init_std
cur_mean = self.policy.get_param_values()
# K = cur_mean.size
n_best = max(1, int(self.n_samples * self.best_frac))
for itr in range(self.n_itr):
# sample around the current distribution
extra_var_mult = max(1.0 - itr / self.extra_decay_time, 0)
sample_std = np.sqrt(np.square(cur_std) + np.square(self.extra_std) * extra_var_mult)
if self.batch_size is None:
criterion = 'paths'
threshold = self.n_samples
else:
criterion = 'samples'
threshold = self.batch_size
infos = stateful_pool.singleton_pool.run_collect(
_worker_rollout_policy,
threshold=threshold,
args=(dict(cur_mean=cur_mean,
sample_std=sample_std,
max_path_length=self.max_path_length,
discount=self.discount,
criterion=criterion,
n_evals=self.n_evals),)
)
xs = np.asarray([info[0] for info in infos])
paths = [info[1] for info in infos]
fs = np.array([path['returns'][0] for path in paths])
print((xs.shape, fs.shape))
best_inds = (-fs).argsort()[:n_best]
best_xs = xs[best_inds]
cur_mean = best_xs.mean(axis=0)
cur_std = best_xs.std(axis=0)
best_x = best_xs[0]
logger.push_prefix('itr #%d | ' % itr)
logger.record_tabular('Iteration', itr)
logger.record_tabular('CurStdMean', np.mean(cur_std))
undiscounted_returns = np.array([path['undiscounted_return'] for path in paths])
logger.record_tabular('AverageReturn',
np.mean(undiscounted_returns))
logger.record_tabular('StdReturn',
np.std(undiscounted_returns))
logger.record_tabular('MaxReturn',
np.max(undiscounted_returns))
logger.record_tabular('MinReturn',
np.min(undiscounted_returns))
logger.record_tabular('AverageDiscountedReturn',
np.mean(fs))
logger.record_tabular('NumTrajs',
len(paths))
paths = list(chain(*[d['full_paths'] for d in paths])) #flatten paths for the case n_evals > 1
logger.record_tabular('AvgTrajLen',
np.mean([len(path['returns']) for path in paths]))
self.policy.set_param_values(best_x)
self.env.log_diagnostics(paths)
self.policy.log_diagnostics(paths)
logger.save_itr_params(itr, dict(
itr=itr,
policy=self.policy,
env=self.env,
cur_mean=cur_mean,
cur_std=cur_std,
))
logger.dump_tabular(with_prefix=False)
logger.pop_prefix()
if self.plot:
plotter.update_plot(self.policy, self.max_path_length)
parallel_sampler.terminate_task() | 7,274 | 39.19337 | 108 | py |
rllab | rllab-master/rllab/algos/reps.py | import theano.tensor as TT
import theano
import scipy.optimize
from rllab.misc import logger
from rllab.misc.overrides import overrides
from rllab.misc import ext
from rllab.algos.batch_polopt import BatchPolopt
from rllab.core.serializable import Serializable
import numpy as np
from rllab.misc import tensor_utils
class REPS(BatchPolopt, Serializable):
    """
    Relative Entropy Policy Search (REPS)

    Alternates between optimizing a dual function (over the temperature eta
    and a linear value-function parameter v) and reweighted maximum-likelihood
    policy fitting, subject to a KL constraint of ``epsilon``.

    References
    ----------
    [1] J. Peters, K. Mulling, and Y. Altun, "Relative Entropy Policy Search," Artif. Intell., pp. 1607-1612, 2008.
    """
    def __init__(
            self,
            epsilon=0.5,
            L2_reg_dual=0.,  # 1e-5,
            L2_reg_loss=0.,
            max_opt_itr=50,
            optimizer=scipy.optimize.fmin_l_bfgs_b,
            **kwargs):
        """
        :param epsilon: Max KL divergence between new policy and old policy.
        :param L2_reg_dual: Dual regularization
        :param L2_reg_loss: Loss regularization
        :param max_opt_itr: Maximum number of batch optimization iterations.
        :param optimizer: Module path to the optimizer. It must support the same interface as
         scipy.optimize.fmin_l_bfgs_b.
        :return:
        """
        Serializable.quick_init(self, locals())
        super(REPS, self).__init__(**kwargs)
        self.epsilon = epsilon
        self.L2_reg_dual = L2_reg_dual
        self.L2_reg_loss = L2_reg_loss
        self.max_opt_itr = max_opt_itr
        self.optimizer = optimizer
        self.opt_info = None

    @overrides
    def init_opt(self):
        """Build and compile the symbolic loss, dual and KL functions."""
        is_recurrent = int(self.policy.recurrent)
        # Init dual param values
        self.param_eta = 15.
        # Adjust for linear feature vector: _features() produces
        # [obs, obs**2, t, t**2, t**3, 1] => 2 * obs_dim + 4 entries.
        self.param_v = np.random.rand(self.env.observation_space.flat_dim * 2 + 4)
        # Theano vars
        obs_var = self.env.observation_space.new_tensor_variable(
            'obs',
            extra_dims=1 + is_recurrent,
        )
        action_var = self.env.action_space.new_tensor_variable(
            'action',
            extra_dims=1 + is_recurrent,
        )
        rewards = ext.new_tensor(
            'rewards',
            ndim=1 + is_recurrent,
            dtype=theano.config.floatX,
        )
        # Feature difference variable representing the difference in feature
        # value of the next observation and the current observation \phi(s') -
        # \phi(s).
        feat_diff = ext.new_tensor(
            'feat_diff',
            ndim=2 + is_recurrent,
            dtype=theano.config.floatX
        )
        param_v = TT.vector('param_v')
        param_eta = TT.scalar('eta')
        valid_var = TT.matrix('valid')
        state_info_vars = {
            k: ext.new_tensor(
                k,
                ndim=2 + is_recurrent,
                dtype=theano.config.floatX
            ) for k in self.policy.state_info_keys
        }
        state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]
        # Policy-related symbolics
        dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
        dist = self.policy.distribution
        # log of the policy dist
        logli = dist.log_likelihood_sym(action_var, dist_info_vars)
        # Symbolic sample Bellman error
        delta_v = rewards + TT.dot(feat_diff, param_v)
        # Policy loss (negative because we minimize). The exp() is shifted by
        # max(delta_v / eta) for numerical stability.
        if is_recurrent:
            loss = - TT.sum(logli * TT.exp(
                delta_v / param_eta - TT.max(delta_v / param_eta)
            ) * valid_var) / TT.sum(valid_var)
        else:
            loss = - TT.mean(logli * TT.exp(
                delta_v / param_eta - TT.max(delta_v / param_eta)
            ))
        # Add regularization to loss.
        reg_params = self.policy.get_params(regularizable=True)
        loss += self.L2_reg_loss * TT.sum(
            [TT.mean(TT.square(param)) for param in reg_params]
        ) / len(reg_params)
        # Policy loss gradient.
        loss_grad = TT.grad(
            loss, self.policy.get_params(trainable=True))
        if is_recurrent:
            recurrent_vars = [valid_var]
        else:
            recurrent_vars = []
        # Note: renamed from `input` to avoid shadowing the builtin.
        loss_inputs = [rewards, obs_var, feat_diff,
                       action_var] + state_info_vars_list + recurrent_vars + [param_eta, param_v]
        f_loss = ext.compile_function(
            inputs=loss_inputs,
            outputs=loss,
        )
        f_loss_grad = ext.compile_function(
            inputs=loss_inputs,
            outputs=loss_grad,
        )
        # Mean KL between the pre- and post-update policies (for diagnostics).
        old_dist_info_vars = {
            k: ext.new_tensor(
                'old_%s' % k,
                ndim=2 + is_recurrent,
                dtype=theano.config.floatX
            ) for k in dist.dist_info_keys
        }
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]
        if is_recurrent:
            mean_kl = TT.sum(dist.kl_sym(old_dist_info_vars, dist_info_vars) * valid_var) / TT.sum(valid_var)
        else:
            mean_kl = TT.mean(dist.kl_sym(old_dist_info_vars, dist_info_vars))
        f_kl = ext.compile_function(
            inputs=[obs_var, action_var] + state_info_vars_list + old_dist_info_vars_list + recurrent_vars,
            outputs=mean_kl,
        )
        # Dual-related symbolics
        # Symbolic dual: g(eta, v) = eta*eps + eta*log E[exp(delta_v / eta)],
        # again shifted by the max for numerical stability.
        if is_recurrent:
            dual = param_eta * self.epsilon + \
                   param_eta * TT.log(
                       TT.sum(
                           TT.exp(
                               delta_v / param_eta - TT.max(delta_v / param_eta)
                           ) * valid_var
                       ) / TT.sum(valid_var)
                   ) + param_eta * TT.max(delta_v / param_eta)
        else:
            dual = param_eta * self.epsilon + \
                   param_eta * TT.log(
                       TT.mean(
                           TT.exp(
                               delta_v / param_eta - TT.max(delta_v / param_eta)
                           )
                       )
                   ) + param_eta * TT.max(delta_v / param_eta)
        # Add L2 regularization (also penalizes 1/eta to keep eta away from 0).
        dual += self.L2_reg_dual * \
                (TT.square(param_eta) + TT.square(1 / param_eta))
        # Symbolic dual gradient
        dual_grad = TT.grad(cost=dual, wrt=[param_eta, param_v])
        # Eval functions.
        f_dual = ext.compile_function(
            inputs=[rewards, feat_diff] + state_info_vars_list + recurrent_vars + [param_eta, param_v],
            outputs=dual
        )
        f_dual_grad = ext.compile_function(
            inputs=[rewards, feat_diff] + state_info_vars_list + recurrent_vars + [param_eta, param_v],
            outputs=dual_grad
        )
        self.opt_info = dict(
            f_loss_grad=f_loss_grad,
            f_loss=f_loss,
            f_dual=f_dual,
            f_dual_grad=f_dual_grad,
            f_kl=f_kl
        )

    def _features(self, path):
        """Linear value-function features: [obs, obs**2, t, t**2, t**3, 1]."""
        o = np.clip(path["observations"], -10, 10)
        l = len(path["rewards"])
        al = np.arange(l).reshape(-1, 1) / 100.0
        return np.concatenate([o, o ** 2, al, al ** 2, al ** 3, np.ones((l, 1))], axis=1)

    @overrides
    def optimize_policy(self, itr, samples_data):
        """Run one REPS update: optimize the dual, then refit the policy."""
        # Init vars
        rewards = samples_data['rewards']
        actions = samples_data['actions']
        observations = samples_data['observations']
        agent_infos = samples_data["agent_infos"]
        state_info_list = [agent_infos[k] for k in self.policy.state_info_keys]
        dist_info_list = [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
        if self.policy.recurrent:
            recurrent_vals = [samples_data["valids"]]
        else:
            recurrent_vals = []
        # Compute sample Bellman error: feature difference phi(s') - phi(s),
        # padding each path with a terminal zero-feature row.
        feat_diff = []
        for path in samples_data['paths']:
            feats = self._features(path)
            feats = np.vstack([feats, np.zeros(feats.shape[1])])
            feat_diff.append(feats[1:] - feats[:-1])
        if self.policy.recurrent:
            max_path_length = max([len(path["advantages"]) for path in samples_data["paths"]])
            # pad feature diffs
            feat_diff = np.array([tensor_utils.pad_tensor(fd, max_path_length) for fd in feat_diff])
        else:
            feat_diff = np.vstack(feat_diff)
        #################
        # Optimize dual #
        #################
        # Here we need to optimize dual through BFGS in order to obtain \eta
        # value. Initialize dual function g(\theta, v). \eta > 0
        # First eval delta_v
        f_dual = self.opt_info['f_dual']
        f_dual_grad = self.opt_info['f_dual_grad']

        # Set BFGS eval function; x packs [eta, v].
        def eval_dual(x):
            param_eta = x[0]
            param_v = x[1:]
            val = f_dual(*([rewards, feat_diff] + state_info_list + recurrent_vals + [param_eta, param_v]))
            return val.astype(np.float64)

        # Set BFGS gradient eval function
        def eval_dual_grad(x):
            param_eta = x[0]
            param_v = x[1:]
            grad = f_dual_grad(*([rewards, feat_diff] + state_info_list + recurrent_vals + [param_eta, param_v]))
            # BUG FIX: np.float was a deprecated alias removed in NumPy >= 1.24;
            # the builtin float is the correct conversion here.
            eta_grad = float(grad[0])
            v_grad = grad[1]
            return np.hstack([eta_grad, v_grad])

        # Initial BFGS parameter values.
        x0 = np.hstack([self.param_eta, self.param_v])
        # Set parameter boundaries: \eta>0, v unrestricted.
        bounds = [(-np.inf, np.inf) for _ in x0]
        bounds[0] = (0., np.inf)
        # Optimize through BFGS
        logger.log('optimizing dual')
        eta_before = x0[0]
        dual_before = eval_dual(x0)
        params_ast, _, _ = self.optimizer(
            func=eval_dual, x0=x0,
            fprime=eval_dual_grad,
            bounds=bounds,
            maxiter=self.max_opt_itr,
            disp=0
        )
        dual_after = eval_dual(params_ast)
        # Optimal values have been obtained
        self.param_eta = params_ast[0]
        self.param_v = params_ast[1:]
        ###################
        # Optimize policy #
        ###################
        cur_params = self.policy.get_param_values(trainable=True)
        f_loss = self.opt_info["f_loss"]
        f_loss_grad = self.opt_info['f_loss_grad']
        # Note: renamed from `input` to avoid shadowing the builtin.
        loss_inputs = [rewards, observations, feat_diff,
                       actions] + state_info_list + recurrent_vals + [self.param_eta, self.param_v]

        # Set loss eval function
        def eval_loss(params):
            self.policy.set_param_values(params, trainable=True)
            val = f_loss(*loss_inputs)
            return val.astype(np.float64)

        # Set loss gradient eval function
        def eval_loss_grad(params):
            self.policy.set_param_values(params, trainable=True)
            grad = f_loss_grad(*loss_inputs)
            flattened_grad = tensor_utils.flatten_tensors(list(map(np.asarray, grad)))
            return flattened_grad.astype(np.float64)

        loss_before = eval_loss(cur_params)
        logger.log('optimizing policy')
        params_ast, _, _ = self.optimizer(
            func=eval_loss, x0=cur_params,
            fprime=eval_loss_grad,
            disp=0,
            maxiter=self.max_opt_itr
        )
        loss_after = eval_loss(params_ast)
        f_kl = self.opt_info['f_kl']
        mean_kl = f_kl(*([observations, actions] + state_info_list + dist_info_list + recurrent_vals)).astype(
            np.float64)
        logger.log('eta %f -> %f' % (eta_before, self.param_eta))
        logger.record_tabular("LossBefore", loss_before)
        logger.record_tabular("LossAfter", loss_after)
        logger.record_tabular('DualBefore', dual_before)
        logger.record_tabular('DualAfter', dual_after)
        logger.record_tabular('MeanKL', mean_kl)

    @overrides
    def get_itr_snapshot(self, itr, samples_data):
        """Return the data stored in the per-iteration snapshot."""
        return dict(
            itr=itr,
            policy=self.policy,
            baseline=self.baseline,
            env=self.env,
        )
| 12,115 | 34.323615 | 115 | py |
rllab | rllab-master/rllab/algos/batch_polopt.py | from rllab.algos.base import RLAlgorithm
from rllab.sampler import parallel_sampler
from rllab.sampler.base import BaseSampler
import rllab.misc.logger as logger
import rllab.plotter as plotter
from rllab.policies.base import Policy
class BatchSampler(BaseSampler):
    """Sampler that collects on-policy rollouts via ``parallel_sampler`` workers."""

    def __init__(self, algo):
        """
        :type algo: BatchPolopt
        """
        self.algo = algo

    def start_worker(self):
        # Ship the environment and policy to the worker processes once, up front.
        parallel_sampler.populate_task(
            self.algo.env, self.algo.policy, scope=self.algo.scope)

    def shutdown_worker(self):
        parallel_sampler.terminate_task(scope=self.algo.scope)

    def obtain_samples(self, itr):
        """Collect roughly ``algo.batch_size`` transitions worth of rollouts."""
        algo = self.algo
        collected = parallel_sampler.sample_paths(
            policy_params=algo.policy.get_param_values(),
            max_samples=algo.batch_size,
            max_path_length=algo.max_path_length,
            scope=algo.scope,
        )
        if not algo.whole_paths:
            # Trim the surplus so the batch holds at most batch_size samples.
            return parallel_sampler.truncate_paths(collected, algo.batch_size)
        return collected
class BatchPolopt(RLAlgorithm):
    """
    Base class for batch sampling-based policy optimization methods.
    This includes various policy gradient methods like vpg, npg, ppo, trpo, etc.
    """
    def __init__(
            self,
            env,
            policy,
            baseline,
            scope=None,
            n_itr=500,
            start_itr=0,
            batch_size=5000,
            max_path_length=500,
            discount=0.99,
            gae_lambda=1,
            plot=False,
            pause_for_plot=False,
            center_adv=True,
            positive_adv=False,
            store_paths=False,
            whole_paths=True,
            sampler_cls=None,
            sampler_args=None,
            **kwargs
    ):
        """
        :param env: Environment
        :param policy: Policy
        :type policy: Policy
        :param baseline: Baseline
        :param scope: Scope for identifying the algorithm. Must be specified if running multiple algorithms
        simultaneously, each using different environments and policies
        :param n_itr: Number of iterations.
        :param start_itr: Starting iteration.
        :param batch_size: Number of samples per iteration.
        :param max_path_length: Maximum length of a single rollout.
        :param discount: Discount.
        :param gae_lambda: Lambda used for generalized advantage estimation.
        :param plot: Plot evaluation run after each iteration.
        :param pause_for_plot: Whether to pause before contiuing when plotting.
        :param center_adv: Whether to rescale the advantages so that they have mean 0 and standard deviation 1.
        :param positive_adv: Whether to shift the advantages so that they are always positive. When used in
        conjunction with center_adv the advantages will be standardized before shifting.
        :param store_paths: Whether to save all paths data to the snapshot.
        """
        self.env = env
        self.policy = policy
        self.baseline = baseline
        self.scope = scope
        self.n_itr = n_itr
        self.current_itr = start_itr
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.plot = plot
        self.pause_for_plot = pause_for_plot
        self.center_adv = center_adv
        self.positive_adv = positive_adv
        self.store_paths = store_paths
        self.whole_paths = whole_paths
        # Default to the parallel BatchSampler unless a custom one is given.
        if sampler_cls is None:
            sampler_cls = BatchSampler
        if sampler_args is None:
            sampler_args = dict()
        self.sampler = sampler_cls(self, **sampler_args)
    def start_worker(self):
        # Bring up sampling workers (and the plotter, if enabled).
        self.sampler.start_worker()
        if self.plot:
            plotter.init_plot(self.env, self.policy)
    def shutdown_worker(self):
        self.sampler.shutdown_worker()
    def train(self):
        # Main loop: sample -> process -> optimize -> snapshot, once per
        # iteration, resuming from self.current_itr.
        self.start_worker()
        self.init_opt()
        for itr in range(self.current_itr, self.n_itr):
            with logger.prefix('itr #%d | ' % itr):
                paths = self.sampler.obtain_samples(itr)
                samples_data = self.sampler.process_samples(itr, paths)
                self.log_diagnostics(paths)
                self.optimize_policy(itr, samples_data)
                logger.log("saving snapshot...")
                params = self.get_itr_snapshot(itr, samples_data)
                self.current_itr = itr + 1
                # Store the algorithm itself so training can be resumed.
                params["algo"] = self
                if self.store_paths:
                    params["paths"] = samples_data["paths"]
                logger.save_itr_params(itr, params)
                logger.log("saved")
                logger.dump_tabular(with_prefix=False)
                if self.plot:
                    self.update_plot()
                    if self.pause_for_plot:
                        input("Plotting evaluation run: Press Enter to "
                              "continue...")
        self.shutdown_worker()
    def log_diagnostics(self, paths):
        # Delegate per-component diagnostics logging.
        self.env.log_diagnostics(paths)
        self.policy.log_diagnostics(paths)
        self.baseline.log_diagnostics(paths)
    def init_opt(self):
        """
        Initialize the optimization procedure. If using theano / cgt, this may
        include declaring all the variables and compiling functions
        """
        raise NotImplementedError
    def get_itr_snapshot(self, itr, samples_data):
        """
        Returns all the data that should be saved in the snapshot for this
        iteration.
        """
        raise NotImplementedError
    def optimize_policy(self, itr, samples_data):
        # Subclasses implement the actual policy update.
        raise NotImplementedError
    def update_plot(self):
        if self.plot:
            plotter.update_plot(self.policy, self.max_path_length)
| 5,938 | 34.777108 | 111 | py |
rllab | rllab-master/rllab/spaces/box.py | from rllab.core.serializable import Serializable
from .base import Space
import numpy as np
from rllab.misc import ext
import theano
class Box(Space):
    """
    A box in R^n: each coordinate is bounded by ``low`` and ``high``.
    """
    def __init__(self, low, high, shape=None):
        """
        Two kinds of valid input:
            Box(-1.0, 1.0, (3,4)) # low and high are scalars, and shape is provided
            Box(np.array([-1.0,-2.0]), np.array([2.0,4.0])) # low and high are arrays of the same shape
        """
        if shape is None:
            assert low.shape == high.shape
            self.low = low
            self.high = high
        else:
            assert np.isscalar(low) and np.isscalar(high)
            # Broadcast the scalar bounds to the requested shape.
            self.low = low + np.zeros(shape)
            self.high = high + np.zeros(shape)
    def sample(self):
        """Draw a uniform random point within the bounds."""
        return np.random.uniform(low=self.low, high=self.high, size=self.low.shape)
    def contains(self, x):
        """Return True iff ``x`` has the right shape and lies within bounds."""
        # Generalized: accept any array-like, not only ndarrays.
        x = np.asarray(x)
        return x.shape == self.shape and (x >= self.low).all() and (x <= self.high).all()
    @property
    def shape(self):
        return self.low.shape
    @property
    def flat_dim(self):
        # Cast to a plain int (np.prod returns a numpy scalar).
        return int(np.prod(self.low.shape))
    @property
    def bounds(self):
        return self.low, self.high
    def flatten(self, x):
        return np.asarray(x).flatten()
    def unflatten(self, x):
        return np.asarray(x).reshape(self.shape)
    def flatten_n(self, xs):
        xs = np.asarray(xs)
        return xs.reshape((xs.shape[0], -1))
    def unflatten_n(self, xs):
        xs = np.asarray(xs)
        return xs.reshape((xs.shape[0],) + self.shape)
    def __repr__(self):
        return "Box" + str(self.shape)
    def __eq__(self, other):
        return isinstance(other, Box) and np.allclose(self.low, other.low) and \
               np.allclose(self.high, other.high)
    def __hash__(self):
        # BUG FIX: ndarrays are unhashable, so hash((self.low, self.high))
        # raised TypeError. Hash the flattened bounds as tuples instead.
        return hash((tuple(np.asarray(self.low).flatten()),
                     tuple(np.asarray(self.high).flatten())))
    def new_tensor_variable(self, name, extra_dims):
        """Create a floatX Theano tensor with ``extra_dims`` leading dims."""
        return ext.new_tensor(
            name=name,
            ndim=extra_dims+1,
            dtype=theano.config.floatX
        )
| 2,093 | 25.846154 | 103 | py |
rllab | rllab-master/rllab/spaces/base.py | import numpy as np
class Space(object):
    """
    Abstract interface shared by observation and action spaces.

    Lets generic code (e.g. random action selection) work with any
    Environment regardless of the concrete space type.
    """
    def sample(self, seed=0):
        """Draw a uniformly random element of this space."""
        raise NotImplementedError
    def contains(self, x):
        """Return True iff ``x`` is a valid member of this space."""
        raise NotImplementedError
    def flatten(self, x):
        """Flatten a single sample into a 1-D vector."""
        raise NotImplementedError
    def unflatten(self, x):
        """Inverse of :meth:`flatten` for a single sample."""
        raise NotImplementedError
    def flatten_n(self, xs):
        """Flatten a batch of samples at once."""
        raise NotImplementedError
    def unflatten_n(self, xs):
        """Inverse of :meth:`flatten_n` for a batch of samples."""
        raise NotImplementedError
    @property
    def flat_dim(self):
        """The dimension of the flattened vector of the tensor representation."""
        raise NotImplementedError
    def new_tensor_variable(self, name, extra_dims):
        """
        Create a Theano tensor variable for elements of this space.
        :param name: name of the variable
        :param extra_dims: extra dimensions prepended (e.g. batch / time)
        :return: the created tensor variable
        """
        raise NotImplementedError
| 1,309 | 24.686275 | 85 | py |
rllab | rllab-master/rllab/spaces/discrete.py | from .base import Space
import numpy as np
from rllab.misc import special
from rllab.misc import ext
class Discrete(Space):
    """
    The discrete space {0, 1, ..., n-1}.
    """
    def __init__(self, n):
        self._n = n
    @property
    def n(self):
        return self._n
    def sample(self):
        return np.random.randint(self.n)
    def contains(self, x):
        """Only integer scalars in [0, n) belong to the space."""
        x = np.asarray(x)
        return x.shape == () and x.dtype.kind == 'i' and x >= 0 and x < self.n
    def __repr__(self):
        return "Discrete(%d)" % self.n
    def flatten(self, x):
        # One-hot encode a single value.
        return special.to_onehot(x, self.n)
    def unflatten(self, x):
        return special.from_onehot(x)
    def flatten_n(self, x):
        return special.to_onehot_n(x, self.n)
    def unflatten_n(self, x):
        return special.from_onehot_n(x)
    @property
    def flat_dim(self):
        return self.n
    def weighted_sample(self, weights):
        return special.weighted_sample(weights, range(self.n))
    @property
    def default_value(self):
        return 0
    def new_tensor_variable(self, name, extra_dims):
        # Pick the smallest unsigned integer dtype that can represent n-1.
        if self.n <= 2 ** 8:
            return ext.new_tensor(
                name=name,
                ndim=extra_dims+1,
                dtype='uint8'
            )
        elif self.n <= 2 ** 16:
            return ext.new_tensor(
                name=name,
                ndim=extra_dims+1,
                dtype='uint16'
            )
        else:
            return ext.new_tensor(
                name=name,
                ndim=extra_dims+1,
                dtype='uint32'
            )
    def __eq__(self, other):
        # BUG FIX: __eq__ was defined twice; the earlier definition lacked the
        # isinstance guard and was silently shadowed by this one. Keep only
        # the safe version.
        if not isinstance(other, Discrete):
            return False
        return self.n == other.n
    def __hash__(self):
        return hash(self.n)
rllab | rllab-master/rllab/spaces/__init__.py | from .product import Product
from .discrete import Discrete
from .box import Box
__all__ = ["Product", "Discrete", "Box"] | 122 | 23.6 | 40 | py |
rllab | rllab-master/rllab/spaces/product.py | from rllab.spaces.base import Space
import numpy as np
from rllab.misc import ext
class Product(Space):
    """Cartesian product of several component spaces."""

    def __init__(self, *components):
        # Accept both Product(a, b, ...) and Product([a, b, ...]).
        if isinstance(components[0], (list, tuple)):
            assert len(components) == 1
            components = components[0]
        self._components = tuple(components)
        dtypes = [c.new_tensor_variable("tmp", extra_dims=0).dtype
                  for c in self._components]
        if len(dtypes) > 0 and hasattr(dtypes[0], "as_numpy_dtype"):
            dtypes = [d.as_numpy_dtype for d in dtypes]
        # The dtype all component representations can be safely cast to.
        self._common_dtype = np.core.numerictypes.find_common_type([], dtypes)

    def sample(self):
        return tuple(c.sample() for c in self._components)

    @property
    def components(self):
        return self._components

    def contains(self, x):
        if not isinstance(x, tuple):
            return False
        return all(c.contains(part) for c, part in zip(self._components, x))

    def new_tensor_variable(self, name, extra_dims):
        return ext.new_tensor(
            name=name,
            ndim=extra_dims+1,
            dtype=self._common_dtype,
        )

    @property
    def flat_dim(self):
        return np.sum([c.flat_dim for c in self._components])

    def flatten(self, x):
        # Concatenate the per-component flattened representations.
        parts = [c.flatten(part) for c, part in zip(self._components, x)]
        return np.concatenate(parts)

    def flatten_n(self, xs):
        # Regroup samples by component, flatten each group, then concatenate.
        n_components = len(xs[0])
        regrouped = [[sample[i] for sample in xs] for i in range(n_components)]
        flat_parts = [c.flatten_n(group)
                      for c, group in zip(self.components, regrouped)]
        return np.concatenate(flat_parts, axis=-1)

    def unflatten(self, x):
        sizes = [c.flat_dim for c in self._components]
        chunks = np.split(x, np.cumsum(sizes)[:-1])
        return tuple(c.unflatten(chunk)
                     for c, chunk in zip(self._components, chunks))

    def unflatten_n(self, xs):
        sizes = [c.flat_dim for c in self._components]
        chunks = np.split(xs, np.cumsum(sizes)[:-1], axis=-1)
        unflat = [c.unflatten_n(chunk)
                  for c, chunk in zip(self.components, chunks)]
        # Regroup back into one tuple per sample.
        return list(zip(*unflat))

    def __eq__(self, other):
        if not isinstance(other, Product):
            return False
        return tuple(self.components) == tuple(other.components)

    def __hash__(self):
        return hash(tuple(self.components))
| 2,304 | 33.924242 | 97 | py |
rllab | rllab-master/rllab/mujoco_py/glfw.py | '''
Python bindings for GLFW.
'''
__author__ = 'Florian Rhiem (florian.rhiem@gmail.com)'
__copyright__ = 'Copyright (c) 2013 Florian Rhiem'
__license__ = 'MIT'
__version__ = '1.0.1'
import ctypes
import os
import glob
import sys
import subprocess
import textwrap
# Python 3 compatibility:
# NOTE(review): both branches bind the same attribute; upstream used
# os.getcwdu on Python 2, so this try/except is vestigial after 2to3.
try:
    _getcwd = os.getcwd
except AttributeError:
    _getcwd = os.getcwd
# Encode str -> bytes for ctypes char* arguments on Python 3.
if sys.version_info.major > 2:
    _to_char_p = lambda s: s.encode('utf-8')
else:
    _to_char_p = lambda s: s
def _find_library_candidates(library_names,
                             library_file_extensions,
                             library_search_paths):
    '''
    Finds and returns filenames which might be the library you are looking for.
    '''
    candidates = set()
    for name in library_names:
        for directory in library_search_paths:
            pattern = os.path.join(directory, '*' + name + '*')
            for path in glob.iglob(pattern):
                path = os.path.realpath(path)
                if path in candidates:
                    continue
                base = os.path.basename(path)
                # Strip a "lib<name>" or "<name>" prefix from the basename.
                if base.startswith('lib' + name):
                    suffix = base[len('lib' + name):]
                elif base.startswith(name):
                    suffix = base[len(name):]
                else:
                    continue
                for ext in library_file_extensions:
                    # "<name>.so", "<name>.so.3", ... style filenames.
                    if suffix.startswith(ext) and suffix[len(ext):][:1] in ('', '.'):
                        candidates.add(path)
                    # "<name>.3.dylib" style filenames: version digits/dots
                    # between prefix and extension.
                    if suffix.endswith(ext):
                        middle = suffix[:-len(ext)]
                        if all(ch in '0123456789.' for ch in middle):
                            candidates.add(path)
    return candidates
def _load_library():
    '''
    Finds, loads and returns the most recent version of the library.
    '''
    # MODIFIED by john schulman for cs294 homework because existing method was broken
    osp = os.path
    here = osp.dirname(__file__)
    if sys.platform.startswith("darwin"):
        relative = "../../vendor/mujoco/libglfw.3.dylib"
    elif sys.platform.startswith("linux"):
        relative = "../../vendor/mujoco/libglfw.so.3"
    elif sys.platform.startswith("win"):
        relative = "../../vendor/mujoco/glfw3.dll"
    else:
        raise RuntimeError("unrecognized platform %s" % sys.platform)
    # Load the vendored shared library shipped with the repository.
    return ctypes.CDLL(osp.abspath(osp.join(here, relative)))
def _glfw_get_version(filename):
    '''
    Queries and returns the library version tuple or None by using a
    subprocess.
    '''
    # The probe runs in a child interpreter so that loading a broken/foreign
    # shared library cannot crash or pollute this process.
    version_checker_source = """
import sys
import ctypes
def get_version(library_handle):
    '''
    Queries and returns the library version tuple or None.
    '''
    major_value = ctypes.c_int(0)
    major = ctypes.pointer(major_value)
    minor_value = ctypes.c_int(0)
    minor = ctypes.pointer(minor_value)
    rev_value = ctypes.c_int(0)
    rev = ctypes.pointer(rev_value)
    if hasattr(library_handle, 'glfwGetVersion'):
        library_handle.glfwGetVersion(major, minor, rev)
        version = (major_value.value,
                   minor_value.value,
                   rev_value.value)
        return version
    else:
        return None
try:
    input_func = raw_input
except NameError:
    input_func = input
filename = input_func().strip()
try:
    library_handle = ctypes.CDLL(filename)
except OSError:
    pass
else:
    version = get_version(library_handle)
    print(version)
"""
    args = [sys.executable, '-c', textwrap.dedent(version_checker_source)]
    # The candidate filename is fed to the child on stdin; the child prints
    # the version tuple (or nothing) on stdout.
    process = subprocess.Popen(args, universal_newlines=True,
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out = process.communicate(_to_char_p(filename))[0]
    out = out.strip()
    if out:
        # Output is a repr of a tuple produced by the trusted child above.
        return eval(out)
    else:
        return None
# Load the shared library at import time; every wrapper below depends on it.
_glfw = _load_library()
if _glfw is None:
    raise ImportError("Failed to load GLFW3 shared library.")
# Holds ctypes callback objects so they stay alive while GLFW keeps
# raw pointers to them.
_callback_repositories = []
class _GLFWwindow(ctypes.Structure):
    '''
    Wrapper for:
        typedef struct GLFWwindow GLFWwindow;
    '''
    # Opaque handle: only ever used through pointers, so a single dummy
    # field is enough to make the struct non-empty for ctypes.
    _fields_ = [("dummy", ctypes.c_int)]
class _GLFWmonitor(ctypes.Structure):
    '''
    Wrapper for:
        typedef struct GLFWmonitor GLFWmonitor;
    '''
    # Opaque handle: only ever used through pointers, so a single dummy
    # field is enough to make the struct non-empty for ctypes.
    _fields_ = [("dummy", ctypes.c_int)]
class _GLFWvidmode(ctypes.Structure):
    '''
    Wrapper for:
        typedef struct GLFWvidmode GLFWvidmode;
    '''
    _fields_ = [("width", ctypes.c_int),
                ("height", ctypes.c_int),
                ("red_bits", ctypes.c_int),
                ("green_bits", ctypes.c_int),
                ("blue_bits", ctypes.c_int),
                ("refresh_rate", ctypes.c_uint)]

    def __init__(self):
        ctypes.Structure.__init__(self)
        # Zero every field explicitly.
        for field_name, _ in self._fields_:
            setattr(self, field_name, 0)

    def wrap(self, video_mode):
        '''
        Wraps a nested python sequence: ((width, height), (r, g, b), rate).
        '''
        (self.width, self.height), bits, self.refresh_rate = video_mode
        self.red_bits, self.green_bits, self.blue_bits = bits

    def unwrap(self):
        '''
        Returns a nested python sequence: ((width, height), (r, g, b), rate).
        '''
        return ((self.width, self.height),
                (self.red_bits, self.green_bits, self.blue_bits),
                self.refresh_rate)
class _GLFWgammaramp(ctypes.Structure):
    '''
    Wrapper for:
        typedef struct GLFWgammaramp GLFWgammaramp;
    '''
    _fields_ = [("red", ctypes.POINTER(ctypes.c_ushort)),
                ("green", ctypes.POINTER(ctypes.c_ushort)),
                ("blue", ctypes.POINTER(ctypes.c_ushort)),
                ("size", ctypes.c_uint)]

    def __init__(self):
        ctypes.Structure.__init__(self)
        # NULL channel pointers until wrap() is called; the *_array
        # attributes keep the backing storage alive while GLFW holds the
        # raw pointers.
        for channel in ("red", "green", "blue"):
            setattr(self, channel, None)
            setattr(self, channel + "_array", None)
        self.size = 0

    def wrap(self, gammaramp):
        '''
        Wraps a nested python sequence (red, green, blue) of floats in [0, 1].
        '''
        red, green, blue = gammaramp
        count = min(len(red), len(green), len(blue))
        self.size = ctypes.c_uint(count)
        ushort_array = ctypes.c_ushort * count
        # Scale floats in [0, 1] to unsigned 16-bit values.
        self.red_array = ushort_array(*(int(v * 65535) for v in red[:count]))
        self.green_array = ushort_array(*(int(v * 65535) for v in green[:count]))
        self.blue_array = ushort_array(*(int(v * 65535) for v in blue[:count]))
        ushort_ptr = ctypes.POINTER(ctypes.c_ushort)
        self.red = ctypes.cast(self.red_array, ushort_ptr)
        self.green = ctypes.cast(self.green_array, ushort_ptr)
        self.blue = ctypes.cast(self.blue_array, ushort_ptr)

    def unwrap(self):
        '''
        Returns a nested python sequence (red, green, blue) of float lists.
        '''
        red, green, blue = ([channel[i] / 65535.0 for i in range(self.size)]
                            for channel in (self.red, self.green, self.blue))
        return red, green, blue
# --- Library version these bindings target ---
VERSION_MAJOR = 3
VERSION_MINOR = 0
VERSION_REVISION = 3
# --- Key/button action codes ---
RELEASE = 0
PRESS = 1
REPEAT = 2
# --- Keyboard key tokens (GLFW_KEY_*) ---
KEY_UNKNOWN = -1
KEY_SPACE = 32
KEY_APOSTROPHE = 39
KEY_COMMA = 44
KEY_MINUS = 45
KEY_PERIOD = 46
KEY_SLASH = 47
KEY_0 = 48
KEY_1 = 49
KEY_2 = 50
KEY_3 = 51
KEY_4 = 52
KEY_5 = 53
KEY_6 = 54
KEY_7 = 55
KEY_8 = 56
KEY_9 = 57
KEY_SEMICOLON = 59
KEY_EQUAL = 61
KEY_A = 65
KEY_B = 66
KEY_C = 67
KEY_D = 68
KEY_E = 69
KEY_F = 70
KEY_G = 71
KEY_H = 72
KEY_I = 73
KEY_J = 74
KEY_K = 75
KEY_L = 76
KEY_M = 77
KEY_N = 78
KEY_O = 79
KEY_P = 80
KEY_Q = 81
KEY_R = 82
KEY_S = 83
KEY_T = 84
KEY_U = 85
KEY_V = 86
KEY_W = 87
KEY_X = 88
KEY_Y = 89
KEY_Z = 90
KEY_LEFT_BRACKET = 91
KEY_BACKSLASH = 92
KEY_RIGHT_BRACKET = 93
KEY_GRAVE_ACCENT = 96
KEY_WORLD_1 = 161
KEY_WORLD_2 = 162
KEY_ESCAPE = 256
KEY_ENTER = 257
KEY_TAB = 258
KEY_BACKSPACE = 259
KEY_INSERT = 260
KEY_DELETE = 261
KEY_RIGHT = 262
KEY_LEFT = 263
KEY_DOWN = 264
KEY_UP = 265
KEY_PAGE_UP = 266
KEY_PAGE_DOWN = 267
KEY_HOME = 268
KEY_END = 269
KEY_CAPS_LOCK = 280
KEY_SCROLL_LOCK = 281
KEY_NUM_LOCK = 282
KEY_PRINT_SCREEN = 283
KEY_PAUSE = 284
KEY_F1 = 290
KEY_F2 = 291
KEY_F3 = 292
KEY_F4 = 293
KEY_F5 = 294
KEY_F6 = 295
KEY_F7 = 296
KEY_F8 = 297
KEY_F9 = 298
KEY_F10 = 299
KEY_F11 = 300
KEY_F12 = 301
KEY_F13 = 302
KEY_F14 = 303
KEY_F15 = 304
KEY_F16 = 305
KEY_F17 = 306
KEY_F18 = 307
KEY_F19 = 308
KEY_F20 = 309
KEY_F21 = 310
KEY_F22 = 311
KEY_F23 = 312
KEY_F24 = 313
KEY_F25 = 314
KEY_KP_0 = 320
KEY_KP_1 = 321
KEY_KP_2 = 322
KEY_KP_3 = 323
KEY_KP_4 = 324
KEY_KP_5 = 325
KEY_KP_6 = 326
KEY_KP_7 = 327
KEY_KP_8 = 328
KEY_KP_9 = 329
KEY_KP_DECIMAL = 330
KEY_KP_DIVIDE = 331
KEY_KP_MULTIPLY = 332
KEY_KP_SUBTRACT = 333
KEY_KP_ADD = 334
KEY_KP_ENTER = 335
KEY_KP_EQUAL = 336
KEY_LEFT_SHIFT = 340
KEY_LEFT_CONTROL = 341
KEY_LEFT_ALT = 342
KEY_LEFT_SUPER = 343
KEY_RIGHT_SHIFT = 344
KEY_RIGHT_CONTROL = 345
KEY_RIGHT_ALT = 346
KEY_RIGHT_SUPER = 347
KEY_MENU = 348
KEY_LAST = KEY_MENU
# --- Modifier bit flags ---
MOD_SHIFT = 0x0001
MOD_CONTROL = 0x0002
MOD_ALT = 0x0004
MOD_SUPER = 0x0008
# --- Mouse button tokens ---
MOUSE_BUTTON_1 = 0
MOUSE_BUTTON_2 = 1
MOUSE_BUTTON_3 = 2
MOUSE_BUTTON_4 = 3
MOUSE_BUTTON_5 = 4
MOUSE_BUTTON_6 = 5
MOUSE_BUTTON_7 = 6
MOUSE_BUTTON_8 = 7
MOUSE_BUTTON_LAST = MOUSE_BUTTON_8
MOUSE_BUTTON_LEFT = MOUSE_BUTTON_1
MOUSE_BUTTON_RIGHT = MOUSE_BUTTON_2
MOUSE_BUTTON_MIDDLE = MOUSE_BUTTON_3
# --- Joystick tokens ---
JOYSTICK_1 = 0
JOYSTICK_2 = 1
JOYSTICK_3 = 2
JOYSTICK_4 = 3
JOYSTICK_5 = 4
JOYSTICK_6 = 5
JOYSTICK_7 = 6
JOYSTICK_8 = 7
JOYSTICK_9 = 8
JOYSTICK_10 = 9
JOYSTICK_11 = 10
JOYSTICK_12 = 11
JOYSTICK_13 = 12
JOYSTICK_14 = 13
JOYSTICK_15 = 14
JOYSTICK_16 = 15
JOYSTICK_LAST = JOYSTICK_16
# --- Error codes ---
NOT_INITIALIZED = 0x00010001
NO_CURRENT_CONTEXT = 0x00010002
INVALID_ENUM = 0x00010003
INVALID_VALUE = 0x00010004
OUT_OF_MEMORY = 0x00010005
API_UNAVAILABLE = 0x00010006
VERSION_UNAVAILABLE = 0x00010007
PLATFORM_ERROR = 0x00010008
FORMAT_UNAVAILABLE = 0x00010009
# --- Window attributes and hints ---
FOCUSED = 0x00020001
ICONIFIED = 0x00020002
RESIZABLE = 0x00020003
VISIBLE = 0x00020004
DECORATED = 0x00020005
# --- Framebuffer hints ---
RED_BITS = 0x00021001
GREEN_BITS = 0x00021002
BLUE_BITS = 0x00021003
ALPHA_BITS = 0x00021004
DEPTH_BITS = 0x00021005
STENCIL_BITS = 0x00021006
ACCUM_RED_BITS = 0x00021007
ACCUM_GREEN_BITS = 0x00021008
ACCUM_BLUE_BITS = 0x00021009
ACCUM_ALPHA_BITS = 0x0002100A
AUX_BUFFERS = 0x0002100B
STEREO = 0x0002100C
SAMPLES = 0x0002100D
SRGB_CAPABLE = 0x0002100E
REFRESH_RATE = 0x0002100F
# --- Context hints and attributes ---
CLIENT_API = 0x00022001
CONTEXT_VERSION_MAJOR = 0x00022002
CONTEXT_VERSION_MINOR = 0x00022003
CONTEXT_REVISION = 0x00022004
CONTEXT_ROBUSTNESS = 0x00022005
OPENGL_FORWARD_COMPAT = 0x00022006
OPENGL_DEBUG_CONTEXT = 0x00022007
OPENGL_PROFILE = 0x00022008
OPENGL_API = 0x00030001
OPENGL_ES_API = 0x00030002
NO_ROBUSTNESS = 0
NO_RESET_NOTIFICATION = 0x00031001
LOSE_CONTEXT_ON_RESET = 0x00031002
OPENGL_ANY_PROFILE = 0
OPENGL_CORE_PROFILE = 0x00032001
OPENGL_COMPAT_PROFILE = 0x00032002
# --- Input modes and cursor modes ---
CURSOR = 0x00033001
STICKY_KEYS = 0x00033002
STICKY_MOUSE_BUTTONS = 0x00033003
CURSOR_NORMAL = 0x00034001
CURSOR_HIDDEN = 0x00034002
CURSOR_DISABLED = 0x00034003
# --- Monitor connection events ---
CONNECTED = 0x00040001
DISCONNECTED = 0x00040002
# ctypes prototypes for the GLFW callback function pointer typedefs.
# Each mirrors the corresponding GLFW*fun typedef from glfw3.h.
_GLFWerrorfun = ctypes.CFUNCTYPE(None,
                                 ctypes.c_int,
                                 ctypes.c_char_p)
_GLFWwindowposfun = ctypes.CFUNCTYPE(None,
                                     ctypes.POINTER(_GLFWwindow),
                                     ctypes.c_int,
                                     ctypes.c_int)
_GLFWwindowsizefun = ctypes.CFUNCTYPE(None,
                                      ctypes.POINTER(_GLFWwindow),
                                      ctypes.c_int,
                                      ctypes.c_int)
_GLFWwindowclosefun = ctypes.CFUNCTYPE(None,
                                       ctypes.POINTER(_GLFWwindow))
_GLFWwindowrefreshfun = ctypes.CFUNCTYPE(None,
                                         ctypes.POINTER(_GLFWwindow))
_GLFWwindowfocusfun = ctypes.CFUNCTYPE(None,
                                       ctypes.POINTER(_GLFWwindow),
                                       ctypes.c_int)
_GLFWwindowiconifyfun = ctypes.CFUNCTYPE(None,
                                         ctypes.POINTER(_GLFWwindow),
                                         ctypes.c_int)
_GLFWframebuffersizefun = ctypes.CFUNCTYPE(None,
                                           ctypes.POINTER(_GLFWwindow),
                                           ctypes.c_int,
                                           ctypes.c_int)
_GLFWmousebuttonfun = ctypes.CFUNCTYPE(None,
                                       ctypes.POINTER(_GLFWwindow),
                                       ctypes.c_int,
                                       ctypes.c_int,
                                       ctypes.c_int)
_GLFWcursorposfun = ctypes.CFUNCTYPE(None,
                                     ctypes.POINTER(_GLFWwindow),
                                     ctypes.c_double,
                                     ctypes.c_double)
_GLFWcursorenterfun = ctypes.CFUNCTYPE(None,
                                       ctypes.POINTER(_GLFWwindow),
                                       ctypes.c_int)
_GLFWscrollfun = ctypes.CFUNCTYPE(None,
                                  ctypes.POINTER(_GLFWwindow),
                                  ctypes.c_double,
                                  ctypes.c_double)
_GLFWkeyfun = ctypes.CFUNCTYPE(None,
                               ctypes.POINTER(_GLFWwindow),
                               ctypes.c_int,
                               ctypes.c_int,
                               ctypes.c_int,
                               ctypes.c_int)
_GLFWcharfun = ctypes.CFUNCTYPE(None,
                                ctypes.POINTER(_GLFWwindow),
                                ctypes.c_int)
_GLFWmonitorfun = ctypes.CFUNCTYPE(None,
                                   ctypes.POINTER(_GLFWmonitor),
                                   ctypes.c_int)
# ctypes signature for glfwInit.
_glfw.glfwInit.restype = ctypes.c_int
_glfw.glfwInit.argtypes = []
def init():
    '''
    Initializes the GLFW library.
    Wrapper for:
        int glfwInit(void);
    '''
    # Preserve the working directory across the call, guarding against
    # glfwInit changing it as a side effect.
    previous_dir = _getcwd()
    status = _glfw.glfwInit()
    os.chdir(previous_dir)
    return status
_glfw.glfwTerminate.restype = None
_glfw.glfwTerminate.argtypes = []
def terminate():
'''
Terminates the GLFW library.
Wrapper for:
void glfwTerminate(void);
'''
_glfw.glfwTerminate()
_glfw.glfwGetVersion.restype = None
_glfw.glfwGetVersion.argtypes = [ctypes.POINTER(ctypes.c_int),
                                 ctypes.POINTER(ctypes.c_int),
                                 ctypes.POINTER(ctypes.c_int)]
def get_version():
    '''
    Retrieves the version of the GLFW library.
    Wrapper for:
    void glfwGetVersion(int* major, int* minor, int* rev);
    '''
    # GLFW reports the version through three int out-parameters; allocate
    # them here and unpack the values for the caller.
    major = ctypes.c_int(0)
    minor = ctypes.c_int(0)
    rev = ctypes.c_int(0)
    _glfw.glfwGetVersion(ctypes.pointer(major),
                         ctypes.pointer(minor),
                         ctypes.pointer(rev))
    return major.value, minor.value, rev.value
_glfw.glfwGetVersionString.restype = ctypes.c_char_p
_glfw.glfwGetVersionString.argtypes = []
def get_version_string():
    '''
    Returns a string describing the compile-time configuration.
    Wrapper for:
    const char* glfwGetVersionString(void);
    '''
    return _glfw.glfwGetVersionString()
# Module-level reference to the active error callback. ctypes CFUNCTYPE
# wrappers must be kept alive on the Python side for as long as C may
# invoke them, so both the user callable and its C wrapper are stored.
_error_callback = None
_glfw.glfwSetErrorCallback.restype = _GLFWerrorfun
_glfw.glfwSetErrorCallback.argtypes = [_GLFWerrorfun]
def set_error_callback(cbfun):
    '''
    Sets the error callback.
    Wrapper for:
    GLFWerrorfun glfwSetErrorCallback(GLFWerrorfun cbfun);
    '''
    global _error_callback
    previous_callback = _error_callback
    if cbfun is None:
        # CFUNCTYPE(0) produces a NULL function pointer, which clears the
        # callback on the C side.
        cbfun = 0
    c_cbfun = _GLFWerrorfun(cbfun)
    _error_callback = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetErrorCallback(cbfun)
    # Mirror the C API: return the previously set (non-NULL) callback.
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_glfw.glfwGetMonitors.restype = ctypes.POINTER(ctypes.POINTER(_GLFWmonitor))
_glfw.glfwGetMonitors.argtypes = [ctypes.POINTER(ctypes.c_int)]
def get_monitors():
    '''
    Returns the currently connected monitors.
    Wrapper for:
    GLFWmonitor** glfwGetMonitors(int* count);
    '''
    # The C call returns an array plus its length via an out-parameter;
    # materialize it as a Python list of monitor pointers.
    count = ctypes.c_int(0)
    monitor_array = _glfw.glfwGetMonitors(ctypes.pointer(count))
    return [monitor_array[index] for index in range(count.value)]
# --- Monitor queries --------------------------------------------------------
_glfw.glfwGetPrimaryMonitor.restype = ctypes.POINTER(_GLFWmonitor)
_glfw.glfwGetPrimaryMonitor.argtypes = []
def get_primary_monitor():
    '''
    Returns the primary monitor.
    Wrapper for:
    GLFWmonitor* glfwGetPrimaryMonitor(void);
    '''
    return _glfw.glfwGetPrimaryMonitor()
_glfw.glfwGetMonitorPos.restype = None
_glfw.glfwGetMonitorPos.argtypes = [ctypes.POINTER(_GLFWmonitor),
                                    ctypes.POINTER(ctypes.c_int),
                                    ctypes.POINTER(ctypes.c_int)]
def get_monitor_pos(monitor):
    '''
    Returns the position of the monitor's viewport on the virtual screen,
    as an (xpos, ypos) pair of ints.
    Wrapper for:
    void glfwGetMonitorPos(GLFWmonitor* monitor, int* xpos, int* ypos);
    '''
    xpos_value = ctypes.c_int(0)
    xpos = ctypes.pointer(xpos_value)
    ypos_value = ctypes.c_int(0)
    ypos = ctypes.pointer(ypos_value)
    _glfw.glfwGetMonitorPos(monitor, xpos, ypos)
    return xpos_value.value, ypos_value.value
_glfw.glfwGetMonitorPhysicalSize.restype = None
_glfw.glfwGetMonitorPhysicalSize.argtypes = [ctypes.POINTER(_GLFWmonitor),
                                             ctypes.POINTER(ctypes.c_int),
                                             ctypes.POINTER(ctypes.c_int)]
def get_monitor_physical_size(monitor):
    '''
    Returns the physical size of the monitor as a (width, height) pair.
    Wrapper for:
    void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* width, int* height);
    '''
    width_value = ctypes.c_int(0)
    width = ctypes.pointer(width_value)
    height_value = ctypes.c_int(0)
    height = ctypes.pointer(height_value)
    _glfw.glfwGetMonitorPhysicalSize(monitor, width, height)
    return width_value.value, height_value.value
_glfw.glfwGetMonitorName.restype = ctypes.c_char_p
_glfw.glfwGetMonitorName.argtypes = [ctypes.POINTER(_GLFWmonitor)]
def get_monitor_name(monitor):
    '''
    Returns the name of the specified monitor.
    Wrapper for:
    const char* glfwGetMonitorName(GLFWmonitor* monitor);
    '''
    return _glfw.glfwGetMonitorName(monitor)
# Module-level reference keeping the monitor callback (and its ctypes
# wrapper) alive while C may invoke it.
_monitor_callback = None
_glfw.glfwSetMonitorCallback.restype = _GLFWmonitorfun
_glfw.glfwSetMonitorCallback.argtypes = [_GLFWmonitorfun]
def set_monitor_callback(cbfun):
    '''
    Sets the monitor configuration callback.
    Wrapper for:
    GLFWmonitorfun glfwSetMonitorCallback(GLFWmonitorfun cbfun);
    '''
    global _monitor_callback
    previous_callback = _monitor_callback
    if cbfun is None:
        # NULL function pointer clears the callback.
        cbfun = 0
    c_cbfun = _GLFWmonitorfun(cbfun)
    _monitor_callback = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetMonitorCallback(cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_glfw.glfwGetVideoModes.restype = ctypes.POINTER(_GLFWvidmode)
_glfw.glfwGetVideoModes.argtypes = [ctypes.POINTER(_GLFWmonitor),
                                    ctypes.POINTER(ctypes.c_int)]
def get_video_modes(monitor):
    '''
    Returns the available video modes for the specified monitor, as a
    list of plain Python values produced by _GLFWvidmode.unwrap().
    Wrapper for:
    const GLFWvidmode* glfwGetVideoModes(GLFWmonitor* monitor, int* count);
    '''
    count_value = ctypes.c_int(0)
    count = ctypes.pointer(count_value)
    result = _glfw.glfwGetVideoModes(monitor, count)
    # unwrap() (defined with _GLFWvidmode elsewhere in this module)
    # converts each ctypes struct into its Python representation.
    videomodes = [result[i].unwrap() for i in range(count_value.value)]
    return videomodes
_glfw.glfwGetVideoMode.restype = ctypes.POINTER(_GLFWvidmode)
_glfw.glfwGetVideoMode.argtypes = [ctypes.POINTER(_GLFWmonitor)]
def get_video_mode(monitor):
    '''
    Returns the current mode of the specified monitor.
    Wrapper for:
    const GLFWvidmode* glfwGetVideoMode(GLFWmonitor* monitor);
    '''
    videomode = _glfw.glfwGetVideoMode(monitor).contents
    return videomode.unwrap()
# --- Gamma ramp handling ----------------------------------------------------
_glfw.glfwSetGamma.restype = None
_glfw.glfwSetGamma.argtypes = [ctypes.POINTER(_GLFWmonitor),
                               ctypes.c_float]
def set_gamma(monitor, gamma):
    '''
    Generates a gamma ramp and sets it for the specified monitor.
    Wrapper for:
    void glfwSetGamma(GLFWmonitor* monitor, float gamma);
    '''
    _glfw.glfwSetGamma(monitor, gamma)
_glfw.glfwGetGammaRamp.restype = ctypes.POINTER(_GLFWgammaramp)
_glfw.glfwGetGammaRamp.argtypes = [ctypes.POINTER(_GLFWmonitor)]
def get_gamma_ramp(monitor):
    '''
    Retrieves the current gamma ramp for the specified monitor, converted
    to Python values via _GLFWgammaramp.unwrap().
    Wrapper for:
    const GLFWgammaramp* glfwGetGammaRamp(GLFWmonitor* monitor);
    '''
    gammaramp = _glfw.glfwGetGammaRamp(monitor).contents
    return gammaramp.unwrap()
_glfw.glfwSetGammaRamp.restype = None
_glfw.glfwSetGammaRamp.argtypes = [ctypes.POINTER(_GLFWmonitor),
                                   ctypes.POINTER(_GLFWgammaramp)]
def set_gamma_ramp(monitor, ramp):
    '''
    Sets the current gamma ramp for the specified monitor.
    Wrapper for:
    void glfwSetGammaRamp(GLFWmonitor* monitor, const GLFWgammaramp* ramp);
    '''
    # wrap() (defined with _GLFWgammaramp elsewhere in this module)
    # populates the ctypes struct from the Python-side ramp value.
    gammaramp = _GLFWgammaramp()
    gammaramp.wrap(ramp)
    _glfw.glfwSetGammaRamp(monitor, ctypes.pointer(gammaramp))
# --- Window hints -----------------------------------------------------------
_glfw.glfwDefaultWindowHints.restype = None
_glfw.glfwDefaultWindowHints.argtypes = []
def default_window_hints():
    '''
    Resets all window hints to their default values.
    Wrapper for:
    void glfwDefaultWindowHints(void);
    '''
    _glfw.glfwDefaultWindowHints()
_glfw.glfwWindowHint.restype = None
_glfw.glfwWindowHint.argtypes = [ctypes.c_int,
                                 ctypes.c_int]
def window_hint(target, hint):
    '''
    Sets the specified window hint to the desired value. Use the module
    constants (e.g. OPENGL_PROFILE) for the target.
    Wrapper for:
    void glfwWindowHint(int target, int hint);
    '''
    _glfw.glfwWindowHint(target, hint)
_glfw.glfwCreateWindow.restype = ctypes.POINTER(_GLFWwindow)
_glfw.glfwCreateWindow.argtypes = [ctypes.c_int,
                                   ctypes.c_int,
                                   ctypes.c_char_p,
                                   ctypes.POINTER(_GLFWmonitor),
                                   ctypes.POINTER(_GLFWwindow)]
def create_window(width, height, title, monitor, share):
    '''
    Creates a window and its associated context.
    Wrapper for:
    GLFWwindow* glfwCreateWindow(int width, int height, const char* title, GLFWmonitor* monitor, GLFWwindow* share);
    '''
    # Encode the title to a C string first, then hand everything to GLFW.
    c_title = _to_char_p(title)
    return _glfw.glfwCreateWindow(width, height, c_title, monitor, share)
_glfw.glfwDestroyWindow.restype = None
_glfw.glfwDestroyWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def destroy_window(window):
    '''
    Destroys the specified window and its context, and releases any
    callback references that were being kept alive for it.
    Wrapper for:
    void glfwDestroyWindow(GLFWwindow* window);
    '''
    _glfw.glfwDestroyWindow(window)
    # Cast through c_long so the computed key matches the keys stored by
    # the set_*_callback functions (they all use POINTER(c_long)); the
    # previous c_ulong cast yielded a different value for pointers above
    # LONG_MAX, so those repository entries could never be found here.
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    for callback_repository in _callback_repositories:
        # Only delete entries that exist: a window that never had this
        # particular callback registered would otherwise raise KeyError.
        if window_addr in callback_repository:
            del callback_repository[window_addr]
_glfw.glfwWindowShouldClose.restype = ctypes.c_int
_glfw.glfwWindowShouldClose.argtypes = [ctypes.POINTER(_GLFWwindow)]
def window_should_close(window):
    '''
    Checks the close flag of the specified window.
    Wrapper for:
    int glfwWindowShouldClose(GLFWwindow* window);
    '''
    return _glfw.glfwWindowShouldClose(window)
_glfw.glfwSetWindowShouldClose.restype = None
_glfw.glfwSetWindowShouldClose.argtypes = [ctypes.POINTER(_GLFWwindow),
                                           ctypes.c_int]
def set_window_should_close(window, value):
    '''
    Sets the close flag of the specified window.
    Wrapper for:
    void glfwSetWindowShouldClose(GLFWwindow* window, int value);
    '''
    _glfw.glfwSetWindowShouldClose(window, value)
_glfw.glfwSetWindowTitle.restype = None
_glfw.glfwSetWindowTitle.argtypes = [ctypes.POINTER(_GLFWwindow),
                                     ctypes.c_char_p]
def set_window_title(window, title):
    '''
    Sets the title of the specified window.
    Wrapper for:
    void glfwSetWindowTitle(GLFWwindow* window, const char* title);
    '''
    _glfw.glfwSetWindowTitle(window, _to_char_p(title))
_glfw.glfwGetWindowPos.restype = None
_glfw.glfwGetWindowPos.argtypes = [ctypes.POINTER(_GLFWwindow),
                                   ctypes.POINTER(ctypes.c_int),
                                   ctypes.POINTER(ctypes.c_int)]
def get_window_pos(window):
    '''
    Retrieves the position of the client area of the specified window,
    as an (xpos, ypos) pair of ints.
    Wrapper for:
    void glfwGetWindowPos(GLFWwindow* window, int* xpos, int* ypos);
    '''
    # Two int out-parameters; allocate locally and unpack the results.
    x_out = ctypes.c_int(0)
    y_out = ctypes.c_int(0)
    _glfw.glfwGetWindowPos(window,
                           ctypes.pointer(x_out),
                           ctypes.pointer(y_out))
    return x_out.value, y_out.value
# --- Window geometry --------------------------------------------------------
_glfw.glfwSetWindowPos.restype = None
_glfw.glfwSetWindowPos.argtypes = [ctypes.POINTER(_GLFWwindow),
                                   ctypes.c_int,
                                   ctypes.c_int]
def set_window_pos(window, xpos, ypos):
    '''
    Sets the position of the client area of the specified window.
    Wrapper for:
    void glfwSetWindowPos(GLFWwindow* window, int xpos, int ypos);
    '''
    _glfw.glfwSetWindowPos(window, xpos, ypos)
_glfw.glfwGetWindowSize.restype = None
_glfw.glfwGetWindowSize.argtypes = [ctypes.POINTER(_GLFWwindow),
                                    ctypes.POINTER(ctypes.c_int),
                                    ctypes.POINTER(ctypes.c_int)]
def get_window_size(window):
    '''
    Retrieves the size of the client area of the specified window, as a
    (width, height) pair of ints.
    Wrapper for:
    void glfwGetWindowSize(GLFWwindow* window, int* width, int* height);
    '''
    width_value = ctypes.c_int(0)
    width = ctypes.pointer(width_value)
    height_value = ctypes.c_int(0)
    height = ctypes.pointer(height_value)
    _glfw.glfwGetWindowSize(window, width, height)
    return width_value.value, height_value.value
_glfw.glfwSetWindowSize.restype = None
_glfw.glfwSetWindowSize.argtypes = [ctypes.POINTER(_GLFWwindow),
                                    ctypes.c_int,
                                    ctypes.c_int]
def set_window_size(window, width, height):
    '''
    Sets the size of the client area of the specified window.
    Wrapper for:
    void glfwSetWindowSize(GLFWwindow* window, int width, int height);
    '''
    _glfw.glfwSetWindowSize(window, width, height)
_glfw.glfwGetFramebufferSize.restype = None
_glfw.glfwGetFramebufferSize.argtypes = [ctypes.POINTER(_GLFWwindow),
                                         ctypes.POINTER(ctypes.c_int),
                                         ctypes.POINTER(ctypes.c_int)]
def get_framebuffer_size(window):
    '''
    Retrieves the size of the framebuffer of the specified window, as a
    (width, height) pair of ints (may differ from the window size on
    high-DPI displays).
    Wrapper for:
    void glfwGetFramebufferSize(GLFWwindow* window, int* width, int* height);
    '''
    width_value = ctypes.c_int(0)
    width = ctypes.pointer(width_value)
    height_value = ctypes.c_int(0)
    height = ctypes.pointer(height_value)
    _glfw.glfwGetFramebufferSize(window, width, height)
    return width_value.value, height_value.value
# --- Window state and attributes --------------------------------------------
_glfw.glfwIconifyWindow.restype = None
_glfw.glfwIconifyWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def iconify_window(window):
    '''
    Iconifies the specified window.
    Wrapper for:
    void glfwIconifyWindow(GLFWwindow* window);
    '''
    _glfw.glfwIconifyWindow(window)
_glfw.glfwRestoreWindow.restype = None
_glfw.glfwRestoreWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def restore_window(window):
    '''
    Restores the specified window.
    Wrapper for:
    void glfwRestoreWindow(GLFWwindow* window);
    '''
    _glfw.glfwRestoreWindow(window)
_glfw.glfwShowWindow.restype = None
_glfw.glfwShowWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def show_window(window):
    '''
    Makes the specified window visible.
    Wrapper for:
    void glfwShowWindow(GLFWwindow* window);
    '''
    _glfw.glfwShowWindow(window)
_glfw.glfwHideWindow.restype = None
_glfw.glfwHideWindow.argtypes = [ctypes.POINTER(_GLFWwindow)]
def hide_window(window):
    '''
    Hides the specified window.
    Wrapper for:
    void glfwHideWindow(GLFWwindow* window);
    '''
    _glfw.glfwHideWindow(window)
_glfw.glfwGetWindowMonitor.restype = ctypes.POINTER(_GLFWmonitor)
_glfw.glfwGetWindowMonitor.argtypes = [ctypes.POINTER(_GLFWwindow)]
def get_window_monitor(window):
    '''
    Returns the monitor that the window uses for full screen mode.
    Wrapper for:
    GLFWmonitor* glfwGetWindowMonitor(GLFWwindow* window);
    '''
    return _glfw.glfwGetWindowMonitor(window)
_glfw.glfwGetWindowAttrib.restype = ctypes.c_int
_glfw.glfwGetWindowAttrib.argtypes = [ctypes.POINTER(_GLFWwindow),
                                      ctypes.c_int]
def get_window_attrib(window, attrib):
    '''
    Returns an attribute of the specified window.
    Wrapper for:
    int glfwGetWindowAttrib(GLFWwindow* window, int attrib);
    '''
    return _glfw.glfwGetWindowAttrib(window, attrib)
_glfw.glfwSetWindowUserPointer.restype = None
_glfw.glfwSetWindowUserPointer.argtypes = [ctypes.POINTER(_GLFWwindow),
                                           ctypes.c_void_p]
def set_window_user_pointer(window, pointer):
    '''
    Sets the user pointer of the specified window.
    Wrapper for:
    void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
    '''
    _glfw.glfwSetWindowUserPointer(window, pointer)
_glfw.glfwGetWindowUserPointer.restype = ctypes.c_void_p
_glfw.glfwGetWindowUserPointer.argtypes = [ctypes.POINTER(_GLFWwindow)]
def get_window_user_pointer(window):
    '''
    Returns the user pointer of the specified window.
    Wrapper for:
    void* glfwGetWindowUserPointer(GLFWwindow* window);
    '''
    return _glfw.glfwGetWindowUserPointer(window)
# Maps window address -> (python_callback, ctypes_wrapper) so the ctypes
# wrapper object stays alive while C may invoke it.
_window_pos_callback_repository = {}
_callback_repositories.append(_window_pos_callback_repository)
_glfw.glfwSetWindowPosCallback.restype = _GLFWwindowposfun
_glfw.glfwSetWindowPosCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                           _GLFWwindowposfun]
def set_window_pos_callback(window, cbfun):
    '''
    Sets the position callback for the specified window.
    Wrapper for:
    GLFWwindowposfun glfwSetWindowPosCallback(GLFWwindow* window, GLFWwindowposfun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    previous_callback = _window_pos_callback_repository.get(window_addr)
    if cbfun is None:
        # A NULL function pointer clears the callback on the C side.
        cbfun = 0
    c_cbfun = _GLFWwindowposfun(cbfun)
    _window_pos_callback_repository[window_addr] = (cbfun, c_cbfun)
    _glfw.glfwSetWindowPosCallback(window, c_cbfun)
    # Mirror the C API: return the previously set (non-NULL) callback.
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
# Per-window callback setters. Each repository maps the window's address
# to a (python_callback, ctypes_wrapper) tuple so the ctypes wrapper stays
# alive while the C library may invoke it; destroy_window clears these.
_window_size_callback_repository = {}
_callback_repositories.append(_window_size_callback_repository)
_glfw.glfwSetWindowSizeCallback.restype = _GLFWwindowsizefun
_glfw.glfwSetWindowSizeCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                            _GLFWwindowsizefun]
def set_window_size_callback(window, cbfun):
    '''
    Sets the size callback for the specified window.
    Wrapper for:
    GLFWwindowsizefun glfwSetWindowSizeCallback(GLFWwindow* window, GLFWwindowsizefun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _window_size_callback_repository:
        previous_callback = _window_size_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWwindowsizefun(cbfun)
    _window_size_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetWindowSizeCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_window_close_callback_repository = {}
_callback_repositories.append(_window_close_callback_repository)
_glfw.glfwSetWindowCloseCallback.restype = _GLFWwindowclosefun
_glfw.glfwSetWindowCloseCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                             _GLFWwindowclosefun]
def set_window_close_callback(window, cbfun):
    '''
    Sets the close callback for the specified window.
    Wrapper for:
    GLFWwindowclosefun glfwSetWindowCloseCallback(GLFWwindow* window, GLFWwindowclosefun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _window_close_callback_repository:
        previous_callback = _window_close_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWwindowclosefun(cbfun)
    _window_close_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetWindowCloseCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_window_refresh_callback_repository = {}
_callback_repositories.append(_window_refresh_callback_repository)
_glfw.glfwSetWindowRefreshCallback.restype = _GLFWwindowrefreshfun
_glfw.glfwSetWindowRefreshCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                               _GLFWwindowrefreshfun]
def set_window_refresh_callback(window, cbfun):
    '''
    Sets the refresh callback for the specified window.
    Wrapper for:
    GLFWwindowrefreshfun glfwSetWindowRefreshCallback(GLFWwindow* window, GLFWwindowrefreshfun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _window_refresh_callback_repository:
        previous_callback = _window_refresh_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWwindowrefreshfun(cbfun)
    _window_refresh_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetWindowRefreshCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_window_focus_callback_repository = {}
_callback_repositories.append(_window_focus_callback_repository)
_glfw.glfwSetWindowFocusCallback.restype = _GLFWwindowfocusfun
_glfw.glfwSetWindowFocusCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                             _GLFWwindowfocusfun]
def set_window_focus_callback(window, cbfun):
    '''
    Sets the focus callback for the specified window.
    Wrapper for:
    GLFWwindowfocusfun glfwSetWindowFocusCallback(GLFWwindow* window, GLFWwindowfocusfun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _window_focus_callback_repository:
        previous_callback = _window_focus_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWwindowfocusfun(cbfun)
    _window_focus_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetWindowFocusCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_window_iconify_callback_repository = {}
_callback_repositories.append(_window_iconify_callback_repository)
_glfw.glfwSetWindowIconifyCallback.restype = _GLFWwindowiconifyfun
_glfw.glfwSetWindowIconifyCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                               _GLFWwindowiconifyfun]
def set_window_iconify_callback(window, cbfun):
    '''
    Sets the iconify callback for the specified window.
    Wrapper for:
    GLFWwindowiconifyfun glfwSetWindowIconifyCallback(GLFWwindow* window, GLFWwindowiconifyfun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _window_iconify_callback_repository:
        previous_callback = _window_iconify_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWwindowiconifyfun(cbfun)
    _window_iconify_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetWindowIconifyCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_framebuffer_size_callback_repository = {}
_callback_repositories.append(_framebuffer_size_callback_repository)
_glfw.glfwSetFramebufferSizeCallback.restype = _GLFWframebuffersizefun
_glfw.glfwSetFramebufferSizeCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                                 _GLFWframebuffersizefun]
def set_framebuffer_size_callback(window, cbfun):
    '''
    Sets the framebuffer resize callback for the specified window.
    Wrapper for:
    GLFWframebuffersizefun glfwSetFramebufferSizeCallback(GLFWwindow* window, GLFWframebuffersizefun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _framebuffer_size_callback_repository:
        previous_callback = _framebuffer_size_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWframebuffersizefun(cbfun)
    _framebuffer_size_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetFramebufferSizeCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
# --- Event processing and input modes ---------------------------------------
_glfw.glfwPollEvents.restype = None
_glfw.glfwPollEvents.argtypes = []
def poll_events():
    '''
    Processes all pending events.
    Wrapper for:
    void glfwPollEvents(void);
    '''
    _glfw.glfwPollEvents()
_glfw.glfwWaitEvents.restype = None
_glfw.glfwWaitEvents.argtypes = []
def wait_events():
    '''
    Waits until events are pending and processes them.
    Wrapper for:
    void glfwWaitEvents(void);
    '''
    _glfw.glfwWaitEvents()
_glfw.glfwGetInputMode.restype = ctypes.c_int
_glfw.glfwGetInputMode.argtypes = [ctypes.POINTER(_GLFWwindow),
                                   ctypes.c_int]
def get_input_mode(window, mode):
    '''
    Returns the value of an input option for the specified window.
    Wrapper for:
    int glfwGetInputMode(GLFWwindow* window, int mode);
    '''
    return _glfw.glfwGetInputMode(window, mode)
_glfw.glfwSetInputMode.restype = None
_glfw.glfwSetInputMode.argtypes = [ctypes.POINTER(_GLFWwindow),
                                   ctypes.c_int,
                                   ctypes.c_int]
def set_input_mode(window, mode, value):
    '''
    Sets an input option for the specified window.
    @param[in] window The window whose input mode to set.
    @param[in] mode One of `GLFW_CURSOR`, `GLFW_STICKY_KEYS` or
    `GLFW_STICKY_MOUSE_BUTTONS`.
    @param[in] value The new value of the specified input mode.
    Wrapper for:
    void glfwSetInputMode(GLFWwindow* window, int mode, int value);
    '''
    _glfw.glfwSetInputMode(window, mode, value)
# --- Keyboard / mouse state queries -----------------------------------------
_glfw.glfwGetKey.restype = ctypes.c_int
_glfw.glfwGetKey.argtypes = [ctypes.POINTER(_GLFWwindow),
                             ctypes.c_int]
def get_key(window, key):
    '''
    Returns the last reported state of a keyboard key for the specified
    window.
    Wrapper for:
    int glfwGetKey(GLFWwindow* window, int key);
    '''
    return _glfw.glfwGetKey(window, key)
_glfw.glfwGetMouseButton.restype = ctypes.c_int
_glfw.glfwGetMouseButton.argtypes = [ctypes.POINTER(_GLFWwindow),
                                     ctypes.c_int]
def get_mouse_button(window, button):
    '''
    Returns the last reported state of a mouse button for the specified
    window.
    Wrapper for:
    int glfwGetMouseButton(GLFWwindow* window, int button);
    '''
    return _glfw.glfwGetMouseButton(window, button)
_glfw.glfwGetCursorPos.restype = None
_glfw.glfwGetCursorPos.argtypes = [ctypes.POINTER(_GLFWwindow),
                                   ctypes.POINTER(ctypes.c_double),
                                   ctypes.POINTER(ctypes.c_double)]
def get_cursor_pos(window):
    '''
    Retrieves the last reported cursor position, relative to the client
    area of the window, as an (xpos, ypos) pair of floats.
    Wrapper for:
    void glfwGetCursorPos(GLFWwindow* window, double* xpos, double* ypos);
    '''
    xpos_value = ctypes.c_double(0.0)
    xpos = ctypes.pointer(xpos_value)
    ypos_value = ctypes.c_double(0.0)
    ypos = ctypes.pointer(ypos_value)
    _glfw.glfwGetCursorPos(window, xpos, ypos)
    return xpos_value.value, ypos_value.value
_glfw.glfwSetCursorPos.restype = None
_glfw.glfwSetCursorPos.argtypes = [ctypes.POINTER(_GLFWwindow),
                                   ctypes.c_double,
                                   ctypes.c_double]
def set_cursor_pos(window, xpos, ypos):
    '''
    Sets the position of the cursor, relative to the client area of the window.
    Wrapper for:
    void glfwSetCursorPos(GLFWwindow* window, double xpos, double ypos);
    '''
    _glfw.glfwSetCursorPos(window, xpos, ypos)
# Per-window input callback setters. As with the window callbacks above,
# each repository maps window address -> (python_callback, ctypes_wrapper)
# to keep the ctypes wrapper alive while C may invoke it.
_key_callback_repository = {}
_callback_repositories.append(_key_callback_repository)
_glfw.glfwSetKeyCallback.restype = _GLFWkeyfun
_glfw.glfwSetKeyCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                     _GLFWkeyfun]
def set_key_callback(window, cbfun):
    '''
    Sets the key callback.
    Wrapper for:
    GLFWkeyfun glfwSetKeyCallback(GLFWwindow* window, GLFWkeyfun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _key_callback_repository:
        previous_callback = _key_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWkeyfun(cbfun)
    _key_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetKeyCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_char_callback_repository = {}
_callback_repositories.append(_char_callback_repository)
_glfw.glfwSetCharCallback.restype = _GLFWcharfun
_glfw.glfwSetCharCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                      _GLFWcharfun]
def set_char_callback(window, cbfun):
    '''
    Sets the Unicode character callback.
    Wrapper for:
    GLFWcharfun glfwSetCharCallback(GLFWwindow* window, GLFWcharfun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _char_callback_repository:
        previous_callback = _char_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWcharfun(cbfun)
    _char_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetCharCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_mouse_button_callback_repository = {}
_callback_repositories.append(_mouse_button_callback_repository)
_glfw.glfwSetMouseButtonCallback.restype = _GLFWmousebuttonfun
_glfw.glfwSetMouseButtonCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                             _GLFWmousebuttonfun]
def set_mouse_button_callback(window, cbfun):
    '''
    Sets the mouse button callback.
    Wrapper for:
    GLFWmousebuttonfun glfwSetMouseButtonCallback(GLFWwindow* window, GLFWmousebuttonfun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _mouse_button_callback_repository:
        previous_callback = _mouse_button_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWmousebuttonfun(cbfun)
    _mouse_button_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetMouseButtonCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_cursor_pos_callback_repository = {}
_callback_repositories.append(_cursor_pos_callback_repository)
_glfw.glfwSetCursorPosCallback.restype = _GLFWcursorposfun
_glfw.glfwSetCursorPosCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                           _GLFWcursorposfun]
def set_cursor_pos_callback(window, cbfun):
    '''
    Sets the cursor position callback.
    Wrapper for:
    GLFWcursorposfun glfwSetCursorPosCallback(GLFWwindow* window, GLFWcursorposfun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _cursor_pos_callback_repository:
        previous_callback = _cursor_pos_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWcursorposfun(cbfun)
    _cursor_pos_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetCursorPosCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_cursor_enter_callback_repository = {}
_callback_repositories.append(_cursor_enter_callback_repository)
_glfw.glfwSetCursorEnterCallback.restype = _GLFWcursorenterfun
_glfw.glfwSetCursorEnterCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                             _GLFWcursorenterfun]
def set_cursor_enter_callback(window, cbfun):
    '''
    Sets the cursor enter/exit callback.
    Wrapper for:
    GLFWcursorenterfun glfwSetCursorEnterCallback(GLFWwindow* window, GLFWcursorenterfun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _cursor_enter_callback_repository:
        previous_callback = _cursor_enter_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWcursorenterfun(cbfun)
    _cursor_enter_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetCursorEnterCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
_scroll_callback_repository = {}
_callback_repositories.append(_scroll_callback_repository)
_glfw.glfwSetScrollCallback.restype = _GLFWscrollfun
_glfw.glfwSetScrollCallback.argtypes = [ctypes.POINTER(_GLFWwindow),
                                        _GLFWscrollfun]
def set_scroll_callback(window, cbfun):
    '''
    Sets the scroll callback.
    Wrapper for:
    GLFWscrollfun glfwSetScrollCallback(GLFWwindow* window, GLFWscrollfun cbfun);
    '''
    window_addr = ctypes.cast(ctypes.pointer(window),
                              ctypes.POINTER(ctypes.c_long)).contents.value
    if window_addr in _scroll_callback_repository:
        previous_callback = _scroll_callback_repository[window_addr]
    else:
        previous_callback = None
    if cbfun is None:
        cbfun = 0
    c_cbfun = _GLFWscrollfun(cbfun)
    _scroll_callback_repository[window_addr] = (cbfun, c_cbfun)
    cbfun = c_cbfun
    _glfw.glfwSetScrollCallback(window, cbfun)
    if previous_callback is not None and previous_callback[0] != 0:
        return previous_callback[0]
# --- Joystick queries -------------------------------------------------------
_glfw.glfwJoystickPresent.restype = ctypes.c_int
_glfw.glfwJoystickPresent.argtypes = [ctypes.c_int]
def joystick_present(joy):
    '''
    Returns whether the specified joystick is present.
    Wrapper for:
    int glfwJoystickPresent(int joy);
    '''
    return _glfw.glfwJoystickPresent(joy)
_glfw.glfwGetJoystickAxes.restype = ctypes.POINTER(ctypes.c_float)
_glfw.glfwGetJoystickAxes.argtypes = [ctypes.c_int,
                                      ctypes.POINTER(ctypes.c_int)]
def get_joystick_axes(joy):
    '''
    Returns the values of all axes of the specified joystick, as a
    (float_pointer, count) pair; the array itself is owned by GLFW.
    Wrapper for:
    const float* glfwGetJoystickAxes(int joy, int* count);
    '''
    count_value = ctypes.c_int(0)
    count = ctypes.pointer(count_value)
    result = _glfw.glfwGetJoystickAxes(joy, count)
    return result, count_value.value
_glfw.glfwGetJoystickButtons.restype = ctypes.POINTER(ctypes.c_ubyte)
_glfw.glfwGetJoystickButtons.argtypes = [ctypes.c_int,
                                         ctypes.POINTER(ctypes.c_int)]
def get_joystick_buttons(joy):
    '''
    Returns the state of all buttons of the specified joystick, as a
    (ubyte_pointer, count) pair; the array itself is owned by GLFW.
    Wrapper for:
    const unsigned char* glfwGetJoystickButtons(int joy, int* count);
    '''
    count_value = ctypes.c_int(0)
    count = ctypes.pointer(count_value)
    result = _glfw.glfwGetJoystickButtons(joy, count)
    return result, count_value.value
_glfw.glfwGetJoystickName.restype = ctypes.c_char_p
_glfw.glfwGetJoystickName.argtypes = [ctypes.c_int]
def get_joystick_name(joy):
    '''
    Returns the name of the specified joystick.
    Wrapper for:
    const char* glfwGetJoystickName(int joy);
    '''
    return _glfw.glfwGetJoystickName(joy)
# --- Clipboard --------------------------------------------------------------
_glfw.glfwSetClipboardString.restype = None
_glfw.glfwSetClipboardString.argtypes = [ctypes.POINTER(_GLFWwindow),
                                         ctypes.c_char_p]
def set_clipboard_string(window, string):
    '''
    Sets the clipboard to the specified string.
    Wrapper for:
    void glfwSetClipboardString(GLFWwindow* window, const char* string);
    '''
    _glfw.glfwSetClipboardString(window, _to_char_p(string))
_glfw.glfwGetClipboardString.restype = ctypes.c_char_p
_glfw.glfwGetClipboardString.argtypes = [ctypes.POINTER(_GLFWwindow)]
def get_clipboard_string(window):
    '''
    Retrieves the contents of the clipboard as a string.
    Wrapper for:
    const char* glfwGetClipboardString(GLFWwindow* window);
    '''
    return _glfw.glfwGetClipboardString(window)
# --- Timer ------------------------------------------------------------------
_glfw.glfwGetTime.restype = ctypes.c_double
_glfw.glfwGetTime.argtypes = []
def get_time():
    '''
    Returns the value of the GLFW timer, in seconds as a float.
    Wrapper for:
    double glfwGetTime(void);
    '''
    return _glfw.glfwGetTime()
_glfw.glfwSetTime.restype = None
_glfw.glfwSetTime.argtypes = [ctypes.c_double]
def set_time(time):
    '''
    Sets the GLFW timer.
    Wrapper for:
    void glfwSetTime(double time);
    '''
    _glfw.glfwSetTime(time)
_glfw.glfwMakeContextCurrent.restype = None
_glfw.glfwMakeContextCurrent.argtypes = [ctypes.POINTER(_GLFWwindow)]
def make_context_current(window):
    '''
    Makes the context of the specified window current for the calling
    thread.

    Wrapper for:
        void glfwMakeContextCurrent(GLFWwindow* window);
    '''
    _glfw.glfwMakeContextCurrent(window)
_glfw.glfwGetCurrentContext.restype = ctypes.POINTER(_GLFWwindow)
_glfw.glfwGetCurrentContext.argtypes = []
def get_current_context():
    '''
    Returns the window whose context is current on the calling thread,
    as a ctypes pointer to a _GLFWwindow.

    Wrapper for:
        GLFWwindow* glfwGetCurrentContext(void);
    '''
    return _glfw.glfwGetCurrentContext()
_glfw.glfwSwapBuffers.restype = None
_glfw.glfwSwapBuffers.argtypes = [ctypes.POINTER(_GLFWwindow)]
def swap_buffers(window):
    '''
    Swaps the front and back buffers of the specified window.

    Wrapper for:
        void glfwSwapBuffers(GLFWwindow* window);
    '''
    _glfw.glfwSwapBuffers(window)
_glfw.glfwSwapInterval.restype = None
_glfw.glfwSwapInterval.argtypes = [ctypes.c_int]
def swap_interval(interval):
    '''
    Sets the swap interval (an int) for the current context.

    Wrapper for:
        void glfwSwapInterval(int interval);
    '''
    _glfw.glfwSwapInterval(interval)
_glfw.glfwExtensionSupported.restype = ctypes.c_int
_glfw.glfwExtensionSupported.argtypes = [ctypes.c_char_p]
def extension_supported(extension):
    '''
    Returns whether the specified extension is available, as the raw C
    int result (nonzero when supported).

    Wrapper for:
        int glfwExtensionSupported(const char* extension);
    '''
    return _glfw.glfwExtensionSupported(_to_char_p(extension))
_glfw.glfwGetProcAddress.restype = ctypes.c_void_p
_glfw.glfwGetProcAddress.argtypes = [ctypes.c_char_p]
def get_proc_address(procname):
    '''
    Returns the address of the specified function for the current
    context. The c_void_p restype yields an int address (or None for a
    NULL pointer).

    Wrapper for:
        GLFWglproc glfwGetProcAddress(const char* procname);
    '''
    return _glfw.glfwGetProcAddress(_to_char_p(procname))
| 54,410 | 32.217949 | 120 | py |
rllab | rllab-master/rllab/mujoco_py/mjviewer.py | import ctypes
from ctypes import pointer, byref
import logging
from threading import Lock
import os
from . import mjcore, mjconstants, glfw
from .mjlib import mjlib
import numpy as np
import OpenGL.GL as gl
logger = logging.getLogger(__name__)
mjCAT_ALL = 7
def _glfw_error_callback(e, d):
    # GLFW error hook (installed via glfw.set_error_callback): forwards the
    # error code and description to the module logger.
    logger.error('GLFW error: %s, desc: %s', e, d)
class MjViewer(object):
    """Interactive OpenGL viewer for a MuJoCo model, built on GLFW.

    With ``visible=False`` the window is hidden and a framebuffer object is
    still created, so the viewer can be used for offscreen rendering via
    :meth:`get_image`.
    """

    def __init__(self, visible=True, init_width=500, init_height=500, go_fast=False):
        """
        Set go_fast=True to run at full speed instead of waiting for the 60 Hz monitor refresh
        init_width and init_height set window size. On Mac Retina displays, they are in nominal
        pixels but .render returns an array of device pixels, so the array will be twice as big
        as you expect.
        """
        self.visible = visible
        self.init_width = init_width
        self.init_height = init_height
        # An invisible window never needs to sync to the monitor refresh.
        self.go_fast = not visible or go_fast

        self.last_render_time = 0
        self.objects = mjcore.MJVOBJECTS()
        self.cam = mjcore.MJVCAMERA()
        self.vopt = mjcore.MJVOPTION()
        self.ropt = mjcore.MJROPTION()
        self.con = mjcore.MJRCONTEXT()
        self.running = False
        self.speedtype = 1
        self.window = None
        self.model = None
        self.gui_lock = Lock()

        # Framebuffer / renderbuffer object ids (set up in start()).
        self._fbo = None
        self._rbo = None

        # Mouse interaction state.
        self._last_button = 0
        self._last_click_time = 0
        self._button_left_pressed = False
        self._button_middle_pressed = False
        self._button_right_pressed = False
        self._last_mouse_x = 0
        self._last_mouse_y = 0

    def set_model(self, model):
        """Attach a model (or None to detach); rebuilds the render context
        if the viewer is already running."""
        self.model = model
        if model:
            self.data = model.data
        else:
            self.data = None
        if self.running:
            if model:
                mjlib.mjr_makeContext(model.ptr, byref(self.con), 150)
            else:
                mjlib.mjr_makeContext(None, byref(self.con), 150)
            self.render()
        if model:
            self.autoscale()

    def autoscale(self):
        """Aim the free camera at the model's center, scaled to its extent."""
        self.cam.lookat[0] = self.model.stat.center[0]
        self.cam.lookat[1] = self.model.stat.center[1]
        self.cam.lookat[2] = self.model.stat.center[2]
        self.cam.distance = 0.5 * self.model.stat.extent
        self.cam.camid = -1
        self.cam.trackbodyid = 1
        width, height = self.get_dimensions()
        mjlib.mjv_updateCameraPose(byref(self.cam), width * 1.0 / height)

    def get_rect(self):
        """Return the current viewport as an MJRRECT."""
        rect = mjcore.MJRRECT(0, 0, 0, 0)
        rect.width, rect.height = self.get_dimensions()
        return rect

    def render(self):
        """Render the current mjData state into the active GL context.

        No-op when no data is attached.
        """
        if not self.data:
            return
        self.gui_lock.acquire()
        rect = self.get_rect()
        arr = (ctypes.c_double * 3)(0, 0, 0)
        mjlib.mjv_makeGeoms(
            self.model.ptr, self.data.ptr, byref(self.objects),
            byref(self.vopt), mjCAT_ALL, 0, None, None,
            ctypes.cast(arr, ctypes.POINTER(ctypes.c_double)))
        mjlib.mjv_makeLights(self.model.ptr, self.data.ptr, byref(self.objects))
        mjlib.mjv_setCamera(self.model.ptr, self.data.ptr, byref(self.cam))
        mjlib.mjv_updateCameraPose(
            byref(self.cam), rect.width * 1.0 / rect.height)
        mjlib.mjr_render(0, rect, byref(self.objects), byref(self.ropt),
                         byref(self.cam.pose), byref(self.con))
        self.gui_lock.release()

    def get_dimensions(self):
        """
        returns a tuple (width, height)
        """
        if self.window:
            return glfw.get_framebuffer_size(self.window)
        return (self.init_width, self.init_height)

    def get_image(self):
        """
        returns a tuple (data, width, height), where:
        - data is a string with raw bytes representing the pixels in 3-channel RGB
          (i.e. every three bytes = 1 pixel)
        - width is the width of the image
        - height is the height of the image
        """
        width, height = self.get_dimensions()
        gl.glReadBuffer(gl.GL_BACK)
        data = gl.glReadPixels(0, 0, width, height, gl.GL_RGB, gl.GL_UNSIGNED_BYTE)
        return (data, width, height)

    def _init_framebuffer_object(self):
        """
        Create a framebuffer object (with an RGBA renderbuffer attached) to
        support offscreen rendering.
        http://learnopengl.com/#!Advanced-OpenGL/Framebuffers
        """
        fbo = gl.glGenFramebuffers(1)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo)

        rbo = gl.glGenRenderbuffers(1)
        gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, rbo)
        gl.glRenderbufferStorage(
            gl.GL_RENDERBUFFER,
            gl.GL_RGBA,
            self.init_width,
            self.init_height
        )
        gl.glFramebufferRenderbuffer(
            gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_RENDERBUFFER, rbo)
        gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, 0)
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)

        fbo_status = gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER)
        if fbo_status != gl.GL_FRAMEBUFFER_COMPLETE:
            # BUGFIX: glDeleteFramebuffers takes (n, framebuffers); the list
            # was previously passed as the count argument.
            gl.glDeleteFramebuffers(1, [fbo])
            glfw.terminate()
            raise Exception('Framebuffer failed status check: %s' % fbo_status)

        self._fbo = fbo
        self._rbo = rbo

    def start(self):
        """Initialize GLFW, create the window/context, build the FBO, and
        install the mouse callbacks. Returns silently if no window could be
        created."""
        logger.info('initializing glfw@%s', glfw.get_version())

        glfw.set_error_callback(_glfw_error_callback)

        if not glfw.init():
            raise Exception('glfw failed to initialize')

        window = None
        if self.visible:
            glfw.window_hint(glfw.SAMPLES, 4)
        else:
            glfw.window_hint(glfw.VISIBLE, 0)

        # try stereo if refresh rate is at least 100Hz
        stereo_available = False
        _, _, refresh_rate = glfw.get_video_mode(glfw.get_primary_monitor())
        if refresh_rate >= 100:
            glfw.window_hint(glfw.STEREO, 1)
            window = glfw.create_window(
                self.init_width, self.init_height, "Simulate", None, None)
            if window:
                stereo_available = True

        # no stereo: try mono
        if not window:
            glfw.window_hint(glfw.STEREO, 0)
            window = glfw.create_window(
                self.init_width, self.init_height, "Simulate", None, None)

        if not window:
            glfw.terminate()
            return
        self.running = True

        # Make the window's context current
        glfw.make_context_current(window)
        if self.go_fast:
            # Let's go faster than 60 Hz
            glfw.swap_interval(0)

        self._init_framebuffer_object()

        # Scale factor between framebuffer (device) and window (nominal)
        # pixels, e.g. 2.0 on Retina displays.
        fb_width, _ = glfw.get_framebuffer_size(window)
        win_width, _ = glfw.get_window_size(window)
        self._scale = fb_width * 1.0 / win_width
        self.window = window

        mjlib.mjv_makeObjects(byref(self.objects), 1000)

        mjlib.mjv_defaultCamera(byref(self.cam))
        mjlib.mjv_defaultOption(byref(self.vopt))
        mjlib.mjr_defaultOption(byref(self.ropt))

        mjlib.mjr_defaultContext(byref(self.con))

        if self.model:
            mjlib.mjr_makeContext(self.model.ptr, byref(self.con), 150)
            self.autoscale()
        else:
            mjlib.mjr_makeContext(None, byref(self.con), 150)

        glfw.set_cursor_pos_callback(window, self.handle_mouse_move)
        glfw.set_mouse_button_callback(window, self.handle_mouse_button)
        glfw.set_scroll_callback(window, self.handle_scroll)

    def handle_mouse_move(self, window, xpos, ypos):
        """Cursor-position callback: rotate/translate/zoom the camera while
        a mouse button is held."""
        # no buttons down: nothing to do
        if not self._button_left_pressed \
                and not self._button_middle_pressed \
                and not self._button_right_pressed:
            return

        # compute mouse displacement, save
        dx = int(self._scale * xpos) - self._last_mouse_x
        dy = int(self._scale * ypos) - self._last_mouse_y
        self._last_mouse_x = int(self._scale * xpos)
        self._last_mouse_y = int(self._scale * ypos)

        # require model
        if not self.model:
            return

        # get current window size
        width, height = glfw.get_framebuffer_size(self.window)

        # get shift key state
        mod_shift = glfw.get_key(window, glfw.KEY_LEFT_SHIFT) == glfw.PRESS \
            or glfw.get_key(window, glfw.KEY_RIGHT_SHIFT) == glfw.PRESS

        # determine action based on mouse button
        if self._button_right_pressed:
            action = mjconstants.MOUSE_MOVE_H if mod_shift else mjconstants.MOUSE_MOVE_V
        elif self._button_left_pressed:
            action = mjconstants.MOUSE_ROTATE_H if mod_shift else mjconstants.MOUSE_ROTATE_V
        else:
            action = mjconstants.MOUSE_ZOOM

        self.gui_lock.acquire()
        mjlib.mjv_moveCamera(action, dx, dy, byref(self.cam), width, height)
        self.gui_lock.release()

    def handle_mouse_button(self, window, button, act, mods):
        """Mouse-button callback: track pressed buttons and the click
        position (in device pixels)."""
        # update button state
        self._button_left_pressed = \
            glfw.get_mouse_button(window, glfw.MOUSE_BUTTON_LEFT) == glfw.PRESS
        self._button_middle_pressed = \
            glfw.get_mouse_button(window, glfw.MOUSE_BUTTON_MIDDLE) == glfw.PRESS
        self._button_right_pressed = \
            glfw.get_mouse_button(window, glfw.MOUSE_BUTTON_RIGHT) == glfw.PRESS

        # update mouse position
        x, y = glfw.get_cursor_pos(window)
        self._last_mouse_x = int(self._scale * x)
        self._last_mouse_y = int(self._scale * y)

        if not self.model:
            return

        self.gui_lock.acquire()

        # save info
        if act == glfw.PRESS:
            self._last_button = button
            self._last_click_time = glfw.get_time()

        self.gui_lock.release()

    def handle_scroll(self, window, x_offset, y_offset):
        """Scroll callback: zoom the camera proportionally to the vertical
        scroll offset."""
        # require model
        if not self.model:
            return

        # get current window size
        width, height = glfw.get_framebuffer_size(window)

        # scroll
        self.gui_lock.acquire()
        mjlib.mjv_moveCamera(mjconstants.MOUSE_ZOOM, 0, (-20 * y_offset),
                             byref(self.cam), width, height)
        self.gui_lock.release()

    def should_stop(self):
        """True once the user has requested that the window close."""
        return glfw.window_should_close(self.window)

    def loop_once(self):
        """Render one frame, swap buffers, and pump window events."""
        self.render()
        # Swap front and back buffers
        glfw.swap_buffers(self.window)
        # Poll for and process events
        glfw.poll_events()

    def finish(self):
        """Tear down GLFW and free GL / mujoco rendering resources."""
        # NOTE(review): GLFW is terminated before the GL objects are deleted,
        # which mirrors the original code but means the deletes may run
        # without a current context — confirm intended ordering.
        glfw.terminate()
        # BUGFIX: glDeleteFramebuffers/glDeleteRenderbuffers take
        # (n, buffers); the buffer id was previously passed as the count.
        if gl.glIsFramebuffer(self._fbo):
            gl.glDeleteFramebuffers(1, [int(self._fbo)])
        if gl.glIsRenderbuffer(self._rbo):
            gl.glDeleteRenderbuffers(1, [int(self._rbo)])
        mjlib.mjr_freeContext(byref(self.con))
        mjlib.mjv_freeObjects(byref(self.objects))
        self.running = False
| 10,788 | 31.893293 | 174 | py |
def append_objects(cur, extra):
    """Append all geoms from *extra* onto *cur*, updating ``cur.ngeom``.

    Raises ValueError if the combined geom count would exceed
    ``cur.maxgeom``. BUGFIX: the capacity check now runs *before* the
    copy; previously the geoms were written past the buffer limit first
    and the error was only raised afterwards.
    """
    total = cur.ngeom + extra.ngeom
    if total > cur.maxgeom:
        raise ValueError("buffer limit exceeded!")
    for i in range(cur.ngeom, total):
        cur.geoms[i] = extra.geoms[i - cur.ngeom]
    cur.ngeom = total
| 262 | 36.571429 | 55 | py |
rllab | rllab-master/rllab/mujoco_py/mjcore.py | from ctypes import create_string_buffer
import ctypes
from . import mjconstants as C
from .mjtypes import * # import all for backwards compatibility
from .mjlib import mjlib
class MjError(Exception):
pass
def register_license(file_path):
"""
activates mujoco with license at `file_path`
this does not check the return code, per usage example at simulate.cpp
and test.cpp.
"""
result = mjlib.mj_activate(file_path)
return result
class dict2(dict):
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
class MjModel(MjModelWrapper):
def __init__(self, xml_path):
buf = create_string_buffer(1000)
model_ptr = mjlib.mj_loadXML(xml_path, None, buf, 1000)
if len(buf.value) > 0:
super(MjModel, self).__init__(None)
raise MjError(buf.value)
super(MjModel, self).__init__(model_ptr)
data_ptr = mjlib.mj_makeData(model_ptr)
fields = ["nq","nv","na","nu","nbody","nmocap","nuserdata","nsensordata","njnt","ngeom","nsite","ncam","nlight","ntendon","nwrap","nM","njmax","nemax"]
sizes = dict2(**{ k: getattr(self, k) for k in fields })
data = MjData(data_ptr, sizes)
self.data = data
self._body_comvels = None
self.forward()
def forward(self):
mjlib.mj_forward(self.ptr, self.data.ptr)
mjlib.mj_sensor(self.ptr, self.data.ptr)
mjlib.mj_energy(self.ptr, self.data.ptr)
self._body_comvels = None
@property
def body_comvels(self):
if self._body_comvels is None:
self._body_comvels = self._compute_subtree()
return self._body_comvels
def _compute_subtree(self):
body_vels = np.zeros((self.nbody, 6))
# bodywise quantities
mass = self.body_mass.flatten()
for i in range(self.nbody):
# body velocity
mjlib.mj_objectVelocity(
self.ptr, self.data.ptr, C.mjOBJ_BODY, i,
body_vels[i].ctypes.data_as(POINTER(c_double)), 0
)
# body linear momentum
lin_moms = body_vels[:, 3:] * mass.reshape((-1, 1))
# init subtree mass
body_parentid = self.body_parentid
# subtree com and com_vel
for i in range(self.nbody - 1, -1, -1):
if i > 0:
parent = body_parentid[i]
# add scaled velocities
lin_moms[parent] += lin_moms[i]
# accumulate mass
mass[parent] += mass[i]
return lin_moms / mass.reshape((-1, 1))
def step(self):
mjlib.mj_step(self.ptr, self.data.ptr)
def __del__(self):
if self._wrapped is not None:
# At the very end of the process, mjlib can be unloaded before we are deleted.
# At that point, it's okay to leak this memory.
if mjlib: mjlib.mj_deleteModel(self._wrapped)
@property
def body_names(self):
start_addr = ctypes.addressof(self.names.contents)
return [ctypes.string_at(start_addr + int(inc)).decode("utf-8")
for inc in self.name_bodyadr.flatten()]
@property
def joint_names(self):
start_addr = ctypes.addressof(self.names.contents)
return [ctypes.string_at(start_addr + int(inc)).decode("utf-8")
for inc in self.name_jntadr.flatten()]
def joint_adr(self, joint_name):
"""Return (qposadr, qveladr, dof) for the given joint name.
If dof is 4 or 7, then the last 4 degrees of freedom in qpos represent a
unit quaternion."""
jntadr = mjlib.mj_name2id(self.ptr, C.mjOBJ_JOINT, joint_name)
assert(jntadr >= 0)
dofmap = {C.mjJNT_FREE: 7,
C.mjJNT_BALL: 4,
C.mjJNT_SLIDE: 1,
C.mjJNT_HINGE: 1}
qposadr = self.jnt_qposadr[jntadr][0]
qveladr = self.jnt_dofadr[jntadr][0]
dof = dofmap[self.jnt_type[jntadr][0]]
return (qposadr, qveladr, dof)
@property
def geom_names(self):
start_addr = ctypes.addressof(self.names.contents)
return [ctypes.string_at(start_addr + int(inc)).decode("utf-8")
for inc in self.name_geomadr.flatten()]
@property
def site_names(self):
start_addr = ctypes.addressof(self.names.contents)
return [ctypes.string_at(start_addr + int(inc)).decode("utf-8")
for inc in self.name_siteadr.flatten()]
@property
def mesh_names(self):
start_addr = ctypes.addressof(self.names.contents)
return [ctypes.string_at(start_addr + int(inc)).decode("utf-8")
for inc in self.name_meshadr.flatten()]
@property
def numeric_names(self):
start_addr = ctypes.addressof(self.names.contents)
return [ctypes.string_at(start_addr + int(inc)).decode("utf-8")
for inc in self.name_numericadr.flatten()]
class MjData(MjDataWrapper):
    """Wrapper around a native mjData pointer; frees the struct when
    garbage-collected."""

    def __init__(self, wrapped, size_src=None):
        super(MjData, self).__init__(wrapped, size_src)

    def __del__(self):
        # mjlib can already be unloaded at interpreter shutdown; in that
        # case it is acceptable to leak the native memory.
        if self._wrapped is not None and mjlib:
            mjlib.mj_deleteData(self._wrapped)

    @property
    def contact(self):
        """The currently active contacts, wrapped as MjContactWrapper objects."""
        active = self._wrapped.contents.contact[:self.ncon]
        return [MjContactWrapper(pointer(entry)) for entry in active]
| 5,531 | 33.575 | 159 | py |
rllab | rllab-master/rllab/mujoco_py/mjlib.py | from ctypes import *
import os
from .util import *
from .mjtypes import *
osp = os.path
# Select the platform-specific MuJoCo 1.31 shared library shipped under
# vendor/mujoco, resolved relative to this file, then load it via ctypes.
if sys.platform.startswith("darwin"):
    libfile = osp.abspath(osp.join(osp.dirname(__file__),"../../vendor/mujoco/libmujoco131.dylib"))
elif sys.platform.startswith("linux"):
    libfile = osp.abspath(osp.join(osp.dirname(__file__),"../../vendor/mujoco/libmujoco131.so"))
elif sys.platform.startswith("win"):
    libfile = osp.abspath(osp.join(osp.dirname(__file__),"../../vendor/mujoco/mujoco.lib"))
else:
    raise RuntimeError("unrecognized platform %s"%sys.platform)
mjlib = cdll.LoadLibrary(libfile)
mjlib.mj_loadXML.argtypes = [String, String, c_char_p, c_int]
mjlib.mj_loadXML.restype = POINTER(MJMODEL)
mjlib.mj_saveXML.argtypes = [String, POINTER(MJMODEL), String]
mjlib.mj_saveXML.restype = c_int
#mjlib.mj_printSchema.argtypes = [String, String, c_int, c_int, c_int]
#mjlib.mj_printSchema.restype = c_int
mjlib.mj_activate.argtypes = [String]
mjlib.mj_activate.restype = c_int
mjlib.mj_step.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_step.restype = None
#mjlib.mj_step1.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_step1.restype = None
#mjlib.mj_step2.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_step2.restype = None
mjlib.mj_forward.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_forward.restype = None
mjlib.mj_inverse.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
mjlib.mj_inverse.restype = None
#mjlib.mj_forwardSkip.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_forwardSkip.restype = None
#mjlib.mj_inverseSkip.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_inverseSkip.restype = None
#mjlib.mj_sensor.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_sensor.restype = None
#mjlib.mj_energy.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_energy.restype = None
#mjlib.mj_defaultSolRefImp.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mj_defaultSolRefImp.restype = None
#mjlib.mj_defaultOption.argtypes = [POINTER(mjOption)]
#mjlib.mj_defaultOption.restype = None
#mjlib.mj_defaultVisual.argtypes = [POINTER(mjVisual)]
#mjlib.mj_defaultVisual.restype = None
#mjlib.mj_copyModel.argtypes = [POINTER(MJMODEL), POINTER(MJMODEL)]
#mjlib.mj_copyModel.restype = POINTER(MJMODEL)
#mjlib.mj_saveModel.argtypes = [POINTER(MJMODEL), String, c_int, POINTER(None)]
#mjlib.mj_saveModel.restype = None
#mjlib.mj_loadModel.argtypes = [String, c_int, POINTER(None)]
#mjlib.mj_loadModel.restype = POINTER(MJMODEL)
mjlib.mj_deleteModel.argtypes = [POINTER(MJMODEL)]
mjlib.mj_deleteModel.restype = None
#mjlib.mj_sizeModel.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_sizeModel.restype = c_int
mjlib.mj_makeData.argtypes = [POINTER(MJMODEL)]
mjlib.mj_makeData.restype = POINTER(MJDATA)
#mjlib.mj_copyData.argtypes = [POINTER(MJDATA), POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_copyData.restype = POINTER(MJDATA)
#mjlib.mj_resetData.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_resetData.restype = None
#mjlib.mj_stackAlloc.argtypes = [POINTER(MJDATA), c_int]
#mjlib.mj_stackAlloc.restype = POINTER(c_double)
mjlib.mj_deleteData.argtypes = [POINTER(MJDATA)]
mjlib.mj_deleteData.restype = None
#mjlib.mj_resetCallbacks.argtypes = []
#mjlib.mj_resetCallbacks.restype = None
#mjlib.mj_setConst.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_setConst.restype = None
#mjlib.mj_printModel.argtypes = [POINTER(MJMODEL), String]
#mjlib.mj_printModel.restype = None
#mjlib.mj_printData.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), String]
#mjlib.mj_printData.restype = None
#mjlib.mju_printMat.argtypes = [POINTER(c_double), c_int, c_int]
#mjlib.mju_printMat.restype = None
#mjlib.mj_fwdPosition.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdPosition.restype = None
#mjlib.mj_fwdVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdVelocity.restype = None
#mjlib.mj_fwdActuation.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdActuation.restype = None
#mjlib.mj_fwdAcceleration.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdAcceleration.restype = None
#mjlib.mj_fwdConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_fwdConstraint.restype = None
#mjlib.mj_Euler.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_Euler.restype = None
#mjlib.mj_RungeKutta.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int]
#mjlib.mj_RungeKutta.restype = None
#mjlib.mj_invPosition.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invPosition.restype = None
#mjlib.mj_invVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invVelocity.restype = None
#mjlib.mj_invConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_invConstraint.restype = None
#mjlib.mj_compareFwdInv.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_compareFwdInv.restype = None
#mjlib.mj_checkPos.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkPos.restype = None
#mjlib.mj_checkVel.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkVel.restype = None
#mjlib.mj_checkAcc.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_checkAcc.restype = None
#mjlib.mj_kinematics.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_kinematics.restype = None
#mjlib.mj_comPos.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_comPos.restype = None
#mjlib.mj_tendon.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_tendon.restype = None
#mjlib.mj_transmission.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_transmission.restype = None
#mjlib.mj_crb.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_crb.restype = None
#mjlib.mj_factorM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_factorM.restype = None
#mjlib.mj_backsubM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_backsubM.restype = None
#mjlib.mj_backsubM2.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_backsubM2.restype = None
#mjlib.mj_comVel.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_comVel.restype = None
#mjlib.mj_passive.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_passive.restype = None
#mjlib.mj_rne.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, POINTER(c_double)]
#mjlib.mj_rne.restype = None
#mjlib.mj_rnePostConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_rnePostConstraint.restype = None
#mjlib.mj_collision.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_collision.restype = None
#mjlib.mj_makeConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_makeConstraint.restype = None
#mjlib.mj_projectConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_projectConstraint.restype = None
#mjlib.mj_referenceConstraint.argtypes = [POINTER(MJMODEL), POINTER(MJDATA)]
#mjlib.mj_referenceConstraint.restype = None
#mjlib.mj_isPyramid.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_isPyramid.restype = c_int
#mjlib.mj_isSparse.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_isSparse.restype = c_int
#mjlib.mj_mulJacVec.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulJacVec.restype = None
#mjlib.mj_mulJacTVec.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulJacTVec.restype = None
#mjlib.mj_jac.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jac.restype = None
#mjlib.mj_jacBody.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacBody.restype = None
#mjlib.mj_jacBodyCom.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacBodyCom.restype = None
#mjlib.mj_jacGeom.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacGeom.restype = None
#mjlib.mj_jacSite.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacSite.restype = None
#mjlib.mj_jacPointAxis.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_jacPointAxis.restype = None
#mjlib.mj_name2id.argtypes = [POINTER(MJMODEL), mjtObj, String]
#mjlib.mj_name2id.restype = c_int
#mjlib.mj_id2name.argtypes = [POINTER(MJMODEL), mjtObj, c_int]
#mjlib. mj_id2name.restype = ReturnString
#mjlib.else:
#mjlib. mj_id2name.restype = String
#mjlib. mj_id2name.errcheck = ReturnString
#mjlib.mj_fullM.argtypes = [POINTER(MJMODEL), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_fullM.restype = None
#mjlib.mj_mulM.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double)]
#mjlib.mj_mulM.restype = None
#mjlib.mj_applyFT.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, POINTER(c_double)]
#mjlib.mj_applyFT.restype = None
mjlib.mj_objectVelocity.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), c_ubyte]
mjlib.mj_objectVelocity.restype = None
#mjlib.mj_objectAcceleration.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), mjtByte]
#mjlib.mj_objectAcceleration.restype = None
#mjlib.mj_contactForce.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, POINTER(c_double)]
#mjlib.mj_contactForce.restype = None
#mjlib.mj_integratePos.argtypes = [POINTER(MJMODEL), POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mj_integratePos.restype = None
#mjlib.mj_normalizeQuat.argtypes = [POINTER(MJMODEL), POINTER(c_double)]
#mjlib.mj_normalizeQuat.restype = None
#mjlib.mj_local2Global.argtypes = [POINTER(MJDATA), POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mj_local2Global.restype = None
#mjlib.mj_getTotalmass.argtypes = [POINTER(MJMODEL)]
#mjlib.mj_getTotalmass.restype = c_double
#mjlib.mj_setTotalmass.argtypes = [POINTER(MJMODEL), c_double]
#mjlib.mj_setTotalmass.restype = None
#mjlib.mj_version.argtypes = []
#mjlib.mj_version.restype = c_double
mjlib.mjv_makeObjects.argtypes = [POINTER(MJVOBJECTS), c_int]
mjlib.mjv_makeObjects.restype = None
mjlib.mjv_freeObjects.argtypes = [POINTER(MJVOBJECTS)]
mjlib.mjv_freeObjects.restype = None
mjlib.mjv_defaultOption.argtypes = [POINTER(MJVOPTION)]
mjlib.mjv_defaultOption.restype = None
#mjlib.mjv_defaultCameraPose.argtypes = [POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_defaultCameraPose.restype = None
mjlib.mjv_defaultCamera.argtypes = [POINTER(MJVCAMERA)]
mjlib.mjv_defaultCamera.restype = None
mjlib.mjv_setCamera.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVCAMERA)]
mjlib.mjv_setCamera.restype = None
mjlib.mjv_updateCameraPose.argtypes = [POINTER(MJVCAMERA), c_double]
mjlib.mjv_updateCameraPose.restype = None
#mjlib.mjv_convert3D.argtypes = [POINTER(c_double), POINTER(c_double), c_double, POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_convert3D.restype = None
#mjlib.mjv_convert2D.argtypes = [POINTER(c_double), mjtMouse, c_double, c_double, c_double, POINTER(MJVCAMERAPOSE)]
#mjlib.mjv_convert2D.restype = None
mjlib.mjv_moveCamera.argtypes = [c_int, c_float, c_float, POINTER(MJVCAMERA), c_float, c_float]
mjlib.mjv_moveCamera.restype = None
#mjlib.mjv_moveObject.argtypes = [mjtMouse, c_float, c_float, POINTER(MJVCAMERAPOSE), c_float, c_float, POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_moveObject.restype = None
#mjlib.mjv_mousePerturb.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_mousePerturb.restype = None
#mjlib.mjv_mouseEdit.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), c_int, c_int, POINTER(c_double), POINTER(c_double)]
#mjlib.mjv_mouseEdit.restype = None
mjlib.mjv_makeGeoms.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVOBJECTS), POINTER(MJVOPTION), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
mjlib.mjv_makeGeoms.restype = None
mjlib.mjv_makeLights.argtypes = [POINTER(MJMODEL), POINTER(MJDATA), POINTER(MJVOBJECTS)]
mjlib.mjv_makeLights.restype = None
mjlib.mjr_overlay.argtypes = [MJRRECT, c_int, c_int, String, String, POINTER(MJRCONTEXT)]
mjlib.mjr_overlay.restype = None
#mjlib.mjr_rectangle.argtypes = [c_int, MJRRECT, c_double, c_double, c_double, c_double, c_double, c_double, c_double, c_double]
#mjlib.mjr_rectangle.restype = None
#mjlib.mjr_finish.argtypes = []
#mjlib.mjr_finish.restype = None
#mjlib.mjr_text.argtypes = [String, POINTER(MJRCONTEXT), c_int, c_float, c_float, c_float, c_float, c_float, c_float]
#mjlib.mjr_text.restype = None
#mjlib.mjr_textback.argtypes = [String, POINTER(MJRCONTEXT), c_float, c_float, c_float, c_float, c_float, c_float]
#mjlib.mjr_textback.restype = None
#mjlib.mjr_textWidth.argtypes = [String, POINTER(MJRCONTEXT), c_int]
#mjlib.mjr_textWidth.restype = c_int
mjlib.mjr_defaultOption.argtypes = [POINTER(MJROPTION)]
mjlib.mjr_defaultOption.restype = None
mjlib.mjr_defaultContext.argtypes = [POINTER(MJRCONTEXT)]
mjlib.mjr_defaultContext.restype = None
#mjlib.mjr_uploadTexture.argtypes = [POINTER(MJMODEL), POINTER(MJRCONTEXT), c_int]
#mjlib.mjr_uploadTexture.restype = None
mjlib.mjr_makeContext.argtypes = [POINTER(MJMODEL), POINTER(MJRCONTEXT), c_int]
mjlib.mjr_makeContext.restype = None
mjlib.mjr_freeContext.argtypes = [POINTER(MJRCONTEXT)]
mjlib.mjr_freeContext.restype = None
mjlib.mjr_render.argtypes = [c_int, MJRRECT, POINTER(MJVOBJECTS), POINTER(MJROPTION), POINTER(MJVCAMERAPOSE), POINTER(MJRCONTEXT)]
mjlib.mjr_render.restype = None
#mjlib.mjr_select.argtypes = [MJRRECT, POINTER(MJVOBJECTS), c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(MJROPTION), POINTER(MJVCAMERAPOSE), POINTER(MJRCONTEXT)]
#mjlib.mjr_select.restype = c_int
#mjlib.mjr_showOffscreen.argtypes = [c_int, c_int, POINTER(MJRCONTEXT)]
#mjlib.mjr_showOffscreen.restype = None
#mjlib.mjr_showBuffer.argtypes = [POINTER(c_ubyte), c_int, c_int, c_int, c_int, POINTER(MJRCONTEXT)]
#mjlib.mjr_showBuffer.restype = None
#mjlib.mjr_getOffscreen.argtypes = [POINTER(c_ubyte), POINTER(c_float), MJRRECT, POINTER(MJRCONTEXT)]
#mjlib.mjr_getOffscreen.restype = None
#mjlib.mjr_getBackbuffer.argtypes = [POINTER(c_ubyte), POINTER(c_float), MJRRECT, POINTER(MJRCONTEXT)]
#mjlib.mjr_getBackbuffer.restype = None
#mjlib.
#mjlib.
#mjlib.mju_error.argtypes = [String]
#mjlib.mju_error.restype = None
#mjlib.mju_error_i.argtypes = [String, c_int]
#mjlib.mju_error_i.restype = None
#mjlib.mju_error_s.argtypes = [String, String]
#mjlib.mju_error_s.restype = None
#mjlib.mju_warning.argtypes = [String]
#mjlib.mju_warning.restype = None
#mjlib.mju_warning_i.argtypes = [String, c_int]
#mjlib.mju_warning_i.restype = None
#mjlib.mju_warning_s.argtypes = [String, String]
#mjlib.mju_warning_s.restype = None
#mjlib.mju_clearHandlers.argtypes = []
#mjlib.mju_clearHandlers.restype = None
#mjlib.mju_malloc.argtypes = [c_size_t]
#mjlib.mju_malloc.restype = POINTER(None)
#mjlib.mju_free.argtypes = [POINTER(None)]
#mjlib.mju_free.restype = None
#mjlib.mj_warning.argtypes = [POINTER(MJDATA), c_int]
#mjlib.mj_warning.restype = None
#mjlib.mju_zero3.argtypes = [POINTER(c_double)]
#mjlib.mju_zero3.restype = None
#mjlib.mju_copy3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_copy3.restype = None
#mjlib.mju_scl3.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_scl3.restype = None
#mjlib.mju_add3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_add3.restype = None
#mjlib.mju_sub3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_sub3.restype = None
#mjlib.mju_addTo3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_addTo3.restype = None
#mjlib.mju_addToScl3.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_addToScl3.restype = None
#mjlib.mju_addScl3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_addScl3.restype = None
#mjlib.mju_normalize3.argtypes = [POINTER(c_double)]
#mjlib.mju_normalize3.restype = c_double
#mjlib.mju_norm3.argtypes = [POINTER(c_double)]
#mjlib.mju_norm3.restype = c_double
#mjlib.mju_dot3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_dot3.restype = c_double
#mjlib.mju_dist3.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_dist3.restype = c_double
#mjlib.mju_rotVecMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecMat.restype = None
#mjlib.mju_rotVecMatT.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecMatT.restype = None
#mjlib.mju_cross.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_cross.restype = None
#mjlib.mju_zero.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_zero.restype = None
#mjlib.mju_copy.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_copy.restype = None
#mjlib.mju_scl.argtypes = [POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_scl.restype = None
#mjlib.mju_add.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_add.restype = None
#mjlib.mju_sub.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_sub.restype = None
#mjlib.mju_addTo.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_addTo.restype = None
#mjlib.mju_addToScl.argtypes = [POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_addToScl.restype = None
#mjlib.mju_addScl.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_double, c_int]
#mjlib.mju_addScl.restype = None
#mjlib.mju_normalize.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_normalize.restype = c_double
#mjlib.mju_norm.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_norm.restype = c_double
#mjlib.mju_dot.argtypes = [POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_dot.restype = c_double
#mjlib.mju_mulMatVec.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_mulMatVec.restype = None
#mjlib.mju_mulMatTVec.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_mulMatTVec.restype = None
#mjlib.mju_transpose.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_int]
#mjlib.mju_transpose.restype = None
#mjlib.mju_mulMatMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatMat.restype = None
#mjlib.mju_mulMatMatT.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatMatT.restype = None
#mjlib.mju_sqrMat.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_int, POINTER(c_double), c_int]
#mjlib.mju_sqrMat.restype = None
#mjlib.mju_mulMatTMat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_mulMatTMat.restype = None
#mjlib.mju_transformSpatial.argtypes = [POINTER(c_double), POINTER(c_double), mjtByte, POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_transformSpatial.restype = None
#mjlib.mju_rotVecQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_rotVecQuat.restype = None
#mjlib.mju_negQuat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_negQuat.restype = None
#mjlib.mju_mulQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mulQuat.restype = None
#mjlib.mju_mulQuatAxis.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mulQuatAxis.restype = None
#mjlib.mju_axisAngle2Quat.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_axisAngle2Quat.restype = None
#mjlib.mju_quat2Vel.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_quat2Vel.restype = None
#mjlib.mju_quat2Mat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_quat2Mat.restype = None
#mjlib.mju_mat2Quat.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_mat2Quat.restype = None
#mjlib.mju_derivQuat.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_derivQuat.restype = None
#mjlib.mju_quatIntegrate.argtypes = [POINTER(c_double), POINTER(c_double), c_double]
#mjlib.mju_quatIntegrate.restype = None
#mjlib.mju_quatZ2Vec.argtypes = [POINTER(c_double), POINTER(c_double)]
#mjlib.mju_quatZ2Vec.restype = None
#mjlib.mju_cholFactor.argtypes = [POINTER(c_double), POINTER(c_double), c_int, c_double, c_double, POINTER(c_double)]
#mjlib.mju_cholFactor.restype = c_int
#mjlib.mju_cholBacksub.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int, c_int, c_int]
#mjlib.mju_cholBacksub.restype = None
#mjlib.mju_eig3.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double)]
#mjlib.mju_eig3.restype = c_int
#mjlib.mju_muscleFVL.argtypes = [c_double, c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_muscleFVL.restype = c_double
#mjlib.mju_musclePassive.argtypes = [c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_musclePassive.restype = c_double
#mjlib.mju_pneumatic.argtypes = [c_double, c_double, c_double, POINTER(c_double), c_double, c_double, c_double, POINTER(c_double)]
#mjlib.mju_pneumatic.restype = c_double
#mjlib.mju_encodePyramid.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_encodePyramid.restype = None
#mjlib.mju_decodePyramid.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double), c_int]
#mjlib.mju_decodePyramid.restype = None
#mjlib.mju_springDamper.argtypes = [c_double, c_double, c_double, c_double, c_double]
#mjlib.mju_springDamper.restype = c_double
#mjlib.mju_min.argtypes = [c_double, c_double]
#mjlib.mju_min.restype = c_double
#mjlib.mju_max.argtypes = [c_double, c_double]
#mjlib.mju_max.restype = c_double
#mjlib.mju_sign.argtypes = [c_double]
#mjlib.mju_sign.restype = c_double
#mjlib.mju_round.argtypes = [c_double]
#mjlib.mju_round.restype = c_int
#mjlib.mju_type2Str.argtypes = [c_int]
#mjlib. mju_type2Str.restype = ReturnString
#mjlib.else:
#mjlib. mju_type2Str.restype = String
#mjlib. mju_type2Str.errcheck = ReturnString
#mjlib.mju_str2Type.argtypes = [String]
#mjlib.mju_str2Type.restype = mjtObj
#mjlib.mju_warningText.argtypes = [c_int]
#mjlib. mju_warningText.restype = ReturnString
#mjlib.else:
#mjlib. mju_warningText.restype = String
#mjlib. mju_warningText.errcheck = ReturnString
#mjlib.mju_isBad.argtypes = [c_double]
#mjlib.mju_isBad.restype = c_int
#mjlib.mju_isZero.argtypes = [POINTER(c_double), c_int]
#mjlib.mju_isZero.restype = c_int
| 22,701 | 54.101942 | 178 | py |
rllab | rllab-master/rllab/mujoco_py/mjtypes.py |
# AUTO GENERATED. DO NOT CHANGE!
from ctypes import *
import numpy as np
class MJCONTACT(Structure):
    """ctypes mirror of the MuJoCo contact struct (mjContact).

    Field names, order and types must match the native library's layout
    exactly — do not reorder or retype entries.
    """
    _fields_ = [
        ("dist", c_double),
        ("pos", c_double * 3),
        ("frame", c_double * 9),
        ("includemargin", c_double),
        ("friction", c_double * 5),
        ("solref", c_double * 2),
        ("solimp", c_double * 3),
        ("mu", c_double),
        ("coef", c_double * 5),
        ("zone", c_int),
        ("dim", c_int),
        ("geom1", c_int),
        ("geom2", c_int),
        ("exclude", c_int),
        ("efc_address", c_int),
    ]
class MJRRECT(Structure):
    """ctypes mirror of the MuJoCo rendering rectangle (pixel coordinates).

    Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("left", c_int),
        ("bottom", c_int),
        ("width", c_int),
        ("height", c_int),
    ]
class MJVCAMERAPOSE(Structure):
    """ctypes mirror of the MuJoCo camera-pose struct (head/window frames).

    Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("head_pos", c_double * 3),
        ("head_right", c_double * 3),
        ("window_pos", c_double * 3),
        ("window_right", c_double * 3),
        ("window_up", c_double * 3),
        ("window_normal", c_double * 3),
        ("window_size", c_double * 2),
        ("scale", c_double),
        ("ipd", c_double),
    ]
class MJROPTION(Structure):
    """ctypes mirror of the MuJoCo renderer options struct.

    Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("stereo", c_ubyte),
        ("flags", c_ubyte * 6),
    ]
class MJRCONTEXT(Structure):
    """ctypes mirror of the MuJoCo rendering context (OpenGL resources).

    Holds GL object ids, texture tables and font metrics owned by the native
    renderer. Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("linewidth", c_float),
        ("znear", c_float),
        ("zfar", c_float),
        ("shadowclip", c_float),
        ("shadowscale", c_float),
        ("shadowsize", c_int),
        ("offwidth", c_uint),
        ("offheight", c_uint),
        ("offFBO", c_uint),
        ("offColor", c_uint),
        ("offDepthStencil", c_uint),
        ("shadowFBO", c_uint),
        ("shadowTex", c_uint),
        ("ntexture", c_uint),
        ("texture", c_int * 100),
        ("textureType", c_int * 100),
        ("basePlane", c_uint),
        ("baseMesh", c_uint),
        ("baseHField", c_uint),
        ("baseBuiltin", c_uint),
        ("baseFontNormal", c_uint),
        ("baseFontBack", c_uint),
        ("baseFontBig", c_uint),
        ("rangePlane", c_int),
        ("rangeMesh", c_int),
        ("rangeHField", c_int),
        ("rangeBuiltin", c_int),
        ("rangeFont", c_int),
        ("charWidth", c_int * 127),
        ("charWidthBig", c_int * 127),
        ("charHeight", c_int),
        ("charHeightBig", c_int),
        ("glewInitialized", c_int),
    ]
class MJVCAMERA(Structure):
    """ctypes mirror of the MuJoCo visualization camera struct.

    Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("fovy", c_double),
        ("camid", c_int),
        ("trackbodyid", c_int),
        ("lookat", c_double * 3),
        ("azimuth", c_double),
        ("elevation", c_double),
        ("distance", c_double),
        ("pose", MJVCAMERAPOSE),
        ("VR", c_ubyte),
    ]
class MJVOPTION(Structure):
    """ctypes mirror of the MuJoCo visualization options struct.

    Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("label", c_int),
        ("frame", c_int),
        ("geomgroup", c_ubyte * 5),
        ("sitegroup", c_ubyte * 5),
        ("flags", c_ubyte * 18),
    ]
class MJVGEOM(Structure):
    """ctypes mirror of a MuJoCo abstract visualization geom.

    Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("type", c_int),
        ("dataid", c_int),
        ("objtype", c_int),
        ("objid", c_int),
        ("category", c_int),
        ("texid", c_int),
        ("texuniform", c_int),
        ("texrepeat", c_float * 2),
        ("size", c_float * 3),
        ("pos", c_float * 3),
        ("mat", c_float * 9),
        ("rgba", c_float * 4),
        ("emission", c_float),
        ("specular", c_float),
        ("shininess", c_float),
        ("reflectance", c_float),
        ("label", c_char * 100),
        ("camdist", c_float),
        ("rbound", c_float),
        ("transparent", c_ubyte),
    ]
class MJVLIGHT(Structure):
    """ctypes mirror of a MuJoCo visualization light source.

    Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("pos", c_float * 3),
        ("dir", c_float * 3),
        ("attenuation", c_float * 3),
        ("cutoff", c_float),
        ("exponent", c_float),
        ("ambient", c_float * 3),
        ("diffuse", c_float * 3),
        ("specular", c_float * 3),
        ("headlight", c_ubyte),
        ("directional", c_ubyte),
        ("castshadow", c_ubyte),
    ]
class MJVOBJECTS(Structure):
    """ctypes mirror of the MuJoCo visualization object list.

    `geoms`/`geomorder` point to native-allocated arrays of length `ngeom`.
    Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("nlight", c_int),
        ("ngeom", c_int),
        ("maxgeom", c_int),
        ("lights", MJVLIGHT * 8),
        ("geoms", POINTER(MJVGEOM)),
        ("geomorder", POINTER(c_int)),
    ]
class MJOPTION(Structure):
    """ctypes mirror of the MuJoCo physics options struct (mjOption).

    Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("timestep", c_double),
        ("apirate", c_double),
        ("tolerance", c_double),
        ("impratio", c_double),
        ("gravity", c_double * 3),
        ("wind", c_double * 3),
        ("magnetic", c_double * 3),
        ("density", c_double),
        ("viscosity", c_double),
        ("o_margin", c_double),
        ("o_solref", c_double * 2),
        ("o_solimp", c_double * 3),
        ("mpr_tolerance", c_double),
        ("mpr_iterations", c_int),
        ("integrator", c_int),
        ("collision", c_int),
        ("impedance", c_int),
        ("reference", c_int),
        ("solver", c_int),
        ("iterations", c_int),
        ("disableflags", c_int),
        ("enableflags", c_int),
    ]
class MJVISUAL(Structure):
    """ctypes mirror of the MuJoCo visual-settings struct (mjVisual).

    The native struct contains anonymous nested structs; they are modeled
    here as named nested Structure classes. Field names `global_` and `map_`
    carry a trailing underscore to avoid clashing with Python keywords/builtins.
    Layout must match the native struct; do not reorder fields.
    """
    class ANON_GLOBAL(Structure):
        # Nested anonymous struct: global rendering parameters.
        _fields_ = [
            ("fovy", c_float),
            ("ipd", c_float),
            ("linewidth", c_float),
            ("glow", c_float),
            ("offwidth", c_int),
            ("offheight", c_int),
        ]
    class ANON_QUALITY(Structure):
        # Nested anonymous struct: rendering quality settings.
        _fields_ = [
            ("shadowsize", c_int),
            ("numSlices", c_int),
            ("numStacks", c_int),
            ("numArrows", c_int),
            ("numQuads", c_int),
        ]
    class ANON_HEADLIGHT(Structure):
        # Nested anonymous struct: headlight colors and toggle.
        _fields_ = [
            ("ambient", c_float * 3),
            ("diffuse", c_float * 3),
            ("specular", c_float * 3),
            ("active", c_int),
        ]
    class ANON_MAP(Structure):
        # Nested anonymous struct: visualization mapping coefficients.
        _fields_ = [
            ("stiffness", c_float),
            ("force", c_float),
            ("torque", c_float),
            ("alpha", c_float),
            ("fogstart", c_float),
            ("fogend", c_float),
            ("znear", c_float),
            ("zfar", c_float),
            ("shadowclip", c_float),
            ("shadowscale", c_float),
        ]
    class ANON_SCALE(Structure):
        # Nested anonymous struct: scale factors for decorative elements.
        _fields_ = [
            ("forcewidth", c_float),
            ("contactwidth", c_float),
            ("contactheight", c_float),
            ("connect", c_float),
            ("com", c_float),
            ("camera", c_float),
            ("light", c_float),
            ("selectpoint", c_float),
            ("jointlength", c_float),
            ("jointwidth", c_float),
            ("actuatorlength", c_float),
            ("actuatorwidth", c_float),
            ("framelength", c_float),
            ("framewidth", c_float),
            ("constraint", c_float),
            ("slidercrank", c_float),
        ]
    class ANON_RGBA(Structure):
        # Nested anonymous struct: RGBA colors for decorative elements.
        _fields_ = [
            ("fog", c_float * 4),
            ("force", c_float * 4),
            ("inertia", c_float * 4),
            ("joint", c_float * 4),
            ("actuator", c_float * 4),
            ("com", c_float * 4),
            ("camera", c_float * 4),
            ("light", c_float * 4),
            ("selectpoint", c_float * 4),
            ("connect", c_float * 4),
            ("contactpoint", c_float * 4),
            ("contactforce", c_float * 4),
            ("contactfriction", c_float * 4),
            ("contacttorque", c_float * 4),
            ("constraint", c_float * 4),
            ("slidercrank", c_float * 4),
        ]
    _fields_ = [
        ("global_", ANON_GLOBAL),
        ("quality", ANON_QUALITY),
        ("headlight", ANON_HEADLIGHT),
        ("map_", ANON_MAP),
        ("scale", ANON_SCALE),
        ("rgba", ANON_RGBA),
    ]
class MJSTATISTIC(Structure):
    """ctypes mirror of the MuJoCo model-statistics struct (mjStatistic).

    Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("meanmass", c_double),
        ("meansize", c_double),
        ("extent", c_double),
        ("center", c_double * 3),
    ]
class MJDATA(Structure):
    """ctypes mirror of the MuJoCo dynamic simulation state (mjData).

    Pointer fields reference buffers allocated by the native library; array
    sizes are implied by the counts in the companion MJMODEL. Layout must
    match the native struct; do not reorder fields.
    """
    # NOTE(review): "e_ARchol" and "fc_e_rect" below break the surrounding
    # "efc_*" naming pattern — they look like generator artifacts, but the
    # names are part of the binary layout as generated, so leave them as-is
    # unless confirmed against the generator/native header.
    _fields_ = [
        ("nstack", c_int),
        ("nbuffer", c_int),
        ("pstack", c_int),
        ("maxstackuse", c_int),
        ("ne", c_int),
        ("nf", c_int),
        ("nefc", c_int),
        ("ncon", c_int),
        ("nwarning", c_int * 8),
        ("warning_info", c_int * 8),
        ("timer_duration", c_double * 14),
        ("timer_ncall", c_double * 14),
        ("mocaptime", c_double * 3),
        ("time", c_double),
        ("energy", c_double * 2),
        ("solverstat", c_double * 4),
        ("solvertrace", c_double * 200),
        ("buffer", POINTER(c_ubyte)),
        ("stack", POINTER(c_double)),
        ("qpos", POINTER(c_double)),
        ("qvel", POINTER(c_double)),
        ("act", POINTER(c_double)),
        ("ctrl", POINTER(c_double)),
        ("qfrc_applied", POINTER(c_double)),
        ("xfrc_applied", POINTER(c_double)),
        ("qacc", POINTER(c_double)),
        ("act_dot", POINTER(c_double)),
        ("mocap_pos", POINTER(c_double)),
        ("mocap_quat", POINTER(c_double)),
        ("userdata", POINTER(c_double)),
        ("sensordata", POINTER(c_double)),
        ("xpos", POINTER(c_double)),
        ("xquat", POINTER(c_double)),
        ("xmat", POINTER(c_double)),
        ("xipos", POINTER(c_double)),
        ("ximat", POINTER(c_double)),
        ("xanchor", POINTER(c_double)),
        ("xaxis", POINTER(c_double)),
        ("geom_xpos", POINTER(c_double)),
        ("geom_xmat", POINTER(c_double)),
        ("site_xpos", POINTER(c_double)),
        ("site_xmat", POINTER(c_double)),
        ("cam_xpos", POINTER(c_double)),
        ("cam_xmat", POINTER(c_double)),
        ("light_xpos", POINTER(c_double)),
        ("light_xdir", POINTER(c_double)),
        ("com_subtree", POINTER(c_double)),
        ("cdof", POINTER(c_double)),
        ("cinert", POINTER(c_double)),
        ("ten_wrapadr", POINTER(c_int)),
        ("ten_wrapnum", POINTER(c_int)),
        ("ten_length", POINTER(c_double)),
        ("ten_moment", POINTER(c_double)),
        ("wrap_obj", POINTER(c_int)),
        ("wrap_xpos", POINTER(c_double)),
        ("actuator_length", POINTER(c_double)),
        ("actuator_moment", POINTER(c_double)),
        ("crb", POINTER(c_double)),
        ("qM", POINTER(c_double)),
        ("qLD", POINTER(c_double)),
        ("qLDiagInv", POINTER(c_double)),
        ("qLDiagSqrtInv", POINTER(c_double)),
        ("contact", POINTER(MJCONTACT)),
        ("efc_type", POINTER(c_int)),
        ("efc_id", POINTER(c_int)),
        ("efc_rownnz", POINTER(c_int)),
        ("efc_rowadr", POINTER(c_int)),
        ("efc_colind", POINTER(c_int)),
        ("efc_rownnz_T", POINTER(c_int)),
        ("efc_rowadr_T", POINTER(c_int)),
        ("efc_colind_T", POINTER(c_int)),
        ("efc_solref", POINTER(c_double)),
        ("efc_solimp", POINTER(c_double)),
        ("efc_margin", POINTER(c_double)),
        ("efc_frictionloss", POINTER(c_double)),
        ("efc_pos", POINTER(c_double)),
        ("efc_J", POINTER(c_double)),
        ("efc_J_T", POINTER(c_double)),
        ("efc_diagApprox", POINTER(c_double)),
        ("efc_D", POINTER(c_double)),
        ("efc_R", POINTER(c_double)),
        ("efc_AR", POINTER(c_double)),
        ("e_ARchol", POINTER(c_double)),
        ("fc_e_rect", POINTER(c_double)),
        ("fc_AR", POINTER(c_double)),
        ("ten_velocity", POINTER(c_double)),
        ("actuator_velocity", POINTER(c_double)),
        ("cvel", POINTER(c_double)),
        ("cdof_dot", POINTER(c_double)),
        ("qfrc_bias", POINTER(c_double)),
        ("qfrc_passive", POINTER(c_double)),
        ("efc_vel", POINTER(c_double)),
        ("efc_aref", POINTER(c_double)),
        ("actuator_force", POINTER(c_double)),
        ("qfrc_actuator", POINTER(c_double)),
        ("qfrc_unc", POINTER(c_double)),
        ("qacc_unc", POINTER(c_double)),
        ("efc_b", POINTER(c_double)),
        ("fc_b", POINTER(c_double)),
        ("efc_force", POINTER(c_double)),
        ("qfrc_constraint", POINTER(c_double)),
        ("qfrc_inverse", POINTER(c_double)),
        ("cacc", POINTER(c_double)),
        ("cfrc_int", POINTER(c_double)),
        ("cfrc_ext", POINTER(c_double)),
    ]
class MJMODEL(Structure):
    """ctypes mirror of the static MuJoCo model description (mjModel).

    Leading `n*` integers are element counts; the pointer fields reference
    buffers allocated by the native library and sized by those counts.
    Layout must match the native struct; do not reorder fields.
    """
    _fields_ = [
        ("nq", c_int),
        ("nv", c_int),
        ("nu", c_int),
        ("na", c_int),
        ("nbody", c_int),
        ("njnt", c_int),
        ("ngeom", c_int),
        ("nsite", c_int),
        ("ncam", c_int),
        ("nlight", c_int),
        ("nmesh", c_int),
        ("nmeshvert", c_int),
        ("nmeshface", c_int),
        ("nmeshgraph", c_int),
        ("nhfield", c_int),
        ("nhfielddata", c_int),
        ("ntex", c_int),
        ("ntexdata", c_int),
        ("nmat", c_int),
        ("npair", c_int),
        ("nexclude", c_int),
        ("neq", c_int),
        ("ntendon", c_int),
        ("nwrap", c_int),
        ("nsensor", c_int),
        ("nnumeric", c_int),
        ("nnumericdata", c_int),
        ("ntext", c_int),
        ("ntextdata", c_int),
        ("nkey", c_int),
        ("nuser_body", c_int),
        ("nuser_jnt", c_int),
        ("nuser_geom", c_int),
        ("nuser_site", c_int),
        ("nuser_tendon", c_int),
        ("nuser_actuator", c_int),
        ("nuser_sensor", c_int),
        ("nnames", c_int),
        ("nM", c_int),
        ("nemax", c_int),
        ("njmax", c_int),
        ("nconmax", c_int),
        ("nstack", c_int),
        ("nuserdata", c_int),
        ("nmocap", c_int),
        ("nsensordata", c_int),
        ("nbuffer", c_int),
        ("opt", MJOPTION),
        ("vis", MJVISUAL),
        ("stat", MJSTATISTIC),
        ("buffer", POINTER(c_ubyte)),
        ("qpos0", POINTER(c_double)),
        ("qpos_spring", POINTER(c_double)),
        ("body_parentid", POINTER(c_int)),
        ("body_rootid", POINTER(c_int)),
        ("body_weldid", POINTER(c_int)),
        ("body_mocapid", POINTER(c_int)),
        ("body_jntnum", POINTER(c_int)),
        ("body_jntadr", POINTER(c_int)),
        ("body_dofnum", POINTER(c_int)),
        ("body_dofadr", POINTER(c_int)),
        ("body_geomnum", POINTER(c_int)),
        ("body_geomadr", POINTER(c_int)),
        ("body_pos", POINTER(c_double)),
        ("body_quat", POINTER(c_double)),
        ("body_ipos", POINTER(c_double)),
        ("body_iquat", POINTER(c_double)),
        ("body_mass", POINTER(c_double)),
        ("body_inertia", POINTER(c_double)),
        ("body_invweight0", POINTER(c_double)),
        ("body_user", POINTER(c_double)),
        ("jnt_type", POINTER(c_int)),
        ("jnt_qposadr", POINTER(c_int)),
        ("jnt_dofadr", POINTER(c_int)),
        ("jnt_bodyid", POINTER(c_int)),
        ("jnt_limited", POINTER(c_ubyte)),
        ("jnt_solref", POINTER(c_double)),
        ("jnt_solimp", POINTER(c_double)),
        ("jnt_pos", POINTER(c_double)),
        ("jnt_axis", POINTER(c_double)),
        ("jnt_stiffness", POINTER(c_double)),
        ("jnt_range", POINTER(c_double)),
        ("jnt_margin", POINTER(c_double)),
        ("jnt_user", POINTER(c_double)),
        ("dof_bodyid", POINTER(c_int)),
        ("dof_jntid", POINTER(c_int)),
        ("dof_parentid", POINTER(c_int)),
        ("dof_Madr", POINTER(c_int)),
        ("dof_frictional", POINTER(c_ubyte)),
        ("dof_solref", POINTER(c_double)),
        ("dof_solimp", POINTER(c_double)),
        ("dof_frictionloss", POINTER(c_double)),
        ("dof_armature", POINTER(c_double)),
        ("dof_damping", POINTER(c_double)),
        ("dof_invweight0", POINTER(c_double)),
        ("geom_type", POINTER(c_int)),
        ("geom_contype", POINTER(c_int)),
        ("geom_conaffinity", POINTER(c_int)),
        ("geom_condim", POINTER(c_int)),
        ("geom_bodyid", POINTER(c_int)),
        ("geom_dataid", POINTER(c_int)),
        ("geom_matid", POINTER(c_int)),
        ("geom_group", POINTER(c_int)),
        ("geom_solmix", POINTER(c_double)),
        ("geom_solref", POINTER(c_double)),
        ("geom_solimp", POINTER(c_double)),
        ("geom_size", POINTER(c_double)),
        ("geom_rbound", POINTER(c_double)),
        ("geom_pos", POINTER(c_double)),
        ("geom_quat", POINTER(c_double)),
        ("geom_friction", POINTER(c_double)),
        ("geom_margin", POINTER(c_double)),
        ("geom_gap", POINTER(c_double)),
        ("geom_user", POINTER(c_double)),
        ("geom_rgba", POINTER(c_float)),
        ("site_type", POINTER(c_int)),
        ("site_bodyid", POINTER(c_int)),
        ("site_matid", POINTER(c_int)),
        ("site_group", POINTER(c_int)),
        ("site_size", POINTER(c_double)),
        ("site_pos", POINTER(c_double)),
        ("site_quat", POINTER(c_double)),
        ("site_user", POINTER(c_double)),
        ("site_rgba", POINTER(c_float)),
        ("cam_mode", POINTER(c_int)),
        ("cam_bodyid", POINTER(c_int)),
        ("cam_targetbodyid", POINTER(c_int)),
        ("cam_pos", POINTER(c_double)),
        ("cam_quat", POINTER(c_double)),
        ("cam_poscom0", POINTER(c_double)),
        ("cam_pos0", POINTER(c_double)),
        ("cam_mat0", POINTER(c_double)),
        ("cam_fovy", POINTER(c_double)),
        ("cam_ipd", POINTER(c_double)),
        ("light_mode", POINTER(c_int)),
        ("light_bodyid", POINTER(c_int)),
        ("light_targetbodyid", POINTER(c_int)),
        ("light_directional", POINTER(c_ubyte)),
        ("light_castshadow", POINTER(c_ubyte)),
        ("light_active", POINTER(c_ubyte)),
        ("light_pos", POINTER(c_double)),
        ("light_dir", POINTER(c_double)),
        ("light_poscom0", POINTER(c_double)),
        ("light_pos0", POINTER(c_double)),
        ("light_dir0", POINTER(c_double)),
        ("light_attenuation", POINTER(c_float)),
        ("light_cutoff", POINTER(c_float)),
        ("light_exponent", POINTER(c_float)),
        ("light_ambient", POINTER(c_float)),
        ("light_diffuse", POINTER(c_float)),
        ("light_specular", POINTER(c_float)),
        ("mesh_faceadr", POINTER(c_int)),
        ("mesh_facenum", POINTER(c_int)),
        ("mesh_vertadr", POINTER(c_int)),
        ("mesh_vertnum", POINTER(c_int)),
        ("mesh_graphadr", POINTER(c_int)),
        ("mesh_vert", POINTER(c_float)),
        ("mesh_normal", POINTER(c_float)),
        ("mesh_face", POINTER(c_int)),
        ("mesh_graph", POINTER(c_int)),
        ("hfield_size", POINTER(c_double)),
        ("hfield_nrow", POINTER(c_int)),
        ("hfield_ncol", POINTER(c_int)),
        ("hfield_adr", POINTER(c_int)),
        ("hfield_data", POINTER(c_float)),
        ("tex_type", POINTER(c_int)),
        ("tex_height", POINTER(c_int)),
        ("tex_width", POINTER(c_int)),
        ("tex_adr", POINTER(c_int)),
        ("tex_rgb", POINTER(c_ubyte)),
        ("mat_texid", POINTER(c_int)),
        ("mat_texuniform", POINTER(c_ubyte)),
        ("mat_texrepeat", POINTER(c_float)),
        ("mat_emission", POINTER(c_float)),
        ("mat_specular", POINTER(c_float)),
        ("mat_shininess", POINTER(c_float)),
        ("mat_reflectance", POINTER(c_float)),
        ("mat_rgba", POINTER(c_float)),
        ("pair_dim", POINTER(c_int)),
        ("pair_geom1", POINTER(c_int)),
        ("pair_geom2", POINTER(c_int)),
        ("pair_signature", POINTER(c_int)),
        ("pair_solref", POINTER(c_double)),
        ("pair_solimp", POINTER(c_double)),
        ("pair_margin", POINTER(c_double)),
        ("pair_gap", POINTER(c_double)),
        ("pair_friction", POINTER(c_double)),
        ("exclude_signature", POINTER(c_int)),
        ("eq_type", POINTER(c_int)),
        ("eq_obj1id", POINTER(c_int)),
        ("eq_obj2id", POINTER(c_int)),
        ("eq_active", POINTER(c_ubyte)),
        ("eq_solref", POINTER(c_double)),
        ("eq_solimp", POINTER(c_double)),
        ("eq_data", POINTER(c_double)),
        ("tendon_adr", POINTER(c_int)),
        ("tendon_num", POINTER(c_int)),
        ("tendon_matid", POINTER(c_int)),
        ("tendon_limited", POINTER(c_ubyte)),
        ("tendon_frictional", POINTER(c_ubyte)),
        ("tendon_width", POINTER(c_double)),
        ("tendon_solref_lim", POINTER(c_double)),
        ("tendon_solimp_lim", POINTER(c_double)),
        ("tendon_solref_fri", POINTER(c_double)),
        ("tendon_solimp_fri", POINTER(c_double)),
        ("tendon_range", POINTER(c_double)),
        ("tendon_margin", POINTER(c_double)),
        ("tendon_stiffness", POINTER(c_double)),
        ("tendon_damping", POINTER(c_double)),
        ("tendon_frictionloss", POINTER(c_double)),
        ("tendon_lengthspring", POINTER(c_double)),
        ("tendon_length0", POINTER(c_double)),
        ("tendon_invweight0", POINTER(c_double)),
        ("tendon_user", POINTER(c_double)),
        ("tendon_rgba", POINTER(c_float)),
        ("wrap_type", POINTER(c_int)),
        ("wrap_objid", POINTER(c_int)),
        ("wrap_prm", POINTER(c_double)),
        ("actuator_trntype", POINTER(c_int)),
        ("actuator_dyntype", POINTER(c_int)),
        ("actuator_gaintype", POINTER(c_int)),
        ("actuator_biastype", POINTER(c_int)),
        ("actuator_trnid", POINTER(c_int)),
        ("actuator_ctrllimited", POINTER(c_ubyte)),
        ("actuator_forcelimited", POINTER(c_ubyte)),
        ("actuator_dynprm", POINTER(c_double)),
        ("actuator_gainprm", POINTER(c_double)),
        ("actuator_biasprm", POINTER(c_double)),
        ("actuator_ctrlrange", POINTER(c_double)),
        ("actuator_forcerange", POINTER(c_double)),
        ("actuator_gear", POINTER(c_double)),
        ("actuator_cranklength", POINTER(c_double)),
        ("actuator_invweight0", POINTER(c_double)),
        ("actuator_length0", POINTER(c_double)),
        ("actuator_lengthrange", POINTER(c_double)),
        ("actuator_user", POINTER(c_double)),
        ("sensor_type", POINTER(c_int)),
        ("sensor_objid", POINTER(c_int)),
        ("sensor_dim", POINTER(c_int)),
        ("sensor_adr", POINTER(c_int)),
        ("sensor_scale", POINTER(c_double)),
        ("sensor_user", POINTER(c_double)),
        ("numeric_adr", POINTER(c_int)),
        ("numeric_size", POINTER(c_int)),
        ("numeric_data", POINTER(c_double)),
        ("text_adr", POINTER(c_int)),
        ("text_data", POINTER(c_char)),
        ("key_time", POINTER(c_double)),
        ("key_qpos", POINTER(c_double)),
        ("key_qvel", POINTER(c_double)),
        ("key_act", POINTER(c_double)),
        ("name_bodyadr", POINTER(c_int)),
        ("name_jntadr", POINTER(c_int)),
        ("name_geomadr", POINTER(c_int)),
        ("name_siteadr", POINTER(c_int)),
        ("name_camadr", POINTER(c_int)),
        ("name_lightadr", POINTER(c_int)),
        ("name_meshadr", POINTER(c_int)),
        ("name_hfieldadr", POINTER(c_int)),
        ("name_texadr", POINTER(c_int)),
        ("name_matadr", POINTER(c_int)),
        ("name_eqadr", POINTER(c_int)),
        ("name_tendonadr", POINTER(c_int)),
        ("name_actuatoradr", POINTER(c_int)),
        ("name_sensoradr", POINTER(c_int)),
        ("name_numericadr", POINTER(c_int)),
        ("name_textadr", POINTER(c_int)),
        ("names", POINTER(c_char)),
    ]
class MjContactWrapper(object):
    """Pythonic accessor over a ctypes pointer to an MJCONTACT struct.

    Scalar fields are proxied straight through to the underlying struct.
    Fixed-size double arrays are returned as read-only numpy copies and are
    written back into the native memory with memmove.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        # Raw ctypes pointer to the wrapped struct.
        return self._wrapped

    @property
    def obj(self):
        # Dereferenced struct instance.
        return self._wrapped.contents

    @property
    def dist(self):
        return self.obj.dist

    @dist.setter
    def dist(self, value):
        self.obj.dist = value

    @property
    def pos(self):
        out = np.fromiter(self.obj.pos, dtype=np.double, count=3).reshape((3,))
        out.setflags(write=False)
        return out

    @pos.setter
    def pos(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.pos, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 3)

    @property
    def frame(self):
        out = np.fromiter(self.obj.frame, dtype=np.double, count=9).reshape((9,))
        out.setflags(write=False)
        return out

    @frame.setter
    def frame(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.frame, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 9)

    @property
    def includemargin(self):
        return self.obj.includemargin

    @includemargin.setter
    def includemargin(self, value):
        self.obj.includemargin = value

    @property
    def friction(self):
        out = np.fromiter(self.obj.friction, dtype=np.double, count=5).reshape((5,))
        out.setflags(write=False)
        return out

    @friction.setter
    def friction(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.friction, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 5)

    @property
    def solref(self):
        out = np.fromiter(self.obj.solref, dtype=np.double, count=2).reshape((2,))
        out.setflags(write=False)
        return out

    @solref.setter
    def solref(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.solref, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 2)

    @property
    def solimp(self):
        out = np.fromiter(self.obj.solimp, dtype=np.double, count=3).reshape((3,))
        out.setflags(write=False)
        return out

    @solimp.setter
    def solimp(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.solimp, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 3)

    @property
    def mu(self):
        return self.obj.mu

    @mu.setter
    def mu(self, value):
        self.obj.mu = value

    @property
    def coef(self):
        out = np.fromiter(self.obj.coef, dtype=np.double, count=5).reshape((5,))
        out.setflags(write=False)
        return out

    @coef.setter
    def coef(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.coef, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 5)

    @property
    def zone(self):
        return self.obj.zone

    @zone.setter
    def zone(self, value):
        self.obj.zone = value

    @property
    def dim(self):
        return self.obj.dim

    @dim.setter
    def dim(self, value):
        self.obj.dim = value

    @property
    def geom1(self):
        return self.obj.geom1

    @geom1.setter
    def geom1(self, value):
        self.obj.geom1 = value

    @property
    def geom2(self):
        return self.obj.geom2

    @geom2.setter
    def geom2(self, value):
        self.obj.geom2 = value

    @property
    def exclude(self):
        return self.obj.exclude

    @exclude.setter
    def exclude(self, value):
        self.obj.exclude = value

    @property
    def efc_address(self):
        return self.obj.efc_address

    @efc_address.setter
    def efc_address(self, value):
        self.obj.efc_address = value
class MjrRectWrapper(object):
    """Pythonic accessor over a ctypes pointer to an MJRRECT struct.

    All four fields are plain ints proxied directly to native memory.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        # Raw ctypes pointer to the wrapped struct.
        return self._wrapped

    @property
    def obj(self):
        # Dereferenced struct instance.
        return self._wrapped.contents

    @property
    def left(self):
        return self.obj.left

    @left.setter
    def left(self, value):
        self.obj.left = value

    @property
    def bottom(self):
        return self.obj.bottom

    @bottom.setter
    def bottom(self, value):
        self.obj.bottom = value

    @property
    def width(self):
        return self.obj.width

    @width.setter
    def width(self, value):
        self.obj.width = value

    @property
    def height(self):
        return self.obj.height

    @height.setter
    def height(self, value):
        self.obj.height = value
class MjvCameraPoseWrapper(object):
    """Pythonic accessor over a ctypes pointer to an MJVCAMERAPOSE struct.

    Vector fields are returned as read-only numpy copies and written back
    into native memory with memmove; scalars are proxied directly.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        # Raw ctypes pointer to the wrapped struct.
        return self._wrapped

    @property
    def obj(self):
        # Dereferenced struct instance.
        return self._wrapped.contents

    @property
    def head_pos(self):
        out = np.fromiter(self.obj.head_pos, dtype=np.double, count=3).reshape((3,))
        out.setflags(write=False)
        return out

    @head_pos.setter
    def head_pos(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.head_pos, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 3)

    @property
    def head_right(self):
        out = np.fromiter(self.obj.head_right, dtype=np.double, count=3).reshape((3,))
        out.setflags(write=False)
        return out

    @head_right.setter
    def head_right(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.head_right, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 3)

    @property
    def window_pos(self):
        out = np.fromiter(self.obj.window_pos, dtype=np.double, count=3).reshape((3,))
        out.setflags(write=False)
        return out

    @window_pos.setter
    def window_pos(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.window_pos, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 3)

    @property
    def window_right(self):
        out = np.fromiter(self.obj.window_right, dtype=np.double, count=3).reshape((3,))
        out.setflags(write=False)
        return out

    @window_right.setter
    def window_right(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.window_right, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 3)

    @property
    def window_up(self):
        out = np.fromiter(self.obj.window_up, dtype=np.double, count=3).reshape((3,))
        out.setflags(write=False)
        return out

    @window_up.setter
    def window_up(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.window_up, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 3)

    @property
    def window_normal(self):
        out = np.fromiter(self.obj.window_normal, dtype=np.double, count=3).reshape((3,))
        out.setflags(write=False)
        return out

    @window_normal.setter
    def window_normal(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.window_normal, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 3)

    @property
    def window_size(self):
        out = np.fromiter(self.obj.window_size, dtype=np.double, count=2).reshape((2,))
        out.setflags(write=False)
        return out

    @window_size.setter
    def window_size(self, value):
        src = np.array(value, dtype=np.float64)
        memmove(self.obj.window_size, src.ctypes.data_as(POINTER(c_double)),
                sizeof(c_double) * 2)

    @property
    def scale(self):
        return self.obj.scale

    @scale.setter
    def scale(self, value):
        self.obj.scale = value

    @property
    def ipd(self):
        return self.obj.ipd

    @ipd.setter
    def ipd(self, value):
        self.obj.ipd = value
class MjrOptionWrapper(object):
    """Pythonic accessor over a ctypes pointer to an MJROPTION struct.

    `stereo` is proxied directly; `flags` (6 unsigned bytes) is returned as
    a read-only numpy copy and written back into native memory with memmove.
    """
    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src
    @property
    def ptr(self):
        # Raw ctypes pointer to the wrapped struct.
        return self._wrapped
    @property
    def obj(self):
        # Dereferenced struct instance.
        return self._wrapped.contents
    @property
    def stereo(self):
        return self._wrapped.contents.stereo
    @stereo.setter
    def stereo(self, value):
        self._wrapped.contents.stereo = value
    @property
    def flags(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.flags, dtype=np.uint8, count=(6)), (6, ))
        arr.setflags(write=False)
        return arr
    @flags.setter
    def flags(self, value):
        # BUG FIX: the generated code converted `value` to float64 and then
        # memmoved 6 *bytes*, copying the first 6 raw bytes of a float64
        # buffer into the c_ubyte[6] array (e.g. [1, ...] stored zeros).
        # Convert to uint8 so the byte layout matches the destination.
        val_ptr = np.array(value, dtype=np.uint8).ctypes.data_as(POINTER(c_ubyte))
        memmove(self._wrapped.contents.flags, val_ptr, 6 * sizeof(c_ubyte))
def _mjrcontext_scalar(field):
    """Pass-through property for a scalar mjrContext field."""
    def fget(self):
        return getattr(self._wrapped.contents, field)
    def fset(self, value):
        setattr(self._wrapped.contents, field, value)
    return property(fget, fset)


def _mjrcontext_int_array(field, count):
    """Property for a fixed-size c_int array field of mjrContext."""
    def fget(self):
        # dtype=int replaces the deprecated alias np.int (removed in numpy>=1.24);
        # the returned array is a read-only copy, same as before.
        arr = np.reshape(np.fromiter(getattr(self._wrapped.contents, field),
                                     dtype=int, count=count), (count,))
        arr.setflags(write=False)
        return arr
    def fset(self, value):
        # Bug fix: the memmove source must be C-int typed; the previous
        # float64 buffer copied raw float bytes into the int array.
        buf = np.array(value, dtype=np.intc)
        if buf.size != count:
            raise ValueError("expected %d values, got %d" % (count, buf.size))
        memmove(getattr(self._wrapped.contents, field),
                buf.ctypes.data_as(POINTER(c_int)), count * sizeof(c_int))
    return property(fget, fset)


class MjrContextWrapper(object):
    """ctypes-backed view over an mjrContext (rendering context) pointer.

    Scalar fields proxy straight through; the texture/charWidth int arrays
    are copied out as read-only numpy arrays and copied back on assignment.
    Properties are installed below via the factory helpers.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        # Raw ctypes pointer to the underlying struct.
        return self._wrapped

    @property
    def obj(self):
        # Dereferenced ctypes struct.
        return self._wrapped.contents


# Scalar pass-through fields (same names and semantics as the expanded form).
for _f in ("linewidth", "znear", "zfar", "shadowclip", "shadowscale",
           "shadowsize", "offwidth", "offheight", "offFBO", "offColor",
           "offDepthStencil", "shadowFBO", "shadowTex", "ntexture",
           "basePlane", "baseMesh", "baseHField", "baseBuiltin",
           "baseFontNormal", "baseFontBack", "baseFontBig", "rangePlane",
           "rangeMesh", "rangeHField", "rangeBuiltin", "rangeFont",
           "charHeight", "charHeightBig", "glewInitialized"):
    setattr(MjrContextWrapper, _f, _mjrcontext_scalar(_f))
# Fixed-size c_int array fields.
for _f, _n in (("texture", 100), ("textureType", 100),
               ("charWidth", 127), ("charWidthBig", 127)):
    setattr(MjrContextWrapper, _f, _mjrcontext_int_array(_f, _n))
del _f, _n
class MjvCameraWrapper(object):
    """ctypes-backed view over an mjvCamera struct pointer.

    Scalar fields proxy straight through; `lookat` is copied out as a
    read-only numpy array and copied back with memmove on assignment.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        # Raw ctypes pointer to the underlying struct.
        return self._wrapped

    @property
    def obj(self):
        # Dereferenced ctypes struct.
        return self._wrapped.contents

    @property
    def lookat(self):
        out = np.fromiter(self._wrapped.contents.lookat, dtype=np.double, count=3)
        out = np.reshape(out, (3,))
        out.setflags(write=False)
        return out

    @lookat.setter
    def lookat(self, value):
        src = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.lookat, src, 3 * sizeof(c_double))


# Install the scalar pass-through properties.
for _field in ("fovy", "camid", "trackbodyid", "azimuth", "elevation",
               "distance", "pose", "VR"):
    def _cam_get(self, _f=_field):
        return getattr(self._wrapped.contents, _f)
    def _cam_set(self, value, _f=_field):
        setattr(self._wrapped.contents, _f, value)
    setattr(MjvCameraWrapper, _field, property(_cam_get, _cam_set))
del _field, _cam_get, _cam_set
def _mjvoption_ubyte_array(field, count):
    """Property for a fixed-size c_ubyte array field of mjvOption."""
    def fget(self):
        arr = np.reshape(np.fromiter(getattr(self._wrapped.contents, field),
                                     dtype=np.uint8, count=count), (count,))
        arr.setflags(write=False)
        return arr
    def fset(self, value):
        # Bug fix: the memmove source must be uint8 to match c_ubyte; the
        # previous float64 buffer copied raw float bytes into the byte array.
        buf = np.array(value, dtype=np.uint8)
        if buf.size != count:
            raise ValueError("expected %d values, got %d" % (count, buf.size))
        memmove(getattr(self._wrapped.contents, field),
                buf.ctypes.data_as(POINTER(c_ubyte)), count * sizeof(c_ubyte))
    return property(fget, fset)


class MjvOptionWrapper(object):
    """ctypes-backed view over an mjvOption struct pointer.

    `label` and `frame` proxy straight through; the group/flag byte arrays
    are copied out as read-only numpy arrays and copied back on assignment.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        return self._wrapped

    @property
    def obj(self):
        return self._wrapped.contents

    @property
    def label(self):
        return self._wrapped.contents.label

    @label.setter
    def label(self, value):
        self._wrapped.contents.label = value

    @property
    def frame(self):
        return self._wrapped.contents.frame

    @frame.setter
    def frame(self, value):
        self._wrapped.contents.frame = value


MjvOptionWrapper.geomgroup = _mjvoption_ubyte_array("geomgroup", 5)
MjvOptionWrapper.sitegroup = _mjvoption_ubyte_array("sitegroup", 5)
MjvOptionWrapper.flags = _mjvoption_ubyte_array("flags", 18)
def _mjvgeom_scalar(field):
    """Pass-through property for a scalar mjvGeom field."""
    def fget(self):
        return getattr(self._wrapped.contents, field)
    def fset(self, value):
        setattr(self._wrapped.contents, field, value)
    return property(fget, fset)


def _mjvgeom_float_array(field, count):
    """Property for a fixed-size c_float array field of mjvGeom."""
    def fget(self):
        # dtype=float keeps the original float64 return type while dropping
        # the deprecated alias np.float (removed in numpy>=1.24).
        arr = np.reshape(np.fromiter(getattr(self._wrapped.contents, field),
                                     dtype=float, count=count), (count,))
        arr.setflags(write=False)
        return arr
    def fset(self, value):
        # Bug fix: the memmove source must be float32 to match c_float; the
        # previous float64 buffer copied the wrong bytes.
        buf = np.array(value, dtype=np.float32)
        if buf.size != count:
            raise ValueError("expected %d values, got %d" % (count, buf.size))
        memmove(getattr(self._wrapped.contents, field),
                buf.ctypes.data_as(POINTER(c_float)), count * sizeof(c_float))
    return property(fget, fset)


class MjvGeomWrapper(object):
    """ctypes-backed view over an mjvGeom (abstract visual geom) pointer.

    Scalar fields proxy straight through; the float arrays are copied out
    as read-only numpy arrays and copied back on assignment.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        return self._wrapped

    @property
    def obj(self):
        return self._wrapped.contents


for _f in ("type", "dataid", "objtype", "objid", "category", "texid",
           "texuniform", "emission", "specular", "shininess", "reflectance",
           "label", "camdist", "rbound", "transparent"):
    setattr(MjvGeomWrapper, _f, _mjvgeom_scalar(_f))
for _f, _n in (("texrepeat", 2), ("size", 3), ("pos", 3), ("mat", 9),
               ("rgba", 4)):
    setattr(MjvGeomWrapper, _f, _mjvgeom_float_array(_f, _n))
del _f, _n
def _mjvlight_scalar(field):
    """Pass-through property for a scalar mjvLight field."""
    def fget(self):
        return getattr(self._wrapped.contents, field)
    def fset(self, value):
        setattr(self._wrapped.contents, field, value)
    return property(fget, fset)


def _mjvlight_float_array(field, count):
    """Property for a fixed-size c_float array field of mjvLight."""
    def fget(self):
        # dtype=float keeps the original float64 return type while dropping
        # the deprecated alias np.float (removed in numpy>=1.24).
        arr = np.reshape(np.fromiter(getattr(self._wrapped.contents, field),
                                     dtype=float, count=count), (count,))
        arr.setflags(write=False)
        return arr
    def fset(self, value):
        # Bug fix: the memmove source must be float32 to match c_float; the
        # previous float64 buffer copied the wrong bytes.
        buf = np.array(value, dtype=np.float32)
        if buf.size != count:
            raise ValueError("expected %d values, got %d" % (count, buf.size))
        memmove(getattr(self._wrapped.contents, field),
                buf.ctypes.data_as(POINTER(c_float)), count * sizeof(c_float))
    return property(fget, fset)


class MjvLightWrapper(object):
    """ctypes-backed view over an mjvLight struct pointer.

    Scalar fields proxy straight through; the 3-element float arrays are
    copied out as read-only numpy arrays and copied back on assignment.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        return self._wrapped

    @property
    def obj(self):
        return self._wrapped.contents


for _f in ("cutoff", "exponent", "headlight", "directional", "castshadow"):
    setattr(MjvLightWrapper, _f, _mjvlight_scalar(_f))
for _f in ("pos", "dir", "attenuation", "ambient", "diffuse", "specular"):
    setattr(MjvLightWrapper, _f, _mjvlight_float_array(_f, 3))
del _f
class MjvObjectsWrapper(object):
    """ctypes-backed view over an mjvObjects struct pointer; every field is
    a plain pass-through to the wrapped struct."""

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        # Raw ctypes pointer to the underlying struct.
        return self._wrapped

    @property
    def obj(self):
        # Dereferenced ctypes struct.
        return self._wrapped.contents


# Install the scalar pass-through properties.
for _field in ("nlight", "ngeom", "maxgeom", "lights"):
    def _objs_get(self, _f=_field):
        return getattr(self._wrapped.contents, _f)
    def _objs_set(self, value, _f=_field):
        setattr(self._wrapped.contents, _f, value)
    setattr(MjvObjectsWrapper, _field, property(_objs_get, _objs_set))
del _field, _objs_get, _objs_set
def _mjoption_passthrough(field):
    """Property that proxies a scalar field of the wrapped struct."""
    def fget(self):
        return getattr(self._wrapped.contents, field)
    def fset(self, value):
        setattr(self._wrapped.contents, field, value)
    return property(fget, fset)


def _mjoption_double_array(field, count):
    """Property for a fixed-size c_double array field: reads copy into a
    read-only numpy array, writes memmove the values back."""
    def fget(self):
        out = np.fromiter(getattr(self._wrapped.contents, field),
                          dtype=np.double, count=count)
        out = np.reshape(out, (count,))
        out.setflags(write=False)
        return out
    def fset(self, value):
        src = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(getattr(self._wrapped.contents, field), src,
                count * sizeof(c_double))
    return property(fget, fset)


class MjOptionWrapper(object):
    """ctypes-backed view over an mjOption (simulation options) pointer."""

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        # Raw ctypes pointer to the underlying struct.
        return self._wrapped

    @property
    def obj(self):
        # Dereferenced ctypes struct.
        return self._wrapped.contents


for _f in ("timestep", "apirate", "tolerance", "impratio", "density",
           "viscosity", "o_margin", "mpr_tolerance", "mpr_iterations",
           "integrator", "collision", "impedance", "reference", "solver",
           "iterations", "disableflags", "enableflags"):
    setattr(MjOptionWrapper, _f, _mjoption_passthrough(_f))
for _f, _n in (("gravity", 3), ("wind", 3), ("magnetic", 3),
               ("o_solref", 2), ("o_solimp", 3)):
    setattr(MjOptionWrapper, _f, _mjoption_double_array(_f, _n))
del _f, _n
class MjVisualWrapper(object):
    """ctypes-backed view over an mjVisual struct pointer; every field is a
    plain pass-through to the wrapped struct.  `global_`/`map_` carry a
    trailing underscore because the C field names are Python keywords/builtins.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        # Raw ctypes pointer to the underlying struct.
        return self._wrapped

    @property
    def obj(self):
        # Dereferenced ctypes struct.
        return self._wrapped.contents


# Install the scalar pass-through properties.
for _field in ("global_", "quality", "headlight", "map_", "scale", "rgba"):
    def _vis_get(self, _f=_field):
        return getattr(self._wrapped.contents, _f)
    def _vis_set(self, value, _f=_field):
        setattr(self._wrapped.contents, _f, value)
    setattr(MjVisualWrapper, _field, property(_vis_get, _vis_set))
del _field, _vis_get, _vis_set
class MjStatisticWrapper(object):
    """ctypes-backed view over an mjStatistic struct pointer.

    Scalar fields proxy straight through; `center` is copied out as a
    read-only numpy array and copied back with memmove on assignment.
    """

    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src

    @property
    def ptr(self):
        # Raw ctypes pointer to the underlying struct.
        return self._wrapped

    @property
    def obj(self):
        # Dereferenced ctypes struct.
        return self._wrapped.contents

    @property
    def center(self):
        out = np.fromiter(self._wrapped.contents.center, dtype=np.double, count=3)
        out = np.reshape(out, (3,))
        out.setflags(write=False)
        return out

    @center.setter
    def center(self, value):
        src = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.center, src, 3 * sizeof(c_double))


# Install the scalar pass-through properties.
for _field in ("meanmass", "meansize", "extent"):
    def _stat_get(self, _f=_field):
        return getattr(self._wrapped.contents, _f)
    def _stat_set(self, value, _f=_field):
        setattr(self._wrapped.contents, _f, value)
    setattr(MjStatisticWrapper, _field, property(_stat_get, _stat_set))
del _field, _stat_get, _stat_set
class MjDataWrapper(object):
    """Auto-generated ctypes-backed view over an mjData struct pointer.

    Scalar fields proxy straight through to the wrapped struct; array
    fields are copied out as read-only numpy arrays and copied back with
    memmove on assignment.  `size_src` supplies the model-dependent array
    sizes (nq, nv, na, ...) used by the dynamically-sized fields.
    """
    def __init__(self, wrapped, size_src=None):
        self._wrapped = wrapped
        self._size_src = size_src
    @property
    def ptr(self):
        # Raw ctypes pointer to the underlying mjData struct.
        return self._wrapped
    @property
    def obj(self):
        # Dereferenced ctypes struct (the POINTER's contents).
        return self._wrapped.contents
    @property
    def nstack(self):
        return self._wrapped.contents.nstack
    @nstack.setter
    def nstack(self, value):
        self._wrapped.contents.nstack = value
    @property
    def nbuffer(self):
        return self._wrapped.contents.nbuffer
    @nbuffer.setter
    def nbuffer(self, value):
        self._wrapped.contents.nbuffer = value
    @property
    def pstack(self):
        return self._wrapped.contents.pstack
    @pstack.setter
    def pstack(self, value):
        self._wrapped.contents.pstack = value
    @property
    def maxstackuse(self):
        return self._wrapped.contents.maxstackuse
    @maxstackuse.setter
    def maxstackuse(self, value):
        self._wrapped.contents.maxstackuse = value
    @property
    def ne(self):
        return self._wrapped.contents.ne
    @ne.setter
    def ne(self, value):
        self._wrapped.contents.ne = value
    @property
    def nf(self):
        return self._wrapped.contents.nf
    @nf.setter
    def nf(self, value):
        self._wrapped.contents.nf = value
    @property
    def nefc(self):
        return self._wrapped.contents.nefc
    @nefc.setter
    def nefc(self, value):
        self._wrapped.contents.nefc = value
    @property
    def ncon(self):
        return self._wrapped.contents.ncon
    @ncon.setter
    def ncon(self, value):
        self._wrapped.contents.ncon = value
    @property
    def nwarning(self):
        # Read-only copy of the 8-element warning-count array.
        # NOTE(review): np.int is a deprecated alias removed in numpy>=1.24;
        # this should be plain int (or np.intc) on modern numpy.
        arr = np.reshape(np.fromiter(self._wrapped.contents.nwarning, dtype=np.int, count=(8)), (8, ))
        arr.setflags(write=False)
        return arr
@nwarning.setter
def nwarning(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.nwarning, val_ptr, 8 * sizeof(c_int))
    @property
    def warning_info(self):
        # Read-only copy of the 8-element warning-info array.
        # NOTE(review): np.int is a deprecated alias removed in numpy>=1.24;
        # this should be plain int (or np.intc) on modern numpy.
        arr = np.reshape(np.fromiter(self._wrapped.contents.warning_info, dtype=np.int, count=(8)), (8, ))
        arr.setflags(write=False)
        return arr
@warning_info.setter
def warning_info(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.warning_info, val_ptr, 8 * sizeof(c_int))
    @property
    def timer_duration(self):
        # Read-only copy of the 14 internal profiling timers (c_double array).
        arr = np.reshape(np.fromiter(self._wrapped.contents.timer_duration, dtype=np.double, count=(14)), (14, ))
        arr.setflags(write=False)
        return arr
    @timer_duration.setter
    def timer_duration(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.timer_duration, val_ptr, 14 * sizeof(c_double))
    @property
    def timer_ncall(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.timer_ncall, dtype=np.double, count=(14)), (14, ))
        arr.setflags(write=False)
        return arr
    @timer_ncall.setter
    def timer_ncall(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.timer_ncall, val_ptr, 14 * sizeof(c_double))
    @property
    def mocaptime(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.mocaptime, dtype=np.double, count=(3)), (3, ))
        arr.setflags(write=False)
        return arr
    @mocaptime.setter
    def mocaptime(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.mocaptime, val_ptr, 3 * sizeof(c_double))
    @property
    def time(self):
        # Simulation time (scalar pass-through).
        return self._wrapped.contents.time
    @time.setter
    def time(self, value):
        self._wrapped.contents.time = value
    @property
    def energy(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.energy, dtype=np.double, count=(2)), (2, ))
        arr.setflags(write=False)
        return arr
    @energy.setter
    def energy(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.energy, val_ptr, 2 * sizeof(c_double))
    @property
    def solverstat(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.solverstat, dtype=np.double, count=(4)), (4, ))
        arr.setflags(write=False)
        return arr
    @solverstat.setter
    def solverstat(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.solverstat, val_ptr, 4 * sizeof(c_double))
    @property
    def solvertrace(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.solvertrace, dtype=np.double, count=(200)), (200, ))
        arr.setflags(write=False)
        return arr
    @solvertrace.setter
    def solvertrace(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.solvertrace, val_ptr, 200 * sizeof(c_double))
    @property
    def buffer(self):
        # Raw mjData storage; size is instance-dependent (self.nbuffer bytes).
        arr = np.reshape(np.fromiter(self._wrapped.contents.buffer, dtype=np.uint8, count=(self.nbuffer)), (self.nbuffer, ))
        arr.setflags(write=False)
        return arr
@buffer.setter
def buffer(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.buffer, val_ptr, self.nbuffer * sizeof(c_ubyte))
    @property
    def stack(self):
        # Read-only copy of the mjData scratch stack (self.nstack doubles).
        arr = np.reshape(np.fromiter(self._wrapped.contents.stack, dtype=np.double, count=(self.nstack)), (self.nstack, ))
        arr.setflags(write=False)
        return arr
    @stack.setter
    def stack(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.stack, val_ptr, self.nstack * sizeof(c_double))
    @property
    def qpos(self):
        # Generalized positions, shape (nq, 1); sizes come from _size_src (the model).
        arr = np.reshape(np.fromiter(self._wrapped.contents.qpos, dtype=np.double, count=(self._size_src.nq*1)), (self._size_src.nq, 1, ))
        arr.setflags(write=False)
        return arr
    @qpos.setter
    def qpos(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qpos, val_ptr, self._size_src.nq*1 * sizeof(c_double))
    @property
    def qvel(self):
        # Generalized velocities, shape (nv, 1).
        arr = np.reshape(np.fromiter(self._wrapped.contents.qvel, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qvel.setter
    def qvel(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qvel, val_ptr, self._size_src.nv*1 * sizeof(c_double))
    @property
    def act(self):
        # Actuator activations, shape (na, 1).
        arr = np.reshape(np.fromiter(self._wrapped.contents.act, dtype=np.double, count=(self._size_src.na*1)), (self._size_src.na, 1, ))
        arr.setflags(write=False)
        return arr
    @act.setter
    def act(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.act, val_ptr, self._size_src.na*1 * sizeof(c_double))
    @property
    def ctrl(self):
        # Control inputs, shape (nu, 1).
        arr = np.reshape(np.fromiter(self._wrapped.contents.ctrl, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
        arr.setflags(write=False)
        return arr
    @ctrl.setter
    def ctrl(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.ctrl, val_ptr, self._size_src.nu*1 * sizeof(c_double))
    @property
    def qfrc_applied(self):
        # Applied generalized forces, shape (nv, 1).
        arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_applied, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qfrc_applied.setter
    def qfrc_applied(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qfrc_applied, val_ptr, self._size_src.nv*1 * sizeof(c_double))
    @property
    def xfrc_applied(self):
        # Applied Cartesian wrenches per body, shape (nbody, 6).
        arr = np.reshape(np.fromiter(self._wrapped.contents.xfrc_applied, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
        arr.setflags(write=False)
        return arr
    @xfrc_applied.setter
    def xfrc_applied(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.xfrc_applied, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
    @property
    def qacc(self):
        # Generalized accelerations, shape (nv, 1).
        arr = np.reshape(np.fromiter(self._wrapped.contents.qacc, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qacc.setter
    def qacc(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qacc, val_ptr, self._size_src.nv*1 * sizeof(c_double))
    @property
    def act_dot(self):
        # Activation time-derivatives, shape (na, 1).
        arr = np.reshape(np.fromiter(self._wrapped.contents.act_dot, dtype=np.double, count=(self._size_src.na*1)), (self._size_src.na, 1, ))
        arr.setflags(write=False)
        return arr
    @act_dot.setter
    def act_dot(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.act_dot, val_ptr, self._size_src.na*1 * sizeof(c_double))
    @property
    def mocap_pos(self):
        # Mocap body positions, shape (nmocap, 3).
        arr = np.reshape(np.fromiter(self._wrapped.contents.mocap_pos, dtype=np.double, count=(self._size_src.nmocap*3)), (self._size_src.nmocap, 3, ))
        arr.setflags(write=False)
        return arr
    @mocap_pos.setter
    def mocap_pos(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.mocap_pos, val_ptr, self._size_src.nmocap*3 * sizeof(c_double))
    @property
    def mocap_quat(self):
        # Mocap body quaternions, shape (nmocap, 4).
        arr = np.reshape(np.fromiter(self._wrapped.contents.mocap_quat, dtype=np.double, count=(self._size_src.nmocap*4)), (self._size_src.nmocap, 4, ))
        arr.setflags(write=False)
        return arr
@mocap_quat.setter
def mocap_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.mocap_quat, val_ptr, self._size_src.nmocap*4 * sizeof(c_double))
@property
def userdata(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.userdata, dtype=np.double, count=(self._size_src.nuserdata*1)), (self._size_src.nuserdata, 1, ))
arr.setflags(write=False)
return arr
@userdata.setter
def userdata(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.userdata, val_ptr, self._size_src.nuserdata*1 * sizeof(c_double))
@property
def sensordata(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.sensordata, dtype=np.double, count=(self._size_src.nsensordata*1)), (self._size_src.nsensordata, 1, ))
arr.setflags(write=False)
return arr
@sensordata.setter
def sensordata(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.sensordata, val_ptr, self._size_src.nsensordata*1 * sizeof(c_double))
@property
def xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xpos, dtype=np.double, count=(self._size_src.nbody*3)), (self._size_src.nbody, 3, ))
arr.setflags(write=False)
return arr
@xpos.setter
def xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xpos, val_ptr, self._size_src.nbody*3 * sizeof(c_double))
@property
def xquat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xquat, dtype=np.double, count=(self._size_src.nbody*4)), (self._size_src.nbody, 4, ))
arr.setflags(write=False)
return arr
@xquat.setter
def xquat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xquat, val_ptr, self._size_src.nbody*4 * sizeof(c_double))
@property
def xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xmat, dtype=np.double, count=(self._size_src.nbody*9)), (self._size_src.nbody, 9, ))
arr.setflags(write=False)
return arr
@xmat.setter
def xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xmat, val_ptr, self._size_src.nbody*9 * sizeof(c_double))
@property
def xipos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xipos, dtype=np.double, count=(self._size_src.nbody*3)), (self._size_src.nbody, 3, ))
arr.setflags(write=False)
return arr
@xipos.setter
def xipos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xipos, val_ptr, self._size_src.nbody*3 * sizeof(c_double))
@property
def ximat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ximat, dtype=np.double, count=(self._size_src.nbody*9)), (self._size_src.nbody, 9, ))
arr.setflags(write=False)
return arr
@ximat.setter
def ximat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.ximat, val_ptr, self._size_src.nbody*9 * sizeof(c_double))
@property
def xanchor(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xanchor, dtype=np.double, count=(self._size_src.njnt*3)), (self._size_src.njnt, 3, ))
arr.setflags(write=False)
return arr
@xanchor.setter
def xanchor(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xanchor, val_ptr, self._size_src.njnt*3 * sizeof(c_double))
@property
def xaxis(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.xaxis, dtype=np.double, count=(self._size_src.njnt*3)), (self._size_src.njnt, 3, ))
arr.setflags(write=False)
return arr
@xaxis.setter
def xaxis(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.xaxis, val_ptr, self._size_src.njnt*3 * sizeof(c_double))
@property
def geom_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_xpos, dtype=np.double, count=(self._size_src.ngeom*3)), (self._size_src.ngeom, 3, ))
arr.setflags(write=False)
return arr
@geom_xpos.setter
def geom_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_xpos, val_ptr, self._size_src.ngeom*3 * sizeof(c_double))
@property
def geom_xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_xmat, dtype=np.double, count=(self._size_src.ngeom*9)), (self._size_src.ngeom, 9, ))
arr.setflags(write=False)
return arr
@geom_xmat.setter
def geom_xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_xmat, val_ptr, self._size_src.ngeom*9 * sizeof(c_double))
@property
def site_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_xpos, dtype=np.double, count=(self._size_src.nsite*3)), (self._size_src.nsite, 3, ))
arr.setflags(write=False)
return arr
@site_xpos.setter
def site_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_xpos, val_ptr, self._size_src.nsite*3 * sizeof(c_double))
@property
def site_xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_xmat, dtype=np.double, count=(self._size_src.nsite*9)), (self._size_src.nsite, 9, ))
arr.setflags(write=False)
return arr
@site_xmat.setter
def site_xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_xmat, val_ptr, self._size_src.nsite*9 * sizeof(c_double))
@property
def cam_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_xpos, dtype=np.double, count=(self._size_src.ncam*3)), (self._size_src.ncam, 3, ))
arr.setflags(write=False)
return arr
@cam_xpos.setter
def cam_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_xpos, val_ptr, self._size_src.ncam*3 * sizeof(c_double))
@property
def cam_xmat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_xmat, dtype=np.double, count=(self._size_src.ncam*9)), (self._size_src.ncam, 9, ))
arr.setflags(write=False)
return arr
@cam_xmat.setter
def cam_xmat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_xmat, val_ptr, self._size_src.ncam*9 * sizeof(c_double))
@property
def light_xpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_xpos, dtype=np.double, count=(self._size_src.nlight*3)), (self._size_src.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_xpos.setter
def light_xpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.light_xpos, val_ptr, self._size_src.nlight*3 * sizeof(c_double))
@property
def light_xdir(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_xdir, dtype=np.double, count=(self._size_src.nlight*3)), (self._size_src.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_xdir.setter
def light_xdir(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.light_xdir, val_ptr, self._size_src.nlight*3 * sizeof(c_double))
@property
def com_subtree(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.com_subtree, dtype=np.double, count=(self._size_src.nbody*3)), (self._size_src.nbody, 3, ))
arr.setflags(write=False)
return arr
@com_subtree.setter
def com_subtree(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.com_subtree, val_ptr, self._size_src.nbody*3 * sizeof(c_double))
@property
def cdof(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cdof, dtype=np.double, count=(self._size_src.nv*6)), (self._size_src.nv, 6, ))
arr.setflags(write=False)
return arr
@cdof.setter
def cdof(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cdof, val_ptr, self._size_src.nv*6 * sizeof(c_double))
@property
def cinert(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cinert, dtype=np.double, count=(self._size_src.nbody*10)), (self._size_src.nbody, 10, ))
arr.setflags(write=False)
return arr
@cinert.setter
def cinert(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cinert, val_ptr, self._size_src.nbody*10 * sizeof(c_double))
@property
def ten_wrapadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_wrapadr, dtype=np.int, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
arr.setflags(write=False)
return arr
@ten_wrapadr.setter
def ten_wrapadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.ten_wrapadr, val_ptr, self._size_src.ntendon*1 * sizeof(c_int))
@property
def ten_wrapnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.ten_wrapnum, dtype=np.int, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
arr.setflags(write=False)
return arr
@ten_wrapnum.setter
def ten_wrapnum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.ten_wrapnum, val_ptr, self._size_src.ntendon*1 * sizeof(c_int))
    # Auto-generated accessors for double-precision tendon fields.
    # Getter: read-only snapshot copy of the C buffer, shape (nrow, ncol);
    # setter: memmove's a float64 copy of `value` over the C buffer
    # (exactly nrow*ncol elements required; no shape/length check).
    @property
    def ten_length(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.ten_length, dtype=np.double, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
        arr.setflags(write=False)
        return arr
    @ten_length.setter
    def ten_length(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.ten_length, val_ptr, self._size_src.ntendon*1 * sizeof(c_double))
    @property
    def ten_moment(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.ten_moment, dtype=np.double, count=(self._size_src.ntendon*self._size_src.nv)), (self._size_src.ntendon, self._size_src.nv, ))
        arr.setflags(write=False)
        return arr
    @ten_moment.setter
    def ten_moment(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.ten_moment, val_ptr, self._size_src.ntendon*self._size_src.nv * sizeof(c_double))
@property
def wrap_obj(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.wrap_obj, dtype=np.int, count=(self._size_src.nwrap*2)), (self._size_src.nwrap, 2, ))
arr.setflags(write=False)
return arr
@wrap_obj.setter
def wrap_obj(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.wrap_obj, val_ptr, self._size_src.nwrap*2 * sizeof(c_int))
    # Auto-generated accessors for double-precision actuator/inertia
    # fields. Getter: read-only snapshot copy of the C buffer, shape
    # (nrow, ncol); setter: memmove's a float64 copy of `value` over the
    # C buffer (exactly nrow*ncol elements required; unchecked).
    @property
    def wrap_xpos(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.wrap_xpos, dtype=np.double, count=(self._size_src.nwrap*6)), (self._size_src.nwrap, 6, ))
        arr.setflags(write=False)
        return arr
    @wrap_xpos.setter
    def wrap_xpos(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.wrap_xpos, val_ptr, self._size_src.nwrap*6 * sizeof(c_double))
    @property
    def actuator_length(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_length, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
        arr.setflags(write=False)
        return arr
    @actuator_length.setter
    def actuator_length(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.actuator_length, val_ptr, self._size_src.nu*1 * sizeof(c_double))
    @property
    def actuator_moment(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_moment, dtype=np.double, count=(self._size_src.nu*self._size_src.nv)), (self._size_src.nu, self._size_src.nv, ))
        arr.setflags(write=False)
        return arr
    @actuator_moment.setter
    def actuator_moment(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.actuator_moment, val_ptr, self._size_src.nu*self._size_src.nv * sizeof(c_double))
    @property
    def crb(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.crb, dtype=np.double, count=(self._size_src.nbody*10)), (self._size_src.nbody, 10, ))
        arr.setflags(write=False)
        return arr
    @crb.setter
    def crb(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.crb, val_ptr, self._size_src.nbody*10 * sizeof(c_double))
    @property
    def qM(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.qM, dtype=np.double, count=(self._size_src.nM*1)), (self._size_src.nM, 1, ))
        arr.setflags(write=False)
        return arr
    @qM.setter
    def qM(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qM, val_ptr, self._size_src.nM*1 * sizeof(c_double))
    @property
    def qLD(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.qLD, dtype=np.double, count=(self._size_src.nM*1)), (self._size_src.nM, 1, ))
        arr.setflags(write=False)
        return arr
    @qLD.setter
    def qLD(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qLD, val_ptr, self._size_src.nM*1 * sizeof(c_double))
    @property
    def qLDiagInv(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.qLDiagInv, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qLDiagInv.setter
    def qLDiagInv(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qLDiagInv, val_ptr, self._size_src.nv*1 * sizeof(c_double))
    @property
    def qLDiagSqrtInv(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.qLDiagSqrtInv, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qLDiagSqrtInv.setter
    def qLDiagSqrtInv(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qLDiagSqrtInv, val_ptr, self._size_src.nv*1 * sizeof(c_double))
@property
def efc_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_type, dtype=np.int, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_type.setter
def efc_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_type, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_id(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_id, dtype=np.int, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_id.setter
def efc_id(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_id, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_rownnz(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rownnz, dtype=np.int, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_rownnz.setter
def efc_rownnz(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rownnz, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_rowadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rowadr, dtype=np.int, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
arr.setflags(write=False)
return arr
@efc_rowadr.setter
def efc_rowadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rowadr, val_ptr, self._size_src.njmax*1 * sizeof(c_int))
@property
def efc_colind(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_colind, dtype=np.int, count=(self._size_src.njmax*self._size_src.nv)), (self._size_src.njmax, self._size_src.nv, ))
arr.setflags(write=False)
return arr
@efc_colind.setter
def efc_colind(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_colind, val_ptr, self._size_src.njmax*self._size_src.nv * sizeof(c_int))
@property
def efc_rownnz_T(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rownnz_T, dtype=np.int, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@efc_rownnz_T.setter
def efc_rownnz_T(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rownnz_T, val_ptr, self._size_src.nv*1 * sizeof(c_int))
@property
def efc_rowadr_T(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_rowadr_T, dtype=np.int, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
arr.setflags(write=False)
return arr
@efc_rowadr_T.setter
def efc_rowadr_T(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_rowadr_T, val_ptr, self._size_src.nv*1 * sizeof(c_int))
@property
def efc_colind_T(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.efc_colind_T, dtype=np.int, count=(self._size_src.nv*self._size_src.njmax)), (self._size_src.nv, self._size_src.njmax, ))
arr.setflags(write=False)
return arr
@efc_colind_T.setter
def efc_colind_T(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.efc_colind_T, val_ptr, self._size_src.nv*self._size_src.njmax * sizeof(c_int))
    # ------------------------------------------------------------------
    # Auto-generated ctypes accessors for double-precision constraint,
    # force, and acceleration fields. Every property follows one template:
    #   getter: copies the C buffer self._wrapped.contents.<field> into a
    #     freshly allocated, read-only numpy array of shape (nrow, ncol)
    #     -- a snapshot, NOT a live view of the simulation memory.
    #   setter: converts `value` to a C-contiguous float64 array and
    #     memmove's its raw bytes over the C buffer; `value` must supply
    #     exactly nrow*ncol elements (no shape or length check is done).
    # Row/column counts come from self._size_src (njmax, nemax, nv, ...).
    # ------------------------------------------------------------------
    @property
    def efc_solref(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_solref, dtype=np.double, count=(self._size_src.njmax*2)), (self._size_src.njmax, 2, ))
        arr.setflags(write=False)
        return arr
    @efc_solref.setter
    def efc_solref(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_solref, val_ptr, self._size_src.njmax*2 * sizeof(c_double))
    @property
    def efc_solimp(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_solimp, dtype=np.double, count=(self._size_src.njmax*3)), (self._size_src.njmax, 3, ))
        arr.setflags(write=False)
        return arr
    @efc_solimp.setter
    def efc_solimp(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_solimp, val_ptr, self._size_src.njmax*3 * sizeof(c_double))
    @property
    def efc_margin(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_margin, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
        arr.setflags(write=False)
        return arr
    @efc_margin.setter
    def efc_margin(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_margin, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
    @property
    def efc_frictionloss(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_frictionloss, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
        arr.setflags(write=False)
        return arr
    @efc_frictionloss.setter
    def efc_frictionloss(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_frictionloss, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
    @property
    def efc_pos(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_pos, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
        arr.setflags(write=False)
        return arr
    @efc_pos.setter
    def efc_pos(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_pos, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
    @property
    def efc_J(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_J, dtype=np.double, count=(self._size_src.njmax*self._size_src.nv)), (self._size_src.njmax, self._size_src.nv, ))
        arr.setflags(write=False)
        return arr
    @efc_J.setter
    def efc_J(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_J, val_ptr, self._size_src.njmax*self._size_src.nv * sizeof(c_double))
    @property
    def efc_J_T(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_J_T, dtype=np.double, count=(self._size_src.nv*self._size_src.njmax)), (self._size_src.nv, self._size_src.njmax, ))
        arr.setflags(write=False)
        return arr
    @efc_J_T.setter
    def efc_J_T(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_J_T, val_ptr, self._size_src.nv*self._size_src.njmax * sizeof(c_double))
    @property
    def efc_diagApprox(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_diagApprox, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
        arr.setflags(write=False)
        return arr
    @efc_diagApprox.setter
    def efc_diagApprox(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_diagApprox, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
    @property
    def efc_D(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_D, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
        arr.setflags(write=False)
        return arr
    @efc_D.setter
    def efc_D(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_D, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
    @property
    def efc_R(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_R, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
        arr.setflags(write=False)
        return arr
    @efc_R.setter
    def efc_R(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_R, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
    @property
    def efc_AR(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_AR, dtype=np.double, count=(self._size_src.njmax*self._size_src.njmax)), (self._size_src.njmax, self._size_src.njmax, ))
        arr.setflags(write=False)
        return arr
    @efc_AR.setter
    def efc_AR(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_AR, val_ptr, self._size_src.njmax*self._size_src.njmax * sizeof(c_double))
    @property
    def e_ARchol(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.e_ARchol, dtype=np.double, count=(self._size_src.nemax*self._size_src.nemax)), (self._size_src.nemax, self._size_src.nemax, ))
        arr.setflags(write=False)
        return arr
    @e_ARchol.setter
    def e_ARchol(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.e_ARchol, val_ptr, self._size_src.nemax*self._size_src.nemax * sizeof(c_double))
    @property
    def fc_e_rect(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.fc_e_rect, dtype=np.double, count=(self._size_src.njmax*self._size_src.nemax)), (self._size_src.njmax, self._size_src.nemax, ))
        arr.setflags(write=False)
        return arr
    @fc_e_rect.setter
    def fc_e_rect(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.fc_e_rect, val_ptr, self._size_src.njmax*self._size_src.nemax * sizeof(c_double))
    @property
    def fc_AR(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.fc_AR, dtype=np.double, count=(self._size_src.njmax*self._size_src.njmax)), (self._size_src.njmax, self._size_src.njmax, ))
        arr.setflags(write=False)
        return arr
    @fc_AR.setter
    def fc_AR(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.fc_AR, val_ptr, self._size_src.njmax*self._size_src.njmax * sizeof(c_double))
    @property
    def ten_velocity(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.ten_velocity, dtype=np.double, count=(self._size_src.ntendon*1)), (self._size_src.ntendon, 1, ))
        arr.setflags(write=False)
        return arr
    @ten_velocity.setter
    def ten_velocity(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.ten_velocity, val_ptr, self._size_src.ntendon*1 * sizeof(c_double))
    @property
    def actuator_velocity(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_velocity, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
        arr.setflags(write=False)
        return arr
    @actuator_velocity.setter
    def actuator_velocity(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.actuator_velocity, val_ptr, self._size_src.nu*1 * sizeof(c_double))
    @property
    def cvel(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.cvel, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
        arr.setflags(write=False)
        return arr
    @cvel.setter
    def cvel(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.cvel, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
    @property
    def cdof_dot(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.cdof_dot, dtype=np.double, count=(self._size_src.nv*6)), (self._size_src.nv, 6, ))
        arr.setflags(write=False)
        return arr
    @cdof_dot.setter
    def cdof_dot(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.cdof_dot, val_ptr, self._size_src.nv*6 * sizeof(c_double))
    @property
    def qfrc_bias(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_bias, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qfrc_bias.setter
    def qfrc_bias(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qfrc_bias, val_ptr, self._size_src.nv*1 * sizeof(c_double))
    @property
    def qfrc_passive(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_passive, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qfrc_passive.setter
    def qfrc_passive(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qfrc_passive, val_ptr, self._size_src.nv*1 * sizeof(c_double))
    @property
    def efc_vel(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_vel, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
        arr.setflags(write=False)
        return arr
    @efc_vel.setter
    def efc_vel(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_vel, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
    @property
    def efc_aref(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_aref, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
        arr.setflags(write=False)
        return arr
    @efc_aref.setter
    def efc_aref(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_aref, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
    @property
    def actuator_force(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_force, dtype=np.double, count=(self._size_src.nu*1)), (self._size_src.nu, 1, ))
        arr.setflags(write=False)
        return arr
    @actuator_force.setter
    def actuator_force(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.actuator_force, val_ptr, self._size_src.nu*1 * sizeof(c_double))
    @property
    def qfrc_actuator(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_actuator, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qfrc_actuator.setter
    def qfrc_actuator(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qfrc_actuator, val_ptr, self._size_src.nv*1 * sizeof(c_double))
    @property
    def qfrc_unc(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_unc, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qfrc_unc.setter
    def qfrc_unc(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qfrc_unc, val_ptr, self._size_src.nv*1 * sizeof(c_double))
    @property
    def qacc_unc(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.qacc_unc, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qacc_unc.setter
    def qacc_unc(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qacc_unc, val_ptr, self._size_src.nv*1 * sizeof(c_double))
    @property
    def efc_b(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_b, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
        arr.setflags(write=False)
        return arr
    @efc_b.setter
    def efc_b(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_b, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
    @property
    def fc_b(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.fc_b, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
        arr.setflags(write=False)
        return arr
    @fc_b.setter
    def fc_b(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.fc_b, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
    @property
    def efc_force(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.efc_force, dtype=np.double, count=(self._size_src.njmax*1)), (self._size_src.njmax, 1, ))
        arr.setflags(write=False)
        return arr
    @efc_force.setter
    def efc_force(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.efc_force, val_ptr, self._size_src.njmax*1 * sizeof(c_double))
    @property
    def qfrc_constraint(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_constraint, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qfrc_constraint.setter
    def qfrc_constraint(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qfrc_constraint, val_ptr, self._size_src.nv*1 * sizeof(c_double))
    @property
    def qfrc_inverse(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.qfrc_inverse, dtype=np.double, count=(self._size_src.nv*1)), (self._size_src.nv, 1, ))
        arr.setflags(write=False)
        return arr
    @qfrc_inverse.setter
    def qfrc_inverse(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qfrc_inverse, val_ptr, self._size_src.nv*1 * sizeof(c_double))
    @property
    def cacc(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.cacc, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
        arr.setflags(write=False)
        return arr
    @cacc.setter
    def cacc(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.cacc, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
    @property
    def cfrc_int(self):
        arr = np.reshape(np.fromiter(self._wrapped.contents.cfrc_int, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
        arr.setflags(write=False)
        return arr
    @cfrc_int.setter
    def cfrc_int(self, value):
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.cfrc_int, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
@property
def cfrc_ext(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cfrc_ext, dtype=np.double, count=(self._size_src.nbody*6)), (self._size_src.nbody, 6, ))
arr.setflags(write=False)
return arr
@cfrc_ext.setter
def cfrc_ext(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cfrc_ext, val_ptr, self._size_src.nbody*6 * sizeof(c_double))
class MjModelWrapper(object):
def __init__(self, wrapped, size_src=None):
self._wrapped = wrapped
self._size_src = size_src
    @property
    def ptr(self):
        # Raw ctypes pointer to the wrapped struct (no copy).
        return self._wrapped
    @property
    def obj(self):
        # Dereferenced struct (the pointer's .contents) for field access.
        return self._wrapped.contents
@property
def nq(self):
return self._wrapped.contents.nq
@nq.setter
def nq(self, value):
self._wrapped.contents.nq = value
@property
def nv(self):
return self._wrapped.contents.nv
@nv.setter
def nv(self, value):
self._wrapped.contents.nv = value
@property
def nu(self):
return self._wrapped.contents.nu
@nu.setter
def nu(self, value):
self._wrapped.contents.nu = value
@property
def na(self):
return self._wrapped.contents.na
@na.setter
def na(self, value):
self._wrapped.contents.na = value
@property
def nbody(self):
return self._wrapped.contents.nbody
@nbody.setter
def nbody(self, value):
self._wrapped.contents.nbody = value
@property
def njnt(self):
return self._wrapped.contents.njnt
@njnt.setter
def njnt(self, value):
self._wrapped.contents.njnt = value
@property
def ngeom(self):
return self._wrapped.contents.ngeom
@ngeom.setter
def ngeom(self, value):
self._wrapped.contents.ngeom = value
@property
def nsite(self):
return self._wrapped.contents.nsite
@nsite.setter
def nsite(self, value):
self._wrapped.contents.nsite = value
@property
def ncam(self):
return self._wrapped.contents.ncam
@ncam.setter
def ncam(self, value):
self._wrapped.contents.ncam = value
@property
def nlight(self):
return self._wrapped.contents.nlight
@nlight.setter
def nlight(self, value):
self._wrapped.contents.nlight = value
@property
def nmesh(self):
return self._wrapped.contents.nmesh
@nmesh.setter
def nmesh(self, value):
self._wrapped.contents.nmesh = value
@property
def nmeshvert(self):
return self._wrapped.contents.nmeshvert
@nmeshvert.setter
def nmeshvert(self, value):
self._wrapped.contents.nmeshvert = value
@property
def nmeshface(self):
return self._wrapped.contents.nmeshface
@nmeshface.setter
def nmeshface(self, value):
self._wrapped.contents.nmeshface = value
@property
def nmeshgraph(self):
return self._wrapped.contents.nmeshgraph
@nmeshgraph.setter
def nmeshgraph(self, value):
self._wrapped.contents.nmeshgraph = value
@property
def nhfield(self):
return self._wrapped.contents.nhfield
@nhfield.setter
def nhfield(self, value):
self._wrapped.contents.nhfield = value
@property
def nhfielddata(self):
return self._wrapped.contents.nhfielddata
@nhfielddata.setter
def nhfielddata(self, value):
self._wrapped.contents.nhfielddata = value
@property
def ntex(self):
return self._wrapped.contents.ntex
@ntex.setter
def ntex(self, value):
self._wrapped.contents.ntex = value
@property
def ntexdata(self):
return self._wrapped.contents.ntexdata
@ntexdata.setter
def ntexdata(self, value):
self._wrapped.contents.ntexdata = value
@property
def nmat(self):
return self._wrapped.contents.nmat
@nmat.setter
def nmat(self, value):
self._wrapped.contents.nmat = value
@property
def npair(self):
return self._wrapped.contents.npair
@npair.setter
def npair(self, value):
self._wrapped.contents.npair = value
@property
def nexclude(self):
return self._wrapped.contents.nexclude
@nexclude.setter
def nexclude(self, value):
self._wrapped.contents.nexclude = value
@property
def neq(self):
return self._wrapped.contents.neq
@neq.setter
def neq(self, value):
self._wrapped.contents.neq = value
@property
def ntendon(self):
return self._wrapped.contents.ntendon
@ntendon.setter
def ntendon(self, value):
self._wrapped.contents.ntendon = value
@property
def nwrap(self):
return self._wrapped.contents.nwrap
@nwrap.setter
def nwrap(self, value):
self._wrapped.contents.nwrap = value
@property
def nsensor(self):
return self._wrapped.contents.nsensor
@nsensor.setter
def nsensor(self, value):
self._wrapped.contents.nsensor = value
@property
def nnumeric(self):
return self._wrapped.contents.nnumeric
@nnumeric.setter
def nnumeric(self, value):
self._wrapped.contents.nnumeric = value
@property
def nnumericdata(self):
return self._wrapped.contents.nnumericdata
@nnumericdata.setter
def nnumericdata(self, value):
self._wrapped.contents.nnumericdata = value
@property
def ntext(self):
return self._wrapped.contents.ntext
@ntext.setter
def ntext(self, value):
self._wrapped.contents.ntext = value
@property
def ntextdata(self):
return self._wrapped.contents.ntextdata
@ntextdata.setter
def ntextdata(self, value):
self._wrapped.contents.ntextdata = value
@property
def nkey(self):
return self._wrapped.contents.nkey
@nkey.setter
def nkey(self, value):
self._wrapped.contents.nkey = value
@property
def nuser_body(self):
return self._wrapped.contents.nuser_body
@nuser_body.setter
def nuser_body(self, value):
self._wrapped.contents.nuser_body = value
@property
def nuser_jnt(self):
return self._wrapped.contents.nuser_jnt
@nuser_jnt.setter
def nuser_jnt(self, value):
self._wrapped.contents.nuser_jnt = value
@property
def nuser_geom(self):
return self._wrapped.contents.nuser_geom
@nuser_geom.setter
def nuser_geom(self, value):
self._wrapped.contents.nuser_geom = value
@property
def nuser_site(self):
return self._wrapped.contents.nuser_site
@nuser_site.setter
def nuser_site(self, value):
self._wrapped.contents.nuser_site = value
@property
def nuser_tendon(self):
return self._wrapped.contents.nuser_tendon
@nuser_tendon.setter
def nuser_tendon(self, value):
self._wrapped.contents.nuser_tendon = value
@property
def nuser_actuator(self):
return self._wrapped.contents.nuser_actuator
@nuser_actuator.setter
def nuser_actuator(self, value):
self._wrapped.contents.nuser_actuator = value
@property
def nuser_sensor(self):
return self._wrapped.contents.nuser_sensor
@nuser_sensor.setter
def nuser_sensor(self, value):
self._wrapped.contents.nuser_sensor = value
@property
def nnames(self):
return self._wrapped.contents.nnames
@nnames.setter
def nnames(self, value):
self._wrapped.contents.nnames = value
@property
def nM(self):
return self._wrapped.contents.nM
@nM.setter
def nM(self, value):
self._wrapped.contents.nM = value
@property
def nemax(self):
return self._wrapped.contents.nemax
@nemax.setter
def nemax(self, value):
self._wrapped.contents.nemax = value
@property
def njmax(self):
return self._wrapped.contents.njmax
@njmax.setter
def njmax(self, value):
self._wrapped.contents.njmax = value
@property
def nconmax(self):
return self._wrapped.contents.nconmax
@nconmax.setter
def nconmax(self, value):
self._wrapped.contents.nconmax = value
@property
def nstack(self):
return self._wrapped.contents.nstack
@nstack.setter
def nstack(self, value):
self._wrapped.contents.nstack = value
@property
def nuserdata(self):
return self._wrapped.contents.nuserdata
@nuserdata.setter
def nuserdata(self, value):
self._wrapped.contents.nuserdata = value
@property
def nmocap(self):
return self._wrapped.contents.nmocap
@nmocap.setter
def nmocap(self, value):
self._wrapped.contents.nmocap = value
@property
def nsensordata(self):
return self._wrapped.contents.nsensordata
@nsensordata.setter
def nsensordata(self, value):
self._wrapped.contents.nsensordata = value
@property
def nbuffer(self):
return self._wrapped.contents.nbuffer
@nbuffer.setter
def nbuffer(self, value):
self._wrapped.contents.nbuffer = value
    # Nested sub-structs exposed by value; assignment copies the whole
    # sub-struct back into the wrapped C struct.
    @property
    def opt(self):
        # presumably the simulation-options sub-struct — confirm against
        # the generated struct definition
        return self._wrapped.contents.opt
    @opt.setter
    def opt(self, value):
        self._wrapped.contents.opt = value
    @property
    def vis(self):
        # presumably the visualization-settings sub-struct — TODO confirm
        return self._wrapped.contents.vis
    @vis.setter
    def vis(self, value):
        self._wrapped.contents.vis = value
    @property
    def stat(self):
        # presumably the model-statistics sub-struct — TODO confirm
        return self._wrapped.contents.stat
    @stat.setter
    def stat(self, value):
        self._wrapped.contents.stat = value
@property
def buffer(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.buffer, dtype=np.uint8, count=(self.nbuffer)), (self.nbuffer, ))
arr.setflags(write=False)
return arr
@buffer.setter
def buffer(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.buffer, val_ptr, self.nbuffer * sizeof(c_ubyte))
    @property
    def qpos0(self):
        # Read-only (nq, 1) float64 copy of the C array.
        arr = np.reshape(np.fromiter(self._wrapped.contents.qpos0, dtype=np.double, count=(self.nq*1)), (self.nq, 1, ))
        arr.setflags(write=False)
        return arr
    @qpos0.setter
    def qpos0(self, value):
        # Copies `value` (as float64) back into the C buffer.
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qpos0, val_ptr, self.nq*1 * sizeof(c_double))
    @property
    def qpos_spring(self):
        # Read-only (nq, 1) float64 copy of the C array.
        arr = np.reshape(np.fromiter(self._wrapped.contents.qpos_spring, dtype=np.double, count=(self.nq*1)), (self.nq, 1, ))
        arr.setflags(write=False)
        return arr
    @qpos_spring.setter
    def qpos_spring(self, value):
        # Copies `value` (as float64) back into the C buffer.
        val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
        memmove(self._wrapped.contents.qpos_spring, val_ptr, self.nq*1 * sizeof(c_double))
@property
def body_parentid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_parentid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_parentid.setter
def body_parentid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_parentid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_rootid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_rootid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_rootid.setter
def body_rootid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_rootid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_weldid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_weldid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_weldid.setter
def body_weldid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_weldid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_mocapid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_mocapid, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_mocapid.setter
def body_mocapid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_mocapid, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_jntnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_jntnum, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_jntnum.setter
def body_jntnum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_jntnum, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_jntadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_jntadr, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_jntadr.setter
def body_jntadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_jntadr, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_dofnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_dofnum, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_dofnum.setter
def body_dofnum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_dofnum, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_dofadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_dofadr, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_dofadr.setter
def body_dofadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_dofadr, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_geomnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_geomnum, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_geomnum.setter
def body_geomnum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_geomnum, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_geomadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_geomadr, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_geomadr.setter
def body_geomadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.body_geomadr, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def body_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_pos, dtype=np.double, count=(self.nbody*3)), (self.nbody, 3, ))
arr.setflags(write=False)
return arr
@body_pos.setter
def body_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_pos, val_ptr, self.nbody*3 * sizeof(c_double))
@property
def body_quat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_quat, dtype=np.double, count=(self.nbody*4)), (self.nbody, 4, ))
arr.setflags(write=False)
return arr
@body_quat.setter
def body_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_quat, val_ptr, self.nbody*4 * sizeof(c_double))
@property
def body_ipos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_ipos, dtype=np.double, count=(self.nbody*3)), (self.nbody, 3, ))
arr.setflags(write=False)
return arr
@body_ipos.setter
def body_ipos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_ipos, val_ptr, self.nbody*3 * sizeof(c_double))
@property
def body_iquat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_iquat, dtype=np.double, count=(self.nbody*4)), (self.nbody, 4, ))
arr.setflags(write=False)
return arr
@body_iquat.setter
def body_iquat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_iquat, val_ptr, self.nbody*4 * sizeof(c_double))
@property
def body_mass(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_mass, dtype=np.double, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@body_mass.setter
def body_mass(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_mass, val_ptr, self.nbody*1 * sizeof(c_double))
@property
def body_inertia(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_inertia, dtype=np.double, count=(self.nbody*3)), (self.nbody, 3, ))
arr.setflags(write=False)
return arr
@body_inertia.setter
def body_inertia(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_inertia, val_ptr, self.nbody*3 * sizeof(c_double))
@property
def body_invweight0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_invweight0, dtype=np.double, count=(self.nbody*2)), (self.nbody, 2, ))
arr.setflags(write=False)
return arr
@body_invweight0.setter
def body_invweight0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_invweight0, val_ptr, self.nbody*2 * sizeof(c_double))
@property
def body_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.body_user, dtype=np.double, count=(self.nbody*self.nuser_body)), (self.nbody, self.nuser_body, ))
arr.setflags(write=False)
return arr
@body_user.setter
def body_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.body_user, val_ptr, self.nbody*self.nuser_body * sizeof(c_double))
@property
def jnt_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_type, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_type.setter
def jnt_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_type, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_qposadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_qposadr, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_qposadr.setter
def jnt_qposadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_qposadr, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_dofadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_dofadr, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_dofadr.setter
def jnt_dofadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_dofadr, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_bodyid, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_bodyid.setter
def jnt_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.jnt_bodyid, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def jnt_limited(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_limited, dtype=np.uint8, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_limited.setter
def jnt_limited(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.jnt_limited, val_ptr, self.njnt*1 * sizeof(c_ubyte))
@property
def jnt_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_solref, dtype=np.double, count=(self.njnt*2)), (self.njnt, 2, ))
arr.setflags(write=False)
return arr
@jnt_solref.setter
def jnt_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_solref, val_ptr, self.njnt*2 * sizeof(c_double))
@property
def jnt_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_solimp, dtype=np.double, count=(self.njnt*3)), (self.njnt, 3, ))
arr.setflags(write=False)
return arr
@jnt_solimp.setter
def jnt_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_solimp, val_ptr, self.njnt*3 * sizeof(c_double))
@property
def jnt_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_pos, dtype=np.double, count=(self.njnt*3)), (self.njnt, 3, ))
arr.setflags(write=False)
return arr
@jnt_pos.setter
def jnt_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_pos, val_ptr, self.njnt*3 * sizeof(c_double))
@property
def jnt_axis(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_axis, dtype=np.double, count=(self.njnt*3)), (self.njnt, 3, ))
arr.setflags(write=False)
return arr
@jnt_axis.setter
def jnt_axis(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_axis, val_ptr, self.njnt*3 * sizeof(c_double))
@property
def jnt_stiffness(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_stiffness, dtype=np.double, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_stiffness.setter
def jnt_stiffness(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_stiffness, val_ptr, self.njnt*1 * sizeof(c_double))
@property
def jnt_range(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_range, dtype=np.double, count=(self.njnt*2)), (self.njnt, 2, ))
arr.setflags(write=False)
return arr
@jnt_range.setter
def jnt_range(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_range, val_ptr, self.njnt*2 * sizeof(c_double))
@property
def jnt_margin(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_margin, dtype=np.double, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@jnt_margin.setter
def jnt_margin(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_margin, val_ptr, self.njnt*1 * sizeof(c_double))
@property
def jnt_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.jnt_user, dtype=np.double, count=(self.njnt*self.nuser_jnt)), (self.njnt, self.nuser_jnt, ))
arr.setflags(write=False)
return arr
@jnt_user.setter
def jnt_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.jnt_user, val_ptr, self.njnt*self.nuser_jnt * sizeof(c_double))
@property
def dof_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_bodyid, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_bodyid.setter
def dof_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_bodyid, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_jntid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_jntid, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_jntid.setter
def dof_jntid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_jntid, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_parentid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_parentid, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_parentid.setter
def dof_parentid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_parentid, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_Madr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_Madr, dtype=np.int, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_Madr.setter
def dof_Madr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.dof_Madr, val_ptr, self.nv*1 * sizeof(c_int))
@property
def dof_frictional(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_frictional, dtype=np.uint8, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_frictional.setter
def dof_frictional(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.dof_frictional, val_ptr, self.nv*1 * sizeof(c_ubyte))
@property
def dof_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_solref, dtype=np.double, count=(self.nv*2)), (self.nv, 2, ))
arr.setflags(write=False)
return arr
@dof_solref.setter
def dof_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_solref, val_ptr, self.nv*2 * sizeof(c_double))
@property
def dof_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_solimp, dtype=np.double, count=(self.nv*3)), (self.nv, 3, ))
arr.setflags(write=False)
return arr
@dof_solimp.setter
def dof_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_solimp, val_ptr, self.nv*3 * sizeof(c_double))
@property
def dof_frictionloss(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_frictionloss, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_frictionloss.setter
def dof_frictionloss(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_frictionloss, val_ptr, self.nv*1 * sizeof(c_double))
@property
def dof_armature(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_armature, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_armature.setter
def dof_armature(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_armature, val_ptr, self.nv*1 * sizeof(c_double))
@property
def dof_damping(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_damping, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_damping.setter
def dof_damping(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_damping, val_ptr, self.nv*1 * sizeof(c_double))
@property
def dof_invweight0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.dof_invweight0, dtype=np.double, count=(self.nv*1)), (self.nv, 1, ))
arr.setflags(write=False)
return arr
@dof_invweight0.setter
def dof_invweight0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.dof_invweight0, val_ptr, self.nv*1 * sizeof(c_double))
@property
def geom_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_type, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_type.setter
def geom_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_type, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_contype(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_contype, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_contype.setter
def geom_contype(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_contype, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_conaffinity(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_conaffinity, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_conaffinity.setter
def geom_conaffinity(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_conaffinity, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_condim(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_condim, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_condim.setter
def geom_condim(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_condim, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_bodyid, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_bodyid.setter
def geom_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_bodyid, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_dataid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_dataid, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_dataid.setter
def geom_dataid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_dataid, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_matid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_matid, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_matid.setter
def geom_matid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_matid, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_group(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_group, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_group.setter
def geom_group(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.geom_group, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def geom_solmix(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_solmix, dtype=np.double, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_solmix.setter
def geom_solmix(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_solmix, val_ptr, self.ngeom*1 * sizeof(c_double))
@property
def geom_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_solref, dtype=np.double, count=(self.ngeom*2)), (self.ngeom, 2, ))
arr.setflags(write=False)
return arr
@geom_solref.setter
def geom_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_solref, val_ptr, self.ngeom*2 * sizeof(c_double))
@property
def geom_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_solimp, dtype=np.double, count=(self.ngeom*3)), (self.ngeom, 3, ))
arr.setflags(write=False)
return arr
@geom_solimp.setter
def geom_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_solimp, val_ptr, self.ngeom*3 * sizeof(c_double))
@property
def geom_size(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_size, dtype=np.double, count=(self.ngeom*3)), (self.ngeom, 3, ))
arr.setflags(write=False)
return arr
@geom_size.setter
def geom_size(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_size, val_ptr, self.ngeom*3 * sizeof(c_double))
@property
def geom_rbound(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_rbound, dtype=np.double, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_rbound.setter
def geom_rbound(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_rbound, val_ptr, self.ngeom*1 * sizeof(c_double))
@property
def geom_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_pos, dtype=np.double, count=(self.ngeom*3)), (self.ngeom, 3, ))
arr.setflags(write=False)
return arr
@geom_pos.setter
def geom_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_pos, val_ptr, self.ngeom*3 * sizeof(c_double))
@property
def geom_quat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_quat, dtype=np.double, count=(self.ngeom*4)), (self.ngeom, 4, ))
arr.setflags(write=False)
return arr
@geom_quat.setter
def geom_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_quat, val_ptr, self.ngeom*4 * sizeof(c_double))
@property
def geom_friction(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_friction, dtype=np.double, count=(self.ngeom*3)), (self.ngeom, 3, ))
arr.setflags(write=False)
return arr
@geom_friction.setter
def geom_friction(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_friction, val_ptr, self.ngeom*3 * sizeof(c_double))
@property
def geom_margin(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_margin, dtype=np.double, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_margin.setter
def geom_margin(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_margin, val_ptr, self.ngeom*1 * sizeof(c_double))
@property
def geom_gap(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_gap, dtype=np.double, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@geom_gap.setter
def geom_gap(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_gap, val_ptr, self.ngeom*1 * sizeof(c_double))
@property
def geom_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_user, dtype=np.double, count=(self.ngeom*self.nuser_geom)), (self.ngeom, self.nuser_geom, ))
arr.setflags(write=False)
return arr
@geom_user.setter
def geom_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.geom_user, val_ptr, self.ngeom*self.nuser_geom * sizeof(c_double))
@property
def geom_rgba(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.geom_rgba, dtype=np.float, count=(self.ngeom*4)), (self.ngeom, 4, ))
arr.setflags(write=False)
return arr
@geom_rgba.setter
def geom_rgba(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.geom_rgba, val_ptr, self.ngeom*4 * sizeof(c_float))
@property
def site_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_type, dtype=np.int, count=(self.nsite*1)), (self.nsite, 1, ))
arr.setflags(write=False)
return arr
@site_type.setter
def site_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.site_type, val_ptr, self.nsite*1 * sizeof(c_int))
@property
def site_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_bodyid, dtype=np.int, count=(self.nsite*1)), (self.nsite, 1, ))
arr.setflags(write=False)
return arr
@site_bodyid.setter
def site_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.site_bodyid, val_ptr, self.nsite*1 * sizeof(c_int))
@property
def site_matid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_matid, dtype=np.int, count=(self.nsite*1)), (self.nsite, 1, ))
arr.setflags(write=False)
return arr
@site_matid.setter
def site_matid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.site_matid, val_ptr, self.nsite*1 * sizeof(c_int))
@property
def site_group(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_group, dtype=np.int, count=(self.nsite*1)), (self.nsite, 1, ))
arr.setflags(write=False)
return arr
@site_group.setter
def site_group(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.site_group, val_ptr, self.nsite*1 * sizeof(c_int))
@property
def site_size(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_size, dtype=np.double, count=(self.nsite*3)), (self.nsite, 3, ))
arr.setflags(write=False)
return arr
@site_size.setter
def site_size(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_size, val_ptr, self.nsite*3 * sizeof(c_double))
@property
def site_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_pos, dtype=np.double, count=(self.nsite*3)), (self.nsite, 3, ))
arr.setflags(write=False)
return arr
@site_pos.setter
def site_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_pos, val_ptr, self.nsite*3 * sizeof(c_double))
@property
def site_quat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_quat, dtype=np.double, count=(self.nsite*4)), (self.nsite, 4, ))
arr.setflags(write=False)
return arr
@site_quat.setter
def site_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_quat, val_ptr, self.nsite*4 * sizeof(c_double))
@property
def site_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_user, dtype=np.double, count=(self.nsite*self.nuser_site)), (self.nsite, self.nuser_site, ))
arr.setflags(write=False)
return arr
@site_user.setter
def site_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.site_user, val_ptr, self.nsite*self.nuser_site * sizeof(c_double))
@property
def site_rgba(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.site_rgba, dtype=np.float, count=(self.nsite*4)), (self.nsite, 4, ))
arr.setflags(write=False)
return arr
@site_rgba.setter
def site_rgba(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.site_rgba, val_ptr, self.nsite*4 * sizeof(c_float))
@property
def cam_mode(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_mode, dtype=np.int, count=(self.ncam*1)), (self.ncam, 1, ))
arr.setflags(write=False)
return arr
@cam_mode.setter
def cam_mode(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.cam_mode, val_ptr, self.ncam*1 * sizeof(c_int))
@property
def cam_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_bodyid, dtype=np.int, count=(self.ncam*1)), (self.ncam, 1, ))
arr.setflags(write=False)
return arr
@cam_bodyid.setter
def cam_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.cam_bodyid, val_ptr, self.ncam*1 * sizeof(c_int))
@property
def cam_targetbodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_targetbodyid, dtype=np.int, count=(self.ncam*1)), (self.ncam, 1, ))
arr.setflags(write=False)
return arr
@cam_targetbodyid.setter
def cam_targetbodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.cam_targetbodyid, val_ptr, self.ncam*1 * sizeof(c_int))
@property
def cam_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_pos, dtype=np.double, count=(self.ncam*3)), (self.ncam, 3, ))
arr.setflags(write=False)
return arr
@cam_pos.setter
def cam_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_pos, val_ptr, self.ncam*3 * sizeof(c_double))
@property
def cam_quat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_quat, dtype=np.double, count=(self.ncam*4)), (self.ncam, 4, ))
arr.setflags(write=False)
return arr
@cam_quat.setter
def cam_quat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_quat, val_ptr, self.ncam*4 * sizeof(c_double))
@property
def cam_poscom0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_poscom0, dtype=np.double, count=(self.ncam*3)), (self.ncam, 3, ))
arr.setflags(write=False)
return arr
@cam_poscom0.setter
def cam_poscom0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_poscom0, val_ptr, self.ncam*3 * sizeof(c_double))
@property
def cam_pos0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_pos0, dtype=np.double, count=(self.ncam*3)), (self.ncam, 3, ))
arr.setflags(write=False)
return arr
@cam_pos0.setter
def cam_pos0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_pos0, val_ptr, self.ncam*3 * sizeof(c_double))
@property
def cam_mat0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_mat0, dtype=np.double, count=(self.ncam*9)), (self.ncam, 9, ))
arr.setflags(write=False)
return arr
@cam_mat0.setter
def cam_mat0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_mat0, val_ptr, self.ncam*9 * sizeof(c_double))
@property
def cam_fovy(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_fovy, dtype=np.double, count=(self.ncam*1)), (self.ncam, 1, ))
arr.setflags(write=False)
return arr
@cam_fovy.setter
def cam_fovy(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_fovy, val_ptr, self.ncam*1 * sizeof(c_double))
@property
def cam_ipd(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.cam_ipd, dtype=np.double, count=(self.ncam*1)), (self.ncam, 1, ))
arr.setflags(write=False)
return arr
@cam_ipd.setter
def cam_ipd(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.cam_ipd, val_ptr, self.ncam*1 * sizeof(c_double))
@property
def light_mode(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_mode, dtype=np.int, count=(self.nlight*1)), (self.nlight, 1, ))
arr.setflags(write=False)
return arr
@light_mode.setter
def light_mode(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.light_mode, val_ptr, self.nlight*1 * sizeof(c_int))
@property
def light_bodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_bodyid, dtype=np.int, count=(self.nlight*1)), (self.nlight, 1, ))
arr.setflags(write=False)
return arr
@light_bodyid.setter
def light_bodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.light_bodyid, val_ptr, self.nlight*1 * sizeof(c_int))
@property
def light_targetbodyid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_targetbodyid, dtype=np.int, count=(self.nlight*1)), (self.nlight, 1, ))
arr.setflags(write=False)
return arr
@light_targetbodyid.setter
def light_targetbodyid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.light_targetbodyid, val_ptr, self.nlight*1 * sizeof(c_int))
@property
def light_directional(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_directional, dtype=np.uint8, count=(self.nlight*1)), (self.nlight, 1, ))
arr.setflags(write=False)
return arr
@light_directional.setter
def light_directional(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.light_directional, val_ptr, self.nlight*1 * sizeof(c_ubyte))
@property
def light_castshadow(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_castshadow, dtype=np.uint8, count=(self.nlight*1)), (self.nlight, 1, ))
arr.setflags(write=False)
return arr
@light_castshadow.setter
def light_castshadow(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.light_castshadow, val_ptr, self.nlight*1 * sizeof(c_ubyte))
@property
def light_active(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_active, dtype=np.uint8, count=(self.nlight*1)), (self.nlight, 1, ))
arr.setflags(write=False)
return arr
@light_active.setter
def light_active(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.light_active, val_ptr, self.nlight*1 * sizeof(c_ubyte))
@property
def light_pos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_pos, dtype=np.double, count=(self.nlight*3)), (self.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_pos.setter
def light_pos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.light_pos, val_ptr, self.nlight*3 * sizeof(c_double))
@property
def light_dir(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_dir, dtype=np.double, count=(self.nlight*3)), (self.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_dir.setter
def light_dir(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.light_dir, val_ptr, self.nlight*3 * sizeof(c_double))
@property
def light_poscom0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_poscom0, dtype=np.double, count=(self.nlight*3)), (self.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_poscom0.setter
def light_poscom0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.light_poscom0, val_ptr, self.nlight*3 * sizeof(c_double))
@property
def light_pos0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_pos0, dtype=np.double, count=(self.nlight*3)), (self.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_pos0.setter
def light_pos0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.light_pos0, val_ptr, self.nlight*3 * sizeof(c_double))
@property
def light_dir0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_dir0, dtype=np.double, count=(self.nlight*3)), (self.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_dir0.setter
def light_dir0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.light_dir0, val_ptr, self.nlight*3 * sizeof(c_double))
@property
def light_attenuation(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_attenuation, dtype=np.float, count=(self.nlight*3)), (self.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_attenuation.setter
def light_attenuation(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.light_attenuation, val_ptr, self.nlight*3 * sizeof(c_float))
@property
def light_cutoff(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_cutoff, dtype=np.float, count=(self.nlight*1)), (self.nlight, 1, ))
arr.setflags(write=False)
return arr
@light_cutoff.setter
def light_cutoff(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.light_cutoff, val_ptr, self.nlight*1 * sizeof(c_float))
@property
def light_exponent(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_exponent, dtype=np.float, count=(self.nlight*1)), (self.nlight, 1, ))
arr.setflags(write=False)
return arr
@light_exponent.setter
def light_exponent(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.light_exponent, val_ptr, self.nlight*1 * sizeof(c_float))
@property
def light_ambient(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_ambient, dtype=np.float, count=(self.nlight*3)), (self.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_ambient.setter
def light_ambient(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.light_ambient, val_ptr, self.nlight*3 * sizeof(c_float))
@property
def light_diffuse(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_diffuse, dtype=np.float, count=(self.nlight*3)), (self.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_diffuse.setter
def light_diffuse(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.light_diffuse, val_ptr, self.nlight*3 * sizeof(c_float))
@property
def light_specular(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.light_specular, dtype=np.float, count=(self.nlight*3)), (self.nlight, 3, ))
arr.setflags(write=False)
return arr
@light_specular.setter
def light_specular(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.light_specular, val_ptr, self.nlight*3 * sizeof(c_float))
@property
def mesh_faceadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mesh_faceadr, dtype=np.int, count=(self.nmesh*1)), (self.nmesh, 1, ))
arr.setflags(write=False)
return arr
@mesh_faceadr.setter
def mesh_faceadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.mesh_faceadr, val_ptr, self.nmesh*1 * sizeof(c_int))
@property
def mesh_facenum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mesh_facenum, dtype=np.int, count=(self.nmesh*1)), (self.nmesh, 1, ))
arr.setflags(write=False)
return arr
@mesh_facenum.setter
def mesh_facenum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.mesh_facenum, val_ptr, self.nmesh*1 * sizeof(c_int))
@property
def mesh_vertadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mesh_vertadr, dtype=np.int, count=(self.nmesh*1)), (self.nmesh, 1, ))
arr.setflags(write=False)
return arr
@mesh_vertadr.setter
def mesh_vertadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.mesh_vertadr, val_ptr, self.nmesh*1 * sizeof(c_int))
@property
def mesh_vertnum(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mesh_vertnum, dtype=np.int, count=(self.nmesh*1)), (self.nmesh, 1, ))
arr.setflags(write=False)
return arr
@mesh_vertnum.setter
def mesh_vertnum(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.mesh_vertnum, val_ptr, self.nmesh*1 * sizeof(c_int))
@property
def mesh_graphadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mesh_graphadr, dtype=np.int, count=(self.nmesh*1)), (self.nmesh, 1, ))
arr.setflags(write=False)
return arr
@mesh_graphadr.setter
def mesh_graphadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.mesh_graphadr, val_ptr, self.nmesh*1 * sizeof(c_int))
@property
def mesh_vert(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mesh_vert, dtype=np.float, count=(self.nmeshvert*3)), (self.nmeshvert, 3, ))
arr.setflags(write=False)
return arr
@mesh_vert.setter
def mesh_vert(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.mesh_vert, val_ptr, self.nmeshvert*3 * sizeof(c_float))
@property
def mesh_normal(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mesh_normal, dtype=np.float, count=(self.nmeshvert*3)), (self.nmeshvert, 3, ))
arr.setflags(write=False)
return arr
@mesh_normal.setter
def mesh_normal(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.mesh_normal, val_ptr, self.nmeshvert*3 * sizeof(c_float))
@property
def mesh_face(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mesh_face, dtype=np.int, count=(self.nmeshface*3)), (self.nmeshface, 3, ))
arr.setflags(write=False)
return arr
@mesh_face.setter
def mesh_face(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.mesh_face, val_ptr, self.nmeshface*3 * sizeof(c_int))
@property
def mesh_graph(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mesh_graph, dtype=np.int, count=(self.nmeshgraph*1)), (self.nmeshgraph, 1, ))
arr.setflags(write=False)
return arr
@mesh_graph.setter
def mesh_graph(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.mesh_graph, val_ptr, self.nmeshgraph*1 * sizeof(c_int))
@property
def hfield_size(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.hfield_size, dtype=np.double, count=(self.nhfield*4)), (self.nhfield, 4, ))
arr.setflags(write=False)
return arr
@hfield_size.setter
def hfield_size(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.hfield_size, val_ptr, self.nhfield*4 * sizeof(c_double))
@property
def hfield_nrow(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.hfield_nrow, dtype=np.int, count=(self.nhfield*1)), (self.nhfield, 1, ))
arr.setflags(write=False)
return arr
@hfield_nrow.setter
def hfield_nrow(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.hfield_nrow, val_ptr, self.nhfield*1 * sizeof(c_int))
@property
def hfield_ncol(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.hfield_ncol, dtype=np.int, count=(self.nhfield*1)), (self.nhfield, 1, ))
arr.setflags(write=False)
return arr
@hfield_ncol.setter
def hfield_ncol(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.hfield_ncol, val_ptr, self.nhfield*1 * sizeof(c_int))
@property
def hfield_adr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.hfield_adr, dtype=np.int, count=(self.nhfield*1)), (self.nhfield, 1, ))
arr.setflags(write=False)
return arr
@hfield_adr.setter
def hfield_adr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.hfield_adr, val_ptr, self.nhfield*1 * sizeof(c_int))
@property
def hfield_data(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.hfield_data, dtype=np.float, count=(self.nhfielddata*1)), (self.nhfielddata, 1, ))
arr.setflags(write=False)
return arr
@hfield_data.setter
def hfield_data(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.hfield_data, val_ptr, self.nhfielddata*1 * sizeof(c_float))
@property
def tex_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tex_type, dtype=np.int, count=(self.ntex*1)), (self.ntex, 1, ))
arr.setflags(write=False)
return arr
@tex_type.setter
def tex_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.tex_type, val_ptr, self.ntex*1 * sizeof(c_int))
@property
def tex_height(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tex_height, dtype=np.int, count=(self.ntex*1)), (self.ntex, 1, ))
arr.setflags(write=False)
return arr
@tex_height.setter
def tex_height(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.tex_height, val_ptr, self.ntex*1 * sizeof(c_int))
@property
def tex_width(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tex_width, dtype=np.int, count=(self.ntex*1)), (self.ntex, 1, ))
arr.setflags(write=False)
return arr
@tex_width.setter
def tex_width(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.tex_width, val_ptr, self.ntex*1 * sizeof(c_int))
@property
def tex_adr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tex_adr, dtype=np.int, count=(self.ntex*1)), (self.ntex, 1, ))
arr.setflags(write=False)
return arr
@tex_adr.setter
def tex_adr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.tex_adr, val_ptr, self.ntex*1 * sizeof(c_int))
@property
def tex_rgb(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tex_rgb, dtype=np.uint8, count=(self.ntexdata*1)), (self.ntexdata, 1, ))
arr.setflags(write=False)
return arr
@tex_rgb.setter
def tex_rgb(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.tex_rgb, val_ptr, self.ntexdata*1 * sizeof(c_ubyte))
@property
def mat_texid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mat_texid, dtype=np.int, count=(self.nmat*1)), (self.nmat, 1, ))
arr.setflags(write=False)
return arr
@mat_texid.setter
def mat_texid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.mat_texid, val_ptr, self.nmat*1 * sizeof(c_int))
@property
def mat_texuniform(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mat_texuniform, dtype=np.uint8, count=(self.nmat*1)), (self.nmat, 1, ))
arr.setflags(write=False)
return arr
@mat_texuniform.setter
def mat_texuniform(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.mat_texuniform, val_ptr, self.nmat*1 * sizeof(c_ubyte))
@property
def mat_texrepeat(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mat_texrepeat, dtype=np.float, count=(self.nmat*2)), (self.nmat, 2, ))
arr.setflags(write=False)
return arr
@mat_texrepeat.setter
def mat_texrepeat(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.mat_texrepeat, val_ptr, self.nmat*2 * sizeof(c_float))
@property
def mat_emission(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mat_emission, dtype=np.float, count=(self.nmat*1)), (self.nmat, 1, ))
arr.setflags(write=False)
return arr
@mat_emission.setter
def mat_emission(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.mat_emission, val_ptr, self.nmat*1 * sizeof(c_float))
@property
def mat_specular(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mat_specular, dtype=np.float, count=(self.nmat*1)), (self.nmat, 1, ))
arr.setflags(write=False)
return arr
@mat_specular.setter
def mat_specular(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.mat_specular, val_ptr, self.nmat*1 * sizeof(c_float))
@property
def mat_shininess(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mat_shininess, dtype=np.float, count=(self.nmat*1)), (self.nmat, 1, ))
arr.setflags(write=False)
return arr
@mat_shininess.setter
def mat_shininess(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.mat_shininess, val_ptr, self.nmat*1 * sizeof(c_float))
@property
def mat_reflectance(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mat_reflectance, dtype=np.float, count=(self.nmat*1)), (self.nmat, 1, ))
arr.setflags(write=False)
return arr
@mat_reflectance.setter
def mat_reflectance(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.mat_reflectance, val_ptr, self.nmat*1 * sizeof(c_float))
@property
def mat_rgba(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.mat_rgba, dtype=np.float, count=(self.nmat*4)), (self.nmat, 4, ))
arr.setflags(write=False)
return arr
@mat_rgba.setter
def mat_rgba(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.mat_rgba, val_ptr, self.nmat*4 * sizeof(c_float))
@property
def pair_dim(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pair_dim, dtype=np.int, count=(self.npair*1)), (self.npair, 1, ))
arr.setflags(write=False)
return arr
@pair_dim.setter
def pair_dim(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.pair_dim, val_ptr, self.npair*1 * sizeof(c_int))
@property
def pair_geom1(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pair_geom1, dtype=np.int, count=(self.npair*1)), (self.npair, 1, ))
arr.setflags(write=False)
return arr
@pair_geom1.setter
def pair_geom1(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.pair_geom1, val_ptr, self.npair*1 * sizeof(c_int))
@property
def pair_geom2(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pair_geom2, dtype=np.int, count=(self.npair*1)), (self.npair, 1, ))
arr.setflags(write=False)
return arr
@pair_geom2.setter
def pair_geom2(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.pair_geom2, val_ptr, self.npair*1 * sizeof(c_int))
@property
def pair_signature(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pair_signature, dtype=np.int, count=(self.npair*1)), (self.npair, 1, ))
arr.setflags(write=False)
return arr
@pair_signature.setter
def pair_signature(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.pair_signature, val_ptr, self.npair*1 * sizeof(c_int))
@property
def pair_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pair_solref, dtype=np.double, count=(self.npair*2)), (self.npair, 2, ))
arr.setflags(write=False)
return arr
@pair_solref.setter
def pair_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.pair_solref, val_ptr, self.npair*2 * sizeof(c_double))
@property
def pair_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pair_solimp, dtype=np.double, count=(self.npair*3)), (self.npair, 3, ))
arr.setflags(write=False)
return arr
@pair_solimp.setter
def pair_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.pair_solimp, val_ptr, self.npair*3 * sizeof(c_double))
@property
def pair_margin(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pair_margin, dtype=np.double, count=(self.npair*1)), (self.npair, 1, ))
arr.setflags(write=False)
return arr
@pair_margin.setter
def pair_margin(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.pair_margin, val_ptr, self.npair*1 * sizeof(c_double))
@property
def pair_gap(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pair_gap, dtype=np.double, count=(self.npair*1)), (self.npair, 1, ))
arr.setflags(write=False)
return arr
@pair_gap.setter
def pair_gap(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.pair_gap, val_ptr, self.npair*1 * sizeof(c_double))
@property
def pair_friction(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.pair_friction, dtype=np.double, count=(self.npair*5)), (self.npair, 5, ))
arr.setflags(write=False)
return arr
@pair_friction.setter
def pair_friction(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.pair_friction, val_ptr, self.npair*5 * sizeof(c_double))
@property
def exclude_signature(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.exclude_signature, dtype=np.int, count=(self.nexclude*1)), (self.nexclude, 1, ))
arr.setflags(write=False)
return arr
@exclude_signature.setter
def exclude_signature(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.exclude_signature, val_ptr, self.nexclude*1 * sizeof(c_int))
@property
def eq_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.eq_type, dtype=np.int, count=(self.neq*1)), (self.neq, 1, ))
arr.setflags(write=False)
return arr
@eq_type.setter
def eq_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.eq_type, val_ptr, self.neq*1 * sizeof(c_int))
@property
def eq_obj1id(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.eq_obj1id, dtype=np.int, count=(self.neq*1)), (self.neq, 1, ))
arr.setflags(write=False)
return arr
@eq_obj1id.setter
def eq_obj1id(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.eq_obj1id, val_ptr, self.neq*1 * sizeof(c_int))
@property
def eq_obj2id(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.eq_obj2id, dtype=np.int, count=(self.neq*1)), (self.neq, 1, ))
arr.setflags(write=False)
return arr
@eq_obj2id.setter
def eq_obj2id(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.eq_obj2id, val_ptr, self.neq*1 * sizeof(c_int))
@property
def eq_active(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.eq_active, dtype=np.uint8, count=(self.neq*1)), (self.neq, 1, ))
arr.setflags(write=False)
return arr
@eq_active.setter
def eq_active(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.eq_active, val_ptr, self.neq*1 * sizeof(c_ubyte))
@property
def eq_solref(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.eq_solref, dtype=np.double, count=(self.neq*2)), (self.neq, 2, ))
arr.setflags(write=False)
return arr
@eq_solref.setter
def eq_solref(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.eq_solref, val_ptr, self.neq*2 * sizeof(c_double))
@property
def eq_solimp(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.eq_solimp, dtype=np.double, count=(self.neq*3)), (self.neq, 3, ))
arr.setflags(write=False)
return arr
@eq_solimp.setter
def eq_solimp(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.eq_solimp, val_ptr, self.neq*3 * sizeof(c_double))
@property
def eq_data(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.eq_data, dtype=np.double, count=(self.neq*7)), (self.neq, 7, ))
arr.setflags(write=False)
return arr
@eq_data.setter
def eq_data(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.eq_data, val_ptr, self.neq*7 * sizeof(c_double))
@property
def tendon_adr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_adr, dtype=np.int, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_adr.setter
def tendon_adr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.tendon_adr, val_ptr, self.ntendon*1 * sizeof(c_int))
@property
def tendon_num(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_num, dtype=np.int, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_num.setter
def tendon_num(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.tendon_num, val_ptr, self.ntendon*1 * sizeof(c_int))
@property
def tendon_matid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_matid, dtype=np.int, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_matid.setter
def tendon_matid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.tendon_matid, val_ptr, self.ntendon*1 * sizeof(c_int))
@property
def tendon_limited(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_limited, dtype=np.uint8, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_limited.setter
def tendon_limited(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.tendon_limited, val_ptr, self.ntendon*1 * sizeof(c_ubyte))
@property
def tendon_frictional(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_frictional, dtype=np.uint8, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_frictional.setter
def tendon_frictional(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.tendon_frictional, val_ptr, self.ntendon*1 * sizeof(c_ubyte))
@property
def tendon_width(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_width, dtype=np.double, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_width.setter
def tendon_width(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_width, val_ptr, self.ntendon*1 * sizeof(c_double))
@property
def tendon_solref_lim(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_solref_lim, dtype=np.double, count=(self.ntendon*2)), (self.ntendon, 2, ))
arr.setflags(write=False)
return arr
@tendon_solref_lim.setter
def tendon_solref_lim(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_solref_lim, val_ptr, self.ntendon*2 * sizeof(c_double))
@property
def tendon_solimp_lim(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_solimp_lim, dtype=np.double, count=(self.ntendon*3)), (self.ntendon, 3, ))
arr.setflags(write=False)
return arr
@tendon_solimp_lim.setter
def tendon_solimp_lim(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_solimp_lim, val_ptr, self.ntendon*3 * sizeof(c_double))
@property
def tendon_solref_fri(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_solref_fri, dtype=np.double, count=(self.ntendon*2)), (self.ntendon, 2, ))
arr.setflags(write=False)
return arr
@tendon_solref_fri.setter
def tendon_solref_fri(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_solref_fri, val_ptr, self.ntendon*2 * sizeof(c_double))
@property
def tendon_solimp_fri(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_solimp_fri, dtype=np.double, count=(self.ntendon*3)), (self.ntendon, 3, ))
arr.setflags(write=False)
return arr
@tendon_solimp_fri.setter
def tendon_solimp_fri(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_solimp_fri, val_ptr, self.ntendon*3 * sizeof(c_double))
@property
def tendon_range(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_range, dtype=np.double, count=(self.ntendon*2)), (self.ntendon, 2, ))
arr.setflags(write=False)
return arr
@tendon_range.setter
def tendon_range(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_range, val_ptr, self.ntendon*2 * sizeof(c_double))
@property
def tendon_margin(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_margin, dtype=np.double, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_margin.setter
def tendon_margin(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_margin, val_ptr, self.ntendon*1 * sizeof(c_double))
@property
def tendon_stiffness(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_stiffness, dtype=np.double, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_stiffness.setter
def tendon_stiffness(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_stiffness, val_ptr, self.ntendon*1 * sizeof(c_double))
@property
def tendon_damping(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_damping, dtype=np.double, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_damping.setter
def tendon_damping(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_damping, val_ptr, self.ntendon*1 * sizeof(c_double))
@property
def tendon_frictionloss(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_frictionloss, dtype=np.double, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_frictionloss.setter
def tendon_frictionloss(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_frictionloss, val_ptr, self.ntendon*1 * sizeof(c_double))
@property
def tendon_lengthspring(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_lengthspring, dtype=np.double, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_lengthspring.setter
def tendon_lengthspring(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_lengthspring, val_ptr, self.ntendon*1 * sizeof(c_double))
@property
def tendon_length0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_length0, dtype=np.double, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_length0.setter
def tendon_length0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_length0, val_ptr, self.ntendon*1 * sizeof(c_double))
@property
def tendon_invweight0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_invweight0, dtype=np.double, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@tendon_invweight0.setter
def tendon_invweight0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_invweight0, val_ptr, self.ntendon*1 * sizeof(c_double))
@property
def tendon_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_user, dtype=np.double, count=(self.ntendon*self.nuser_tendon)), (self.ntendon, self.nuser_tendon, ))
arr.setflags(write=False)
return arr
@tendon_user.setter
def tendon_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.tendon_user, val_ptr, self.ntendon*self.nuser_tendon * sizeof(c_double))
@property
def tendon_rgba(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.tendon_rgba, dtype=np.float, count=(self.ntendon*4)), (self.ntendon, 4, ))
arr.setflags(write=False)
return arr
@tendon_rgba.setter
def tendon_rgba(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_float))
memmove(self._wrapped.contents.tendon_rgba, val_ptr, self.ntendon*4 * sizeof(c_float))
@property
def wrap_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.wrap_type, dtype=np.int, count=(self.nwrap*1)), (self.nwrap, 1, ))
arr.setflags(write=False)
return arr
@wrap_type.setter
def wrap_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.wrap_type, val_ptr, self.nwrap*1 * sizeof(c_int))
@property
def wrap_objid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.wrap_objid, dtype=np.int, count=(self.nwrap*1)), (self.nwrap, 1, ))
arr.setflags(write=False)
return arr
@wrap_objid.setter
def wrap_objid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.wrap_objid, val_ptr, self.nwrap*1 * sizeof(c_int))
@property
def wrap_prm(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.wrap_prm, dtype=np.double, count=(self.nwrap*1)), (self.nwrap, 1, ))
arr.setflags(write=False)
return arr
@wrap_prm.setter
def wrap_prm(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.wrap_prm, val_ptr, self.nwrap*1 * sizeof(c_double))
@property
def actuator_trntype(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_trntype, dtype=np.int, count=(self.nu*1)), (self.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_trntype.setter
def actuator_trntype(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.actuator_trntype, val_ptr, self.nu*1 * sizeof(c_int))
@property
def actuator_dyntype(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_dyntype, dtype=np.int, count=(self.nu*1)), (self.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_dyntype.setter
def actuator_dyntype(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.actuator_dyntype, val_ptr, self.nu*1 * sizeof(c_int))
@property
def actuator_gaintype(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_gaintype, dtype=np.int, count=(self.nu*1)), (self.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_gaintype.setter
def actuator_gaintype(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.actuator_gaintype, val_ptr, self.nu*1 * sizeof(c_int))
@property
def actuator_biastype(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_biastype, dtype=np.int, count=(self.nu*1)), (self.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_biastype.setter
def actuator_biastype(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.actuator_biastype, val_ptr, self.nu*1 * sizeof(c_int))
@property
def actuator_trnid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_trnid, dtype=np.int, count=(self.nu*2)), (self.nu, 2, ))
arr.setflags(write=False)
return arr
@actuator_trnid.setter
def actuator_trnid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.actuator_trnid, val_ptr, self.nu*2 * sizeof(c_int))
@property
def actuator_ctrllimited(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_ctrllimited, dtype=np.uint8, count=(self.nu*1)), (self.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_ctrllimited.setter
def actuator_ctrllimited(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.actuator_ctrllimited, val_ptr, self.nu*1 * sizeof(c_ubyte))
@property
def actuator_forcelimited(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_forcelimited, dtype=np.uint8, count=(self.nu*1)), (self.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_forcelimited.setter
def actuator_forcelimited(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_ubyte))
memmove(self._wrapped.contents.actuator_forcelimited, val_ptr, self.nu*1 * sizeof(c_ubyte))
@property
def actuator_dynprm(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_dynprm, dtype=np.double, count=(self.nu*3)), (self.nu, 3, ))
arr.setflags(write=False)
return arr
@actuator_dynprm.setter
def actuator_dynprm(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_dynprm, val_ptr, self.nu*3 * sizeof(c_double))
@property
def actuator_gainprm(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_gainprm, dtype=np.double, count=(self.nu*3)), (self.nu, 3, ))
arr.setflags(write=False)
return arr
@actuator_gainprm.setter
def actuator_gainprm(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_gainprm, val_ptr, self.nu*3 * sizeof(c_double))
@property
def actuator_biasprm(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_biasprm, dtype=np.double, count=(self.nu*3)), (self.nu, 3, ))
arr.setflags(write=False)
return arr
@actuator_biasprm.setter
def actuator_biasprm(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_biasprm, val_ptr, self.nu*3 * sizeof(c_double))
@property
def actuator_ctrlrange(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_ctrlrange, dtype=np.double, count=(self.nu*2)), (self.nu, 2, ))
arr.setflags(write=False)
return arr
@actuator_ctrlrange.setter
def actuator_ctrlrange(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_ctrlrange, val_ptr, self.nu*2 * sizeof(c_double))
@property
def actuator_forcerange(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_forcerange, dtype=np.double, count=(self.nu*2)), (self.nu, 2, ))
arr.setflags(write=False)
return arr
@actuator_forcerange.setter
def actuator_forcerange(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_forcerange, val_ptr, self.nu*2 * sizeof(c_double))
@property
def actuator_gear(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_gear, dtype=np.double, count=(self.nu*6)), (self.nu, 6, ))
arr.setflags(write=False)
return arr
@actuator_gear.setter
def actuator_gear(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_gear, val_ptr, self.nu*6 * sizeof(c_double))
@property
def actuator_cranklength(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_cranklength, dtype=np.double, count=(self.nu*1)), (self.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_cranklength.setter
def actuator_cranklength(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_cranklength, val_ptr, self.nu*1 * sizeof(c_double))
@property
def actuator_invweight0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_invweight0, dtype=np.double, count=(self.nu*1)), (self.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_invweight0.setter
def actuator_invweight0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_invweight0, val_ptr, self.nu*1 * sizeof(c_double))
@property
def actuator_length0(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_length0, dtype=np.double, count=(self.nu*1)), (self.nu, 1, ))
arr.setflags(write=False)
return arr
@actuator_length0.setter
def actuator_length0(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_length0, val_ptr, self.nu*1 * sizeof(c_double))
@property
def actuator_lengthrange(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_lengthrange, dtype=np.double, count=(self.nu*2)), (self.nu, 2, ))
arr.setflags(write=False)
return arr
@actuator_lengthrange.setter
def actuator_lengthrange(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_lengthrange, val_ptr, self.nu*2 * sizeof(c_double))
@property
def actuator_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.actuator_user, dtype=np.double, count=(self.nu*self.nuser_actuator)), (self.nu, self.nuser_actuator, ))
arr.setflags(write=False)
return arr
@actuator_user.setter
def actuator_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.actuator_user, val_ptr, self.nu*self.nuser_actuator * sizeof(c_double))
@property
def sensor_type(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.sensor_type, dtype=np.int, count=(self.nsensor*1)), (self.nsensor, 1, ))
arr.setflags(write=False)
return arr
@sensor_type.setter
def sensor_type(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.sensor_type, val_ptr, self.nsensor*1 * sizeof(c_int))
@property
def sensor_objid(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.sensor_objid, dtype=np.int, count=(self.nsensor*1)), (self.nsensor, 1, ))
arr.setflags(write=False)
return arr
@sensor_objid.setter
def sensor_objid(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.sensor_objid, val_ptr, self.nsensor*1 * sizeof(c_int))
@property
def sensor_dim(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.sensor_dim, dtype=np.int, count=(self.nsensor*1)), (self.nsensor, 1, ))
arr.setflags(write=False)
return arr
@sensor_dim.setter
def sensor_dim(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.sensor_dim, val_ptr, self.nsensor*1 * sizeof(c_int))
@property
def sensor_adr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.sensor_adr, dtype=np.int, count=(self.nsensor*1)), (self.nsensor, 1, ))
arr.setflags(write=False)
return arr
@sensor_adr.setter
def sensor_adr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.sensor_adr, val_ptr, self.nsensor*1 * sizeof(c_int))
@property
def sensor_scale(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.sensor_scale, dtype=np.double, count=(self.nsensor*1)), (self.nsensor, 1, ))
arr.setflags(write=False)
return arr
@sensor_scale.setter
def sensor_scale(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.sensor_scale, val_ptr, self.nsensor*1 * sizeof(c_double))
@property
def sensor_user(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.sensor_user, dtype=np.double, count=(self.nsensor*self.nuser_sensor)), (self.nsensor, self.nuser_sensor, ))
arr.setflags(write=False)
return arr
@sensor_user.setter
def sensor_user(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.sensor_user, val_ptr, self.nsensor*self.nuser_sensor * sizeof(c_double))
@property
def numeric_adr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.numeric_adr, dtype=np.int, count=(self.nnumeric*1)), (self.nnumeric, 1, ))
arr.setflags(write=False)
return arr
@numeric_adr.setter
def numeric_adr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.numeric_adr, val_ptr, self.nnumeric*1 * sizeof(c_int))
@property
def numeric_size(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.numeric_size, dtype=np.int, count=(self.nnumeric*1)), (self.nnumeric, 1, ))
arr.setflags(write=False)
return arr
@numeric_size.setter
def numeric_size(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.numeric_size, val_ptr, self.nnumeric*1 * sizeof(c_int))
@property
def numeric_data(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.numeric_data, dtype=np.double, count=(self.nnumericdata*1)), (self.nnumericdata, 1, ))
arr.setflags(write=False)
return arr
@numeric_data.setter
def numeric_data(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.numeric_data, val_ptr, self.nnumericdata*1 * sizeof(c_double))
@property
def text_adr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.text_adr, dtype=np.int, count=(self.ntext*1)), (self.ntext, 1, ))
arr.setflags(write=False)
return arr
@text_adr.setter
def text_adr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.text_adr, val_ptr, self.ntext*1 * sizeof(c_int))
@property
def text_data(self):
return self._wrapped.contents.text_data
@property
def key_time(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.key_time, dtype=np.double, count=(self.nkey*1)), (self.nkey, 1, ))
arr.setflags(write=False)
return arr
@key_time.setter
def key_time(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.key_time, val_ptr, self.nkey*1 * sizeof(c_double))
@property
def key_qpos(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.key_qpos, dtype=np.double, count=(self.nkey*self.nq)), (self.nkey, self.nq, ))
arr.setflags(write=False)
return arr
@key_qpos.setter
def key_qpos(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.key_qpos, val_ptr, self.nkey*self.nq * sizeof(c_double))
@property
def key_qvel(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.key_qvel, dtype=np.double, count=(self.nkey*self.nv)), (self.nkey, self.nv, ))
arr.setflags(write=False)
return arr
@key_qvel.setter
def key_qvel(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.key_qvel, val_ptr, self.nkey*self.nv * sizeof(c_double))
@property
def key_act(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.key_act, dtype=np.double, count=(self.nkey*self.na)), (self.nkey, self.na, ))
arr.setflags(write=False)
return arr
@key_act.setter
def key_act(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_double))
memmove(self._wrapped.contents.key_act, val_ptr, self.nkey*self.na * sizeof(c_double))
@property
def name_bodyadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_bodyadr, dtype=np.int, count=(self.nbody*1)), (self.nbody, 1, ))
arr.setflags(write=False)
return arr
@name_bodyadr.setter
def name_bodyadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_bodyadr, val_ptr, self.nbody*1 * sizeof(c_int))
@property
def name_jntadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_jntadr, dtype=np.int, count=(self.njnt*1)), (self.njnt, 1, ))
arr.setflags(write=False)
return arr
@name_jntadr.setter
def name_jntadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_jntadr, val_ptr, self.njnt*1 * sizeof(c_int))
@property
def name_geomadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_geomadr, dtype=np.int, count=(self.ngeom*1)), (self.ngeom, 1, ))
arr.setflags(write=False)
return arr
@name_geomadr.setter
def name_geomadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_geomadr, val_ptr, self.ngeom*1 * sizeof(c_int))
@property
def name_siteadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_siteadr, dtype=np.int, count=(self.nsite*1)), (self.nsite, 1, ))
arr.setflags(write=False)
return arr
@name_siteadr.setter
def name_siteadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_siteadr, val_ptr, self.nsite*1 * sizeof(c_int))
@property
def name_camadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_camadr, dtype=np.int, count=(self.ncam*1)), (self.ncam, 1, ))
arr.setflags(write=False)
return arr
@name_camadr.setter
def name_camadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_camadr, val_ptr, self.ncam*1 * sizeof(c_int))
@property
def name_lightadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_lightadr, dtype=np.int, count=(self.nlight*1)), (self.nlight, 1, ))
arr.setflags(write=False)
return arr
@name_lightadr.setter
def name_lightadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_lightadr, val_ptr, self.nlight*1 * sizeof(c_int))
@property
def name_meshadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_meshadr, dtype=np.int, count=(self.nmesh*1)), (self.nmesh, 1, ))
arr.setflags(write=False)
return arr
@name_meshadr.setter
def name_meshadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_meshadr, val_ptr, self.nmesh*1 * sizeof(c_int))
@property
def name_hfieldadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_hfieldadr, dtype=np.int, count=(self.nhfield*1)), (self.nhfield, 1, ))
arr.setflags(write=False)
return arr
@name_hfieldadr.setter
def name_hfieldadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_hfieldadr, val_ptr, self.nhfield*1 * sizeof(c_int))
@property
def name_texadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_texadr, dtype=np.int, count=(self.ntex*1)), (self.ntex, 1, ))
arr.setflags(write=False)
return arr
@name_texadr.setter
def name_texadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_texadr, val_ptr, self.ntex*1 * sizeof(c_int))
@property
def name_matadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_matadr, dtype=np.int, count=(self.nmat*1)), (self.nmat, 1, ))
arr.setflags(write=False)
return arr
@name_matadr.setter
def name_matadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_matadr, val_ptr, self.nmat*1 * sizeof(c_int))
@property
def name_eqadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_eqadr, dtype=np.int, count=(self.neq*1)), (self.neq, 1, ))
arr.setflags(write=False)
return arr
@name_eqadr.setter
def name_eqadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_eqadr, val_ptr, self.neq*1 * sizeof(c_int))
@property
def name_tendonadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_tendonadr, dtype=np.int, count=(self.ntendon*1)), (self.ntendon, 1, ))
arr.setflags(write=False)
return arr
@name_tendonadr.setter
def name_tendonadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_tendonadr, val_ptr, self.ntendon*1 * sizeof(c_int))
@property
def name_actuatoradr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_actuatoradr, dtype=np.int, count=(self.nu*1)), (self.nu, 1, ))
arr.setflags(write=False)
return arr
@name_actuatoradr.setter
def name_actuatoradr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_actuatoradr, val_ptr, self.nu*1 * sizeof(c_int))
@property
def name_sensoradr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_sensoradr, dtype=np.int, count=(self.nsensor*1)), (self.nsensor, 1, ))
arr.setflags(write=False)
return arr
@name_sensoradr.setter
def name_sensoradr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_sensoradr, val_ptr, self.nsensor*1 * sizeof(c_int))
@property
def name_numericadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_numericadr, dtype=np.int, count=(self.nnumeric*1)), (self.nnumeric, 1, ))
arr.setflags(write=False)
return arr
@name_numericadr.setter
def name_numericadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_numericadr, val_ptr, self.nnumeric*1 * sizeof(c_int))
@property
def name_textadr(self):
arr = np.reshape(np.fromiter(self._wrapped.contents.name_textadr, dtype=np.int, count=(self.ntext*1)), (self.ntext, 1, ))
arr.setflags(write=False)
return arr
@name_textadr.setter
def name_textadr(self, value):
val_ptr = np.array(value, dtype=np.float64).ctypes.data_as(POINTER(c_int))
memmove(self._wrapped.contents.name_textadr, val_ptr, self.ntext*1 * sizeof(c_int))
    @property
    def names(self):
        # Raw ctypes buffer holding the model's name strings (no numpy copy).
        return self._wrapped.contents.names
| 224,081 | 35.849531 | 187 | py |
rllab | rllab-master/rllab/mujoco_py/util.py | import ctypes, os, sys
from ctypes import *
import six
# MAXINT on Python 2, undefined on Python 3
MAXINT = 9223372036854775807
class UserString:
    """Pure-Python str wrapper (ctypesgen compatibility shim).

    Stores the underlying text in ``self.data`` and mirrors the ``str`` API.
    Ported off Python-2-only builtins: ``basestring``/``long``/``cmp``
    (NameError on Python 3) are replaced by ``str``/``int``/rich
    comparisons, and the module-level MAXINT default by ``sys.maxsize``
    (same value on 64-bit platforms; only used as an upper slice bound).
    """
    def __init__(self, seq):
        if isinstance(seq, str):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)
    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    def __long__(self): return int(self.data)  # Py2 relic; long == int on Py3
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)
    def _compare_value(self, other):
        # Comparisons work against both UserString and plain values.
        return other.data if isinstance(other, UserString) else other
    def __eq__(self, other): return self.data == self._compare_value(other)
    def __ne__(self, other): return self.data != self._compare_value(other)
    def __lt__(self, other): return self.data < self._compare_value(other)
    def __le__(self, other): return self.data <= self._compare_value(other)
    def __gt__(self, other): return self.data > self._compare_value(other)
    def __ge__(self, other): return self.data >= self._compare_value(other)
    def __contains__(self, char):
        return char in self.data
    def __len__(self): return len(self.data)
    def __getitem__(self, index): return self.__class__(self.data[index])
    def __getslice__(self, start, end):  # Py2 relic; __getitem__ covers slices
        start = max(start, 0); end = max(end, 0)
        return self.__class__(self.data[start:end])
    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, str):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + str(other))
    def __radd__(self, other):
        if isinstance(other, str):
            return self.__class__(other + self.data)
        else:
            return self.__class__(str(other) + self.data)
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __mod__(self, args):
        return self.__class__(self.data % args)
    # the following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())
    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))
    def count(self, sub, start=0, end=sys.maxsize):
        return self.data.count(sub, start, end)
    def decode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.decode(encoding, errors))
            else:
                return self.__class__(self.data.decode(encoding))
        else:
            return self.__class__(self.data.decode())
    def encode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            else:
                return self.__class__(self.data.encode(encoding))
        else:
            return self.__class__(self.data.encode())
    def endswith(self, suffix, start=0, end=sys.maxsize):
        return self.data.endswith(suffix, start, end)
    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))
    def find(self, sub, start=0, end=sys.maxsize):
        return self.data.find(sub, start, end)
    def index(self, sub, start=0, end=sys.maxsize):
        return self.data.index(sub, start, end)
    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()
    def join(self, seq): return self.data.join(seq)
    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))
    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
    def partition(self, sep):
        return self.data.partition(sep)
    def replace(self, old, new, maxsplit=-1):
        return self.__class__(self.data.replace(old, new, maxsplit))
    def rfind(self, sub, start=0, end=sys.maxsize):
        return self.data.rfind(sub, start, end)
    def rindex(self, sub, start=0, end=sys.maxsize):
        return self.data.rindex(sub, start, end)
    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))
    def rpartition(self, sep):
        return self.data.rpartition(sep)
    def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)
    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)
    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
    def startswith(self, prefix, start=0, end=sys.maxsize):
        return self.data.startswith(prefix, start, end)
    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())
    def translate(self, *args):
        return self.__class__(self.data.translate(*args))
    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))
class MutableString(UserString):
    """mutable string objects

    Python strings are immutable; MutableString cheats by mutating the
    wrapped ``data`` attribute in place, and deliberately un-defines
    hashing so instances cannot be used as dict keys.
    (Ported off the Python-2-only ``basestring`` builtin, which raised
    NameError on Python 3 in ``__iadd__``/``__setslice__``.)
    """
    def __init__(self, string=""):
        self.data = string
    def __hash__(self):
        raise TypeError("unhashable type (it is mutable)")
    def __setitem__(self, index, sub):
        if index < 0:
            index += len(self.data)
        if index < 0 or index >= len(self.data): raise IndexError
        self.data = self.data[:index] + sub + self.data[index+1:]
    def __delitem__(self, index):
        if index < 0:
            index += len(self.data)
        if index < 0 or index >= len(self.data): raise IndexError
        self.data = self.data[:index] + self.data[index+1:]
    def __setslice__(self, start, end, sub):  # Py2 relic
        start = max(start, 0); end = max(end, 0)
        if isinstance(sub, UserString):
            self.data = self.data[:start]+sub.data+self.data[end:]
        elif isinstance(sub, str):
            self.data = self.data[:start]+sub+self.data[end:]
        else:
            self.data = self.data[:start]+str(sub)+self.data[end:]
    def __delslice__(self, start, end):  # Py2 relic
        start = max(start, 0); end = max(end, 0)
        self.data = self.data[:start] + self.data[end:]
    def immutable(self):
        return UserString(self.data)
    def __iadd__(self, other):
        if isinstance(other, UserString):
            self.data += other.data
        elif isinstance(other, str):
            self.data += other
        else:
            self.data += str(other)
        return self
    def __imul__(self, n):
        self.data *= n
        return self
class String(MutableString, Union):
    # ctypes Union bridging C char* and Python strings: `raw` holds the bare
    # pointer, `data` views the same storage as a NUL-terminated c_char_p.
    _fields_ = [('raw', POINTER(c_char)),
                ('data', c_char_p)]
    def __init__(self, obj=""):
        # Accept text (encoded as ASCII), bytes, UserString, or a raw pointer.
        if isinstance(obj, six.text_type):
            self.data = obj.encode('ascii')
        elif isinstance(obj, six.binary_type):
            self.data = obj
        elif isinstance(obj, UserString):
            self.data = six.b(obj)
        else:
            self.raw = obj
    def __len__(self):
        # NULL `data` reports length 0.
        return self.data and len(self.data) or 0
    def from_param(cls, obj):
        # Coerce the many argument styles ctypes callers use into something
        # passable as char*: None/0, String, str, c_char_p, POINTER(c_char),
        # an int address, or any object exposing _as_parameter_.
        # Convert None or 0
        if obj is None or obj == 0:
            return cls(POINTER(c_char)())
        # Convert from String
        elif isinstance(obj, String):
            return obj
        # Convert from str
        elif isinstance(obj, str):
            return cls(obj)
        # Convert from c_char_p
        elif isinstance(obj, c_char_p):
            return obj
        # Convert from POINTER(c_char)
        elif isinstance(obj, POINTER(c_char)):
            return obj
        # Convert from raw pointer
        elif isinstance(obj, int):
            return cls(cast(obj, POINTER(c_char)))
        # Convert from object
        else:
            return String.from_param(obj._as_parameter_)
    from_param = classmethod(from_param)
def ReturnString(obj, func=None, arguments=None):
    # Presumably a ctypes errcheck/restype hook (func/arguments match that
    # signature) that normalizes the returned value to String -- TODO confirm.
    return String.from_param(obj)
| 9,058 | 38.047414 | 80 | py |
rllab | rllab-master/rllab/mujoco_py/__init__.py | from .mjviewer import MjViewer
from .mjcore import MjModel
from .mjcore import register_license
import os
from .mjconstants import *
register_license(os.path.join(os.path.dirname(__file__),
'../../vendor/mujoco/mjkey.txt'))
| 255 | 27.444444 | 63 | py |
rllab | rllab-master/rllab/mujoco_py/mjconstants.py | MOUSE_ROTATE_V = 1
# Viewer mouse-drag interaction mode codes (MOUSE_ROTATE_V = 1 precedes).
MOUSE_ROTATE_H = 2
MOUSE_MOVE_V = 3
MOUSE_MOVE_H = 4
MOUSE_ZOOM = 5
# Object-type constant; presumably mirrors MuJoCo's mjtObj mjOBJ_BODY -- verify.
mjOBJ_BODY = 1
rllab | rllab-master/rllab/misc/autoargs.py | from rllab.misc.console import colorize
import inspect
# pylint: disable=redefined-builtin
# pylint: disable=protected-access
def arg(name, type=None, help=None, nargs=None, mapper=None, choices=None,
        prefix=True):
    """Decorator for ``__init__`` that records an auto-argument spec on the
    function under ``_autoargs_info``, keyed by ``name``.

    Note: ``type``/``help`` intentionally shadow builtins to mirror
    argparse's keyword names.
    """
    def wrap(fn):
        assert fn.__name__ == '__init__'
        info = getattr(fn, '_autoargs_info', None)
        if info is None:
            info = dict()
            fn._autoargs_info = info
        info[name] = dict(
            type=type,
            help=help,
            nargs=nargs,
            choices=choices,
            mapper=mapper,
        )
        return fn
    return wrap
def prefix(prefix_):
    """Decorator for ``__init__`` that tags it with an auto-argument prefix."""
    def _tag(init_fn):
        assert init_fn.__name__ == '__init__'
        init_fn._autoargs_prefix = prefix_
        return init_fn
    return _tag
def _get_prefix(cls):
    """CLI prefix for ``cls``: an explicit ``@prefix`` tag wins, otherwise a
    default is chosen from the first matching rllab base class."""
    from rllab.mdp.base import MDP
    from rllab.policies.base import Policy
    from rllab.baselines.base import Baseline
    from rllab.algos.base import Algorithm
    _missing = object()
    explicit = getattr(cls.__init__, '_autoargs_prefix', _missing)
    if explicit is not _missing:
        return explicit
    # Order matters: the first matching base class wins.
    for base, default_prefix in ((MDP, 'mdp_'), (Algorithm, 'algo_'),
                                 (Baseline, 'baseline_'), (Policy, 'policy_')):
        if issubclass(cls, base):
            return default_prefix
    return ""
def _get_info(cls_or_fn):
if isinstance(cls_or_fn, type):
if hasattr(cls_or_fn.__init__, '_autoargs_info'):
return cls_or_fn.__init__._autoargs_info
return {}
else:
if hasattr(cls_or_fn, '_autoargs_info'):
return cls_or_fn._autoargs_info
return {}
def _t_or_f(s):
ua = str(s).upper()
if ua == 'TRUE'[:len(ua)]:
return True
elif ua == 'FALSE'[:len(ua)]:
return False
else:
raise ValueError('Unrecognized boolean value: %s' % s)
def add_args(_):
    def _add_args(cls, parser):
        """Register every auto-argument of ``cls`` onto an argparse parser,
        prefixing option names with the class prefix."""
        prefix_ = _get_prefix(cls)
        for arg_name, arg_info in _get_info(cls).items():
            arg_type = arg_info['type']
            # argparse's bool() would treat any non-empty string as True,
            # so route booleans through the fuzzy TRUE/FALSE parser.
            if arg_type == bool:
                arg_type = _t_or_f
            parser.add_argument(
                '--' + prefix_ + arg_name,
                help=arg_info['help'],
                choices=arg_info['choices'],
                type=arg_type,
                nargs=arg_info['nargs'])
    return _add_args
def new_from_args(_):
    def _new_from_args(cls, parsed_args, *args, **params):
        """Instantiate ``cls``, filling constructor kwargs from the parsed
        CLI namespace (explicit ``params`` entries are kept as overrides).

        A ``_silent=True`` kwarg suppresses the per-argument log lines.
        """
        silent = params.pop("_silent", False)
        prefix_ = _get_prefix(cls)
        for arg_name, arg_info in _get_info(cls).items():
            prefixed = prefix_ + arg_name
            if not hasattr(parsed_args, prefixed):
                continue
            val = getattr(parsed_args, prefixed)
            if val is None:
                continue
            mapper = arg_info['mapper']
            params[arg_name] = mapper(val) if mapper else val
            if not silent:
                # Log the raw (pre-mapper) value, as the original did.
                print(colorize(
                    "using argument %s with value %s" % (arg_name, val),
                    "yellow"))
        return cls(*args, **params)
    return _new_from_args
def inherit(base_func):
    """Decorator factory: merge the auto-arg specs of a parent ``__init__``
    into a child's, letting the child's own entries win on conflicts."""
    assert base_func.__name__ == '__init__'
    def wrap(func):
        assert func.__name__ == '__init__'
        merged = dict(_get_info(base_func))
        merged.update(_get_info(func))
        func._autoargs_info = merged
        return func
    return wrap
def get_all_parameters(cls, parsed_args):
    """Collect the effective value of every auto-argument of ``cls``.

    Values come from ``parsed_args`` (attributes named ``<prefix><arg>``),
    falling back to the defaults declared on ``cls.__init__``.

    Raises:
        ValueError: if ``cls`` has no (non-empty) prefix.
    """
    prefix = _get_prefix(cls)
    if prefix is None or len(prefix) == 0:
        raise ValueError('Cannot retrieve parameters without prefix')
    info = _get_info(cls)
    # BUG FIX: on Python 3 an unbound __init__ is a plain function, so the
    # original inspect.ismethod() branch was dead and defaults were never
    # collected; getargspec was also removed in Python 3.11 -- use
    # getfullargspec on either form.
    arg_defaults = {}
    if inspect.isfunction(cls.__init__) or inspect.ismethod(cls.__init__):
        spec = inspect.getfullargspec(cls.__init__)
        if spec.defaults is not None:
            arg_defaults = dict(zip(spec.args[::-1], spec.defaults[::-1]))
    all_params = {}
    for arg_name in info:
        prefixed_name = prefix + arg_name
        arg_value = getattr(parsed_args, prefixed_name, None)
        if arg_value is None and arg_name in arg_defaults:
            arg_value = arg_defaults[arg_name]
        if arg_value is not None:
            all_params[arg_name] = arg_value
    return all_params
| 4,540 | 29.072848 | 80 | py |
rllab | rllab-master/rllab/misc/resolve.py | from pydoc import locate
import types
from rllab.misc.ext import iscanr
def classesinmodule(module):
    """List the classes that are defined in (not merely imported into) the
    given module, in definition order."""
    return [
        obj for obj in vars(module).values()
        if isinstance(obj, type) and obj.__module__ == module.__name__
    ]
def locate_with_hint(class_path, prefix_hints=None):
    """Resolve ``class_path`` via ``pydoc.locate``, retrying with the
    ``prefix_hints`` components prepended (dot-joined) when the bare path
    is not found.

    Returns the located module/class, or None if nothing matches.
    (Fixes the mutable-default argument and the bogus ``"." + path`` lookup
    the original performed when the hint list was empty.)
    """
    module_or_class = locate(class_path)
    if module_or_class is None and prefix_hints:
        hint = ".".join(prefix_hints)
        module_or_class = locate(hint + "." + class_path)
    return module_or_class
def load_class(class_path, superclass=None, prefix_hints=[]):
    """Load a class by dotted path, optionally requiring a superclass.

    If ``class_path`` names a module, the unique class defined in it (and
    matching ``superclass``, when given) is returned.

    Raises:
        ValueError: on unresolvable paths, zero/multiple candidate classes,
            or a superclass mismatch.
    """
    module_or_class = locate_with_hint(class_path, prefix_hints)
    if module_or_class is None:
        raise ValueError("Cannot find module or class under path %s" % class_path)
    if type(module_or_class) == types.ModuleType:
        # BUG FIX: the original only assigned `classes` when a superclass was
        # given, raising NameError on the module-without-superclass path.
        classes = classesinmodule(module_or_class)
        if superclass:
            classes = [x for x in classes if issubclass(x, superclass)]
        if len(classes) == 0:
            if superclass:
                raise ValueError('Could not find any subclasses of %s defined in module %s' % (str(superclass), class_path))
            else:
                raise ValueError('Could not find any classes defined in module %s' % (class_path))
        elif len(classes) > 1:
            if superclass:
                raise ValueError('Multiple subclasses of %s are defined in the module %s' % (str(superclass), class_path))
            else:
                raise ValueError('Multiple classes are defined in the module %s' % (class_path))
        else:
            return classes[0]
    elif isinstance(module_or_class, type):
        if superclass is None or issubclass(module_or_class, superclass):
            return module_or_class
        else:
            raise ValueError('The class %s is not a subclass of %s' % (str(module_or_class), str(superclass)))
    else:
        raise ValueError('Unsupported object: %s' % str(module_or_class))
| 2,123 | 39.075472 | 124 | py |
rllab | rllab-master/rllab/misc/nb_utils.py | import os.path as osp
import numpy as np
import csv
import matplotlib.pyplot as plt
import json
import joblib
from glob import glob
import os
def plot_experiments(name_or_patterns, legend=False, post_processing=None, key='AverageReturn'):
    """Plot the ``key`` column of progress.csv for every experiment directory
    matching the given glob pattern(s) under the top-level data folder.

    ``post_processing``, if given, is applied to each run's value array
    before plotting; ``legend=True`` labels lines with the directory names.
    """
    if not isinstance(name_or_patterns, (list, tuple)):
        name_or_patterns = [name_or_patterns]
    data_folder = osp.abspath(osp.join(osp.dirname(__file__), '../../data'))
    files = []
    for name_or_pattern in name_or_patterns:
        matched_files = glob(osp.join(data_folder, name_or_pattern))
        files += matched_files
    files = sorted(files)
    print('plotting the following experiments:')
    for f in files:
        print(f)
    plots = []
    legends = []
    for f in files:
        exp_name = osp.basename(f)
        returns = []
        # BUG FIX: csv must be read in text mode on Python 3 ('rb' makes
        # DictReader fail); newline='' is the documented way to open csv.
        with open(osp.join(f, 'progress.csv'), newline='') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                if row[key]:
                    returns.append(float(row[key]))
        returns = np.array(returns)
        if post_processing:
            returns = post_processing(returns)
        plots.append(plt.plot(returns)[0])
        legends.append(exp_name)
    if legend:
        plt.legend(plots, legends)
class Experiment(object):
    """One loaded experiment: progress curves, its params dict (flattened
    into ``flat_params``), and an optional unpickled snapshot."""

    def __init__(self, progress, params, pkl_data=None):
        self.progress = progress
        self.params = params
        self.pkl_data = pkl_data
        self.flat_params = self._flatten_params(params)
        self.name = params["exp_name"]

    def _flatten_params(self, params, depth=2):
        """Flatten nested dicts up to ``depth`` levels, joining keys with
        '_'; a nested '_name' entry collapses onto the parent key itself."""
        flat = dict()
        for key, value in params.items():
            if not (isinstance(value, dict) and depth != 0):
                flat[key] = value
                continue
            for sub_key, sub_value in self._flatten_params(value, depth=depth - 1).items():
                if sub_key == "_name":
                    flat[key] = sub_value
                else:
                    flat[key + "_" + sub_key] = sub_value
        return flat
def uniq(seq):
    """Return seq's elements with duplicates removed, keeping first-seen
    order."""
    seen = set()
    out = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out
class ExperimentDatabase(object):
    """Indexes experiment runs (progress.csv / params.json / params.pkl)
    found under a data folder, with filtering and plotting helpers."""

    def __init__(self, data_folder, names_or_patterns='*'):
        self._load_experiments(data_folder, names_or_patterns)

    def _read_data(self, progress_file):
        """Parse progress.csv into a dict of column-name -> float ndarray."""
        entries = dict()
        # BUG FIX: Python 3's csv module needs a text-mode file (newline='');
        # the original binary 'rb' mode made DictReader fail.
        with open(progress_file, newline='') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                for k, v in row.items():
                    if k not in entries:
                        entries[k] = []
                    entries[k].append(float(v))
        entries = dict([(k, np.array(v)) for k, v in entries.items()])
        return entries

    def _read_params(self, params_file):
        """Load the JSON parameter dump for one experiment."""
        with open(params_file, "r") as f:
            return json.loads(f.read())

    def _load_experiments(self, data_folder, name_or_patterns):
        """Build Experiment objects from each matching directory, or from
        loose progress/params files when pointed at a run directory."""
        if not isinstance(name_or_patterns, (list, tuple)):
            name_or_patterns = [name_or_patterns]
        files = []
        for name_or_pattern in name_or_patterns:
            matched_files = glob(
                osp.join(data_folder, name_or_pattern))  # glob gives a list of all files satisfying pattern
            files += matched_files  # this will include twice the same file if it satisfies 2 patterns
        experiments = []
        progress_f = None
        params_f = None
        pkl_data = None
        for f in files:
            if os.path.isdir(f):
                try:
                    progress = self._read_data(osp.join(f, "progress.csv"))
                    params = self._read_params(osp.join(f, "params.json"))
                    params["exp_name"] = osp.basename(f)
                    if os.path.isfile(osp.join(f, "params.pkl")):
                        pkl_data = joblib.load(osp.join(f, "params.pkl"))
                        experiments.append(Experiment(progress, params, pkl_data))
                    else:
                        experiments.append(Experiment(progress, params))
                except Exception as e:
                    # Best-effort loading: skip malformed dirs but report.
                    print(e)
            elif 'progress.csv' in f:  # in case you're giving as datafolder the dir that contains the files!
                progress_f = self._read_data(f)
            elif 'params.json' in f:
                params_f = self._read_params(f)
            elif 'params.pkl' in f:
                print('about to load', f)
                pkl_data = joblib.load(f)
        if params_f and progress_f:
            if pkl_data:
                experiments.append(Experiment(progress_f, params_f, pkl_data))
            else:
                experiments.append(Experiment(progress_f, params_f))
        self._experiments = experiments

    def plot_experiments(self, key=None, legend=None, color_key=None, filter_exp=None, **kwargs):
        """Plot ``key`` for every experiment matching kwargs / filter_exp,
        optionally coloring lines by the value of ``color_key``."""
        experiments = list(self.filter_experiments(**kwargs))
        if filter_exp:
            experiments = list(filter(filter_exp, experiments))
        plots = []
        legends = []
        color_pool = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
        color_map = dict()
        if color_key is not None:
            exp_color_keys = uniq([exp.flat_params.get(
                color_key, None) for exp in experiments])
            if len(exp_color_keys) > len(color_pool):
                raise NotImplementedError
            for exp_color_key, color in zip(exp_color_keys, color_pool):
                print("%s: %s" % (str(exp_color_key), color))
            color_map = dict(list(zip(exp_color_keys, color_pool)))
        used_legends = []
        legend_list = []
        for exp in experiments:
            exp_color_key = None
            if color_key is not None:
                exp_color_key = exp.flat_params.get(color_key, None)
                exp_color = color_map.get(exp_color_key, None)
            else:
                exp_color = None
            plots.append(plt.plot(exp.progress.get(
                key, [0]), color=exp_color)[0])
            if legend is not None:
                legends.append(exp.flat_params[legend])
            elif exp_color_key is not None and exp_color_key not in used_legends:
                used_legends.append(exp_color_key)
                legend_list.append(plots[-1])
        if len(legends) > 0:
            plt.legend(plots, legends)
        elif len(legend_list) > 0:
            plt.legend(legend_list, used_legends)

    def filter_experiments(self, **kwargs):
        """Yield experiments whose flat params match every kwarg exactly."""
        for exp in self._experiments:
            exp_params = exp.flat_params
            match = True
            for key, val in kwargs.items():
                if exp_params.get(key, None) != val:
                    match = False
                    break
            if match:
                yield exp

    def unique(self, param_key):
        """Distinct values of ``param_key`` across loaded experiments."""
        return uniq([exp.flat_params[param_key]
                     for exp in self._experiments
                     if param_key in exp.flat_params])
| 6,975 | 37.32967 | 110 | py |
rllab | rllab-master/rllab/misc/special.py | import numpy as np
import scipy
import scipy.signal
import theano.tensor.nnet
import theano.tensor as TT
import theano.tensor.extra_ops
from collections import OrderedDict
def weighted_sample(weights, objects):
    """
    Return a random item from objects, with the weighting defined by weights
    (which must sum to 1).
    """
    cumulative = np.cumsum(weights)
    draw = np.random.rand()
    # Index of the first cumulative weight exceeding the random draw.
    idx = int(np.sum(cumulative < draw))
    return objects[min(idx, len(objects) - 1)]
def weighted_sample_n(prob_matrix, items):
    """Vectorized categorical draw: one sample from ``items`` per row of
    ``prob_matrix`` (each row a probability vector summing to 1)."""
    cumulative = prob_matrix.cumsum(axis=1)
    draws = np.random.rand(prob_matrix.shape[0])
    chosen = (cumulative < draws.reshape((-1, 1))).sum(axis=1)
    n_items = len(items)
    return items[np.minimum(chosen, n_items - 1)]
def softmax(x):
    """Row-wise softmax (last axis), shifted by the max for stability."""
    exps = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return exps / exps.sum(axis=-1, keepdims=True)
def softmax_sym(x):
    # Symbolic (Theano) counterpart of softmax(); operates row-wise.
    return theano.tensor.nnet.softmax(x)
def cat_entropy(x):
    """Row-wise entropy of categorical distributions (rows of x)."""
    return -(x * np.log(x)).sum(axis=-1)
# compute perplexity for each row
def cat_perplexity(x):
    # Perplexity = exp(entropy) of each categorical row.
    return np.exp(cat_entropy(x))
def explained_variance_1d(ypred, y):
    """Fraction of y's variance explained by ypred (1 = perfect).

    Degenerate case: if y is (near) constant, returns 1 when ypred is also
    constant and 0 otherwise.
    """
    assert y.ndim == 1 and ypred.ndim == 1
    target_var = np.var(y)
    if np.isclose(target_var, 0):
        return 0 if np.var(ypred) > 0 else 1
    return 1 - np.var(y - ypred) / (target_var + 1e-8)
def to_onehot(ind, dim):
    """Return a length-``dim`` vector with a 1 at position ``ind``."""
    vec = np.zeros(dim)
    vec[ind] = 1
    return vec
def to_onehot_n(inds, dim):
    """Return a (len(inds), dim) matrix whose i-th row one-hot encodes
    inds[i]."""
    mat = np.zeros((len(inds), dim))
    mat[np.arange(len(inds)), inds] = 1
    return mat
def to_onehot_sym(ind, dim):
    # Symbolic (Theano) batched one-hot encoding of an int vector `ind`.
    assert ind.ndim == 1
    return theano.tensor.extra_ops.to_one_hot(ind, dim)
def from_onehot(v):
    """Index of the (single) nonzero entry in a one-hot vector."""
    nonzero_rows = np.nonzero(v)[0]
    return nonzero_rows[0]
def from_onehot_n(v):
    """Row-wise inverse of to_onehot_n; empty input yields an empty list."""
    if len(v) == 0:
        return []
    return np.nonzero(v)[1]
def normalize_updates(old_mean, old_std, new_mean, new_std, old_W, old_b):
    """
    Compute the updates for normalizing the last (linear) layer of a neural
    network
    """
    # Make necessary transformation so that
    # (W_old * h + b_old) * std_old + mean_old == \
    # (W_new * h + b_new) * std_new + mean_new
    new_W = old_W * old_std[0] / (new_std[0] + 1e-6)
    new_b = (old_b * old_std[0] + old_mean[0] - new_mean[0]) / (new_std[0] + 1e-6)
    # Returned as an OrderedDict of (shared var -> new symbolic value) pairs,
    # the form theano.function(updates=...) expects.
    return OrderedDict([
        (old_W, TT.cast(new_W, old_W.dtype)),
        (old_b, TT.cast(new_b, old_b.dtype)),
        (old_mean, new_mean),
        (old_std, new_std),
    ])
def discount_cumsum(x, discount):
    """Reverse discounted cumulative sum: out[t] = sum_k discount^k * x[t+k].

    Implemented as an IIR filter over the reversed sequence; see
    https://docs.scipy.org/doc/scipy/reference/tutorial/signal.html#difference-equation-filtering
    (the recurrence is y[t] - discount*y[t+1] = x[t]).
    """
    reversed_x = x[::-1]
    filtered = scipy.signal.lfilter([1], [1, float(-discount)], reversed_x, axis=0)
    return filtered[::-1]
def discount_return(x, discount):
    """Total discounted return: sum over t of discount^t * x[t]."""
    discounts = discount ** np.arange(len(x))
    return np.sum(x * discounts)
def rk4(derivs, y0, t, *args, **kwargs):
    """
    Integrate a 1D or ND system of ODEs using 4th-order Runge-Kutta.

    This is a toy implementation; prefer :func:`scipy.integrate` when scipy
    is available.

    *y0* : initial state (scalar or vector)
    *t* : sample times
    *derivs* : callable ``dy = derivs(yi, ti, *args, **kwargs)``

    Returns an array of states, one row per entry of ``t`` (``yout[0] == y0``).

    Example::

        def derivs(x, t):
            return -2 * x + exp(-t)
        yout = rk4(derivs, 1, arange(0.0, 2.0, 0.0005))
    """
    # np.float64 replaces the np.float_ alias removed in NumPy 2.0 (they
    # were the same dtype).
    try:
        Ny = len(y0)
    except TypeError:
        # Scalar state -> 1-D output trajectory.
        yout = np.zeros((len(t),), np.float64)
    else:
        yout = np.zeros((len(t), Ny), np.float64)
    yout[0] = y0
    for i in np.arange(len(t) - 1):
        thist = t[i]
        dt = t[i + 1] - thist
        dt2 = dt / 2.0
        y0 = yout[i]
        # Standard RK4 stages.
        k1 = np.asarray(derivs(y0, thist, *args, **kwargs))
        k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2, *args, **kwargs))
        k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2, *args, **kwargs))
        k4 = np.asarray(derivs(y0 + dt * k3, thist + dt, *args, **kwargs))
        yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
    return yout
| 4,895 | 24.633508 | 103 | py |
rllab | rllab-master/rllab/misc/ext.py | from path import Path
import sys
import pickle as pickle
import random
from rllab.misc.console import colorize, Message
from collections import OrderedDict
import numpy as np
import operator
from functools import reduce
sys.setrecursionlimit(50000)
def extract(x, *keys):
    """Pull multiple keys out of a dict/lazydict (tuple of values) or out
    of a list of dicts (tuple of per-key lists)."""
    if isinstance(x, (dict, lazydict)):
        return tuple(x[key] for key in keys)
    if isinstance(x, list):
        return tuple([item[key] for item in x] for key in keys)
    raise NotImplementedError
def extract_dict(x, *keys):
    """Sub-dict of x restricted to the given keys (missing keys skipped)."""
    return dict((key, x[key]) for key in keys if key in x)
def flatten(xs):
    """Concatenate one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    out = []
    for inner in xs:
        out.extend(inner)
    return out
def compact(x):
    """
    For a dictionary this removes all None values, and for a list this removes
    all None elements; otherwise it returns the input itself.
    """
    if isinstance(x, dict):
        return {k: v for k, v in x.items() if v is not None}
    if isinstance(x, list):
        return [item for item in x if item is not None]
    return x
def cached_function(inputs, outputs):
    # Compile a Theano function, memoizing the compiled callable on disk
    # under ~/.hierctrl_cache, keyed by a hash of the pretty-printed graph.
    # NOTE(review): hash() of a str is randomized per process unless
    # PYTHONHASHSEED is fixed, so cache keys may not be stable across runs.
    import theano
    with Message("Hashing theano fn"):
        if hasattr(outputs, '__len__'):
            hash_content = tuple(map(theano.pp, outputs))
        else:
            hash_content = theano.pp(outputs)
    cache_key = hex(hash(hash_content) & (2 ** 64 - 1))[:-1]
    cache_dir = Path('~/.hierctrl_cache')
    cache_dir = cache_dir.expanduser()
    cache_dir.mkdir_p()
    cache_file = cache_dir / ('%s.pkl' % cache_key)
    if cache_file.exists():
        with Message("unpickling"):
            with open(cache_file, "rb") as f:
                try:
                    return pickle.load(f)
                except Exception:
                    # Corrupt cache entry: fall through and recompile.
                    pass
    with Message("compiling"):
        fun = compile_function(inputs, outputs)
    with Message("picking"):
        with open(cache_file, "wb") as f:
            pickle.dump(fun, f, protocol=pickle.HIGHEST_PROTOCOL)
    return fun
# Lazily evaluated dict: values are zero-argument callables evaluated (and
# cached) on first access.
class lazydict(object):
    def __init__(self, **kwargs):
        self._lazy_dict = kwargs
        self._dict = {}

    def __getitem__(self, key):
        # Evaluate the thunk once and cache the result.
        if key not in self._dict:
            self._dict[key] = self._lazy_dict[key]()
        return self._dict[key]

    def __setitem__(self, i, y):
        self.set(i, y)

    def get(self, key, default=None):
        if key in self._lazy_dict:
            return self[key]
        return default

    def set(self, key, value):
        self._lazy_dict[key] = value
        # BUG FIX: drop any previously evaluated value so the new thunk is
        # actually used on the next access (the original returned the stale
        # cached result).
        self._dict.pop(key, None)
def iscanl(f, l, base=None):
    """Lazy left scan: yield running results of folding ``f`` over ``l``.

    If ``base`` is given it seeds the fold. (BUG FIX: the original tested
    the seed's truthiness, silently ignoring falsy seeds such as 0.)
    """
    started = base is not None
    acc = base
    for x in l:
        if started:
            acc = f(acc, x)
        else:
            acc = x
            started = True
        yield acc
def iscanr(f, l, base=None):
    """Lazy right scan: yield running folds of ``f`` over reversed ``l``.

    If ``base`` is given it seeds the fold. (BUG FIX: the original tested
    the seed's truthiness, silently ignoring falsy seeds such as 0.)
    """
    started = base is not None
    acc = base
    for x in list(l)[::-1]:
        if started:
            acc = f(x, acc)
        else:
            acc = x
            started = True
        yield acc
def scanl(f, l, base=None):
    # Eager (list) version of iscanl.
    return list(iscanl(f, l, base))
def scanr(f, l, base=None):
    # Eager (list) version of iscanr.
    return list(iscanr(f, l, base))
def compile_function(inputs=None, outputs=None, updates=None, givens=None, log_name=None, **kwargs):
    # Thin wrapper over theano.function with optional timed log output.
    # NOTE(review): msg.__exit__ is skipped if theano.function raises;
    # consider try/finally.
    import theano
    if log_name:
        msg = Message("Compiling function %s" % log_name)
        msg.__enter__()
    ret = theano.function(
        inputs=inputs,
        outputs=outputs,
        updates=updates,
        givens=givens,
        on_unused_input='ignore',
        allow_input_downcast=True,
        **kwargs
    )
    if log_name:
        msg.__exit__(None, None, None)
    return ret
def new_tensor(name, ndim, dtype):
    # Create a Theano tensor variable of the given rank and dtype
    # (no dimension is broadcastable).
    import theano.tensor as TT
    return TT.TensorType(dtype, (False,) * ndim)(name)
def new_tensor_like(name, arr_like):
    # Create a Theano tensor variable with the same rank/dtype as arr_like.
    return new_tensor(name, arr_like.ndim, arr_like.dtype)
class AttrDict(dict):
    """Dict whose entries are also reachable as attributes (d.key)."""

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Aliasing __dict__ to the dict itself makes attribute access and
        # item access share the same storage.
        self.__dict__ = self
def is_iterable(obj):
    """Truthy iff obj is a string or exposes __iter__.

    Note: for non-strings this returns the bound __iter__ method (truthy)
    rather than True, matching the original contract.
    """
    if isinstance(obj, str):
        return True
    return getattr(obj, '__iter__', False)
def truncate_path(p, t):
    """Copy of path dict ``p`` with every series cut to the first ``t``
    entries (drops all time steps >= t)."""
    return {key: p[key][:t] for key in p}
def concat_paths(p1, p2):
    """Concatenate two path dicts key-wise; only keys present in both
    survive."""
    import numpy as np
    return {k: np.concatenate([p1[k], p2[k]]) for k in p1.keys() if k in p2}
def path_len(p):
    """Number of time steps in a path, taken from its "states" series."""
    return len(p["states"])
def shuffled(sequence):
    """Lazily yield the elements of ``sequence`` in random order.

    Draws exactly one random.randint per yield (same call sequence as the
    original, so output matches under a fixed seed).
    """
    pool = list(sequence)
    while len(pool):
        j = random.randint(0, len(pool) - 1)  # pick a random slot
        item = pool[j]
        pool[j] = pool[-1]  # move the last element into the vacated slot
        pool.pop()
        yield item
seed_ = None  # last seed passed to set_seed(); read back via get_seed()
def set_seed(seed):
    # Seed python's random, numpy, Lasagne, and (if importable) TensorFlow.
    seed %= 4294967294
    global seed_
    seed_ = seed
    import lasagne
    random.seed(seed)
    np.random.seed(seed)
    lasagne.random.set_rng(np.random.RandomState(seed))
    try:
        import tensorflow as tf
        tf.set_random_seed(seed)
    except Exception as e:
        # TensorFlow is optional; report and continue.
        print(e)
    print((
        colorize(
            'using seed %s' % (str(seed)),
            'green'
        )
    ))
def get_seed():
    # Return the last seed set via set_seed (None if never set).
    return seed_
def flatten_hessian(cost, wrt, consider_constant=None,
                    disconnected_inputs='raise', block_diagonal=True):
    """
    :type cost: Scalar (0-dimensional) Variable.
    :type wrt: Vector (1-dimensional tensor) 'Variable' or list of
    vectors (1-dimensional tensors) Variables
    :param consider_constant: a list of expressions not to backpropagate
    through
    :type disconnected_inputs: string
    :param disconnected_inputs: Defines the behaviour if some of the variables
    in ``wrt`` are not part of the computational graph computing ``cost``
    (or if all links are non-differentiable). The possible values are:
    - 'ignore': considers that the gradient on these parameters is zero.
    - 'warn': consider the gradient zero, and print a warning.
    - 'raise': raise an exception.
    :return: either a instance of Variable or list/tuple of Variables
    (depending upon `wrt`) repressenting the Hessian of the `cost`
    with respect to (elements of) `wrt`. If an element of `wrt` is not
    differentiable with respect to the output, then a zero
    variable is returned. The return value is of same type
    as `wrt`: a list/tuple or TensorVariable in all cases.
    """
    # Theano-only: builds the Hessian symbolically, either one block per
    # wrt variable (block_diagonal=True) or one dense concatenated matrix.
    import theano
    from theano.tensor import arange
    # Check inputs have the right format
    import theano.tensor as TT
    from theano import Variable
    from theano import grad
    assert isinstance(cost, Variable), \
        "tensor.hessian expects a Variable as `cost`"
    assert cost.ndim == 0, \
        "tensor.hessian expects a 0 dimensional variable as `cost`"
    using_list = isinstance(wrt, list)
    using_tuple = isinstance(wrt, tuple)
    if isinstance(wrt, (list, tuple)):
        wrt = list(wrt)
    else:
        wrt = [wrt]
    hessians = []
    if not block_diagonal:
        # Dense mode: one flattened gradient over all wrt variables.
        expr = TT.concatenate([
            grad(cost, input, consider_constant=consider_constant,
                 disconnected_inputs=disconnected_inputs).flatten()
            for input in wrt
        ])
    for input in wrt:
        assert isinstance(input, Variable), \
            "tensor.hessian expects a (list of) Variable as `wrt`"
        # assert input.ndim == 1, \
        #     "tensor.hessian expects a (list of) 1 dimensional variable " \
        #     "as `wrt`"
        if block_diagonal:
            expr = grad(cost, input, consider_constant=consider_constant,
                        disconnected_inputs=disconnected_inputs).flatten()
        # It is possible that the inputs are disconnected from expr,
        # even if they are connected to cost.
        # This should not be an error.
        hess, updates = theano.scan(lambda i, y, x: grad(
            y[i],
            x,
            consider_constant=consider_constant,
            disconnected_inputs='ignore').flatten(),
            sequences=arange(expr.shape[0]),
            non_sequences=[expr, input])
        assert not updates, \
            ("Scan has returned a list of updates. This should not "
             "happen! Report this to theano-users (also include the "
             "script that generated the error)")
        hessians.append(hess)
    if block_diagonal:
        from theano.gradient import format_as
        return format_as(using_list, using_tuple, hessians)
    else:
        return TT.concatenate(hessians, axis=1)
def flatten_tensor_variables(ts):
    """Concatenate the given symbolic tensors into one flat vector variable."""
    import theano.tensor as TT
    flattened = [TT.flatten(t) for t in ts]
    return TT.concatenate(flattened)
def flatten_shape_dim(shape):
    """Return the total number of elements of a tensor with the given shape.

    An empty shape yields 1 (a scalar).
    """
    total = 1
    for dim in shape:
        total = total * dim
    return total
def print_lasagne_layer(layer, prefix=""):
    """Recursively pretty-print a Lasagne layer and all of its input layers."""
    pieces = []
    if layer.name:
        pieces.append("name=" + layer.name)
    if getattr(layer, 'nonlinearity', None):
        pieces.append("nonlinearity=" + layer.nonlinearity.__name__)
    print(prefix + layer.__class__.__name__ + "[" + ", ".join(pieces) + "]")
    # Merge layers expose `input_layers` (a list); ordinary layers expose
    # a single `input_layer`.
    if hasattr(layer, 'input_layers') and layer.input_layers is not None:
        for parent in layer.input_layers:
            print_lasagne_layer(parent, prefix + "  ")
    elif hasattr(layer, 'input_layer') and layer.input_layer is not None:
        print_lasagne_layer(layer.input_layer, prefix + "  ")
def unflatten_tensor_variables(flatarr, shapes, symb_arrs):
    """Slice a flat symbolic vector back into tensors of the given shapes.

    Broadcast patterns are restored from the matching symbolic arrays so
    each output is interchangeable with the original variable.
    """
    import theano.tensor as TT
    import numpy as np
    outputs = []
    offset = 0
    for shape, symb_arr in zip(shapes, symb_arrs):
        num_elems = np.prod(list(shape))
        segment = flatarr[offset:offset + num_elems].reshape(shape)
        if segment.type.broadcastable != symb_arr.type.broadcastable:
            segment = TT.patternbroadcast(segment, symb_arr.type.broadcastable)
        outputs.append(segment)
        offset += num_elems
    return outputs
"""
Divide function f's inputs into several slices. Evaluate f on each slice, and then average the results. This is useful when there is not enough memory to process all the data at once.
Assume:
1. each of f's inputs is iterable and composed of multiple "samples"
2. outputs can be averaged over "samples"
"""
def sliced_fun(f, n_slices):
    """Wrap ``f`` so it is evaluated over ``n_slices`` chunks of its inputs.

    The wrapper splits every entry of ``sliced_inputs`` into chunks along
    the first axis, applies ``f`` chunk by chunk (appending any
    ``non_sliced_inputs`` unchanged), and returns the sample-weighted
    average of the per-chunk results. The return type mirrors ``f``'s:
    scalar, list, or tuple.
    """
    def sliced_f(sliced_inputs, non_sliced_inputs=None):
        if non_sliced_inputs is None:
            non_sliced_inputs = []
        if isinstance(non_sliced_inputs, tuple):
            non_sliced_inputs = list(non_sliced_inputs)
        n_paths = len(sliced_inputs[0])
        slice_size = max(1, n_paths // n_slices)
        accum = None
        for start in range(0, n_paths, slice_size):
            chunk = [v[start:start + slice_size] for v in sliced_inputs]
            chunk_ret = f(*(chunk + non_sliced_inputs))
            if isinstance(chunk_ret, (tuple, list)):
                chunk_ret_list = chunk_ret
            else:
                chunk_ret_list = [chunk_ret]
            # Weight each chunk's outputs by its sample count so uneven
            # final chunks do not bias the average.
            weighted = [
                np.asarray(v) * len(chunk[0]) for v in chunk_ret_list]
            if accum is None:
                accum = weighted
            else:
                accum = [a + w for a, w in zip(accum, weighted)]
        accum = [v / n_paths for v in accum]
        # Shape of the last chunk's return dictates the output container.
        if not isinstance(chunk_ret, (tuple, list)):
            accum = accum[0]
        elif isinstance(chunk_ret, tuple):
            accum = tuple(accum)
        return accum
    return sliced_f
def stdize(data, eps=1e-6):
    """Standardize ``data`` column-wise: subtract the mean, divide by std.

    ``eps`` guards against division by zero for constant columns.
    """
    centered = data - np.mean(data, axis=0)
    return centered / (np.std(data, axis=0) + eps)
def iterate_minibatches_generic(input_lst=None, batchsize=None, shuffle=False):
    """Yield aligned minibatches from each array in ``input_lst``.

    All arrays must have equal length. With ``shuffle=True`` the sample
    order is randomized; the final batch may be smaller than ``batchsize``.
    When ``batchsize`` is None, a single batch with all samples is yielded.
    """
    n_samples = len(input_lst[0])
    if batchsize is None:
        batchsize = n_samples
    assert all(len(arr) == n_samples for arr in input_lst)
    if shuffle:
        order = np.arange(n_samples)
        np.random.shuffle(order)
    for start in range(0, n_samples, batchsize):
        if shuffle:
            batch_idx = order[start:start + batchsize]
        else:
            batch_idx = slice(start, start + batchsize)
        yield [arr[batch_idx] for arr in input_lst]
| 12,215 | 30.163265 | 172 | py |
rllab | rllab-master/rllab/misc/instrument.py | import os
import re
import subprocess
import base64
import os.path as osp
import pickle as pickle
import inspect
import hashlib
import sys
from contextlib import contextmanager
import errno
from rllab.core.serializable import Serializable
from rllab import config
from rllab.misc.console import mkdir_p
from rllab.misc import ext
from io import StringIO
import datetime
import dateutil.tz
import json
import time
import numpy as np
from rllab.misc.ext import AttrDict
from rllab.viskit.core import flatten
import collections
class StubBase(object):
    """Base class for stub ("lazy") objects used by run_experiment_lite.

    Instead of executing operations, stubs record them: attribute access,
    indexing and arithmetic all return StubAttr / StubMethodCall nodes, so
    an entire computation can be pickled and replayed remotely.
    """

    def __getitem__(self, item):
        # Record indexing as a deferred __getitem__ call.
        return StubMethodCall(self, "__getitem__", args=[item], kwargs=dict())

    def __getattr__(self, item):
        try:
            return super(self.__class__, self).__getattribute__(item)
        except AttributeError:
            # Never stub dunder lookups: Python machinery (pickle, copy,
            # operator dispatch) relies on a genuine AttributeError here.
            if item.startswith("__") and item.endswith("__"):
                raise
            return StubAttr(self, item)

    def __pow__(self, power, modulo=None):
        return StubMethodCall(self, "__pow__", [power, modulo], dict())

    def __call__(self, *args, **kwargs):
        # NOTE(review): uses self.obj / self.attr_name, which are defined
        # on StubAttr — presumably __call__ is only reached on attribute
        # stubs; verify against other stub subclasses.
        return StubMethodCall(self.obj, self.attr_name, args, kwargs)

    def __add__(self, other):
        return StubMethodCall(self, "__add__", [other], dict())

    def __rmul__(self, other):
        return StubMethodCall(self, "__rmul__", [other], dict())

    def __div__(self, other):
        return StubMethodCall(self, "__div__", [other], dict())

    def __rdiv__(self, other):
        # Reflected division is routed through BinaryOp.rdiv so the operand
        # order is preserved when the recorded call is replayed.
        return StubMethodCall(BinaryOp(), "rdiv", [self, other], dict())  # self, "__rdiv__", [other], dict())

    def __rpow__(self, power, modulo=None):
        return StubMethodCall(self, "__rpow__", [power, modulo], dict())
class BinaryOp(Serializable):
    """Serializable helper carrying binary operations for stubbed arithmetic."""

    def __init__(self):
        Serializable.quick_init(self, locals())

    def rdiv(self, a, b):
        # Reflected division: evaluate b / a (operand order is swapped by
        # the caller, StubBase.__rdiv__).
        return b / a
    # def __init__(self, opname, a, b):
    #     self.opname = opname
    #     self.a = a
    #     self.b = b
class StubAttr(StubBase):
    """Stub node recording an attribute access on another stub object."""

    def __init__(self, obj, attr_name):
        # Write through __dict__ so StubBase.__getattr__ interception is
        # never triggered while constructing the node.
        self.__dict__["_obj"] = obj
        self.__dict__["_attr_name"] = attr_name

    @property
    def obj(self):
        # The stub whose attribute was accessed.
        return self.__dict__["_obj"]

    @property
    def attr_name(self):
        # Name of the accessed attribute.
        return self.__dict__["_attr_name"]

    def __str__(self):
        return "StubAttr(%s, %s)" % (str(self.obj), str(self.attr_name))
class StubMethodCall(StubBase, Serializable):
    """Stub node recording a deferred call: ``obj.method_name(*args, **kwargs)``."""

    def __init__(self, obj, method_name, args, kwargs):
        self._serializable_initialized = False
        # quick_init captures the constructor arguments from locals() so the
        # node can be pickled and reconstructed remotely.
        Serializable.quick_init(self, locals())
        self.obj = obj
        self.method_name = method_name
        self.args = args
        self.kwargs = kwargs

    def __str__(self):
        return "StubMethodCall(%s, %s, %s, %s)" % (
            str(self.obj), str(self.method_name), str(self.args), str(self.kwargs))
class StubClass(StubBase):
    """Stub wrapper around a class; "instantiating" it yields a StubObject."""

    def __init__(self, proxy_class):
        # The real class being stubbed.
        self.proxy_class = proxy_class

    def __call__(self, *args, **kwargs):
        if len(args) > 0:
            # Convert the positional arguments to keyword arguments
            spec = inspect.getargspec(self.proxy_class.__init__)
            kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
            args = tuple()
        return StubObject(self.proxy_class, *args, **kwargs)

    def __getstate__(self):
        # Only the wrapped class is pickled; the stub has no other state.
        return dict(proxy_class=self.proxy_class)

    def __setstate__(self, dict):
        self.proxy_class = dict["proxy_class"]

    def __getattr__(self, item):
        # Only expose attributes that actually exist on the wrapped class.
        if hasattr(self.proxy_class, item):
            return StubAttr(self, item)
        raise AttributeError

    def __str__(self):
        return "StubClass(%s)" % self.proxy_class
class StubObject(StubBase):
    """Stub "instance": records the class plus constructor args for later replay."""

    def __init__(self, __proxy_class, *args, **kwargs):
        if len(args) > 0:
            # Normalize positional constructor args to keyword args so the
            # recorded call is stable under pickling.
            spec = inspect.getargspec(__proxy_class.__init__)
            kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
            args = tuple()
        self.proxy_class = __proxy_class
        self.args = args
        self.kwargs = kwargs

    def __getstate__(self):
        return dict(args=self.args, kwargs=self.kwargs, proxy_class=self.proxy_class)

    def __setstate__(self, dict):
        self.args = dict["args"]
        self.kwargs = dict["kwargs"]
        self.proxy_class = dict["proxy_class"]

    def __getattr__(self, item):
        # why doesn't the commented code work?
        # return StubAttr(self, item)
        # checks bypassed to allow for accessing instance fields
        if hasattr(self.proxy_class, item):
            return StubAttr(self, item)
        raise AttributeError('Cannot get attribute %s from %s' % (item, self.proxy_class))

    def __str__(self):
        return "StubObject(%s, *%s, **%s)" % (str(self.proxy_class), str(self.args), str(self.kwargs))
class VariantDict(AttrDict):
    """AttrDict variant that omits designated hidden keys when dumped."""

    def __init__(self, d, hidden_keys):
        super(VariantDict, self).__init__(d)
        self._hidden_keys = hidden_keys

    def dump(self):
        """Return a plain dict of all entries whose key is not hidden."""
        visible = dict()
        for key, value in self.items():
            if key not in self._hidden_keys:
                visible[key] = value
        return visible
class VariantGenerator(object):
    """
    Usage:

    vg = VariantGenerator()
    vg.add("param1", [1, 2, 3])
    vg.add("param2", ['x', 'y'])
    vg.variants() => # all combinations of [1,2,3] x ['x','y']

    Supports noncyclic dependency among parameters:
    vg = VariantGenerator()
    vg.add("param1", [1, 2, 3])
    vg.add("param2", lambda param1: [param1+1, param1+2])
    vg.variants() => # ..
    """

    def __init__(self):
        # List of (key, values_or_callable, config_kwargs) triples.
        self._variants = []
        # Pick up any methods decorated with @variant on subclasses.
        self._populate_variants()
        self._hidden_keys = []
        for k, vs, cfg in self._variants:
            if cfg.get("hide", False):
                self._hidden_keys.append(k)

    def add(self, key, vals, **kwargs):
        """Register a parameter; ``vals`` is a list or a callable returning one."""
        self._variants.append((key, vals, kwargs))

    def _populate_variants(self):
        # Collect methods tagged by the @variant decorator (see `variant`
        # below) and register them as variant suppliers.
        methods = inspect.getmembers(
            self.__class__, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))
        methods = [x[1].__get__(self, self.__class__)
                   for x in methods if getattr(x[1], '__is_variant', False)]
        for m in methods:
            self.add(m.__name__, m, **getattr(m, "__variant_config", dict()))

    def variants(self, randomized=False):
        """Return all variant dicts, optionally in randomized order."""
        ret = list(self.ivariants())
        if randomized:
            np.random.shuffle(ret)
        return list(map(self.variant_dict, ret))

    def variant_dict(self, variant):
        return VariantDict(variant, self._hidden_keys)

    def to_name_suffix(self, variant):
        """Build a "_"-joined name suffix from the non-hidden parameters."""
        suffix = []
        for k, vs, cfg in self._variants:
            if not cfg.get("hide", False):
                suffix.append(k + "_" + str(variant[k]))
        return "_".join(suffix)

    def ivariants(self):
        """Lazily yield all variants, resolving parameter dependencies.

        Callable value suppliers declare their dependencies via argument
        names; the keys are topologically sorted before expansion. Raises
        ValueError on a cyclic dependency.
        """
        dependencies = list()
        for key, vals, _ in self._variants:
            if hasattr(vals, "__call__"):
                args = inspect.getargspec(vals).args
                if hasattr(vals, 'im_self') or hasattr(vals, "__self__"):
                    # remove the first 'self' parameter
                    args = args[1:]
                dependencies.append((key, set(args)))
            else:
                dependencies.append((key, set()))
        sorted_keys = []
        # topo sort all nodes
        while len(sorted_keys) < len(self._variants):
            # get all nodes with zero in-degree
            free_nodes = [k for k, v in dependencies if len(v) == 0]
            if len(free_nodes) == 0:
                error_msg = "Invalid parameter dependency: \n"
                for k, v in dependencies:
                    if len(v) > 0:
                        error_msg += k + " depends on " + " & ".join(v) + "\n"
                raise ValueError(error_msg)
            dependencies = [(k, v)
                            for k, v in dependencies if k not in free_nodes]
            # remove the free nodes from the remaining dependencies
            for _, v in dependencies:
                v.difference_update(free_nodes)
            sorted_keys += free_nodes
        return self._ivariants_sorted(sorted_keys)

    def _ivariants_sorted(self, sorted_keys):
        # Recursively expand the cartesian product, resolving the last key
        # against each partial variant produced by the preceding keys.
        if len(sorted_keys) == 0:
            yield dict()
        else:
            first_keys = sorted_keys[:-1]
            first_variants = self._ivariants_sorted(first_keys)
            last_key = sorted_keys[-1]
            last_vals = [v for k, v, _ in self._variants if k == last_key][0]
            if hasattr(last_vals, "__call__"):
                last_val_keys = inspect.getargspec(last_vals).args
                if hasattr(last_vals, 'im_self') or hasattr(last_vals, '__self__'):
                    last_val_keys = last_val_keys[1:]
            else:
                last_val_keys = None
            for variant in first_variants:
                if hasattr(last_vals, "__call__"):
                    # Call the supplier with exactly the parameters it
                    # declared as dependencies.
                    last_variants = last_vals(
                        **{k: variant[k] for k in last_val_keys})
                    for last_choice in last_variants:
                        yield AttrDict(variant, **{last_key: last_choice})
                else:
                    for last_choice in last_vals:
                        yield AttrDict(variant, **{last_key: last_choice})
def variant(*args, **kwargs):
    """Decorator marking a VariantGenerator method as a variant supplier.

    Usable bare (``@variant``) or with configuration kwargs
    (``@variant(hide=True)``). The flags are stored on the function itself
    and picked up by ``VariantGenerator._populate_variants``.
    """
    def _variant(fn):
        fn.__is_variant = True
        fn.__variant_config = kwargs
        return fn
    # `collections.Callable` was removed in Python 3.10; the builtin
    # callable() performs the same instance check.
    if len(args) == 1 and callable(args[0]):
        return _variant(args[0])
    return _variant
def stub(glbs):
    """Replace every class in the globals dict with a StubClass wrapper.

    Hacky: mutates ``glbs`` in place so that subsequent "instantiations"
    in the caller's namespace build stub objects instead of real ones.
    """
    for name, value in list(glbs.items()):
        # Only plain classes are wrapped; already-stubbed entries are kept.
        if isinstance(value, type) and value != StubClass:
            glbs[name] = StubClass(value)
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning an answer is
    required of the user).

    The "answer" return value is True for "yes" or False for "no".
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
# Monotonically increasing counter used to build unique experiment names.
exp_count = 0
# Timestamp (local timezone) shared by all experiments launched in this run.
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
# Set to True once the user has confirmed a remote (non-local) launch,
# so the confirmation prompt is only shown once per process.
remote_confirmed = False
def run_experiment_lite(
        stub_method_call=None,
        batch_tasks=None,
        exp_prefix="experiment",
        exp_name=None,
        log_dir=None,
        script="scripts/run_experiment_lite.py",
        python_command="python",
        mode="local",
        dry=False,
        docker_image=None,
        aws_config=None,
        env=None,
        variant=None,
        use_gpu=False,
        sync_s3_pkl=False,
        sync_s3_png=False,
        sync_s3_log=False,
        sync_log_on_termination=True,
        confirm_remote=True,
        terminate_machine=True,
        periodic_sync=True,
        periodic_sync_interval=15,
        sync_all_data_node_to_s3=True,
        use_cloudpickle=None,
        pre_commands=None,
        added_project_directories=None,
        **kwargs):
    """
    Serialize the stubbed method call and run the experiment using the specified mode.
    :param stub_method_call: A stubbed method call.
    :param script: The name of the entrance point python script
    :param mode: Where & how to run the experiment. Should be one of "local", "local_docker", "ec2",
    and "lab_kube".
    :param dry: Whether to do a dry-run, which only prints the commands without executing them.
    :param exp_prefix: Name prefix for the experiments
    :param docker_image: name of the docker image. Ignored if using local mode.
    :param aws_config: configuration for AWS. Only used under EC2 mode
    :param env: extra environment variables
    :param kwargs: All other parameters will be passed directly to the entrance python script.
    :param variant: If provided, should be a dictionary of parameters
    :param use_gpu: Whether the launched task is running on GPU. This triggers a few configuration changes including
    certain environment flags
    :param sync_s3_pkl: Whether to sync pkl files during execution of the experiment (they will always be synced at
    the end of the experiment)
    :param sync_s3_png: Whether to sync png files during execution of the experiment (they will always be synced at
    the end of the experiment)
    :param sync_s3_log: Whether to sync log files during execution of the experiment (they will always be synced at
    the end of the experiment)
    :param confirm_remote: Whether to confirm before launching experiments remotely
    :param terminate_machine: Whether to terminate machine after experiment finishes. Only used when using
    mode="ec2". This is useful when one wants to debug after an experiment finishes abnormally.
    :param periodic_sync: Whether to synchronize certain experiment files periodically during execution.
    :param periodic_sync_interval: Time interval between each periodic sync, in seconds.
    :param added_project_directories: extra project directories to ship along with the code
    (defaults to none). Only used under EC2 mode.
    """
    assert stub_method_call is not None or batch_tasks is not None, "Must provide at least either stub_method_call or batch_tasks"

    # None sentinel instead of a mutable default list, so repeated calls can
    # never share (and accidentally mutate) the same default object.
    if added_project_directories is None:
        added_project_directories = []

    if use_cloudpickle is None:
        for maybe_stub in (batch_tasks or [stub_method_call]):
            # decide mode
            if isinstance(maybe_stub, StubBase):
                use_cloudpickle = False
            else:
                assert hasattr(maybe_stub, '__call__')
                use_cloudpickle = True
    # ensure variant exists
    if variant is None:
        variant = dict()

    if batch_tasks is None:
        batch_tasks = [
            dict(
                kwargs,
                pre_commands=pre_commands,
                stub_method_call=stub_method_call,
                exp_name=exp_name,
                log_dir=log_dir,
                env=env,
                variant=variant,
                use_cloudpickle=use_cloudpickle
            )
        ]

    global exp_count
    global remote_confirmed
    config.USE_GPU = use_gpu

    # params_list = []

    for task in batch_tasks:
        # Serialize the deferred call so the remote entry point can replay it.
        call = task.pop("stub_method_call")
        if use_cloudpickle:
            import cloudpickle
            data = base64.b64encode(cloudpickle.dumps(call)).decode("utf-8")
        else:
            data = base64.b64encode(pickle.dumps(call)).decode("utf-8")
        task["args_data"] = data
        exp_count += 1
        params = dict(kwargs)
        if task.get("exp_name", None) is None:
            task["exp_name"] = "%s_%s_%04d" % (
                exp_prefix, timestamp, exp_count)
        if task.get("log_dir", None) is None:
            task["log_dir"] = config.LOG_DIR + "/local/" + \
                              exp_prefix.replace("_", "-") + "/" + task["exp_name"]
        if task.get("variant", None) is not None:
            variant = task.pop("variant")
            if "exp_name" not in variant:
                variant["exp_name"] = task["exp_name"]
            task["variant_data"] = base64.b64encode(pickle.dumps(variant)).decode("utf-8")
        elif "variant" in task:
            del task["variant"]
        task["remote_log_dir"] = osp.join(
            config.AWS_S3_PATH, exp_prefix.replace("_", "-"), task["exp_name"])
        task["env"] = task.get("env", dict()) or dict()
        task["env"]["RLLAB_USE_GPU"] = str(use_gpu)

    if mode not in ["local", "local_docker"] and not remote_confirmed and not dry and confirm_remote:
        remote_confirmed = query_yes_no(
            "Running in (non-dry) mode %s. Confirm?" % mode)
        if not remote_confirmed:
            sys.exit(1)

    if hasattr(mode, "__call__"):
        if docker_image is None:
            docker_image = config.DOCKER_IMAGE
        # NOTE(review): `task` here is the loop variable leaked from the loop
        # above, i.e. only the LAST entry of batch_tasks is passed to a
        # callable mode -- verify this is intended for multi-task batches.
        mode(
            task,
            docker_image=docker_image,
            use_gpu=use_gpu,
            exp_prefix=exp_prefix,
            script=script,
            python_command=python_command,
            sync_s3_pkl=sync_s3_pkl,
            sync_log_on_termination=sync_log_on_termination,
            periodic_sync=periodic_sync,
            periodic_sync_interval=periodic_sync_interval,
            sync_all_data_node_to_s3=sync_all_data_node_to_s3,
        )
    elif mode == "local":
        for task in batch_tasks:
            del task["remote_log_dir"]
            env = task.pop("env", None)
            command = to_local_command(
                task,
                python_command=python_command,
                script=osp.join(config.PROJECT_PATH, script),
                use_gpu=use_gpu
            )
            print(command)
            if dry:
                return
            try:
                if env is None:
                    env = dict()
                subprocess.call(
                    command, shell=True, env=dict(os.environ, **env))
            except Exception as e:
                print(e)
                if isinstance(e, KeyboardInterrupt):
                    raise
    elif mode == "local_docker":
        if docker_image is None:
            docker_image = config.DOCKER_IMAGE
        for task in batch_tasks:
            del task["remote_log_dir"]
            env = task.pop("env", None)
            command = to_docker_command(
                task,  # these are the params. Pre and Post command can be here
                docker_image=docker_image,
                script=script,
                env=env,
                use_gpu=use_gpu,
                use_tty=True,
                python_command=python_command,
            )
            print(command)
            if dry:
                return
            p = subprocess.Popen(command, shell=True)
            try:
                p.wait()
            except KeyboardInterrupt:
                try:
                    print("terminating")
                    p.terminate()
                except OSError:
                    print("os error!")
                    pass
                p.wait()
    elif mode == "ec2":
        if docker_image is None:
            docker_image = config.DOCKER_IMAGE
        s3_code_path = s3_sync_code(config, dry=dry, added_project_directories=added_project_directories)
        launch_ec2(batch_tasks,
                   exp_prefix=exp_prefix,
                   docker_image=docker_image,
                   python_command=python_command,
                   script=script,
                   aws_config=aws_config,
                   dry=dry,
                   terminate_machine=terminate_machine,
                   use_gpu=use_gpu,
                   code_full_path=s3_code_path,
                   sync_s3_pkl=sync_s3_pkl,
                   sync_s3_png=sync_s3_png,
                   sync_s3_log=sync_s3_log,
                   sync_log_on_termination=sync_log_on_termination,
                   periodic_sync=periodic_sync,
                   periodic_sync_interval=periodic_sync_interval)
    elif mode == "lab_kube":
        # assert env is None
        # first send code folder to s3
        s3_code_path = s3_sync_code(config, dry=dry)
        if docker_image is None:
            docker_image = config.DOCKER_IMAGE
        for task in batch_tasks:
            # if 'env' in task:
            #     assert task.pop('env') is None
            # TODO: dangerous when there are multiple tasks?
            # NOTE(review): `params` is also leaked from the serialization
            # loop above (last task's kwargs copy) -- verify.
            task["resources"] = params.pop(
                "resources", config.KUBE_DEFAULT_RESOURCES)
            task["node_selector"] = params.pop(
                "node_selector", config.KUBE_DEFAULT_NODE_SELECTOR)
            task["exp_prefix"] = exp_prefix
            pod_dict = to_lab_kube_pod(
                task, code_full_path=s3_code_path, docker_image=docker_image, script=script, is_gpu=use_gpu,
                python_command=python_command,
                sync_s3_pkl=sync_s3_pkl, periodic_sync=periodic_sync,
                periodic_sync_interval=periodic_sync_interval,
                sync_all_data_node_to_s3=sync_all_data_node_to_s3,
                terminate_machine=terminate_machine,
            )
            pod_str = json.dumps(pod_dict, indent=1)
            if dry:
                print(pod_str)
            dir = "{pod_dir}/{exp_prefix}".format(
                pod_dir=config.POD_DIR, exp_prefix=exp_prefix)
            ensure_dir(dir)
            fname = "{dir}/{exp_name}.json".format(
                dir=dir,
                exp_name=task["exp_name"]
            )
            with open(fname, "w") as fh:
                fh.write(pod_str)
            kubecmd = "kubectl create -f %s" % fname
            print(kubecmd)
            if dry:
                return
            retry_count = 0
            wait_interval = 1
            while retry_count <= 5:
                try:
                    return_code = subprocess.call(kubecmd, shell=True)
                    if return_code == 0:
                        break
                    retry_count += 1
                    print("trying again...")
                    time.sleep(wait_interval)
                except Exception as e:
                    if isinstance(e, KeyboardInterrupt):
                        raise
                    print(e)
    else:
        raise NotImplementedError
# Pattern used by _shellquote to decide whether quoting is needed.
# NOTE(review): this character class matches *allowed* characters (it is NOT
# negated like the classic pipes.quote pattern), so any string containing one
# of them gets quoted, while strings made up entirely of other characters are
# returned unquoted by _shellquote -- verify this inversion is intentional.
_find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search
def ensure_dir(dirname):
    """
    Ensure that a named directory exists; if it does not, attempt to create it.
    """
    # exist_ok=True tolerates an already-existing directory, replacing the
    # manual EEXIST errno check (and, unlike the old check, still raises if
    # the path exists but is not a directory).
    os.makedirs(dirname, exist_ok=True)
def _shellquote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def _to_param_val(v):
if v is None:
return ""
elif isinstance(v, list):
return " ".join(map(_shellquote, list(map(str, v))))
else:
return _shellquote(str(v))
def to_local_command(params, python_command="python", script=osp.join(config.PROJECT_PATH,
                                                                      'scripts/run_experiment.py'),
                     use_gpu=False):
    """Build the shell command line that runs ``script`` locally with ``params``.

    Dict-valued params are flattened to ``--key_subkey value`` flags (a
    ``_name`` subkey maps to just ``--key value``); entries of config.ENV
    are prepended as environment-variable assignments.
    """
    command = python_command + " " + script
    if use_gpu and not config.USE_TF:
        # Theano selects its device via environment flags.
        command = "THEANO_FLAGS='device=gpu,dnn.enabled=auto,floatX=float32' " + command
    for k, v in config.ENV.items():
        command = ("%s=%s " % (k, v)) + command
    pre_commands = params.pop("pre_commands", None)
    post_commands = params.pop("post_commands", None)
    if pre_commands is not None or post_commands is not None:
        # Local mode runs the script directly (no wrapper shell), so these
        # hooks cannot be honored here.
        print("Not executing the pre_commands: ", pre_commands, ", nor post_commands: ", post_commands)

    for k, v in params.items():
        if isinstance(v, dict):
            for nk, nv in v.items():
                if str(nk) == "_name":
                    command += " --%s %s" % (k, _to_param_val(nv))
                else:
                    command += \
                        " --%s_%s %s" % (k, nk, _to_param_val(nv))
        else:
            command += "  --%s %s" % (k, _to_param_val(v))
    return command
def to_docker_command(params, docker_image, python_command="python", script='scripts/run_experiment_lite.py',
                      pre_commands=None, use_tty=False,
                      mujoco_path=None,
                      post_commands=None, dry=False, use_gpu=False, env=None, local_code_dir=None):
    """
    :param params: The parameters for the experiment. If logging directory parameters are provided, we will create
    docker volume mapping to make sure that the logging files are created at the correct locations
    :param docker_image: docker image to run the command on
    :param script: script command for running experiment
    :param pre_commands: shell commands executed inside the container before the experiment
    :param post_commands: shell commands executed inside the container afterwards (defaults to a 2-min sleep)
    :param use_tty: allocate an interactive TTY (-ti) instead of plain -i
    :param use_gpu: run via nvidia-docker and pass GPU flags through
    :param env: extra environment variables exported into the container
    :param local_code_dir: host code directory to mount (defaults to config.PROJECT_PATH)
    :return: the full ``docker run ... '/bin/bash -c ...'`` command string
    """
    log_dir = params.get("log_dir")
    docker_args = params.pop("docker_args", "")
    if pre_commands is None:
        pre_commands = params.pop("pre_commands", None)
    if post_commands is None:
        post_commands = params.pop("post_commands", None)
    if mujoco_path is None:
        mujoco_path = config.MUJOCO_KEY_PATH
    # script = 'rllab/' + script
    # if not dry:

    # create volume for logging directory
    if use_gpu:
        command_prefix = "nvidia-docker run"
    else:
        command_prefix = "docker run"
    docker_log_dir = config.DOCKER_LOG_DIR

    if env is None:
        env = dict()
    # AWS credentials are always exported so in-container syncs can run.
    env = dict(
        env,
        AWS_ACCESS_KEY_ID=config.AWS_ACCESS_KEY,
        AWS_SECRET_ACCESS_KEY=config.AWS_ACCESS_SECRET,
    )
    if env is not None:
        for k, v in env.items():
            command_prefix += " -e \"{k}={v}\"".format(k=k, v=v)
    command_prefix += " -v {local_mujoco_key_dir}:{docker_mujoco_key_dir}".format(
        local_mujoco_key_dir=mujoco_path, docker_mujoco_key_dir='/root/.mujoco')
    command_prefix += " -v {local_log_dir}:{docker_log_dir}".format(
        local_log_dir=log_dir,
        docker_log_dir=docker_log_dir
    )
    command_prefix += docker_args
    if local_code_dir is None:
        local_code_dir = config.PROJECT_PATH
    command_prefix += " -v {local_code_dir}:{docker_code_dir}".format(
        local_code_dir=local_code_dir,
        docker_code_dir=config.DOCKER_CODE_DIR
    )
    # Inside the container, logs go to the mounted docker log directory.
    params = dict(params, log_dir=docker_log_dir)
    if use_tty:
        command_prefix += " -ti " + docker_image + " /bin/bash -c "
    else:
        command_prefix += " -i " + docker_image + " /bin/bash -c "
    command_list = list()
    if pre_commands is not None:
        command_list.extend(pre_commands)
    command_list.append("echo \"Running in docker\"")
    command_list.append(to_local_command(
        params, python_command=python_command, script=osp.join(config.DOCKER_CODE_DIR, script), use_gpu=use_gpu))
    # We sleep for 2 min after termination to allow for last syncs.
    if post_commands is None:
        post_commands = ['sleep 120']
    command_list.extend(post_commands)
    return command_prefix + "'" + "; ".join(command_list) + "'"
def dedent(s):
    """Strip leading and trailing whitespace from every line of *s*.

    Note: unlike textwrap.dedent, this removes ALL per-line indentation,
    which is what the generated shell scripts expect.
    """
    return '\n'.join(line.strip() for line in s.split('\n'))
def launch_ec2(params_list, exp_prefix, docker_image, code_full_path,
python_command="python",
script='scripts/run_experiment.py',
aws_config=None, dry=False, terminate_machine=True, use_gpu=False, sync_s3_pkl=False,
sync_s3_png=False,
sync_s3_log=False,
sync_log_on_termination=True,
periodic_sync=True, periodic_sync_interval=15):
if len(params_list) == 0:
return
default_config = dict(
image_id=config.AWS_IMAGE_ID,
instance_type=config.AWS_INSTANCE_TYPE,
key_name=config.AWS_KEY_NAME,
spot=config.AWS_SPOT,
spot_price=config.AWS_SPOT_PRICE,
iam_instance_profile_name=config.AWS_IAM_INSTANCE_PROFILE_NAME,
security_groups=config.AWS_SECURITY_GROUPS,
security_group_ids=config.AWS_SECURITY_GROUP_IDS,
network_interfaces=config.AWS_NETWORK_INTERFACES,
)
if aws_config is None:
aws_config = dict()
aws_config = dict(default_config, **aws_config)
sio = StringIO()
sio.write("#!/bin/bash\n")
sio.write("{\n")
sio.write("""
die() { status=$1; shift; echo "FATAL: $*"; exit $status; }
""")
sio.write("""
EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id`"
""")
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}
""".format(exp_name=params_list[0].get("exp_name"), aws_region=config.AWS_REGION_NAME))
if config.LABEL:
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=owner,Value={label} --region {aws_region}
""".format(label=config.LABEL, aws_region=config.AWS_REGION_NAME))
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=exp_prefix,Value={exp_prefix} --region {aws_region}
""".format(exp_prefix=exp_prefix, aws_region=config.AWS_REGION_NAME))
sio.write("""
service docker start
""")
sio.write("""
docker --config /home/ubuntu/.docker pull {docker_image}
""".format(docker_image=docker_image))
sio.write("""
export AWS_DEFAULT_REGION={aws_region}
""".format(aws_region=config.AWS_REGION_NAME))
if config.FAST_CODE_SYNC:
# sio.write("""
# aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz --region {aws_region}
# """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
# aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))
sio.write("""
mkdir -p {local_code_path}
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
aws_region=config.AWS_REGION_NAME))
sio.write("""
tar -zxvf /tmp/rllab_code.tar.gz -C {local_code_path}
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
aws_region=config.AWS_REGION_NAME))
else:
# sio.write("""
# aws s3 cp --recursive {code_full_path} {local_code_path} --region {aws_region}
# """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
# aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp --recursive {code_full_path} {local_code_path}
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))
s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'
# sio.write("""
# aws s3 cp --recursive {} {} --region {}
# """.format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH, config.AWS_REGION_NAME))
sio.write("""
aws s3 cp --recursive {} {}
""".format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH))
sio.write("""
cd {local_code_path}
""".format(local_code_path=config.DOCKER_CODE_DIR))
for params in params_list:
log_dir = params.get("log_dir")
remote_log_dir = params.pop("remote_log_dir")
env = params.pop("env", None)
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}
""".format(exp_name=params.get("exp_name"), aws_region=config.AWS_REGION_NAME))
sio.write("""
mkdir -p {log_dir}
""".format(log_dir=log_dir))
if periodic_sync:
include_png = " --include '*.png' " if sync_s3_png else " "
include_pkl = " --include '*.pkl' " if sync_s3_pkl else " "
include_log = " --include '*.log' " if sync_s3_log else " "
# sio.write("""
# while /bin/true; do
# aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region}
# sleep {periodic_sync_interval}
# done & echo sync initiated""".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,
# log_dir=log_dir, remote_log_dir=remote_log_dir,
# aws_region=config.AWS_REGION_NAME,
# periodic_sync_interval=periodic_sync_interval))
sio.write("""
while /bin/true; do
aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir}
sleep {periodic_sync_interval}
done & echo sync initiated""".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,
log_dir=log_dir, remote_log_dir=remote_log_dir,
periodic_sync_interval=periodic_sync_interval))
if sync_log_on_termination:
# sio.write("""
# while /bin/true; do
# if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \ -f 2) ]
# then
# logger "Running shutdown hook."
# aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}
# aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}
# break
# else
# # Spot instance not yet marked for termination.
# sleep 5
# fi
# done & echo log sync initiated
# """.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
sio.write("""
while /bin/true; do
if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \ -f 2) ]
then
logger "Running shutdown hook."
aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log
aws s3 cp --recursive {log_dir} {remote_log_dir}
break
else
# Spot instance not yet marked for termination.
sleep 5
fi
done & echo log sync initiated
""".format(log_dir=log_dir, remote_log_dir=remote_log_dir))
if use_gpu:
sio.write("""
for i in {1..800}; do su -c "nvidia-modprobe -u -c=0" ubuntu && break || sleep 3; done
systemctl start nvidia-docker
""")
sio.write("""
{command}
""".format(command=to_docker_command(params, docker_image, python_command=python_command, script=script,
use_gpu=use_gpu, env=env,
local_code_dir=config.DOCKER_CODE_DIR)))
# sio.write("""
# aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}
# """.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp --recursive {log_dir} {remote_log_dir}
""".format(log_dir=log_dir, remote_log_dir=remote_log_dir))
# sio.write("""
# aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}
# """.format(remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log
""".format(remote_log_dir=remote_log_dir))
if terminate_machine:
sio.write("""
EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id || die \"wget instance-id has failed: $?\"`"
aws ec2 terminate-instances --instance-ids $EC2_INSTANCE_ID --region {aws_region}
""".format(aws_region=config.AWS_REGION_NAME))
sio.write("} >> /home/ubuntu/user_data.log 2>&1\n")
full_script = dedent(sio.getvalue())
import boto3
import botocore
if aws_config["spot"]:
ec2 = boto3.client(
"ec2",
region_name=config.AWS_REGION_NAME,
aws_access_key_id=config.AWS_ACCESS_KEY,
aws_secret_access_key=config.AWS_ACCESS_SECRET,
)
else:
ec2 = boto3.resource(
"ec2",
region_name=config.AWS_REGION_NAME,
aws_access_key_id=config.AWS_ACCESS_KEY,
aws_secret_access_key=config.AWS_ACCESS_SECRET,
)
if len(full_script) > 10000 or len(base64.b64encode(full_script.encode()).decode("utf-8")) > 10000:
# Script too long; need to upload script to s3 first.
# We're being conservative here since the actual limit is 16384 bytes
s3_path = upload_file_to_s3(full_script)
sio = StringIO()
sio.write("#!/bin/bash\n")
sio.write("""
aws s3 cp {s3_path} /home/ubuntu/remote_script.sh --region {aws_region} && \\
chmod +x /home/ubuntu/remote_script.sh && \\
bash /home/ubuntu/remote_script.sh
""".format(s3_path=s3_path, aws_region=config.AWS_REGION_NAME))
user_data = dedent(sio.getvalue())
else:
user_data = full_script
print(full_script)
with open("/tmp/full_script", "w") as f:
f.write(full_script)
instance_args = dict(
ImageId=aws_config["image_id"],
KeyName=aws_config["key_name"],
UserData=user_data,
InstanceType=aws_config["instance_type"],
EbsOptimized=config.EBS_OPTIMIZED,
SecurityGroups=aws_config["security_groups"],
SecurityGroupIds=aws_config["security_group_ids"],
NetworkInterfaces=aws_config["network_interfaces"],
IamInstanceProfile=dict(
Name=aws_config["iam_instance_profile_name"],
),
**config.AWS_EXTRA_CONFIGS,
)
if len(instance_args["NetworkInterfaces"]) > 0:
# disable_security_group = query_yes_no(
# "Cannot provide both network interfaces and security groups info. Do you want to disable security group settings?",
# default="yes",
# )
disable_security_group = True
if disable_security_group:
instance_args.pop("SecurityGroups")
instance_args.pop("SecurityGroupIds")
if aws_config.get("placement", None) is not None:
instance_args["Placement"] = aws_config["placement"]
if not aws_config["spot"]:
instance_args["MinCount"] = 1
instance_args["MaxCount"] = 1
print("************************************************************")
print(instance_args["UserData"])
print("************************************************************")
if aws_config["spot"]:
instance_args["UserData"] = base64.b64encode(instance_args["UserData"].encode()).decode("utf-8")
spot_args = dict(
DryRun=dry,
InstanceCount=1,
LaunchSpecification=instance_args,
SpotPrice=aws_config["spot_price"],
# ClientToken=params_list[0]["exp_name"],
)
import pprint
pprint.pprint(spot_args)
if not dry:
response = ec2.request_spot_instances(**spot_args)
print(response)
spot_request_id = response['SpotInstanceRequests'][
0]['SpotInstanceRequestId']
for _ in range(10):
try:
ec2.create_tags(
Resources=[spot_request_id],
Tags=[
{'Key': 'Name', 'Value': params_list[0]["exp_name"]}
],
)
break
except botocore.exceptions.ClientError:
continue
else:
import pprint
pprint.pprint(instance_args)
ec2.create_instances(
DryRun=dry,
**instance_args
)
# Module-level cache for the S3 path of the synced code archive; populated by
# s3_sync_code() on its first call so repeated launches reuse the same upload.
S3_CODE_PATH = None
def s3_sync_code(config, dry=False, added_project_directories=None):
    """Upload the current project code to S3 and return the remote path.

    The result is cached in the module-level ``S3_CODE_PATH`` so repeated
    calls within a single launcher run reuse the same upload. In fast mode a
    single tarball is uploaded; otherwise the tree is synced file-by-file via
    an S3-side cache keyed on the working directory.

    :param config: rllab config module (S3 paths, ignore patterns, etc.).
    :param dry: if True, only print the commands without executing them.
    :param added_project_directories: extra directories to include in the
        code archive (fast-sync mode only).
    :return: the S3 path holding the synced code.
    """
    global S3_CODE_PATH
    if S3_CODE_PATH is not None:
        return S3_CODE_PATH
    # Avoid a mutable default argument; normalize to a fresh list per call.
    if added_project_directories is None:
        added_project_directories = []
    base = config.AWS_CODE_SYNC_S3_PATH
    has_git = True
    if config.FAST_CODE_SYNC:
        try:
            current_commit = subprocess.check_output(
                ["git", "rev-parse", "HEAD"]).strip().decode("utf-8")
        except subprocess.CalledProcessError as _:
            print("Warning: failed to execute git commands")
            current_commit = None
        # Unique archive name derived from cwd, commit and launch timestamp.
        file_name = str(timestamp) + "_" + hashlib.sha224(
            subprocess.check_output(["pwd"]) + str(current_commit).encode() + str(timestamp).encode()
        ).hexdigest() + ".tar.gz"
        file_path = "/tmp/" + file_name
        tar_cmd = ["tar", "-zcvf", file_path, "-C", config.PROJECT_PATH]
        for pattern in config.FAST_CODE_SYNC_IGNORES:
            tar_cmd += ["--exclude", pattern]
        tar_cmd += ["-h", "."]
        for path in added_project_directories:
            tar_cmd.append("-C")
            tar_cmd.append(path)
            tar_cmd += ["."]
        remote_path = "%s/%s" % (base, file_name)
        upload_cmd = ["aws", "s3", "cp", file_path, remote_path]
        mujoco_key_cmd = [
            "aws", "s3", "sync", config.MUJOCO_KEY_PATH, "{}/.mujoco/".format(base)]
        print(" ".join(tar_cmd))
        print(" ".join(upload_cmd))
        print(" ".join(mujoco_key_cmd))
        if not dry:
            subprocess.check_call(tar_cmd)
            subprocess.check_call(upload_cmd)
            try:
                # mujoco keys are optional; a failure should not abort launch
                subprocess.check_call(mujoco_key_cmd)
            except Exception as e:
                print(e)
        S3_CODE_PATH = remote_path
        return remote_path
    else:
        try:
            current_commit = subprocess.check_output(
                ["git", "rev-parse", "HEAD"]).strip().decode("utf-8")
            clean_state = len(
                subprocess.check_output(["git", "status", "--porcelain"])) == 0
        except subprocess.CalledProcessError as _:
            print("Warning: failed to execute git commands")
            has_git = False
        dir_hash = base64.b64encode(subprocess.check_output(["pwd"])).decode("utf-8")
        code_path = "%s_%s" % (
            dir_hash,
            (current_commit if clean_state else "%s_dirty_%s" % (current_commit, timestamp)) if
            has_git else timestamp
        )
        full_path = "%s/%s" % (base, code_path)
        cache_path = "%s/%s" % (base, dir_hash)
        # seed the destination from the per-directory cache, sync the working
        # tree on top, then refresh the cache from the result
        cache_cmds = ["aws", "s3", "cp", "--recursive"] + \
                     flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
                     [cache_path, full_path]
        cmds = ["aws", "s3", "cp", "--recursive"] + \
               flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
               [".", full_path]
        caching_cmds = ["aws", "s3", "cp", "--recursive"] + \
                       flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
                       [full_path, cache_path]
        mujoco_key_cmd = [
            "aws", "s3", "sync", config.MUJOCO_KEY_PATH, "{}/.mujoco/".format(base)]
        print(cache_cmds, cmds, caching_cmds, mujoco_key_cmd)
        if not dry:
            subprocess.check_call(cache_cmds)
            subprocess.check_call(cmds)
            subprocess.check_call(caching_cmds)
            try:
                subprocess.check_call(mujoco_key_cmd)
            except Exception:
                print('Unable to sync mujoco keys!')
        S3_CODE_PATH = full_path
        return full_path
def upload_file_to_s3(script_content):
    """Upload *script_content* to a unique S3 object and return its path.

    Used when an EC2 user-data script exceeds the size limit, so the instance
    fetches the full script from S3 instead of embedding it inline.

    :param script_content: the script text to upload.
    :return: the S3 path of the uploaded object.
    """
    import tempfile
    import uuid
    f = tempfile.NamedTemporaryFile(delete=False)
    try:
        f.write(script_content.encode())
        f.close()
        remote_path = os.path.join(
            config.AWS_CODE_SYNC_S3_PATH, "oversize_bash_scripts", str(uuid.uuid4()))
        subprocess.check_call(["aws", "s3", "cp", f.name, remote_path])
    finally:
        # remove the local temp file even if the upload fails
        os.unlink(f.name)
    return remote_path
def to_lab_kube_pod(
        params, docker_image, code_full_path,
        python_command="python",
        script='scripts/run_experiment.py',
        is_gpu=False,
        sync_s3_pkl=False,
        periodic_sync=True,
        periodic_sync_interval=15,
        sync_all_data_node_to_s3=False,
        terminate_machine=True
):
    """
    Build a Kubernetes pod specification (a dict) that runs the experiment.

    :param params: The parameters for the experiment. If logging directory parameters are provided, we will create
    docker volume mapping to make sure that the logging files are created at the correct locations
    :param docker_image: docker image to run the command on
    :param code_full_path: S3 path the pod downloads the project code from
    :param python_command: python interpreter used to run the script
    :param script: script command for running experiment
    :param is_gpu: if True, mount the nvidia driver volume and run privileged
    :param sync_s3_pkl: also periodically sync *.pkl files to S3
    :param periodic_sync: run a background loop syncing logs to S3
    :param periodic_sync_interval: seconds between periodic syncs
    :param sync_all_data_node_to_s3: sync everything under log_dir, not just
        csv/json(/pkl) files
    :param terminate_machine: if False, keep the pod alive after the run
    :return: a dict pod spec, ready to submit to the Kubernetes API
    """
    log_dir = params.get("log_dir")
    remote_log_dir = params.pop("remote_log_dir")
    resources = params.pop("resources")
    node_selector = params.pop("node_selector")
    exp_prefix = params.pop("exp_prefix")
    # container environment variables in kubernetes name/value form
    kube_env = [
        {"name": k, "value": v}
        for k, v in (params.pop("env", None) or dict()).items()
    ]
    mkdir_p(log_dir)
    pre_commands = list()
    pre_commands.append('mkdir -p ~/.aws')
    pre_commands.append('mkdir ~/.mujoco')
    # fetch credentials from the kubernetes secret file
    pre_commands.append('echo "[default]" >> ~/.aws/credentials')
    pre_commands.append(
        "echo \"aws_access_key_id = %s\" >> ~/.aws/credentials" % config.AWS_ACCESS_KEY)
    pre_commands.append(
        "echo \"aws_secret_access_key = %s\" >> ~/.aws/credentials" % config.AWS_ACCESS_SECRET)
    s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'
    pre_commands.append(
        'aws s3 cp --recursive {} {}'.format(s3_mujoco_key_path, '~/.mujoco'))
    if config.FAST_CODE_SYNC:
        pre_commands.append('aws s3 cp %s /tmp/rllab_code.tar.gz' % code_full_path)
        pre_commands.append('mkdir -p %s' % config.DOCKER_CODE_DIR)
        pre_commands.append('tar -zxvf /tmp/rllab_code.tar.gz -C %s' % config.DOCKER_CODE_DIR)
    else:
        pre_commands.append('aws s3 cp --recursive %s %s' %
                            (code_full_path, config.DOCKER_CODE_DIR))
    pre_commands.append('cd %s' % config.DOCKER_CODE_DIR)
    pre_commands.append('mkdir -p %s' %
                        (log_dir))
    if sync_all_data_node_to_s3:
        print('Syncing all data from node to s3.')
        if periodic_sync:
            # NOTE: sync_s3_pkl is irrelevant here — when syncing the whole
            # node, everything under log_dir is uploaded either way (the two
            # previous branches were identical and have been collapsed).
            pre_commands.append("""
                while /bin/true; do
                    aws s3 sync {log_dir} {remote_log_dir} --region {aws_region} --quiet
                    sleep {periodic_sync_interval}
                done & echo sync initiated""".format(log_dir=log_dir, remote_log_dir=remote_log_dir,
                                                     aws_region=config.AWS_REGION_NAME,
                                                     periodic_sync_interval=periodic_sync_interval))
    else:
        if periodic_sync:
            if sync_s3_pkl:
                pre_commands.append("""
                    while /bin/true; do
                        aws s3 sync --exclude '*' --include '*.csv' --include '*.json' --include '*.pkl' {log_dir} {remote_log_dir} --region {aws_region} --quiet
                        sleep {periodic_sync_interval}
                    done & echo sync initiated""".format(log_dir=log_dir, remote_log_dir=remote_log_dir,
                                                         aws_region=config.AWS_REGION_NAME,
                                                         periodic_sync_interval=periodic_sync_interval))
            else:
                pre_commands.append("""
                    while /bin/true; do
                        aws s3 sync --exclude '*' --include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region} --quiet
                        sleep {periodic_sync_interval}
                    done & echo sync initiated""".format(log_dir=log_dir, remote_log_dir=remote_log_dir,
                                                         aws_region=config.AWS_REGION_NAME,
                                                         periodic_sync_interval=periodic_sync_interval))
    # copy the file to s3 after execution
    post_commands = list()
    post_commands.append('aws s3 cp --recursive %s %s' %
                         (log_dir,
                          remote_log_dir))
    if not terminate_machine:
        post_commands.append('sleep infinity')
    command_list = list()
    if pre_commands is not None:
        command_list.extend(pre_commands)
    command_list.append("echo \"Running in docker\"")
    command_list.append(
        "%s 2>&1 | tee -a %s" % (
            to_local_command(params, python_command=python_command, script=script),
            "%s/stdouterr.log" % log_dir
        )
    )
    if post_commands is not None:
        command_list.extend(post_commands)
    command = "; ".join(command_list)
    pod_name = config.KUBE_PREFIX + params["exp_name"]
    # underscore is not allowed in pod names
    pod_name = pod_name.replace("_", "-")
    print("Is gpu: ", is_gpu)
    if not is_gpu:
        return {
            "apiVersion": "v1",
            "kind": "Pod",
            "metadata": {
                "name": pod_name,
                "labels": {
                    "owner": config.LABEL,
                    "expt": pod_name,
                    "exp_time": timestamp,
                    "exp_prefix": exp_prefix,
                },
            },
            "spec": {
                "containers": [
                    {
                        "name": "foo",
                        "image": docker_image,
                        "command": [
                            "/bin/bash",
                            "-c",
                            "-li",  # to load conda env file
                            command,
                        ],
                        "resources": resources,
                        "imagePullPolicy": "Always",
                    }
                ],
                "restartPolicy": "Never",
                "nodeSelector": node_selector,
                "dnsPolicy": "Default",
            }
        }
    return {
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": pod_name,
            "labels": {
                "owner": config.LABEL,
                "expt": pod_name,
                "exp_time": timestamp,
                "exp_prefix": exp_prefix,
            },
        },
        "spec": {
            "containers": [
                {
                    "name": "foo",
                    "image": docker_image,
                    "env": kube_env,
                    "command": [
                        "/bin/bash",
                        "-c",
                        "-li",  # to load conda env file
                        command,
                    ],
                    "resources": resources,
                    "imagePullPolicy": "Always",
                    # gpu specific
                    "volumeMounts": [
                        {
                            "name": "nvidia",
                            "mountPath": "/usr/local/nvidia",
                            "readOnly": True,
                        }
                    ],
                    "securityContext": {
                        "privileged": True,
                    }
                }
            ],
            "volumes": [
                {
                    "name": "nvidia",
                    "hostPath": {
                        "path": "/var/lib/docker/volumes/nvidia_driver_352.63/_data",
                    }
                }
            ],
            "restartPolicy": "Never",
            "nodeSelector": node_selector,
            "dnsPolicy": "Default",
        }
    }
def concretize(maybe_stub):
    """Recursively replace stub placeholders with real objects.

    Stub method calls are executed, stub attributes resolved, stub objects
    instantiated (with the instance cached on the stub so repeated
    concretization returns the same object), containers concretized
    element-wise; anything else is returned unchanged.
    """
    if isinstance(maybe_stub, StubMethodCall):
        target = concretize(maybe_stub.obj)
        bound = getattr(target, maybe_stub.method_name)
        return bound(*concretize(maybe_stub.args), **concretize(maybe_stub.kwargs))
    if isinstance(maybe_stub, StubClass):
        return maybe_stub.proxy_class
    if isinstance(maybe_stub, StubAttr):
        owner = concretize(maybe_stub.obj)
        return concretize(getattr(owner, maybe_stub.attr_name))
    if isinstance(maybe_stub, StubObject):
        if not hasattr(maybe_stub, "__stub_cache"):
            pos_args = concretize(maybe_stub.args)
            kw_args = concretize(maybe_stub.kwargs)
            try:
                maybe_stub.__stub_cache = maybe_stub.proxy_class(
                    *pos_args, **kw_args)
            except Exception as e:
                print(("Error while instantiating %s" % maybe_stub.proxy_class))
                import traceback
                traceback.print_exc()
        return maybe_stub.__stub_cache
    if isinstance(maybe_stub, dict):
        # concretize keys as well as values
        return {concretize(key): concretize(val) for key, val in maybe_stub.items()}
    if isinstance(maybe_stub, (list, tuple)):
        return maybe_stub.__class__(concretize(item) for item in maybe_stub)
    return maybe_stub
| 54,609 | 38.658678 | 174 | py |
rllab | rllab-master/rllab/misc/tensor_utils.py | import operator
import numpy as np
def flatten_tensors(tensors):
    """Flatten each array and concatenate them into one 1-D array."""
    if not tensors:
        return np.asarray([])
    flat_pieces = [np.reshape(t, [-1]) for t in tensors]
    return np.concatenate(flat_pieces)
def unflatten_tensors(flattened, tensor_shapes):
    """Split a flat 1-D array back into arrays of the given shapes."""
    sizes = [np.prod(shape) for shape in tensor_shapes]
    split_points = np.cumsum(sizes)[:-1]
    chunks = np.split(flattened, split_points)
    return [np.reshape(chunk, shape) for chunk, shape in zip(chunks, tensor_shapes)]
def pad_tensor(x, max_len, mode='zero'):
    """Pad x along axis 0 up to max_len.

    mode='zero' pads with zeros; mode='last' repeats the final element.
    """
    filler = x[-1] if mode == 'last' else np.zeros_like(x[0])
    reps = (max_len - len(x),) + (1,) * np.ndim(x[0])
    return np.concatenate([x, np.tile(filler, reps)])
def pad_tensor_n(xs, max_len):
    """Zero-pad each array in xs along axis 0 and stack into one array."""
    out = np.zeros((len(xs), max_len) + xs[0].shape[1:], dtype=xs[0].dtype)
    for row, x in enumerate(xs):
        out[row, :len(x)] = x
    return out
def pad_tensor_dict(tensor_dict, max_len, mode='zero'):
    """Recursively pad every tensor in a (possibly nested) dict to max_len."""
    padded = dict()
    for key, value in tensor_dict.items():
        if isinstance(value, dict):
            padded[key] = pad_tensor_dict(value, max_len, mode=mode)
        else:
            padded[key] = pad_tensor(value, max_len, mode=mode)
    return padded
def flatten_first_axis_tensor_dict(tensor_dict):
    """Recursively merge the first two axes of every tensor in a dict."""
    flat = dict()
    for key, value in tensor_dict.items():
        if isinstance(value, dict):
            flat[key] = flatten_first_axis_tensor_dict(value)
        else:
            flat[key] = value.reshape((-1,) + value.shape[2:])
    return flat
def high_res_normalize(probs):
    """Normalize a sequence of numbers so they sum to 1 (float division).

    The original recomputed ``sum(...)`` once per element (O(n^2));
    hoisting the total preserves the result and is linear.
    """
    values = [float(p) for p in probs]
    total = sum(values)
    return [v / total for v in values]
def stack_tensor_list(tensor_list):
    """Stack a list of tensors into one array with a new leading axis."""
    # (removed dead commented-out vstack variant that predated np.array here)
    return np.array(tensor_list)
def stack_tensor_dict_list(tensor_dict_list):
    """
    Stack a list of dictionaries of {tensors or dictionary of tensors}.
    :param tensor_dict_list: a list of dictionaries of {tensors or dictionary of tensors}.
    :return: a dictionary of {stacked tensors or dictionary of stacked tensors}
    """
    stacked = dict()
    for key in tensor_dict_list[0].keys():
        values = [d[key] for d in tensor_dict_list]
        if isinstance(tensor_dict_list[0][key], dict):
            stacked[key] = stack_tensor_dict_list(values)
        else:
            stacked[key] = stack_tensor_list(values)
    return stacked
def concat_tensor_list_subsample(tensor_list, f):
    """Concatenate tensors, keeping a random fraction f of each (sampled
    without replacement, rounded up)."""
    sampled = []
    for t in tensor_list:
        keep = int(np.ceil(len(t) * f))
        idx = np.random.choice(len(t), keep, replace=False)
        sampled.append(t[idx])
    return np.concatenate(sampled, axis=0)
def concat_tensor_dict_list_subsample(tensor_dict_list, f):
    """Recursively concatenate a list of dicts, subsampling fraction f of each tensor."""
    merged = dict()
    for key in tensor_dict_list[0].keys():
        values = [d[key] for d in tensor_dict_list]
        if isinstance(tensor_dict_list[0][key], dict):
            merged[key] = concat_tensor_dict_list_subsample(values, f)
        else:
            merged[key] = concat_tensor_list_subsample(values, f)
    return merged
def concat_tensor_list(tensor_list):
    """Concatenate a list of tensors along their first axis."""
    return np.concatenate(tensor_list, axis=0)
def concat_tensor_dict_list(tensor_dict_list):
    """Recursively concatenate a list of dicts of tensors along the first axis."""
    merged = dict()
    for key in tensor_dict_list[0].keys():
        values = [d[key] for d in tensor_dict_list]
        if isinstance(tensor_dict_list[0][key], dict):
            merged[key] = concat_tensor_dict_list(values)
        else:
            merged[key] = concat_tensor_list(values)
    return merged
def split_tensor_dict_list(tensor_dict):
    """Invert a dict of sequences into a list of per-element dicts (recursively).

    Returns None for an empty dict (no keys to split on).
    """
    result = None
    for key, vals in tensor_dict.items():
        if isinstance(vals, dict):
            vals = split_tensor_dict_list(vals)
        if result is None:
            result = [{key: v} for v in vals]
        else:
            for v, entry in zip(vals, result):
                entry[key] = v
    return result
def truncate_tensor_list(tensor_list, truncated_len):
    """Return the leading truncated_len elements of the sequence."""
    return tensor_list[:truncated_len]
def truncate_tensor_dict(tensor_dict, truncated_len):
    """Recursively truncate every tensor in a (possibly nested) dict."""
    truncated = dict()
    for key, value in tensor_dict.items():
        if isinstance(value, dict):
            truncated[key] = truncate_tensor_dict(value, truncated_len)
        else:
            truncated[key] = truncate_tensor_list(value, truncated_len)
    return truncated
| 4,490 | 28.741722 | 108 | py |
rllab | rllab-master/rllab/misc/tabulate.py | # -*- coding: utf-8 -*-
# Taken from John's code
"""Pretty-print tabular data."""
from collections import namedtuple
from platform import python_version_tuple
import re
# Compatibility shims: expose izip_longest and the scalar type aliases
# (_none_type/_int_type/_float_type/_text_type/_binary_type) uniformly on
# Python 2 and Python 3. Note only the name `izip_longest` exists afterwards.
if python_version_tuple()[0] < "3":
    from itertools import izip_longest
    from functools import partial
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = str
    _binary_type = str
else:
    from itertools import zip_longest as izip_longest
    from functools import reduce, partial
    _none_type = type(None)
    _int_type = int
    _float_type = float
    _text_type = str
    _binary_type = bytes
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.2"
# A horizontal rule: the characters used at the left edge, for the rule
# itself, between columns, and at the right edge.
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])


# A header/data row: the strings emitted at the left edge, between cells,
# and at the right edge.
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])


# A table structure is supposed to be:
#
#     --- lineabove ---------
#         headerrow
#     --- linebelowheader ---
#         datarow
#     --- linebetweenrows ---
#     ... (more datarows) ...
#     --- linebetweenrows ---
#         last datarow
#     --- linebelow ---------
#
# TableFormat's line* elements can be
#
#   - either None, if the element is not used,
#   - or a Line tuple,
#   - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
#   - either None, if the element is not used,
#   - or a DataRow tuple,
#   - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
#   - either None, to display all table elements unconditionally,
#   - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
                                         "linebetweenrows", "linebelow",
                                         "headerrow", "datarow",
                                         "padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
    """Return a full horizontal rule for the `pipe` format, with colons
    marking each column's alignment."""
    pieces = [_pipe_segment_with_colons(align, width)
              for align, width in zip(colaligns, colwidths)]
    return "|%s|" % "|".join(pieces)
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _latex_line_begin_tabular(colwidths, colaligns):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\\begin{tabular}{" + tabular_columns_fmt + "}\n\hline"
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=DataRow("", "&", "\\\\"),
datarow=DataRow("", "&", "\\\\"),
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
# Public list of supported format names, in sorted order.
tabulate_formats = list(sorted(_table_formats.keys()))

# Use raw literals: "\[" is an invalid escape sequence in a plain string
# (DeprecationWarning); the regex engine itself interprets \x1b as ESC.
_invisible_codes = re.compile(r"\x1b\[\d*m")  # ANSI color codes
_invisible_codes_bytes = re.compile(br"\x1b\[\d*m")  # ANSI color codes
def simple_separated_format(separator):
    """Construct a simple TableFormat with columns separated by a separator.

    >>> tsv = simple_separated_format("\\t") ; \
        tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
    True

    """
    row = DataRow('', separator, '')
    return TableFormat(None, None, None, None,
                       headerrow=row, datarow=row,
                       padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except ValueError:
return False
def _isnumber(string):
    """True for anything float() accepts.

    >>> _isnumber("123.45")
    True
    >>> _isnumber("123")
    True
    >>> _isnumber("spam")
    False
    """
    return _isconvertible(float, string)
def _isint(string):
    """True for ints and for int-convertible strings/bytes.

    >>> _isint("123")
    True
    >>> _isint("123.45")
    False
    """
    if type(string) is int:
        return True
    return (isinstance(string, (_binary_type, _text_type))
            and _isconvertible(int, string))
def _type(string, has_invisible=True):
    """The least generic type (type(None), int, float, str, unicode).

    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    if has_invisible and isinstance(string, (_text_type, _binary_type)):
        string = _strip_invisible(string)
    if string is None:
        return _none_type
    if hasattr(string, "isoformat"):  # datetime.datetime, date, and time
        return _text_type
    if _isint(string):
        return int
    if _isnumber(string):
        return float
    if isinstance(string, _binary_type):
        return _binary_type
    return _text_type
def _afterpoint(string):
    """Symbols after a decimal point, -1 if the string lacks the decimal point.

    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    # non-numbers and pure integers have no fractional part
    if not _isnumber(string) or _isint(string):
        return -1
    pos = string.rfind(".")
    if pos < 0:
        # fall back to the exponent marker for forms like "123e45"
        pos = string.lower().rfind("e")
    if pos < 0:
        return -1
    return len(string) - pos - 1
def _padleft(width, s, has_invisible=True):
    """Flush right.

    >>> _padleft(6, '\u044f\u0439\u0446\u0430') == '  \u044f\u0439\u0446\u0430'
    True
    """
    # widen by the number of invisible (ANSI) characters so the visible
    # width comes out right
    if has_invisible:
        width = width + len(s) - len(_strip_invisible(s))
    return ("{0:>%ds}" % width).format(s)
def _padright(width, s, has_invisible=True):
    """Flush left.

    >>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430  '
    True
    """
    if has_invisible:
        width = width + len(s) - len(_strip_invisible(s))
    return ("{0:<%ds}" % width).format(s)
def _padboth(width, s, has_invisible=True):
    """Center string.

    >>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
    True
    """
    if has_invisible:
        width = width + len(s) - len(_strip_invisible(s))
    return ("{0:^%ds}" % width).format(s)
def _strip_invisible(s):
    """Remove invisible ANSI color codes from a str or bytes value."""
    if isinstance(s, _text_type):
        return re.sub(_invisible_codes, "", s)
    else:  # a bytestring
        # the replacement must be bytes when the pattern is a bytes pattern;
        # passing "" here raises TypeError on Python 3
        return re.sub(_invisible_codes_bytes, b"", s)
def _visible_width(s):
    """Visible width of a printed string. ANSI color codes are removed.

    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    if isinstance(s, (_text_type, _binary_type)):
        return len(_strip_invisible(s))
    return len(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
    """[string] -> [padded_string]

    >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
    ['   12.345  ', '-1234.5    ', '    1.23   ', ' 1234.5    ', '    1e+234 ', '    1.0e234']
    >>> list(map(str,_align_column(['123.4', '56.7890'], None)))
    ['123.4', '56.7890']
    """
    if not alignment:
        return strings
    if alignment == "decimal":
        # right-pad so all decimal points line up, then flush right
        decimals = [_afterpoint(s) for s in strings]
        most = max(decimals)
        strings = [s + " " * (most - d) for s, d in zip(strings, decimals)]
        padfn = _padleft
    else:
        strings = [s.strip() for s in strings]
        padfn = {"right": _padleft, "center": _padboth}.get(alignment, _padright)
    width_fn = _visible_width if has_invisible else len
    maxwidth = max(max(map(width_fn, strings)), minwidth)
    return [padfn(maxwidth, s, has_invisible) for s in strings]
def _more_generic(type1, type2):
    """Return the more generic of two types, per the
    none < int < float < bytes < str ordering; unknown types rank as str."""
    ranking = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
    by_rank = {0: _none_type, 1: int, 2: float, 3: _binary_type, 4: _text_type}
    return by_rank[max(ranking.get(type1, 4), ranking.get(type2, 4))]
def _column_type(strings, has_invisible=True):
    """The least generic type all column values are convertible to.

    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    """
    return reduce(_more_generic,
                  (_type(s, has_invisible) for s in strings),
                  int)
def _format(val, valtype, floatfmt, missingval=""):
    """Format a value according to its detected column type.

    None becomes *missingval*; bytes are decoded as ASCII; floats use
    *floatfmt*; everything else is str-formatted.
    """
    if val is None:
        return missingval
    if valtype in [int, _text_type]:
        return "{0}".format(val)
    if valtype is _binary_type:
        return _text_type(val, "ascii")
    if valtype is float:
        return format(float(val), floatfmt)
    return "{0}".format(val)
def _align_header(header, alignment, width):
    """Pad a header cell to *width* according to the column alignment;
    numeric (right/decimal) columns get right-flushed headers."""
    if alignment == "left":
        return _padright(width, header)
    if alignment == "center":
        return _padboth(width, header)
    if not alignment:
        return "{0}".format(header)
    return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.

    Supported tabular data types:

    * list-of-lists or another iterable of iterables
    * list of named tuples (usually used with headers="keys")
    * 2D NumPy arrays
    * NumPy record arrays (usually used with headers="keys")
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")

    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict
            keys = list(tabular_data.keys())
            # FIX: the module only binds `izip_longest` (the py3 branch
            # imports zip_longest *as* izip_longest), so the bare
            # `zip_longest` name raised NameError here.
            rows = list(izip_longest(*list(tabular_data.values())))  # columns have to be transposed
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = list(tabular_data.keys())
            vals = tabular_data.values  # values matrix doesn't need to be transposed
            names = tabular_data.index
            rows = [[v] + list(row) for v, row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")

        if headers == "keys":
            headers = list(map(_text_type, keys))  # headers should be strings

    else:  # it's a usual an iterable of iterables, or a NumPy array
        rows = list(tabular_data)

        if (headers == "keys" and
            hasattr(tabular_data, "dtype") and
            getattr(tabular_data.dtype, "names")):
            # numpy record array
            headers = tabular_data.dtype.names
        elif (headers == "keys"
              and len(rows) > 0
              and isinstance(rows[0], tuple)
              and hasattr(rows[0], "_fields")):  # namedtuple
            headers = list(map(_text_type, rows[0]._fields))
        elif headers == "keys" and len(rows) > 0:  # keys are column indices
            headers = list(map(_text_type, list(range(len(rows[0])))))

    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]

    headers = list(headers)
    rows = list(map(list, rows))

    # pad with empty headers for initial columns if necessary
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""] * (ncols - nhs) + headers

    return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=""):
    """Format a fixed width table for pretty printing.

    `tabular_data` may be a list of lists (or another iterable of
    iterables), a list of namedtuples, a dict of iterables, a 2-D NumPy
    array, a NumPy record array, or a pandas DataFrame.

    `headers` can be an explicit sequence of column headers, "firstrow"
    (use the first data row as headers), or "keys" (use dict keys /
    column indices).  If fewer headers than columns are given, they name
    the *last* columns (consistent with R and pandas plain-text output).
    The default is an empty tuple (immutable, so the default can never be
    accidentally shared/mutated across calls).

    Column types are detected automatically: numeric columns are aligned
    per `numalign` ("right", "center", "left", "decimal", or None) and all
    other columns per `stralign`.  `floatfmt` is the format spec used for
    columns containing floats, and `None` cell values are replaced with
    the `missingval` string.

    `tablefmt` selects the output markup: "plain", "simple", "grid",
    "pipe", "orgtbl", "rst", "mediawiki", "latex", or a `TableFormat`
    instance; unknown names fall back to "simple".

    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------
    """
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
                            ['\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    if has_invisible:
        width_fn = _visible_width
    else:
        width_fn = len
    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval) for v in c]
             for c,ct in zip(cols, coltypes)]
    # align columns
    aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
    minwidths = [width_fn(h)+2 for h in headers] if headers else [0]*len(cols)
    cols = [_align_column(c, a, minw, has_invisible)
            for c, a, minw in zip(cols, aligns, minwidths)]
    if headers:
        # align headers and add headers
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)]
        headers = [_align_header(h, a, minw)
                   for h, a, minw in zip(headers, aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))
    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
    return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
    """Produce a plain-text representation of the table.

    `fmt` is a TableFormat; `headers`/`rows` are pre-aligned cell strings;
    `colwidths`/`colaligns` describe each column.  Lines listed in the
    format's `with_header_hide` are suppressed when headers are present.
    """
    lines = []
    hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
    pad = fmt.padding
    headerrow = fmt.headerrow
    # Cell padding widens every column by 2*pad.
    padded_widths = [(w + 2*pad) for w in colwidths]
    padded_headers = _pad_row(headers, pad)
    padded_rows = [_pad_row(row, pad) for row in rows]
    if fmt.lineabove and "lineabove" not in hidden:
        lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
    if padded_headers:
        lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
        if fmt.linebelowheader and "linebelowheader" not in hidden:
            lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
    if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # initial rows with a line below
        for row in padded_rows[:-1]:
            lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
            lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
        # the last row without a line below
        lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
    else:
        for row in padded_rows:
            lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
    if fmt.linebelow and "linebelow" not in hidden:
        lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
    return "\n".join(lines)
| 28,995 | 33.072855 | 197 | py |
rllab | rllab-master/rllab/misc/logger.py | from enum import Enum
from rllab.misc.tabulate import tabulate
from rllab.misc.console import mkdir_p, colorize
from rllab.misc.autoargs import get_all_parameters
from contextlib import contextmanager
import numpy as np
import os
import os.path as osp
import sys
import datetime
import dateutil.tz
import csv
import joblib
import json
import pickle
import base64
_prefixes = []
_prefix_str = ''
_tabular_prefixes = []
_tabular_prefix_str = ''
_tabular = []
_text_outputs = []
_tabular_outputs = []
_text_fds = {}
_tabular_fds = {}
_tabular_header_written = set()
_snapshot_dir = None
_snapshot_mode = 'all'
_snapshot_gap = 1
_log_tabular_only = False
_header_printed = False
def _add_output(file_name, arr, fds, mode='a'):
    """Start logging to `file_name`, creating parent dirs; no-op if tracked."""
    if file_name in arr:
        return
    mkdir_p(os.path.dirname(file_name))
    arr.append(file_name)
    fds[file_name] = open(file_name, mode)
def _remove_output(file_name, arr, fds):
if file_name in arr:
fds[file_name].close()
del fds[file_name]
arr.remove(file_name)
def push_prefix(prefix):
    # Push a log-line prefix and rebuild the cached concatenation.
    _prefixes.append(prefix)
    global _prefix_str
    _prefix_str = ''.join(_prefixes)
def add_text_output(file_name):
    # Append-mode text log (survives restarts of the same run dir).
    _add_output(file_name, _text_outputs, _text_fds, mode='a')
def remove_text_output(file_name):
    _remove_output(file_name, _text_outputs, _text_fds)
def add_tabular_output(file_name):
    # CSV output is truncated ('w') so headers are written exactly once.
    _add_output(file_name, _tabular_outputs, _tabular_fds, mode='w')
def remove_tabular_output(file_name):
    # Forget the header-written marker before closing the fd.
    if _tabular_fds[file_name] in _tabular_header_written:
        _tabular_header_written.remove(_tabular_fds[file_name])
    _remove_output(file_name, _tabular_outputs, _tabular_fds)
# Accessors for the module-level snapshot/logging configuration.
def set_snapshot_dir(dir_name):
    global _snapshot_dir
    _snapshot_dir = dir_name
def get_snapshot_dir():
    return _snapshot_dir
def get_snapshot_mode():
    return _snapshot_mode
def set_snapshot_mode(mode):
    # One of 'all', 'last', 'gap', 'none' (see save_itr_params).
    global _snapshot_mode
    _snapshot_mode = mode
def get_snapshot_gap():
    return _snapshot_gap
def set_snapshot_gap(gap):
    # Used only when snapshot mode is 'gap': save every `gap` iterations.
    global _snapshot_gap
    _snapshot_gap = gap
def set_log_tabular_only(log_tabular_only):
    # When True, plain log() lines are not echoed to stdout.
    global _log_tabular_only
    _log_tabular_only = log_tabular_only
def get_log_tabular_only():
    return _log_tabular_only
def log(s, with_prefix=True, with_timestamp=True, color=None):
    """Write one log line to every text output (and stdout unless
    tabular-only mode is on), optionally prefixed/timestamped/colored."""
    line = s
    if with_prefix:
        line = _prefix_str + line
    if with_timestamp:
        now = datetime.datetime.now(dateutil.tz.tzlocal())
        stamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')
        line = "%s | %s" % (stamp, line)
    if color is not None:
        line = colorize(line, color)
    if not _log_tabular_only:
        # Echo to the console as well.
        print(line)
    for fd in list(_text_fds.values()):
        fd.write(line + '\n')
        fd.flush()
    sys.stdout.flush()
def record_tabular(key, val):
    # Buffer one (key, value) pair for the next dump_tabular(); both are
    # stringified and the key gets the current tabular prefix.
    _tabular.append((_tabular_prefix_str + str(key), str(val)))
def push_tabular_prefix(key):
    _tabular_prefixes.append(key)
    global _tabular_prefix_str
    _tabular_prefix_str = ''.join(_tabular_prefixes)
def pop_tabular_prefix():
    del _tabular_prefixes[-1]
    global _tabular_prefix_str
    _tabular_prefix_str = ''.join(_tabular_prefixes)
@contextmanager
def prefix(key):
    # Temporarily push a log-line prefix; the finally guarantees the pop
    # even when the body raises.
    push_prefix(key)
    try:
        yield
    finally:
        pop_prefix()
@contextmanager
def tabular_prefix(key):
    """Context manager that prefixes every tabular key recorded inside it.

    Mirrors `prefix`: the pop is in a `finally` block so the prefix stack
    is restored even if the body raises (the original leaked the prefix
    on exception).
    """
    push_tabular_prefix(key)
    try:
        yield
    finally:
        pop_tabular_prefix()
class TerminalTablePrinter(object):
    """Renders the tabular log as a live-updating table in the terminal."""

    def __init__(self):
        self.headers = None   # column names, fixed after the first row
        self.tabulars = []    # one list of cell strings per recorded row

    def print_tabular(self, new_tabular):
        """Append a row of (key, value) pairs and redraw the table."""
        if self.headers is None:
            self.headers = [x[0] for x in new_tabular]
        else:
            # Keys must stay stable across iterations.
            assert len(self.headers) == len(new_tabular)
        self.tabulars.append([x[1] for x in new_tabular])
        self.refresh()

    def refresh(self):
        """Clear the screen and redraw the most recent rows that fit."""
        import shutil
        # shutil.get_terminal_size() is robust when stdout is not a tty
        # (falls back to COLUMNS/LINES or 80x24), unlike the previous
        # `stty size` subprocess call which fails without a terminal.
        rows = shutil.get_terminal_size().lines
        tabulars = self.tabulars[-(rows - 3):]
        sys.stdout.write("\x1b[2J\x1b[H")  # ANSI: clear screen, cursor home
        sys.stdout.write(tabulate(tabulars, self.headers))
        sys.stdout.write("\n")
# Shared printer used when running in tabular-only mode.
table_printer = TerminalTablePrinter()
def dump_tabular(*args, **kwargs):
    # Flush all record_tabular() entries: to the console (either as a
    # refreshing table or as plain log lines) and to every CSV output.
    # Remaining *args/**kwargs are forwarded to log().
    wh = kwargs.pop("write_header", None)
    if len(_tabular) > 0:
        if _log_tabular_only:
            table_printer.print_tabular(_tabular)
        else:
            for line in tabulate(_tabular).split('\n'):
                log(line, *args, **kwargs)
        tabular_dict = dict(_tabular)
        # Also write to the csv files
        # This assumes that the keys in each iteration won't change!
        for tabular_fd in list(_tabular_fds.values()):
            writer = csv.DictWriter(tabular_fd, fieldnames=list(tabular_dict.keys()))
            if wh or (wh is None and tabular_fd not in _tabular_header_written):
                writer.writeheader()
                _tabular_header_written.add(tabular_fd)
            writer.writerow(tabular_dict)
            tabular_fd.flush()
        del _tabular[:]
def pop_prefix():
    # Pop the most recent log-line prefix and rebuild the cached string.
    del _prefixes[-1]
    global _prefix_str
    _prefix_str = ''.join(_prefixes)
def save_itr_params(itr, params):
    # Persist per-iteration parameters according to the snapshot mode:
    # 'all' (every iteration), 'last' (single overwritten file),
    # 'gap' (every _snapshot_gap iterations), 'none' (disabled).
    if _snapshot_dir:
        if _snapshot_mode == 'all':
            file_name = osp.join(_snapshot_dir, 'itr_%d.pkl' % itr)
            joblib.dump(params, file_name, compress=3)
        elif _snapshot_mode == 'last':
            # override previous params
            file_name = osp.join(_snapshot_dir, 'params.pkl')
            joblib.dump(params, file_name, compress=3)
        elif _snapshot_mode == "gap":
            if itr % _snapshot_gap == 0:
                file_name = osp.join(_snapshot_dir, 'itr_%d.pkl' % itr)
                joblib.dump(params, file_name, compress=3)
        elif _snapshot_mode == 'none':
            pass
        else:
            raise NotImplementedError
def log_parameters(log_file, args, classes):
    # Dump parsed CLI args plus per-class hyperparameters to a JSON file.
    # `classes` maps an arg name (e.g. "algo") to a class or instance.
    log_params = {}
    for param_name, param_value in args.__dict__.items():
        # Class-scoped args (prefixed by a class key) are handled below.
        if any([param_name.startswith(x) for x in list(classes.keys())]):
            continue
        log_params[param_name] = param_value
    for name, cls in classes.items():
        if isinstance(cls, type):
            params = get_all_parameters(cls, args)
            params["_name"] = getattr(args, name)
            log_params[name] = params
        else:
            # Instance: record its constructor kwargs and qualified name.
            log_params[name] = getattr(cls, "__kwargs", dict())
            log_params[name]["_name"] = cls.__module__ + "." + cls.__class__.__name__
    mkdir_p(os.path.dirname(log_file))
    with open(log_file, "w") as f:
        json.dump(log_params, f, indent=2, sort_keys=True)
def stub_to_json(stub_sth):
    """Recursively convert rllab "stub" experiment objects (and plain
    containers) into a JSON-serializable structure."""
    from rllab.misc import instrument
    if isinstance(stub_sth, instrument.StubObject):
        # Stubbed constructor call: record kwargs plus the class name.
        assert len(stub_sth.args) == 0
        data = dict()
        for k, v in stub_sth.kwargs.items():
            data[k] = stub_to_json(v)
        data["_name"] = stub_sth.proxy_class.__module__ + "." + stub_sth.proxy_class.__name__
        return data
    elif isinstance(stub_sth, instrument.StubAttr):
        return dict(
            obj=stub_to_json(stub_sth.obj),
            attr=stub_to_json(stub_sth.attr_name)
        )
    elif isinstance(stub_sth, instrument.StubMethodCall):
        return dict(
            obj=stub_to_json(stub_sth.obj),
            method_name=stub_to_json(stub_sth.method_name),
            args=stub_to_json(stub_sth.args),
            kwargs=stub_to_json(stub_sth.kwargs),
        )
    elif isinstance(stub_sth, instrument.BinaryOp):
        return "binary_op"
    elif isinstance(stub_sth, instrument.StubClass):
        return stub_sth.proxy_class.__module__ + "." + stub_sth.proxy_class.__name__
    elif isinstance(stub_sth, dict):
        return {stub_to_json(k): stub_to_json(v) for k, v in stub_sth.items()}
    elif isinstance(stub_sth, (list, tuple)):
        return list(map(stub_to_json, stub_sth))
    elif type(stub_sth) == type(lambda: None):
        # Plain function: record its qualified name.
        if stub_sth.__module__ is not None:
            return stub_sth.__module__ + "." + stub_sth.__name__
        return stub_sth.__name__
    elif "theano" in str(type(stub_sth)):
        # Theano objects are not JSON-serializable; keep their repr.
        return repr(stub_sth)
    return stub_sth
class MyEncoder(json.JSONEncoder):
    """JSON encoder that serializes classes and Enum members as tagged
    dicts ({"$class": ...} / {"$enum": ...}); everything else falls back
    to the default encoder (which raises TypeError for unknown types)."""

    def default(self, o):
        if isinstance(o, type):
            return {'$class': o.__module__ + "." + o.__name__}
        if isinstance(o, Enum):
            return {'$enum': '.'.join([o.__module__, o.__class__.__name__, o.name])}
        return super().default(o)
def log_parameters_lite(log_file, args):
    # Dump parsed CLI args (and, when present, the base64-pickled stubbed
    # experiment call) to a JSON file via MyEncoder.
    log_params = {}
    for param_name, param_value in args.__dict__.items():
        log_params[param_name] = param_value
    if args.args_data is not None:
        # NOTE(review): pickle.loads on args_data is only safe because the
        # payload is produced by this codebase's own launcher — never feed
        # untrusted data through this path.
        stub_method = pickle.loads(base64.b64decode(args.args_data))
        method_args = stub_method.kwargs
        log_params["json_args"] = dict()
        for k, v in list(method_args.items()):
            log_params["json_args"][k] = stub_to_json(v)
        kwargs = stub_method.obj.kwargs
        # Pull the common components out individually for readability.
        for k in ["baseline", "env", "policy"]:
            if k in kwargs:
                log_params["json_args"][k] = stub_to_json(kwargs.pop(k))
        log_params["json_args"]["algo"] = stub_to_json(stub_method.obj)
    mkdir_p(os.path.dirname(log_file))
    with open(log_file, "w") as f:
        json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)
def log_variant(log_file, variant_data):
    """Serialize an experiment variant to `log_file` as pretty JSON."""
    mkdir_p(os.path.dirname(log_file))
    # Variant objects expose dump(); plain dicts are used as-is.
    data = variant_data.dump() if hasattr(variant_data, "dump") else variant_data
    with open(log_file, "w") as f:
        json.dump(stub_to_json(data), f, indent=2, sort_keys=True, cls=MyEncoder)
def record_tabular_misc_stat(key, values, placement='back'):
    """Record Average/Std/Median/Min/Max of `values` under keys decorated
    with `key` (as a prefix by default, or a suffix if placement='front').
    Empty `values` records NaN for every statistic."""
    if placement == 'front':
        prefix, suffix = "", key
    else:
        prefix, suffix = key, ""
    if len(values) > 0:
        stats = [
            ("Average", np.average(values)),
            ("Std", np.std(values)),
            ("Median", np.median(values)),
            ("Min", np.min(values)),
            ("Max", np.max(values)),
        ]
    else:
        stats = [(name, np.nan)
                 for name in ("Average", "Std", "Median", "Min", "Max")]
    for name, value in stats:
        record_tabular(prefix + name + suffix, value)
| 10,538 | 29.197708 | 93 | py |
rllab | rllab-master/rllab/misc/mako_utils.py |
def compute_rect_vertices(fromp, to, radius):
    """Return the four corners of the rectangle of half-width `radius`
    around the segment fromp->to, encoded as "x,y;x,y;x,y;x,y"."""
    x1, y1 = fromp
    x2, y2 = to
    if abs(y1 - y2) < 1e-6:
        # (Nearly) horizontal segment: offset straight up/down.
        dx, dy = 0, radius
    else:
        slope = (x1 - x2) / (y1 - y2)
        dx = radius * 1.0 / (slope ** 2 + 1) ** 0.5
        # equivalently dx = radius * (y2-y1).to_f / ((x2-x1)**2 + (y2-y1)**2)**0.5
        dy = (radius ** 2 - dx ** 2) ** 0.5
    # Flip the offset so the corners wind consistently.
    if (x1 - x2) * (y1 - y2) > 0:
        dy = -dy
    corners = [
        (x1 + dx, y1 + dy),
        (x2 + dx, y2 + dy),
        (x2 - dx, y2 - dy),
        (x1 - dx, y1 - dy),
    ]
    return ";".join(",".join(map(str, corner)) for corner in corners)
| 569 | 26.142857 | 82 | py |
rllab | rllab-master/rllab/misc/viewer2d.py | import pygame
import pygame.gfxdraw
import numpy as np
class Colors(object):
    # Common RGB color constants (0-255 per channel) for 2D drawing.
    black = (0, 0, 0)
    white = (255, 255, 255)
    blue = (0, 0, 255)
    red = (255, 0, 0)
    green = (0, 255, 0)
class Viewer2D(object):
    """A thin pygame wrapper for drawing 2D scenes in world coordinates.

    World coordinates (bounded by `xlim`/`ylim`) are mapped onto the
    pygame window; the y axis points up in world space but down on the
    screen, hence the flip in `scale_y`.
    """
    def __init__(self, size=(640, 480), xlim=None, ylim=None):
        # size: window size in pixels; xlim/ylim: world-coordinate bounds
        # (default to pixel coordinates when omitted).
        pygame.init()
        screen = pygame.display.set_mode(size)
        #surface = pygame.surface(size, pygame.SRCALPHA)
        if xlim is None:
            xlim = (0, size[0])
        if ylim is None:
            ylim = (0, size[1])
        self._screen = screen
        #self._surface = surface
        #self.screen.blit(self.surface, (0, 0))
        self._xlim = xlim
        self._ylim = ylim
    @property
    def xlim(self):
        # (xmin, xmax) world bounds mapped to the window width.
        return self._xlim
    @xlim.setter
    def xlim(self, value):
        self._xlim = value
    @property
    def ylim(self):
        # (ymin, ymax) world bounds mapped to the window height.
        return self._ylim
    @ylim.setter
    def ylim(self, value):
        self._ylim = value
    def reset(self):
        # Clear the frame to white.
        self.fill(Colors.white)
    def fill(self, color):
        self.screen.fill(color)
    def scale_x(self, world_x):
        # World x -> pixel column.
        xmin, xmax = self.xlim
        return int((world_x - xmin) * self.screen.get_width() / (xmax - xmin))
    def scale_y(self, world_y):
        # World y -> pixel row (flipped: world y grows upward).
        ymin, ymax = self.ylim
        return int((self.screen.get_height() - (world_y - ymin) * self.screen.get_height() / (ymax - ymin)))
    def scale_point(self, point):
        x, y = point
        return (self.scale_x(x), self.scale_y(y))
    @property
    def scale_factor(self):
        # Pixels per world unit (the tighter of the two axes).
        xmin, xmax = self.xlim
        ymin, ymax = self.ylim
        return min(self.screen.get_width() / (xmax - xmin), self.screen.get_height() / (ymax - ymin))
    def scale_size(self, size):
        # Scalar size -> pixels; (w, h) pair -> pixel (w, h).
        if hasattr(size, '__len__'):
            x, y = size
            return (self.scale_x(x + self.xlim[0]), self.screen.get_height() - self.scale_y(y + self.ylim[0]))
        return size * self.scale_factor
    def line(self, color, p1, p2, width=None):
        # width is in world units; None means a hairline (1 px).
        if width is None:
            width = 1
        else:
            width = int(width * self.scale_factor)
        x1, y1 = self.scale_point(p1)
        x2, y2 = self.scale_point(p2)
        pygame.draw.line(self.screen, color, (x1, y1), (x2, y2), width)
    def circle(self, color, p, radius):
        pygame.draw.circle(self.screen, color, self.scale_point(p), int(self.scale_size(radius)))
    def rect(self, color, center, size):
        # A 4-component color means RGBA: blit through a per-pixel-alpha
        # surface so transparency composits correctly.
        cx, cy = self.scale_point(center)
        w, h = self.scale_size(size)
        if len(color) > 3:
            s = pygame.Surface((w, h), pygame.SRCALPHA)
            s.fill(color)
            self.screen.blit(s, (cx-w/2, cy-h/2))
            #pygame.draw.rect(self.surface, color, pygame.Rect(cx-w/2, cy-h/2, w, h))
        else:
            pygame.draw.rect(self.screen, color, pygame.Rect(cx-w/2, cy-h/2, w, h))
    def polygon(self, color, points):
        # Same RGBA handling as rect(), but on a full-window surface.
        if len(color) > 3:
            s = pygame.Surface((self.screen.get_width(), self.screen.get_height()), pygame.SRCALPHA)
            s.fill((0, 0, 0, 0))
            pygame.draw.polygon(s, color, list(map(self.scale_point, points)))
            self.screen.blit(s, (0, 0))
        else:
            pygame.draw.polygon(self.screen, color, list(map(self.scale_point, points)))
    @property
    def screen(self):
        return self._screen
    def loop_once(self):
        # Present the frame drawn so far.
        pygame.display.flip()
    # Draw a checker background
    def checker(self, colors=[Colors.white, Colors.black], granularity=4, offset=(0, 0)):
        # NOTE(review): the mutable default `colors` list is never mutated
        # here, so sharing the default across calls is harmless.
        screen_height = self.screen.get_height()
        screen_width = self.screen.get_width()
        screen_size = min(screen_height, screen_width)
        checker_size = int(screen_size / granularity)
        offset_x = self.scale_x(offset[0] + self.xlim[0])
        offset_y = self.scale_y(offset[1] + self.ylim[0])
        # Parity of the first tile, so scrolling offsets keep colors stable.
        start_idx = int(offset_x / checker_size) + int(offset_y / checker_size)
        offset_x = ((offset_x % checker_size) + checker_size) % checker_size
        offset_y = ((offset_y % checker_size) + checker_size) % checker_size
        for row in range(-1, int(np.ceil(screen_height * 1.0 / checker_size))+1):
            for col in range(-1, int(np.ceil(screen_width * 1.0 / checker_size))+1):
                the_square = (col*checker_size+offset_x, row*checker_size+offset_y, checker_size, checker_size)
                self.screen.fill(colors[(start_idx+row+col)%2], the_square)
    def pause(self):
        # Block until any key is pressed in the pygame window.
        print("press any key on the screen to continue...")
        while True:
            event = pygame.event.wait()
            if event.type == pygame.KEYDOWN:
                break
        print("continuing")
| 4,668 | 33.330882 | 111 | py |
rllab | rllab-master/rllab/misc/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/misc/console.py | import sys
import time
import os
import errno
import shlex
import pydoc
import inspect
import collections
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def colorize(string, color, bold=False, highlight=False):
    """Wrap `string` in ANSI escape codes for the given color name.

    `highlight` switches to the background-color code (+10); `bold` adds
    the bold attribute.
    """
    codes = []
    base = color2num[color]
    if highlight:
        base += 10
    codes.append(str(base))
    if bold:
        codes.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(codes), string)
def mkdir_p(path):
    """Create `path` and any missing parent directories (like `mkdir -p`).

    An existing directory is fine; an existing non-directory at `path`
    still raises, matching the original EEXIST/isdir check — `exist_ok`
    only suppresses the error when the final component is a directory.
    """
    os.makedirs(path, exist_ok=True)
def log(s):  # (a `send_telegram` flag was removed from this signature)
    # Print and flush immediately so logs interleave correctly.
    print(s)
    sys.stdout.flush()
class SimpleMessage(object):
    # Context manager that prints `msg` on entry and a timed "done" line
    # on exit (via `logger`, default: log above).
    def __init__(self, msg, logger=log):
        self.msg = msg
        self.logger = logger
    def __enter__(self):
        print(self.msg)
        self.tstart = time.time()
    def __exit__(self, etype, *args):
        maybe_exc = "" if etype is None else " (with exception)"
        self.logger("done%s in %.3f seconds" %
                    (maybe_exc, time.time() - self.tstart))
# Nesting depth shared by all Message instances (drives indentation).
MESSAGE_DEPTH = 0
class Message(object):
    # Like SimpleMessage but colorized and indented by nesting depth.
    def __init__(self, msg):
        self.msg = msg
    def __enter__(self):
        global MESSAGE_DEPTH  # pylint: disable=W0603
        print(colorize('\t' * MESSAGE_DEPTH + '=: ' + self.msg, 'magenta'))
        self.tstart = time.time()
        MESSAGE_DEPTH += 1
    def __exit__(self, etype, *args):
        global MESSAGE_DEPTH  # pylint: disable=W0603
        MESSAGE_DEPTH -= 1
        maybe_exc = "" if etype is None else " (with exception)"
        print(colorize('\t' * MESSAGE_DEPTH + "done%s in %.3f seconds" % (maybe_exc, time.time() - self.tstart), 'magenta'))
def prefix_log(prefix, logger=log):
    # Return a logger that prepends `prefix` to every message.
    return lambda s: logger(prefix + s)
def tee_log(file_name):
    # Return a logger that writes to stdout AND appends to `file_name`.
    # NOTE(review): the file handle is intentionally kept open for the
    # lifetime of the returned logger and never closed here.
    f = open(file_name, 'w+')
    def logger(s):
        log(s)
        f.write(s)
        f.write('\n')
        f.flush()
    return logger
def collect_args():
    """Parse `--name value` pairs from sys.argv into a {name: value} dict
    (the leading '--' is stripped from each name)."""
    tokens = shlex.split(' '.join(sys.argv[1:]))
    names = tokens[0::2]
    values = tokens[1::2]
    return {name[2:]: value for name, value in zip(names, values)}
def type_hint(arg_name, arg_type):
    """Decorator that records {arg_name: arg_type} on the function's
    `__tweak_type_hint_meta__` dict, consumed by `tweakfun` to convert
    command-line overrides."""
    def decorate(f):
        hints = getattr(f, '__tweak_type_hint_meta__', None)
        if hints is None:
            hints = {}
            f.__tweak_type_hint_meta__ = hints
        hints[arg_name] = arg_type
        return f
    return decorate
def tweak(fun_or_val, identifier=None):
    """Make a value or a function's arguments overridable from the command
    line: dispatches to `tweakfun` for callables, `tweakval` otherwise.
    """
    # The builtin `callable()` replaces `isinstance(x, collections.Callable)`:
    # the `collections` top-level ABC aliases were removed in Python 3.10
    # (they live in `collections.abc`), so the old check crashes there.
    if callable(fun_or_val):
        return tweakfun(fun_or_val, identifier)
    return tweakval(fun_or_val, identifier)
def tweakval(val, identifier):
    """Return `val`, unless `--<identifier> v` appears on the command
    line, in which case v is converted to type(val) and returned."""
    if not identifier:
        raise ValueError('Must provide an identifier for tweakval to work')
    for name, new_val in collect_args().items():
        stripped = name.replace('-', '_')
        if stripped == identifier:
            log('replacing %s in %s with %s' % (stripped, str(val), str(new_val)))
            return type(val)(new_val)
    return val
def tweakfun(fun, alt=None):
    """Make the arguments (or the function itself) tweakable from command line.
    See tests/test_misc_console.py for examples.
    NOTE: this only works for the initial launched process, since other processes
    will get different argv. What this means is that tweak() calls wrapped in a function
    to be invoked in a child process might not behave properly.
    """
    # NOTE(review): `im_class` is a Python 2 attribute; on Python 3 this is
    # always None, so the cls branch below is effectively dead — confirm.
    cls = getattr(fun, 'im_class', None)
    method_name = fun.__name__
    if alt:
        cmd_prefix = alt
    elif cls:
        cmd_prefix = cls + '.' + method_name
    else:
        cmd_prefix = method_name
    cmd_prefix = cmd_prefix.lower()
    args = collect_args()
    # `--<prefix> some.dotted.path` swaps in an entirely different callable.
    if cmd_prefix in args:
        fun = pydoc.locate(args[cmd_prefix])
    # NOTE(review): inspect.getargspec was removed in Python 3.11; this
    # would need inspect.getfullargspec/signature on modern interpreters.
    if type(fun) == type:
        argspec = inspect.getargspec(fun.__init__)
    else:
        argspec = inspect.getargspec(fun)
    # TODO handle list arguments
    # Map each defaulted parameter name to its default value.
    defaults = dict(
        list(zip(argspec.args[-len(argspec.defaults or []):], argspec.defaults or [])))
    replaced_kwargs = {}
    cmd_prefix += '-'
    # Explicit type hints (from @type_hint) take priority over inferring
    # the type from the default value.
    if type(fun) == type:
        meta = getattr(fun.__init__, '__tweak_type_hint_meta__', {})
    else:
        meta = getattr(fun, '__tweak_type_hint_meta__', {})
    for k, v in args.items():
        if k.startswith(cmd_prefix):
            stripped = k[len(cmd_prefix):].replace('-', '_')
            if stripped in meta:
                log('replacing %s in %s with %s' % (stripped, str(fun), str(v)))
                replaced_kwargs[stripped] = meta[stripped](v)
            elif stripped not in argspec.args:
                raise ValueError(
                    '%s is not an explicit parameter of %s' % (stripped, str(fun)))
            elif stripped not in defaults:
                raise ValueError(
                    '%s does not have a default value in method %s' % (stripped, str(fun)))
            elif defaults[stripped] is None:
                raise ValueError(
                    'Cannot infer type of %s in method %s from None value' % (stripped, str(fun)))
            else:
                log('replacing %s in %s with %s' % (stripped, str(fun), str(v)))
                # TODO more proper conversions
                replaced_kwargs[stripped] = type(defaults[stripped])(v)
    def tweaked(*args, **kwargs):
        # Positional args are rebound by name so overrides can win.
        all_kw = dict(list(zip(argspec[0], args)) +
                      list(kwargs.items()) + list(replaced_kwargs.items()))
        return fun(**all_kw)
    return tweaked
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return the answer as a bool.

    `default` is the presumed answer when the user just hits <Enter>; it
    must be "yes", "no", or None (None forces an explicit answer).
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    prompt = prompts[default]
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
| 6,692 | 28.615044 | 124 | py |
rllab | rllab-master/rllab/misc/overrides.py | #
# Copyright 2015 Mikko Korpela
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import dis
__VERSION__ = '0.5'
if sys.version > '3':
long = int
def overrides(method):
    """Decorator to indicate that the decorated method overrides a method in superclass.
    The decorator code is executed while loading class. Using this method should have minimal runtime performance
    implications.
    This is based on my idea about how to do this and fwc:s highly improved algorithm for the implementation
    fwc:s algorithm : http://stackoverflow.com/a/14631397/308189
    my answer : http://stackoverflow.com/a/8313042/308189
    How to use:
    from overrides import overrides
    class SuperClass(object):
        def method(self):
            return 2
    class SubClass(SuperClass):
        @overrides
        def method(self):
            return 1
    :raises  AssertionError if no match in super classes for the method name
    :return  method with possibly added (if the method doesn't have one) docstring from super class
    """
    # nop for now due to py3 compatibility
    # NOTE(review): the check below is disabled, so this decorator is a
    # pure annotation — it never verifies the override nor copies docs.
    return method
    # for super_class in _get_base_classes(sys._getframe(2), method.__globals__):
    #     if hasattr(super_class, method.__name__):
    #         if not method.__doc__:
    #             method.__doc__ = getattr(super_class, method.__name__).__doc__
    #         return method
    # raise AssertionError('No super class method found for "%s"' % method.__name__)
def _get_base_classes(frame, namespace):
    """Resolve each base-class name found in `frame` to an object via `namespace`."""
    bases = []
    for class_name_components in _get_base_class_names(frame):
        bases.append(_get_base_class(class_name_components, namespace))
    return bases
def _get_base_class_names(frame):
    """Get baseclass names from the code object"""
    # NOTE(review): this walks raw bytecode using the Python 2 layout
    # (1-byte opcode + 2-byte little-endian argument). On Python 3,
    # `code[i]` is already an int so ord() raises, and CPython >= 3.6 uses
    # 2-byte "wordcode" — this function appears unusable there; it is only
    # reached from the disabled path in `overrides` above. Confirm before
    # relying on it.
    co, lasti = frame.f_code, frame.f_lasti
    code = co.co_code
    i = 0
    extended_arg = 0
    extends = []
    # Scan instructions executed up to the current instruction pointer.
    while i <= lasti:
        c = code[i]
        op = ord(c)
        i += 1
        if op >= dis.HAVE_ARGUMENT:
            oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
            extended_arg = 0
            i += 2
            if op == dis.EXTENDED_ARG:
                extended_arg = oparg*int(65536)
            if op in dis.hasconst:
                # Loading a string constant (the class name) resets the
                # collected base-class expression.
                if type(co.co_consts[oparg]) == str:
                    extends = []
            elif op in dis.hasname:
                # LOAD_NAME starts a new dotted base-class reference;
                # LOAD_ATTR extends the current one.
                if dis.opname[op] == 'LOAD_NAME':
                    extends.append(('name', co.co_names[oparg]))
                if dis.opname[op] == 'LOAD_ATTR':
                    extends.append(('attr', co.co_names[oparg]))
    # Group ('name', x), ('attr', y), ... runs into [x, y, ...] lists.
    items = []
    previous_item = []
    for t, s in extends:
        if t == 'name':
            if previous_item:
                items.append(previous_item)
            previous_item = [s]
        else:
            previous_item += [s]
    if previous_item:
        items.append(previous_item)
    return items
def _get_base_class(components, namespace):
obj = namespace[components[0]]
for component in components[1:]:
obj = getattr(obj, component)
return obj
| 3,547 | 32.471698 | 120 | py |
rllab | rllab-master/rllab/misc/meta.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/misc/krylov.py | import numpy as np
from rllab.misc.ext import sliced_fun
EPS = np.finfo('float64').tiny
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
    """Solve A x = b with the conjugate gradient method (Demmel p 312).

    f_Ax is a callable computing the matrix-vector product A @ v for a
    symmetric positive-definite A. Iterates at most `cg_iters` times or
    until the squared residual falls below `residual_tol`.
    """
    row_fmt = "%10i %10.3g %10.3g"
    if verbose:
        print("%10s %10s %10s" % ("iter", "residual norm", "soln norm"))
    x = np.zeros_like(b)
    residual = b.copy()
    direction = b.copy()
    res_sq = residual.dot(residual)
    for itr in range(cg_iters):
        if callback is not None:
            callback(x)
        if verbose:
            print(row_fmt % (itr, res_sq, np.linalg.norm(x)))
        Ap = f_Ax(direction)
        step = res_sq / direction.dot(Ap)
        x += step * direction
        residual -= step * Ap
        new_res_sq = residual.dot(residual)
        direction = residual + (new_res_sq / res_sq) * direction
        res_sq = new_res_sq
        if res_sq < residual_tol:
            break
    if callback is not None:
        callback(x)
    if verbose:
        print(row_fmt % (itr + 1, res_sq, np.linalg.norm(x)))  # pylint: disable=W0631
    return x
def preconditioned_cg(f_Ax, f_Minvx, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
    """Preconditioned conjugate gradients for ``A x = b`` (Demmel p 318).

    ``f_Ax`` applies the system matrix; ``f_Minvx`` applies the inverse of the
    preconditioner M. ``callback(x, f_Ax)``, if given, is invoked before each
    iteration (note the two-argument signature, unlike :func:`cg`).
    """
    fmtstr = "%10i %10.3g %10.3g"
    titlestr = "%10s %10s %10s"
    solution = np.zeros_like(b)
    residual = b.copy()
    direction = f_Minvx(b)
    precond_residual = direction
    yr = precond_residual.dot(residual)
    if verbose:
        print(titlestr % ("iter", "residual norm", "soln norm"))
    for itr in range(cg_iters):
        if callback is not None:
            callback(solution, f_Ax)
        if verbose:
            print(fmtstr % (itr, yr, np.linalg.norm(solution)))
        A_dir = f_Ax(direction)
        step = yr / direction.dot(A_dir)
        solution += step * direction
        residual -= step * A_dir
        precond_residual = f_Minvx(residual)
        yr_next = precond_residual.dot(residual)
        direction = precond_residual + (yr_next / yr) * direction
        yr = yr_next
        if yr < residual_tol:
            break
    if verbose:
        print(fmtstr % (cg_iters, yr, np.linalg.norm(solution)))
    return solution
def test_cg():
    """Smoke-test cg and preconditioned_cg on a random SPD system."""
    mat = np.random.randn(5, 5)
    mat = mat.T.dot(mat)
    rhs = np.random.randn(5)
    apply_mat = lambda v: mat.dot(v)  # pylint: disable=W0108
    sol = cg(apply_mat, rhs, cg_iters=5, verbose=True)
    assert np.allclose(mat.dot(sol), rhs)
    # Exact preconditioner: should converge essentially immediately.
    sol = preconditioned_cg(apply_mat, lambda v: np.linalg.solve(mat, v), rhs,
                            cg_iters=5, verbose=True)
    assert np.allclose(mat.dot(sol), rhs)
    # Jacobi (diagonal) preconditioner.
    sol = preconditioned_cg(apply_mat, lambda v: v / np.diag(mat), rhs,
                            cg_iters=5, verbose=True)
    assert np.allclose(mat.dot(sol), rhs)
def lanczos(f_Ax, b, k):
    """Three-term Lanczos iteration (Demmel ch 6).

    Builds an orthonormal basis ``Q`` of the Krylov subspace
    ``span{b, Ab, A^2 b, ...}`` together with the diagonal (``alphas``) and
    off-diagonal (``betas``) entries of the tridiagonal projection
    ``T = Q^T A Q``. Stops early if the residual norm underflows 1e-9.
    Returns ``(Q, alphas, betas)`` with ``len(betas) == len(alphas) - 1``.
    """
    assert k > 1
    diag_terms = []
    offdiag_terms = []
    basis = []
    vec = b / np.linalg.norm(b)
    beta = 0
    prev_vec = np.zeros_like(b)
    for j in range(k):
        basis.append(vec)
        w = f_Ax(vec)
        alpha = vec.dot(w)
        diag_terms.append(alpha)
        # Three-term recurrence: remove components along the two latest
        # basis vectors only (no full reorthogonalization).
        w = w - (alpha * vec + beta * prev_vec)
        beta = np.linalg.norm(w)
        offdiag_terms.append(beta)
        print("beta", beta)
        if beta < 1e-9:
            print("lanczos: early after %i/%i dimensions" % (j + 1, k))
            break
        else:
            prev_vec = vec
            vec = w / beta
    return (np.array(basis, 'float64').T,
            np.array(diag_terms, 'float64'),
            np.array(offdiag_terms[:-1], 'float64'))
def lanczos2(f_Ax, b, k, residual_thresh=1e-9):
    """Lanczos iteration with full reorthogonalization (Demmel ch 6).

    Like :func:`lanczos`, but each new vector is orthogonalized against the
    entire basis built so far (modified Gram-Schmidt), which is more robust
    for ill-conditioned operators. Returns ``(Q, H)`` where ``H = Q^T A Q``
    is symmetric and truncated to the dimensions actually generated.
    """
    b = b.astype('float64')
    assert k > 1
    H = np.zeros((k, k))
    basis = []
    vec = b / np.linalg.norm(b)
    for j in range(k):
        basis.append(vec)
        w = f_Ax(vec.astype('float64')).astype('float64')
        for i, u in enumerate(basis):
            coeff = u.dot(w)
            # Fill both triangles so H stays symmetric.
            H[j, i] = H[i, j] = coeff
            w = w - coeff * u
        beta = np.linalg.norm(w)
        if beta < residual_thresh:
            print("lanczos2: stopping early after %i/%i dimensions residual %f < %f" % (j + 1, k, beta, residual_thresh))
            break
        else:
            vec = w / beta
    return np.array(basis).T, H[:len(basis), :len(basis)]
def make_tridiagonal(alphas, betas):
    """Assemble the symmetric tridiagonal float64 matrix with ``alphas`` on
    the main diagonal and ``betas`` on the first sub- and super-diagonals."""
    assert len(alphas) == len(betas) + 1
    main_diag = np.asarray(alphas, dtype='float64')
    off_diag = np.asarray(betas, dtype='float64')
    return np.diag(main_diag) + np.diag(off_diag, 1) + np.diag(off_diag, -1)
def tridiagonal_eigenvalues(alphas, betas):
    """Eigenvalues (ascending) of the symmetric tridiagonal matrix with
    ``alphas`` on the diagonal and ``betas`` on the off-diagonals."""
    assert len(alphas) == len(betas) + 1
    main_diag = np.asarray(alphas, dtype='float64')
    off_diag = np.asarray(betas, dtype='float64')
    T = np.diag(main_diag) + np.diag(off_diag, 1) + np.diag(off_diag, -1)
    return np.linalg.eigvalsh(T)
def test_lanczos():
    """Exercise both Lanczos variants against a dense eigendecomposition,
    then print their behavior on an ill-conditioned (and noisy) operator."""
    np.set_printoptions(precision=4)

    A = np.random.randn(5, 5)
    A = A.T.dot(A)
    b = np.random.randn(5)
    # NOTE: f_Ax closes over the *variable* A, so rebinding A below changes
    # what f_Ax computes — the later calls rely on this.
    f_Ax = lambda x: A.dot(x)  # pylint: disable=W0108
    Q, alphas, betas = lanczos(f_Ax, b, 10)
    H = make_tridiagonal(alphas, betas)
    # With k >= n the basis is complete, so Q^T A Q recovers H exactly
    # (up to round-off) and H shares A's spectrum.
    assert np.allclose(Q.T.dot(A).dot(Q), H)
    assert np.allclose(Q.dot(H).dot(Q.T), A)
    assert np.allclose(np.linalg.eigvalsh(H), np.linalg.eigvalsh(A))

    Q, H1 = lanczos2(f_Ax, b, 10)
    assert np.allclose(H, H1, atol=1e-6)

    # Ritz values from growing subspaces should approach the true spectrum.
    print("ritz eigvals:")
    for i in range(1, 6):
        Qi = Q[:, :i]
        Hi = Qi.T.dot(A).dot(Qi)
        print(np.linalg.eigvalsh(Hi)[::-1])
    print("true eigvals:")
    print(np.linalg.eigvalsh(A)[::-1])

    print("lanczos on ill-conditioned problem")
    # Rebinds A (see note above): diagonal spectrum 1, 10, ..., 10^4.
    A = np.diag(10 ** np.arange(5))
    Q, H1 = lanczos2(f_Ax, b, 10)
    print(np.linalg.eigvalsh(H1))

    print("lanczos on ill-conditioned problem with noise")

    def f_Ax_noisy(x):
        # Same operator with additive Gaussian perturbation per application.
        return A.dot(x) + np.random.randn(x.size) * 1e-3

    Q, H1 = lanczos2(f_Ax_noisy, b, 10)
    print(np.linalg.eigvalsh(H1))
if __name__ == "__main__":
    # Run the module's randomized smoke tests when executed directly;
    # results are printed and guarded by in-test assertions.
    test_lanczos()
    test_cg()
| 5,760 | 24.604444 | 121 | py |
rllab | rllab-master/rllab/regressors/gaussian_mlp_regressor.py | import lasagne
import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
import theano
import theano.tensor as TT
from rllab.core.lasagne_layers import ParamLayer
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.network import MLP
from rllab.core.serializable import Serializable
from rllab.misc import logger
from rllab.misc.ext import compile_function
from rllab.optimizers.lbfgs_optimizer import LbfgsOptimizer
from rllab.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from rllab.distributions.diagonal_gaussian import DiagonalGaussian
from rllab.misc.ext import iterate_minibatches_generic
class GaussianMLPRegressor(LasagnePowered):
    """
    A class for performing regression by fitting a Gaussian distribution to the outputs.
    """

    def __init__(
            self,
            input_shape,
            output_dim,
            mean_network=None,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=NL.rectify,
            optimizer=None,
            use_trust_region=True,
            step_size=0.01,
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            std_hidden_sizes=(32, 32),
            std_nonlinearity=None,
            normalize_inputs=True,
            normalize_outputs=True,
            name=None,
            batchsize=None,
            subsample_factor=1.,
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        :param learn_std: Whether to learn the standard deviations. Only effective if adaptive_std is False. If
         adaptive_std is True, this parameter is ignored, and the weights for the std network are always learned.
        :param adaptive_std: Whether to make the std a function of the states.
        :param std_share_network: Whether to use the same network as the mean.
        :param std_hidden_sizes: Number of hidden units of each layer of the std network. Only used if
         `std_share_network` is False. It defaults to the same architecture as the mean.
        :param std_nonlinearity: Non-linearity used for each layer of the std network. Only used if `std_share_network`
         is False. It defaults to the same non-linearity as the mean.
        :param normalize_inputs: Whether to z-normalize inputs before feeding them to the network.
        :param normalize_outputs: Whether to fit the network against z-normalized targets.
        :param name: Optional prefix for the tabular diagnostics logged by `fit`.
        :param batchsize: Minibatch size used by `fit`; None fits on the whole dataset at once.
        :param subsample_factor: Fraction in (0, 1] of the data randomly subsampled on each `fit` call.
        """
        # quick_init must run before any extra locals are created: it records
        # locals() (i.e. exactly the constructor arguments) for serialization.
        Serializable.quick_init(self, locals())

        self._batchsize = batchsize
        self._subsample_factor = subsample_factor

        if optimizer is None:
            if use_trust_region:
                optimizer = PenaltyLbfgsOptimizer()
            else:
                optimizer = LbfgsOptimizer()

        self._optimizer = optimizer

        if mean_network is None:
            mean_network = MLP(
                input_shape=input_shape,
                output_dim=output_dim,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=None,
            )

        l_mean = mean_network.output_layer

        if adaptive_std:
            # State-dependent std: a separate MLP sharing the mean network's
            # input variable.
            l_log_std = MLP(
                input_shape=input_shape,
                input_var=mean_network.input_layer.input_var,
                output_dim=output_dim,
                hidden_sizes=std_hidden_sizes,
                hidden_nonlinearity=std_nonlinearity,
                output_nonlinearity=None,
            ).output_layer
        else:
            # Constant (optionally learned) per-dimension log-std parameter.
            l_log_std = ParamLayer(
                mean_network.input_layer,
                num_units=output_dim,
                param=lasagne.init.Constant(np.log(init_std)),
                name="output_log_std",
                trainable=learn_std,
            )

        LasagnePowered.__init__(self, [l_mean, l_log_std])

        xs_var = mean_network.input_layer.input_var
        ys_var = TT.matrix("ys")
        old_means_var = TT.matrix("old_means")
        old_log_stds_var = TT.matrix("old_log_stds")

        # Normalization statistics live in broadcastable shared variables so
        # they participate directly in the symbolic graph.
        x_mean_var = theano.shared(
            np.zeros((1,) + input_shape, dtype=theano.config.floatX),
            name="x_mean",
            broadcastable=(True,) + (False,) * len(input_shape)
        )
        x_std_var = theano.shared(
            np.ones((1,) + input_shape, dtype=theano.config.floatX),
            name="x_std",
            broadcastable=(True,) + (False,) * len(input_shape)
        )
        y_mean_var = theano.shared(
            np.zeros((1, output_dim), dtype=theano.config.floatX),
            name="y_mean",
            broadcastable=(True, False)
        )
        y_std_var = theano.shared(
            np.ones((1, output_dim), dtype=theano.config.floatX),
            name="y_std",
            broadcastable=(True, False)
        )

        normalized_xs_var = (xs_var - x_mean_var) / x_std_var
        normalized_ys_var = (ys_var - y_mean_var) / y_std_var

        normalized_means_var = L.get_output(
            l_mean, {mean_network.input_layer: normalized_xs_var})
        normalized_log_stds_var = L.get_output(
            l_log_std, {mean_network.input_layer: normalized_xs_var})

        # Map the network's normalized-space outputs back to the original
        # output space (and map the "old" distribution the other way).
        means_var = normalized_means_var * y_std_var + y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(y_std_var)

        normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
        normalized_old_log_stds_var = old_log_stds_var - TT.log(y_std_var)

        dist = self._dist = DiagonalGaussian(output_dim)

        normalized_dist_info_vars = dict(
            mean=normalized_means_var, log_std=normalized_log_stds_var)

        # Trust-region constraint: mean KL between the pre-update and current
        # output distributions, in normalized space.
        mean_kl = TT.mean(dist.kl_sym(
            dict(mean=normalized_old_means_var,
                 log_std=normalized_old_log_stds_var),
            normalized_dist_info_vars,
        ))

        # Negative log-likelihood of the (normalized) targets.
        loss = - \
            TT.mean(dist.log_likelihood_sym(
                normalized_ys_var, normalized_dist_info_vars))

        self._f_predict = compile_function([xs_var], means_var)
        self._f_pdists = compile_function([xs_var], [means_var, log_stds_var])
        self._l_mean = l_mean
        self._l_log_std = l_log_std

        optimizer_args = dict(
            loss=loss,
            target=self,
            network_outputs=[normalized_means_var, normalized_log_stds_var],
        )

        if use_trust_region:
            optimizer_args["leq_constraint"] = (mean_kl, step_size)
            optimizer_args["inputs"] = [
                xs_var, ys_var, old_means_var, old_log_stds_var]
        else:
            optimizer_args["inputs"] = [xs_var, ys_var]

        self._optimizer.update_opt(**optimizer_args)

        self._use_trust_region = use_trust_region
        self._name = name

        self._normalize_inputs = normalize_inputs
        self._normalize_outputs = normalize_outputs
        self._mean_network = mean_network
        self._x_mean_var = x_mean_var
        self._x_std_var = x_std_var
        self._y_mean_var = y_mean_var
        self._y_std_var = y_std_var

    def fit(self, xs, ys):
        """
        Fit the regressor: optionally subsample the data, refresh the
        normalization statistics, then optimize minibatch by minibatch.
        Logs LossBefore/LossAfter/dLoss (and MeanKL under trust region),
        each averaged over the processed minibatches.
        """
        if self._subsample_factor < 1:
            num_samples_tot = xs.shape[0]
            idx = np.random.randint(0, num_samples_tot, int(num_samples_tot * self._subsample_factor))
            xs, ys = xs[idx], ys[idx]

        if self._normalize_inputs:
            # recompute normalizing constants for inputs
            self._x_mean_var.set_value(
                np.mean(xs, axis=0, keepdims=True).astype(theano.config.floatX))
            self._x_std_var.set_value(
                (np.std(xs, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
        if self._normalize_outputs:
            # recompute normalizing constants for outputs
            self._y_mean_var.set_value(
                np.mean(ys, axis=0, keepdims=True).astype(theano.config.floatX))
            self._y_std_var.set_value(
                (np.std(ys, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
        if self._name:
            prefix = self._name + "_"
        else:
            prefix = ""
        # FIXME: needs batch computation to avoid OOM.
        loss_before, loss_after, mean_kl, batch_count = 0., 0., 0., 0
        for batch_xs, batch_ys in iterate_minibatches_generic(
                input_lst=[xs, ys], batchsize=self._batchsize, shuffle=True):
            batch_count += 1
            if self._use_trust_region:
                old_means, old_log_stds = self._f_pdists(batch_xs)
                inputs = [batch_xs, batch_ys, old_means, old_log_stds]
            else:
                inputs = [batch_xs, batch_ys]
            loss_before += self._optimizer.loss(inputs)
            self._optimizer.optimize(inputs)
            loss_after += self._optimizer.loss(inputs)
            if self._use_trust_region:
                mean_kl += self._optimizer.constraint_val(inputs)
        logger.record_tabular(prefix + 'LossBefore', loss_before / batch_count)
        logger.record_tabular(prefix + 'LossAfter', loss_after / batch_count)
        # Bug fix: the subtraction must be parenthesized. Previously this
        # logged `loss_before - (loss_after / batch_count)`, which is not the
        # average loss decrease whenever batch_count > 1.
        logger.record_tabular(prefix + 'dLoss', (loss_before - loss_after) / batch_count)
        if self._use_trust_region:
            logger.record_tabular(prefix + 'MeanKL', mean_kl / batch_count)

    def predict(self, xs):
        """
        Return the maximum likelihood estimate of the predicted y.
        :param xs:
        :return:
        """
        return self._f_predict(xs)

    def sample_predict(self, xs):
        """
        Sample one possible output from the prediction distribution.
        :param xs:
        :return:
        """
        means, log_stds = self._f_pdists(xs)
        return self._dist.sample(dict(mean=means, log_std=log_stds))

    def predict_log_likelihood(self, xs, ys):
        """Log-likelihood of targets `ys` under the predicted Gaussians for `xs`."""
        means, log_stds = self._f_pdists(xs)
        return self._dist.log_likelihood(ys, dict(mean=means, log_std=log_stds))

    def log_likelihood_sym(self, x_var, y_var):
        """Symbolic log-likelihood of `y_var` given inputs `x_var`."""
        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var

        normalized_means_var, normalized_log_stds_var = \
            L.get_output([self._l_mean, self._l_log_std], {
                self._mean_network.input_layer: normalized_xs_var})

        means_var = normalized_means_var * self._y_std_var + self._y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(self._y_std_var)

        return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))

    def get_param_values(self, **tags):
        """Flattened parameter values of the mean and log-std networks."""
        return LasagnePowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        """Set parameters from a flattened vector (inverse of get_param_values)."""
        return LasagnePowered.set_param_values(self, flattened_params, **tags)
rllab | rllab-master/rllab/regressors/categorical_mlp_regressor.py | import lasagne.layers as L
import lasagne.nonlinearities as NL
import numpy as np
import theano
import theano.tensor as TT
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.network import MLP
from rllab.core.serializable import Serializable
from rllab.distributions.categorical import Categorical
from rllab.misc import ext
from rllab.misc import logger
from rllab.misc import special
from rllab.optimizers.lbfgs_optimizer import LbfgsOptimizer
from rllab.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
NONE = list()
class CategoricalMLPRegressor(LasagnePowered):
    """
    A class for performing regression (or classification, really) by fitting a categorical distribution to the outputs.
    Assumes that the outputs will be always a one hot vector.
    """

    def __init__(
            self,
            input_shape,
            output_dim,
            prob_network=None,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=NL.rectify,
            optimizer=None,
            use_trust_region=True,
            step_size=0.01,
            normalize_inputs=True,
            name=None,
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        :param normalize_inputs: Whether to z-normalize inputs before feeding the network.
        :param name: Optional prefix for the tabular diagnostics logged by `fit`.
        """
        # NOTE: quick_init records locals() here — i.e. exactly the
        # constructor arguments — for serialization; keep it first.
        Serializable.quick_init(self, locals())

        if optimizer is None:
            if use_trust_region:
                optimizer = PenaltyLbfgsOptimizer()
            else:
                optimizer = LbfgsOptimizer()

        self.output_dim = output_dim
        self._optimizer = optimizer

        if prob_network is None:
            # Softmax output head produces class probabilities.
            prob_network = MLP(
                input_shape=input_shape,
                output_dim=output_dim,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=NL.softmax,
            )

        l_prob = prob_network.output_layer

        LasagnePowered.__init__(self, [l_prob])

        xs_var = prob_network.input_layer.input_var
        # Targets are integer one-hot matrices; old_prob holds the
        # pre-update probabilities for the trust-region KL term.
        ys_var = TT.imatrix("ys")
        old_prob_var = TT.matrix("old_prob")

        # Input normalization statistics as broadcastable shared variables.
        x_mean_var = theano.shared(
            np.zeros((1,) + input_shape),
            name="x_mean",
            broadcastable=(True,) + (False,) * len(input_shape)
        )
        x_std_var = theano.shared(
            np.ones((1,) + input_shape),
            name="x_std",
            broadcastable=(True,) + (False,) * len(input_shape)
        )

        normalized_xs_var = (xs_var - x_mean_var) / x_std_var

        prob_var = L.get_output(l_prob, {prob_network.input_layer: normalized_xs_var})

        old_info_vars = dict(prob=old_prob_var)
        info_vars = dict(prob=prob_var)

        dist = self._dist = Categorical(output_dim)

        # Trust-region constraint: mean KL(old || new) over the batch.
        mean_kl = TT.mean(dist.kl_sym(old_info_vars, info_vars))

        # Negative log-likelihood of the one-hot targets.
        loss = - TT.mean(dist.log_likelihood_sym(ys_var, info_vars))

        # Hard prediction: argmax class, re-encoded as a one-hot vector.
        predicted = special.to_onehot_sym(TT.argmax(prob_var, axis=1), output_dim)

        self._f_predict = ext.compile_function([xs_var], predicted)
        self._f_prob = ext.compile_function([xs_var], prob_var)

        self._prob_network = prob_network
        self._l_prob = l_prob

        optimizer_args = dict(
            loss=loss,
            target=self,
            network_outputs=[prob_var],
        )

        if use_trust_region:
            optimizer_args["leq_constraint"] = (mean_kl, step_size)
            optimizer_args["inputs"] = [xs_var, ys_var, old_prob_var]
        else:
            optimizer_args["inputs"] = [xs_var, ys_var]

        self._optimizer.update_opt(**optimizer_args)

        self._use_trust_region = use_trust_region
        self._name = name

        self._normalize_inputs = normalize_inputs
        self._x_mean_var = x_mean_var
        self._x_std_var = x_std_var

    def fit(self, xs, ys):
        """Fit on the full dataset in one optimizer call, logging
        LossBefore/LossAfter/dLoss diagnostics."""
        if self._normalize_inputs:
            # recompute normalizing constants for inputs
            self._x_mean_var.set_value(np.mean(xs, axis=0, keepdims=True))
            self._x_std_var.set_value(np.std(xs, axis=0, keepdims=True) + 1e-8)
        if self._use_trust_region:
            # Snapshot current probabilities to anchor the KL constraint.
            old_prob = self._f_prob(xs)
            inputs = [xs, ys, old_prob]
        else:
            inputs = [xs, ys]
        loss_before = self._optimizer.loss(inputs)
        if self._name:
            prefix = self._name + "_"
        else:
            prefix = ""
        logger.record_tabular(prefix + 'LossBefore', loss_before)
        self._optimizer.optimize(inputs)
        loss_after = self._optimizer.loss(inputs)
        logger.record_tabular(prefix + 'LossAfter', loss_after)
        logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)

    def predict(self, xs):
        """Return one-hot encoded argmax predictions for `xs`."""
        return self._f_predict(np.asarray(xs))

    def predict_log_likelihood(self, xs, ys):
        """Log-likelihood of one-hot targets `ys` under the predicted categoricals."""
        prob = self._f_prob(np.asarray(xs))
        return self._dist.log_likelihood(np.asarray(ys), dict(prob=prob))

    def log_likelihood_sym(self, x_var, y_var):
        """Symbolic log-likelihood of `y_var` given inputs `x_var`."""
        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var
        prob = L.get_output(self._l_prob, {self._prob_network.input_layer: normalized_xs_var})
        return self._dist.log_likelihood_sym(TT.cast(y_var, 'int32'), dict(prob=prob))

    def get_param_values(self, **tags):
        # Delegates to the Lasagne-backed parameter store.
        return LasagnePowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        # Inverse of get_param_values.
        return LasagnePowered.set_param_values(self, flattened_params, **tags)
| 5,876 | 34.403614 | 119 | py |
rllab | rllab-master/rllab/regressors/gaussian_conv_regressor.py | import numpy as np
import lasagne
import lasagne.layers as L
import lasagne.nonlinearities as NL
import theano
import theano.tensor as TT
from rllab.misc.ext import compile_function
from rllab.core.lasagne_layers import ParamLayer
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.network import ConvNetwork
from rllab.misc import tensor_utils
from rllab.optimizers.lbfgs_optimizer import LbfgsOptimizer
from rllab.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from rllab.distributions.diagonal_gaussian import DiagonalGaussian
from rllab.core.serializable import Serializable
from rllab.misc.ext import iterate_minibatches_generic
from rllab.misc import logger
class GaussianConvRegressor(LasagnePowered):
    """
    A class for performing regression by fitting a Gaussian distribution to the outputs.
    """

    def __init__(
            self,
            name,
            input_shape,
            output_dim,
            hidden_sizes,
            conv_filters, conv_filter_sizes, conv_strides, conv_pads,
            hidden_nonlinearity=NL.rectify,
            mean_network=None,
            optimizer=None,
            use_trust_region=True,
            step_size=0.01,
            subsample_factor=1.0,
            batchsize=None,
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            # NOTE: mutable list defaults are safe here — they are only read,
            # never mutated — and kept for interface compatibility.
            std_conv_filters=[], std_conv_filters_sizes=[], std_conv_strides=[], std_conv_pads=[],
            std_hidden_sizes=(32, 32),
            std_nonlinearity=None,
            normalize_inputs=True,
            normalize_outputs=True,
    ):
        """
        :param input_shape: usually for images of the form (width,height,channel)
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        :param learn_std: Whether to learn the standard deviations. Only effective if adaptive_std is False. If
         adaptive_std is True, this parameter is ignored, and the weights for the std network are always learned.
        :param adaptive_std: Whether to make the std a function of the states.
        :param std_share_network: Whether to use the same network as the mean.
        :param std_hidden_sizes: Number of hidden units of each layer of the std network. Only used if
         `std_share_network` is False. It defaults to the same architecture as the mean.
        :param std_nonlinearity: Non-linearity used for each layer of the std network. Only used if `std_share_network`
         is False. It defaults to the same non-linearity as the mean.
        """
        # quick_init must run before any extra locals are created: it records
        # locals() (the constructor arguments) for serialization.
        Serializable.quick_init(self, locals())

        if optimizer is None:
            if use_trust_region:
                optimizer = PenaltyLbfgsOptimizer("optimizer")
            else:
                optimizer = LbfgsOptimizer("optimizer")

        self._optimizer = optimizer

        self.input_shape = input_shape
        if mean_network is None:
            mean_network = ConvNetwork(
                name="mean_network",
                input_shape=input_shape,
                output_dim=output_dim,
                conv_filters=conv_filters,
                conv_filter_sizes=conv_filter_sizes,
                conv_strides=conv_strides,
                conv_pads=conv_pads,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=None,
            )

        l_mean = mean_network.output_layer

        if adaptive_std:
            # State-dependent std: a separate conv network on the same input.
            l_log_std = ConvNetwork(
                name="log_std_network",
                input_shape=input_shape,
                input_var=mean_network.input_layer.input_var,
                output_dim=output_dim,
                conv_filters=std_conv_filters,
                # Bug fix: this previously referenced the undefined name
                # `std_conv_filter_sizes`, a NameError whenever
                # adaptive_std=True. The constructor parameter is spelled
                # `std_conv_filters_sizes`; its public name is kept for
                # backward compatibility.
                conv_filter_sizes=std_conv_filters_sizes,
                conv_strides=std_conv_strides,
                conv_pads=std_conv_pads,
                hidden_sizes=std_hidden_sizes,
                hidden_nonlinearity=std_nonlinearity,
                output_nonlinearity=None,
            ).output_layer
        else:
            # Constant (optionally learned) per-dimension log-std parameter.
            l_log_std = ParamLayer(
                mean_network.input_layer,
                num_units=output_dim,
                param=lasagne.init.Constant(np.log(init_std)),
                name="output_log_std",
                trainable=learn_std,
            )

        LasagnePowered.__init__(self, [l_mean, l_log_std])

        xs_var = mean_network.input_layer.input_var
        ys_var = TT.matrix("ys")
        old_means_var = TT.matrix("old_means")
        old_log_stds_var = TT.matrix("old_log_stds")

        # Input statistics are kept over the *flattened* image, hence the
        # (1, prod(input_shape)) shape.
        x_mean_var = theano.shared(
            np.zeros((1, np.prod(input_shape)), dtype=theano.config.floatX),
            name="x_mean",
            broadcastable=(True, False),
        )
        x_std_var = theano.shared(
            np.ones((1, np.prod(input_shape)), dtype=theano.config.floatX),
            name="x_std",
            broadcastable=(True, False),
        )
        y_mean_var = theano.shared(
            np.zeros((1, output_dim), dtype=theano.config.floatX),
            name="y_mean",
            broadcastable=(True, False)
        )
        y_std_var = theano.shared(
            np.ones((1, output_dim), dtype=theano.config.floatX),
            name="y_std",
            broadcastable=(True, False)
        )

        normalized_xs_var = (xs_var - x_mean_var) / x_std_var
        normalized_ys_var = (ys_var - y_mean_var) / y_std_var

        normalized_means_var = L.get_output(
            l_mean, {mean_network.input_layer: normalized_xs_var})
        normalized_log_stds_var = L.get_output(
            l_log_std, {mean_network.input_layer: normalized_xs_var})

        # Map normalized-space outputs back to the original output space
        # (and the "old" distribution the other way).
        means_var = normalized_means_var * y_std_var + y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(y_std_var)

        normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
        normalized_old_log_stds_var = old_log_stds_var - TT.log(y_std_var)

        dist = self._dist = DiagonalGaussian(output_dim)

        normalized_dist_info_vars = dict(
            mean=normalized_means_var, log_std=normalized_log_stds_var)

        # Trust-region constraint: mean KL between pre-update and current
        # output distributions, in normalized space.
        mean_kl = TT.mean(dist.kl_sym(
            dict(mean=normalized_old_means_var,
                 log_std=normalized_old_log_stds_var),
            normalized_dist_info_vars,
        ))

        loss = - \
            TT.mean(dist.log_likelihood_sym(
                normalized_ys_var, normalized_dist_info_vars))

        self._f_predict = compile_function([xs_var], means_var)
        self._f_pdists = compile_function([xs_var], [means_var, log_stds_var])
        self._l_mean = l_mean
        self._l_log_std = l_log_std

        optimizer_args = dict(
            loss=loss,
            target=self,
            network_outputs=[normalized_means_var, normalized_log_stds_var],
        )

        if use_trust_region:
            optimizer_args["leq_constraint"] = (mean_kl, step_size)
            optimizer_args["inputs"] = [
                xs_var, ys_var, old_means_var, old_log_stds_var]
        else:
            optimizer_args["inputs"] = [xs_var, ys_var]

        self._optimizer.update_opt(**optimizer_args)

        self._use_trust_region = use_trust_region
        self._name = name

        self._normalize_inputs = normalize_inputs
        self._normalize_outputs = normalize_outputs
        self._mean_network = mean_network
        self._x_mean_var = x_mean_var
        self._x_std_var = x_std_var
        self._y_mean_var = y_mean_var
        self._y_std_var = y_std_var

        self._subsample_factor = subsample_factor
        self._batchsize = batchsize

    def fit(self, xs, ys):
        """
        Fit the regressor: optionally subsample the data, refresh the
        normalization statistics, then optimize minibatch by minibatch.
        Logs LossBefore/LossAfter/dLoss (and MeanKL under trust region),
        each averaged over the processed minibatches.
        """
        if self._subsample_factor < 1:
            num_samples_tot = xs.shape[0]
            idx = np.random.randint(0, num_samples_tot, int(num_samples_tot * self._subsample_factor))
            xs, ys = xs[idx], ys[idx]

        if self._normalize_inputs:
            # recompute normalizing constants for inputs
            self._x_mean_var.set_value(
                np.mean(xs, axis=0, keepdims=True).astype(theano.config.floatX))
            self._x_std_var.set_value(
                (np.std(xs, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
        if self._normalize_outputs:
            # recompute normalizing constants for outputs
            self._y_mean_var.set_value(
                np.mean(ys, axis=0, keepdims=True).astype(theano.config.floatX))
            self._y_std_var.set_value(
                (np.std(ys, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
        if self._name:
            prefix = self._name + "_"
        else:
            prefix = ""
        # FIXME: needs batch computation to avoid OOM.
        loss_before, loss_after, mean_kl, batch_count = 0., 0., 0., 0
        for batch_xs, batch_ys in iterate_minibatches_generic(
                input_lst=[xs, ys], batchsize=self._batchsize, shuffle=True):
            batch_count += 1
            if self._use_trust_region:
                old_means, old_log_stds = self._f_pdists(batch_xs)
                inputs = [batch_xs, batch_ys, old_means, old_log_stds]
            else:
                inputs = [batch_xs, batch_ys]
            loss_before += self._optimizer.loss(inputs)
            self._optimizer.optimize(inputs)
            loss_after += self._optimizer.loss(inputs)
            if self._use_trust_region:
                mean_kl += self._optimizer.constraint_val(inputs)
        logger.record_tabular(prefix + 'LossBefore', loss_before / batch_count)
        logger.record_tabular(prefix + 'LossAfter', loss_after / batch_count)
        # Bug fix: the subtraction must be parenthesized. Previously this
        # logged `loss_before - (loss_after / batch_count)`, which is not the
        # average loss decrease whenever batch_count > 1.
        logger.record_tabular(prefix + 'dLoss', (loss_before - loss_after) / batch_count)
        if self._use_trust_region:
            logger.record_tabular(prefix + 'MeanKL', mean_kl / batch_count)

    def predict(self, xs):
        """
        Return the maximum likelihood estimate of the predicted y.
        :param xs:
        :return:
        """
        return self._f_predict(xs)

    def sample_predict(self, xs):
        """
        Sample one possible output from the prediction distribution.
        :param xs:
        :return:
        """
        means, log_stds = self._f_pdists(xs)
        return self._dist.sample(dict(mean=means, log_std=log_stds))

    def predict_log_likelihood(self, xs, ys):
        """Log-likelihood of targets `ys` under the predicted Gaussians for `xs`."""
        means, log_stds = self._f_pdists(xs)
        return self._dist.log_likelihood(ys, dict(mean=means, log_std=log_stds))

    def log_likelihood_sym(self, x_var, y_var):
        """Symbolic log-likelihood of `y_var` given inputs `x_var`."""
        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var

        normalized_means_var, normalized_log_stds_var = \
            L.get_output([self._l_mean, self._l_log_std], {
                self._mean_network.input_layer: normalized_xs_var})

        means_var = normalized_means_var * self._y_std_var + self._y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(self._y_std_var)

        return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))

    def get_param_values(self, **tags):
        """Flattened parameter values of the mean and log-std networks."""
        return LasagnePowered.get_param_values(self, **tags)

    def set_param_values(self, flattened_params, **tags):
        """Set parameters from a flattened vector (inverse of get_param_values)."""
        return LasagnePowered.set_param_values(self, flattened_params, **tags)
| 11,624 | 38.675768 | 119 | py |
rllab | rllab-master/rllab/regressors/product_regressor.py |
import numpy as np
from rllab.core.serializable import Serializable
class ProductRegressor(Serializable):
    """
    A class for performing MLE regression by fitting a product distribution to the outputs. A separate regressor will
    be trained for each individual input distribution.
    """

    def __init__(self, regressors):
        """
        :param regressors: List of individual regressors
        """
        Serializable.quick_init(self, locals())
        self.regressors = regressors
        self.output_dims = [x.output_dim for x in regressors]

    def _split_ys(self, ys):
        # Split the concatenated output matrix back into the per-regressor
        # column slices (one slice of width output_dim per regressor).
        ys = np.asarray(ys)
        split_ids = np.cumsum(self.output_dims)[:-1]
        return np.split(ys, split_ids, axis=1)

    def fit(self, xs, ys):
        """Fit each component regressor on its own output slice."""
        for regressor, split_ys in zip(self.regressors, self._split_ys(ys)):
            regressor.fit(xs, split_ys)

    def predict(self, xs):
        """Concatenated ML predictions of all component regressors."""
        return np.concatenate([
            regressor.predict(xs) for regressor in self.regressors
        ], axis=1)

    def sample_predict(self, xs):
        """Concatenated samples from each component's predictive distribution."""
        return np.concatenate([
            regressor.sample_predict(xs) for regressor in self.regressors
        ], axis=1)

    def predict_log_likelihood(self, xs, ys):
        """Log-likelihood of the product distribution: the sum of the
        per-component log-likelihoods on their respective output slices."""
        return np.sum([
            regressor.predict_log_likelihood(xs, split_ys)
            for regressor, split_ys in zip(self.regressors, self._split_ys(ys))
        ], axis=0)

    def get_param_values(self, **tags):
        """All component parameters, concatenated into one flat vector."""
        return np.concatenate(
            [regressor.get_param_values(**tags) for regressor in self.regressors]
        )

    def set_param_values(self, flattened_params, **tags):
        """Distribute a flat parameter vector back to the component regressors."""
        # Total parameter count per regressor = sum of the sizes of each of
        # its parameter tensors. Bug fix: this previously computed
        # np.prod(get_param_shapes(...)) — the product of *all* dimensions
        # across all tensors — which is wrong for any regressor with more
        # than one parameter tensor.
        param_dims = [
            sum(int(np.prod(shape)) for shape in regressor.get_param_shapes(**tags))
            for regressor in self.regressors
        ]
        split_ids = np.cumsum(param_dims)[:-1]
        for regressor, split_param_values in zip(self.regressors, np.split(flattened_params, split_ids)):
            regressor.set_param_values(split_param_values)
rllab | rllab-master/rllab/regressors/__init__.py | __author__ = 'dementrock'
| 26 | 12.5 | 25 | py |
rllab | rllab-master/rllab/q_functions/base.py | from rllab.core.parameterized import Parameterized
class QFunction(Parameterized):
    """
    Abstract base type for Q-functions (state-action value functions).
    Purely a marker/interface class: concrete implementations (e.g.
    ContinuousMLPQFunction) supply the value computation, while parameter
    handling is inherited from Parameterized.
    """
    pass
| 94 | 14.833333 | 50 | py |
rllab | rllab-master/rllab/q_functions/continuous_mlp_q_function.py | import lasagne
import lasagne.layers as L
import lasagne.nonlinearities as NL
import lasagne.init
import theano.tensor as TT
from rllab.q_functions.base import QFunction
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.lasagne_layers import batch_norm
from rllab.core.serializable import Serializable
from rllab.misc import ext
class ContinuousMLPQFunction(QFunction, LasagnePowered):
    """
    MLP state-action value function Q(s, a) for continuous actions.
    Observations feed the input layer; the flattened action vector is
    concatenated into one of the hidden layers, selected by
    `action_merge_layer`.
    """

    def __init__(
            self,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=NL.rectify,
            hidden_W_init=lasagne.init.HeUniform(),
            hidden_b_init=lasagne.init.Constant(0.),
            action_merge_layer=-2,
            output_nonlinearity=None,
            output_W_init=lasagne.init.Uniform(-3e-3, 3e-3),
            output_b_init=lasagne.init.Uniform(-3e-3, 3e-3),
            bn=False):
        # NOTE: quick_init records locals() here — i.e. exactly the
        # constructor arguments — for serialization; keep it first.
        Serializable.quick_init(self, locals())

        l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
        l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")

        n_layers = len(hidden_sizes) + 1

        if n_layers > 1:
            # Normalize a possibly-negative merge index into [0, n_layers),
            # so the default -2 addresses the second-to-last layer.
            action_merge_layer = \
                (action_merge_layer % n_layers + n_layers) % n_layers
        else:
            action_merge_layer = 1

        l_hidden = l_obs

        for idx, size in enumerate(hidden_sizes):
            if bn:
                # Optional batch normalization before each hidden layer.
                l_hidden = batch_norm(l_hidden)

            if idx == action_merge_layer:
                # Inject the action vector at the chosen depth.
                l_hidden = L.ConcatLayer([l_hidden, l_action])

            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=size,
                W=hidden_W_init,
                b=hidden_b_init,
                nonlinearity=hidden_nonlinearity,
                name="h%d" % (idx + 1)
            )

        if action_merge_layer == n_layers:
            # Merge index past the last hidden layer: concatenate the action
            # just before the output layer instead.
            l_hidden = L.ConcatLayer([l_hidden, l_action])

        l_output = L.DenseLayer(
            l_hidden,
            num_units=1,
            W=output_W_init,
            b=output_b_init,
            nonlinearity=output_nonlinearity,
            name="output"
        )

        # deterministic=True selects inference-mode behavior (relevant when
        # batch normalization is enabled) for the compiled Q-value function.
        output_var = L.get_output(l_output, deterministic=True).flatten()

        self._f_qval = ext.compile_function([l_obs.input_var, l_action.input_var], output_var)
        self._output_layer = l_output
        self._obs_layer = l_obs
        self._action_layer = l_action
        self._output_nonlinearity = output_nonlinearity

        LasagnePowered.__init__(self, [l_output])

    def get_qval(self, observations, actions):
        """Evaluate Q(s, a) for a batch of observations and actions."""
        return self._f_qval(observations, actions)

    def get_qval_sym(self, obs_var, action_var, **kwargs):
        """Symbolic Q-values for the given observation/action variables,
        flattened to a 1-D vector (one value per row)."""
        qvals = L.get_output(
            self._output_layer,
            {self._obs_layer: obs_var, self._action_layer: action_var},
            **kwargs
        )
        return TT.reshape(qvals, (-1,))
rllab | rllab-master/rllab/q_functions/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/exploration_strategies/base.py | class ExplorationStrategy(object):
def get_action(self, t, observation, policy, **kwargs):
    """
    Return the (possibly perturbed) action to execute at timestep `t`,
    given the current `observation` and the exploitation `policy`.
    Subclasses must override this.
    """
    raise NotImplementedError
def reset(self):
    """Reset any internal state of the strategy (e.g. at episode
    boundaries). No-op by default."""
    pass
| 164 | 22.571429 | 59 | py |
rllab | rllab-master/rllab/exploration_strategies/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/exploration_strategies/ou_strategy.py | from rllab.misc.overrides import overrides
from rllab.misc.ext import AttrDict
from rllab.core.serializable import Serializable
from rllab.spaces.box import Box
from rllab.exploration_strategies.base import ExplorationStrategy
import numpy as np
import numpy.random as nr
class OUStrategy(ExplorationStrategy, Serializable):
    """Ornstein-Uhlenbeck exploration noise for deterministic policies.

    Perturbs the policy's action with temporally correlated noise drawn from
    the OU process, which satisfies the stochastic differential equation
    dxt = theta*(mu - xt)*dt + sigma*dWt (Wt a Wiener process), then clips
    the result to the action bounds.
    """

    def __init__(self, env_spec, mu=0, theta=0.15, sigma=0.3, **kwargs):
        assert isinstance(env_spec.action_space, Box)
        assert len(env_spec.action_space.shape) == 1
        Serializable.quick_init(self, locals())
        self.mu = mu
        self.theta = theta
        self.sigma = sigma
        self.action_space = env_spec.action_space
        self.state = np.ones(self.action_space.flat_dim) * self.mu
        self.reset()

    def __getstate__(self):
        snapshot = Serializable.__getstate__(self)
        snapshot["state"] = self.state
        return snapshot

    def __setstate__(self, d):
        Serializable.__setstate__(self, d)
        self.state = d["state"]

    @overrides
    def reset(self):
        # Restart the process at its long-run mean.
        self.state = np.ones(self.action_space.flat_dim) * self.mu

    def evolve_state(self):
        """Advance the OU process one step and return the new state."""
        drift = self.theta * (self.mu - self.state)
        diffusion = self.sigma * nr.randn(len(self.state))
        self.state = self.state + drift + diffusion
        return self.state

    @overrides
    def get_action(self, t, observation, policy, **kwargs):
        action, _ = policy.get_action(observation)
        noisy = action + self.evolve_state()
        return np.clip(noisy, self.action_space.low, self.action_space.high)
if __name__ == "__main__":
    # Quick visual sanity check: simulate 1000 steps of a 1-D OU process.
    ou = OUStrategy(env_spec=AttrDict(action_space=Box(low=-1, high=1, shape=(1,))), mu=0, theta=0.15, sigma=0.3)
    states = [ou.evolve_state()[0] for _ in range(1000)]
    import matplotlib.pyplot as plt
    plt.plot(states)
    plt.show()
| 2,159 | 32.230769 | 113 | py |
rllab | rllab-master/rllab/exploration_strategies/gaussian_strategy.py | from rllab.core.serializable import Serializable
from rllab.spaces.box import Box
from rllab.exploration_strategies.base import ExplorationStrategy
import numpy as np
class GaussianStrategy(ExplorationStrategy, Serializable):
    """Additive Gaussian exploration noise with a linearly annealed scale.

    The noise standard deviation decays from ``max_sigma`` to ``min_sigma``
    over the first ``decay_period`` timesteps and stays at ``min_sigma``
    afterwards.
    """

    def __init__(self, env_spec, max_sigma=1.0, min_sigma=0.1, decay_period=1000000):
        assert isinstance(env_spec.action_space, Box)
        assert len(env_spec.action_space.shape) == 1
        Serializable.quick_init(self, locals())
        self._max_sigma = max_sigma
        self._min_sigma = min_sigma
        self._decay_period = decay_period
        self._action_space = env_spec.action_space

    def get_action(self, t, observation, policy, **kwargs):
        action, agent_info = policy.get_action(observation)
        # Linear anneal, saturating once t reaches the decay period.
        progress = min(1.0, t * 1.0 / self._decay_period)
        sigma = self._max_sigma - (self._max_sigma - self._min_sigma) * progress
        noise = np.random.normal(size=len(action)) * sigma
        return np.clip(action + noise, self._action_space.low, self._action_space.high)
| 1,118 | 42.038462 | 110 | py |
rllab | rllab-master/rllab/optimizers/first_order_optimizer.py | from rllab.misc import ext
from rllab.misc import logger
from rllab.core.serializable import Serializable
# from rllab.algo.first_order_method import parse_update_method
from rllab.optimizers.minibatch_dataset import BatchDataset
from collections import OrderedDict
import time
import lasagne.updates
import theano
import pyprind
from functools import partial
class FirstOrderOptimizer(Serializable):
    """
    Performs (stochastic) gradient descent, possibly using fancier methods like adam etc.
    """

    def __init__(
            self,
            update_method=lasagne.updates.adam,
            learning_rate=1e-3,
            max_epochs=1000,
            tolerance=1e-6,
            batch_size=32,
            callback=None,
            verbose=False,
            **kwargs):
        """
        :param update_method: Lasagne-style update factory mapping (gradients, params) to an updates dict.
        :param learning_rate: step size bound into ``update_method`` via ``functools.partial``.
        :param max_epochs: maximum number of passes over the dataset.
        :param tolerance: stop early once the absolute loss change between epochs drops below this.
        :param batch_size: None or an integer. If None the whole dataset will be used.
        :param callback: optional function invoked after each epoch with a dict of progress stats.
        :param verbose: if True, log the loss after every epoch.
        :param kwargs: unused; accepted for interface compatibility.
        :return:
        """
        Serializable.quick_init(self, locals())
        self._opt_fun = None
        self._target = None
        self._callback = callback
        # Bind the learning rate now so that update_opt only supplies (grads, params).
        update_method = partial(update_method, learning_rate=learning_rate)
        self._update_method = update_method
        self._max_epochs = max_epochs
        self._tolerance = tolerance
        self._batch_size = batch_size
        self._verbose = verbose

    def update_opt(self, loss, target, inputs, extra_inputs=None, gradients=None, **kwargs):
        """
        Compile the loss and gradient-step functions for the given objective.

        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
         :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :param extra_inputs: extra symbolic variables fed to the compiled functions but never mini-batched.
        :param gradients: optional precomputed symbolic gradients; derived from ``loss`` when None.
        :return: No return value.
        """
        self._target = target
        if gradients is None:
            gradients = theano.grad(loss, target.get_params(trainable=True), disconnected_inputs='ignore')
        updates = self._update_method(gradients, target.get_params(trainable=True))
        # Cast each update expression back to its parameter's dtype (keeps float32 params float32).
        updates = OrderedDict([(k, v.astype(k.dtype)) for k, v in updates.items()])
        if extra_inputs is None:
            extra_inputs = list()
        # lazydict defers Theano compilation until the functions are first used.
        self._opt_fun = ext.lazydict(
            f_loss=lambda: ext.compile_function(inputs + extra_inputs, loss),
            f_opt=lambda: ext.compile_function(
                inputs=inputs + extra_inputs,
                outputs=loss,
                updates=updates,
            )
        )

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the compiled loss over the full (unbatched) inputs."""
        if extra_inputs is None:
            extra_inputs = tuple()
        return self._opt_fun["f_loss"](*(tuple(inputs) + extra_inputs))

    def optimize_gen(self, inputs, extra_inputs=None, callback=None, yield_itr=None):
        """
        Generator form of :meth:`optimize`: runs SGD epochs, yielding control back
        to the caller every ``yield_itr + 1`` mini-batch updates when ``yield_itr``
        is given.
        """
        if len(inputs) == 0:
            # Assumes that we should always sample mini-batches
            raise NotImplementedError
        f_opt = self._opt_fun["f_opt"]
        f_loss = self._opt_fun["f_loss"]
        if extra_inputs is None:
            extra_inputs = tuple()
        last_loss = f_loss(*(tuple(inputs) + extra_inputs))
        start_time = time.time()
        dataset = BatchDataset(
            inputs, self._batch_size,
            extra_inputs=extra_inputs
            #, randomized=self._randomized
        )
        itr = 0
        for epoch in pyprind.prog_bar(list(range(self._max_epochs))):
            for batch in dataset.iterate(update=True):
                f_opt(*batch)
                if yield_itr is not None and (itr % (yield_itr+1)) == 0:
                    yield
                itr += 1
            # Loss is re-evaluated on the full inputs, not the last mini-batch.
            new_loss = f_loss(*(tuple(inputs) + extra_inputs))
            if self._verbose:
                logger.log("Epoch %d, loss %s" % (epoch, new_loss))
            if self._callback or callback:
                elapsed = time.time() - start_time
                callback_args = dict(
                    loss=new_loss,
                    params=self._target.get_param_values(trainable=True) if self._target else None,
                    itr=epoch,
                    elapsed=elapsed,
                )
                # Note the asymmetry: the constructor callback takes the dict positionally,
                # the per-call callback takes it as keyword arguments.
                if self._callback:
                    self._callback(callback_args)
                if callback:
                    callback(**callback_args)
            # Early stopping on loss plateau.
            if abs(last_loss - new_loss) < self._tolerance:
                break
            last_loss = new_loss

    def optimize(self, inputs, **kwargs):
        """Run the full optimization, draining the generator for its side effects."""
        for _ in self.optimize_gen(inputs, **kwargs):
            pass
| 4,720 | 33.210145 | 112 | py |
rllab | rllab-master/rllab/optimizers/penalty_lbfgs_optimizer.py | from rllab.misc.ext import compile_function, lazydict, flatten_tensor_variables
from rllab.misc import logger
from rllab.core.serializable import Serializable
import theano.tensor as TT
import theano
import numpy as np
import scipy.optimize
class PenaltyLbfgsOptimizer(Serializable):
    """
    Performs constrained optimization via penalized L-BFGS. The penalty term is adaptively adjusted to make sure that
    the constraint is satisfied.
    """

    def __init__(
            self,
            max_opt_itr=20,
            initial_penalty=1.0,
            min_penalty=1e-2,
            max_penalty=1e6,
            increase_penalty_factor=2,
            decrease_penalty_factor=0.5,
            max_penalty_itr=10,
            adapt_penalty=True):
        """
        :param max_opt_itr: max L-BFGS iterations per penalty trial.
        :param initial_penalty: starting penalty coefficient.
        :param min_penalty: lower clip bound for the penalty.
        :param max_penalty: upper clip bound for the penalty.
        :param increase_penalty_factor: multiplier applied when the constraint is violated.
        :param decrease_penalty_factor: multiplier applied when the constraint is satisfied.
        :param max_penalty_itr: max number of penalty values tried per optimize() call.
        :param adapt_penalty: if False, use a single fixed penalty value.
        """
        Serializable.quick_init(self, locals())
        self._max_opt_itr = max_opt_itr
        self._penalty = initial_penalty
        self._initial_penalty = initial_penalty
        self._min_penalty = min_penalty
        self._max_penalty = max_penalty
        self._increase_penalty_factor = increase_penalty_factor
        self._decrease_penalty_factor = decrease_penalty_factor
        self._max_penalty_itr = max_penalty_itr
        self._adapt_penalty = adapt_penalty
        self._opt_fun = None
        self._target = None
        self._max_constraint_val = None
        self._constraint_name = None

    def update_opt(self, loss, target, leq_constraint, inputs, constraint_name="constraint", *args, **kwargs):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
         :class:`rllab.core.paramerized.Parameterized` class.
        :param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        constraint_term, constraint_value = leq_constraint
        penalty_var = TT.scalar("penalty")
        penalized_loss = loss + penalty_var * constraint_term
        self._target = target
        self._max_constraint_val = constraint_value
        self._constraint_name = constraint_name

        def get_opt_output():
            # scipy's L-BFGS requires float64 scalars and a flat float64 gradient.
            flat_grad = flatten_tensor_variables(theano.grad(
                penalized_loss, target.get_params(trainable=True), disconnected_inputs='ignore'
            ))
            return [penalized_loss.astype('float64'), flat_grad.astype('float64')]

        # Compilation is deferred until each function is first needed.
        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs, loss, log_name="f_loss"),
            f_constraint=lambda: compile_function(inputs, constraint_term, log_name="f_constraint"),
            f_penalized_loss=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=[penalized_loss, loss, constraint_term],
                log_name="f_penalized_loss",
            ),
            f_opt=lambda: compile_function(
                inputs=inputs + [penalty_var],
                outputs=get_opt_output(),
                log_name="f_opt"
            )
        )

    def loss(self, inputs):
        """Evaluate the unpenalized loss."""
        return self._opt_fun["f_loss"](*inputs)

    def constraint_val(self, inputs):
        """Evaluate the constraint term f(*inputs)."""
        return self._opt_fun["f_constraint"](*inputs)

    def optimize(self, inputs):
        """Run L-BFGS repeatedly, adapting the penalty until the constraint holds."""
        inputs = tuple(inputs)
        try_penalty = np.clip(
            self._penalty, self._min_penalty, self._max_penalty)
        penalty_scale_factor = None
        f_opt = self._opt_fun["f_opt"]
        f_penalized_loss = self._opt_fun["f_penalized_loss"]

        def gen_f_opt(penalty):
            # Wrap the compiled function in the (flat_params) -> (loss, grad)
            # signature that scipy.optimize.fmin_l_bfgs_b expects.
            def f(flat_params):
                self._target.set_param_values(flat_params, trainable=True)
                return f_opt(*(inputs + (penalty,)))
            return f

        cur_params = self._target.get_param_values(trainable=True).astype('float64')
        opt_params = cur_params
        for penalty_itr in range(self._max_penalty_itr):
            logger.log('trying penalty=%.3f...' % try_penalty)
            itr_opt_params, _, _ = scipy.optimize.fmin_l_bfgs_b(
                func=gen_f_opt(try_penalty), x0=cur_params,
                maxiter=self._max_opt_itr
            )
            _, try_loss, try_constraint_val = f_penalized_loss(*(inputs + (try_penalty,)))
            logger.log('penalty %f => loss %f, %s %f' %
                       (try_penalty, try_loss, self._constraint_name, try_constraint_val))
            # Either constraint satisfied, or we are at the last iteration already and no alternative parameter
            # satisfies the constraint
            if try_constraint_val < self._max_constraint_val or \
                    (penalty_itr == self._max_penalty_itr - 1 and opt_params is None):
                opt_params = itr_opt_params
            if not self._adapt_penalty:
                break
            # Decide scale factor on the first iteration, or if constraint violation yields numerical error
            if penalty_scale_factor is None or np.isnan(try_constraint_val):
                # Increase penalty if constraint violated, or if constraint term is NAN
                if try_constraint_val > self._max_constraint_val or np.isnan(try_constraint_val):
                    penalty_scale_factor = self._increase_penalty_factor
                else:
                    # Otherwise (i.e. constraint satisfied), shrink penalty
                    penalty_scale_factor = self._decrease_penalty_factor
                    opt_params = itr_opt_params
            else:
                # After the first iteration, the penalty is not None. Now whenever constraint is crossed, we stop adjust
                if penalty_scale_factor > 1 and \
                        try_constraint_val <= self._max_constraint_val:
                    break
                elif penalty_scale_factor < 1 and \
                        try_constraint_val >= self._max_constraint_val:
                    break
            # check if the penalty was already at the bounds. Otherwise update it. Here penalty_scale_fact is never None
            if try_penalty >= self._max_penalty and penalty_scale_factor > 1:
                logger.log('_max_penalty has already been tried!')
                self._penalty = try_penalty  # useless: if we were at max_penalty it means a previous itr already set it
                break
            elif try_penalty <= self._min_penalty and penalty_scale_factor < 1:
                logger.log('_min_penalty has already been tried!')
                self._penalty = try_penalty
                break
            else:
                try_penalty *= penalty_scale_factor
                try_penalty = np.clip(try_penalty, self._min_penalty, self._max_penalty)
                self._penalty = try_penalty

        # Install the best parameters found (falls back to the starting point).
        self._target.set_param_values(opt_params, trainable=True)
| 6,910 | 41.925466 | 120 | py |
rllab | rllab-master/rllab/optimizers/hessian_free_optimizer.py | from rllab.misc.ext import compile_function, lazydict
from rllab.core.serializable import Serializable
from rllab.optimizers.hf import hf_optimizer
import time
from rllab.optimizers.minibatch_dataset import BatchDataset
class HessianFreeOptimizer(Serializable):
    """
    Performs unconstrained optimization via Hessian-Free Optimization
    """

    def __init__(self, max_opt_itr=20, batch_size=32, cg_batch_size=100, callback=None):
        """
        :param max_opt_itr: number of HF parameter updates to run.
        :param batch_size: mini-batch size used for gradient computation.
        :param cg_batch_size: mini-batch size used for the CG curvature products.
        :param callback: optional function called once per update with progress stats.
        """
        Serializable.quick_init(self, locals())
        self._max_opt_itr = max_opt_itr
        self._opt_fun = None
        self._target = None
        self._batch_size = batch_size
        self._cg_batch_size = cg_batch_size
        self._hf_optimizer = None
        self._callback = callback

    def update_opt(self, loss, target, inputs, network_outputs, extra_inputs=None):
        """
        :param loss: Symbolic expression for the loss function.
        :param target: A parameterized object to optimize over. It should implement methods of the
        :class:`rllab.core.paramerized.Parameterized` class.
        :param inputs: A list of symbolic variables as inputs
        :return: No return value.
        """
        self._target = target
        if extra_inputs is None:
            extra_inputs = list()
        # Delegate the actual HF machinery (CG, damping, line search) to hf_optimizer.
        self._hf_optimizer = hf_optimizer(
            _p=target.get_params(trainable=True),
            inputs=(inputs + extra_inputs),
            s=network_outputs,
            costs=[loss],
        )
        self._opt_fun = lazydict(
            f_loss=lambda: compile_function(inputs + extra_inputs, loss),
        )

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the compiled loss on the given inputs."""
        if extra_inputs is None:
            extra_inputs = list()
        return self._opt_fun["f_loss"](*(inputs + extra_inputs))

    def optimize(self, inputs, extra_inputs=None):
        """Run HF training for ``max_opt_itr`` updates over the given inputs."""
        if extra_inputs is None:
            extra_inputs = list()
        # import ipdb; ipdb.set_trace()
        dataset = BatchDataset(inputs=inputs, batch_size=self._batch_size, extra_inputs=extra_inputs)
        cg_dataset = BatchDataset(inputs=inputs, batch_size=self._cg_batch_size, extra_inputs=extra_inputs)

        # `itr` is a one-element list so the closure below can mutate the counter.
        itr = [0]
        start_time = time.time()

        if self._callback:
            def opt_callback():
                loss = self._opt_fun["f_loss"](*(inputs + extra_inputs))
                elapsed = time.time() - start_time
                self._callback(dict(
                    loss=loss,
                    params=self._target.get_param_values(trainable=True),
                    itr=itr[0],
                    elapsed=elapsed,
                ))
                itr[0] += 1
        else:
            opt_callback = None

        self._hf_optimizer.train(
            gradient_dataset=dataset,
            cg_dataset=cg_dataset,
            itr_callback=opt_callback,
            num_updates=self._max_opt_itr,
            preconditioner=True,
            verbose=True
        )
| 2,936 | 32.758621 | 107 | py |
rllab | rllab-master/rllab/optimizers/hf.py | # Author: Nicolas Boulanger-Lewandowski
# University of Montreal, 2012-2013
import numpy, sys
import theano
import theano.tensor as T
import pickle
import os
from rllab.misc.ext import compile_function
import collections
def gauss_newton_product(cost, p, v, s):
    """Symbolically build the Gauss-Newton matrix-vector product Gv = J'HJv.

    ``cost`` is the scalar objective, ``p`` the parameter list, ``v`` the vector
    (one tensor per parameter), and ``s`` the output variable(s) with respect to
    which the Hessian of ``cost`` is implicitly taken. When ``s`` is a list, the
    per-output products are summed elementwise.
    """
    outputs = s if isinstance(s, (list, tuple)) else [s]
    total = None
    for out in outputs:
        Jv = T.Rop(out, p, v)
        grad_cost = T.grad(cost, out, disconnected_inputs='ignore')
        HJv = T.grad(T.sum(grad_cost * Jv), out, consider_constant=[Jv], disconnected_inputs='ignore')
        Gv = T.grad(T.sum(HJv * out), p, consider_constant=[HJv, Jv], disconnected_inputs='ignore')
        Gv = [T.as_tensor_variable(g) for g in Gv]  # for CudaNdarray
        total = Gv if total is None else [a + b for a, b in zip(Gv, total)]
    return total
class hf_optimizer:
    '''Black-box Theano-based Hessian-free optimizer.

    See (Martens, ICML 2010) and (Martens & Sutskever, ICML 2011) for details.

    Useful functions:
    __init__ :
        Compiles necessary Theano functions from symbolic expressions.
    train :
        Performs HF optimization following the above references.'''

    def __init__(self, _p, inputs, s, costs, h=None, ha=None):
        '''Constructs and compiles the necessary Theano functions.

        p : list of Theano shared variables
            Parameters of the model to be optimized.
        inputs : list of Theano variables
            Symbolic variables that are inputs to your graph (they should also
            include your model 'output'). Your training examples must fit these.
        s : Theano variable
            Symbolic variable with respect to which the Hessian of the objective is
            positive-definite, implicitly defining the Gauss-Newton matrix. Typically,
            it is the activation of the output layer.
        costs : list of Theano variables
            Monitoring costs, the first of which will be the optimized objective.
        h: Theano variable or None
            Structural damping is applied to this variable (typically the hidden units
            of an RNN).
        ha: Theano variable or None
            Symbolic variable that implicitly defines the Gauss-Newton matrix for the
            structural damping term (typically the activation of the hidden layer). If
            None, it will be set to `h`.'''
        self.p = _p
        self.shapes = [i.get_value().shape for i in _p]
        self.sizes = list(map(numpy.prod, self.shapes))
        # Offset of each parameter inside the flattened parameter vector.
        self.positions = numpy.cumsum([0] + self.sizes)[:-1]

        g = T.grad(costs[0], _p)
        g = list(map(T.as_tensor_variable, g))  # for CudaNdarray
        self.f_gc = compile_function(inputs, g + costs)  # during gradient computation
        self.f_cost = compile_function(inputs, costs)  # for quick cost evaluation

        symbolic_types = T.scalar, T.vector, T.matrix, T.tensor3, T.tensor4
        v = [symbolic_types[len(i)]() for i in self.shapes]
        Gv = gauss_newton_product(costs[0], _p, v, s)

        coefficient = T.scalar()  # this is lambda*mu
        if h is not None:  # structural damping with cross-entropy
            h_constant = symbolic_types[h.ndim]()  # T.Rop does not support `consider_constant` yet, so use `givens`
            structural_damping = coefficient * (
                -h_constant * T.log(h + 1e-10) - (1 - h_constant) * T.log((1 - h) + 1e-10)).sum() / h.shape[0]
            if ha is None: ha = h
            Gv_damping = gauss_newton_product(structural_damping, _p, v, ha)
            Gv = [a + b for a, b in zip(Gv, Gv_damping)]
            givens = {h_constant: h}
        else:
            givens = {}

        self.function_Gv = compile_function(inputs + v + [coefficient], Gv, givens=givens)

    def quick_cost(self, delta=0):
        '''Evaluate the objective (costs[0]) over the CG batch at
        `current params` + delta. delta can be a flat vector or a list
        (else it is not used).'''
        if isinstance(delta, numpy.ndarray):
            delta = self.flat_to_list(delta)

        if type(delta) in (list, tuple):
            for i, d in zip(self.p, delta):
                i.set_value(i.get_value() + d)

        cost = numpy.mean([self.f_cost(*i)[0] for i in self.cg_dataset.iterate(update=False)])

        if type(delta) in (list, tuple):
            # Restore the original parameter values.
            for i, d in zip(self.p, delta):
                i.set_value(i.get_value() - d)

        return cost

    def cg(self, b, verbose=False):
        '''Preconditioned conjugate gradient on the (damped) Gauss-Newton system,
        with CG-backtracking. Returns (cost, x, backtracked_iter, total_iters).'''
        if self.preconditioner:
            # Martens' diagonal preconditioner from squared per-batch gradients.
            M = self.lambda_ * numpy.ones_like(b)
            for inputs in self.cg_dataset.iterate(update=False):
                M += self.list_to_flat(self.f_gc(*inputs)[:len(self.p)]) ** 2  # / self.cg_dataset.number_batches**2
            # print 'precond~%.3f,' % (M - self.lambda_).mean(),
            M **= -0.75  # actually 1/M
            sys.stdout.flush()
        else:
            M = 1.0

        x = self.cg_last_x if hasattr(self, 'cg_last_x') else numpy.zeros_like(b)  # sharing information between CG runs
        r = b - self.batch_Gv(x)
        d = M * r
        delta_new = numpy.dot(r, d)
        phi = []
        backtracking = []
        backspaces = 0

        for i in range(1, 1 + self.max_cg_iterations):
            # adapted from http://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf (p.51)
            q = self.batch_Gv(d)
            dq = numpy.dot(d, q)
            # assert dq > 0, 'negative curvature'
            alpha = delta_new / dq
            x = x + alpha * d
            r = r - alpha * q
            s = M * r
            delta_old = delta_new
            delta_new = numpy.dot(r, s)
            d = s + (delta_new / delta_old) * d

            # Record exponentially spaced iterates as backtracking candidates.
            if i >= int(numpy.ceil(1.3 ** len(backtracking))):
                backtracking.append((self.quick_cost(x), x.copy(), i))

            phi_i = -0.5 * numpy.dot(x, r + b)
            phi.append(phi_i)

            if verbose:
                progress = ' [CG iter %i, phi=%+.5f, cost=%.5f]' % (i, phi_i, backtracking[-1][0])
                sys.stdout.write('\b' * backspaces + progress)
                sys.stdout.flush()
                backspaces = len(progress)

            # FIX: use integer division; under Python 3, i/10 makes k a float and
            # phi[-k - 1] raises TypeError once i > 100 (Python 2 semantics were integral).
            k = max(10, i // 10)
            if i > k and phi_i < 0 and (phi_i - phi[-k - 1]) / phi_i < k * 0.0005:
                break

        self.cg_last_x = x.copy()

        if self.global_backtracking:
            j = numpy.argmin([b[0] for b in backtracking])
        else:
            j = len(backtracking) - 1
            while j > 0 and backtracking[j - 1][0] < backtracking[j][0]:
                j -= 1
        if verbose:
            print(' backtracked %i/%i' % (backtracking[j][2], i), end=' ')
            sys.stdout.flush()

        return backtracking[j] + (i,)

    def flat_to_list(self, vector):
        '''Split a flat parameter vector back into per-parameter arrays.'''
        return [vector[position:position + size].reshape(shape) for shape, size, position in
                zip(self.shapes, self.sizes, self.positions)]

    def list_to_flat(self, l):
        '''Concatenate a list of per-parameter arrays into one flat vector.'''
        return numpy.concatenate([i.flatten() for i in l])

    def batch_Gv(self, vector, lambda_=None):
        '''Compute the damped Gauss-Newton product G*vector averaged over the CG batches.'''
        v = self.flat_to_list(vector)
        if lambda_ is None: lambda_ = self.lambda_
        result = lambda_ * vector  # Tikhonov damping
        for inputs in self.cg_dataset.iterate(False):
            result += self.list_to_flat(
                self.function_Gv(*(inputs + v + [lambda_ * self.mu]))) / self.cg_dataset.number_batches
        return result

    def train(self, gradient_dataset, cg_dataset, initial_lambda=0.1, mu=0.03, global_backtracking=False,
              preconditioner=False, max_cg_iterations=250, num_updates=100, validation=None, validation_frequency=1,
              patience=numpy.inf, save_progress=None, itr_callback=None, verbose=False):
        '''Performs HF training.

        gradient_dataset : SequenceDataset-like object
            Defines batches used to compute the gradient.
            The `iterate(update=True)` method should yield shuffled training examples
            (tuples of variables matching your graph inputs).
            The same examples MUST be returned between multiple calls to iterator(),
            unless update is True, in which case the next batch should be different.
        cg_dataset : SequenceDataset-like object
            Defines batches used to compute CG iterations.
        initial_lambda : float
            Initial value of the Tikhonov damping coefficient.
        mu : float
            Coefficient for structural damping.
        global_backtracking : Boolean
            If True, backtracks as much as necessary to find the global minimum among
            all CG iterates. Else, Martens' heuristic is used.
        preconditioner : Boolean
            Whether to use Martens' preconditioner.
        max_cg_iterations : int
            CG stops after this many iterations regardless of the stopping criterion.
        num_updates : int
            Training stops after this many parameter updates regardless of `patience`.
        validation: SequenceDataset object, (lambda : tuple) callback, or None
            If a SequenceDataset object is provided, the training monitoring costs
            will be evaluated on that validation dataset.
            If a callback is provided, it should return a list of validation costs
            for monitoring, the first of which is also used for early stopping.
            If None, no early stopping nor validation monitoring is performed.
        validation_frequency: int
            Validation is performed every `validation_frequency` updates.
        patience: int
            Training stops after `patience` updates without improvement in validation
            cost.
        save_progress: string or None
            A checkpoint is automatically saved at this location after each update.
            Call the `train` function again with the same parameters to resume
            training.'''
        self.lambda_ = initial_lambda
        self.mu = mu
        self.global_backtracking = global_backtracking
        self.cg_dataset = cg_dataset
        self.preconditioner = preconditioner
        self.max_cg_iterations = max_cg_iterations
        best = [0, numpy.inf, None]  # iteration, cost, params
        first_iteration = 1
        if isinstance(save_progress, str) and os.path.isfile(save_progress):
            # FIX: Python 3 has no `file()` builtin; open the checkpoint in binary mode.
            with open(save_progress, 'rb') as checkpoint:
                save = pickle.load(checkpoint)
            self.cg_last_x, best, self.lambda_, first_iteration, init_p = save
            first_iteration += 1
            # NOTE(review): `init_p` is unpacked but never written back into self.p,
            # so a resumed run keeps the current shared-variable values — confirm intent.
            if verbose: print('* recovered saved model')

        try:
            for u in range(first_iteration, 1 + num_updates):
                if verbose: print('update %i/%i,' % (u, num_updates), end=' ')
                sys.stdout.flush()

                # Accumulate the averaged gradient and monitoring costs over the batches.
                gradient = numpy.zeros(sum(self.sizes), dtype=theano.config.floatX)
                costs = []
                for inputs in gradient_dataset.iterate(update=True):
                    result = self.f_gc(*inputs)
                    gradient += self.list_to_flat(result[:len(self.p)]) / gradient_dataset.number_batches
                    costs.append(result[len(self.p):])

                if verbose: print('cost=', numpy.mean(costs, axis=0), end=' ')
                if verbose: print('lambda=%.5f,' % self.lambda_, end=' ')
                sys.stdout.flush()

                after_cost, flat_delta, backtracking, num_cg_iterations = self.cg(-gradient)
                delta_cost = numpy.dot(flat_delta,
                                       gradient + 0.5 * self.batch_Gv(flat_delta, lambda_=0))  # disable damping
                before_cost = self.quick_cost()
                for i, delta in zip(self.p, self.flat_to_list(flat_delta)):
                    i.set_value(i.get_value() + delta)
                cg_dataset.update()
                if itr_callback is not None:
                    itr_callback()

                rho = (after_cost - before_cost) / delta_cost  # Levenberg-Marquardt
                # Adjust Tikhonov damping from the reduction ratio.
                if rho < 0.25:
                    self.lambda_ *= 1.5
                elif rho > 0.75:
                    self.lambda_ /= 1.5

                if validation is not None and u % validation_frequency == 0:
                    if hasattr(validation, 'iterate'):
                        costs = numpy.mean([self.f_cost(*i) for i in validation.iterate()], axis=0)
                    elif callable(validation):  # FIX: collections.Callable was removed in Python 3.10
                        costs = validation()
                    if verbose: print('validation=', costs, end=' ')
                    if costs[0] < best[1]:
                        best = u, costs[0], [i.get_value().copy() for i in self.p]
                        if verbose: print('*NEW BEST', end=' ')

                if isinstance(save_progress, str):
                    # do not save dataset states
                    save = self.cg_last_x, best, self.lambda_, u, [i.get_value().copy() for i in self.p]
                    # FIX: use open() instead of the Python-2-only file() builtin.
                    with open(save_progress, 'wb') as checkpoint:
                        pickle.dump(save, checkpoint, pickle.HIGHEST_PROTOCOL)

                if u - best[0] > patience:
                    if verbose: print('PATIENCE ELAPSED, BAILING OUT')
                    break
                if verbose:
                    print()
                sys.stdout.flush()
        except KeyboardInterrupt:
            if verbose: print('Interrupted by user.')

        if best[2] is None:
            best[2] = [i.get_value().copy() for i in self.p]
        return best[2]
class SequenceDataset:
    """Slices, shuffles and manages a small dataset for the HF optimizer."""

    def __init__(self, data, batch_size, number_batches, minimum_size=10):
        """Build the shuffled pool of mini-batches.

        data : list of lists of numpy arrays
            One outer list per graph input, each containing variable-length
            tensors (sequences or example sets) used as mini-batches.
        batch_size : int or None
            If an int, each tensor is further chopped into chunks of this
            length along its leading dimension; all tensors must then share
            that leading dimension. If None, each tensor is one item.
        number_batches : int
            Number of mini-batches yielded per call to ``iterate``.
        minimum_size : int
            Chunks that would end up shorter than this are discarded.
        """
        self.current_batch = 0
        self.number_batches = number_batches
        self.items = []
        n_inputs = len(data)
        for seq_idx in range(len(data[0])):
            if batch_size is None:
                self.items.append([data[inp][seq_idx] for inp in range(n_inputs)])
            else:
                seq_len = len(data[0][seq_idx])
                for start in range(0, seq_len - minimum_size + 1, batch_size):
                    chunk = [data[inp][seq_idx][start:start + batch_size]
                             for inp in range(n_inputs)]
                    self.items.append(chunk)
        self.shuffle()

    def shuffle(self):
        """Randomly permute the mini-batch pool in place."""
        numpy.random.shuffle(self.items)

    def iterate(self, update=True):
        """Yield ``number_batches`` consecutive items, wrapping around the pool."""
        pool_size = len(self.items)
        for offset in range(self.number_batches):
            yield self.items[(self.current_batch + offset) % pool_size]
        if update:
            self.update()

    def update(self):
        """Advance the cursor; reshuffle once the pool would be exhausted."""
        if self.current_batch + self.number_batches >= len(self.items):
            self.shuffle()
            self.current_batch = 0
        else:
            self.current_batch += self.number_batches
| 15,714 | 42.896648 | 138 | py |
rllab | rllab-master/rllab/optimizers/conjugate_gradient_optimizer.py | from rllab.misc import ext
from rllab.misc import krylov
from rllab.misc import logger
from rllab.core.serializable import Serializable
import theano.tensor as TT
import theano
import itertools
import numpy as np
from rllab.misc.ext import sliced_fun
from _ast import Num
class PerlmutterHvp(Serializable):
    """Hessian-vector products via Pearlmutter's trick: differentiate the
    inner product of the gradient with the probe vector (exact, double backprop)."""

    def __init__(self, num_slices=1):
        # num_slices: number of chunks the inputs are sliced into when evaluating
        # the compiled Hvp, trading memory for speed via sliced_fun.
        Serializable.quick_init(self, locals())
        self.target = None
        self.reg_coeff = None
        self.opt_fun = None
        self._num_slices = num_slices

    def update_opt(self, f, target, inputs, reg_coeff):
        """Compile the Hessian-vector product of scalar expression ``f`` with
        respect to ``target``'s trainable parameters; ``reg_coeff`` adds
        Tikhonov regularization at evaluation time."""
        self.target = target
        self.reg_coeff = reg_coeff
        params = target.get_params(trainable=True)
        constraint_grads = theano.grad(
            f, wrt=params, disconnected_inputs='warn')
        # One probe tensor per parameter, matching its shape/dtype.
        xs = tuple([ext.new_tensor_like("%s x" % p.name, p) for p in params])

        def Hx_plain():
            # grad(<grad f, x>) = H x  (Pearlmutter, 1994).
            Hx_plain_splits = TT.grad(
                TT.sum([TT.sum(g * x)
                        for g, x in zip(constraint_grads, xs)]),
                wrt=params,
                disconnected_inputs='warn'
            )
            return TT.concatenate([TT.flatten(s) for s in Hx_plain_splits])

        self.opt_fun = ext.lazydict(
            f_Hx_plain=lambda: ext.compile_function(
                inputs=inputs + xs,
                outputs=Hx_plain(),
                log_name="f_Hx_plain",
            ),
        )

    def build_eval(self, inputs):
        """Return a callable x -> (H + reg_coeff*I) x for the fixed ``inputs``."""
        def eval(x):
            xs = tuple(self.target.flat_to_params(x, trainable=True))
            ret = sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(
                inputs, xs) + self.reg_coeff * x
            return ret
        return eval
class FiniteDifferenceHvp(Serializable):
    """Hessian-vector products approximated by finite differences of the gradient."""

    def __init__(self, base_eps=1e-8, symmetric=True, grad_clip=None, num_slices=1):
        # base_eps: base perturbation size, rescaled by 1/||params|| at eval time.
        # symmetric: central difference (two gradient evals) vs forward difference.
        # grad_clip: stored but not used within this class — TODO confirm external use.
        Serializable.quick_init(self, locals())
        self.base_eps = base_eps
        self.symmetric = symmetric
        self.grad_clip = grad_clip
        self._num_slices = num_slices

    def update_opt(self, f, target, inputs, reg_coeff):
        """Compile the gradient of ``f`` and set up the finite-difference Hvp."""
        self.target = target
        self.reg_coeff = reg_coeff
        params = target.get_params(trainable=True)
        constraint_grads = theano.grad(
            f, wrt=params, disconnected_inputs='warn')
        flat_grad = ext.flatten_tensor_variables(constraint_grads)

        def f_Hx_plain(*args):
            # args = (*inputs, *probe tensors); split them back apart.
            inputs_ = args[:len(inputs)]
            xs = args[len(inputs):]
            flat_xs = np.concatenate([np.reshape(x, (-1,)) for x in xs])
            param_val = self.target.get_param_values(trainable=True)
            # Scale the step by the parameter norm for numerical stability.
            eps = np.cast['float32'](
                self.base_eps / (np.linalg.norm(param_val) + 1e-8))
            self.target.set_param_values(
                param_val + eps * flat_xs, trainable=True)
            flat_grad_dvplus = self.opt_fun["f_grad"](*inputs_)
            if self.symmetric:
                # Central difference: (g(p + eps*x) - g(p - eps*x)) / (2*eps).
                self.target.set_param_values(
                    param_val - eps * flat_xs, trainable=True)
                flat_grad_dvminus = self.opt_fun["f_grad"](*inputs_)
                hx = (flat_grad_dvplus - flat_grad_dvminus) / (2 * eps)
                self.target.set_param_values(param_val, trainable=True)
            else:
                # Forward difference: (g(p + eps*x) - g(p)) / eps.
                self.target.set_param_values(param_val, trainable=True)
                # NOTE(review): this local numpy array shadows the symbolic
                # `flat_grad` defined above; harmless here but confusing.
                flat_grad = self.opt_fun["f_grad"](*inputs_)
                hx = (flat_grad_dvplus - flat_grad) / eps
            return hx

        self.opt_fun = ext.lazydict(
            f_grad=lambda: ext.compile_function(
                inputs=inputs,
                outputs=flat_grad,
                log_name="f_grad",
            ),
            f_Hx_plain=lambda: f_Hx_plain,
        )

    def build_eval(self, inputs):
        """Return a callable x -> (H + reg_coeff*I) x for the fixed ``inputs``."""
        def eval(x):
            xs = tuple(self.target.flat_to_params(x, trainable=True))
            ret = sliced_fun(self.opt_fun["f_Hx_plain"], self._num_slices)(
                inputs, xs) + self.reg_coeff * x
            return ret
        return eval
class ConjugateGradientOptimizer(Serializable):
"""
Performs constrained optimization via line search. The search direction is computed using a conjugate gradient
algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient
of the loss function.
"""
def __init__(
        self,
        cg_iters=10,
        reg_coeff=1e-5,
        subsample_factor=1.,
        backtrack_ratio=0.8,
        max_backtracks=15,
        accept_violation=False,
        hvp_approach=None,
        num_slices=1):
    """
    :param cg_iters: The number of CG iterations used to calculate A^-1 g
    :param reg_coeff: A small value so that A -> A + reg*I
    :param subsample_factor: Subsampling factor to reduce samples when using "conjugate gradient. Since the
    computation time for the descent direction dominates, this can greatly reduce the overall computation time.
    :param backtrack_ratio: multiplicative step-shrink factor — presumably used by the
    backtracking line search implemented outside this excerpt; confirm against optimize().
    :param max_backtracks: maximum number of line-search backtracking steps.
    :param accept_violation: whether to accept the descent step if it violates the line search condition after
    exhausting all backtracking budgets
    :param hvp_approach: strategy object for Hessian-vector products; defaults to PerlmutterHvp.
    :param num_slices: number of input slices used when evaluating compiled functions (memory/speed trade-off).
    :return:
    """
    Serializable.quick_init(self, locals())
    self._cg_iters = cg_iters
    self._reg_coeff = reg_coeff
    self._subsample_factor = subsample_factor
    self._backtrack_ratio = backtrack_ratio
    self._max_backtracks = max_backtracks
    self._num_slices = num_slices

    self._opt_fun = None
    self._target = None
    self._max_constraint_val = None
    self._constraint_name = None
    self._accept_violation = accept_violation
    # Exact Pearlmutter Hvp is the default curvature-product strategy.
    if hvp_approach is None:
        hvp_approach = PerlmutterHvp(num_slices)
    self._hvp_approach = hvp_approach
def update_opt(self, loss, target, leq_constraint, inputs, extra_inputs=None, constraint_name="constraint", *args,
**kwargs):
"""
:param loss: Symbolic expression for the loss function.
:param target: A parameterized object to optimize over. It should implement methods of the
:class:`rllab.core.paramerized.Parameterized` class.
:param leq_constraint: A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
:param inputs: A list of symbolic variables as inputs, which could be subsampled if needed. It is assumed
that the first dimension of these inputs should correspond to the number of data points
:param extra_inputs: A list of symbolic variables as extra inputs which should not be subsampled
:return: No return value.
"""
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
else:
extra_inputs = tuple(extra_inputs)
constraint_term, constraint_value = leq_constraint
params = target.get_params(trainable=True)
grads = theano.grad(loss, wrt=params, disconnected_inputs='warn')
flat_grad = ext.flatten_tensor_variables(grads)
self._hvp_approach.update_opt(f=constraint_term, target=target, inputs=inputs + extra_inputs,
reg_coeff=self._reg_coeff)
self._target = target
self._max_constraint_val = constraint_value
self._constraint_name = constraint_name
self._opt_fun = ext.lazydict(
f_loss=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=loss,
log_name="f_loss",
),
f_grad=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=flat_grad,
log_name="f_grad",
),
f_constraint=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=constraint_term,
log_name="constraint",
),
f_loss_constraint=lambda: ext.compile_function(
inputs=inputs + extra_inputs,
outputs=[loss, constraint_term],
log_name="f_loss_constraint",
),
)
def loss(self, inputs, extra_inputs=None):
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
return sliced_fun(self._opt_fun["f_loss"], self._num_slices)(inputs, extra_inputs)
def constraint_val(self, inputs, extra_inputs=None):
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
return sliced_fun(self._opt_fun["f_constraint"], self._num_slices)(inputs, extra_inputs)
def optimize(self, inputs, extra_inputs=None, subsample_grouped_inputs=None):
inputs = tuple(inputs)
if extra_inputs is None:
extra_inputs = tuple()
if self._subsample_factor < 1:
if subsample_grouped_inputs is None:
subsample_grouped_inputs = [inputs]
subsample_inputs = tuple()
for inputs_grouped in subsample_grouped_inputs:
n_samples = len(inputs_grouped[0])
inds = np.random.choice(
n_samples, int(n_samples * self._subsample_factor), replace=False)
subsample_inputs += tuple([x[inds] for x in inputs_grouped])
else:
subsample_inputs = inputs
logger.log("computing loss before")
loss_before = sliced_fun(self._opt_fun["f_loss"], self._num_slices)(
inputs, extra_inputs)
logger.log("performing update")
logger.log("computing descent direction")
flat_g = sliced_fun(self._opt_fun["f_grad"], self._num_slices)(
inputs, extra_inputs)
Hx = self._hvp_approach.build_eval(subsample_inputs + extra_inputs)
descent_direction = krylov.cg(Hx, flat_g, cg_iters=self._cg_iters)
initial_step_size = np.sqrt(
2.0 * self._max_constraint_val *
(1. / (descent_direction.dot(Hx(descent_direction)) + 1e-8))
)
if np.isnan(initial_step_size):
initial_step_size = 1.
flat_descent_step = initial_step_size * descent_direction
logger.log("descent direction computed")
prev_param = np.copy(self._target.get_param_values(trainable=True))
n_iter = 0
for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):
cur_step = ratio * flat_descent_step
cur_param = prev_param - cur_step
self._target.set_param_values(cur_param, trainable=True)
loss, constraint_val = sliced_fun(
self._opt_fun["f_loss_constraint"], self._num_slices)(inputs, extra_inputs)
if loss < loss_before and constraint_val <= self._max_constraint_val:
break
if (np.isnan(loss) or np.isnan(constraint_val) or loss >= loss_before or constraint_val >=
self._max_constraint_val) and not self._accept_violation:
logger.log("Line search condition violated. Rejecting the step!")
if np.isnan(loss):
logger.log("Violated because loss is NaN")
if np.isnan(constraint_val):
logger.log("Violated because constraint %s is NaN" %
self._constraint_name)
if loss >= loss_before:
logger.log("Violated because loss not improving")
if constraint_val >= self._max_constraint_val:
logger.log(
"Violated because constraint %s is violated" % self._constraint_name)
self._target.set_param_values(prev_param, trainable=True)
logger.log("backtrack iters: %d" % n_iter)
logger.log("computing loss after")
logger.log("optimization finished")
| 11,870 | 38.969697 | 119 | py |
rllab | rllab-master/rllab/optimizers/__init__.py | 0 | 0 | 0 | py | |
rllab | rllab-master/rllab/optimizers/minibatch_dataset.py | import numpy as np
class BatchDataset(object):
    """
    Wraps a list of equally-sized input arrays and serves them as (shuffled) minibatches.

    :param inputs: list of array-likes, all sharing the same first dimension (number of samples).
    :param batch_size: size of each minibatch, or None to yield the full dataset as a single batch.
    :param extra_inputs: values appended unchanged to every yielded batch (never subsampled).
    """

    def __init__(self, inputs, batch_size, extra_inputs=None):
        self._inputs = list(inputs)
        if extra_inputs is None:
            extra_inputs = []
        self._extra_inputs = extra_inputs
        self._batch_size = batch_size
        if batch_size is not None:
            # Index permutation used to draw shuffled minibatches.
            self._ids = np.arange(self._inputs[0].shape[0])
            self.update()

    @property
    def number_batches(self):
        """Number of minibatches in one full pass (1 when batch_size is None)."""
        if self._batch_size is None:
            return 1
        return int(np.ceil(self._inputs[0].shape[0] * 1.0 / self._batch_size))

    def iterate(self, update=True):
        """
        Yield one full pass over the data as lists [input_0[batch], ..., extra_0, ...].

        :param update: when True, reshuffle the sample order after the pass completes.
        """
        if self._batch_size is None:
            yield list(self._inputs) + list(self._extra_inputs)
        else:
            for itr in range(self.number_batches):
                batch_start = itr * self._batch_size
                batch_end = (itr + 1) * self._batch_size
                batch_ids = self._ids[batch_start:batch_end]
                batch = [d[batch_ids] for d in self._inputs]
                yield list(batch) + list(self._extra_inputs)
        if update:
            self.update()

    def update(self):
        """Reshuffle the sample order (no-op when batch_size is None)."""
        # Bug fix: this previously shuffled self._ids unconditionally, but self._ids is never
        # created when batch_size is None, so exhausting iterate() raised AttributeError.
        if self._batch_size is not None:
            np.random.shuffle(self._ids)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.