python_code
stringlengths 0
780k
| repo_name
stringlengths 7
38
| file_path
stringlengths 5
103
|
|---|---|---|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Puppeteers for gift_refinements."""
from collections.abc import Mapping
import dm_env
from meltingpot.utils.puppeteers import puppeteer
import numpy as np
import tree
# A puppeteer observation: maps observation names (e.g. 'INVENTORY') to
# (possibly nested) numpy arrays.
Observation = Mapping[str, tree.Structure[np.ndarray]]
class GiftRefinementsCooperator(puppeteer.Puppeteer[tuple[()]]):
  """Cooperator puppeteer for gift refinements.

  This puppeteer expresses a cooperative high level policy:

  1. Collect tokens when the inventory is empty.
  2. If the inventory is not empty but contains no refined tokens, gift some
     tokens.
  3. If the player has refined tokens, consume them.

  This means that a GiftRefinementsCooperator will start by grabbing a token,
  and then gift it. As soon as they receive any gift from anyone, they would
  consume.
  """

  def __init__(
      self,
      *,
      collect_goal: puppeteer.PuppetGoal,
      gift_goal: puppeteer.PuppetGoal,
      consume_goal: puppeteer.PuppetGoal,
  ):
    """Initializes the puppeteer.

    Args:
      collect_goal: goal to emit to puppet when "collecting".
      gift_goal: goal to emit to puppet when "gifting".
      consume_goal: goal to emit to puppet when "consuming".
    """
    self._collect_goal = collect_goal
    self._gift_goal = gift_goal
    self._consume_goal = consume_goal

  def initial_state(self) -> tuple[()]:
    """See base class."""
    # This puppeteer is stateless; the state is always the empty tuple.
    return ()

  def should_consume(self, observation: Observation) -> bool:
    """Decides whether we should consume tokens in our inventory.

    Args:
      observation: observation whose 'INVENTORY' entry is a triple of token
        counts (unrefined, refined, twice refined).

    Returns:
      True if the player holds any refined or twice-refined tokens.
    """
    _, refined, twice_refined = observation['INVENTORY']
    return bool(refined) or bool(twice_refined)

  def step(self, timestep: dm_env.TimeStep,
           prev_state: tuple[()]) -> tuple[dm_env.TimeStep, tuple[()]]:
    """See base class."""
    # A nonzero inventory total means we either consume or gift;
    # otherwise go collect.
    if np.sum(timestep.observation['INVENTORY']):
      if self.should_consume(timestep.observation):
        goal = self._consume_goal
      else:
        goal = self._gift_goal
    else:
      goal = self._collect_goal
    # Return the encoded cumulant associated with the current goal.
    timestep = puppeteer.puppet_timestep(timestep, goal)
    return timestep, prev_state
class GiftRefinementsExtremeCooperator(GiftRefinementsCooperator):
  """Cooperator that gifts until it has tokens of type 2 (double refinement).

  Like GiftRefinementsCooperator, this puppeteer starts by grabbing a token
  and gifting it. However, upon receiving a gift it gifts back, and it only
  consumes once it receives a doubly refined token.
  """

  def should_consume(self, observation: Observation) -> bool:
    """Returns True only when the inventory holds a doubly refined token."""
    unused_raw, unused_refined, twice_refined = observation['INVENTORY']
    return bool(twice_refined > 0)
|
meltingpot-main
|
meltingpot/utils/puppeteers/gift_refinements.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Puppeteers for coins."""
import dataclasses
import dm_env
from meltingpot.utils.puppeteers import puppeteer
@dataclasses.dataclass(frozen=True)
class ReciprocatorState:
  """Immutable per-episode state threaded through Reciprocator.step.

  Attributes:
    step_count: number of timesteps previously seen in this episode.
    spite_until: earliest step_count at which to stop being spiteful.
    defect_until: earliest step_count at which to stop defecting.
    recent_defection: defection levels observed on previous timesteps,
      ordered from oldest to most recent.
  """
  step_count: int
  spite_until: int
  defect_until: int
  recent_defection: tuple[int, ...]
class Reciprocator(puppeteer.Puppeteer[ReciprocatorState]):
  """Puppeteer for a reciprocating agent.

  This puppeteer's behavior depends on the behavior of others. In particular,
  it tracks the total amount of others' defection, and integrates this signal
  using a rolling window.

  Initially, the puppeteer will be in a cooperation mode where it will direct
  the puppet to cooperate with others. However, once the total level of
  defection reaches threshold, the puppeteer will switch to a defection
  routine. This routine starts with some amount of spite, then plain
  defection. Once the routine is complete, the puppeteer will return to the
  cooperative mode.

  At any point, if the integrated level of defection again reaches threshold,
  the defection routine will be triggered again from the beginning.
  """

  def __init__(
      self,
      *,
      cooperate_goal: puppeteer.PuppetGoal,
      defect_goal: puppeteer.PuppetGoal,
      spite_goal: puppeteer.PuppetGoal,
      partner_defection_signal: str,
      recency_window: int,
      threshold: int,
      frames_to_punish: int,
      spiteful_punishment_window: int,
  ) -> None:
    """Initializes the puppeteer.

    Args:
      cooperate_goal: goal to emit to puppet when "cooperating".
      defect_goal: goal to emit to puppet when "defecting".
      spite_goal: goal to emit to puppet when being "spiteful".
      partner_defection_signal: key in observations that provides the level of
        partner defection in the previous timestep.
      recency_window: number of steps over which to remember others' behavior.
      threshold: if the total level of defection over the remembered period
        reaches this threshold, the puppeteer triggers its punishment routine
        (spite followed by plain defection).
      frames_to_punish: the number of steps to not cooperate for when
        triggered by others' behavior.
      spiteful_punishment_window: the number of steps to be spiteful for when
        triggered by others' behavior.

    Raises:
      ValueError: if any numeric argument is out of range.
    """
    self._cooperate_goal = cooperate_goal
    self._defect_goal = defect_goal
    self._spite_goal = spite_goal
    self._partner_defection_signal = partner_defection_signal
    # Guard clauses: validate each numeric argument before storing it.
    if threshold <= 0:
      raise ValueError('threshold must be positive')
    self._threshold = threshold
    if recency_window <= 0:
      raise ValueError('recency_window must be positive')
    self._recency_window = recency_window
    if frames_to_punish <= 0:
      raise ValueError('frames_to_punish must be positive.')
    self._frames_to_punish = frames_to_punish
    if not 0 <= spiteful_punishment_window <= frames_to_punish:
      raise ValueError('spiteful_punishment_window must be nonnegative and '
                       'no greater than frames_to_punish')
    self._spiteful_punishment_window = spiteful_punishment_window

  def initial_state(self) -> ReciprocatorState:
    """See base class."""
    return ReciprocatorState(
        step_count=0, spite_until=0, defect_until=0, recent_defection=())

  def step(
      self, timestep: dm_env.TimeStep, prev_state: ReciprocatorState
  ) -> tuple[dm_env.TimeStep, ReciprocatorState]:
    """See base class."""
    if timestep.first():
      # New episode: discard any carried-over state.
      prev_state = self.initial_state()
    step_count = prev_state.step_count
    spite_until = prev_state.spite_until
    defect_until = prev_state.defect_until
    recent_defection = prev_state.recent_defection

    # Integrate the partner's defection signal over the recency window.
    partner_defection = int(
        timestep.observation[self._partner_defection_signal])
    recent_defection += (partner_defection,)
    recent_defection = recent_defection[-self._recency_window:]

    total_recent_defection = sum(recent_defection)
    if total_recent_defection >= self._threshold:
      # (Re)trigger the punishment routine from the beginning, and clear the
      # window so the same defection is not punished twice.
      spite_until = step_count + self._spiteful_punishment_window
      defect_until = step_count + self._frames_to_punish
      recent_defection = ()

    # Spite takes precedence over plain defection; otherwise cooperate.
    if step_count < spite_until:
      goal = self._spite_goal
    elif step_count < defect_until:
      goal = self._defect_goal
    else:
      goal = self._cooperate_goal
    timestep = puppeteer.puppet_timestep(timestep, goal)

    next_state = ReciprocatorState(
        step_count=step_count + 1,
        spite_until=spite_until,
        defect_until=defect_until,
        recent_defection=recent_defection)
    return timestep, next_state
|
meltingpot-main
|
meltingpot/utils/puppeteers/coins.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for coins puppeteers."""
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from meltingpot.testing import puppeteers
from meltingpot.utils.puppeteers import coins
# Sentinel goal objects: tests only check which goal object the puppeteer
# emitted, so opaque mock sentinels suffice.
_COOPERATE = mock.sentinel.cooperate
_DEFECT = mock.sentinel.defect
_SPITE = mock.sentinel.spite
# Observation key carrying the partner-defection signal.
_NUM_DEFECTIONS_KEY = 'DEFECTIONS'
def _goals(puppeteer, num_defections, state=None):
  """Returns (goals, state) after feeding a sequence of defection counts."""
  observations = [
      {_NUM_DEFECTIONS_KEY: count} for count in num_defections
  ]
  return puppeteers.goals_from_observations(puppeteer, observations, state)
class ReciprocatorTest(parameterized.TestCase):
  """Tests for the coins.Reciprocator puppeteer."""

  def test_threshold(self):
    # Punishment triggers only once total recent defection reaches threshold.
    puppeteer = coins.Reciprocator(
        cooperate_goal=_COOPERATE,
        defect_goal=_DEFECT,
        spite_goal=_SPITE,
        partner_defection_signal=_NUM_DEFECTIONS_KEY,
        recency_window=1,
        threshold=3,
        frames_to_punish=1,
        spiteful_punishment_window=0,
    )
    num_defections = [0, 1, 2, 3]
    expected = [_COOPERATE, _COOPERATE, _COOPERATE, _DEFECT]
    actual, _ = _goals(puppeteer, num_defections)
    self.assertSequenceEqual(actual, expected)

  @parameterized.parameters(
      [(1, 0, 0, 1), (_COOPERATE, _COOPERATE, _COOPERATE, _COOPERATE)],
      [(1, 0, 1), (_COOPERATE, _COOPERATE, _DEFECT)],
      [(1, 1), (_COOPERATE, _DEFECT)],
  )
  def test_recency_window(self, num_defections, expected):
    # Defections more than recency_window steps apart do not accumulate.
    puppeteer = coins.Reciprocator(
        cooperate_goal=_COOPERATE,
        defect_goal=_DEFECT,
        spite_goal=_SPITE,
        partner_defection_signal=_NUM_DEFECTIONS_KEY,
        recency_window=3,
        threshold=2,
        frames_to_punish=1,
        spiteful_punishment_window=0,
    )
    actual, _ = _goals(puppeteer, num_defections)
    self.assertSequenceEqual(actual, expected)

  @parameterized.parameters(1, 2, 3)
  def test_defect_duration(self, duration):
    # After one spite frame, defection lasts until frames_to_punish elapse.
    puppeteer = coins.Reciprocator(
        cooperate_goal=_COOPERATE,
        defect_goal=_DEFECT,
        spite_goal=_SPITE,
        partner_defection_signal=_NUM_DEFECTIONS_KEY,
        recency_window=1,
        threshold=1,
        frames_to_punish=duration,
        spiteful_punishment_window=1,
    )
    num_defections = [1, 0, 0, 0]
    expected = (
        [_SPITE] + [_DEFECT] * (duration - 1) + [_COOPERATE] * (4 - duration))
    actual, _ = _goals(puppeteer, num_defections)
    self.assertSequenceEqual(actual, expected)

  @parameterized.parameters(1, 2, 4)
  def test_spite_duration(self, duration):
    # Spite lasts spiteful_punishment_window steps within the punishment.
    puppeteer = coins.Reciprocator(
        cooperate_goal=_COOPERATE,
        defect_goal=_DEFECT,
        spite_goal=_SPITE,
        partner_defection_signal=_NUM_DEFECTIONS_KEY,
        recency_window=1,
        threshold=1,
        frames_to_punish=4,
        spiteful_punishment_window=duration,
    )
    num_defections = [1, 0, 0, 0]
    expected = [_SPITE] * duration + [_DEFECT] * (4 - duration)
    actual, _ = _goals(puppeteer, num_defections)
    self.assertSequenceEqual(actual, expected)

  def test_resets_on_first(self):
    # Carry-over state from a previous episode must be discarded on FIRST.
    puppeteer = coins.Reciprocator(
        cooperate_goal=_COOPERATE,
        defect_goal=_DEFECT,
        spite_goal=_SPITE,
        partner_defection_signal=_NUM_DEFECTIONS_KEY,
        recency_window=8,
        threshold=1,
        frames_to_punish=8,
        spiteful_punishment_window=8,
    )
    _, state = _goals(puppeteer, [0, 0, 1, 0])
    num_defections = [0, 0, 0, 0]
    expected = [_COOPERATE, _COOPERATE, _COOPERATE, _COOPERATE]
    actual, _ = _goals(puppeteer, num_defections, state)
    self.assertSequenceEqual(actual, expected)

  def test_defection_during_defect_resets_spite(self):
    # A new defection restarts the routine (spite first) from the beginning.
    puppeteer = coins.Reciprocator(
        cooperate_goal=_COOPERATE,
        defect_goal=_DEFECT,
        spite_goal=_SPITE,
        partner_defection_signal=_NUM_DEFECTIONS_KEY,
        recency_window=1,
        threshold=1,
        frames_to_punish=3,
        spiteful_punishment_window=1,
    )
    num_defections = [1, 0, 1, 0, 0, 0]
    expected = [_SPITE, _DEFECT, _SPITE, _DEFECT, _DEFECT, _COOPERATE]
    actual, _ = _goals(puppeteer, num_defections)
    self.assertSequenceEqual(actual, expected)

  def test_defection_during_spite_extends_spite(self):
    # Re-triggering while already spiteful extends the spite window.
    puppeteer = coins.Reciprocator(
        cooperate_goal=_COOPERATE,
        defect_goal=_DEFECT,
        spite_goal=_SPITE,
        partner_defection_signal=_NUM_DEFECTIONS_KEY,
        recency_window=1,
        threshold=1,
        frames_to_punish=3,
        spiteful_punishment_window=2,
    )
    num_defections = [1, 0, 1, 0, 0, 0]
    expected = [_SPITE, _SPITE, _SPITE, _SPITE, _DEFECT, _COOPERATE]
    actual, _ = _goals(puppeteer, num_defections)
    self.assertSequenceEqual(actual, expected)

  def test_impulse_response(self):
    # Single defection: one spite frame, one defect frame, then cooperate.
    puppeteer = coins.Reciprocator(
        cooperate_goal=_COOPERATE,
        defect_goal=_DEFECT,
        spite_goal=_SPITE,
        partner_defection_signal=_NUM_DEFECTIONS_KEY,
        recency_window=4,
        threshold=1,
        frames_to_punish=2,
        spiteful_punishment_window=1,
    )
    num_defections = [1, 0, 0, 0]
    expected = [_SPITE, _DEFECT, _COOPERATE, _COOPERATE]
    actual, _ = _goals(puppeteer, num_defections)
    self.assertSequenceEqual(actual, expected)

  def test_boxcar_response(self):
    # Sustained defection keeps restarting the routine until it stops.
    puppeteer = coins.Reciprocator(
        cooperate_goal=_COOPERATE,
        defect_goal=_DEFECT,
        spite_goal=_SPITE,
        partner_defection_signal=_NUM_DEFECTIONS_KEY,
        recency_window=4,
        threshold=1,
        frames_to_punish=2,
        spiteful_punishment_window=1,
    )
    num_defections = [1, 1, 1, 0, 0, 0]
    expected = [_SPITE, _SPITE, _SPITE, _DEFECT, _COOPERATE, _COOPERATE]
    actual, _ = _goals(puppeteer, num_defections)
    self.assertSequenceEqual(actual, expected)
# Allows running this test module directly.
if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/puppeteers/coins_test.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for in_the_matrix Puppeteers."""
import itertools
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
import immutabledict
from meltingpot.testing import puppeteers
from meltingpot.utils.puppeteers import in_the_matrix
import numpy as np
# Mock resources: each pairs a matrix index with sentinel collect/interact
# goals so tests can check exactly which goal the puppeteer emitted.
_RESOURCE_0 = in_the_matrix.Resource(
    index=0,
    collect_goal=mock.sentinel.collect_0,
    interact_goal=mock.sentinel.interact_0,
)
_RESOURCE_1 = in_the_matrix.Resource(
    index=1,
    collect_goal=mock.sentinel.collect_1,
    interact_goal=mock.sentinel.interact_1,
)
_RESOURCE_2 = in_the_matrix.Resource(
    index=2,
    collect_goal=mock.sentinel.collect_2,
    interact_goal=mock.sentinel.interact_2,
)
# (own inventory, partner inventory) pairs; all -1 entries conventionally
# mark "no interaction occurred" in these tests.
_INTERACTION = (np.array([1, 1, 1]), np.array([1, 1, 1]))
_NO_INTERACTION = (np.array([-1, -1, -1]), np.array([-1, -1, -1]))
class HelperFunctionTest(parameterized.TestCase):
  """Tests for module-level helper functions in in_the_matrix."""

  @parameterized.parameters(
      [_INTERACTION, _INTERACTION[1]],
      [_NO_INTERACTION, None],
  )
  def test_get_partner_interaction_inventory(self, interaction, expected):
    timestep = dm_env.restart(_observation(None, interaction))
    actual = in_the_matrix.get_partner_interaction_inventory(timestep)
    np.testing.assert_equal(actual, expected)

  @parameterized.parameters(
      [_INTERACTION, True],
      [_NO_INTERACTION, False],
  )
  def test_has_interaction(self, interaction, expected):
    timestep = dm_env.restart(_observation(None, interaction))
    actual = in_the_matrix.has_interaction(timestep)
    self.assertEqual(actual, expected)

  @parameterized.parameters(
      # (inventory, (argmax resource index, margin over runner-up)).
      [(0, 0, 0), (mock.ANY, 0)],
      [(1, 0, 0), (0, 1)],
      [(0, 3, 1), (1, 2)],
      [(3, 0, 7), (2, 4)],
  )
  def test_max_resource_and_margin(self, inventory, expected):
    actual = in_the_matrix.max_resource_and_margin(np.array(inventory))
    self.assertEqual(actual, expected)

  @parameterized.parameters(
      [(1, 2, 3), 0, 1, False],
      [(1, 2, 3), 1, 1, False],
      [(1, 2, 3), 2, 1, True],
      [(1, 2, 3), 2, 2, False],
      [(1, 2, 5), 2, 1, True],
      [(1, 2, 5), 2, 3, True],
  )
  def test_has_sufficient(self, inventory, target, margin, expected):
    actual = in_the_matrix.has_collected_sufficient(
        np.array(inventory), target, margin)
    self.assertEqual(actual, expected)

  @parameterized.parameters(
      [(1, 2, 3), 2],
      [(1, 2, 5), 2],
      [(1, 0, 0), 0],
      [(1, 2, 0), 1],
  )
  def test_partner_max_resource(self, inventory, expected):
    timestep = dm_env.restart({
        'INTERACTION_INVENTORIES': (None, np.array(inventory)),
    })
    actual = in_the_matrix.partner_max_resource(timestep)
    self.assertEqual(actual, expected)

  @parameterized.parameters(
      [(1, 2, 3), _RESOURCE_0, 1, _RESOURCE_0.collect_goal],
      [(1, 2, 3), _RESOURCE_1, 1, _RESOURCE_1.collect_goal],
      [(1, 2, 3), _RESOURCE_2, 1, _RESOURCE_2.interact_goal],
      [(1, 2, 3), _RESOURCE_2, 2, _RESOURCE_2.collect_goal],
      [(1, 2, 5), _RESOURCE_2, 1, _RESOURCE_2.interact_goal],
      [(1, 2, 5), _RESOURCE_2, 3, _RESOURCE_2.interact_goal],
  )
  @mock.patch.object(immutabledict, 'immutabledict', dict)
  def test_collect_or_interact_puppet_timestep(
      self, inventory, target, margin, goal):
    # immutabledict is patched to dict so the expected timestep (built from
    # plain dicts) compares equal to the actual one.
    timestep = dm_env.restart({'INVENTORY': np.array(inventory)})
    actual = in_the_matrix.collect_or_interact_puppet_timestep(
        timestep, target, margin)
    expected = dm_env.restart({'INVENTORY': np.array(inventory), 'GOAL': goal})
    np.testing.assert_equal(actual, expected)
def _observation(inventory, interaction=None):
  """Builds one observation dict; `interaction` defaults to no-interaction."""
  if interaction is None:
    interaction = _NO_INTERACTION
  observation = {
      'INVENTORY': np.array(inventory),
      'INTERACTION_INVENTORIES': np.array(interaction),
  }
  return observation
def _goals_from_observations(puppeteer,
                             inventories,
                             interactions=(),
                             state=None):
  """Feeds zipped (inventory, interaction) observations to the puppeteer."""
  # zip_longest pads missing interactions with None, which _observation
  # replaces with the no-interaction default.
  observations = [
      _observation(inventory, interaction)
      for inventory, interaction in itertools.zip_longest(
          inventories, interactions)
  ]
  return puppeteers.goals_from_observations(puppeteer, observations, state)
class SpecialistTest(parameterized.TestCase):
  """Tests for in_the_matrix.Specialist."""

  def test(self):
    puppeteer = in_the_matrix.Specialist(
        target=_RESOURCE_1,
        margin=1,
    )
    # Collect until the target resource leads by at least `margin`, then
    # interact.
    inventories = [(1, 1, 1), (1, 2, 1), (1, 2, 2), (1, 3, 2)]
    expected = [
        _RESOURCE_1.collect_goal,
        _RESOURCE_1.interact_goal,
        _RESOURCE_1.collect_goal,
        _RESOURCE_1.interact_goal,
    ]
    actual, _ = _goals_from_observations(puppeteer, inventories)
    self.assertEqual(actual, expected)
class ScheduledFlipTest(parameterized.TestCase):
  """Tests for in_the_matrix.ScheduledFlip."""

  def test(self):
    puppeteer = in_the_matrix.ScheduledFlip(
        threshold=1,
        initial_target=_RESOURCE_1,
        final_target=_RESOURCE_2,
        initial_margin=1,
        final_margin=2,
    )
    inventories = [(1, 1, 1), (1, 2, 1), (1, 2, 2), (1, 2, 4)]
    interactions = [
        _NO_INTERACTION, _NO_INTERACTION, _INTERACTION, _INTERACTION
    ]
    # After `threshold` interactions the target flips from resource 1 to 2.
    expected = [
        _RESOURCE_1.collect_goal,
        _RESOURCE_1.interact_goal,
        _RESOURCE_2.collect_goal,
        _RESOURCE_2.interact_goal,
    ]
    actual, _ = _goals_from_observations(puppeteer, inventories, interactions)
    self.assertEqual(actual, expected)
class GrimTriggerTest(parameterized.TestCase):
  """Tests for in_the_matrix.GrimTrigger."""

  def test_trigger(self):
    # Defects permanently after `threshold` observed defections.
    puppeteer = in_the_matrix.GrimTrigger(
        threshold=2,
        cooperate_resource=_RESOURCE_1,
        defect_resource=_RESOURCE_0,
        margin=1,
    )
    inventories = [
        (1, 1),
        (1, 2),
        (2, 2),
        (2, 2),
        (3, 2),
        (3, 2),
    ]
    interactions = [
        ((-1, -1), (-1, -1)),  # neither
        ((-1, -1), (1, 0)),  # defection
        ((-1, -1), (0, 1)),  # cooperation
        ((-1, -1), (1, 0)),  # defection
        ((-1, -1), (0, 1)),  # cooperation
        ((-1, -1), (0, 1)),  # cooperation
    ]
    expected = [
        _RESOURCE_1.collect_goal,  # cooperate
        _RESOURCE_1.interact_goal,  # cooperate
        _RESOURCE_1.collect_goal,  # cooperate
        _RESOURCE_0.collect_goal,  # defect
        _RESOURCE_0.interact_goal,  # defect
        _RESOURCE_0.interact_goal,  # defect
    ]
    actual, _ = _goals_from_observations(puppeteer, inventories, interactions)
    self.assertEqual(actual, expected)

  def test_not_grim_after_reset(self):
    # Passing a terminal state back in must reset the trigger: the second
    # episode behaves exactly like the first.
    puppeteer = in_the_matrix.GrimTrigger(
        threshold=2,
        cooperate_resource=_RESOURCE_1,
        defect_resource=_RESOURCE_0,
        margin=1,
    )
    inventories = [
        (1, 1),
        (1, 2),
        (2, 2),
        (2, 2),
        (3, 2),
        (3, 2),
    ]
    interactions = [
        ((-1, -1), (-1, -1)),  # neither
        ((-1, -1), (1, 0)),  # defection
        ((-1, -1), (0, 1)),  # cooperation
        ((-1, -1), (1, 0)),  # defection
        ((-1, -1), (0, 1)),  # cooperation
        ((-1, -1), (0, 1)),  # cooperation
    ]
    expected = [
        _RESOURCE_1.collect_goal,  # cooperate
        _RESOURCE_1.interact_goal,  # cooperate
        _RESOURCE_1.collect_goal,  # cooperate
        _RESOURCE_0.collect_goal,  # defect
        _RESOURCE_0.interact_goal,  # defect
        _RESOURCE_0.interact_goal,  # defect
    ]
    _, state = _goals_from_observations(puppeteer, inventories, interactions)
    actual, _ = _goals_from_observations(puppeteer, inventories, interactions,
                                         state)
    self.assertEqual(actual, expected)
class TitForTatTest(parameterized.TestCase):
  """Tests for in_the_matrix.TitForTat."""

  def test(self):
    puppeteer = in_the_matrix.TitForTat(
        cooperate_resource=_RESOURCE_1,
        defect_resource=_RESOURCE_0,
        margin=1,
        tremble_probability=0,
    )
    inventories = [
        (1, 1, 0),  # not ready to interact
        (1, 2, 0),  # ready to interact if cooperating
        (3, 2, 0),  # ready to interact if defecting
        (2, 3, 0),  # ready to interact if cooperating
        (3, 2, 0),  # ready to interact if defecting
        (2, 2, 0),  # not ready to interact
    ]
    interactions = [
        ((-1, -1, -1), (2, 2, 0)),  # coplayer cooperates and defects
        ((-1, -1, -1), (1, 0, 0)),  # coplayer defects
        ((-1, -1, -1), (0, 0, 1)),  # coplayer plays other
        ((-1, -1, -1), (0, 1, 0)),  # coplayer cooperates
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (2, 1, 1)),  # coplayer defects
    ]
    expected = [
        _RESOURCE_1.collect_goal,  # cooperate
        _RESOURCE_0.collect_goal,  # defect
        _RESOURCE_0.interact_goal,  # continue defecting
        _RESOURCE_1.interact_goal,  # cooperate
        _RESOURCE_1.collect_goal,  # continue cooperating
        _RESOURCE_0.collect_goal,  # defect
    ]
    actual, _ = _goals_from_observations(puppeteer, inventories, interactions)
    self.assertEqual(actual, expected)

  def test_with_tremble(self):
    # tremble_probability=1 deterministically inverts every decision.
    puppeteer = in_the_matrix.TitForTat(
        cooperate_resource=_RESOURCE_1,
        defect_resource=_RESOURCE_0,
        margin=1,
        tremble_probability=1,
    )
    inventories = [
        (1, 1, 0),  # not ready to interact
        (1, 2, 0),  # ready to interact if cooperating
        (3, 2, 0),  # ready to interact if defecting
        (2, 3, 0),  # ready to interact if cooperating
        (3, 2, 0),  # ready to interact if defecting
        (2, 2, 0),  # not ready to interact
    ]
    interactions = [
        ((-1, -1, -1), (2, 2, 0)),  # coplayer cooperates and defects
        ((-1, -1, -1), (1, 0, 0)),  # coplayer defects
        ((-1, -1, -1), (0, 0, 1)),  # coplayer plays other
        ((-1, -1, -1), (0, 1, 0)),  # coplayer cooperates
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (2, 1, 1)),  # coplayer defects
    ]
    expected = [
        _RESOURCE_0.collect_goal,  # cooperate but tremble and defect
        _RESOURCE_1.interact_goal,  # defect but tremble and cooperate
        _RESOURCE_1.collect_goal,  # continue cooperating
        _RESOURCE_0.collect_goal,  # cooperate but tremble and defect
        _RESOURCE_0.interact_goal,  # continue defecting
        _RESOURCE_1.collect_goal,  # defect but tremble and cooperate
    ]
    actual, _ = _goals_from_observations(puppeteer, inventories, interactions)
    self.assertEqual(actual, expected)
class CorrigibleTest(parameterized.TestCase):
  """Tests for in_the_matrix.Corrigible."""

  def test(self):
    puppeteer = in_the_matrix.Corrigible(
        threshold=1,
        cooperate_resource=_RESOURCE_1,
        defect_resource=_RESOURCE_0,
        margin=2,
        tremble_probability=0,
    )
    inventories = [
        (2, 0, 1),  # not ready to interact
        (4, 1, 0),  # ready to interact if defecting
        (2, 1, 0),  # not ready to interact
        (1, 2, 0),  # not ready to interact
        (2, 1, 0),  # not ready to interact
        (1, 4, 0),  # ready to interact if cooperating
        (3, 1, 0),  # ready to interact if defecting
    ]
    interactions = [
        ((-1, -1, -1), (0, 1, 0)),  # coplayer cooperates
        ((-1, -1, -1), (0, 0, 1)),  # coplayer plays other
        ((-1, -1, -1), (1, 0, 0)),  # coplayer defects
        ((-1, -1, -1), (0, 1, 0)),  # coplayer cooperates
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (1, 1, 0)),  # coplayer cooperates and defects
        ((-1, -1, -1), (2, 0, 1)),  # coplayer defects
    ]
    expected = [
        _RESOURCE_0.collect_goal,  # defect
        _RESOURCE_0.interact_goal,  # continue defecting
        _RESOURCE_1.collect_goal,  # cooperate
        _RESOURCE_1.collect_goal,  # cooperate
        _RESOURCE_1.collect_goal,  # continue cooperating
        _RESOURCE_1.interact_goal,  # continue cooperating
        _RESOURCE_0.interact_goal,  # defect
    ]
    actual, _ = _goals_from_observations(puppeteer, inventories, interactions)
    self.assertEqual(actual, expected)

  def test_tremble(self):
    # tremble_probability=1 deterministically inverts every decision.
    puppeteer = in_the_matrix.Corrigible(
        threshold=1,
        cooperate_resource=_RESOURCE_1,
        defect_resource=_RESOURCE_0,
        margin=2,
        tremble_probability=1,
    )
    inventories = [
        (2, 0, 1),  # not ready to interact
        (4, 1, 0),  # ready to interact if defecting
        (2, 1, 0),  # not ready to interact
        (1, 2, 0),  # not ready to interact
        (2, 1, 0),  # not ready to interact
        (1, 4, 0),  # ready to interact if cooperating
        (3, 1, 0),  # ready to interact if defecting
    ]
    interactions = [
        ((-1, -1, -1), (0, 1, 0)),  # coplayer cooperates
        ((-1, -1, -1), (0, 0, 1)),  # coplayer plays other
        ((-1, -1, -1), (1, 0, 0)),  # coplayer defects
        ((-1, -1, -1), (0, 1, 0)),  # coplayer cooperates
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (1, 1, 0)),  # coplayer cooperates and defects
        ((-1, -1, -1), (2, 0, 1)),  # coplayer defects
    ]
    expected = [
        _RESOURCE_0.collect_goal,  # defect
        _RESOURCE_0.interact_goal,  # continue defecting
        _RESOURCE_0.collect_goal,  # cooperate but tremble and defect
        _RESOURCE_0.collect_goal,  # cooperate but tremble and defect
        _RESOURCE_0.collect_goal,  # continue defecting
        _RESOURCE_0.collect_goal,  # continue defecting
        _RESOURCE_1.collect_goal,  # defect but tremble and cooperate
    ]
    actual, _ = _goals_from_observations(puppeteer, inventories, interactions)
    self.assertEqual(actual, expected)
class RespondToPreviousTest(parameterized.TestCase):
  """Tests for in_the_matrix.RespondToPrevious."""

  def test(self):
    # Each resource maps to a fixed response; the target only changes after
    # a clear interaction.
    puppeteer = in_the_matrix.RespondToPrevious(
        responses={
            _RESOURCE_0: _RESOURCE_2,
            _RESOURCE_1: _RESOURCE_0,
            _RESOURCE_2: _RESOURCE_1,
        },
        margin=1,
    )
    inventories = [
        (1, 1, 1),
        (1, 2, 1),
        (1, 2, 3),
        (2, 3, 1),
        (3, 2, 1),
        (3, 2, 1),
        (2, 3, 1),
    ]
    interactions = [
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (1, 0, 0)),  # opponent plays 0
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (0, 1, 0)),  # opponent plays 1
        ((-1, -1, -1), (1, 1, 1)),  # no clear interaction
        ((-1, -1, -1), (0, 0, 1)),  # opponent plays 2
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
    ]
    expected = [
        mock.ANY,  # random
        _RESOURCE_2.collect_goal,
        _RESOURCE_2.interact_goal,
        _RESOURCE_0.collect_goal,
        _RESOURCE_0.interact_goal,
        _RESOURCE_1.collect_goal,
        _RESOURCE_1.interact_goal,
    ]
    actual, _ = _goals_from_observations(puppeteer, inventories, interactions)
    self.assertEqual(actual, expected)
class AlternatingSpecialistTest(parameterized.TestCase):
  """Tests for in_the_matrix.AlternatingSpecialist."""

  def testOneInteractionPerOption(self):
    # Target advances to the next resource after every single interaction.
    puppeteer = in_the_matrix.AlternatingSpecialist(
        targets=[_RESOURCE_0, _RESOURCE_1, _RESOURCE_2],
        interactions_per_target=1,
        margin=1,
    )
    inventories = [
        (1, 1, 1),
        (1, 2, 1),
        (1, 2, 3),
        (2, 3, 1),
        (3, 2, 1),
        (3, 2, 1),
        (2, 3, 1),
    ]
    interactions = [
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (1, 0, 0)),  # opponent plays 0
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (0, 1, 0)),  # opponent plays 1
        ((-1, -1, -1), (2, 2, 2)),  # no clear interaction
        ((-1, -1, -1), (0, 0, 1)),  # opponent plays 2
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
    ]
    expected = [
        _RESOURCE_0.collect_goal,
        _RESOURCE_1.interact_goal,
        _RESOURCE_1.collect_goal,
        _RESOURCE_2.collect_goal,
        _RESOURCE_0.interact_goal,
        _RESOURCE_1.collect_goal,
        _RESOURCE_1.interact_goal,
    ]
    actual, _ = _goals_from_observations(
        puppeteer, inventories, interactions)
    self.assertEqual(actual, expected)

  def testTwoInteractionsPerOption(self):
    # Target advances only after two interactions with the current target.
    puppeteer = in_the_matrix.AlternatingSpecialist(
        targets=[_RESOURCE_0, _RESOURCE_1, _RESOURCE_2],
        interactions_per_target=2,
        margin=1,
    )
    inventories = [
        (1, 1, 1),
        (1, 1, 1),
        (1, 2, 1),
        (1, 2, 1),
        (1, 2, 3),
        (1, 2, 3),
        (2, 3, 1),
        (2, 3, 1),
        (3, 2, 1),
        (3, 2, 1),
        (3, 2, 1),
        (3, 2, 1),
        (2, 3, 1),
        (2, 3, 1),
    ]
    interactions = [
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (1, 0, 0)),  # opponent plays 0
        ((-1, -1, -1), (1, 0, 0)),  # opponent plays 0
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (0, 1, 0)),  # opponent plays 1
        ((-1, -1, -1), (0, 1, 0)),  # opponent plays 1
        ((-1, -1, -1), (2, 2, 2)),  # no clear interaction
        ((-1, -1, -1), (2, 2, 2)),  # no clear interaction
        ((-1, -1, -1), (0, 0, 1)),  # opponent plays 2
        ((-1, -1, -1), (0, 0, 1)),  # opponent plays 2
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
        ((-1, -1, -1), (-1, -1, -1)),  # no interaction
    ]
    expected = [
        _RESOURCE_0.collect_goal,
        _RESOURCE_0.collect_goal,
        _RESOURCE_0.collect_goal,
        _RESOURCE_1.interact_goal,
        _RESOURCE_1.collect_goal,
        _RESOURCE_1.collect_goal,
        _RESOURCE_1.interact_goal,
        _RESOURCE_2.collect_goal,
        _RESOURCE_2.collect_goal,
        _RESOURCE_0.interact_goal,
        _RESOURCE_0.interact_goal,
        _RESOURCE_1.collect_goal,
        _RESOURCE_1.interact_goal,
        _RESOURCE_1.interact_goal,
    ]
    actual, _ = _goals_from_observations(
        puppeteer, inventories, interactions)
    self.assertEqual(actual, expected)
# Allows running this test module directly.
if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/puppeteers/in_the_matrix_test.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Puppeteers for *_coordination_in_the_matrix."""
from typing import Iterable
from meltingpot.utils.puppeteers import in_the_matrix
class CoordinateWithPrevious(in_the_matrix.RespondToPrevious):
  """Puppeteer to use in pure/rationalizable coordination in the matrix.

  This bot always mirrors whatever its partner played in the previous
  interaction: if the last partner played resource A, it targets resource A;
  if the last partner played resource B, it targets resource B; and so on.

  Important note: this puppeteer does not discriminate between coplayers. It
  may not make sense to use this beyond two-player substrates.
  """

  def __init__(
      self,
      resources: Iterable[in_the_matrix.Resource],
      margin: int,
  ) -> None:
    """Initializes the puppeteer.

    Args:
      resources: The collectible resources to coordinate on.
      margin: Try to collect `margin` more of the target resource than the
        other resource before interacting.
    """
    # Mapping every resource to itself makes RespondToPrevious mirror the
    # partner's previous play.
    identity_responses = {resource: resource for resource in resources}
    super().__init__(identity_responses, margin)
|
meltingpot-main
|
meltingpot/utils/puppeteers/coordination_in_the_matrix.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Puppeteers for puppet bots."""
import abc
from typing import Generic, Mapping, NewType, Sequence, Tuple, TypeVar
import dm_env
import immutabledict
import numpy as np
State = TypeVar('State')
PuppetGoal = NewType('PuppetGoal', np.ndarray)
_GOAL_OBSERVATION_KEY = 'GOAL'
_GOAL_DTYPE = np.int32
class Puppeteer(Generic[State], metaclass=abc.ABCMeta):
  """A puppeteer that controls the timestep forwarded to the puppet.

  Must not possess any mutable state not in `initial_state`.
  """
  # Implementations thread all state through `step` explicitly, which keeps
  # the puppeteer safe to reuse across episodes and players.

  @abc.abstractmethod
  def initial_state(self) -> State:
    """Returns the initial state of the puppeteer.

    Must not have any side effects.
    """

  @abc.abstractmethod
  def step(self, timestep: dm_env.TimeStep,
           prev_state: State) -> Tuple[dm_env.TimeStep, State]:
    """Steps the puppeteer.

    Must not have any side effects.

    Args:
      timestep: information from the environment.
      prev_state: the previous state of the puppeteer.

    Returns:
      timestep: the timestep to forward to the puppet.
      next_state: the state for the next step call.
    """
def puppet_timestep(timestep: dm_env.TimeStep,
                    goal: PuppetGoal) -> dm_env.TimeStep:
  """Returns a copy of `timestep` with the goal observation added."""
  augmented = dict(timestep.observation)
  augmented[_GOAL_OBSERVATION_KEY] = goal
  return timestep._replace(
      observation=immutabledict.immutabledict(augmented))
def puppet_goals(names: Sequence[str],
                 dtype: ... = _GOAL_DTYPE) -> Mapping[str, PuppetGoal]:
  """Builds a one-hot goal vector for each named goal.

  Args:
    names: names for each of the corresponding goals.
    dtype: dtype of the one-hot goals to return.

  Returns:
    An immutable mapping from goal name to its read-only one-hot vector.
  """
  one_hots = np.eye(len(names), dtype=dtype)
  one_hots.setflags(write=False)  # Views of the rows are also read-only.
  return immutabledict.immutabledict(
      {name: one_hots[index] for index, name in enumerate(names)})
|
meltingpot-main
|
meltingpot/utils/puppeteers/puppeteer.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gift_refinements puppeteers."""
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from meltingpot.testing import puppeteers
from meltingpot.utils.puppeteers import gift_refinements
_COLLECT = mock.sentinel.collect
_CONSUME = mock.sentinel.consume
_GIFT = mock.sentinel.gift
class GiftRefinementsCooperatorTest(parameterized.TestCase):
  """Checks the goal a cooperator emits for a given inventory."""

  @parameterized.parameters(
      [(0, 0, 0), _COLLECT],
      [(0, 0, 1), _CONSUME],
      [(1, 0, 0), _GIFT],
      [(2, 0, 0), _GIFT],
      [(2, 0, 2), _CONSUME],
      [(1, 1, 0), _CONSUME],
      [(1, 1, 1), _CONSUME],
      [(5, 2, 0), _CONSUME],
      [(5, 5, 5), _CONSUME],
  )
  def test(self, inventory, expected):
    cooperator = gift_refinements.GiftRefinementsCooperator(
        collect_goal=_COLLECT,
        consume_goal=_CONSUME,
        gift_goal=_GIFT,
    )
    observation = {'INVENTORY': inventory}
    # Exactly one goal is expected for the single observation.
    (goal,), _ = puppeteers.goals_from_observations(cooperator, [observation])
    self.assertEqual(goal, expected)
class GiftRefinementsExtremeCooperatorTest(parameterized.TestCase):
  """Checks the goal an extreme cooperator emits for a given inventory."""

  @parameterized.parameters(
      [(0, 0, 0), _COLLECT],
      [(0, 0, 1), _CONSUME],
      [(1, 0, 0), _GIFT],
      [(2, 0, 2), _CONSUME],
      [(1, 1, 0), _GIFT],
      [(1, 1, 1), _CONSUME],
      [(5, 2, 0), _GIFT],
      [(5, 5, 5), _CONSUME],
  )
  def test(self, inventory, expected):
    cooperator = gift_refinements.GiftRefinementsExtremeCooperator(
        collect_goal=_COLLECT,
        consume_goal=_CONSUME,
        gift_goal=_GIFT,
    )
    observation = {'INVENTORY': inventory}
    # Exactly one goal is expected for the single observation.
    (goal,), _ = puppeteers.goals_from_observations(cooperator, [observation])
    self.assertEqual(goal, expected)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/puppeteers/gift_refinements_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Puppet policy implementation."""
from typing import Generic, Tuple, TypeVar
import dm_env
from meltingpot.utils.policies import policy
from meltingpot.utils.puppeteers import puppeteer as puppeteer_lib
PuppeteerState = TypeVar('PuppeteerState')
PolicyState = TypeVar('PolicyState')
class PuppetPolicy(policy.Policy[Tuple[PuppeteerState, PolicyState]],
                   Generic[PuppeteerState, PolicyState]):
  """A puppet policy whose timesteps are mediated by a puppeteer function."""

  def __init__(
      self,
      puppeteer: puppeteer_lib.Puppeteer[PuppeteerState],
      puppet: policy.Policy[PolicyState]) -> None:
    """Creates a new PuppetBot.

    Args:
      puppeteer: Puppeteer that will be called at every step to modify the
        timestep forwarded to the underlying puppet.
      puppet: The puppet policy. Will be closed with this wrapper.
    """
    self._puppeteer = puppeteer
    self._puppet = puppet

  def step(
      self,
      timestep: dm_env.TimeStep,
      prev_state: Tuple[PuppeteerState, PolicyState],
  ) -> Tuple[int, Tuple[PuppeteerState, PolicyState]]:
    """See base class."""
    puppeteer_state, puppet_state = prev_state
    # First let the puppeteer rewrite the timestep, then let the puppet act
    # on the rewritten version.
    modified_timestep, next_puppeteer_state = self._puppeteer.step(
        timestep, puppeteer_state)
    action, next_puppet_state = self._puppet.step(
        modified_timestep, puppet_state)
    return action, (next_puppeteer_state, next_puppet_state)

  def initial_state(self) -> Tuple[PuppeteerState, PolicyState]:
    """See base class."""
    return (self._puppeteer.initial_state(), self._puppet.initial_state())

  def close(self) -> None:
    """See base class."""
    self._puppet.close()
|
meltingpot-main
|
meltingpot/utils/policies/puppet_policy.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy from a Saved Model."""
import contextlib
import random
import dm_env
from meltingpot.utils.policies import permissive_model
from meltingpot.utils.policies import policy
import numpy as np
import tensorflow as tf
import tree
def _numpy_to_placeholder(
    template: tree.Structure[np.ndarray], prefix: str
) -> tree.Structure[tf.Tensor]:
  """Returns placeholders that match a given template.

  Args:
    template: template numpy arrays.
    prefix: a prefix to add to the placeholder names.

  Returns:
    A tree of placeholders matching the template arrays' specs.
  """
  def make_placeholder(path, template_array):
    # Name each placeholder after its dotted path within the structure.
    qualified = '.'.join(str(part) for part in path)
    return tf.compat.v1.placeholder(
        shape=template_array.shape,
        dtype=template_array.dtype,
        name=f'{prefix}.{qualified}')
  return tree.map_structure_with_path(make_placeholder, template)
def _downcast(x):
"""Downcasts input to 32-bit precision."""
if not isinstance(x, np.ndarray):
return x
elif x.dtype == np.float64:
return np.asarray(x, dtype=np.float32)
elif x.dtype == np.int64:
return np.asarray(x, dtype=np.int32)
else:
return x
class TF2SavedModelPolicy(policy.Policy[tree.Structure[tf.Tensor]]):
  """Policy wrapping a saved model for TF2 inference.

  Note: the model should have methods:

  1. `initial_state(random_key)`
  2. `step(key, timestep, prev_state)`

  that accept unbatched inputs.
  """

  def __init__(self, model_path: str, device_name: str = 'cpu') -> None:
    """Initialize a policy instance.

    Args:
      model_path: Path to the SavedModel.
      device_name: Device to load SavedModel onto. Defaults to a cpu device.
        See tf.device for supported device names.
    """
    self._strategy = tf.distribute.OneDeviceStrategy(device_name)
    # Load the model under the strategy scope so its variables live on the
    # requested device.
    with self._strategy.scope():
      loaded = tf.saved_model.load(model_path)
      self._model = permissive_model.PermissiveModel(loaded)

  def step(
      self,
      timestep: dm_env.TimeStep,
      prev_state: tree.Structure[tf.Tensor],
  ) -> tuple[int, tree.Structure[tf.Tensor]]:
    """See base class."""
    prev_key, model_state = prev_state
    # The model expects 32-bit observations and an int step type.
    downcast_timestep = timestep._replace(
        step_type=int(timestep.step_type),
        observation=tree.map_structure(_downcast, timestep.observation),
    )
    next_key, outputs = self._strategy.run(
        self._model.step, [prev_key, downcast_timestep, model_state])
    (action, _), next_model_state = outputs
    return int(action.numpy()), (next_key, next_model_state)

  def initial_state(self) -> tree.Structure[tf.Tensor]:
    """See base class."""
    # Seed the model's PRNG key from Python's global RNG.
    seed_key = np.array([0, random.getrandbits(32)], dtype=np.uint32)
    key, state = self._strategy.run(self._model.initial_state, [seed_key])
    return key, state

  def close(self) -> None:
    """See base class."""
class TF1SavedModelPolicy(policy.Policy[tree.Structure[np.ndarray]]):
  """Policy wrapping a saved model for TF1 inference.

  Note: the model should have methods:

  1. `initial_state(batch_size, trainable)`
  2. `step(step_type, reward, discount, observation, prev_state)`

  that accept batched inputs and produce batched outputs.
  """

  def __init__(self, model_path: str, device_name: str = 'cpu') -> None:
    """Initialize a policy instance.

    Args:
      model_path: Path to the SavedModel.
      device_name: Device to load SavedModel onto. Defaults to a cpu device.
        See tf.device for supported device names.
    """
    self._device_name = device_name
    self._graph = tf.compat.v1.Graph()
    self._session = tf.compat.v1.Session(graph=self._graph)
    with self._build_context():
      model = tf.compat.v1.saved_model.load_v2(model_path)
      self._model = permissive_model.PermissiveModel(model)
    # Subgraphs are built lazily: the initial_state subgraph on the first
    # initial_state() call and the step subgraph on the first step() call
    # (using that call's values as the placeholder template).
    self._initial_state_outputs = None
    self._step_inputs = None
    self._step_outputs = None

  @contextlib.contextmanager
  def _build_context(self):
    # All graph construction happens inside self._graph, pinned to the
    # configured device.
    with self._graph.as_default():  # pylint: disable=not-context-manager
      with tf.compat.v1.device(self._device_name):
        yield

  def _build_initial_state_graph(self) -> None:
    """Builds the TF1 subgraph for the initial_state operation."""
    with self._build_context():
      # The model's PRNG is seeded via a 2-element uint32 key.
      key_in = tf.compat.v1.placeholder(shape=[2], dtype=np.uint32)
      self._initial_state_outputs = self._model.initial_state(key_in)
      self._initial_state_input = key_in

  def _build_step_graph(self, timestep, prev_state) -> None:
    """Builds the TF1 subgraph for the step operation.

    Args:
      timestep: an example timestep.
      prev_state: an example previous state.
    """
    if not self._initial_state_outputs:
      # Build the initial_state subgraph first, since the graph is finalized
      # at the end of this method and no ops can be added afterwards.
      self._build_initial_state_graph()
    with self._build_context():
      step_type_in = tf.compat.v1.placeholder(
          shape=[], dtype=np.int32, name='step_type')
      reward_in = tf.compat.v1.placeholder(
          shape=[], dtype=np.float32, name='reward')
      discount_in = tf.compat.v1.placeholder(
          shape=[], dtype=np.float32, name='discount')
      observation_in = _numpy_to_placeholder(
          timestep.observation, prefix='observation')
      timestep_in = dm_env.TimeStep(
          step_type=step_type_in,
          reward=reward_in,
          discount=discount_in,
          observation=observation_in)
      prev_key_in, prev_state_in = _numpy_to_placeholder(
          prev_state, prefix='prev_state')
      next_key, outputs = self._model.step(prev_key_in, timestep_in,
                                           prev_state_in)
      (action, _), next_state = outputs
      # Map each structure path to its placeholder so step() can build a
      # feed_dict from same-shaped inputs later.
      input_values = tree.flatten_with_path({
          'timestep': timestep_in,
          'prev_state': (prev_key_in, prev_state_in),
      })
      self._step_inputs = dict(input_values)
      self._step_outputs = (action, (next_key, next_state))
    # Freeze the graph: guards against accidental op creation per step.
    self._graph.finalize()

  def step(
      self, timestep: dm_env.TimeStep, prev_state: tree.Structure[np.ndarray]
  ) -> tuple[int, tree.Structure[np.ndarray]]:
    """See base class."""
    timestep = timestep._replace(
        step_type=int(timestep.step_type),
        observation=tree.map_structure(_downcast, timestep.observation),
    )
    if not self._step_inputs:
      self._build_step_graph(timestep, prev_state)
    input_values = tree.flatten_with_path({
        'timestep': timestep,
        'prev_state': prev_state,
    })
    # Feed only paths that exist as placeholders; extra entries are dropped.
    feed_dict = {
        self._step_inputs[path]: value for path, value in input_values
        if path in self._step_inputs
    }
    action, next_state = self._session.run(self._step_outputs, feed_dict)
    return int(action), next_state

  def initial_state(self) -> tree.Structure[np.ndarray]:
    """See base class."""
    if not self._initial_state_outputs:
      self._build_initial_state_graph()
    # Seed the model's PRNG key from Python's global RNG.
    random_seed = random.getrandbits(32)
    seed_key = np.array([0, random_seed], dtype=np.uint32)
    feed_dict = {self._initial_state_input: seed_key}
    return self._session.run(self._initial_state_outputs, feed_dict)

  def close(self) -> None:
    """See base class."""
    self._session.close()
# Select the wrapper matching the current TF execution mode.
SavedModelPolicy = (
    TF2SavedModelPolicy if tf.executing_eagerly() else TF1SavedModelPolicy)
|
meltingpot-main
|
meltingpot/utils/policies/saved_model_policy.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bot policy implementations."""
import abc
from typing import Generic, Tuple, TypeVar
import dm_env
State = TypeVar('State')
class Policy(Generic[State], metaclass=abc.ABCMeta):
  """Abstract base class for a policy.

  Must not possess any mutable state not in `initial_state`.
  """

  @abc.abstractmethod
  def initial_state(self) -> State:
    """Returns the initial state of the agent.

    Must not have any side effects.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def step(self, timestep: dm_env.TimeStep,
           prev_state: State) -> Tuple[int, State]:
    """Steps the agent.

    Must not have any side effects.

    Args:
      timestep: information from the environment
      prev_state: the previous state of the agent.

    Returns:
      action: the action to send to the environment.
      next_state: the state for the next step call.
    """
    raise NotImplementedError()

  @abc.abstractmethod
  def close(self) -> None:
    """Closes the policy."""
    raise NotImplementedError()

  def __enter__(self):
    """Supports use as a context manager; the policy is closed on exit."""
    return self

  def __exit__(self, *args, **kwargs):
    del args, kwargs  # Exception details are irrelevant to cleanup.
    self.close()
|
meltingpot-main
|
meltingpot/utils/policies/policy.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
meltingpot-main
|
meltingpot/utils/policies/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy that always returns a fixed action."""
from typing import Tuple
import dm_env
from meltingpot.utils.policies import policy
class FixedActionPolicy(policy.Policy[Tuple[()]]):
  """Always performs the same action, regardless of observations."""

  def __init__(self, action: int):
    """Initializes the policy.

    Args:
      action: The action that the policy will always emit, regardless of its
        observations.
    """
    self._action = action

  def step(self, timestep: dm_env.TimeStep,
           prev_state: Tuple[()]) -> Tuple[int, Tuple[()]]:
    """See base class."""
    del timestep  # The fixed action ignores all observations.
    return self._action, prev_state

  def initial_state(self) -> Tuple[()]:
    """See base class."""
    return ()

  def close(self) -> None:
    """See base class."""
|
meltingpot-main
|
meltingpot/utils/policies/fixed_action_policy.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# LINT.IfChange
"""A permissive wrapper for a SavedModel."""
import copy
import inspect
from typing import Any, Callable, NamedTuple
from absl import logging
import tensorflow as tf
import tree
class _Function(NamedTuple):
"""Function exposing signature and expected canonical arguments."""
func: Callable[..., Any]
signature: inspect.Signature
structured_specs: Any
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
@property
def canonical_arguments(self) -> inspect.BoundArguments:
args, kwargs = copy.deepcopy(self.structured_specs)
return self.signature.bind(*args, **kwargs)
class PermissiveModel:
  """A permissive wrapper for a SavedModel.

  Exposes each function of the wrapped model as an attribute that accepts a
  superset of the function's true signature: extraneous arguments and
  dictionary keys are filtered out, and arguments whose canonical spec is
  `None` are replaced with `None` (with a warning) before the underlying
  concrete function is called.
  """

  # Disable pytype attribute error checks (functions are attached
  # dynamically in __init__).
  _HAS_DYNAMIC_ATTRIBUTES = True

  def __init__(self, model):
    """Wraps `model` and attaches a permissive version of its functions.

    Args:
      model: a loaded SavedModel exposing `function_tables()` and
        `function_signatures()`.
    """
    self.model = model
    self._tables = self.model.function_tables()
    self._initialized_tables = {}

    def build_parameters(params):
      # Serialized parameters arrive as (bytes_name, kind) pairs.
      params = [
          inspect.Parameter(str(param[0], "utf-8"), param[1])
          for param in params
      ]
      # Always include a VAR_KEYWORD to capture any extraneous arguments.
      if all(p.kind != inspect.Parameter.VAR_KEYWORD for p in params):
        params.append(
            inspect.Parameter("__unused_kwargs", inspect.Parameter.VAR_KEYWORD))
      return params

    signatures = self.model.function_signatures()
    if tf.executing_eagerly():
      signatures = tree.map_structure(lambda x: x.numpy(), signatures)
    else:
      # In graph mode the signatures are tensors and must be evaluated.
      with tf.compat.v1.Session() as sess:
        signatures = sess.run(signatures)
    signatures = {
        func_name: inspect.Signature(build_parameters(func_params))
        for func_name, func_params in signatures.items()
    }
    self.signatures = signatures

    # Attach deepfuncs.
    for name in self.signatures.keys():
      setattr(self, name, self._make_permissive_function(name))

  def _maybe_init_tables(self, concrete_func: Any, name: str):
    """Initialise all tables for a function if they are not initialised.

    Some functions rely on hash-tables that must be externally initialized.
    This method will perform a one-time initialisation of the tables. It does
    so by finding the corresponding op that creates the hash-table handles
    (these will be different from the ones observed in the initial deepfuncs),
    and import the corresponding keys and values.

    Args:
      concrete_func: A tf.ConcreteFunction corresponding to a deepfunc.
      name: The name of the deepfunc.

    Raises:
      ValueError: if a table op appears in more than one function graph.
    """
    if name not in self._tables:
      return
    # Index every node in the main graph and in each library function so
    # table ops can be located by name.
    all_nodes = dict(
        main={n.name: n for n in concrete_func.graph.as_graph_def().node})
    for func_def in concrete_func.graph.as_graph_def().library.function:
      all_nodes[func_def.signature.name] = {
          n.name: n for n in func_def.node_def
      }
    for table_name, (table_keys, table_values) in self._tables[name].items():
      table_op = None
      for nodes in all_nodes.values():
        if table_name in nodes:
          if table_op is not None:
            raise ValueError(f"Duplicate table op found for {table_name}")
          table_op = nodes[table_name]
      logging.info("Initialising table for Op `%s`", table_name)
      # Recreate the handle via its shared_name and import the saved entries.
      table_handle_name = table_op.attr["shared_name"].s  # pytype: disable=attribute-error
      table_handle = tf.raw_ops.HashTableV2(
          key_dtype=table_keys.dtype,
          value_dtype=table_values.dtype,
          shared_name=table_handle_name)
      tf.raw_ops.LookupTableImportV2(
          table_handle=table_handle, keys=table_keys, values=table_values)
    self._initialized_tables[name] = self._tables.pop(name)  # Only init once.

  def _make_permissive_function(self, name: str) -> Callable[..., Any]:
    """Create a permissive version of a function in the SavedModel.

    Args:
      name: name of the function to wrap.

    Returns:
      A `_Function` that filters its arguments down to the concrete
      function's canonical input signature before calling it.

    Raises:
      ValueError: if the name is unknown, if no concrete function can be
        found, or if multiple distinct concrete signatures exist.
    """
    if name not in self.signatures:
      # Fix: the second literal previously lacked the f-prefix, so the raw
      # text "{self.signatures}" was printed instead of the actual options.
      raise ValueError(f"No function named {name} in SavedModel, "
                       f"options are {self.signatures}")
    tf_func = getattr(self.model, name)
    if hasattr(tf_func, "concrete_functions"):
      # tf.RestoredFunction
      concrete_func, = tf_func.concrete_functions  # Expect only one.
    elif hasattr(tf_func, "_list_all_concrete_functions"):
      # tf.Function
      all_concrete_funcs = tf_func._list_all_concrete_functions()  # pylint: disable=protected-access
      all_concrete_signatures = [
          f.structured_input_signature for f in all_concrete_funcs
      ]
      # This is necessary as tf.saved_model.save can force a retrace on
      # tf.Function, resulting in another concrete function with identical
      # signature.
      unique_concrete_signatures = {
          tuple(tree.flatten_with_path(sig)) for sig in all_concrete_signatures
      }
      if len(unique_concrete_signatures) != 1:
        raise ValueError(
            "Expected exactly one unique concrete_function signature, found "
            f"the following: {all_concrete_signatures}")
      concrete_func = all_concrete_funcs[0]
    else:
      raise ValueError(f"No concrete functions found on {tf_func}")
    self._maybe_init_tables(concrete_func, name)

    def func(*args, **kwargs):
      bound_args = self.signatures[name].bind(*args, **kwargs)
      canonical_args = concrete_func.structured_input_signature
      flat_bound_args = tree.flatten_with_path(
          (bound_args.args, bound_args.kwargs))
      flat_canonical_args = tree.flatten_with_path(canonical_args)

      # Check for missing arguments.
      flat_bound_args_dict = dict(flat_bound_args)
      for arg_path, arg_spec in flat_canonical_args:
        if arg_path in flat_bound_args_dict and arg_spec is None:
          arg_value = flat_bound_args_dict[arg_path]
          if arg_value is not None:
            logging.log_first_n(
                logging.WARNING,
                "Received unexpected argument `%s` for path %s, replaced with "
                "None.",
                20,
                arg_value, arg_path)
          flat_bound_args_dict[arg_path] = None

      # Filter out extraneous arguments and dictionary keys.
      flat_canonical_args_dict = dict(flat_canonical_args)
      filtered_flat_bound_args = {
          arg_path: arg_value
          for arg_path, arg_value in flat_bound_args_dict.items()
          if arg_path in flat_canonical_args_dict
      }
      full_flat_bound_args = [
          filtered_flat_bound_args.get(arg_path, None)
          for arg_path, _ in flat_canonical_args
      ]
      filtered_args, filtered_kwargs = tree.unflatten_as(
          canonical_args, full_flat_bound_args)
      return tf_func(*filtered_args, **filtered_kwargs)

    return _Function(
        func,
        copy.deepcopy(self.signatures[name]),
        copy.deepcopy(concrete_func.structured_input_signature),
    )
# Changes should be pushed back to internal version.
# LINT.ThenChange(//internal/permissive_model.py)
|
meltingpot-main
|
meltingpot/utils/policies/permissive_model.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for constructing policies."""
import abc
from typing import Callable
import dm_env
from meltingpot.utils.policies import policy
class PolicyFactory(metaclass=abc.ABCMeta):
  """Factory for producing instances of a specific policy."""

  def __init__(
      self,
      *,
      timestep_spec: dm_env.TimeStep,
      action_spec: dm_env.specs.DiscreteArray,
      builder: Callable[[], policy.Policy],
  ) -> None:
    """Initializes the object.

    Args:
      timestep_spec: spec of the timestep expected by the policy.
      action_spec: spec of the action returned by the policy.
      builder: callable that builds the policy.
    """
    self._builder = builder
    self._timestep_spec = timestep_spec
    self._action_spec = action_spec

  def timestep_spec(self) -> dm_env.TimeStep:
    """Returns the spec of the timestep the policy consumes."""
    return self._timestep_spec

  def action_spec(self) -> dm_env.specs.DiscreteArray:
    """Returns the spec of the action the policy produces."""
    return self._action_spec

  def build(self) -> policy.Policy:
    """Returns a newly built policy for the bot."""
    return self._builder()
|
meltingpot-main
|
meltingpot/utils/policies/policy_factory.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory class for building scenarios."""
from collections.abc import Collection, Mapping, Sequence
from typing import Callable, Optional
import dm_env
import immutabledict
from meltingpot.utils.policies import policy_factory
from meltingpot.utils.scenarios import scenario as scenario_lib
from meltingpot.utils.substrates import substrate as substrate_lib
from meltingpot.utils.substrates import substrate_factory
SubstrateTransform = Callable[[substrate_lib.Substrate],
substrate_lib.Substrate]
class ScenarioFactory:
  """Constructs populations of bots."""

  def __init__(
      self,
      *,
      substrate: substrate_factory.SubstrateFactory,
      bots: Mapping[str, policy_factory.PolicyFactory],
      bots_by_role: Mapping[str, Collection[str]],
      roles: Sequence[str],
      is_focal: Sequence[bool],
      permitted_observations: Collection[str],
  ) -> None:
    """Initializes the instance.

    Args:
      substrate: the factory for the substrate underlying the scenario.
      bots: the factory for the policies underlying the background population.
      bots_by_role: dict mapping role to bot names that can fill it.
      roles: specifies which role should fill the corresponding player slot.
      is_focal: which player slots are allocated to focal players.
      permitted_observations: the substrate observation keys permitted to be
        exposed by the scenario to focal agents.

    Raises:
      ValueError: if `roles` and `is_focal` differ in length.
    """
    if len(roles) != len(is_focal):
      raise ValueError('roles and is_focal must be the same length')
    self._substrate = substrate
    self._bots = immutabledict.immutabledict(bots)
    self._roles = tuple(roles)
    self._is_focal = tuple(is_focal)
    role_to_names = {
        role: tuple(names) for role, names in bots_by_role.items()
    }
    self._bots_by_role = immutabledict.immutabledict(role_to_names)
    self._permitted_observations = frozenset(permitted_observations)

  def num_focal_players(self) -> int:
    """Returns the number of players the scenario supports."""
    return sum(1 for focal in self._is_focal if focal)

  def focal_player_roles(self) -> Sequence[str]:
    """Returns the roles of the focal players."""
    return tuple(
        role for role, focal in zip(self._roles, self._is_focal) if focal)

  def timestep_spec(self) -> dm_env.TimeStep:
    """Returns spec of timestep sent to a single focal player."""
    full_spec = self._substrate.timestep_spec()
    # Hide every observation key not explicitly permitted.
    visible = {
        key: spec for key, spec in full_spec.observation.items()
        if key in self._permitted_observations
    }
    return full_spec._replace(
        observation=immutabledict.immutabledict(visible))

  def action_spec(self) -> dm_env.specs.DiscreteArray:
    """Returns spec of action expected from a single focal player."""
    return self._substrate.action_spec()

  def _build_bots(self):
    """Builds one fresh policy per bot in the background population."""
    return {name: factory.build() for name, factory in self._bots.items()}

  def build(self) -> scenario_lib.Scenario:
    """Builds the scenario.

    Returns:
      The constructed scenario.
    """
    return scenario_lib.build_scenario(
        substrate=self._substrate.build(self._roles),
        bots=self._build_bots(),
        bots_by_role=self._bots_by_role,
        roles=self._roles,
        is_focal=self._is_focal,
        permitted_observations=self._permitted_observations)

  def build_transformed(
      self, substrate_transform: Optional[SubstrateTransform] = None
  ) -> scenario_lib.Scenario:
    """Builds the scenario with a transformed substrate.

    This method is designed to allow the addition of a wrapper to the
    underlying substrate for training purposes. It should not be used during
    evaluation. The observations will be unrestricted, and the timestep spec
    of the returned scenario will not match self.timestep_spec().

    Args:
      substrate_transform: transform to apply to underlying substrate.

    Returns:
      The constructed scenario.
    """
    substrate = self._substrate.build(self._roles)
    if substrate_transform:
      substrate = substrate_transform(substrate)
    # Expose every key present in any player's observation spec.
    all_observations = frozenset().union(*substrate.observation_spec())
    return scenario_lib.build_scenario(
        substrate=substrate,
        bots=self._build_bots(),
        bots_by_role=self._bots_by_role,
        roles=self._roles,
        is_focal=self._is_focal,
        permitted_observations=all_observations)
|
meltingpot-main
|
meltingpot/utils/scenarios/scenario_factory.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of scenarios."""
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
import immutabledict
from meltingpot.utils.policies import policy
from meltingpot.utils.scenarios import population
from meltingpot.utils.scenarios import scenario as scenario_utils
from meltingpot.utils.substrates import substrate as substrate_lib
def _track(source, fields):
destination = []
for field in fields:
getattr(source, field).subscribe(
on_next=destination.append,
on_error=lambda e: destination.append(type(e)),
on_completed=lambda: destination.append('DONE'),
)
return destination
@parameterized.parameters(
    ((), (), (), ()),
    (('a',), (True,), ('a',), ()),
    (('a',), (False,), (), ('a',)),
    (('a', 'b', 'c'), (True, True, False), ('a', 'b'), ('c',)),
    (('a', 'b', 'c'), (False, True, False), ('b',), ('a', 'c')),
)
class PartitionMergeTest(parameterized.TestCase):
  """Checks that _partition and _merge are inverses of each other."""

  def test_partition(self, merged, is_focal, *expected):
    result = scenario_utils._partition(merged, is_focal)
    self.assertEqual(result, expected)

  def test_merge(self, expected, is_focal, *partitions):
    result = scenario_utils._merge(*partitions, is_focal)
    self.assertEqual(result, expected)
class ScenarioWrapperTest(absltest.TestCase):
  """Integration-style test of Scenario over a fully mocked substrate."""

  def test_scenario(self):
    """Checks specs, stepping, bot wiring, and all observable streams."""
    # Substrate mock: 4 players. Focal slots are 0 and 2 (see is_focal
    # below); the 'not_ok' observation key must be hidden from focal players.
    substrate = mock.Mock(spec_set=substrate_lib.Substrate)
    substrate.reset.return_value = dm_env.TimeStep(
        step_type=dm_env.StepType.FIRST,
        discount=0,
        reward=(10, 20, 30, 40),
        observation=(
            immutabledict.immutabledict(ok=10, not_ok=100),
            immutabledict.immutabledict(ok=20, not_ok=200),
            immutabledict.immutabledict(ok=30, not_ok=300),
            immutabledict.immutabledict(ok=40, not_ok=400),
        ),
    )
    substrate.step.return_value = dm_env.transition(
        reward=(11, 21, 31, 41),
        observation=(
            immutabledict.immutabledict(ok=11, not_ok=101),
            immutabledict.immutabledict(ok=21, not_ok=201),
            immutabledict.immutabledict(ok=31, not_ok=301),
            immutabledict.immutabledict(ok=41, not_ok=401),
        ),
    )
    substrate.events.return_value = (
        mock.sentinel.event_0, mock.sentinel.event_1)
    substrate.action_spec.return_value = tuple(
        f'action_spec_{n}' for n in range(4)
    )
    substrate.observation_spec.return_value = tuple(
        immutabledict.immutabledict(
            ok=f'ok_spec_{n}', not_ok=f'not_ok_spec_{n}')
        for n in range(4)
    )
    substrate.reward_spec.return_value = tuple(
        f'reward_spec_{n}' for n in range(4)
    )
    substrate.observation.return_value = (
        immutabledict.immutabledict(ok=10, not_ok=100),
        immutabledict.immutabledict(ok=20, not_ok=200),
        immutabledict.immutabledict(ok=30, not_ok=300),
        immutabledict.immutabledict(ok=40, not_ok=400),
    )
    # Two bots fill background slots 1 and 3; each emits a fixed action
    # (10 and 11 respectively) and a constant recurrent state.
    bots = {}
    for n in range(2):
      bot = mock.Mock(spec_set=policy.Policy)
      bot.initial_state.return_value = f'bot_state_{n}'
      bot.step.return_value = (n + 10, f'bot_state_{n}')
      bots[f'bot_{n}'] = bot
    background_population = population.Population(
        policies=bots,
        names_by_role={'role_0': {'bot_0'}, 'role_1': {'bot_1'}},
        roles=['role_0', 'role_1'],
    )
    with scenario_utils.Scenario(
        substrate=substrate_lib.Substrate(substrate),
        background_population=background_population,
        is_focal=[True, False, True, False],
        permitted_observations={'ok'}) as scenario:
      # Record everything the scenario, background, and substrate streams
      # emit so their content and ordering can be asserted after close()
      # (exiting the `with` completes the streams, appending 'DONE').
      observables = scenario.observables()
      received = {
          'base': _track(observables, ['events', 'action', 'timestep']),
          'background': _track(observables.background, ['action', 'timestep']),
          'substrate': _track(
              observables.substrate, ['events', 'action', 'timestep']),
      }
      action_spec = scenario.action_spec()
      observation_spec = scenario.observation_spec()
      reward_spec = scenario.reward_spec()
      observation = scenario.observation()
      initial_timestep = scenario.reset()
      step_timestep = scenario.step([0, 1])
    # Focal players see only slots 0 and 2, restricted to the 'ok' key.
    with self.subTest(name='action_spec'):
      self.assertEqual(action_spec, ('action_spec_0', 'action_spec_2'))
    with self.subTest(name='observation_spec'):
      self.assertEqual(observation_spec,
                       (immutabledict.immutabledict(ok='ok_spec_0'),
                        immutabledict.immutabledict(ok='ok_spec_2')))
    with self.subTest(name='reward_spec'):
      self.assertEqual(reward_spec, ('reward_spec_0', 'reward_spec_2'))
    with self.subTest(name='observation'):
      expected = (
          immutabledict.immutabledict(ok=10),
          immutabledict.immutabledict(ok=30),
      )
      self.assertEqual(observation, expected)
    with self.subTest(name='events'):
      self.assertEmpty(scenario.events())
    with self.subTest(name='initial_timestep'):
      expected = dm_env.TimeStep(
          step_type=dm_env.StepType.FIRST,
          discount=0,
          reward=(10, 30),
          observation=(
              immutabledict.immutabledict(ok=10),
              immutabledict.immutabledict(ok=30),
          ),
      )
      self.assertEqual(initial_timestep, expected)
    with self.subTest(name='step_timestep'):
      expected = dm_env.transition(
          reward=(11, 31),
          observation=(
              immutabledict.immutabledict(ok=11),
              immutabledict.immutabledict(ok=31),
          ),
      )
      self.assertEqual(step_timestep, expected)
    # The substrate receives the merged action: focal (0, 1) interleaved
    # with bot actions (10, 11) into slots (0, 10, 1, 11).
    with self.subTest(name='substrate_step'):
      substrate.step.assert_called_once_with((0, 10, 1, 11))
    with self.subTest(name='bot_0_step'):
      actual = bots['bot_0'].step.call_args_list[0]
      expected = mock.call(
          timestep=dm_env.TimeStep(
              step_type=dm_env.StepType.FIRST,
              discount=0,
              reward=20,
              observation=immutabledict.immutabledict(ok=20, not_ok=200),
          ),
          prev_state='bot_state_0')
      self.assertEqual(actual, expected)
    with self.subTest(name='bot_1_step'):
      actual = bots['bot_1'].step.call_args_list[0]
      expected = mock.call(
          timestep=dm_env.TimeStep(
              step_type=dm_env.StepType.FIRST,
              discount=0,
              reward=40,
              observation=immutabledict.immutabledict(ok=40, not_ok=400),
          ),
          prev_state='bot_state_1')
      self.assertEqual(actual, expected)
    with self.subTest(name='base_observables'):
      expected = [
          dm_env.TimeStep(
              step_type=dm_env.StepType.FIRST,
              discount=0,
              reward=(10, 30),
              observation=(
                  immutabledict.immutabledict(ok=10),
                  immutabledict.immutabledict(ok=30),
              ),
          ),
          [0, 1],
          dm_env.transition(
              reward=(11, 31),
              observation=(
                  immutabledict.immutabledict(ok=11),
                  immutabledict.immutabledict(ok=31),
              ),
          ),
          'DONE',
          'DONE',
          'DONE',
      ]
      self.assertEqual(received['base'], expected)
    with self.subTest(name='substrate_observables'):
      expected = [
          dm_env.TimeStep(
              step_type=dm_env.StepType.FIRST,
              discount=0,
              reward=(10, 20, 30, 40),
              observation=(
                  immutabledict.immutabledict(ok=10, not_ok=100),
                  immutabledict.immutabledict(ok=20, not_ok=200),
                  immutabledict.immutabledict(ok=30, not_ok=300),
                  immutabledict.immutabledict(ok=40, not_ok=400),
              ),
          ),
          mock.sentinel.event_0,
          mock.sentinel.event_1,
          (0, 10, 1, 11),
          dm_env.transition(
              reward=(11, 21, 31, 41),
              observation=(
                  immutabledict.immutabledict(ok=11, not_ok=101),
                  immutabledict.immutabledict(ok=21, not_ok=201),
                  immutabledict.immutabledict(ok=31, not_ok=301),
                  immutabledict.immutabledict(ok=41, not_ok=401),
              ),
          ),
          mock.sentinel.event_0,
          mock.sentinel.event_1,
          'DONE',
          'DONE',
          'DONE',
      ]
      self.assertEqual(received['substrate'], expected)
    with self.subTest(name='background_observables'):
      expected = [
          dm_env.TimeStep(
              step_type=dm_env.StepType.FIRST,
              discount=0,
              reward=(20, 40),
              observation=(
                  immutabledict.immutabledict(ok=20, not_ok=200),
                  immutabledict.immutabledict(ok=40, not_ok=400),
              ),
          ),
          (10, 11),
          dm_env.transition(
              reward=(21, 41),
              observation=(
                  immutabledict.immutabledict(ok=21, not_ok=201),
                  immutabledict.immutabledict(ok=41, not_ok=401),
              ),
          ),
          'DONE',
          'DONE',
      ]
      self.assertEqual(received['background'], expected)
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/scenarios/scenario_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scenario factory."""
import concurrent
import random
import threading
from typing import Callable, Collection, List, Mapping, Sequence
import chex
import dm_env
from meltingpot.utils.policies import policy as policy_lib
import reactivex
from reactivex import subject
def _step_fn(policy: policy_lib.Policy,
             lock: threading.Lock) -> Callable[[dm_env.TimeStep], int]:
  """Threadsafe stateful step function where the state is encapsulated.

  The policy's recurrent state is held in a closure cell; every access to
  the policy (initial_state and step) happens under `lock`, so one policy
  instance can safely back several step functions.

  Args:
    policy: the underlying policy to use.
    lock: a lock that controls access to the policy.

  Returns:
    A step function that returns an action in response to a timestep.
  """
  with lock:
    state_cell = [policy.initial_state()]

  def step(timestep: dm_env.TimeStep) -> int:
    with lock:
      action, next_state = policy.step(
          timestep=timestep, prev_state=state_cell[0])
      state_cell[0] = next_state
    return action

  return step
@chex.dataclass(frozen=True)  # works with tree.
class PopulationObservables:
  """Observables for a population.

  Attributes:
    names: emits the names of the sampled population on a reset.
    action: emits actions sent to the substrate by the population.
    timestep: emits timesteps sent from the substrate to the population.
  """
  names: reactivex.Observable[Sequence[str]]
  action: reactivex.Observable[Sequence[int]]
  timestep: reactivex.Observable[dm_env.TimeStep]
class Population:
  """A population of policies to use in a scenario."""

  def __init__(
      self,
      *,
      policies: Mapping[str, policy_lib.Policy],
      names_by_role: Mapping[str, Collection[str]],
      roles: Sequence[str]) -> None:
    """Initializes the population.

    Args:
      policies: the policies to sample from (with replacement) each episode.
        Will be closed when the Population is closed.
      names_by_role: dict mapping role to bot names that can fill it.
      roles: specifies which role should fill the corresponding player slot.
    """
    self._policies = dict(policies)
    self._names_by_role = {
        role: tuple(set(names)) for role, names in names_by_role.items()}
    self._roles = tuple(roles)
    # One lock per policy: the same policy instance may be sampled into
    # several player slots, so concurrent step calls must be serialized.
    self._locks = {name: threading.Lock() for name in self._policies}
    # NOTE(review): relies on `concurrent.futures` being importable via the
    # bare `import concurrent` at the top of this file -- verify.
    self._executor = concurrent.futures.ThreadPoolExecutor(
        max_workers=len(roles))
    self._step_fns: List[Callable[[dm_env.TimeStep], int]] = []
    self._action_futures: List[concurrent.futures.Future[int]] = []
    self._names_subject = subject.Subject()
    self._action_subject = subject.Subject()
    self._timestep_subject = subject.Subject()
    self._observables = PopulationObservables(  # pylint: disable=unexpected-keyword-arg
        names=self._names_subject,
        action=self._action_subject,
        timestep=self._timestep_subject,
    )

  def close(self):
    """Closes the population."""
    # Cancel any in-flight bot steps before tearing down the executor.
    for future in self._action_futures:
      future.cancel()
    self._executor.shutdown(wait=False)
    for policy in self._policies.values():
      policy.close()
    # Complete the observable streams so subscribers see termination.
    self._names_subject.on_completed()
    self._action_subject.on_completed()
    self._timestep_subject.on_completed()

  def _sample_names(self) -> Sequence[str]:
    """Returns a sample of policy names for the population."""
    # Each role is filled independently, uniformly at random, with
    # replacement across slots.
    return [random.choice(self._names_by_role[role]) for role in self._roles]

  def reset(self) -> None:
    """Resamples the population."""
    names = self._sample_names()
    self._names_subject.on_next(names)
    # Fresh step functions reinitialize each sampled policy's state.
    self._step_fns = [
        _step_fn(policy=self._policies[name], lock=self._locks[name])
        for name in names
    ]
    for future in self._action_futures:
      future.cancel()
    self._action_futures.clear()

  def send_timestep(self, timestep: dm_env.TimeStep) -> None:
    """Sends timestep to population for asynchronous processing.

    Args:
      timestep: The substrate timestep for the population.

    Raises:
      RuntimeError: previous action has not been awaited.
    """
    if self._action_futures:
      raise RuntimeError('Previous action not retrieved.')
    self._timestep_subject.on_next(timestep)
    # Fan the per-player slices of the timestep out to the worker pool.
    for n, step_fn in enumerate(self._step_fns):
      bot_timestep = timestep._replace(
          observation=timestep.observation[n], reward=timestep.reward[n])
      future = self._executor.submit(step_fn, bot_timestep)
      self._action_futures.append(future)

  def await_action(self) -> Sequence[int]:
    """Waits for the population action in response to last timestep.

    Returns:
      The action for the population.

    Raises:
      RuntimeError: no timestep has been sent.
    """
    if not self._action_futures:
      raise RuntimeError('No timestep sent.')
    actions = tuple(future.result() for future in self._action_futures)
    self._action_futures.clear()
    self._action_subject.on_next(actions)
    return actions

  def observables(self) -> PopulationObservables:
    """Returns the observables for the population."""
    return self._observables
|
meltingpot-main
|
meltingpot/utils/scenarios/population.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
meltingpot-main
|
meltingpot/utils/scenarios/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scenario class."""
from collections.abc import Collection, Iterable, Mapping, Sequence
from typing import Any, TypeVar
import chex
import dm_env
import immutabledict
from meltingpot.utils.policies import policy
from meltingpot.utils.scenarios import population
from meltingpot.utils.substrates import substrate as substrate_lib
from meltingpot.utils.substrates.wrappers import observables
import numpy as np
import reactivex
from reactivex import subject
# Generic element type used by the partition/merge/restrict helpers below.
T = TypeVar('T')
def _restrict_observation(
    observation: Mapping[str, T],
    permitted_observations: Collection[str],
) -> Mapping[str, T]:
  """Returns a copy of `observation` holding only the permitted keys.

  Args:
    observation: mapping from observation name to value.
    permitted_observations: the observation names allowed to pass through.

  Returns:
    An immutable mapping of the permitted entries, preserving the input's
    iteration order.
  """
  filtered = {
      name: value
      for name, value in observation.items()
      if name in permitted_observations
  }
  return immutabledict.immutabledict(filtered)
def _restrict_observations(
    observations: Iterable[Mapping[str, T]],
    permitted_observations: Collection[str],
) -> Sequence[Mapping[str, T]]:
  """Applies _restrict_observation to each observation in turn.

  Args:
    observations: per-player observation mappings.
    permitted_observations: the observation names allowed to pass through.

  Returns:
    A tuple of restricted observations, one per input observation.
  """
  restricted = []
  for observation in observations:
    restricted.append(
        _restrict_observation(observation, permitted_observations))
  return tuple(restricted)
def _partition(
    values: Sequence[T],
    is_focal: Sequence[bool],
) -> tuple[Sequence[T], Sequence[T]]:
  """Splits `values` into (focal, background) according to `is_focal`.

  Args:
    values: the per-player values to split.
    is_focal: for each slot, True if the slot belongs to a focal player.

  Returns:
    A pair (focal_values, background_values), each preserving input order.
  """
  tagged = tuple(zip(is_focal, values))
  focal = tuple(value for flag, value in tagged if flag)
  background = tuple(value for flag, value in tagged if not flag)
  return focal, background
def _merge(
    focal_values: Sequence[T],
    background_values: Sequence[T],
    is_focal: Sequence[bool],
) -> Sequence[T]:
  """Interleaves focal and background values back into player-slot order.

  Args:
    focal_values: values for the focal slots, in order.
    background_values: values for the background slots, in order.
    is_focal: for each slot, True if the slot belongs to a focal player.

  Returns:
    A tuple with one value per slot, drawn from the matching sequence.
  """
  sources = {True: iter(focal_values), False: iter(background_values)}
  merged = []
  for focal in is_focal:
    merged.append(next(sources[bool(focal)]))
  return tuple(merged)
@chex.dataclass(frozen=True)  # works with tree.
class ScenarioObservables(substrate_lib.SubstrateObservables):
  """Observables for a Scenario.

  Extends the substrate observables with background-population and raw
  substrate views so callers can observe either perspective.

  Attributes:
    action: emits actions sent to the scenario from (focal) players.
    timestep: emits timesteps sent from the scenario to (focal) players.
    events: will never emit any events since things like player index are hard
      to interpret for a Scenario. Use substrate.events instead.
    dmlab2d: will never emit any events since things like player index are hard
      to interpret for a Scenario. Use substrate.dmlab2d instead.
    background: observables from the perspective of the background players.
    substrate: observables for the underlying substrate.
  """
  background: population.PopulationObservables
  substrate: substrate_lib.SubstrateObservables
class Scenario(substrate_lib.Substrate):
  """A substrate where a number of player slots are filled by bots."""

  def __init__(
      self,
      substrate: substrate_lib.Substrate,
      background_population: population.Population,
      is_focal: Sequence[bool],
      permitted_observations: Collection[str]) -> None:
    """Initializes the scenario.

    Args:
      substrate: the substrate to add bots to. Will be closed with the scenario.
      background_population: the background population to sample bots from. Will
        be closed with the scenario.
      is_focal: which player slots are allocated to focal players.
      permitted_observations: the substrate observation keys permitted to be
        exposed by the scenario to focal agents.

    Raises:
      ValueError: if is_focal does not match the substrate's player count.
    """
    num_players = len(substrate.action_spec())
    if len(is_focal) != num_players:
      raise ValueError(f'is_focal is length {len(is_focal)} but substrate is '
                       f'{num_players}-player.')
    self._substrate = substrate
    self._background_population = background_population
    self._is_focal = is_focal
    self._permitted_observations = frozenset(permitted_observations)
    self._focal_action_subject = subject.Subject()
    self._focal_timestep_subject = subject.Subject()
    # NOTE(review): these two subjects appear unused (background observables
    # come from the population) and are never completed in close() -- verify.
    self._background_action_subject = subject.Subject()
    self._background_timestep_subject = subject.Subject()
    self._events_subject = subject.Subject()
    # Scenario-level dmlab2d streams never emit (see ScenarioObservables).
    self._dmlab2d_observables = observables.Lab2dObservables(
        action=reactivex.empty(),
        events=reactivex.empty(),
        timestep=reactivex.empty(),
    )
    self._substrate_observables = self._substrate.observables()
    self._observables = ScenarioObservables(  # pylint: disable=unexpected-keyword-arg
        action=self._focal_action_subject,
        events=self._events_subject,
        timestep=self._focal_timestep_subject,
        background=self._background_population.observables(),
        substrate=self._substrate_observables,
        dmlab2d=self._dmlab2d_observables,
    )

  def close(self) -> None:
    """See base class."""
    # Population close completes the background streams; only the
    # scenario-level subjects are completed here.
    self._background_population.close()
    self._substrate.close()
    self._focal_action_subject.on_completed()
    self._focal_timestep_subject.on_completed()
    self._events_subject.on_completed()

  def _await_full_action(self, focal_action: Sequence[int]) -> Sequence[int]:
    """Returns full action after awaiting bot actions."""
    self._focal_action_subject.on_next(focal_action)
    background_action = self._background_population.await_action()
    return _merge(focal_action, background_action, self._is_focal)

  def _split_timestep(
      self, timestep: dm_env.TimeStep
  ) -> tuple[dm_env.TimeStep, dm_env.TimeStep]:
    """Splits multiplayer timestep as needed by agents and bots."""
    focal_rewards, background_rewards = _partition(timestep.reward,
                                                   self._is_focal)
    focal_observations, background_observations = _partition(
        timestep.observation, self._is_focal)
    # Only focal observations are filtered; bots see everything.
    focal_observations = _restrict_observations(focal_observations,
                                                self._permitted_observations)
    focal_timestep = timestep._replace(
        reward=focal_rewards, observation=focal_observations)
    background_timestep = timestep._replace(
        reward=background_rewards, observation=background_observations)
    return focal_timestep, background_timestep

  def _send_full_timestep(self, timestep: dm_env.TimeStep) -> dm_env.TimeStep:
    """Returns focal timestep and sends background timestep to bots."""
    focal_timestep, background_timestep = self._split_timestep(timestep)
    self._background_population.send_timestep(background_timestep)
    self._focal_timestep_subject.on_next(focal_timestep)
    return focal_timestep

  def reset(self) -> dm_env.TimeStep:
    """See base class."""
    timestep = self._substrate.reset()
    self._background_population.reset()
    focal_timestep = self._send_full_timestep(timestep)
    for event in self.events():
      self._events_subject.on_next(event)
    return focal_timestep

  def step(self, action: Sequence[int]) -> dm_env.TimeStep:
    """See base class."""
    action = self._await_full_action(focal_action=action)
    timestep = self._substrate.step(action)
    # A FIRST timestep means the substrate auto-restarted: resample bots.
    if timestep.step_type.first():
      self._background_population.reset()
    focal_timestep = self._send_full_timestep(timestep)
    for event in self.events():
      self._events_subject.on_next(event)
    return focal_timestep

  def observation(self) -> Sequence[Mapping[str, np.ndarray]]:
    """Returns the focal players' observations, restricted to permitted keys."""
    observations = self._substrate.observation()
    focal_observations, _ = _partition(observations, self._is_focal)
    focal_observations = _restrict_observations(focal_observations,
                                                self._permitted_observations)
    return focal_observations

  def events(self) -> Sequence[tuple[str, Any]]:
    """See base class."""
    # Do not emit substrate events as these may not make sense in the context
    # of a scenario (e.g. player indices may have changed).
    return ()

  def action_spec(self) -> Sequence[dm_env.specs.DiscreteArray]:
    """See base class."""
    action_spec = self._substrate.action_spec()
    focal_action_spec, _ = _partition(action_spec, self._is_focal)
    return focal_action_spec

  def observation_spec(self) -> Sequence[Mapping[str, dm_env.specs.Array]]:
    """See base class."""
    observation_spec = self._substrate.observation_spec()
    focal_observation_spec, _ = _partition(observation_spec, self._is_focal)
    return _restrict_observations(focal_observation_spec,
                                  self._permitted_observations)

  def reward_spec(self) -> Sequence[dm_env.specs.Array]:
    """See base class."""
    reward_spec = self._substrate.reward_spec()
    focal_reward_spec, _ = _partition(reward_spec, self._is_focal)
    return focal_reward_spec

  def discount_spec(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._substrate.discount_spec(*args, **kwargs)

  def list_property(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._substrate.list_property(*args, **kwargs)

  def write_property(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._substrate.write_property(*args, **kwargs)

  def read_property(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._substrate.read_property(*args, **kwargs)

  def observables(self) -> ScenarioObservables:
    """Returns the observables for the scenario."""
    return self._observables
def build_scenario(
    *,
    substrate: substrate_lib.Substrate,
    bots: Mapping[str, policy.Policy],
    bots_by_role: Mapping[str, Collection[str]],
    roles: Sequence[str],
    is_focal: Sequence[bool],
    permitted_observations: Collection[str],
) -> Scenario:
  """Builds the specified scenario.

  Args:
    substrate: the substrate underlying the scenario. Will be closed with the
      scenario.
    bots: the policies underlying the background population. Will be closed when
      the Population is closed.
    bots_by_role: dict mapping role to bot names that can fill it.
    roles: specifies which role should fill the corresponding player slot.
    is_focal: which player slots are allocated to focal players.
    permitted_observations: the substrate observation keys permitted to be
      exposed by the scenario to focal agents. Must be a collection of keys
      (it is converted to a frozenset by Scenario; None is not supported).

  Returns:
    The constructed scenario.

  Raises:
    ValueError: if roles and is_focal differ in length.
  """
  if len(roles) != len(is_focal):
    raise ValueError('roles and is_focal must be the same length.')
  # The background population only fills the non-focal slots.
  background_roles = [role for n, role in enumerate(roles) if not is_focal[n]]
  background_population = population.Population(
      policies=bots,
      names_by_role=bots_by_role,
      roles=background_roles)
  return Scenario(
      substrate=substrate,
      background_population=background_population,
      is_focal=is_focal,
      permitted_observations=permitted_observations)
|
meltingpot-main
|
meltingpot/utils/scenarios/scenario.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for defining substrate specs.
Used to allow substrates to easily define the single-player specs within their
configs.
"""
from typing import Mapping, Optional
import dm_env
import immutabledict
import numpy as np
# Spec for dm_env.TimeStep.step_type: an int64 scalar restricted to the
# valid StepType enum range.
STEP_TYPE = dm_env.specs.BoundedArray(
    shape=(),
    dtype=np.int64,
    minimum=min(dm_env.StepType),
    maximum=max(dm_env.StepType),
    name='step_type',
)
# Discount: a float64 scalar in [0, 1].
DISCOUNT = dm_env.specs.BoundedArray(
    shape=(), dtype=np.float64, minimum=0, maximum=1, name='discount')
# Per-player reward: an unbounded float64 scalar.
REWARD = dm_env.specs.Array(shape=(), dtype=np.float64, name='reward')
# Default single-player observation spec shared by many substrates.
OBSERVATION = immutabledict.immutabledict({
    'READY_TO_SHOOT': dm_env.specs.Array(
        shape=(), dtype=np.float64, name='READY_TO_SHOOT'),
    'RGB': dm_env.specs.Array(shape=(88, 88, 3), dtype=np.uint8, name='RGB'),
    'POSITION': dm_env.specs.Array(shape=(2,), dtype=np.int32, name='POSITION'),
    'ORIENTATION': dm_env.specs.Array(
        shape=(), dtype=np.int32, name='ORIENTATION'),
})
# Template action spec; action() rebuilds it with the right num_values.
_ACTION = dm_env.specs.DiscreteArray(
    num_values=1, dtype=np.int64, name='action')
def float32(*shape: int, name: Optional[str] = None) -> dm_env.specs.Array:
  """Builds the spec for an np.float32 tensor of the given shape.

  Args:
    *shape: the dimensions of the tensor.
    name: optional name for the spec.

  Returns:
    A dm_env Array spec with dtype np.float32.
  """
  spec = dm_env.specs.Array(name=name, shape=shape, dtype=np.float32)
  return spec
def float64(*shape: int, name: Optional[str] = None) -> dm_env.specs.Array:
  """Builds the spec for an np.float64 tensor of the given shape.

  Args:
    *shape: the dimensions of the tensor.
    name: optional name for the spec.

  Returns:
    A dm_env Array spec with dtype np.float64.
  """
  spec = dm_env.specs.Array(name=name, shape=shape, dtype=np.float64)
  return spec
def int32(*shape: int, name: Optional[str] = None) -> dm_env.specs.Array:
  """Builds the spec for an np.int32 tensor of the given shape.

  Args:
    *shape: the dimensions of the tensor.
    name: optional name for the spec.

  Returns:
    A dm_env Array spec with dtype np.int32.
  """
  spec = dm_env.specs.Array(name=name, shape=shape, dtype=np.int32)
  return spec
def int64(*shape: int, name: Optional[str] = None) -> dm_env.specs.Array:
  """Returns the spec for an np.int64 tensor.

  (Fixed: the docstring previously said np.int32, but the spec dtype is
  np.int64.)

  Args:
    *shape: the shape of the tensor.
    name: optional name for the spec.
  """
  return dm_env.specs.Array(shape=shape, dtype=np.int64, name=name)
def action(num_actions: int) -> dm_env.specs.DiscreteArray:
  """Returns the spec for an action.

  Args:
    num_actions: the number of actions that can be taken.
  """
  # Clone the module-level template, keeping its dtype (int64) and name.
  return _ACTION.replace(num_values=num_actions)
def rgb(height: int,
        width: int,
        name: Optional[str] = 'RGB') -> dm_env.specs.Array:
  """Returns the spec for an RGB observation.

  Args:
    height: the height of the observation.
    width: the width of the observation.
    name: optional name for the spec.
  """
  # Reuse the canonical uint8 RGB spec, swapping in the requested shape.
  return OBSERVATION['RGB'].replace(shape=(height, width, 3), name=name)
def world_rgb(ascii_map: str,
              sprite_size: int,
              name: Optional[str] = 'WORLD.RGB') -> dm_env.specs.Array:
  """Returns the spec for a WORLD.RGB observation.

  (Fixed: the argument docs previously described ascii_map as "the height"
  and sprite_size as "the width".)

  Args:
    ascii_map: the ascii-art map of the world, one row per line; its line
      count and line width determine the rendered size.
    sprite_size: the size in pixels of each map cell's sprite, so the
      rendered image is (rows * sprite_size) x (cols * sprite_size).
    name: optional name for the spec.
  """
  lines = ascii_map.strip().split('\n')
  height = len(lines) * sprite_size
  # Width is taken from the first row; zero when the map renders to nothing.
  width = len(lines[0]) * sprite_size if height else 0
  return rgb(height, width, name)
def inventory(num_resources: int,
              name: Optional[str] = 'INVENTORY') -> dm_env.specs.Array:
  """Returns the spec for an INVENTORY observation.

  Args:
    num_resources: the number of resource types in the inventory.
    name: optional name for the spec.

  Returns:
    A float64 vector spec of length num_resources.
  """
  return float64(num_resources, name=name)
def interaction_inventories(
    num_resources: int,
    name: Optional[str] = 'INTERACTION_INVENTORIES') -> dm_env.specs.Array:
  """Returns the spec for an INTERACTION_INVENTORIES observation.

  Args:
    num_resources: the number of resource types in the inventory.
    name: optional name for the spec.

  Returns:
    A float64 spec of shape (2, num_resources). The leading axis of size 2
    presumably holds the two interacting players' inventories -- verify
    against the substrate that emits this observation.
  """
  return float64(2, num_resources, name=name)
def timestep(
    observation_spec: Mapping[str, dm_env.specs.Array]) -> dm_env.TimeStep:
  """Returns the spec for a timestep.

  Args:
    observation_spec: the observation spec. Spec names will be overwritten with
      their key.
  """
  # Rename each spec after its dict key so names and keys always agree.
  observation_spec = immutabledict.immutabledict({
      name: spec.replace(name=name) for name, spec in observation_spec.items()
  })
  return dm_env.TimeStep(
      step_type=STEP_TYPE,
      discount=DISCOUNT,
      reward=REWARD,
      observation=observation_spec,
  )
|
meltingpot-main
|
meltingpot/utils/substrates/specs.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to help with parsing and procedurally generating ascii maps."""
from collections.abc import Mapping, Sequence
from typing import Any, Union
def a_or_b_with_odds(a_descriptor: Union[str, Mapping[str, Any]],
                     b_descriptor: Union[str, Mapping[str, Any]],
                     odds: Sequence[int]) -> Mapping[str, Any]:
  """Builds a map-parser "choice" descriptor weighted between a and b.

  Args:
    a_descriptor: One possibility. May be either a string or a dict that can
      be read by the map parser.
    b_descriptor: The other possibility. May be either a string or a dict that
      can be read by the map parser.
    odds: a pair (count_a, count_b). The parser samples uniformly from the
      weighted list, so a is chosen with probability odds[0] / sum(odds) and
      b with probability odds[1] / sum(odds).

  Returns:
    The dict descriptor that can be used with the map parser to sample either
    a or b at the specified odds.
  """
  num_a, num_b = odds
  weighted_choices = []
  weighted_choices.extend([a_descriptor] * num_a)
  weighted_choices.extend([b_descriptor] * num_b)
  return {"type": "choice", "list": weighted_choices}
|
meltingpot-main
|
meltingpot/utils/substrates/map_helpers.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of commonly used ASCII art shape and helper functions for DMLab2D."""
import colorsys
from typing import Dict, Optional, Tuple, Union
# Type aliases for color tuples (channel values are 0-255 ints).
ColorRGBA = Tuple[int, int, int, int]
ColorRGB = Tuple[int, int, int]
Color = Union[ColorRGB, ColorRGBA]
# Commonly used named RGBA colors.
VEGETAL_GREEN = (100, 120, 0, 255)
LEAF_GREEN = (64, 140, 0, 255)
ALPHA = (0, 0, 0, 0)  # fully transparent
WHITE = (255, 255, 255, 255)
BLACK = (0, 0, 0, 255)
DARK_GRAY = (60, 60, 60, 255)
TREE_BROWN = (128, 92, 0, 255)
DARK_FLAME = (226, 88, 34, 255)
LIGHT_FLAME = (226, 184, 34, 255)
DARK_STONE = (153, 153, 153, 255)
LIGHT_STONE = (204, 204, 204, 255)
def rgb_to_rgba(rgb: ColorRGB, alpha: int = 255) -> ColorRGBA:
  """Extends an RGB color with the given alpha channel (opaque by default)."""
  return tuple(rgb[:3]) + (alpha,)
def scale_color(color_tuple: ColorRGBA, factor: float,
                alpha: Optional[int] = None) -> ColorRGBA:
  """Multiplicatively scales the RGB channels of a color.

  Each RGB channel is multiplied by `factor` and clamped to a maximum of
  255. The alpha channel is replaced by `alpha` when one is given;
  otherwise the input color's own alpha is kept (255 for an RGB input).

  Args:
    color_tuple: the color to scale; a 3-tuple is promoted to RGBA first.
    factor: the factor to multiplicatively scale the RGB channels by.
    alpha: optional replacement value for the alpha channel.

  Returns:
    A new RGBA color tuple with its RGB channels scaled.
  """
  if len(color_tuple) == 3:
    color_tuple = rgb_to_rgba(color_tuple)  # pytype: disable=wrong-arg-types
  new_alpha = color_tuple[-1] if alpha is None else alpha
  channels = [int(min(value * factor, 255)) for value in color_tuple]
  channels[3] = new_alpha
  return tuple(channels)
# LINT.IfChange
def get_palette(color: Color) -> Dict[str, ColorRGBA]:
  """Convert provided color to a palette suitable for the player text shape.

  The overall palette is:

  'x' Transparent
  ',' Black
  'O' Dark gray
  'o' 45% darker color than the base palette color
  '!' 35% darker color than the base palette color
  '&' 25% darker color than the base palette color
  '~' 10% darker color than the base palette color
  '*' The base color of the palette
  '@' 25% lighter color than the base palette color
  '%' A fixed pale blue (178, 206, 234)
  '#' White
  'r' A rotation of the main color: RGB -> RBG
  'R' A 25% lighter color than the rotation of the main color: RGB -> RBG

  Args:
    color (tuple of length 4): Red, Green, Blue, Alpha (transparency).

  Returns:
    palette (dict): maps palette symbols to suitable colors.
  """
  # Kept in sync with the lua palette (see LINT.ThenChange below) -- do not
  # alter values without updating the lua side.
  palette = {
      "*": (color[0], color[1], color[2], 255),
      "&": scale_color(color, 0.75, 255),
      "o": scale_color(color, 0.55, 255),
      "!": scale_color(color, 0.65, 255),
      "~": scale_color(color, 0.9, 255),
      "@": scale_color(color, 1.25, 255),
      "r": (color[0], color[2], color[1], 255),
      "R": scale_color((color[0], color[2], color[1], 255),
                       1.25, 255),
      "%": (178, 206, 234, 255),
      "#": WHITE,
      "O": DARK_GRAY,
      ",": BLACK,
      "x": ALPHA,
  }
  return palette
# LINT.ThenChange(//meltingpot/lua/modules/colors.lua)
def flip_horizontal(sprite: str) -> str:
  """Mirrors an ascii-art sprite left-to-right, row by row."""
  mirrored_rows = [row[::-1] for row in sprite.split("\n")]
  return "\n".join(mirrored_rows)
def flip_vertical(sprite: str) -> str:
  """Return *sprite* with its rows mirrored top-to-bottom.

  The first character (the leading newline of the sprite literals in this
  module) is skipped, and each row in the result keeps a trailing newline.
  """
  rows = sprite[1:].split("\n")
  return "".join(row + "\n" for row in reversed(rows))
def convert_rgb_to_rgba(rgb_tuple: ColorRGB) -> ColorRGBA:
  """Return *rgb_tuple* extended with a fully opaque (255) alpha channel."""
  return tuple(rgb_tuple[:3]) + (255,)
def adjust_color_brightness(
    color_tuple: Union[ColorRGB, ColorRGBA],
    factor: float) -> ColorRGBA:
  """Adjust color brightness by scaling the HSV "value" channel.

  The color is converted to HSV, its value channel is multiplied by
  ``factor``, and the result is converted back to RGB.

  Args:
    color_tuple: RGB or RGBA color with channel values in [0, 255].
    factor: multiplicative factor applied to the HSV value channel.

  Returns:
    An RGBA tuple of ints. The alpha channel is preserved when provided,
    otherwise it defaults to 255 (fully opaque).

  Raises:
    ValueError: if ``color_tuple`` does not have 3 or 4 channels.
      (Previously this case crashed with an UnboundLocalError.)
  """
  if len(color_tuple) == 3:
    alpha = 255
  elif len(color_tuple) == 4:
    alpha = color_tuple[3]
  else:
    raise ValueError(
        f"Expected an RGB or RGBA tuple, got: {color_tuple!r}")
  hue, saturation, value = colorsys.rgb_to_hsv(
      color_tuple[0], color_tuple[1], color_tuple[2])
  red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value * factor)
  return (int(red), int(green), int(blue), int(alpha))
def get_diamond_palette(
    base_color: ColorRGB) -> Dict[str, ColorRGBA]:
  """Return a sprite palette for diamond shapes derived from *base_color*."""
  base_rgba = convert_rgb_to_rgba(base_color)
  return {
      "x": ALPHA,
      "a": (252, 252, 252, 255),
      "b": base_rgba,
      "c": adjust_color_brightness(base_color, 0.25),
      "d": base_rgba,
  }
HD_AVATAR_N = """
xxxxxxxxxxxxxxxx
xxxx*xxxxxxx*xxx
xxxxx*xxxxx*xxxx
xxxxx*&xxx*&xxxx
xxxx@**&@**&@xxx
xx@x@@*&@*&@*x@x
xx@&@@@@@@@@*&*x
xx*x@@@@@@@**x&x
xxxx@@@@@****xxx
xxxxx@******xxxx
xxxxxxooOOOxxxxx
xxxxx*@@@**&xxxx
xxx@@x@@@**x&*xx
xxxx*xOOOOOx*xxx
xxxxxxx&xoxxxxxx
xxxxx@**x@**xxxx
"""
HD_AVATAR_E = """
xxxxxxxxxxxxxxxx
xxxxxx*xxxx*xxxx
xxxxxxx*xx*xxxxx
xxxxxxx*&x&xxxxx
xxxxx@@@@@@*xxxx
xxx@*@@@RRRr*xxx
xxx**&o@R,r,*&xx
xxx@&o@@R,r,&xxx
xxxx@@@*Rrrr&xxx
xxxxxx****o*xxxx
xxxxxx&&OOOxxxxx
xxxxx&*@@**xxxxx
xxxx&&o*@**&xxxx
xxxxoxoOOOO&xxxx
xxxxxxx&xoxxxxxx
xxxxxxx@**@*xxxx
"""
HD_AVATAR_S = """
xxxxxxxxxxxxxxxx
xxxx*xxxxxxx*xxx
xxxxx*xxxxx*xxxx
xxxxx*&xxx*&xxxx
xxxx@@@@@@@@*xxx
xx@x@RRRRRRr*x@x
xx@&@R,RRR,r*&*x
xx*x@R,RRR,r*x&x
xxxx@RRRRrrr*xxx
xxxx@@@ooo***xxx
xxxxxxooOOOxxxxx
xxxxx*@@@**&xxxx
xxx@@x@@@**x&*xx
xxxx*xOOOOOx*xxx
xxxxxxx&xoxxxxxx
xxxxx@**x@**xxxx
"""
HD_AVATAR_W = """
xxxxxxxxxxxxxxxx
xxxxx*xxxx*xxxxx
xxxxxx*xx*xxxxxx
xxxxxx&x*&xxxxxx
xxxxx@@@@***xxxx
xxxx@RRRr**&@&xx
xxx*@,R,r*&@**xx
xxxx@,R,r**&*&xx
xxxx@Rrrr**o&xxx
xxxxx@o@**ooxxxx
xxxxxx&&&ooxxxxx
xxxxxx@@***&xxxx
xxxxx&@@**&&&xxx
xxxxx&OOOO&xoxxx
xxxxxxx&xoxxxxxx
xxxxx@*@**xxxxxx
"""
HD_AVATAR = [HD_AVATAR_N, HD_AVATAR_E, HD_AVATAR_S, HD_AVATAR_W]
HD_AVATAR_N_W_BADGE = """
xxxxxxxxxxxxxxxx
xxxx*xxxxxxx*xxx
xxxxx*xxxxx*xxxx
xxxxx*&xxx*&xxxx
xxxx@**&@**&@xxx
xx@x@@*&@*&@*x@x
xx@&@@@@@@@@*&*x
xx*x@@@@@@@**x&x
xxxx@@@@@****xxx
xxxxx@******xxxx
xxxxxxooOOOxxxxx
xxxxx*@ab**&xxxx
xxx@@x@cd**x&*xx
xxxx*xOOOOOx*xxx
xxxxxxx&xoxxxxxx
xxxxx@**x@**xxxx
"""
HD_AVATAR_E_W_BADGE = """
xxxxxxxxxxxxxxxx
xxxxxx*xxxx*xxxx
xxxxxxx*xx*xxxxx
xxxxxxx*&x&xxxxx
xxxxx@@@@@@*xxxx
xxx@*@@@RRRr*xxx
xxx**&o@R,r,*&xx
xxx@&o@@R,r,&xxx
xxxx@@@*Rrrr&xxx
xxxxxx****o*xxxx
xxxxxx&&OOOxxxxx
xxxxx&*ab**xxxxx
xxxx&&ocd**&xxxx
xxxxoxoOOOO&xxxx
xxxxxxx&xoxxxxxx
xxxxxxx@**@*xxxx
"""
HD_AVATAR_S_W_BADGE = """
xxxxxxxxxxxxxxxx
xxxx*xxxxxxx*xxx
xxxxx*xxxxx*xxxx
xxxxx*&xxx*&xxxx
xxxx@@@@@@@@*xxx
xx@x@RRRRRRr*x@x
xx@&@R,RRR,r*&*x
xx*x@R,RRR,r*x&x
xxxx@RRRRrrr*xxx
xxxx@@@ooo***xxx
xxxxxxooOOOxxxxx
xxxxx*@ab**&xxxx
xxx@@x@cd**x&*xx
xxxx*xOOOOOx*xxx
xxxxxxx&xoxxxxxx
xxxxx@**x@**xxxx
"""
HD_AVATAR_W_W_BADGE = """
xxxxxxxxxxxxxxxx
xxxxx*xxxx*xxxxx
xxxxxx*xx*xxxxxx
xxxxxx&x*&xxxxxx
xxxxx@@@@***xxxx
xxxx@RRRr**&@&xx
xxx*@,R,r*&@**xx
xxxx@,R,r**&*&xx
xxxx@Rrrr**o&xxx
xxxxx@o@**ooxxxx
xxxxxx&&&ooxxxxx
xxxxxx@ab**&xxxx
xxxxx&@cd*&&&xxx
xxxxx&OOOO&xoxxx
xxxxxxx&xoxxxxxx
xxxxx@*@**xxxxxx
"""
HD_AVATAR_W_BADGE = [HD_AVATAR_N_W_BADGE, HD_AVATAR_E_W_BADGE,
HD_AVATAR_S_W_BADGE, HD_AVATAR_W_W_BADGE]
CUTE_AVATAR_N = """
xxxxxxxx
xx*xx*xx
xx****xx
xx&&&&xx
x******x
x&****&x
xx****xx
xx&xx&xx
"""
CUTE_AVATAR_E = """
xxxxxxxx
xx*x*xxx
xx****xx
xx*O*Oxx
x**##*&x
x&****&x
xx****xx
xx&&x&xx
"""
CUTE_AVATAR_S = """
xxxxxxxx
xx*xx*xx
xx****xx
xxO**Oxx
x&*##*&x
x&****&x
xx****xx
xx&xx&xx
"""
CUTE_AVATAR_W = """
xxxxxxxx
xxx*x*xx
xx****xx
xxO*O*xx
x&*##**x
x&****&x
xx****xx
xx&x&&xx
"""
CUTE_AVATAR = [CUTE_AVATAR_N, CUTE_AVATAR_E, CUTE_AVATAR_S, CUTE_AVATAR_W]
CUTE_AVATAR_ALERT_SPRITE = """
xxxxxxxx
xx*xx*xx
xx****xx
x&O**O&x
x&*##*&x
xx****xx
xx****xx
xx&xx&xx
"""
CUTE_AVATAR_ALERT = [CUTE_AVATAR_ALERT_SPRITE] * 4
CUTE_AVATAR_SIT_SPRITE = """
xxxxxxxx
xxxxxxxx
xx*xx*xx
xx****xx
xxO**Oxx
x&*##*&x
x&****BB
xx*&&*bb
"""
CUTE_AVATAR_SIT = [CUTE_AVATAR_SIT_SPRITE] * 4
CUTE_AVATAR_EAT_SPRITE = """
xxxxxxxx
xxxxxxxx
xx*xx*xx
xx****xx
xxO**Oxx
x&*##*&x
x&*BB*&x
xx*bb*xx
"""
CUTE_AVATAR_EAT = [CUTE_AVATAR_EAT_SPRITE] * 4
CUTE_AVATAR_FIRST_BITE_SPRITE = """
xxxxxxxx
xxxxxxxx
xx*xx*xx
xx****xx
xxO**Oxx
x&*BB*&x
x&*bb*&x
xx*&&*xx
"""
CUTE_AVATAR_FIRST_BITE = [CUTE_AVATAR_FIRST_BITE_SPRITE] * 4
CUTE_AVATAR_SECOND_BITE_SPRITE = """
xxxxxxxx
xxxxxxxx
xx*xx*xx
xx****xx
xxO**Oxx
x&*bb*&x
x&****&x
xx*&&*xx
"""
CUTE_AVATAR_SECOND_BITE = [CUTE_AVATAR_SECOND_BITE_SPRITE] * 4
CUTE_AVATAR_LAST_BITE_SPRITE = """
xxxxxxxx
xxxxxxxx
xx*xx*xx
xx****xx
xxO**Oxx
x&*##*&x
x&****&x
xx*&&*xx
"""
CUTE_AVATAR_LAST_BITE = [CUTE_AVATAR_LAST_BITE_SPRITE] * 4
CUTE_AVATAR_W_SHORTS_N = """
xxxxxxxx
xx*xx*xx
xx****xx
xx&&&&xx
x******x
x&****&x
xxabcdxx
xx&xx&xx
"""
CUTE_AVATAR_W_SHORTS_E = """
xxxxxxxx
xx*x*xxx
xx****xx
xx*O*Oxx
x**##*&x
x&****&x
xxabcdxx
xx&&x&xx
"""
CUTE_AVATAR_W_SHORTS_S = """
xxxxxxxx
xx*xx*xx
xx****xx
xxO**Oxx
x&*##*&x
x&****&x
xxabcdxx
xx&xx&xx
"""
CUTE_AVATAR_W_SHORTS_W = """
xxxxxxxx
xxx*x*xx
xx****xx
xxO*O*xx
x&*##**x
x&****&x
xxabcdxx
xx&x&&xx
"""
CUTE_AVATAR_W_SHORTS = [CUTE_AVATAR_W_SHORTS_N, CUTE_AVATAR_W_SHORTS_E,
CUTE_AVATAR_W_SHORTS_S, CUTE_AVATAR_W_SHORTS_W]
PERSISTENCE_PREDATOR_N = """
xxexxexx
xxhhhhxx
xhhhhhhx
shhhhhhs
slhlhlha
aullllua
xauuuuax
xxexxexx
"""
PERSISTENCE_PREDATOR_E = """
xxexxxex
xxsssssx
xshyhhys
shhhhhhh
slhlhlhl
aulllllu
xauuuuua
xxexxxex
"""
PERSISTENCE_PREDATOR_S = """
xxexxexx
xxssssxx
xsyhhysx
shhhhhhs
ahlhlhls
aullllua
xauuuuax
xxexxexx
"""
PERSISTENCE_PREDATOR_W = """
xexxxexx
xsssssxx
syhhyhsx
hhhhhhhs
lhlhlhls
ulllllua
auuuuuax
xexxxexx
"""
PERSISTENCE_PREDATOR = [PERSISTENCE_PREDATOR_N, PERSISTENCE_PREDATOR_E,
PERSISTENCE_PREDATOR_S, PERSISTENCE_PREDATOR_W]
AVATAR_DEFAULT = """
xxxx@@@@@@@@xxxx
xxxx@@@@@@@@xxxx
xxxx@@@@@@@@xxxx
xxxx@@@@@@@@xxxx
xxxx********xxxx
xxxx********xxxx
xx@@**####**@@xx
xx@@**####**@@xx
xx************xx
xx************xx
xx************xx
xx************xx
xxxx**xxxx**xxxx
xxxx**xxxx**xxxx
xxxx**xxxx**xxxx
xxxx**xxxx**xxxx
"""
AVATAR_BIMANUAL = """
xx@@xxxxxxxx@@xx
xx@@xxxxxxxx@@xx
xx@@xx@@@@xx@@xx
xx@@xx@@@@xx@@xx
xx@@xx****xx@@xx
xx@@xx****xx@@xx
xx@@@@####@@@@xx
xx@@@@####@@@@xx
xxxx********xxxx
xxxx********xxxx
xxxx********xxxx
xxxx********xxxx
xxxx**xxxx**xxxx
xxxx**xxxx**xxxx
xxxx**xxxx**xxxx
xxxx**xxxx**xxxx
"""
UNRIPE_BERRY = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxx#xxxxxxxx
xxxxx******xxxxx
xxxx********xxxx
xxx**********xxx
xxxxx******xxxxx
xxxxxx****xxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
BERRY = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxx#xxxxxxx
xxxxxxx##xxxxxxx
xxx****##****xxx
xx************xx
x**************x
x***@**@*******x
xx***@********xx
xxx**********xxx
xxxx********xxxx
xxxxx******xxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
LEGACY_APPLE = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxx##xxxxx
xxxxxxxx##xxxxxx
xxxxxx@##@xxxxxx
xxxxx@@@@@@xxxxx
xxx&&&&&&&&&&xxx
xxx&*&&&&&&&&xxx
xxx&***&&&&&&xxx
xxx**********xxx
xxxxx******xxxxx
xxxxxxx***xxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
HD_APPLE = """
xxxxxxxxxxxxxxxx
xx&&&&xxxxxxxxxx
xxxxoo&xxxxxxxxx
xxxxxxxoxOOxxxxx
xxxxxxxxOOxxxxxx
xxxx@@xxOx@*xxxx
xx@@***O&&***&xx
x@@*#*&O&****&&x
x@*#***&*****&&x
x@*#********&&ox
xx*********&&oxx
xx********&&&oxx
xxx***&&*&&&oxxx
xxxx&ooxx&ooxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
BADGE = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
x#####xxxxxxxxxx
x#####xxxxxxxxxx
x#####xxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
COIN = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxx@###xxxxxx
xxxxx@@@@##xxxxx
xxxx&&&@@@@#xxxx
xxx&&&&&&&@@#xxx
xxx&*&&&&&&&&xxx
xxx&***&&&&&&xxx
xxx**********xxx
xxxx********xxxx
xxxxx******xxxxx
xxxxxx****xxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
WALL = """
&&&&##&&&&&&&&&&
&@@@##@@@@@@@@@@
****##**********
****##**********
################
################
&&&@@@@@@@##@@&&
&&@@@@@@@@##@@@&
**********##****
**********##****
################
################
****##**********
****##**********
@@@@##@@@@@@@@@@
&&&&##@@@@@@@@@@
"""
TILE = """
otooooxoxooootoo
tttooxoooxoottto
ttttxoooooxttttt
tttxtoooootxttto
otxtttoootttxtoo
oxtttttotttttxoo
xootttoootttooxo
ooootoooootoooox
xootttoootttooxo
oxtttttotttttxoo
otxtttoootttxtoo
tttxtoooootxttto
ttttxoooooxttttt
tttooxoooxoottto
otooooxoxooootoo
oooooooxoooooooo
"""
TILE1 = """
otooooxo
tttooxoo
ttttxooo
tttxtooo
otxtttoo
oxttttto
xootttoo
ooootooo
"""
TILE2 = """
xooootoo
oxoottto
ooxttttt
ootxttto
otttxtoo
tttttxoo
otttooxo
ootoooox
"""
BRICK_WALL_NW_CORNER = """
iiiiiiii
iicccccc
iccccccc
iccooooo
iccoobbb
iccobooo
iccoboob
iccobobo
"""
BRICK_WALL_NE_CORNER = """
iiiiiiii
ccccccii
ccccccci
ooooocci
bbboocci
ooobocci
boobocci
obobocci
"""
BRICK_WALL_SE_CORNER = """
obobocci
boobocci
ooobocci
bbboocci
ooooocci
ccccccci
ccccccii
iiiiiiii
"""
BRICK_WALL_SW_CORNER = """
iccobobo
iccoboob
iccobooo
iccoobbb
iccooooo
iccccccc
iicccccc
iiiiiiii
"""
BRICK_WALL_INNER_NW_CORNER = """
oooooooo
oobbobbb
oboooooo
oboobbob
oboboooo
oooboccc
oboboccc
oboooccc
"""
BRICK_WALL_INNER_NE_CORNER = """
oooooooo
bbobbboo
oooooobo
bobboobo
oooobooo
cccooobo
cccobobo
cccobobo
"""
BRICK_WALL_INNER_SW_CORNER = """
oboboccc
oboooccc
oboboccc
oooboooo
oboobobb
oboooooo
oobbbbob
oooooooo
"""
BRICK_WALL_INNER_SE_CORNER = """
cccobobo
cccobooo
cccooobo
oooobobo
bobboobo
oooooobo
bbbobboo
oooooooo
"""
BRICK_WALL_NORTH = """
iiiiiiii
cccccccc
cccccccc
oooooooo
bbbbobbb
oooooooo
bobbbbob
oooooooo
"""
BRICK_WALL_EAST = """
obobocci
ooobocci
obobocci
obooocci
obobocci
obobocci
ooobocci
obobocci
"""
BRICK_WALL_SOUTH = """
oooooooo
bobbbbob
oooooooo
bbbobbbb
oooooooo
cccccccc
cccccccc
iiiiiiii
"""
BRICK_WALL_WEST = """
iccobobo
iccobooo
iccobobo
iccooobo
iccobobo
iccobobo
iccobooo
iccobobo
"""
FILL = """
iiiiiiii
iiiiiiii
iiiiiiii
iiiiiiii
iiiiiiii
iiiiiiii
iiiiiiii
iiiiiiii
"""
TILED_FLOOR_GREY = """
ooo-ooo-
ooo-ooo-
ooo-ooo-
--------
ooo-ooo-
ooo-ooo-
ooo-ooo-
--------
"""
ACORN = """
xxxxxxxx
xxoooxxx
xoooooxx
xo***oxx
xx@*@xxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
GRASS_STRAIGHT = """
********
*@*@****
*@*@****
********
*****@*@
*****@*@
********
********
"""
GRASS_STRAIGHT_N_EDGE = """
****x*x*
*@*@****
*@*@****
********
*****@*@
*****@*@
********
********
"""
GRASS_STRAIGHT_E_EDGE = """
********
*@*@****
*@*@***x
********
*****@*@
*****@*@
*******x
********
"""
GRASS_STRAIGHT_S_EDGE = """
********
*@*@****
*@*@****
********
*****@*@
*****@*@
********
**x*x***
"""
GRASS_STRAIGHT_W_EDGE = """
********
x@*@****
*@*@****
********
x****@*@
*****@*@
x*******
********
"""
GRASS_STRAIGHT_NW_CORNER = """
x***x***
*@*@****
*@*@****
x*******
*****@*@
*****@*@
********
********
"""
GRASS_STRAIGHT_NE_CORNER = """
****x**x
*@*@****
*@*@****
*******x
*****@*@
*****@*@
********
********
"""
GRASS_STRAIGHT_SE_CORNER = """
********
*@*@****
*@*@***x
********
*****@*@
*****@*@
********
***x***x
"""
GRASS_STRAIGHT_SW_CORNER = """
********
*@*@****
*@*@****
x*******
*****@*@
*****@*@
********
x***x***
"""
BUTTON = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xx************xx
xx************xx
xx**########**xx
xx**########**xx
xx**########**xx
xx**########**xx
xx**########**xx
xx**########**xx
xx**########**xx
xx**########**xx
xx************xx
xx************xx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
PLUS_IN_BOX = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xx************xx
xx************xx
xx**##@@@@##**xx
xx**##@@@@##**xx
xx**@@@@@@@@**xx
xx**@@@@@@@@**xx
xx**@@@@@@@@**xx
xx**@@@@@@@@**xx
xx**##@@@@##**xx
xx**##@@@@##**xx
xx************xx
xx************xx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
TREE = """
xx@@@@@@@@@@@@xx
xx@@@@@@@@@@@@xx
xx@@@@@@@@@@@@xx
xx@@@@@@@@@@@@xx
xx@@@@@@@@@@@@xx
xx@@@@@@@@@@@@xx
xxxx@@****@@xxxx
xxxx@@****@@xxxx
xxxxxx****xxxxxx
xxxxxx****xxxxxx
xxxxxx****xxxxxx
xxxxxx****xxxxxx
xxxxxx****xxxxxx
xxxxxx****xxxxxx
xxxxxx****xxxxxx
xxxxxx****xxxxxx
"""
POTATO_PATCH = """
xx@@xxxxxxxxxx@@
xx@@xxxxxxxxxx@@
xxxx@@xxxxxxxx@@
xxxx@@xxxxxx@@xx
xxxxxx@@@@xx@@xx
xxxxxx@@@@xx@@xx
@@@@@@****@@xxxx
@@@@@@****@@xxxx
xxxx@@****@@xxxx
xxxx@@****@@xxxx
xx@@xx@@@@xx@@xx
xx@@xx@@@@xx@@@@
@@xxxxxx@@xx@@@@
@@xxxxxx@@xxxxxx
@@xxxxxxxx@@xxxx
@@xxxxxxxx@@xxxx
"""
FIRE = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxx&&&&xx&&xx
xxxxxx&&&&xx&&xx
xx&&xx****xx**xx
xx&&xx****xx**xx
xx************xx
xx************xx
xx@@@@****@@@@xx
xx@@@@****@@@@xx
xxxx@@@@@@@@xxxx
xxxx@@@@@@@@xxxx
xx@@@@xxxx@@@@xx
xx@@@@xxxx@@@@xx
"""
STONE_QUARRY = """
@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@
@@xx##xxxxxx##@@
@@xx##xxxxxx##@@
@@xxxx##xx##xx@@
@@xxxx##xx##xx@@
@@xx##xxxxxx##@@
@@xx##xxxxxx##@@
@@##xxxx##xxxx@@
@@##xxxx##xxxx@@
@@xx##xxxxxx##@@
@@xx##xxxxxx##@@
@@##xxxxxx##xx@@
@@##xxxxxx##xx@@
@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@
"""
WATER_1 = """
**~~*ooo~~~oo~**
~~~o**~~~~~~~**o
ooo~***~~~~~***~
o~~~~**~~*****~~
~~~~*****@@**~~o
o~**********~oo~
o**~~~~~~***o~~~
*oo~~~~~~o**~~~~
~~~ooooooo~**~**
*~~~~oooo~~*@~**
**~~~~oo~~~~**~~
~**~~~~oo~~~**~~
~*@*~~~~oo~~**~~
~~*@**~~~~o**~~~
~~~~********~~~~
~~**~~~~ooo~***~
"""
WATER_2 = """
*~~*~oo~~~~oo~~*
~~oo*~~~~~~~~**~
oo~~~**~~~***~~o
~~~*********~~~~
~~~****@@**~~~oo
o~**********oo~~
~***~~~~~~***~~~
*~~oooo~ooo**~~~
~~~~~~oooo~~*@**
*~~~~~~~~oo~***~
~**~~~~~~~o~**~~
~~**~~~~~~o**~~~
~~*@**~~~~**~~~~
~~~~********~~~~
~~~**~~~~oo***~~
~***~~~oo~~~~**~
"""
WATER_3 = """
***oooo~~~oo**~*
oo~**~~~~~~~**oo
~~~***~~~~~***~~
o~~~~********ooo
~ooo~*@@*****~~~
~~o*****oo****~~
~~**~~oooo~***~~
~*~~~~~~~oo~**~~
*~~~~~~~~~oo*@**
*~~~~~~~~~~***~~
*~~~~~~~~~**o~~~
~**~~~~~~**~oo~~
~*@**~~~**~~~o~~
~~*@******~~o~~~
~~**~~~~~***~~~~
~**~~~~ooo~~***~
"""
WATER_4 = """
*~~*~oo~~ooo~~~*
~ooo*~~~~~~~***o
o~~~~**~~~**~~~~
~~~**@******~~~~
o~~***@@@**~~~oo
~o**********oo~~
~***~~~~~o***~~~
*~oooo~oooo**~~~
~~~~~oooo~~~*@**
*~~~~~~ooo~~***~
~**~~~~~~oo~**~~
~~**~~~~~~o***~~
~~**~~~~~~o**~~~
~~~*@@*~~~**o~~~
~~~~**@******~~~
~***~~~oo~~~~**~
"""
BOAT_FRONT_L = """
xxxxxxxxxxxxx***
xxxxxxxxxxxx*@@@
xxxxxxxxxxx**ooo
xxxxxxxxxx*&*@@@
xxxxxxxx**@&*@@@
xxxxxxx*@@o@&***
xxxxxx*@@o@***&&
xxxxx*@@o@*&&*&&
xxxx*@@o@*&&&*&&
xxxx*@@@*&&&&&*&
xxx*@o@*&&&***@*
xx*@@o*&&***@o@*
xx*@@o***@@*o@@*
x*@@@***o@@*o@@*
x*@@@*@*@o@*****
*@@@*@@*@o@*@@o*
"""
BOAT_FRONT_R = """
***xxxxxxxxxxxxx
@@@*xxxxxxxxxxxx
ooo**xxxxxxxxxxx
@@@*&*xxxxxxxxxx
@@@*&@**xxxxxxxx
***&@o@@*xxxxxxx
&&***@o@@*xxxxxx
&&*&&*@o@@*xxxxx
&&*&&&*@o@@*xxxx
&*&&&&&*@@@*xxxx
@@***&&&*@o@*xxx
@o@@***&&*o@@*xx
@@@@*@@***o@@*xx
@@oo*@@@***o@@*x
@o@@*****@*@o@*x
@o@@*@o@*@@*o@@*
"""
BOAT_REAR_L = """
*@@o*@o*@o@*@@@*
x**@@*@*@o@*****
x*@*****@o@*@@@*
xx*&o@***@@*@@@*
xx*&&o@@@***@@@*
xxx*&&ooo@@*****
xxxx*&&@@oo@*@@@
xxxx*&&&@@@o*ooo
xxxxx*&&&@@@*@@@
xxxxxx*&&&&@*ooo
xxxxxxx*&&&&*@@@
xxxxxxxx**&&*&&&
xxxxxxxxxx*&*&&&
xxxxxxxxxxx**&&&
xxxxxxxxxxxx*&&&
xxxxxxxxxxxxx***
"""
BOAT_REAR_R = """
@o@*@@o*@o@*@o@*
@o@*@@o*o@*@o**x
@o@**********&*x
@@o*@@****o@&*xx
@@o****@@o@&&*xx
*****@@oo@&&*xxx
@@@*@oo@@&&*xxxx
ooo*o@@@&&&*xxxx
@@@*@@@&&&*xxxxx
ooo*@&&&&*xxxxxx
@@@*&&&&*xxxxxxx
&&&*&&**xxxxxxxx
&&&*&*xxxxxxxxxx
&&&**xxxxxxxxxxx
&&&*xxxxxxxxxxxx
***xxxxxxxxxxxxx
"""
BOAT_SEAT_L = """
*@@o*@@o*@@@*@o*
*@@o*o@o*@o@*@o*
*@@o*@@o*@o@****
*@@o*@o@*@o@*@@*
*@@o*******@*o@*
*@o@*@oo@@@*****
*@o@*@@@oooooo@@
*@o@******@@@oo@
*@o@*&&&&&******
*@o@*****&&&&&&&
*o@@*@@@********
*o@@*&&&*&&@*@@*
*o@@*&&&*&&&*&&*
*o@@*****&&&*&&*
*@@@*@@@*&&&*&&*
*@@o*@o@*o@@*@o*
"""
BOAT_SEAT_R = """
o@@*@@@*@o@*o@@*
o@@*@@@*@@@*o@@*
@o@*****o@o*@@@*
@o@*@@@*ooo*@@@*
@@@*@*******@@o*
*****ooo@o@*@@o*
@@o@o@@@o@@*@@o*
@@@@@@******@o@*
******&&&&&*@o@*
&&&&&&&*****@o@*
********@o@*@@o*
@o@*o@&*&&&*o@o*
****&&&*&&&*@o@*
&&&*&&&*****@o@*
&&&*&&&*@o@*@o@*
@@@*@@o*@o@*@o@*
"""
OAR_DOWN_L = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxx****
xxxxx#xxx***#@&&
xx##xx***#@@&***
xxxxx*#@&&***xxx
xx#xxx****xx#xxx
xxx##xxxxxx#xxxx
x#xxx###x##xxxxx
xxxxxxxxxxxxx#xx
xx##xxxxxxx##xxx
xxxxxx###xxxxxxx
"""
OAR_UP_L = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xx****xxxxxxxxxx
x*@@##**xxxxxxxx
*&@@@@#**xxxxxxx
*&&@@@@@#****xxx
x*&&&***&@@@#***
xx***xxx****&@@#
xxxxxxxxxxxx****
xxxxxxxxxxxxxxxx
xx#xx#xxxxxxxxxx
xxx##xxxx#xxxxxx
#xxxxxxx#xxxxxxx
xx##xx#xxxx##xxx
xxxxxxxx##xxxxxx
xx####xxxxxxxxxx
"""
OAR_DOWN_R = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
****xxxxxxxxxxxx
&&@#***xxx#xxxxx
***&@@#***xx##xx
xxx***&&@#*xxxxx
xxx#xx****xxx#xx
xxxx#xxxxxx##xxx
xxxxx##x###xxx#x
xx#xxxxxxxxxxxxx
xxx##xxxxxxx##xx
xxxxxxx###xxxxxx
"""
OAR_UP_R = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxx****xx
xxxxxxxx**##@@*x
xxxxxxx**#@@@@&*
xxx****#@@@@@&&*
***#@@@&***&&&*x
#@@&****xxx***xx
****xxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxx#xx#xx
xxxxxx#xxxx##xxx
xxxxxxx#xxxxxxx#
xxx##xxxx#xx##xx
xxxxxx##xxxxxxxx
xxxxxxxxxx####xx
"""
BARRIER_ON = """
x*xxxxxxxxxxxxxx
*#*xxxxxxxxxxxxx
*@*xxxxxxxxxxxxx
*@*xxxxxxxxxxxxx
*@*xxxxxxxxxxxxx
*@*xxxxxxxxxxxxx
***************x
*&@@@@@@@@@@@##*
*&&&@@@@@@@@@@&*
***************x
*&*xxxxxxxxxxxxx
*@*xxxxxxxxxxxxx
*@*xxxxxxxxxxxxx
*@*xxxxxxxxxxxxx
*&*xxxxxxxxxxxxx
***xxxxxxxxxxxxx
"""
BARRIER_OFF = """
x*x**xxxxxxxxxxx
*#*##*xxxxxxxxxx
*@*@#*xxxxxxxxxx
*&*@@*xxxxxxxxxx
**@@&*xxxxxxxxxx
**@@*xxxxxxxxxxx
**@@*xxxxxxxxxxx
*@@&*xxxxxxxxxxx
*&&*xxxxxxxxxxxx
****xxxxxxxxxxxx
*&*xxxxxxxxxxxxx
*@*xxxxxxxxxxxxx
*@*xxxxxxxxxxxxx
*@*xxxxxxxxxxxxx
*&*xxxxxxxxxxxxx
***xxxxxxxxxxxxx
"""
FLAG = """
xO@@xxxx
xO**@xxx
xO***xxx
xOxx&&xx
xOxxxoox
xOxxxxxx
xOxxxxxx
xxxxxxxx
"""
FLAG_HELD_N = """
xO@@@xxx
xO***xxx
xO**&&xx
xOxxx&&x
xxxxxxox
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
FLAG_HELD_E = """
xxxx@*Ox
xx@***Ox
x&***oOx
*&oxxxOx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
FLAG_HELD_S = """
x@xxxxxx
xx&*x@Ox
xxx&**Ox
xxxxo&Ox
xxxxxxOx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
FLAG_HELD_W = """
xxxO@xxx
xxxOO*@x
xxxxOo&*
xxxxOOx*
xxxxxOxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
FLAG_HELD = [FLAG_HELD_N, FLAG_HELD_E, FLAG_HELD_S, FLAG_HELD_W]
ROCK = """
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxllllllllxxxx
xxxlr***kkkrrxxx
xxlr*****kkkksxx
xxrr****kkkkksxx
xxr****kkkkkksxx
xxr*****kkkkksxx
xxr******kksssxx
xxr*****kkksssxx
xxr****kkkssssxx
xxrr***ssspspsxx
xxxlspspppppsxxx
xxxxlsssssssxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
PAPER = """
xxxxxxxxxxxxxxxx
x**************x
x@@@***@@**@@@*x
x@**@*@**@*@**@x
x@@@**@@@@*@@@*x
x@****@**@*@***x
x@****@**@*@***x
x**************x
x**************x
x**@@@@**@@@***x
x**@*****@**@**x
x**@@@***@@@***x
x**@*****@**@**x
x**@@@@**@**@**x
x**************x
xxxxxxxxxxxxxxxx
"""
SCISSORS = """
xx##xxxxxxxxx##x
xx*x#xxxxxxx#x*x
xx*xx#xxxxx#xx*x
xx*xxx#xxx#xxx*x
xx*xxx##xx#xxx*x
xxx****##xx***xx
xxxxxxx>##xxxxxx
xxxxxxxx>##xxxxx
xxxxx#xxx>##xxxx
xxxx##>xxx>##xxx
xxx##>xxxxx>##xx
xx##>xxxxxxx>##x
x##>xxxxxxxxx>##
x#>xxxxxxxxxxx>#
x>xxxxxxxxxxxxx>
xxxxxxxxxxxxxxxx
"""
SPACER_N = """
xx****xx
x*****~x
x**&!~~x
**&&!o~~
~~o!!o~~
~~oooo~~
x~~~~~~x
x~~xx~~x
"""
SPACER_E = """
xxx****x
xx*****~
&&**#%%%
&!**%%%%
!o~*****
oo~~***~
xx~~~~~~
xx~~xx~~
"""
SPACER_S = """
xx***~xx
x*****~x
x*#%%%~x
**%%%%~~
~*****~~
~~~**~~~
x~~~~~~x
x~~xx~~x
"""
SPACER_W = """
x***~xxx
*****~xx
#%%%*~!!
%%%%*~!o
*****~oo
~***~~oo
~~~~~~xx
~~xx~~xx
"""
SPACER_TAGGED_S = """
xxxxxxxx
x##xxxxx
xx##x##x
xxx##xxx
x*****~x
x~~**~~x
x~~~~~~x
x~~xx~~x
"""
SPACER = [SPACER_N, SPACER_E, SPACER_S, SPACER_W]
SPACER_TAGGED = [SPACER_TAGGED_S, SPACER_TAGGED_S, SPACER_TAGGED_S,
SPACER_TAGGED_S]
NW_SHIP_WALL = """
oooooooo
o#######
o#######
o#######
o#######
o#######
o#######
o######x
"""
NS_SHIP_WALL = """
oooooooo
########
########
########
########
########
########
xxxxxxxx
"""
NE_SHIP_WALL = """
oooooooo
#######x
#######x
#######x
#######x
#######x
#######x
o######x
"""
EW_SHIP_WALL = """
o######x
o######x
o######x
o######x
o######x
o######x
o######x
o######x
"""
SE_SHIP_WALL = """
o######x
#######x
#######x
#######x
#######x
#######x
#######x
xxxxxxxx
"""
SW_SHIP_WALL = """
o######x
o#######
o#######
o#######
o#######
o#######
o#######
xxxxxxxx
"""
SHIP_WALL_CAP_S = """
o######x
o######x
o######x
o######x
o######x
o######x
o######x
xxxxxxxx
"""
SHIP_WALL_TCOUPLING_W = """
o######x
o#######
o#######
o#######
o#######
o#######
o#######
o######x
"""
SHIP_WALL_TCOUPLING_E = """
o######x
#######x
#######x
#######x
#######x
#######x
#######x
o######x
"""
SHIP_WALL_TCOUPLING_N = """
oooooooo
########
########
########
########
########
########
o######x
"""
SHIP_WALL_TCOUPLING_S = """
o######x
########
########
########
########
########
########
xxxxxxxx
"""
N_SHIP_SOLID_WALL = """
oooooooo
########
########
########
########
########
########
########
"""
E_SHIP_SOLID_WALL = """
#######x
#######x
#######x
#######x
#######x
#######x
#######x
#######x
"""
S_SHIP_SOLID_WALL = """
########
########
########
########
########
########
########
xxxxxxxx
"""
W_SHIP_SOLID_WALL = """
o#######
o#######
o#######
o#######
o#######
o#######
o#######
o#######
"""
NW_GRATE = """
X*******
X*@&&&&&
X*&&&x&x
X*&&&x&x
o*&&&x&x
o*&&&x&x
o*&&&x&x
o*&&&x&x
"""
N_GRATE = """
********
&&&&&&&&
&x&x&x&x
&x&x&x&x
&x&x&x&x
&x&x&x&x
&x&x&x&x
&x&x&x&x
"""
NE_GRATE = """
********
&&&&&&@~
&x&x&&&~
&x&x&&&~
&x&x&&&~
&x&x&&&~
&x&x&&&~
&x&x&&&~
"""
W_GRATE = """
X*&&&&&&
X*&&&x&x
X*&&&x&x
X*&&&x&x
o*&&&x&x
o*&&&x&x
o*&&&x&x
o*&&&&&&
"""
INNER_GRATE = """
&&&&&&&&
&x&x&x&x
&x&x&x&x
&x&x&x&x
&x&x&x&x
&x&x&x&x
&x&x&x&x
&&&&&&&&
"""
E_GRATE = """
&&&&&&&~
&x&x&&&~
&x&x&&&~
&x&x&&&~
&x&x&&&~
&x&x&&&~
&x&x&&&~
&&&&&&&~
"""
SE_GRATE = """
&x&x&&&~
&x&x&&&~
&x&x&&&~
&x&x&&&~
&x&x&&&~
&x&x&&&~
&&&&&&@~
~~~~~~~~
"""
S_GRATE = """
&x&x&x&x
&x&x&x&x
&x&x&x&x
&x&x&x&x
&x&x&x&x
&x&x&x&x
&&&&&&&&
~~~~~~~~
"""
SW_GRATE = """
X*&&&x&x
X*&&&x&x
X*&&&x&x
X*&&&x&x
o*&&&x&x
o*&&&x&x
o*@&&&&&
o*~~~~~~
"""
GLASS_WALL = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
******@@
@@******
!!!!!!!!
"""
WOOD_FLOOR = """
xxx-xxxx
--------
x-xxxxxx
--------
xxxxx-xx
--------
xxxxxxx-
--------
"""
METAL_TILE = """
oxxOoxxO
xxxoxxxo
xxxxxxxx
xxOxxxOx
xOoxxOox
xoxxxoxx
xxxxxxxx
OxxxOxxx
"""
METAL_PANEL = """
///////-
///////-
///////-
///////-
--------
////-///
////-///
--------
"""
THRESHOLD = """
xxxxxxxx
XXXXXXXX
xxxxxxxx
XXXXXXXX
xxxxxxxx
XXXXXXXX
xxxxxxxx
XXXXXXXX
"""
THRESHOLD_VERTICAL = """
xXxXxXxX
xXxXxXxX
xXxXxXxX
xXxXxXxX
xXxXxXxX
xXxXxXxX
xXxXxXxX
xXxXxXxX
"""
CHECKERED_TILE = """
XXXXxxxx
XXXXxxxx
XXXXxxxx
XXXXxxxx
xxxxXXXX
xxxxXXXX
xxxxXXXX
xxxxXXXX
"""
GEM = """
xxxxxxxx
xxx~~xxx
xx~**&xx
xx~*!&xx
xx~!!&xx
xxx&&xxx
xxxxxxxx
xxxxxxxx
"""
SMALL_SPHERE = """
xxxxxxxx
xx+~~+xx
xx~@*&xx
xx~**&xx
xx+&&+xx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
DIRT_PATTERN = """
xxxxxxxx
xXXXxxxx
xXXXxxxx
xxxxxxxx
xxxxXXXx
xxxxxXXx
xxxXXxxx
xxxxXXXX
"""
FRUIT_TREE = """
x@@@@@@x
x@Z@Z@@x
x@@Z@Z@x
xx@**@xx
xxx**xxx
xxx**xxx
xxx**xxx
xxxxxxxx
"""
BATTERY_FLOOR = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
aaaaafwx
aaaaaxxx
AAAAAgwx
xxxxxxxx
xxxxxxxx
"""
BATTERY_GRASPED = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
adddafwx
ADDDAxxx
AAAAAgwx
xxxxxxxx
xxxxxxxx
"""
BATTERY_FULL = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxaoooax
xxaoooaf
xxAOOOAg
xxxxxxxx
xxxxxxxx
"""
BATTERY_DRAINED_ONE = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxa#ooax
xxa#ooaf
xxA#OOAg
xxxxxxxx
xxxxxxxx
"""
BATTERY_DRAINED_TWO = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxa##oax
xxa##oaf
xxA##OAg
xxxxxxxx
xxxxxxxx
"""
BATTERY_DRAINED = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxa###ax
xxa###af
xxAAAAAg
xxxxxxxx
xxxxxxxx
"""
BATTERY_FLASHING = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xx#####x
xx#####f
xx#####g
xxxxxxxx
xxxxxxxx
"""
WIRES = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxwff
xxxxxxxg
xxxxxwgx
xxxxxxxx
xxxxxxxx
"""
PLUG_SOCKET = """
xxxxxxxx
xxxxsssx
xxxxsAsx
xxxxsgff
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
CONSUMPTION_STARS = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxx-xx
xxxx---x
xx-xx-xx
x---xxxx
xx-xxxxx
"""
# NOTE(review): byte-identical to CONSUMPTION_STARS above — presumably a
# placeholder for a second animation frame; confirm before deduplicating.
CONSUMPTION_STARS_2 = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxx-xx
xxxx---x
xx-xx-xx
x---xxxx
xx-xxxxx
"""
# Positional Goods sprites.
CROWN = """
x#@#x@#x
xx####xx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
BRONZE_CAP = """
xxxxxxxx
xx####xx
xx####xx
x@xxxx@x
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
# Boat Race 2.0 sprites.
SEMAPHORE_FLAG = """
xxxxxxxx
x*@xxxxx
x*&@xxxx
x*&@@xxx
x*&@@xxx
x|xxxxxx
x|xxxxxx
xxxxxxxx
"""
# Allelopathic Harvest 2.0 sprites.
SOIL = """
xXDxDDxx
XdDdDDDx
DdDDdDdd
dDdDDdDd
xDdDdDdX
DDDDDDXd
ddDdDDdD
xDdDdDDx
"""
BERRY_SEEDS = """
xxxxxxxx
xxxxxxxx
xxxOxxxx
xxxxoxOx
xxoxxxxx
xxxxxxxx
xxxxoxxx
xxxxxxxx
"""
BERRY_RIPE = """
xxxxxxxx
xxxxxxxx
xxooxxxx
xxooOOxx
xxxdOOxx
xxxddxxx
xxxxxxxx
xxxxxxxx
"""
# Territory 2.0 sprites.
NW_HIGHLIGHT = """
x*******
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
"""
NE_HIGHLIGHT = """
*******x
xxxxxxxo
xxxxxxxo
xxxxxxxo
xxxxxxxo
xxxxxxxo
xxxxxxxo
xxxxxxxo
"""
E_W_HIGHLIGHT = """
*xxxxxxo
*xxxxxxo
*xxxxxxo
*xxxxxxo
*xxxxxxo
*xxxxxxo
*xxxxxxo
*xxxxxxo
"""
N_S_HIGHLIGHT = """
********
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
oooooooo
"""
SE_HIGHLIGHT = """
xxxxxxxo
xxxxxxxo
xxxxxxxo
xxxxxxxo
xxxxxxxo
xxxxxxxo
xxxxxxxo
ooooooox
"""
SW_HIGHLIGHT = """
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
xooooooo
"""
CUTE_AVATAR_HOLDING_PAINTBRUSH_N = """
xxxxxOOO
xx*xx*+x
xx****-x
xx&&&&&x
x******x
x&****xx
xx****xx
xx&xx&xx
"""
CUTE_AVATAR_HOLDING_PAINTBRUSH_E = """
xxxxxxxx
xx*x*xxx
xx****xx
xx*,*,xx
x**##*&&
x&****xx
xx****xx
xx&&x&xx
"""
CUTE_AVATAR_HOLDING_PAINTBRUSH_S = """
xxxxxxxx
xx*xx*xx
xx****xx
xx,**,xx
x&*##*&x
x&****&x
xx****-x
xx&xx&+x
"""
CUTE_AVATAR_HOLDING_PAINTBRUSH_W = """
xxxxxxxx
xxx*x*xx
xx****xx
xx,*,*xx
&&*##**x
xx****&x
xx****xx
xx&x&&xx
"""
CUTE_AVATAR_HOLDING_PAINTBRUSH = [CUTE_AVATAR_HOLDING_PAINTBRUSH_N,
CUTE_AVATAR_HOLDING_PAINTBRUSH_E,
CUTE_AVATAR_HOLDING_PAINTBRUSH_S,
CUTE_AVATAR_HOLDING_PAINTBRUSH_W
]
PAINTBRUSH_N = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxx*&o
xxxxx*k&
xxxxxkkk
"""
PAINTBRUSH_E = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxOk**xx
-+Okk&xx
xxOk&oxx
xxxxxxxx
xxxxxxxx
"""
PAINTBRUSH_S = """
xxxxxOOO
xxxxxkkk
xxxxx&k*
xxxxxo&*
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
PAINTBRUSH_W = flip_horizontal(PAINTBRUSH_E)
PAINTBRUSH = [PAINTBRUSH_N, PAINTBRUSH_E, PAINTBRUSH_S, PAINTBRUSH_W]
# NOTE(review): this 8x8 sprite re-binds the name `WALL`, shadowing the
# 16x16 `WALL` sprite defined earlier in the module — importers only ever
# see this later definition; verify the earlier one is intentionally unused.
WALL = """
**#*****
**#*****
########
*****#**
*****#**
########
**#*****
**#*****
"""
GRAINY_FLOOR = """
+*+*++*+
*+*+**+*
+*+****+
****+*+*
*+*+****
**+***++
+*+*+**+
***+**+*
"""
GRASS_STRAIGHT_N_CAP = """
x***x**x
*@*@****
*@*@****
x*******
*****@*@
*****@*x
********
********
"""
SHADOW_W = """
#@*xxxxx
#*x~xxxx
#@*xxxxx
#*x~xxxx
#@*xxxxx
#*x~xxxx
#@*xxxxx
#*x~xxxx
"""
SHADOW_E = """
xxxxx*@#
xxxx~x*#
xxxxx*@#
xxxx~x*#
xxxxx*@#
xxxx~x*#
xxxxx*@#
xxxx~x*#
"""
SHADOW_N = """
########
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
APPLE_TREE_STOUT = """
xxxxxxxx
xaxaaaax
aabbaaoa
baaaoaax
bobaaaob
bbbabIbb
xbIbbbIx
xxIxxxIx
"""
BANANA_TREE = """
xxaaaxax
xaoaabba
abooaaaa
bbbbaaob
bobIboob
xooxIIbx
xxxxIxxx
xxxxIxxx
"""
ORANGE_TREE = """
xxaaaxxx
xaoaabba
abaoaaaa
bbbbaaob
bobIbaab
xbbIIbbx
xxxxIxxx
xxxxIxxx
"""
GOLD_MINE_TREE = """
xxxxxxxx
xaaaaxax
aobbaaaa
baIoIIax
boIIIoob
bbbabIbb
xbbxbbbx
xxxxxxxx
"""
FENCE_NW_CORNER = """
aaaxxaax
aaaxxaax
bbbdcbbd
cddedbbe
aaexxbcx
aaedcbcd
bbe#ebbe
cd####b#
"""
FENCE_N = """
xaaxxaax
xaaxxaax
cbbdcbbd
dbbedcbe
xbbxxcbx
cbbdcbbd
dbb#dbbe
#b####b#
"""
FENCE_NE_CORNER = """
xaaaxxxx
xaaaxxxx
cbbbxxxx
dbcdxxxx
xbaa##xx
cbaa##xx
d#bb#xxx
##cd#xxx
"""
FENCE_INNER_NE_CORNER = """
##aa##xx
x#aa##xx
xxbb#xxx
xxcd#xxx
xxaa##xx
xxaa##xx
xxbb#xxx
xxcd#xxx
"""
FENCE_E = """
xxaa##xx
xxaa##xx
xxbb#xxx
xxcd#xxx
xxaa##xx
xxaa##xx
xxbb#xxx
xxcd#xxx
"""
FENCE_SE_CORNER = """
xaaa##xx
xaaa##xx
cbbd#xxx
dcbb#xxx
xbbb##xx
dccb##xx
#ccc#xxx
##c##xxx
"""
FENCE_S = """
xaaxxaax
xaaxxaax
cbbdcbbd
dbbedcbe
xbbxxcbx
cbbdcbbd
dbb#dbbe
#b####b#
"""
FENCE_SW_CORNER = """
aaa#xaax
aaa#xaax
cbbdcbbd
bbcedbbe
bbb#xbcx
bccdcbcd
ccc#ebbe
#c####b#
"""
FENCE_W = """
aa##xxxx
aa##xxxx
bb#xxxxx
cd#xxxxx
aa##xxxx
aa##xxxx
bb#xxxxx
cd#xxxxx
"""
FENCE_INNER_NW_CORNER = """
aa######
aa##xx##
bb#xxxxx
cd#xxxxx
aa##xxxx
aa##xxxx
bb#xxxxx
cd#xxxxx
"""
FENCE_SHADOW_S = """
########
xx##xx##
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
FENCE_SHADOW_SE = """
######xx
xx####xx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
# NOTE(review): exact duplicate re-definition of `FENCE_SHADOW_S` from a few
# lines above (between FENCE_SHADOW_SE and FENCE_SHADOW_SW) — harmless but
# redundant; one of the two can likely be removed.
FENCE_SHADOW_S = """
########
xx##xx##
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
FENCE_SHADOW_SW = """
x#######
xx##xx##
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
MAGIC_GRAPPLED_AVATAR = """
xpPppPpx
pP*PP*Pp
pP****Pp
pPO**OPp
P&*##*&P
P&****&P
pP****Pp
pP&PP&Pp
"""
MAGIC_HOLDING_SHELL = """
x~*~~*~x
~*~**~*~
~*~~~~*~
~*~~~~*~
*~~~~~~*
*~~~~~~*
~*~~~~*~
~*~**~*~
"""
MAGIC_BEAM_N_FACING = """
xx~~~~xx
xx*~~*xx
xx*~~*xx
xx*~~*xx
xx*~~*xx
xx*~~*xx
xx*~~*xx
xx~~~~xx
"""
MAGIC_BEAM_E_FACING = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
~*******
~~~~~~~~
~*******
xxxxxxxx
xxxxxxxx
"""
MAGIC_BEAM_S_FACING = flip_vertical(MAGIC_BEAM_N_FACING)
MAGIC_BEAM_W_FACING = flip_horizontal(MAGIC_BEAM_E_FACING)
MAGIC_BEAM = [MAGIC_BEAM_N_FACING, MAGIC_BEAM_E_FACING,
MAGIC_BEAM_S_FACING, MAGIC_BEAM_W_FACING]
MAGIC_HANDS_N_FACING = """
xx~xx~xx
x~xxxx~x
~*xxxx*~
*~xxxx~*
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
MAGIC_HANDS_E_FACING = """
xxxxxxxx
xxxxx~xx
xx*~~~~*
x*x*~~*x
xx*~~~~*
xxxx~xxx
xxxxxxxx
xxxxxxxx
"""
MAGIC_HANDS_S_FACING = flip_vertical(MAGIC_HANDS_N_FACING)
MAGIC_HANDS_W_FACING = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
x*xxxx*x
*x*xx*x*
~*~xx~*~
x~~~~~~x
xx~~~~xx
"""
# NOTE(review): re-binds `FRUIT_TREE`, shadowing the earlier 8x8 sprite of
# the same name defined above (the '@'/'Z' variant) — only this version is
# visible to importers; confirm the shadowing is intentional.
FRUIT_TREE = """
x#x####x
##ww##o#
w###o##w
x#######
wow###ow
wwwwwIww
xwIwwwIx
xxIxxxIx
"""
FRUIT_TREE_BARREN = """
x#x####x
##ww####
w######w
x#######
w#w####w
wwwwwIww
xwIwwwIx
xxIxxxIx
"""
DIAMOND_PRINCESS_CUT = """
xxxxxxxx
xxxaaaxx
xxdbabdx
xxbdbdbx
xxxcbcxx
xxxxcxxx
xxxxxxxx
xxxxxxxx
"""
INNER_WALLS_NW = """
xbbbbbbb
bbaaaaaa
babbbbbb
babbbbbb
babbbbbb
babbbbbb
babbbbbb
babbbbbb
"""
INNER_WALLS_NE = """
bbbbbbbx
aaaaaabd
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
"""
INNER_WALLS_W_INTERSECT_SE = """
babbbbbb
babbbbbb
babbbbbb
babbbbbb
babbbbbb
babbbbbb
babbbbbc
babbbbcd
"""
INNER_WALLS_W = """
babbbbbb
babbbbbb
babbbbbb
babbbbbb
babbbbbb
babbbbbb
babbbbbb
babbbbbb
"""
INNER_WALLS_SE = """
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
ccccccdd
dddddddx
"""
INNER_WALLS_SW = """
babbbbbb
babbbbbb
babbbbbb
babbbbbb
babbbbbb
babbbbbb
bbcccccc
xddddddd
"""
INNER_WALLS_E_INTERSECT_SW = """
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
ccbbbbcd
ddbbbbcd
"""
INNER_WALLS_E = """
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
bbbbbbcd
"""
INNER_WALLS_VERTICAL = """
babbbbcd
babbbbcd
babbbbcd
babbbbcd
babbbbcd
babbbbcd
babbbbcd
babbbbcd
"""
INNER_WALLS_S_CAP = """
babbbbcd
babbbbcd
babbbbcd
babbbbcd
babbbbcd
bbbbbbcd
xbccccdd
xddddddx
"""
INNER_WALLS_N_CAP = """
xbbbbbbx
bbaaaabd
babbbbcd
babbbbcd
babbbbcd
babbbbcd
babbbbcd
babbbbcd
"""
CONVERTER_HOPPER = """
xxxxxxxx
eeeeeeee
e>>>>>>e
e<<<<<<e
e,,,,,,e
e,,,,,,e
e,,,,,,e
e,,,,,,e
"""
CONVERTER_ACCEPTANCE_INDICATOR = """
ecccccee
gcddddfg
gcd[]dfg
gcd_]dfg
gcddddfg
gcddddfg
gcddddfg
gaaaaacg
"""
CONVERTER_IDLE = """
ga`bb`cg
gabbbbcg
gccccccg
ghhhhhhg
ghhAAhhg
hhBBBChh
xxAAABxx
xxBBBCxx
"""
CONVERTER_ON = """
ga!bb!cg
gabbbbcg
gccccccg
ghhhhhhg
ghhAAhhg
hhBBBChh
xxAAABxx
xxBBBCxx
"""
CONVERTER_ON_FIRST = """
ga!bb!cg
gabbbbcg
gccccccg
ghhhhhhg
ghhBBhhg
hhAAABhh
xBBBBCCx
xxAAABxx
"""
CONVERTER_ON_SECOND = """
ga!bb!cg
gabbbbcg
gccccccg
ghhhhhhg
ghhBBhhg
hhAAABhh
xxBBBCxx
xAAAABBx
"""
CONVERTER_ON_THIRD = """
ga`bb!cg
gabbbbcg
gccccccg
ghhhhhhg
ghhAAhhg
hhBBBChh
xxAAABxx
xxBBBCxx
"""
CONVERTER_ON_FOURTH = """
ga`bb!cg
gabbbbcg
gccccccg
ghhhhhhg
ghhBBhhg
hhAAABhh
xBBBBCCx
xxAAABxx
"""
CONVERTER_ON_FIFTH = """
ga`bb!cg
gabbbbcg
gccccccg
ghhhhhhg
ghhBBhhg
hhAAABhh
xxBBBCxx
xAAAABBx
"""
CONVERTER_DISPENSER_IDLE = """
xxAAABxx
*ffffff*
hhhhhhhh
h<,,,,<h
h>>>>>>h
hhhhhhhh
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
CONVERTER_DISPENSER_RETRIEVING = """
*ffffff*
hhhhhhhh
h<,,,,<h
h>>>>>>h
hhhhhhhh
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
CONVERTER_DISPENSER_DISPENSING = """
xxAAABxx
xxBBBCxx
*ffffff*
hhhhhhhh
h<,,,,<h
h>>>>>>h
hhhhhhhh
xxxxxxxx
xxxxxxxx
"""
SQUARE = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
CYTOAVATAR_EMPTY_N = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xx&**xxx
x&****xx
x&****xx
xx&&&xxx
"""
CYTOAVATAR_EMPTY_E = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xx&***xx
x&*,*,*x
x&*****x
xx&&&&xx
"""
CYTOAVATAR_EMPTY_S = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xx&**xxx
x&,*,*xx
x&****xx
xx&&&xxx
"""
CYTOAVATAR_EMPTY_W = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xx****xx
x&,*,**x
x&*****x
xx&&&&xx
"""
CYTOAVATAR_EMPTY = [CYTOAVATAR_EMPTY_N, CYTOAVATAR_EMPTY_E,
CYTOAVATAR_EMPTY_S, CYTOAVATAR_EMPTY_W]
CYTOAVATAR_HOLDING_ONE_N = """
xxxxxxxx
xx&**xxx
x&****xx
x&&&&&xx
&&ooo&&x
&ooooo&x
&&ooo&&x
x&&&&&xx
"""
CYTOAVATAR_HOLDING_ONE_E = """
xxxxxxxx
xx&***xx
x&*,*,*x
x&*****x
&&oooo*x
&ooooo&x
&&ooo&&x
x&&&&&xx
"""
CYTOAVATAR_HOLDING_ONE_S = """
xxxxxxxx
xx&**xxx
x&,*,*xx
x&****xx
&&ooo**x
&ooooo&x
&&ooo&&x
x&&&&&xx
"""
CYTOAVATAR_HOLDING_ONE_W = """
xxxxxxxx
x****xxx
&,*,**xx
&*****xx
&oooo**x
&ooooo&x
&&ooo&&x
x&&&&&xx
"""
CYTOAVATAR_HOLDING_ONE = [CYTOAVATAR_HOLDING_ONE_N, CYTOAVATAR_HOLDING_ONE_E,
CYTOAVATAR_HOLDING_ONE_S, CYTOAVATAR_HOLDING_ONE_W]
CYTOAVATAR_HOLDING_MULTI_N = """
xx&***xx
x&*****x
x&&&&&&x
&&oooo&&
&oooooo&
&oooooo&
&&oooo&&
x&&&&&&x
"""
CYTOAVATAR_HOLDING_MULTI_E = """
xx&***xx
x&*,*,*x
x&*****x
&&oooo&&
&oooooo&
&oooooo&
&&oooo&&
x&&&&&&x
"""
CYTOAVATAR_HOLDING_MULTI_S = """
xx&***xx
x&,**,*x
x&*****x
&&oooo&&
&oooooo&
&oooooo&
&&oooo&&
x&&&&&&x
"""
CYTOAVATAR_HOLDING_MULTI_W = """
xx&***xx
x&,*,**x
x&*****x
&&oooo&&
&oooooo&
&oooooo&
&&oooo&&
x&&&&&&x
"""
CYTOAVATAR_HOLDING_MULTI = [CYTOAVATAR_HOLDING_MULTI_N,
CYTOAVATAR_HOLDING_MULTI_E,
CYTOAVATAR_HOLDING_MULTI_S,
CYTOAVATAR_HOLDING_MULTI_W]
SINGLE_HOLDING_LIQUID = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xlllxxxx
xxlllxxx
xxxxxxxx
"""
SINGLE_HOLDING_SOLID = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxSsxx
xxxxssxx
xxxxxxxx
"""
MULTI_HOLDING_SECOND_LIQUID = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxLLxxx
xxxxLLLx
xxxxxLxx
xxxxxxxx
"""
MULTI_HOLDING_SOLID = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxSsxx
xxxxssxx
xxxxxxxx
xxxxxxxx
"""
PETRI_DISH_NW_WALL_CORNER = """
xxx&&&&&
xx&~~~~~
x&*ooooo
&~o*oooo
&~oo*o&&
&~ooo&@@
&~oo&@@@
&~oo&@@#
"""
PETRI_DISH_NE_WALL_CORNER = flip_horizontal(PETRI_DISH_NW_WALL_CORNER)
PETRI_DISH_SE_WALL_CORNER = flip_vertical(PETRI_DISH_NE_WALL_CORNER)
PETRI_DISH_SW_WALL_CORNER = flip_vertical(PETRI_DISH_NW_WALL_CORNER)
PETRI_DISH_N_WALL = """
&&&&&&&&
~~~~~~~~
oooooooo
oooooooo
&&&&&&&&
@@@@@@@@
@@@@@@@@
########
"""
PETRI_DISH_W_WALL = """
&~oo&@@#
&~oo&@@#
&~oo&@@#
&~oo&@@#
&~oo&@@#
&~oo&@@#
&~oo&@@#
&~oo&@@#
"""
GRID_FLOOR_LARGE = """
@@@@@@@#
@@@@@@@#
@@@@@@@#
@@@@@@@#
@@@@@@@#
@@@@@@@#
@@@@@@@#
########
"""
PETRI_DISH_E_WALL = flip_horizontal(PETRI_DISH_W_WALL)
PETRI_DISH_S_WALL = flip_vertical(PETRI_DISH_N_WALL)
SOLID = """
xxxxxxxb
xxxxxxxb
xxxSsxxb
xxSSssxb
xxssZZxb
xxxsZxxb
xxxxxxxb
bbbbbbbb
"""
GAS = """
xxxxxxgx
GxxGxGxx
xxGxxxGg
xxggxgxx
xGgxgGgx
GxxxGgxx
xxgxxxxx
xxxxxxgx
"""
LIQUID = """
xxxxxxxb
xxxxxxxb
xwwwllxb
wwlllxxb
xxLwwllb
xwwwllxb
xxllwwLl
bbbbbbbb
"""
SOLID_S_CAP = """
xxxxxxxx
xxxxxxxx
xxxSsxxx
xxSSssxx
xxssZZxx
xxxsZxxx
xxxsZxxx
xxxsZxxx
"""
SOLID_E_CAP = """
xxxxxxxx
xxxxxxxx
xxxSsxxx
xxSSssss
xxssZZZZ
xxxsZxxx
xxxxxxxx
xxxxxxxx
"""
SOLID_N_CAP = """
xxxSsxxx
xxxSsxxx
xxxSsxxx
xxSSssxx
xxssZZxx
xxxsZxxx
xxxxxxxx
xxxxxxxx
"""
SOLID_W_CAP = """
xxxxxxxx
xxxxxxxx
xxxSsxxx
SSSSssxx
ssssZZxx
xxxsZxxx
xxxxxxxx
xxxxxxxx
"""
SOLID_X_COUPLING = """
xxx*sxxx
xxx*sxxx
xxxSsxxx
SSSSssss
ssssZZZZ
xxxsZxxx
xxxsZxxx
xxxsZxxx
"""
SOLID_ES_COUPLING = """
xxxxxxxx
xxxxxxxx
xxxSsxxx
xxSSssss
xxssZZZZ
xxxsZxxx
xxxsZxxx
xxxsZxxx
"""
SOLID_SW_COUPLING = """
xxxxxxxx
xxxxxxxx
xxxSsxxx
SSSSssxx
ssssZZxx
xxxsZxxx
xxxsZxxx
xxxsZxxx
"""
SOLID_NW_COUPLING = """
xxxSsxxx
xxxSsxxx
xxxSsxxx
SSSSssxx
ssssZZxx
xxxsZxxx
xxxxxxxx
xxxxxxxx
"""
SOLID_NE_COUPLING = """
xxxSsxxx
xxxSsxxx
xxxSsxxx
xxSSssss
xxssZZZZ
xxxsZxxx
xxxxxxxx
xxxxxxxx
"""
SOLID_S_TCOUPLING = """
xxxSsxxx
xxxSsxxx
xxxSsxxx
SSSSssss
ssssZZZZ
xxxsZxxx
xxxxxxxx
xxxxxxxx
"""
SOLID_E_TCOUPLING = """
xxxSsxxx
xxxSsxxx
xxxSsxxx
SSSSssxx
ssssZZxx
xxxsZxxx
xxxsZxxx
xxxsZxxx
"""
SOLID_N_TCOUPLING = """
xxxxxxxx
xxxxxxxx
xxxSsxxx
SSSSssss
ssssZZZZ
xxxsZxxx
xxxsZxxx
xxxsZxxx
"""
SOLID_W_TCOUPLING = """
xxxSsxxx
xxxSsxxx
xxxSsxxx
xxSSssss
xxssZZZZ
xxxsZxxx
xxxsZxxx
xxxsZxxx
"""
SOLID_NS_COUPLING = """
xxxSsxxx
xxxSsxxx
xxxSsxxx
xxSSssxx
xxssZZxx
xxxsZxxx
xxxsZxxx
xxxsZxxx
"""
SOLID_EW_COUPLING = """
xxxxxxxx
xxxxxxxx
xxxSsxxx
SSSSssss
ssssZZZZ
xxxsZxxx
xxxxxxxx
xxxxxxxx
"""
APPLE = """
xxxxxxxx
xxxxxxxx
xxo|*xxx
x*#|**xx
x*****xx
x#***#xx
xx###xxx
xxxxxxxx
"""
APPLE_JUMP = """
xxxxxxxx
xxo|*xxx
x*#|**xx
x*****xx
x#***#xx
xx###xxx
xxxxxxxx
xxxxxxxx
"""
N_EDGE = """
********
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
N_HALF_EDGE = """
xxxx****
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
W_EDGE = """
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
*xxxxxxx
"""
S_EDGE = flip_vertical(N_EDGE)
E_EDGE = flip_horizontal(W_EDGE)
PAINTER_STAND_S_FACING = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xoooooox
xo!**!ox
********
*WWWWWW*
wWWWWWWx
"""
PAINTER_STAND_E_FACING = """
xxxxx**x
xxxxx*WW
xxxxx*WW
xxxx**WW
xxxx**WW
xxxxx*WW
xxxxx*WW
xxxxx**w
"""
PAINTER_STAND_N_FACING = flip_vertical(PAINTER_STAND_S_FACING)
PAINTER_STAND_W_FACING = flip_horizontal(PAINTER_STAND_E_FACING)
PAINTER_COVER = """
xWWWWWWx
xWWWWWWw
wWWWWWWw
wWWWWWWx
xWWWWWWw
wWWWWWWx
wWWWWWWw
xWWWWWWx
"""
RECEIVER_MOUTH = """
@~!!&&&&
*~GGGgl&
*~BBGgl&
*~BBGgl&
*~BBGgl&
*~BBGgl&
*~GGGgl&
~~!!!&&&
"""
RECEIVER_BACK = """
xxxx@@@@
exxx@***
eded@***
aded@***
bded@***
ceae@***
cxxx@***
xxxx~~~~
"""
CARBON_INDICATOR = """
xxxxxxxx
xxxxxxxx
xxxxxOoO
xxxxxoOo
xxxxxOoO
xxxxxsSs
xxxxxxxx
xxxxxxxx
"""
WOOD_INDICATOR = """
xxxxxxxx
xxxxxxxx
xxxxxOOO
xxxxxooo
xxxxxOOO
xxxxxsss
xxxxxxxx
xxxxxxxx
"""
METAL_INDICATOR = """
xxxxxxxx
xxxxxxxx
xxxxxOOO
xxxxxOOO
xxxxxOOO
xxxxxsss
xxxxxxxx
xxxxxxxx
"""
CONVEYOR_BELT_1 = """
YYYBBBBY
Mhshshsh
Mhyyshyy
Myyhsyyh
Myyhsyyh
Mhyyshyy
Mhshshsh
BYYYYBBB
"""
CONVEYOR_BELT_2 = """
YBBBBYYY
shshshMh
yyshyyMh
yhsyyhMy
yhsyyhMy
yyshyyMh
shshshMh
YYYBBBBY
"""
CONVEYOR_BELT_3 = """
BBBYYYYB
shshMhsh
shyyMhyy
syyhMyyh
syyhMyyh
shyyMhyy
shshMhsh
YBBBBYYY
"""
CONVEYOR_BELT_4 = """
BYYYYBBB
shMhshsh
yyMhyysh
yhMyyhsy
yhMyyhsy
yyMhyysh
shMhshsh
BBBYYYYB
"""
CONVEYOR_BELT_S_1 = """
YhyhhyhB
BsyyyysB
BhhyyhhB
BssssssY
BhyhhyhY
YsyyyysY
YhhyyhhY
YMMMMMMB
"""
CONVEYOR_BELT_S_2 = """
YhhyyhhY
YMMMMMMB
YhyhhyhB
BsyyyysB
BhhyyhhB
BssssssY
BhyhhyhY
YsyyyysY
"""
CONVEYOR_BELT_S_3 = """
BhyhhyhY
YsyyyysY
YhhyyhhY
YMMMMMMB
YhyhhyhB
BsyyyysB
BhhyyhhB
BssssssY
"""
CONVEYOR_BELT_S_4 = """
BhhyyhhB
BssssssY
BhyhhyhY
YsyyyysY
YhhyyhhY
YMMMMMMB
YhyhhyhB
BsyyyysB
"""
CONVEYOR_BELT_ANCHOR_TOP_RIGHT = """
:xxxxxxx
:,xxxxxx
:,xxxxxx
:,xxxxxx
:,xxxxxx
:,xxxxxx
:,xxxxxx
:,xxxxxx
"""
CONVEYOR_BELT_ANCHOR_RIGHT = """
:,xxxxxx
:,xxxxxx
:,xxxxxx
:,xxxxxx
:,xxxxxx
:,xxxxxx
:,xxxxxx
:,xxxxxx
"""
CONVEYOR_BELT_ANCHOR_TOP_LEFT = """
xxxxxxxxg
xxxxxxxgG
xxxxxxxgG
xxxxxxxgG
xxxxxxxgG
xxxxxxxgG
xxxxxxxgG
xxxxxxxgG
"""
CONVEYOR_BELT_ANCHOR_LEFT = """
xxxxxxxgG
xxxxxxxgG
xxxxxxxgG
xxxxxxxgG
xxxxxxxgG
xxxxxxxgG
xxxxxxxgG
xxxxxxxgG
"""
METAL_FLOOR_DOUBLE_SPACED = """
--------
----xo--
--------
--xo----
--------
xo------
--------
--------
"""
CARBON_OBJECT = """
xxxxxxxx
xx*!*!xx
xx!*!*xx
xx*!*!xx
xx!*!*xx
xxsSsSxx
xxxxxxxx
xxxxxxxx
"""
WOOD_OBJECT = """
xxxxxxxx
xx!!!!xx
xx***@xx
xx!!!!xx
xx***@xx
xxSSSSxx
xxxxxxxx
xxxxxxxx
"""
METAL_OBJECT = """
xxxxxxxx
xx***#xx
xx*@@#xx
xx*@@#xx
xx####xx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
METAL_DROPPING_1 = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxx@@#xx
xxx@@#xx
xxx###xx
xxxxxxxx
"""
METAL_DROPPING_2 = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xx##xxxx
xx##xxxx
"""
CARBON_DROPPING_1 = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxsSsxx
xxxSsSxx
xxxsSsxx
xxxxxxxx
"""
CARBON_DROPPING_2 = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxsSxxxx
xxSsxxxx
"""
RECEIVER_PUSHING_1 = """
xxxx@@@@
xexx@***
xede@***
xade@***
xbde@***
xcea@***
xcxx@***
xxxx~~~~
"""
RECEIVER_PUSHING_2 = """
xxxx@@@@
exxd@***
eded@***
aded@***
bded@***
ceae@***
cxxe@***
xxxx~~~~
"""
RECEIVER_PUSHING_3 = """
xxxx@@@@
exex@***
eded@***
aded@***
bded@***
ceae@***
cxax@***
xxxx~~~~
"""
RECEIVER_PUSHING_4 = """
xxxx@@@@
edxx@***
eded@***
aded@***
bded@***
ceae@***
cexx@***
xxxx~~~~
"""
DIAMOND = """
xxxabxxx
xxaabbxx
xaaabbbx
aaaabbbb
ddddcccc
xdddcccx
xxddccxx
xxxdcxxx
"""
SMALL_DIAMOND = """
xxxxxxxx
xxxabxxx
xxaabbxx
xaaabbbx
xdddcccx
xxddccxx
xxxdcxxx
xxxxxxxx
"""
CUTE_AVATAR_RANK_FIRST_N = """
xxxxxx,,
x*xx*x,,
x****,xx
x&&&&xxx
******xx
&****&xx
x****xxx
x&xx&xxx
"""
CUTE_AVATAR_RANK_FIRST_E = """
xxxxx,,,
x*x*xx,,
x****x,,
x*O*O,xx
**##*&xx
&****&xx
x****xxx
x&&x&xxx
"""
CUTE_AVATAR_RANK_FIRST_S = """
xxxxxx,,
x*xx*x,,
x****,xx
xO**Oxxx
&*##*&xx
&****&xx
x****xxx
x&xx&xxx
"""
CUTE_AVATAR_RANK_FIRST_W = """
xxxxx,,,
xx*x*x,,
x****x,,
xO*O*,xx
&*##**xx
&****&xx
x****xxx
x&x&&xxx
"""
CUTE_AVATAR_RANK_FIRST = [CUTE_AVATAR_RANK_FIRST_N, CUTE_AVATAR_RANK_FIRST_E,
CUTE_AVATAR_RANK_FIRST_S, CUTE_AVATAR_RANK_FIRST_W]
CUTE_AVATAR_RANK_SECOND_N = """
xxxxxx,,
x*xx*x,,
x****,xx
x&&&&xxx
******xx
&****&xx
x****xxx
x&xx&xxx
"""
CUTE_AVATAR_RANK_SECOND_E = """
xxxxx,,,
x*x*xx,,
x****x,,
x*O*O,xx
**##*&xx
&****&xx
x****xxx
x&&x&xxx
"""
CUTE_AVATAR_RANK_SECOND_S = """
xxxxxx,,
x*xx*x,,
x****,xx
xO**Oxxx
&*##*&xx
&****&xx
x****xxx
x&xx&xxx
"""
CUTE_AVATAR_RANK_SECOND_W = """
xxxxx,,,
xx*x*x,,
x****x,,
xO*O*,xx
&*##**xx
&****&xx
x****xxx
x&x&&xxx
"""
CUTE_AVATAR_RANK_SECOND = [CUTE_AVATAR_RANK_SECOND_N, CUTE_AVATAR_RANK_SECOND_E,
CUTE_AVATAR_RANK_SECOND_S, CUTE_AVATAR_RANK_SECOND_W]
CUTE_AVATAR_RANK_RUNNER_UP_N = """
xxxxxx,,
x*xx*x,,
x****,xx
x&&&&xxx
******xx
&****&xx
x****xxx
x&xx&xxx
"""
CUTE_AVATAR_RANK_RUNNER_UP_E = """
xxxxx,,,
x*x*xx,,
x****x,,
x*O*O,xx
**##*&xx
&****&xx
x****xxx
x&&x&xxx
"""
CUTE_AVATAR_RANK_RUNNER_UP_S = """
xxxxxx,,
x*xx*x,,
x****,xx
xO**Oxxx
&*##*&xx
&****&xx
x****xxx
x&xx&xxx
"""
CUTE_AVATAR_RANK_RUNNER_UP_W = """
xxxxx,,,
xx*x*x,,
x****x,,
xO*O*,xx
&*##**xx
&****&xx
x****xxx
x&x&&xxx
"""
CUTE_AVATAR_RANK_RUNNER_UP = [
CUTE_AVATAR_RANK_RUNNER_UP_N, CUTE_AVATAR_RANK_RUNNER_UP_E,
CUTE_AVATAR_RANK_RUNNER_UP_S, CUTE_AVATAR_RANK_RUNNER_UP_W
]
CUTE_AVATAR_ARMS_UP_N = """
xxpxxpxx
xp*xx*px
pP****Pp
P&&&&&&P
x******x
xx****xx
xx****xx
xx&xx&xx
"""
CUTE_AVATAR_ARMS_UP_E = """
xxxxxxxx
xx*x*xxx
xx****xx
xx*O*OpP
x*&##*&&
xx****pP
xx****xx
xx&&x&xx
"""
CUTE_AVATAR_ARMS_UP_S = """
xxxxxxxx
xx*xx*xx
xx****xx
xPO**OPx
P&*##*&P
pP****Pp
xp****px
xx&pp&xx
"""
CUTE_AVATAR_ARMS_UP_W = """
xxxxxxxx
xxx*x*xx
xx****xx
PpO*O*xx
&&*##&*x
Pp****xx
xx****xx
xx&x&&xx
"""
CUTE_AVATAR_ARMS_UP = [CUTE_AVATAR_ARMS_UP_N, CUTE_AVATAR_ARMS_UP_E,
CUTE_AVATAR_ARMS_UP_S, CUTE_AVATAR_ARMS_UP_W]
COIN_MAGICALLY_HELD = """
xxxx,,,,,,,,xxxx
xxxx,,,,,,,,xxxx
xxxx,,,,,,,,xxxx
xxxx~~@###~~xxxx
,,,~~@@@@##~~,,,
,,~~&&&@@@@#~~,,
,,~&&&&&&&@@#~,,
,,~&*&&&&&&&&~,,
,,~&***&&&&&&~,,
,,~**********~,,
,,~~********~~,,
,,,~~******~~,,,
xxxx~~****~~xxxx
xxxx,,,,,,,,xxxx
xxxx,,,,,,,,xxxx
xxxx,,,,,,,,xxxx
"""
MARKET_CHAIR = """
,,,,,,,,
,,,,,,,,
,,,,,,,,
,,,**,,,
,,*++*,,
,,+*+*,,
,,,,,,,,
,,,,,,,,
"""
# Externality Mushrooms sprites.
MUSHROOM = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxoOOOox
xxO*OOOx
xxOOOO*x
xxwiiiwx
xxx!!!xx
"""
# Factory sprites.
NW_PERSPECTIVE_WALL = """
--------
--------
--------
--------
-----GGG
-----gGg
-----GgG
-----ggg
"""
PERSPECTIVE_WALL = """
--------
--------
--------
--------
GGGGGGGG
GgGgGgGg
gGgGgGgG
gggggggg
"""
PERSPECTIVE_WALL_T_COUPLING = """
--------
--------
--------
--------
G-----GG
G-----Gg
g-----gG
g-----gg
"""
NE_PERSPECTIVE_WALL = """
--------
--------
--------
--------
GGG-----
GgG-----
gGg-----
ggg-----
"""
W_PERSPECTIVE_WALL = """
-----xxx
-----xxx
-----xxx
-----xxx
-----xxx
-----xxx
-----xxx
-----xxx
"""
MID_PERSPECTIVE_WALL = """
x-----xx
x-----xx
x-----xx
x-----xx
x-----xx
x-----xx
x-----xx
x-----xx
"""
E_PERSPECTIVE_WALL = """
xxx-----
xxx-----
xxx-----
xxx-----
xxx-----
xxx-----
xxx-----
xxx-----
"""
PERSPECTIVE_THRESHOLD = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
EEEEEEEE
eeeeeeee
EEEEEEEE
eeeeeeee
"""
PERSPECTIVE_WALL_PALETTE = {
# Palette for PERSPECTIVE_WALL sprites.
"-": (130, 112, 148, 255),
"G": (74, 78, 99, 255),
"g": (79, 84, 107, 255),
"E": (134, 136, 138, 255),
"e": (143, 146, 148, 255),
"x": (0, 0, 0, 0),
}
HOPPER_BODY = """
xaaaaaax
xaaaaaax
caaaaaab
faaaaaab
gaaaaaab
caaaaaac
caaaaaac
cbbbbbbc
"""
HOPPER_BODY_ACTIVATED = """
xaaaaaax
xaaaaaab
caaaaaab
faaaaaab
gaaaaaab
caaaaaab
caaaaaac
cbbbbbbc
"""
DISPENSER_BODY = """
xaaaaaax
xaaaaaax
maaaaaax
maaaaaax
maaaaaax
xaaaaaax
xaaaaaax
xbbbbbbx
"""
DISPENSER_BODY_ACTIVATED = """
xaaaaaax
maaaaaax
maaaaaax
maaaaaax
maaaaaax
maaaaaax
xaaaaaax
xbbbbbbx
"""
HOPPER_CLOSED = """
ceeeeeec
ceccccec
ceccccec
ceccccec
ceeeeeec
cddddddc
cccccccc
xxxxxxxx
"""
HOPPER_CLOSING = """
ceeeeeec
cec##cec
cec--cec
cec--cec
ceeeeeec
cddddddc
cccccccc
xxxxxxxx
"""
HOPPER_OPEN = """
ceeeeeec
ce####ec
ce#--#ec
ce#--#ec
ceeeeeec
cddddddc
cccccccc
xxxxxxxx
"""
DISPENSER_BELT_OFF = """
xbaaaabx
xbaaaabx
xejjjjex
xejjjjex
xejjjjex
xejjjjex
xdaaaadx
xxxxxxxx
"""
DISPENSER_BELT_ON_POSITION_1 = """
xbaaaabx
xboaaobx
xejOOjex
xejjjjex
xeOjjOex
xejOOjex
xdaaaadx
xxxxxxxx
"""
DISPENSER_BELT_ON_POSITION_2 = """
xbaooabx
xbaaaabx
xeOjjOex
xejOOjex
xejjjjex
xeOjjOex
xdaooadx
xxxxxxxx
"""
DISPENSER_BELT_ON_POSITION_3 = """
xboaaobx
xbaooabx
xejjjjex
xeOjjOex
xejOOjex
xejjjjex
xdoaaodx
xxxxxxxx
"""
FLOOR_MARKING = """
--------
--xx-xx-
-x-xx-x-
-xx-xx--
--xx-xx-
-x-xx-x-
-xx-xx--
--------
"""
FLOOR_MARKING_LONG_TOP = """
--------
--xx-xx-
-x-xx-x-
-xx-xx--
--xx-xx-
-x-xx-x-
-xx-xx--
--xx-xx-
"""
FLOOR_MARKING_LONG_BOTTOM = """
-x-xx-x-
-xx-xx--
--xx-xx-
-x-xx-x-
-xx-xx--
--xx-xx-
-x-xx-x-
--------
"""
APPLE_CUBE_INDICATOR = """
xxxxxxxx
xxgsxxxx
xxffxxxx
xxxxxxxx
xxxxaaxx
xxxxaaxx
xxxxxxxx
xxxxxxxx
"""
APPLE_INDICATOR = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxgsxxx
xxxffxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
DOUBLE_APPLE_INDICATOR = """
xxxxxxxx
xxgsxxxx
xxffxxxx
xxxxxxxx
xxxxgsxx
xxxxffxx
xxxxxxxx
xxxxxxxx
"""
APPLE_INDICATOR_FADE = """
xxxxxxxx
xxxxxxxx
xxxGSxxx
xxxFFxxx
xxxFFxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
APPLE_DISPENSING_ANIMATION_1 = """
xxFffFxx
xxxFFxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
APPLE_DISPENSING_ANIMATION_2 = """
xxxxxxxx
xxxgsxxx
xxFffFxx
xxFffFxx
xxxFFxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
APPLE_DISPENSING_ANIMATION_3 = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxgsxxx
xxFffFxx
xxFffFxx
xxxFFxxx
"""
BANANA_DISPENSING_ANIMATION_1 = """
xxxBbbxx
xxbbbBxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
BANANA_DISPENSING_ANIMATION_3 = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxsxx
xxxxBbxx
xxxBbbxx
xxbbbBxx
"""
CUBE_DISPENSING_ANIMATION_1 = """
xxxaaAxx
xxxaA&xx
xxxA&&xx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
CUBE_DISPENSING_ANIMATION_2 = """
xxxxxxxx
xxxxxxxx
xxxaaAxx
xxxaA&xx
xxxA&&xx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
CUBE_DISPENSING_ANIMATION_3 = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxaaAxx
xxxaA&xx
xxxA&&xx
"""
BANANA = """
xxxxxxxx
xxxxxsxx
xxxxBbxx
xxxBbbxx
xxbbbBxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
BANANA_DROP_1 = """
xxxxxxxx
xxxxxsxx
xxxxBbxx
xxxBbxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
BANANA_DROP_2 = """
xxxxxxxx
xxxxxxxx
xxxxBxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
BLOCK = """
xxxxxxxx
xxxxxxxx
xxaaAxxx
xxaA&xxx
xxA&&xxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
BLOCK_DROP_1 = """
xxxxxxxx
xxxxxxxx
xxxaAxxx
xxxA&xxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
BLOCK_DROP_2 = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxx&xxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
HOPPER_INDICATOR_SINGLE_BLOCK = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxaaxxx
xxxaaxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
HOPPER_INDICATOR_SINGLE_BANANA = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxbxx
xxxbbxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
HOPPER_INDICATOR_TWO_BLOCKS = """
xxxxxxxx
xxxxaaxx
xxxxaaxx
xxxxxxxx
xxaaxxxx
xxaaxxxx
xxxxxxxx
xxxxxxxx
"""
HOPPER_INDICATOR_ONE_BLOCK = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxaaxxxx
xxaaxxxx
xxxxxxxx
xxxxxxxx
"""
HOPPER_INDICATOR_ON = """
xxxxxxxx
xxxxxbxx
xxxbbxxx
xxxxxxxx
xxaaxxxx
xxaaxxxx
xxxxxxxx
xxxxxxxx
"""
HOPPER_INDICATOR_BANANA = """
xxxxxxxx
xxxxxbxx
xxxbbxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
HOPPER_INDICATOR_BLOCK = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxaaxxxx
xxaaxxxx
xxxxxxxx
xxxxxxxx
"""
HOPPER_INDICATOR_FADE = """
xxxxxxxx
xxxxxBxx
xxxBBxxx
xxxxxxxx
xxEExxxx
xxEExxxx
xxxxxxxx
xxxxxxxx
"""
FACTORY_MACHINE_BODY_PALETTE = {
# Palette for DISPENSER_BODY, HOPPER_BODY, and HOPPER sprites
"a": (140, 129, 129, 255),
"b": (84, 77, 77, 255),
"f": (62, 123, 214, 255),
"g": (214, 71, 71, 255),
"c": (92, 98, 120, 255),
"d": (64, 68, 82, 255),
"m": (105, 97, 97, 255),
"e": (120, 128, 156, 255),
"h": (64, 68, 82, 255),
"#": (51, 51, 51, 255),
"-": (0, 0, 0, 255),
"x": (0, 0, 0, 0),
}
CUTE_AVATAR_W_BUBBLE_N = """
xxxxxx,,
x*xx*x,,
x****,xx
x&&&&xxx
******xx
&****&xx
x****xxx
x&xx&xxx
"""
CUTE_AVATAR_W_BUBBLE_E = """
xxxxx,,,
x*x*xx,,
x****x,,
x*O*O,xx
**##*&xx
&****&xx
x****xxx
x&&x&xxx
"""
CUTE_AVATAR_W_BUBBLE_S = """
xxxxxx,,
x*xx*x,,
x****,xx
xO**Oxxx
&*##*&xx
&****&xx
x****xxx
x&xx&xxx
"""
CUTE_AVATAR_W_BUBBLE_W = """
xxxxx,,,
xx*x*x,,
x****x,,
xO*O*,xx
&*##**xx
&****&xx
x****xxx
x&x&&xxx
"""
CUTE_AVATAR_W_BUBBLE = [CUTE_AVATAR_W_BUBBLE_N, CUTE_AVATAR_W_BUBBLE_E,
CUTE_AVATAR_W_BUBBLE_S, CUTE_AVATAR_W_BUBBLE_W]
CUTE_AVATAR_FROZEN = """
########
##O##O##
##OOOO##
##,OO,##
#OO##OO#
#OOOOOO#
##OOOO##
##O##O##
"""
# Suggested base colour for the palette: (190, 190, 50, 255)
HD_CROWN_N = """
xxxxxxxxoxxxxxxx
xxxx#xxoooxxoxxx
xxxx@oxoooxooxxx
xxxxx@oo@oooxxxx
xxxx#@@&r*o@Oxxx
xxx#@@@*R*@*oOxx
xxx###@*r*oOOOxx
xxxxx#####OOxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
HD_CROWN_E = """
xxxxxxxxxxxx*xxx
xxx#xxx*xxx*&xxx
xxx@*xx*&x*&&rxx
xxxr@@@*&&&oRrxx
xxxx@@**&&&orxxx
xxx#@**&###OOxxx
xxx###@#xxxxxxxx
xxxxx##xxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
HD_CROWN_S = """
xxxxxxxx@xxxxxxx
xxxx@xx#r*xxoxxx
xxxx@ox%Rrx&oxxx
xxxxx@&RRr&oxxxx
xxxx#@@*r*&oOxxx
xxx#@#####OOoOxx
xxx###xxxxxOOOxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
HD_CROWN_W = """
xxxx#xxxxxxxxxxx
xxxx@@xxx*xxx*xx
xxx%@@*x*&x*&&xx
xxxRr@@*&&&oorxx
xxxxr@**&&&ooxxx
xxxx#####**&&Oxx
xxxxxxxxx#&OOOxx
xxxxxxxxxxOOxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxx
"""
HD_CROWN = [HD_CROWN_N, HD_CROWN_E, HD_CROWN_S, HD_CROWN_W]
JUST_BADGE = """
xxxx
xabx
xcdx
xxxx
"""
# NOTE(review): row 2 of this sprite is 9 characters wide while every other
# row (and the sibling 8x8 sprites) is 8 — looks like a stray '@'; confirm
# against the ascii-shape renderer before "fixing", since changing art data
# alters rendering.
EMPTY_TREE = """
x@@@@@@x
x@@@@@@@x
x@@@@@@x
xx@**@xx
xxx**xxx
xxx**xxx
xxx**xxx
xxxxxxxx
"""
EMPTY_SHRUB = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xx@@@@xx
x@@@@@@x
x@@@@@@x
x@@@@@@x
xxxxxxxx
"""
FRUIT_SHRUB = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xx@@@@xx
x@@Z@Z@x
x@Z@Z@@x
x@@@@@@x
xxxxxxxx
"""
FRUIT_IN_SHRUB = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxZxZxx
xxZxZxxx
xxxxxxxx
"""
FRUIT_IN_TREE = """
xxxxxxxx
xxZxZxxx
xxxZxZxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
"""
GRASP_SHAPE = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xoxxxxox
xxooooxx
"""
CUTE_AVATAR_CHILD_N = """
xxxxxxxx
xxxxxxxx
xx*xx*xx
xx****xx
xx&&&&xx
x******x
xx&xx&xx
xxxxxxxx
"""
CUTE_AVATAR_CHILD_E = """
xxxxxxxx
xxxxxxxx
xx*x*xxx
xx****xx
xx*O*Oxx
x**##*&x
xx&&x&xx
xxxxxxxx
"""
CUTE_AVATAR_CHILD_S = """
xxxxxxxx
xxxxxxxx
xx*xx*xx
xx****xx
xxO**Oxx
x&*##*&x
xx&xx&xx
xxxxxxxx
"""
CUTE_AVATAR_CHILD_W = """
xxxxxxxx
xxxxxxxx
xxx*x*xx
xx****xx
xxO*O*xx
x&*##**x
xx&x&&xx
xxxxxxxx
"""
CUTE_AVATAR_CHILD = [
CUTE_AVATAR_CHILD_N, CUTE_AVATAR_CHILD_E, CUTE_AVATAR_CHILD_S,
CUTE_AVATAR_CHILD_W
]
GEM_PALETTE = {
"e": (119, 255, 239, 255),
"r": (106, 241, 225, 255),
"t": (61, 206, 189, 255),
"d": (78, 218, 202, 255),
"x": ALPHA
}
GRATE_PALETTE = {
"*": (59, 59, 59, 255),
"@": (70, 70, 70, 255),
"&": (48, 48, 48, 255),
"~": (31, 31, 31, 255),
"X": (104, 91, 91, 255),
"o": (109, 98, 98, 255),
"x": ALPHA
}
GRASS_PALETTE = {
"*": (124, 153, 115, 255),
"@": (136, 168, 126, 255),
"x": (204, 199, 192, 255)
}
GLASS_PALETTE = {
"@": (218, 243, 245, 150),
"*": (186, 241, 245, 150),
"!": (134, 211, 217, 150),
"x": ALPHA
}
WOOD_FLOOR_PALETTE = {
"-": (130, 100, 70, 255),
"x": (148, 109, 77, 255)
}
METAL_FLOOR_PALETTE = {
"o": (90, 92, 102, 255),
"O": (117, 120, 133, 255),
"x": (99, 101, 112, 255)
}
METAL_PANEL_FLOOR_PALETTE = {
"-": (142, 149, 163, 255),
"#": (144, 152, 166, 255),
"/": (151, 159, 173, 255)
}
SHIP_PALETTE = {
"o": (90, 105, 136, 255),
"#": (58, 68, 102, 255),
"x": (38, 43, 68, 255)
}
TILE_FLOOR_PALETTE = {
"t": (235, 228, 216, 255),
"x": (222, 215, 202, 255),
"o": (214, 207, 195, 255)
}
ROCK_PALETTE = {
"l": (20, 30, 40, 255),
"r": (30, 40, 50, 255),
"k": (100, 120, 120, 255),
"*": (90, 100, 110, 255),
"s": (45, 55, 65, 255),
"p": (40, 60, 60, 255),
"x": ALPHA,
}
PAPER_PALETTE = {
"*": (250, 250, 250, 255),
"@": (20, 20, 20, 255),
"x": ALPHA,
}
MOULD_PALETTE = {
"@": (179, 255, 0, 255),
"~": (140, 232, 0, 255),
"*": (132, 222, 0, 255),
"&": (119, 194, 0, 255),
"+": (153, 219, 0, 80),
"x": ALPHA
}
SCISSORS_PALETTE = {
"*": (89, 26, 180, 255),
">": (100, 100, 100, 255),
"#": (127, 127, 127, 255),
"x": ALPHA,
}
WATER_PALETTE = {
"@": (150, 190, 255, 255),
"*": (0, 100, 120, 255),
"o": (0, 70, 90, 255),
"~": (0, 55, 74, 255),
"x": ALPHA,
}
BOAT_PALETTE = {
"*": (90, 70, 20, 255),
"&": (120, 100, 30, 255),
"o": (160, 125, 35, 255),
"@": (180, 140, 40, 255),
"#": (255, 255, 240, 255),
"x": ALPHA,
}
GRAY_PALETTE = {
"*": (30, 30, 30, 255),
"&": (130, 130, 130, 255),
"@": (200, 200, 200, 255),
"#": (230, 230, 230, 255),
"x": ALPHA
}
WALL_PALETTE = {
"*": (95, 95, 95, 255),
"&": (100, 100, 100, 255),
"@": (109, 109, 109, 255),
"#": (152, 152, 152, 255),
"x": ALPHA
}
BRICK_WALL_PALETTE = {
"b": (166, 162, 139, 255),
"c": (110, 108, 92, 255),
"o": (78, 78, 78, 255),
"i": (138, 135, 116, 255),
"x": ALPHA
}
COIN_PALETTE = {
"*": (90, 90, 20, 255),
"@": (220, 220, 60, 255),
"&": (180, 180, 40, 255),
"#": (255, 255, 240, 255),
"x": ALPHA
}
RED_COIN_PALETTE = {
"*": (90, 20, 20, 255),
"@": (220, 60, 60, 255),
"&": (180, 40, 40, 255),
"#": (255, 240, 240, 255),
"x": ALPHA
}
GREEN_COIN_PALETTE = {
"*": (20, 90, 20, 255),
"@": (60, 220, 60, 255),
"&": (40, 180, 40, 255),
"#": (240, 255, 240, 255),
"x": ALPHA
}
TILED_FLOOR_GREY_PALETTE = {
"o": (204, 199, 192, 255),
"-": (194, 189, 182, 255),
"x": ALPHA
}
INVISIBLE_PALETTE = {
"*": ALPHA,
"@": ALPHA,
"&": ALPHA,
"#": ALPHA,
"x": ALPHA
}
TREE_PALETTE = {
"*": TREE_BROWN,
"@": LEAF_GREEN,
"x": ALPHA
}
POTATO_PATCH_PALETTE = {
"*": VEGETAL_GREEN,
"@": LEAF_GREEN,
"x": ALPHA
}
FIRE_PALETTE = {
"@": TREE_BROWN,
"*": DARK_FLAME,
"&": LIGHT_FLAME,
"x": ALPHA
}
STONE_QUARRY_PALETTE = {
"@": DARK_STONE,
"#": LIGHT_STONE,
"x": ALPHA
}
PRED1_PALETTE = {
"e": (80, 83, 115, 255),
"h": (95, 98, 135, 255),
"s": (89, 93, 128, 255),
"l": (117, 121, 158, 255),
"u": (113, 117, 153, 255),
"a": (108, 111, 145, 255),
"y": (255, 227, 71, 255),
"x": ALPHA
}
CROWN_PALETTE = {
"*": (190, 190, 50, 255),
"&": (150, 150, 45, 255),
"o": (100, 100, 30, 255),
"@": (240, 240, 62, 255),
"r": (170, 0, 0, 255),
"R": (220, 0, 0, 255),
"%": (255, 80, 80, 255),
"#": (255, 255, 255, 255),
"O": (160, 160, 160, 255),
"x": (0, 0, 0, 0),
}
FENCE_PALETTE_BROWN = {
"a": (196, 155, 123, 255),
"b": (167, 131, 105, 255),
"c": (146, 114, 90, 255),
"d": (122, 94, 75, 255),
"e": (89, 67, 55, 255),
"x": (0, 0, 0, 0),
"#": (0, 0, 0, 38),
}
MUSHROOM_GREEN_PALETTE = {
"|": (245, 240, 206, 255),
"!": (224, 216, 173, 255),
"i": (191, 185, 147, 255),
"w": (37, 161, 72, 255),
"O": (90, 224, 116, 255),
"o": (90, 224, 116, 75),
"*": (186, 238, 205, 255),
"x": (0, 0, 0, 0),
}
MUSHROOM_RED_PALETTE = {
"|": (245, 240, 206, 255),
"!": (224, 216, 173, 255),
"i": (191, 185, 147, 255),
"w": (184, 99, 92, 255),
"O": (239, 132, 240, 255),
"o": (239, 132, 240, 75),
"*": (235, 192, 236, 255),
"x": (0, 0, 0, 0),
}
MUSHROOM_BLUE_PALETTE = {
"|": (245, 240, 206, 255),
"!": (224, 216, 173, 255),
"i": (191, 185, 147, 255),
"w": (30, 168, 161, 255),
"O": (41, 210, 227, 255),
"o": (41, 210, 227, 75),
"*": (187, 228, 226, 255),
"x": (0, 0, 0, 0),
}
MUSHROOM_ORANGE_PALETTE = {
"|": (245, 240, 206, 255),
"!": (224, 216, 173, 255),
"i": (191, 185, 147, 255),
"w": (242, 140, 40, 255),
"O": (255, 165, 0, 255),
"o": (255, 172, 28, 75),
"*": (197, 208, 216, 255),
"x": (0, 0, 0, 0),
}
DISPENSER_BELT_PALETTE = {
# Palette for DISPENSER_BELT sprites
"a": (140, 129, 129, 255),
"b": (84, 77, 77, 255),
"e": (120, 128, 156, 255),
"j": (181, 167, 167, 255),
"o": (174, 127, 19, 255),
"-": (222, 179, 80, 255),
"O": (230, 168, 25, 255),
"d": (64, 68, 82, 255),
"x": (0, 0, 0, 0),
}
FACTORY_OBJECTS_PALETTE = {
# Palette for BANANA, BLOCK, APPLE and HOPPER_INDICATOR sprites
"a": (120, 210, 210, 255),
"A": (100, 190, 190, 255),
"&": (90, 180, 180, 255),
"x": (0, 0, 0, 0),
"b": (245, 230, 27, 255),
"B": (245, 230, 27, 145),
"s": (94, 54, 67, 255),
"E": (124, 224, 230, 104),
"f": (169, 59, 59, 255),
"g": (57, 123, 68, 255),
"F": (140, 49, 49, 255),
"G": (57, 123, 68, 115),
"S": (94, 54, 67, 115),
}
BATTERY_PALETTE = {
    # Palette for BATTERY and WIRES sprites.
    "a": (99, 92, 92, 255),
    "A": (71, 66, 66, 255),
    "d": (78, 122, 86, 255),
    "D": (60, 89, 86, 255),
    "f": (62, 123, 214, 255),
    "g": (214, 71, 71, 255),
    "s": (181, 167, 167, 255),
    "w": (223, 246, 245, 255),
    "o": (111, 196, 20, 255),
    "O": (98, 173, 17, 255),
    "#": (0, 0, 0, 255),
    "W": (255, 255, 255, 255),
    "x": (0, 0, 0, 0),  # Fully transparent.
}
STARS_PALETTE = {
"-": (223, 237, 19, 255),
"x": (0, 0, 0, 0),
}
GOLD_CROWN_PALETTE = {
"#": (244, 180, 27, 255),
"@": (186, 136, 20, 150),
"x": (0, 0, 0, 0)
}
SILVER_CROWN_PALETTE = {
"#": (204, 203, 200, 255),
"@": (171, 170, 167, 150),
"x": (0, 0, 0, 0),
}
BRONZE_CAP_PALETTE = {
"#": (102, 76, 0, 255),
"@": (87, 65, 0, 255),
"x": (0, 0, 0, 0)
}
YELLOW_FLAG_PALETTE = {
"*": (255, 216, 0, 255),
"@": (230, 195, 0, 255),
"&": (204, 173, 0, 255),
"|": (102, 51, 61, 255),
"x": (0, 0, 0, 0)
}
RED_FLAG_PALETTE = {
"*": (207, 53, 29, 255),
"@": (181, 46, 25, 255),
"&": (156, 40, 22, 255),
"|": (102, 51, 61, 255),
"x": (0, 0, 0, 0)
}
GREEN_FLAG_PALETTE = {
"*": (23, 191, 62, 255),
"@": (20, 166, 54, 255),
"&": (17, 140, 46, 255),
"|": (102, 51, 61, 255),
"x": (0, 0, 0, 0)
}
HIGHLIGHT_PALETTE = {
"*": (255, 255, 255, 35),
"o": (0, 0, 0, 35),
"x": (0, 0, 0, 0)
}
BRUSH_PALETTE = {
"-": (143, 96, 74, 255),
"+": (117, 79, 61, 255),
"k": (199, 176, 135, 255)
}
MAGIC_BEAM_PALETTE = {
"*": (196, 77, 190, 200),
"~": (184, 72, 178, 150),
"x": (0, 0, 0, 0),
}
FRUIT_TREE_PALETTE = {
"#": (113, 170, 52, 255),
"w": (57, 123, 68, 255),
"I": (71, 45, 60, 255),
"x": (0, 0, 0, 0),
}
CYTOAVATAR_PALETTE = {
"*": (184, 61, 187, 255),
"&": (161, 53, 146, 255),
"o": (110, 15, 97, 255),
",": (0, 0, 0, 255),
"x": (0, 0, 0, 0),
"#": (255, 255, 255, 255),
}
PETRI_DISH_PALETTE = {
"@": (238, 245, 245, 255),
"~": (212, 234, 232, 255),
"*": (188, 220, 220, 255),
"o": (182, 204, 201, 255),
"&": (168, 189, 189, 255),
"x": (0, 0, 0, 0),
"#": (255, 255, 255, 255),
}
MATTER_PALETTE = {
"S": (138, 255, 228, 255),
"s": (104, 247, 214, 255),
"Z": (96, 230, 198, 255),
"G": (104, 247, 214, 100),
"g": (71, 222, 187, 175),
"L": (48, 194, 160, 255),
"l": (41, 166, 137, 255),
"w": (41, 186, 154, 255),
"x": (0, 0, 0, 0),
}
CONVEYOR_BELT_PALETTE = {
"B": (48, 44, 46, 255),
"Y": (250, 197, 75, 255),
"y": (212, 177, 97, 255),
"M": (117, 108, 103, 255),
"h": (161, 147, 141, 255),
"s": (148, 135, 130, 255),
}
PAINTER_STAND_BLUE_PALETTE = {
"*": (70, 147, 199, 255),
"@": (98, 176, 222, 255),
"!": (47, 95, 158, 255),
"o": (41, 77, 128, 255),
"W": (255, 255, 255, 175),
"w": (255, 255, 255, 150),
"x": (255, 255, 255, 0),
}
OBJECT_INDICATOR_PALETTE = {
"O": (229, 221, 212, 255),
"o": (185, 178, 170, 255),
"S": (105, 101, 96, 255),
"s": (122, 118, 113, 255),
"x": (0, 0, 0, 0),
}
BLUE_INDICATOR_PALETTE = {
"O": (111, 191, 237, 255),
"o": (81, 160, 207, 255),
"S": (33, 102, 148, 255),
"s": (29, 89, 130, 255),
"x": (0, 0, 0, 0),
}
MONOCHROME_OBJECT_PALETTE = {
"*": (255, 247, 235, 255),
"@": (245, 237, 225, 255),
"#": (232, 225, 213, 255),
"!": (215, 210, 198, 255),
"S": (145, 141, 136, 255),
"s": (172, 168, 163, 255),
"x": (0, 0, 0, 0),
"o": (107, 86, 85, 255),
"|": (89, 71, 70, 255),
}
# NOTE: Removed duplicate re-definitions of PAINTER_STAND_BLUE_PALETTE,
# OBJECT_INDICATOR_PALETTE, BLUE_INDICATOR_PALETTE and
# MONOCHROME_OBJECT_PALETTE. They were byte-identical to the definitions
# immediately above, so dropping them leaves every name bound to exactly
# the same value and preserves behavior.
CONVERTER_PALETTE = {
"a": (178, 171, 164, 255),
"b": (163, 156, 150, 255),
"c": (150, 144, 138, 255),
"d": (138, 129, 123, 255),
"e": (128, 120, 113, 255),
"f": (122, 114, 109, 255),
"g": (112, 108, 101, 255),
"h": (71, 69, 64, 255),
"A": (129, 143, 142, 255),
"B": (110, 122, 121, 255),
"C": (92, 102, 101, 255),
">": (51, 51, 51, 255),
"<": (30, 30, 30, 255),
",": (0, 0, 0, 255),
"x": (0, 0, 0, 0),
"`": (138, 96, 95, 255),
"!": (253, 56, 38, 255),
"[": (74, 167, 181, 255),
"_": (67, 150, 163, 255),
"]": (61, 136, 148, 255),
"*": (0, 0, 0, 73),
}
FACTORY_FLOOR_PALETTE = {
"-": (204, 204, 188, 255),
"x": (194, 194, 178, 255),
"o": (212, 212, 195, 255)
}
CONVEYOR_BELT_PALETTE_MONOCHROME = {
"y": (181, 170, 168, 255),
"h": (158, 148, 147, 255),
"s": (150, 139, 138, 255),
"M": (135, 124, 123, 255),
"Y": (194, 160, 81, 255),
"B": (73, 66, 75, 255)
}
CONVEYOR_BELT_GREEN_ANCHOR_PALETTE = {
":": (135, 143, 116, 255),
",": (113, 120, 89, 255),
"G": (148, 156, 126, 255),
"g": (129, 138, 103, 255),
"x": (0, 0, 0, 0)
}
BLUE_OBJECT_PALETTE = {
"@": (51, 170, 189, 255),
"*": (56, 186, 207, 255),
"#": (45, 152, 168, 255),
"x": (0, 0, 0, 0)
}
APPLE_RED_PALETTE = {
"x": (0, 0, 0, 0),
"*": (171, 32, 32, 255),
"#": (140, 27, 27, 255),
"o": (43, 127, 53, 255),
"|": (79, 47, 44, 255),
}
DIAMOND_PALETTE = {
"a": (227, 255, 231, 255),
"b": (183, 247, 224, 255),
"c": (166, 224, 203, 255),
"d": (157, 212, 191, 255),
"x": (0, 0, 0, 0),
}
WALLS_PALETTE = {
"a": (191, 183, 180, 255),
"b": (143, 137, 134, 255),
"c": (135, 123, 116, 255),
"d": (84, 76, 72, 255),
"x": (0, 0, 0, 0),
}
APPLE_TREE_PALETTE = {
"a": (124, 186, 58, 255),
"b": (105, 158, 49, 255),
"o": (199, 33, 8, 255),
"I": (122, 68, 74, 255),
"x": (0, 0, 0, 0),
}
BANANA_TREE_PALETTE = {
"a": (43, 135, 52, 255),
"b": (37, 115, 45, 255),
"o": (222, 222, 13, 255),
"I": (122, 68, 74, 255),
"x": (0, 0, 0, 0),
}
ORANGE_TREE_PALETTE = {
"a": (78, 110, 49, 255),
"b": (37, 115, 45, 255),
"o": (222, 222, 13, 255),
"I": (122, 68, 74, 255),
"x": (0, 0, 0, 0),
}
GOLD_MINE_PALETTE = {
"a": (32, 32, 32, 255),
"b": (27, 27, 27, 255),
"o": (255, 215, 0, 255),
"I": (5, 5, 5, 255),
"x": (0, 0, 0, 0),
}
FENCE_PALETTE = {
"a": (208, 145, 94, 255),
"b": (191, 121, 88, 255),
"c": (160, 91, 83, 255),
"d": (122, 68, 74, 255),
"e": (94, 54, 67, 255),
"x": (0, 0, 0, 0),
"#": (0, 0, 0, 38),
}
SHADOW_PALETTE = {
"~": (0, 0, 0, 20),
"*": (0, 0, 0, 43),
"@": (0, 0, 0, 49),
"#": (0, 0, 0, 55),
"x": (0, 0, 0, 0),
}
# --- dataset-join artifact (was: "| meltingpot-main | meltingpot/utils/substrates/shapes.py |") ---
# The constants above belong to meltingpot/utils/substrates/shapes.py; the
# code below is the beginning of a second file (the substrate builder).
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Substrate builder."""
from collections.abc import Collection, Mapping, Sequence
from typing import Any
import chex
import dm_env
from meltingpot.utils.substrates import builder
from meltingpot.utils.substrates.wrappers import base
from meltingpot.utils.substrates.wrappers import collective_reward_wrapper
from meltingpot.utils.substrates.wrappers import discrete_action_wrapper
from meltingpot.utils.substrates.wrappers import multiplayer_wrapper
from meltingpot.utils.substrates.wrappers import observables
from meltingpot.utils.substrates.wrappers import observables_wrapper
import reactivex
from reactivex import subject
@chex.dataclass(frozen=True)
class SubstrateObservables:
  """Observables for a substrate.

  Attributes:
    action: emits actions sent to the substrate from players.
    timestep: emits timesteps sent from the substrate to players.
    events: emits environment-specific events resulting from any interactions
      with the Substrate. Each individual event is emitted as a single element:
      (event_name, event_item).
    dmlab2d: observables from the underlying dmlab2d environment.
  """
  action: reactivex.Observable[Sequence[int]]
  timestep: reactivex.Observable[dm_env.TimeStep]
  events: reactivex.Observable[tuple[str, Any]]
  dmlab2d: observables.Lab2dObservables
class Substrate(base.Lab2dWrapper):
  """Specific subclass of Wrapper with overridden spec types."""

  def __init__(self, env: observables.ObservableLab2d) -> None:
    """See base class."""
    super().__init__(env)
    self._action_subject = subject.Subject()
    self._timestep_subject = subject.Subject()
    self._events_subject = subject.Subject()
    self._observables = SubstrateObservables(
        action=self._action_subject,
        events=self._events_subject,
        timestep=self._timestep_subject,
        dmlab2d=env.observables(),
    )

  def _publish(self, timestep: dm_env.TimeStep) -> dm_env.TimeStep:
    """Emits `timestep` and any pending events, then passes it through."""
    self._timestep_subject.on_next(timestep)
    for event in super().events():
      self._events_subject.on_next(event)
    return timestep

  def reset(self) -> dm_env.TimeStep:
    """See base class."""
    return self._publish(super().reset())

  def step(self, action: Sequence[int]) -> dm_env.TimeStep:
    """See base class."""
    # The action is emitted before it is applied to the environment.
    self._action_subject.on_next(action)
    return self._publish(super().step(action))

  def reward_spec(self) -> Sequence[dm_env.specs.Array]:
    """See base class."""
    return self._env.reward_spec()

  def observation_spec(self) -> Sequence[Mapping[str, dm_env.specs.Array]]:
    """See base class."""
    return self._env.observation_spec()

  def action_spec(self) -> Sequence[dm_env.specs.DiscreteArray]:
    """See base class."""
    return self._env.action_spec()

  def close(self) -> None:
    """See base class. Completes all observable streams."""
    super().close()
    self._action_subject.on_completed()
    self._timestep_subject.on_completed()
    self._events_subject.on_completed()

  def observables(self) -> SubstrateObservables:
    """Returns observables for the substrate."""
    return self._observables
def build_substrate(
    *,
    lab2d_settings: builder.Settings,
    individual_observations: Collection[str],
    global_observations: Collection[str],
    action_table: Sequence[Mapping[str, int]],
) -> Substrate:
  """Builds a Melting Pot substrate.

  Args:
    lab2d_settings: the lab2d settings for building the lab2d environment.
    individual_observations: names of the player-specific observations to make
      available to each player.
    global_observations: names of the dmlab2d observations to make available to
      all players.
    action_table: the possible actions. action_table[i] defines the dmlab2d
      action that will be forwarded to the wrapped dmlab2d environment for the
      discrete Melting Pot action i.

  Returns:
    The constructed substrate.
  """
  env = builder.builder(lab2d_settings)
  # Wrapping order matters: observables first, then multiplayer observation
  # routing, then discretized actions.
  env = observables_wrapper.ObservablesWrapper(env)
  env = multiplayer_wrapper.Wrapper(
      env,
      individual_observation_names=individual_observations,
      global_observation_names=global_observations)
  env = discrete_action_wrapper.Wrapper(env, action_table=action_table)
  # Add a wrapper that adds an observation of the collective
  # reward (sum of all players' rewards).
  env = collective_reward_wrapper.CollectiveRewardWrapper(env)
  return Substrate(env)
|
meltingpot-main
|
meltingpot/utils/substrates/substrate.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for shapes."""
from absl.testing import absltest
from absl.testing import parameterized
from meltingpot.utils.substrates import shapes
class ShapesTest(parameterized.TestCase):
  """Tests for `shapes.flip_vertical` and `shapes.flip_horizontal`."""

  # NOTE(review): the interior whitespace of these multiline sprite strings
  # may have been altered in transit — verify against the original file.
  @parameterized.parameters([
      ["""
a
b
c
""", """
c
b
a
"""], ["""
""", """
"""], ["""
abc
def
ghi
""", """
ghi
def
abc
"""],
  ])
  def test_flip_vertical(self, original, expected):
    # Vertical flip reverses the order of the rows.
    actual = shapes.flip_vertical(original)
    self.assertEqual(actual, expected)

  @parameterized.parameters([
      ["""
a
b
c
""", """
a
b
c
"""], ["""
""", """
"""], ["""
abc
def
ghi
""", """
cba
fed
ihg
"""],
  ])
  def test_flip_horizontal(self, original, expected):
    # Horizontal flip reverses the characters within each row.
    actual = shapes.flip_horizontal(original)
    self.assertEqual(actual, expected)
# Allow running this test module directly.
if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/substrates/shapes_test.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Substrate factory."""
from collections.abc import Collection, Mapping, Sequence, Set
from typing import Callable
import dm_env
from meltingpot.utils.substrates import builder
from meltingpot.utils.substrates import substrate
class SubstrateFactory:
  """Factory for building specific substrates."""

  def __init__(
      self,
      *,
      lab2d_settings_builder: Callable[[Sequence[str]], builder.Settings],
      individual_observations: Collection[str],
      global_observations: Collection[str],
      action_table: Sequence[Mapping[str, int]],
      timestep_spec: dm_env.TimeStep,
      action_spec: dm_env.specs.DiscreteArray,
      valid_roles: Collection[str],
      default_player_roles: Sequence[str],
  ) -> None:
    """Initializes the factory.

    Args:
      lab2d_settings_builder: maps a sequence of player roles to the lab2d
        settings used to build the substrate.
      individual_observations: names of the player-specific observations made
        available to each player.
      global_observations: names of the dmlab2d observations made available to
        all players.
      action_table: the possible actions. action_table[i] defines the dmlab2d
        action that will be forwarded to the wrapped dmlab2d environment for
        the discrete Melting Pot action i.
      timestep_spec: spec of the timestep sent to a single player.
      action_spec: spec of the action expected from a single player.
      valid_roles: player roles the substrate supports.
      default_player_roles: the default player roles vector that should be
        used for training.
    """
    # Take immutable copies so later mutation of the caller's collections
    # cannot change the factory's behavior.
    self._individual_observations = frozenset(individual_observations)
    self._global_observations = frozenset(global_observations)
    self._action_table = tuple(dict(row) for row in action_table)
    self._valid_roles = frozenset(valid_roles)
    self._default_player_roles = tuple(default_player_roles)
    self._lab2d_settings_builder = lab2d_settings_builder
    self._timestep_spec = timestep_spec
    self._action_spec = action_spec

  def valid_roles(self) -> Set[str]:
    """Returns the roles the substrate supports."""
    return self._valid_roles

  def default_player_roles(self) -> Sequence[str]:
    """Returns the player roles used by scenarios."""
    return self._default_player_roles

  def timestep_spec(self) -> dm_env.TimeStep:
    """Returns the spec of the timestep sent to a single player."""
    return self._timestep_spec

  def action_spec(self) -> dm_env.specs.DiscreteArray:
    """Returns the spec of the action expected from a single player."""
    return self._action_spec

  def build(self, roles: Sequence[str]) -> substrate.Substrate:
    """Builds the substrate.

    Args:
      roles: the role each player will take.

    Returns:
      The constructed substrate.
    """
    lab2d_settings = self._lab2d_settings_builder(roles)
    return substrate.build_substrate(
        lab2d_settings=lab2d_settings,
        individual_observations=self._individual_observations,
        global_observations=self._global_observations,
        action_table=self._action_table)
|
meltingpot-main
|
meltingpot/utils/substrates/substrate_factory.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
meltingpot-main
|
meltingpot/utils/substrates/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for game_object_utils."""
from absl.testing import absltest
from absl.testing import parameterized
from meltingpot.utils.substrates import game_object_utils
def get_transform(x, y, orientation):
  """Returns a Transform placed at (x, y) with the given orientation."""
  position = game_object_utils.Position(x, y)
  return game_object_utils.Transform(position=position,
                                     orientation=orientation)
class ParseMapTest(parameterized.TestCase):
  """Tests for the ASCII-map parsing helpers in `game_object_utils`."""

  # Each case is (ascii_map, char, expected_occurrence_count). Maps start with
  # a leading '\n' because the parser skips the first row.
  @parameterized.parameters(
      ('\nHello', 'H', 1),
      ('\nHello', 'h', 0),
      ('\nHello', 'l', 2),
      ('\nHello\nWorld', 'l', 3),
      ('\nHello\nWorld', 'o', 2),
      ('\nHello\nWorld', 'd', 1),
      ('\nHello\nWorld', 'W', 1),
      ('\nWWWW\nW AW\nWWWW', 'A', 1),
      ('\nWWWW\nW AW\nWWWW', 'W', 10),
      ('\nWWWW\nW AW\nWWWW', 'P', 0),
  )
  def test_get_positions_length(self, ascii_map, char, exp_len):
    # Matching is case-sensitive; absent characters yield an empty result.
    transforms = game_object_utils.get_game_object_positions_from_map(
        ascii_map, char)
    self.assertLen(transforms, exp_len)

  def test_get_positions(self):
    # Locations of 'A' -> (2, 1)
    # Locations of ' ' -> (1, 1), (3, 1) and (4, 1)
    # NOTE(review): the expectations below imply a 6-character middle row
    # ('W A  W'); interior whitespace of this literal appears to have been
    # collapsed in transit — verify against the original file.
    ascii_map = '''
WWWWWW
W A W
WWWWWW
'''
    transforms = game_object_utils.get_game_object_positions_from_map(
        ascii_map, 'A')
    self.assertSameElements(
        [get_transform(2, 1, game_object_utils.Orientation.NORTH)], transforms)
    transforms = game_object_utils.get_game_object_positions_from_map(
        ascii_map, ' ')
    self.assertSameElements(
        [
            get_transform(1, 1, game_object_utils.Orientation.NORTH),
            get_transform(3, 1, game_object_utils.Orientation.NORTH),
            get_transform(4, 1, game_object_utils.Orientation.NORTH)
        ],
        transforms)
    transforms = game_object_utils.get_game_object_positions_from_map(
        ascii_map, 'W')
    # All returned orientations default to NORTH.
    self.assertSameElements(
        [
            # Top walls
            get_transform(0, 0, game_object_utils.Orientation.NORTH),
            get_transform(1, 0, game_object_utils.Orientation.NORTH),
            get_transform(2, 0, game_object_utils.Orientation.NORTH),
            get_transform(3, 0, game_object_utils.Orientation.NORTH),
            get_transform(4, 0, game_object_utils.Orientation.NORTH),
            get_transform(5, 0, game_object_utils.Orientation.NORTH),
            # Side walls
            get_transform(0, 1, game_object_utils.Orientation.NORTH),
            get_transform(5, 1, game_object_utils.Orientation.NORTH),
            # Bottom walls
            get_transform(0, 2, game_object_utils.Orientation.NORTH),
            get_transform(1, 2, game_object_utils.Orientation.NORTH),
            get_transform(2, 2, game_object_utils.Orientation.NORTH),
            get_transform(3, 2, game_object_utils.Orientation.NORTH),
            get_transform(4, 2, game_object_utils.Orientation.NORTH),
            get_transform(5, 2, game_object_utils.Orientation.NORTH),
        ],
        transforms)

  def test_get_game_objects(self):
    # NOTE(review): see whitespace caveat in test_get_positions — the counts
    # below assume a 6-wide map with 14 wall cells.
    ascii_map = '''
WWWWWW
W A W
WWWWWW
'''
    # Minimal prefabs; the Transform position (0, 0) is overwritten by the
    # parser with each object's actual map position.
    wall = {
        'name': 'wall',
        'components': [
            {
                'component': 'PieceTypeManager',
                'kwargs': {
                    'initialPieceType': 'wall',
                    'pieceTypeConfigs': [{'pieceType': 'wall',}],
                },
            },
            {
                'component': 'Transform',
                'kwargs': {
                    'position': (0, 0),
                    'orientation': 'N'
                },
            },
        ]
    }
    apple = {
        'name': 'apple',
        'components': [
            {
                'component': 'PieceTypeManager',
                'kwargs': {
                    'initialPieceType': 'apple',
                    'pieceTypeConfigs': [{'pieceType': 'apple',}],
                },
            },
            {
                'component': 'Transform',
                'kwargs': {
                    'position': (0, 0),
                    'orientation': 'N'
                },
            },
        ]
    }
    prefabs = {'wall': wall, 'apple': apple}
    game_objects = game_object_utils.get_game_objects_from_map(
        ascii_map, {'W': 'wall', 'A': 'apple'}, prefabs)
    self.assertLen(game_objects, 15)
    self.assertEqual(
        1,
        sum([1 if go['name'] == 'apple' else 0 for go in game_objects]))
    self.assertEqual(
        14,
        sum([1 if go['name'] == 'wall' else 0 for go in game_objects]))
    # Each wall's Transform should hold its actual map position.
    positions = []
    for go in game_objects:
      if go['name'] == 'wall':
        positions.append(game_object_utils.get_first_named_component(
            go, 'Transform')['kwargs']['position'])
    self.assertSameElements(
        [
            (0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0),  # Top walls
            (0, 1), (5, 1),  # Side walls
            (0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2),  # Bottom walls
        ],
        positions)
# Minimal avatar prefab fixture. The sprite name, palette, and Avatar index
# are placeholders that `build_avatar_objects` overrides per player.
AVATAR = {
    'name': 'avatar',
    'components': [
        {
            'component': 'StateManager',
            'kwargs': {
                'initialState': 'player',
                'stateConfigs': [
                    {'state': 'player',
                     'layer': 'upperPhysical',
                     'sprite': 'Avatar',},  # Will be overridden
                    {'state': 'playerWait',},
                ]
            }
        },
        {
            'component': 'Transform',
            'kwargs': {
                'position': (0, 0),
                'orientation': 'N'
            }
        },
        {
            'component': 'Appearance',
            'kwargs': {
                'renderMode': 'ascii_shape',
                'spriteNames': ['Avatar'],  # Will be overridden
                'spriteShapes': ["""*"""],
                'palettes': [(0, 0, 255, 255)],  # Will be overridden
                'noRotates': [True]
            }
        },
        {
            'component': 'Avatar',
            'kwargs': {
                'index': -1,  # Will be overridden
                'spawnGroup': 'spawnPoints',
                'aliveState': 'player',
                'waitState': 'playerWait',
                'actionOrder': ['move'],
                'actionSpec': {
                    'move': {'default': 0, 'min': 0, 'max': 4},
                },
            }
        },
    ],
}
# Minimal badge-overlay prefab fixture. The sprite name, palette, and
# AvatarConnector playerIndex are overridden per player by
# `build_avatar_badges`.
BADGE = {
    'name': 'avatar_badge',
    'components': [
        {
            'component': 'StateManager',
            'kwargs': {
                'initialState': 'badgeWait',
                'stateConfigs': [
                    {'state': 'badge',
                     'layer': 'overlay',
                     'sprite': 'Badge',
                     'groups': ['badges']},
                    {'state': 'badgeWait',
                     'groups': ['badgeWaits']},
                ]
            }
        },
        {
            'component': 'Transform',
            'kwargs': {
                'position': (0, 0),
                'orientation': 'N'
            }
        },
        {
            'component': 'Appearance',
            'kwargs': {
                'renderMode': 'ascii_shape',
                'spriteNames': ['Badge'],
                'spriteShapes': ['*'],
                'palettes': [(0, 0, 255, 255)],
                'noRotates': [False]
            }
        },
        {
            'component': 'AvatarConnector',
            'kwargs': {
                'playerIndex': -1,  # player index to be overwritten.
                'aliveState': 'badge',
                'waitState': 'badgeWait'
            }
        },
    ]
}
class BuildAvatarObjectsTest(parameterized.TestCase):
  """Tests for `game_object_utils.build_avatar_objects`."""

  @parameterized.parameters(
      [1], [2], [3], [4], [5]
  )
  def test_simple_build(self, num_players):
    # Exactly one avatar object should be produced per player.
    prefabs = {'avatar': AVATAR}
    avatars = game_object_utils.build_avatar_objects(
        num_players=num_players,
        prefabs=prefabs,
        player_palettes=None,
    )
    self.assertLen(avatars, num_players)

  def test_with_palette_build(self):
    # Explicitly provided palettes should be assigned to avatars in order.
    palettes = [(255, 0, 0, 255), (0, 255, 0, 255)]
    prefabs = {'avatar': AVATAR}
    avatars = game_object_utils.build_avatar_objects(
        num_players=2,
        prefabs=prefabs,
        player_palettes=palettes,
    )
    self.assertLen(avatars, 2)
    self.assertEqual(
        game_object_utils.get_first_named_component(
            avatars[0], 'Appearance')['kwargs']['palettes'][0],
        palettes[0])
    self.assertEqual(
        game_object_utils.get_first_named_component(
            avatars[1], 'Appearance')['kwargs']['palettes'][0],
        palettes[1])
class BuildAvatarBadgesTest(parameterized.TestCase):
  """Tests for `game_object_utils.build_avatar_badges`."""

  @parameterized.parameters(
      [1], [2], [3], [4], [5]
  )
  def test_simple_build(self, num_players):
    # Exactly one badge object should be produced per player.
    prefabs = {'avatar_badge': BADGE}
    badges = game_object_utils.build_avatar_badges(
        num_players=num_players,
        prefabs=prefabs,
        badge_palettes=None,
    )
    self.assertLen(badges, num_players)

  def test_with_palette_build(self):
    # Explicitly provided palettes should be assigned to badges in order.
    palettes = [(255, 0, 0, 255), (0, 255, 0, 255)]
    prefabs = {'avatar_badge': BADGE}
    badges = game_object_utils.build_avatar_badges(
        num_players=2,
        prefabs=prefabs,
        badge_palettes=palettes,
    )
    self.assertLen(badges, 2)
    self.assertEqual(
        game_object_utils.get_first_named_component(
            badges[0], 'Appearance')['kwargs']['palettes'][0],
        palettes[0])
    self.assertEqual(
        game_object_utils.get_first_named_component(
            badges[1], 'Appearance')['kwargs']['palettes'][0],
        palettes[1])
# Allow running this test module directly.
if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/substrates/game_object_utils_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-player environment builder for Melting Pot levels."""
from collections.abc import Mapping
import copy
import itertools
import os
import random
from typing import Any, Optional, Union
from absl import logging
import dmlab2d
from dmlab2d import runfiles_helper
from dmlab2d import settings_helper
from meltingpot.utils.substrates import game_object_utils
from meltingpot.utils.substrates.wrappers import reset_wrapper
from ml_collections import config_dict
import tree
# Settings may be supplied as an ml_collections ConfigDict or a plain mapping.
Settings = Union[config_dict.ConfigDict, Mapping[str, Any]]
# Largest value usable as a 32-bit environment seed.
_MAX_SEED = 2 ** 32 - 1
_DMLAB2D_ROOT = runfiles_helper.find()  # Root of the DMLab2D runfiles.
def _find_root() -> str:
import re # pylint: disable=g-import-not-at-top
return re.sub('^(.*)/meltingpot/.*?$', r'\1', __file__)
_MELTINGPOT_ROOT = _find_root()
# Although to_dict in ConfigDict is recursive, it is not enough for our use case
# because the recursion will _not_ go into the list elements. And we have plenty
# of those in our configs.
def _config_dict_to_dict(value):
  """Recursively converts ConfigDicts to plain dicts, including inside lists."""
  if isinstance(value, config_dict.ConfigDict):
    # tree.map_structure recurses into list/tuple elements, unlike to_dict.
    return tree.map_structure(_config_dict_to_dict, value.to_dict())
  return value
def parse_python_settings_for_dmlab2d(
    lab2d_settings: config_dict.ConfigDict) -> dict[str, Any]:
  """Flatten lab2d_settings into Lua-friendly properties."""
  # Since config_dicts disallow "." in keys, we must use a different character,
  # "$", in our config and then convert it to "." here. This is particularly
  # important for levels with config keys like 'player.%default' in DMLab2D.
  flattened = settings_helper.flatten_args(_config_dict_to_dict(lab2d_settings))
  return {key.replace("$", "."): str(value)
          for key, value in flattened.items()}
def apply_prefab_overrides(
    lab2d_settings: config_dict.ConfigDict,
    prefab_overrides: Optional[Settings] = None) -> None:
  """Apply prefab overrides to lab2d_settings.

  Args:
    lab2d_settings: writable settings whose `simulation.prefabs` entries will
      be modified in place.
    prefab_overrides: mapping of prefab name -> component name -> kwarg name
      -> new value.

  Raises:
    ValueError: if an override names a prefab that is not present in
      `lab2d_settings.simulation.prefabs`.
  """
  if "gameObjects" not in lab2d_settings.simulation:
    lab2d_settings.simulation.gameObjects = []
  if not prefab_overrides:
    return
  # Edit prefabs with the overrides, both in lab2d_settings and in prefabs.
  for prefab, override in prefab_overrides.items():
    # Validate once per prefab (previously this check ran once per overridden
    # argument, and was skipped entirely for empty override mappings).
    if prefab not in lab2d_settings.simulation.prefabs:
      raise ValueError(f"Prefab override for '{prefab}' given, but not " +
                       "available in `prefabs`.")
    for component, arg_overrides in override.items():
      for arg_name, arg_override in arg_overrides.items():
        game_object_utils.get_first_named_component(
            lab2d_settings.simulation.prefabs[prefab],
            component)["kwargs"][arg_name] = arg_override
def maybe_build_and_add_avatar_objects(
    lab2d_settings: config_dict.ConfigDict) -> None:
  """If requested, build the avatar objects and add them to lab2d_settings.

  Avatars will be built here if and only if:
  1) An 'avatar' prefab is supplied in lab2d_settings.simulation.prefabs; and
  2) lab2d_settings.simulation.buildAvatars is not True.

  Avatars built here will have their colors set from the palette provided in
  lab2d_settings.simulation.playerPalettes, or if none is provided, using the
  first num_players colors in the colors.py module.

  Args:
    lab2d_settings: A writable version of the lab2d_settings. Avatar objects,
      if they are to be built here, will be added as game objects in
      lab2d_settings.simulation.gameObjects.

  Raises:
    ValueError: if avatar building is deferred to Lua but no 'avatar' prefab
      is available.
  """
  # Whether the avatars will be built in Lua (False) or here (True). This is
  # roughly the opposite of the `buildAvatars` setting.
  build_avatars_here = ("avatar" in lab2d_settings.simulation.prefabs)
  if ("buildAvatars" in lab2d_settings.simulation
      and lab2d_settings.simulation.buildAvatars):
    build_avatars_here = False
    if "avatar" not in lab2d_settings.simulation.prefabs:
      raise ValueError(
          "Deferring avatar building to Lua, yet no 'avatar' prefab given.")
  if build_avatars_here:
    palettes = (lab2d_settings.simulation.playerPalettes
                if "playerPalettes" in lab2d_settings.simulation else None)
    if "gameObjects" not in lab2d_settings.simulation:
      lab2d_settings.simulation.gameObjects = []
    # Create avatars. Log the guarded `palettes` local rather than accessing
    # `lab2d_settings.simulation.playerPalettes` directly: that attribute may
    # be absent (the branch above), in which case reading it would raise.
    logging.info("Building avatars in `meltingpot.builder` with palettes: %s",
                 palettes)
    avatar_objects = game_object_utils.build_avatar_objects(
        int(lab2d_settings.numPlayers),
        lab2d_settings.simulation.prefabs,
        palettes)
    lab2d_settings.simulation.gameObjects += avatar_objects
def locate_and_overwrite_level_directory(
    lab2d_settings: config_dict.ConfigDict) -> None:
  """Locates the run files, and overwrites the levelDirectory with it."""
  # If no level directory is configured there is nothing to relocate.
  level_dir = lab2d_settings.get("levelDirectory")
  if not level_dir:
    return
  # Fold the old directory into the level name, then point the directory at
  # the located Melting Pot root.
  level_name = lab2d_settings.get("levelName")
  lab2d_settings.levelName = os.path.join(level_dir, level_name)
  lab2d_settings.levelDirectory = _MELTINGPOT_ROOT
def builder(
    lab2d_settings: Settings,
    prefab_overrides: Optional[Settings] = None,
    env_seed: Optional[int] = None,
    **settings) -> dmlab2d.Environment:
  """Builds a Melting Pot environment.

  Args:
    lab2d_settings: a dict of environment designation args. Must contain a
      "simulation" entry.
    prefab_overrides: overrides for prefabs.
    env_seed: the seed to pass to the environment. If None, a random nonzero
      seed is chosen.
    **settings: Other settings which are not used by Melting Pot but can still
      be passed from the environment builder.

  Returns:
    A multi-player Melting Pot environment.
  """
  del settings  # Not currently used by DMLab2D.
  assert "simulation" in lab2d_settings
  # Copy config, so as not to modify it.
  lab2d_settings = config_dict.ConfigDict(
      copy.deepcopy(lab2d_settings)).unlock()
  apply_prefab_overrides(lab2d_settings, prefab_overrides)
  maybe_build_and_add_avatar_objects(lab2d_settings)
  locate_and_overwrite_level_directory(lab2d_settings)
  # Convert settings from python to Lua format.
  lab2d_settings_dict = parse_python_settings_for_dmlab2d(lab2d_settings)
  if env_seed is None:
    # Select a long seed different than zero.
    env_seed = random.randint(1, _MAX_SEED)
  # Infinite stream of seeds derived from env_seed, kept within the valid
  # 32-bit range. Each environment (re)build consumes the next one.
  env_seeds = (seed % (_MAX_SEED + 1) for seed in itertools.count(env_seed))

  def build_environment():
    # Closure over env_seeds: every call gets a fresh seed.
    seed = next(env_seeds)
    lab2d_settings_dict["env_seed"] = str(seed)  # Sets the Lua seed.
    env_raw = dmlab2d.Lab2d(_DMLAB2D_ROOT, lab2d_settings_dict)
    observation_names = env_raw.observation_names()
    return dmlab2d.Environment(
        env=env_raw,
        observation_names=observation_names,
        seed=seed)

  # Add a wrapper that rebuilds the environment when reset is called.
  env = reset_wrapper.ResetWrapper(build_environment)
  return env
|
meltingpot-main
|
meltingpot/utils/substrates/builder.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for substrate."""
import dataclasses
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from meltingpot.utils.substrates import substrate
from meltingpot.utils.substrates.wrappers import observables as observables_lib
class SubstrateTest(parameterized.TestCase):
  """Tests the observable streams exposed by `substrate.Substrate`."""

  def test_observables(self):
    # A fully-mocked ObservableLab2d stands in for the wrapped environment.
    base = mock.create_autospec(
        observables_lib.ObservableLab2d, instance=True, spec_set=True)
    with substrate.Substrate(base) as env:
      received = []
      observables = env.observables()
      # Subscribe to every observable field so emissions (and stream
      # completions) are recorded in arrival order.
      for field in dataclasses.fields(observables):
        getattr(observables, field.name).subscribe(
            on_next=received.append,
            on_error=lambda e: received.append(type(e)),
            on_completed=lambda: received.append('DONE'),
        )
      base.reset.return_value = mock.sentinel.timestep_0
      base.events.return_value = [mock.sentinel.events_0]
      env.reset()
      base.step.return_value = mock.sentinel.timestep_1
      base.events.return_value = [mock.sentinel.events_1]
      env.step(mock.sentinel.action_1)
      base.step.return_value = mock.sentinel.timestep_2
      base.events.return_value = [mock.sentinel.events_2]
      env.step(mock.sentinel.action_2)
    # Expected order: each action precedes its timestep; events follow each
    # timestep. The three 'DONE's come from closing the three subjects when
    # the context manager exits.
    self.assertSequenceEqual(received, [
        mock.sentinel.timestep_0,
        mock.sentinel.events_0,
        mock.sentinel.action_1,
        mock.sentinel.timestep_1,
        mock.sentinel.events_1,
        mock.sentinel.action_2,
        mock.sentinel.timestep_2,
        mock.sentinel.events_2,
        'DONE',
        'DONE',
        'DONE',
    ])
# Allow running this test module directly.
if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/substrates/substrate_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for DMLab2D Game Objects."""
import copy
import enum
from typing import List, Mapping, NamedTuple, Optional, Sequence, Tuple, Union
from meltingpot.utils.substrates import colors
from meltingpot.utils.substrates import shapes
import numpy as np
# Type of a GameObject prefab configuration: A recursive string mapping.
# pytype: disable=not-supported-yet
PrefabConfig = Mapping[str, "PrefabConfigValue"]
PrefabConfigValue = Union[str, float, List["PrefabConfigValue"], PrefabConfig]
# pytype: enable=not-supported-yet
class Position(NamedTuple):
  """A 2D grid coordinate: `x` is the column, `y` is the row (0-indexed)."""
  x: int
  y: int
class Orientation(enum.Enum):
  """Cardinal directions, valued as the single-character codes DMLab2D uses."""
  NORTH = "N"
  EAST = "E"
  SOUTH = "S"
  WEST = "W"
class Transform(NamedTuple):
  """Placement of a game object: a position plus an optional orientation."""
  position: Position
  # None means "unspecified"; _create_game_object substitutes NORTH.
  orientation: Optional[Orientation] = None
# Special char to prefab mappings
TYPE_ALL = "all"
TYPE_CHOICE = "choice"
def get_named_components(
    game_object_config: PrefabConfig,
    name: str):
  """Returns all components of the config whose 'component' field is `name`."""
  return [component for component in game_object_config["components"]
          if component["component"] == name]
def get_first_named_component(
    game_object_config: PrefabConfig,
    name: str):
  """Returns the first component whose 'component' field equals `name`.

  Raises:
    ValueError: if no component with that name exists.
  """
  for component in game_object_config["components"]:
    if component["component"] == name:
      return component
  raise ValueError(f"No component with name '{name}' found.")
def build_game_objects(
    num_players: int,
    ascii_map: str,
    prefabs: Optional[Mapping[str, PrefabConfig]] = None,
    char_prefab_map: Optional[PrefabConfig] = None,
    player_palettes: Optional[Sequence[shapes.Color]] = None,
    use_badges: bool = False,
    badge_palettes: Optional[Sequence[shapes.Color]] = None,
    ) -> Tuple[List[PrefabConfig], List[PrefabConfig]]:
  """Build all avatar and normal game objects based on the config and map.

  Args:
    num_players: number of avatar objects to build.
    ascii_map: the ASCII map defining where game objects are placed.
    prefabs: mapping from prefab name to prefab configuration.
    char_prefab_map: mapping from map character to prefab name.
    player_palettes: optional per-player color palettes for the avatars.
    use_badges: if True, also build one badge overlay per avatar; badges are
      appended to the regular game objects, not to the avatar objects.
    badge_palettes: optional per-player color palettes for the badges.

  Returns:
    A (game_objects, avatar_objects) tuple.
  """
  game_objects = get_game_objects_from_map(ascii_map, char_prefab_map, prefabs)
  avatar_objects = build_avatar_objects(num_players, prefabs, player_palettes)
  if use_badges:
    game_objects += build_avatar_badges(num_players, prefabs, badge_palettes)
  return game_objects, avatar_objects
def build_avatar_objects(
    num_players: int,
    prefabs: Optional[Mapping[str, PrefabConfig]] = None,
    player_palettes: Optional[Sequence[shapes.Color]] = None,
    ) -> List[PrefabConfig]:
  """Build all avatar and their associated game objects from the prefabs.

  Args:
    num_players: number of avatar objects to build.
    prefabs: mapping from prefab name to config; must contain "avatar".
    player_palettes: optional color palette per player. If absent, the first
      `num_players` entries derived from colors.palette are used.

  Returns:
    One avatar config per player, each a deep copy of the "avatar" prefab with
    a per-player sprite name, palette, and (1-indexed) Avatar index.

  Raises:
    ValueError: if no "avatar" prefab is provided.
  """
  if not prefabs or "avatar" not in prefabs:
    raise ValueError(
        "Building avatar objects requested, but no avatar prefab provided.")
  if not player_palettes:
    player_palettes = [
        shapes.get_palette(colors.palette[i]) for i in range(num_players)]
  avatar_objects = []
  for idx in range(0, num_players):
    game_object = copy.deepcopy(prefabs["avatar"])
    color_palette = player_palettes[idx]
    # Lua is 1-indexed.
    lua_index = idx + 1
    # First, modify the prefab's sprite name (append the player number).
    sprite_name = get_first_named_component(
        game_object, "Appearance")["kwargs"]["spriteNames"][0]
    new_sprite_name = sprite_name + str(lua_index)
    get_first_named_component(
        game_object,
        "Appearance")["kwargs"]["spriteNames"][0] = new_sprite_name
    # Second, rename the same sprite in the prefab's stateManager. This uses
    # the saved pre-rename `sprite_name` to find matching state configs.
    state_configs = get_first_named_component(
        game_object,
        "StateManager")["kwargs"]["stateConfigs"]
    for state_config in state_configs:
      if "sprite" in state_config and state_config["sprite"] == sprite_name:
        state_config["sprite"] = new_sprite_name
    # Third, override the prefab's color palette for this sprite.
    get_first_named_component(
        game_object, "Appearance")["kwargs"]["palettes"][0] = color_palette
    # Fourth, override the avatar's player id.
    get_first_named_component(
        game_object, "Avatar")["kwargs"]["index"] = lua_index
    avatar_objects.append(game_object)
  return avatar_objects
def build_avatar_badges(
    num_players: int,
    prefabs: Optional[Mapping[str, PrefabConfig]] = None,
    badge_palettes: Optional[Sequence[shapes.Color]] = None,
    ) -> List[PrefabConfig]:
  """Builds one badge-overlay game object per player from the prefabs.

  Args:
    num_players: number of badge objects to build.
    prefabs: mapping from prefab name to config; must contain "avatar_badge".
    badge_palettes: optional color palette per player. If None, the first
      `num_players` entries derived from colors.palette are used.

  Returns:
    One badge config per player, each a deep copy of the "avatar_badge" prefab
    with a per-player sprite name, palette, and connected player index.

  Raises:
    ValueError: if no "avatar_badge" prefab is provided.
  """
  if not prefabs or "avatar_badge" not in prefabs:
    raise ValueError(
        "Building avatar badges requested, but no avatar_badge prefab " +
        "provided.")
  if badge_palettes is None:
    badge_palettes = [
        shapes.get_palette(colors.palette[i]) for i in range(num_players)]
  badge_objects = []
  for player_idx in range(num_players):
    lua_index = player_idx + 1  # Lua is 1-indexed.
    badge = copy.deepcopy(prefabs["avatar_badge"])
    appearance_kwargs = get_first_named_component(badge, "Appearance")["kwargs"]
    # Give the badge sprite a per-player name and the player's palette.
    per_player_sprite = appearance_kwargs["spriteNames"][0] + str(lua_index)
    appearance_kwargs["spriteNames"][0] = per_player_sprite
    appearance_kwargs["palettes"][0] = badge_palettes[player_idx]
    # Keep the state manager's sprite reference in sync with the rename.
    get_first_named_component(
        badge,
        "StateManager")["kwargs"]["stateConfigs"][0]["sprite"] = (
            per_player_sprite)
    # Attach the badge overlay to the corresponding avatar.
    get_first_named_component(
        badge, "AvatarConnector")["kwargs"]["playerIndex"] = lua_index
    badge_objects.append(badge)
  return badge_objects
def get_game_object_positions_from_map(
    ascii_map: str, char: str, orientation_mode: str = "always_north"
) -> Sequence[Transform]:
  """Extract the occurrences of a character in the ascii map into transforms.

  For all occurrences of the given `char`, retrieves a Transform containing
  the position and orientation of the instance.

  Args:
    ascii_map: the ascii map.
    char: the character to extract transforms from the ascii map.
    orientation_mode: strategy used to assign orientations. Only
      "always_north" is currently supported.

  Returns:
    A list of Transforms containing all the positions and orientations of all
    occurrences of the character in the map.
  """
  transforms = []
  # The map string is assumed to begin with a '\n', so row 0 is empty and is
  # skipped; row/column indices are relative to the remaining rows.
  for row_index, row in enumerate(ascii_map.split("\n")[1:]):
    for col_index, cell in enumerate(row):
      if cell != char:
        continue
      if orientation_mode == "always_north":
        orientation = Orientation.NORTH
      else:
        raise ValueError("Other orientation modes are not yet implemented.")
      transforms.append(
          Transform(position=Position(col_index, row_index),
                    orientation=orientation))
  return transforms
def _create_game_object(
    prefab: PrefabConfig, transform: Transform) -> PrefabConfig:
  """Returns a copy of `prefab` with its Transform set to `transform`.

  When the transform carries no orientation, NORTH is used.
  """
  instance = copy.deepcopy(prefab)
  if transform.orientation is not None:
    orientation_value = transform.orientation.value
  else:
    orientation_value = Orientation.NORTH.value
  get_first_named_component(instance, "Transform")["kwargs"] = {
      "position": (transform.position.x, transform.position.y),
      "orientation": orientation_value,
  }
  return instance
def get_game_objects_from_map(
    ascii_map: str,
    char_prefab_map: Mapping[str, str],
    prefabs: Mapping[str, PrefabConfig],
    random: Optional[np.random.RandomState] = None
) -> List[PrefabConfig]:
  """Returns a list of game object configurations from the map and prefabs.

  Each prefab will have its `Transform` component overwritten to its actual
  location (and orientation, although it is all 'N' by default) in the ASCII
  map.

  Args:
    ascii_map: The map for the level. Defines which prefab to use at each
      position in the map, which is a string defining a matrix of characters.
    char_prefab_map: A dictionary mapping characters in the ascii_map to prefab
      names.
    prefabs: A collection of named prefabs that define a GameObject
      configuration.
    random: An optional random number generator. If not given, a fresh
      `RandomState` is created per call. (Previously the default was a single
      `RandomState()` created at import time — a mutable default argument
      shared by every call site.)

  Returns:
    A list of game object configurations from the map and prefabs.
  """
  if random is None:
    random = np.random.RandomState()
  game_objects = []
  for char, prefab in char_prefab_map.items():
    transforms = get_game_object_positions_from_map(ascii_map, char)
    for transform in transforms:
      if hasattr(prefab, "items"):
        # Composite prefab: either instantiate every listed prefab at this
        # position (TYPE_ALL), or randomly pick one of them (TYPE_CHOICE).
        assert "type" in prefab
        assert "list" in prefab
        if prefab["type"] == TYPE_ALL:
          for p in prefab["list"]:
            game_objects.append(_create_game_object(prefabs[p], transform))
        elif prefab["type"] == TYPE_CHOICE:
          game_objects.append(
              _create_game_object(prefabs[random.choice(prefab["list"])],
                                  transform))
      else:  # Typical case, since named prefab.
        game_objects.append(_create_game_object(prefabs[prefab], transform))
  return game_objects
|
meltingpot-main
|
meltingpot/utils/substrates/game_object_utils.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for builder.py."""
import copy
from absl.testing import absltest
from absl.testing import parameterized
from meltingpot.configs.substrates import running_with_scissors_in_the_matrix__repeated as test_substrate
from meltingpot.utils.substrates import builder
import numpy as np
def _get_test_settings():
  """Builds lab2d settings for the test substrate using its default roles."""
  substrate_config = test_substrate.get_config()
  return test_substrate.build(
      substrate_config, substrate_config.default_player_roles)
_TEST_SETTINGS = _get_test_settings()
def _get_lua_randomization_map():
  """Replaces first row of walls with items randomized by Lua."""
  rows = _TEST_SETTINGS['simulation']['map'].split('\n')
  # Row 1 (the top wall row; row 0 is the empty leading line) becomes all
  # 'a' — a character whose items Lua randomizes per episode.
  rows[1] = 'a' * len(rows[1])
  return '\n'.join(rows)
_LUA_RANDOMIZED_LINE = 1
_LUA_RANDOMIZATION_MAP = _get_lua_randomization_map()
class GeneralTestCase(parameterized.TestCase):
  """Checks seeding and per-episode randomization of built environments."""

  @parameterized.product(seed=[42, 123, 1337, 12481632])
  def test_seed_causes_determinism(self, seed):
    # Two environments built with the same seed must render identical
    # WORLD.RGB frames on every reset.
    env_a = self.enter_context(builder.builder(_TEST_SETTINGS, env_seed=seed))
    env_b = self.enter_context(builder.builder(_TEST_SETTINGS, env_seed=seed))
    for episode in range(5):
      frame_a = env_a.reset().observation['WORLD.RGB']
      frame_b = env_b.reset().observation['WORLD.RGB']
      np.testing.assert_equal(
          frame_a, frame_b,
          f'Episode {episode} mismatch: {frame_a} != {frame_b} ')

  @parameterized.product(seed=[None, 42, 123, 1337, 12481632])
  def test_episodes_are_randomized(self, seed):
    # Consecutive resets of the same environment must differ.
    env = self.enter_context(builder.builder(_TEST_SETTINGS, env_seed=seed))
    current = env.reset().observation['WORLD.RGB']
    for episode in range(4):
      previous, current = current, env.reset().observation['WORLD.RGB']
      with self.assertRaises(
          AssertionError,
          msg=f'Episodes {episode} and {episode+1} match: '
              f'{previous} == {current}'):
        np.testing.assert_equal(previous, current)

  def test_no_seed_causes_nondeterminism(self):
    env_a = self.enter_context(builder.builder(_TEST_SETTINGS, env_seed=None))
    env_b = self.enter_context(builder.builder(_TEST_SETTINGS, env_seed=None))
    for episode in range(5):
      frame_a = env_a.reset().observation['WORLD.RGB']
      frame_b = env_b.reset().observation['WORLD.RGB']
      with self.assertRaises(
          AssertionError,
          msg=f'Episode {episode} match: {frame_a} == {frame_b}'):
        np.testing.assert_equal(frame_a, frame_b)

  @parameterized.product(seed=[None, 42, 123, 1337, 12481632])
  def test_episodes_are_randomized_in_lua(self, seed):
    # Same as above, but only inspecting the map row randomized by Lua.
    settings = copy.deepcopy(_TEST_SETTINGS)
    settings['simulation']['map'] = _LUA_RANDOMIZATION_MAP
    env = self.enter_context(builder.builder(settings, env_seed=seed))
    current = env.reset().observation['WORLD.RGB'][_LUA_RANDOMIZED_LINE]
    for episode in range(4):
      previous, current = (
          current, env.reset().observation['WORLD.RGB'][_LUA_RANDOMIZED_LINE])
      with self.assertRaises(
          AssertionError,
          msg=f'Episodes {episode} and {episode+1} match: '
              f'{previous} == {current}'):
        np.testing.assert_equal(previous, current)

  def test_no_seed_causes_nondeterminism_for_lua(self):
    settings = copy.deepcopy(_TEST_SETTINGS)
    settings['simulation']['map'] = _LUA_RANDOMIZATION_MAP
    env_a = self.enter_context(builder.builder(settings))
    env_b = self.enter_context(builder.builder(settings))
    for episode in range(5):
      frame_a = env_a.reset().observation['WORLD.RGB'][_LUA_RANDOMIZED_LINE]
      frame_b = env_b.reset().observation['WORLD.RGB'][_LUA_RANDOMIZED_LINE]
      with self.assertRaises(
          AssertionError,
          msg=f'Episode {episode} match {frame_a} == {frame_b}'):
        np.testing.assert_equal(frame_a, frame_b)


if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/substrates/builder_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of 62 visually distinct colors."""
# LINT.IfChange
# A tuple of 62 visually distinct (R, G, B) triples, 0-255 per channel.
# Entries are indexed by position elsewhere (e.g. `colors.palette[i]` per
# player), and the LINT directives above/below tie this table to the Lua
# colors module — so the order is significant; do not reorder entries.
palette = (
    (1, 0, 103),
    (213, 255, 0),
    (255, 0, 86),
    (158, 0, 142),
    (14, 76, 161),
    (255, 229, 2),
    (0, 95, 57),
    (0, 255, 0),
    (149, 0, 58),
    (255, 147, 126),
    (164, 36, 0),
    (0, 21, 68),
    (145, 208, 203),
    (98, 14, 0),
    (107, 104, 130),
    (0, 0, 255),
    (0, 125, 181),
    (106, 130, 108),
    (0, 174, 126),
    (194, 140, 159),
    (190, 153, 112),
    (0, 143, 156),
    (95, 173, 78),
    (255, 0, 0),
    (255, 0, 246),
    (255, 2, 157),
    (104, 61, 59),
    (255, 116, 163),
    (150, 138, 232),
    (152, 255, 82),
    (167, 87, 64),
    (1, 255, 254),
    (255, 238, 232),
    (254, 137, 0),
    (189, 198, 255),
    (1, 208, 255),
    (187, 136, 0),
    (117, 68, 177),
    (165, 255, 210),
    (255, 166, 254),
    (119, 77, 0),
    (122, 71, 130),
    (38, 52, 0),
    (0, 71, 84),
    (67, 0, 44),
    (181, 0, 255),
    (255, 177, 103),
    (255, 219, 102),
    (144, 251, 146),
    (126, 45, 210),
    (189, 211, 147),
    (229, 111, 254),
    (222, 255, 116),
    (0, 255, 120),
    (0, 155, 255),
    (0, 100, 1),
    (0, 118, 255),
    (133, 169, 0),
    (0, 185, 23),
    (120, 130, 49),
    (0, 255, 198),
    (255, 110, 65),
)
# LINT.ThenChange(//meltingpot/lua/modules/colors.lua)
# A smaller palette of 16 RGB triples chosen for human readability.
human_readable = (
    (45, 110, 220),
    (125, 50, 200),
    (205, 5, 165),
    (245, 65, 65),
    (245, 130, 0),
    (195, 180, 0),
    (125, 185, 65),
    (35, 185, 175),
    (160, 15, 200),
    (230, 50, 95),
    (230, 90, 55),
    (220, 140, 15),
    (180, 195, 0),
    (25, 210, 140),
    (25, 170, 200),
    (85, 80, 210),
)
# Eight muted avatar colors.
desaturated_avatar_palette = (
    (30, 115, 200),
    (120, 90, 135),
    (100, 145, 100),
    (180, 45, 120),
    (200, 125, 20),
    (180, 155, 0),
    (50, 115, 180),
    (115, 70, 160),
)
# Seven lighter variants of the muted avatar colors.
light_desaturated_avatar_palette = (
    (70, 130, 200),
    (105, 105, 190),
    (200, 200, 0),
    (200, 150, 50),
    (200, 100, 100),
    (155, 90, 155),
    (105, 190, 105),
)
|
meltingpot-main
|
meltingpot/utils/substrates/colors.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for base wrapper."""
import inspect
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import dmlab2d
from meltingpot.utils.substrates.wrappers import base
# Public (non-underscore) member names of dmlab2d.Environment. The wrapper
# under test is expected to forward every one of these. (The generator is
# passed to tuple() directly — the intermediate list was unnecessary.)
_WRAPPED_METHODS = tuple(
    name for name, _ in inspect.getmembers(dmlab2d.Environment)
    if not name.startswith('_')
)
class WrapperTest(parameterized.TestCase):
  """Verifies Lab2dWrapper forwards every public method to the wrapped env."""

  def test_instance(self):
    fake_env = mock.Mock(spec_set=dmlab2d.Environment)
    self.assertIsInstance(
        base.Lab2dWrapper(env=fake_env), dmlab2d.Environment)

  @parameterized.named_parameters(
      (name, name) for name in _WRAPPED_METHODS
  )
  def test_wrapped(self, method):
    fake_env = mock.Mock(spec_set=dmlab2d.Environment)
    forwarded = getattr(fake_env, method)
    forwarded.return_value = mock.sentinel
    wrapper = base.Lab2dWrapper(env=fake_env)
    positional = [object()]
    keyword = {'a': object()}
    result = getattr(wrapper, method)(*positional, **keyword)
    # The call must reach the wrapped env unchanged, and its return value
    # must come back unchanged.
    with self.subTest('args'):
      forwarded.assert_called_once_with(*positional, **keyword)
    with self.subTest('return_value'):
      self.assertEqual(result, forwarded.return_value)


if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/base_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper that rebuilds the Lab2d environment on every reset."""
from typing import Callable
import dm_env
import dmlab2d
from meltingpot.utils.substrates.wrappers import base
class ResetWrapper(base.Lab2dWrapper):
  """Wrapper that rebuilds the environment on reset."""

  def __init__(self, build_environment: Callable[[], dmlab2d.Environment]):
    """Initializes the object.

    Args:
      build_environment: Called to build the underlying environment.
    """
    super().__init__(build_environment())
    self._rebuild_environment = build_environment
    self._reset = False

  def reset(self) -> dm_env.TimeStep:
    """Rebuilds the environment and calls reset on it."""
    if not self._reset:
      # First reset: reuse the environment already built in __init__
      # (rebuilding it immediately would be wasteful).
      self._reset = True
    else:
      self._env.close()
      self._env = self._rebuild_environment()
    return super().reset()
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/reset_wrapper.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for multiplayer_wrapper."""
from unittest import mock
from absl.testing import absltest
import dm_env
import dmlab2d
from meltingpot.utils.substrates.wrappers import multiplayer_wrapper
import numpy as np
# Fixture specs and matching valid values shared by the tests below.
ACT_SPEC = dm_env.specs.BoundedArray(
    shape=(), minimum=0, maximum=4, dtype=np.int8)
ACT_VALUE = np.ones([], dtype=np.int8)  # scalar action within ACT_SPEC bounds
RGB_SPEC = dm_env.specs.Array(shape=(8, 8, 3), dtype=np.int8)
RGB_VALUE = np.ones((8, 8, 3), np.int8)  # an RGB observation matching RGB_SPEC
REWARD_SPEC = dm_env.specs.Array(shape=(), dtype=np.float32)
REWARD_VALUE = np.ones((), dtype=np.float32)  # unit reward scalar
class Lab2DToListsWrapperTest(absltest.TestCase):
  """Tests for multiplayer_wrapper.Wrapper.

  The wrapped dmlab2d environment exposes flat dicts keyed by player-number
  prefixes ('1.MOVE', '2.RGB', ...) plus global keys ('WORLD.RGB'); the
  wrapper converts these to/from per-player lists. Each test wires a mock
  environment and checks one direction of that conversion.

  NOTE(review): the class name looks copy-pasted from another wrapper test —
  consider renaming to MultiplayerWrapperTest.
  """

  def test_get_num_players(self):
    # The player count is derived from the numeric prefixes of the action
    # spec keys ('1.', '2.', '3.' -> 3 players).
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.action_spec.return_value = {
        '1.MOVE': ACT_SPEC,
        '2.MOVE': ACT_SPEC,
        '3.MOVE': ACT_SPEC
    }
    wrapped = multiplayer_wrapper.Wrapper(
        env, individual_observation_names=[], global_observation_names=[])
    self.assertEqual(wrapped._get_num_players(), 3)

  def test_get_rewards(self):
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.action_spec.return_value = {
        '1.MOVE': ACT_SPEC,
        '2.MOVE': ACT_SPEC,
        '3.MOVE': ACT_SPEC
    }
    wrapped = multiplayer_wrapper.Wrapper(
        env, individual_observation_names=[], global_observation_names=[])
    source = {
        '1.RGB': RGB_VALUE,
        '2.RGB': RGB_VALUE * 2,
        '3.RGB': RGB_VALUE * 3,
        '1.REWARD': 10,
        '2.REWARD': 20,
        '3.REWARD': 30,
        'WORLD.RGB': RGB_VALUE
    }
    # The '<n>.REWARD' entries come back as a list ordered by player number.
    rewards = wrapped._get_rewards(source)
    self.assertEqual(rewards, [10, 20, 30])

  def test_get_observations(self):
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.action_spec.return_value = {
        '1.MOVE': ACT_SPEC,
        '2.MOVE': ACT_SPEC,
        '3.MOVE': ACT_SPEC,
    }
    wrapped = multiplayer_wrapper.Wrapper(
        env,
        individual_observation_names=['RGB'],
        global_observation_names=['WORLD.RGB'])
    source = {
        '1.RGB': RGB_VALUE * 1,
        '2.RGB': RGB_VALUE * 2,
        '3.RGB': RGB_VALUE * 3,
        '1.OTHER': RGB_SPEC,
        '2.OTHER': RGB_SPEC,
        '3.OTHER': RGB_SPEC,
        'WORLD.RGB': RGB_VALUE,
    }
    actual = wrapped._get_observations(source)
    # Only the requested individual ('RGB') and global ('WORLD.RGB') keys
    # appear in each player's dict; 'OTHER' is dropped.
    expected = [
        {'RGB': RGB_VALUE * 1, 'WORLD.RGB': RGB_VALUE},
        {'RGB': RGB_VALUE * 2, 'WORLD.RGB': RGB_VALUE},
        {'RGB': RGB_VALUE * 3, 'WORLD.RGB': RGB_VALUE},
    ]
    np.testing.assert_equal(actual, expected)

  def test_get_action(self):
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.action_spec.return_value = {
        '1.MOVE': ACT_SPEC,
        '2.MOVE': ACT_SPEC,
        '3.MOVE': ACT_SPEC,
    }
    wrapped = multiplayer_wrapper.Wrapper(
        env,
        individual_observation_names=[],
        global_observation_names=[])
    source = [
        {'MOVE': ACT_VALUE * 1},
        {'MOVE': ACT_VALUE * 2},
        {'MOVE': ACT_VALUE * 3},
    ]
    # Per-player action dicts are flattened back into prefix-keyed form.
    actual = wrapped._get_action(source)
    expected = {
        '1.MOVE': ACT_VALUE * 1,
        '2.MOVE': ACT_VALUE * 2,
        '3.MOVE': ACT_VALUE * 3,
    }
    np.testing.assert_equal(actual, expected)

  def test_spec(self):
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.action_spec.return_value = {
        '1.MOVE': ACT_SPEC,
        '2.MOVE': ACT_SPEC,
        '3.MOVE': ACT_SPEC,
    }
    env.observation_spec.return_value = {
        '1.RGB': RGB_SPEC,
        '2.RGB': RGB_SPEC,
        '3.RGB': RGB_SPEC,
        '1.OTHER': RGB_SPEC,
        '2.OTHER': RGB_SPEC,
        '3.OTHER': RGB_SPEC,
        '1.REWARD': REWARD_SPEC,
        '2.REWARD': REWARD_SPEC,
        '3.REWARD': REWARD_SPEC,
        'WORLD.RGB': RGB_SPEC
    }
    wrapped = multiplayer_wrapper.Wrapper(
        env,
        individual_observation_names=['RGB'],
        global_observation_names=['WORLD.RGB'])
    with self.subTest('action_spec'):
      # Player prefixes are stripped, so each spec is renamed to 'MOVE'.
      self.assertEqual(wrapped.action_spec(), [
          {'MOVE': ACT_SPEC.replace(name='MOVE')},
          {'MOVE': ACT_SPEC.replace(name='MOVE')},
          {'MOVE': ACT_SPEC.replace(name='MOVE')},
      ])
    with self.subTest('observation_spec'):
      self.assertEqual(wrapped.observation_spec(), [
          {'RGB': RGB_SPEC, 'WORLD.RGB': RGB_SPEC},
          {'RGB': RGB_SPEC, 'WORLD.RGB': RGB_SPEC},
          {'RGB': RGB_SPEC, 'WORLD.RGB': RGB_SPEC},
      ])
    with self.subTest('reward_spec'):
      self.assertEqual(
          wrapped.reward_spec(), [REWARD_SPEC, REWARD_SPEC, REWARD_SPEC])

  def test_step(self):
    env = mock.Mock(spec_set=dmlab2d.Environment)
    # NOTE(review): action_spec is mocked with values rather than specs here;
    # presumably only the key names matter to the wrapper — confirm.
    env.action_spec.return_value = {
        '1.MOVE': ACT_VALUE * 1,
        '2.MOVE': ACT_VALUE * 2,
        '3.MOVE': ACT_VALUE * 3,
    }
    env.step.return_value = dm_env.transition(1, {
        '1.RGB': RGB_VALUE * 1,
        # Intentionally missing 2.RGB
        '3.RGB': RGB_VALUE * 3,
        '1.OTHER': RGB_VALUE,
        '2.OTHER': RGB_VALUE,
        '3.OTHER': RGB_VALUE,
        '1.REWARD': REWARD_VALUE * 10,
        '2.REWARD': REWARD_VALUE * 20,
        '3.REWARD': REWARD_VALUE * 30,
        'WORLD.RGB': RGB_VALUE,
    })
    wrapped = multiplayer_wrapper.Wrapper(
        env,
        individual_observation_names=['RGB'],
        global_observation_names=['WORLD.RGB'])
    actions = [
        {'MOVE': ACT_VALUE * 1},
        {'MOVE': ACT_VALUE * 2},
        {'MOVE': ACT_VALUE * 3},
    ]
    actual = wrapped.step(actions)
    # Player 2 has no RGB in the source, so its dict carries only the
    # global observation.
    expected = dm_env.transition(
        reward=[
            REWARD_VALUE * 10,
            REWARD_VALUE * 20,
            REWARD_VALUE * 30,
        ],
        observation=[
            {'RGB': RGB_VALUE * 1, 'WORLD.RGB': RGB_VALUE},
            {'WORLD.RGB': RGB_VALUE},
            {'RGB': RGB_VALUE * 3, 'WORLD.RGB': RGB_VALUE},
        ])
    with self.subTest('timestep'):
      np.testing.assert_equal(actual, expected)
    with self.subTest('action'):
      (action,), _ = env.step.call_args
      np.testing.assert_equal(action, {
          '1.MOVE': ACT_VALUE * 1,
          '2.MOVE': ACT_VALUE * 2,
          '3.MOVE': ACT_VALUE * 3,
      })

  def test_reset(self):
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.action_spec.return_value = {
        '1.MOVE': ACT_VALUE * 1,
        '2.MOVE': ACT_VALUE * 2,
        '3.MOVE': ACT_VALUE * 3,
    }
    env.reset.return_value = dm_env.restart({
        '1.RGB': RGB_VALUE * 1,
        '2.RGB': RGB_VALUE * 2,
        '3.RGB': RGB_VALUE * 3,
        '1.OTHER': RGB_VALUE,
        '2.OTHER': RGB_VALUE,
        '3.OTHER': RGB_VALUE,
        '1.REWARD': REWARD_VALUE * 0,
        '2.REWARD': REWARD_VALUE * 0,
        '3.REWARD': REWARD_VALUE * 0,
        'WORLD.RGB': RGB_VALUE,
    })
    wrapped = multiplayer_wrapper.Wrapper(
        env,
        individual_observation_names=['RGB'],
        global_observation_names=['WORLD.RGB'])
    actual = wrapped.reset()
    # On the FIRST step the wrapper is expected to emit explicit per-player
    # zero rewards and a zero discount.
    expected = dm_env.TimeStep(
        step_type=dm_env.StepType.FIRST,
        reward=[
            REWARD_VALUE * 0,
            REWARD_VALUE * 0,
            REWARD_VALUE * 0,
        ],
        discount=0.,
        observation=[
            {'RGB': RGB_VALUE * 1, 'WORLD.RGB': RGB_VALUE},
            {'RGB': RGB_VALUE * 2, 'WORLD.RGB': RGB_VALUE},
            {'RGB': RGB_VALUE * 3, 'WORLD.RGB': RGB_VALUE},
        ])
    np.testing.assert_equal(actual, expected)

  def test_observation(self):
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.action_spec.return_value = {
        '1.MOVE': ACT_SPEC,
        '2.MOVE': ACT_SPEC,
        '3.MOVE': ACT_SPEC,
    }
    env.observation.return_value = {
        '1.RGB': RGB_VALUE * 1,
        '2.RGB': RGB_VALUE * 2,
        '3.RGB': RGB_VALUE * 3,
        '1.OTHER': RGB_VALUE,
        '2.OTHER': RGB_VALUE,
        '3.OTHER': RGB_VALUE,
        '1.REWARD': REWARD_VALUE * 0,
        '2.REWARD': REWARD_VALUE * 0,
        '3.REWARD': REWARD_VALUE * 0,
        'WORLD.RGB': RGB_VALUE,
    }
    wrapped = multiplayer_wrapper.Wrapper(
        env,
        individual_observation_names=['RGB'],
        global_observation_names=['WORLD.RGB'])
    actual = wrapped.observation()
    expected = [
        {'RGB': RGB_VALUE * 1, 'WORLD.RGB': RGB_VALUE},
        {'RGB': RGB_VALUE * 2, 'WORLD.RGB': RGB_VALUE},
        {'RGB': RGB_VALUE * 3, 'WORLD.RGB': RGB_VALUE},
    ]
    np.testing.assert_equal(actual, expected)


if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/multiplayer_wrapper_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for discrete_action_wrapper."""
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
import dmlab2d
from meltingpot.utils.substrates.wrappers import discrete_action_wrapper
import numpy as np
# Per-key action specs (both bounded to [0, 3]) and test values relative to
# those bounds.
MOVE_SPEC = dm_env.specs.BoundedArray(
    shape=(), minimum=0, maximum=3, dtype=np.int8, name='MOVE')
TURN_SPEC = dm_env.specs.BoundedArray(
    shape=(), minimum=0, maximum=3, dtype=np.int8, name='TURN')
VALID_VALUE_0 = np.zeros([], dtype=np.int8)  # at the minimum bound
VALID_VALUE_1 = np.array(3, dtype=np.int8)  # at the maximum bound
INVALID_VALUE = np.array(4, dtype=np.int8)  # just above the maximum bound
class Lab2DToListsWrapperTest(parameterized.TestCase):
  """Tests for discrete_action_wrapper.Wrapper.

  The wrapper maps one discrete index per player onto a full action
  dictionary via a shared action table.

  NOTE(review): the class name looks copy-pasted from another wrapper test —
  consider renaming to DiscreteActionWrapperTest.
  """

  def test_valid_set(self):
    # Construction succeeds when every table entry matches the action spec.
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.action_spec.return_value = [
        {'MOVE': MOVE_SPEC, 'TURN': TURN_SPEC},
        {'MOVE': MOVE_SPEC, 'TURN': TURN_SPEC},
    ]
    discrete_action_wrapper.Wrapper(env, action_table=[
        {'MOVE': VALID_VALUE_0, 'TURN': VALID_VALUE_0},
        {'MOVE': VALID_VALUE_0, 'TURN': VALID_VALUE_1},
        {'MOVE': VALID_VALUE_1, 'TURN': VALID_VALUE_0},
        {'MOVE': VALID_VALUE_1, 'TURN': VALID_VALUE_1},
    ])

  @parameterized.named_parameters(
      ('empty', []),
      ('out_of_bounds', [{'MOVE': INVALID_VALUE, 'TURN': VALID_VALUE_0}]),
      ('missing_key', [{'TURN': VALID_VALUE_0}]),
      ('extra_key', [{'INVALID': VALID_VALUE_0}]),
  )
  def test_invalid_set(self, action_table):
    # Each malformed table must be rejected with ValueError at construction.
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.action_spec.return_value = [
        {'MOVE': MOVE_SPEC, 'TURN': TURN_SPEC},
        {'MOVE': MOVE_SPEC, 'TURN': TURN_SPEC},
    ]
    with self.assertRaises(ValueError):
      discrete_action_wrapper.Wrapper(env, action_table=action_table)

  def test_action_spec(self):
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.action_spec.return_value = [
        {'MOVE': MOVE_SPEC, 'TURN': TURN_SPEC},
        {'MOVE': MOVE_SPEC, 'TURN': TURN_SPEC},
    ]
    wrapped = discrete_action_wrapper.Wrapper(env, action_table=[
        {'MOVE': VALID_VALUE_0, 'TURN': VALID_VALUE_0},
        {'MOVE': VALID_VALUE_0, 'TURN': VALID_VALUE_1},
        {'MOVE': VALID_VALUE_1, 'TURN': VALID_VALUE_0},
    ])
    actual = wrapped.action_spec()
    # One discrete spec per player; num_values equals the table length (3).
    expected = (
        dm_env.specs.DiscreteArray(num_values=3, dtype=np.int64, name='action'),
    ) * 2
    self.assertEqual(actual, expected)

  def test_step(self):
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.action_spec.return_value = [
        {'MOVE': MOVE_SPEC, 'TURN': TURN_SPEC},
        {'MOVE': MOVE_SPEC, 'TURN': TURN_SPEC},
    ]
    env.step.return_value = mock.sentinel.timestep
    wrapped = discrete_action_wrapper.Wrapper(env, action_table=[
        {'MOVE': VALID_VALUE_0, 'TURN': VALID_VALUE_0},
        {'MOVE': VALID_VALUE_0, 'TURN': VALID_VALUE_1},
        {'MOVE': VALID_VALUE_1, 'TURN': VALID_VALUE_0},
    ])
    # Indices [0, 2] are looked up per player in the shared action table.
    actual = wrapped.step([0, 2])
    with self.subTest('timestep'):
      np.testing.assert_equal(actual, mock.sentinel.timestep)
    with self.subTest('action'):
      (action,), _ = env.step.call_args
      self.assertEqual(action, [
          {'MOVE': VALID_VALUE_0, 'TURN': VALID_VALUE_0},
          {'MOVE': VALID_VALUE_1, 'TURN': VALID_VALUE_0},
      ])


if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/discrete_action_wrapper_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper that exposes Lab2d timesteps, actions, and events as observables."""
from typing import Mapping, Union
import dm_env
import dmlab2d
from meltingpot.utils.substrates.wrappers import observables
import numpy as np
from reactivex import subject
Action = Union[int, float, np.ndarray]
class ObservablesWrapper(observables.ObservableLab2dWrapper):
  """Wrapper exposes timesteps, actions, and events as observables."""

  def __init__(self, env: dmlab2d.Environment):
    """Initializes the object.

    Args:
      env: The environment to wrap.
    """
    super().__init__(env)
    self._action_subject = subject.Subject()
    self._timestep_subject = subject.Subject()
    self._events_subject = subject.Subject()
    self._observables = observables.Lab2dObservables(
        action=self._action_subject,
        events=self._events_subject,
        timestep=self._timestep_subject,
    )

  def _publish(self, timestep: dm_env.TimeStep) -> None:
    """Pushes a timestep and any pending environment events to subscribers."""
    self._timestep_subject.on_next(timestep)
    for event in super().events():
      self._events_subject.on_next(event)

  def reset(self) -> dm_env.TimeStep:
    """See base class."""
    timestep = super().reset()
    self._publish(timestep)
    return timestep

  def step(self, action: Mapping[str, Action]) -> dm_env.TimeStep:
    """See base class."""
    # The action is published before stepping; the resulting timestep and
    # events after.
    self._action_subject.on_next(action)
    timestep = super().step(action)
    self._publish(timestep)
    return timestep

  def close(self) -> None:
    """See base class."""
    super().close()
    for subj in (self._action_subject, self._timestep_subject,
                 self._events_subject):
      subj.on_completed()

  def observables(self) -> observables.Lab2dObservables:
    """See base class."""
    return self._observables
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/observables_wrapper.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper that adds the sum of all players' rewards to observations."""
import copy
from typing import Mapping, Sequence, TypeVar
import dm_env
from meltingpot.utils.substrates.wrappers import observables
import numpy as np
T = TypeVar("T")
_COLLECTIVE_REWARD_OBS = "COLLECTIVE_REWARD"
class CollectiveRewardWrapper(observables.ObservableLab2dWrapper):
  """Wrapper that adds an observation of the sum of all players' rewards."""

  def __init__(self, env):
    """Initializes the object.

    Args:
      env: environment to wrap.
    """
    # Initialize through the base wrapper rather than assigning self._env
    # directly, matching the other wrappers in this package.
    super().__init__(env)

  def _get_timestep(self, input_timestep: dm_env.TimeStep) -> dm_env.TimeStep:
    """Returns timestep augmented with `collective_reward'.

    Args:
      input_timestep: input_timestep before adding `collective_reward'.
    """
    return dm_env.TimeStep(
        step_type=input_timestep.step_type,
        reward=input_timestep.reward,
        discount=input_timestep.discount,
        observation=[{_COLLECTIVE_REWARD_OBS: np.sum(input_timestep.reward),
                      **obs} for obs in input_timestep.observation])

  def reset(self, *args, **kwargs) -> dm_env.TimeStep:
    """See base class."""
    # Extra arguments are accepted (and ignored) for signature compatibility.
    timestep = super().reset()
    return self._get_timestep(timestep)

  def step(
      self, actions: Sequence[Mapping[str, np.ndarray]]) -> dm_env.TimeStep:
    """See base class."""
    timestep = super().step(actions)
    return self._get_timestep(timestep)

  def observation_spec(self) -> Sequence[Mapping[str, dm_env.specs.Array]]:
    """See base class."""
    # Build new per-player dicts instead of mutating the specs returned by
    # the wrapped environment: the previous shallow copy of the sequence
    # still aliased — and modified in place — the underlying spec dicts.
    collective_spec = dm_env.specs.Array(
        shape=(), dtype=np.float64, name=_COLLECTIVE_REWARD_OBS)
    return [{**obs, _COLLECTIVE_REWARD_OBS: collective_spec}
            for obs in super().observation_spec()]
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/collective_reward_wrapper.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper that converts action dictionary to a one hot vector."""
import functools
from typing import Mapping, Sequence, TypeVar, Union
import dm_env
import immutabledict
from meltingpot.utils.substrates.wrappers import observables
import numpy as np
T = TypeVar('T')
Numeric = Union[int, float, np.ndarray]
def _validate_action(
    action: Mapping[str, np.ndarray],
    action_spec: Mapping[str, dm_env.specs.Array]) -> None:
  """Raises ValueError if `action` does not conform to `action_spec`.

  The key sets must match exactly, and every value must validate against its
  corresponding spec.
  """
  if set(action_spec) != set(action):
    raise ValueError('Keys do not match.')
  for name in action_spec:
    action_spec[name].validate(action[name])
def _validate_action_table(
    action_table: Sequence[Mapping[str, np.ndarray]],
    action_spec: Mapping[str, dm_env.specs.Array]) -> None:
  """Validates every entry of `action_table` against `action_spec`.

  Raises:
    ValueError: if the table is empty or any entry fails validation.
  """
  if not action_table:
    raise ValueError('action_table must not be empty')
  for action_index, action in enumerate(action_table):
    try:
      _validate_action(action, action_spec)
    except ValueError:
      # Re-raise with the offending entry's index for easier debugging;
      # `from None` drops the less informative inner traceback.
      message = (f'Action {action_index} ({action}) does not match '
                 f'action_spec ({action_spec}).')
      raise ValueError(message) from None
def _immutable_action(
    action: Mapping[str, Numeric],
    action_spec: Mapping[str, dm_env.specs.Array],
) -> Mapping[str, np.ndarray]:
  """Returns a deeply read-only copy of `action`.

  Arrays are copied; plain numbers are coerced to arrays using the dtype
  from the matching spec. Every array is marked non-writeable and the
  mapping itself is immutable.
  """
  def _frozen(key: str, value: Numeric) -> np.ndarray:
    if isinstance(value, np.ndarray):
      array = np.copy(value)
    else:
      array = np.array(value, dtype=action_spec[key].dtype)
    array.flags.writeable = False
    return array
  return immutabledict.immutabledict(
      {key: _frozen(key, value) for key, value in action.items()})
def _immutable_action_table(
    action_table: Sequence[Mapping[str, Numeric]],
    action_spec: Mapping[str, dm_env.specs.Array],
) -> Sequence[Mapping[str, np.ndarray]]:
  """Freezes every entry of `action_table` into an immutable tuple."""
  frozen_entries = []
  for entry in action_table:
    frozen_entries.append(_immutable_action(entry, action_spec))
  return tuple(frozen_entries)
class Wrapper(observables.ObservableLab2dWrapper):
  """Wrapper that maps a discrete action to an entry in an a table."""

  def __init__(self, env, action_table: Sequence[Mapping[str, Numeric]]):
    """Constructor.

    Args:
      env: environment to wrap. When the adaptor closes env will also be closed.
        Note that each player must have the same action spec.
      action_table: Actions that are permissable. The same action lookup is
        used by each player. action_table[i] defines the action that will be
        forwarded to the wrapped environment for discrete action i.

    Raises:
      ValueError: if the players' action specs differ, or if the action table
        is empty or does not match the per-player action spec.
    """
    action_spec = env.action_spec()
    if any(action_spec[0] != spec for spec in action_spec[1:]):
      raise ValueError('Environment has heterogeneous action specs.')
    super().__init__(env)
    self._action_table = _immutable_action_table(action_table, action_spec[0])
    _validate_action_table(self._action_table, action_spec[0])
    # Filled lazily by action_spec(). An instance attribute replaces the
    # previous functools.lru_cache on the method: lru_cache on a method keys
    # on `self` and would keep the instance (and wrapped environment) alive
    # for the lifetime of the cache (flake8-bugbear B019).
    self._discrete_action_spec = None

  def step(self, action: Sequence[int]):
    """See base class."""
    # Translate each player's discrete index into its full action dict.
    action = [self._action_table[player_action] for player_action in action]
    return super().step(action)

  def action_spec(self) -> Sequence[dm_env.specs.DiscreteArray]:
    """See base class."""
    if self._discrete_action_spec is None:
      spec = dm_env.specs.DiscreteArray(
          num_values=len(self._action_table),
          dtype=np.int64,
          name='action')
      # One identical discrete spec per player.
      self._discrete_action_spec = tuple(
          spec for _ in super().action_spec())
    return self._discrete_action_spec
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/discrete_action_wrapper.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for multiplayer_wrapper."""
from unittest import mock
from absl.testing import absltest
import dm_env
import dmlab2d
from meltingpot.utils.substrates.wrappers import collective_reward_wrapper
import numpy as np
# Spec of the per-player RGB observation used by the fake environment.
RGB_SPEC = dm_env.specs.Array(shape=(2, 1), dtype=np.int8)
# Spec the wrapper is expected to add for the collective (summed) reward.
COLLECTIVE_REWARD_SPEC = dm_env.specs.Array(shape=(), dtype=np.float64)
NUM_PLAYERS = 3
REWARDS = [1.0, 2.0, 3.0]  # One reward per player.
RGB = np.zeros((2, 1))  # Placeholder RGB observation content.
class CollectiveRewardWrapperTest(absltest.TestCase):
  """Tests for CollectiveRewardWrapper."""

  def test_get_timestep(self):
    # The wrapper should add the sum of all players' rewards to each
    # player's observation dict under the collective-reward key.
    env = mock.Mock(spec_set=dmlab2d.Environment)
    wrapped = collective_reward_wrapper.CollectiveRewardWrapper(env)
    source = dm_env.TimeStep(
        step_type=dm_env.StepType.MID,
        reward=REWARDS,
        discount=1.0,
        observation=[{'RGB': RGB} for _ in range(NUM_PLAYERS)])
    actual = wrapped._get_timestep(source)
    added_key = collective_reward_wrapper._COLLECTIVE_REWARD_OBS
    collective_reward = np.sum(REWARDS)
    expected_observation = [
        {'RGB': RGB, added_key: collective_reward},
    ] * NUM_PLAYERS
    expected_timestep = dm_env.TimeStep(
        step_type=dm_env.StepType.MID,
        reward=REWARDS,
        discount=1.0,
        observation=expected_observation)
    np.testing.assert_equal(actual, expected_timestep)

  def test_spec(self):
    # The observation spec should gain a matching collective-reward entry
    # for every player.
    env = mock.Mock(spec_set=dmlab2d.Environment)
    env.observation_spec.return_value = [{
        'RGB': RGB_SPEC,
    }] * NUM_PLAYERS
    wrapped = collective_reward_wrapper.CollectiveRewardWrapper(env)
    added_key = collective_reward_wrapper._COLLECTIVE_REWARD_OBS
    self.assertEqual(wrapped.observation_spec(), [
        {'RGB': RGB_SPEC, added_key: COLLECTIVE_REWARD_SPEC}] * NUM_PLAYERS)
# Allows running this test file directly.
if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/collective_reward_wrapper_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for observables_wrapper."""
import dataclasses
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import dmlab2d
from meltingpot.utils.substrates.wrappers import observables_wrapper
class ObservablesWrapperTest(parameterized.TestCase):
  """Tests for ObservablesWrapper."""

  def test_observables(self):
    # Subscribe to every observable field and record all emissions, errors,
    # and completions in arrival order.
    base = mock.create_autospec(
        dmlab2d.Environment, instance=True, spec_set=True)
    with observables_wrapper.ObservablesWrapper(base) as env:
      received = []
      observables = env.observables()
      for field in dataclasses.fields(observables):
        getattr(observables, field.name).subscribe(
            on_next=received.append,
            on_error=lambda e: received.append(type(e)),
            on_completed=lambda: received.append('DONE'),
        )

      # Drive one reset followed by two steps, swapping the mock's return
      # values so each emission is distinguishable via sentinels.
      base.reset.return_value = mock.sentinel.timestep_0
      base.events.return_value = [mock.sentinel.events_0]
      env.reset()
      base.step.return_value = mock.sentinel.timestep_1
      base.events.return_value = [mock.sentinel.events_1]
      env.step(mock.sentinel.action_1)
      base.step.return_value = mock.sentinel.timestep_2
      base.events.return_value = [mock.sentinel.events_2]
      env.step(mock.sentinel.action_2)

    # Leaving the `with` closes the env, which should complete all three
    # observables (hence the three trailing 'DONE' markers).
    self.assertSequenceEqual(received, [
        mock.sentinel.timestep_0,
        mock.sentinel.events_0,
        mock.sentinel.action_1,
        mock.sentinel.timestep_1,
        mock.sentinel.events_1,
        mock.sentinel.action_2,
        mock.sentinel.timestep_2,
        mock.sentinel.events_2,
        'DONE',
        'DONE',
        'DONE',
    ])
# Allows running this test file directly.
if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/observables_wrapper_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for wrappers.
Wrappers are assumed to own the wrapped environment and that they have the
**only** reference to it. This means that they will:
1. Close the environment when they close.
2. Modify the environment specs and timesteps inplace.
"""
import abc
from typing import Any, Sequence
import chex
import dm_env
import dmlab2d
from meltingpot.utils.substrates.wrappers import base
import reactivex
@chex.dataclass(frozen=True)
class Lab2dObservables:
  """Observables for a Lab2D environment.

  Instances are immutable (frozen dataclass).

  Attributes:
    action: emits actions sent to the substrate from players.
    timestep: emits timesteps sent from the substrate to players.
    events: emits environment-specific events resulting from any interactions
      with the Substrate. Each individual event is emitted as a single element:
      (event_name, event_item).
  """
  action: reactivex.Observable[Sequence[int]]
  timestep: reactivex.Observable[dm_env.TimeStep]
  events: reactivex.Observable[tuple[str, Any]]
class ObservableLab2d(dmlab2d.Environment):
  """A DM Lab2D environment which is observable."""

  @abc.abstractmethod
  def observables(self) -> Lab2dObservables:
    """The observables of the Lab2D environment.

    Returns:
      The action, timestep, and event streams for this environment.
    """
class ObservableLab2dWrapper(base.Lab2dWrapper, ObservableLab2d):
  """Base class for wrappers of ObservableLab2d."""

  def observables(self, *args, **kwargs) -> ...:
    """See base class."""
    # Pure delegation to the wrapped environment.
    return self._env.observables(*args, **kwargs)
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/observables.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for wrappers.
Wrappers are assumed to own the wrapped environment and that they have the
**only** reference to it. This means that they will:
1. Close the environment when they close.
2. Modify the environment specs and timesteps inplace.
"""
import dmlab2d
class Lab2dWrapper(dmlab2d.Environment):
  """Base class for wrappers of dmlab2d.Environments.

  Every method is a pure pass-through to the wrapped environment; subclasses
  override only the methods whose behavior they change.
  """

  def __init__(self, env):
    """Initializes the wrapper.

    Args:
      env: An environment to wrap. This environment will be closed with this
        wrapper.
    """
    # Subclasses (e.g. ObservableLab2dWrapper) rely on this attribute name.
    self._env = env

  def reset(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.reset(*args, **kwargs)

  def step(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.step(*args, **kwargs)

  def reward_spec(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.reward_spec(*args, **kwargs)

  def discount_spec(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.discount_spec(*args, **kwargs)

  def observation_spec(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.observation_spec(*args, **kwargs)

  def action_spec(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.action_spec(*args, **kwargs)

  def close(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.close(*args, **kwargs)

  def observation(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.observation(*args, **kwargs)

  def events(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.events(*args, **kwargs)

  def list_property(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.list_property(*args, **kwargs)

  def write_property(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.write_property(*args, **kwargs)

  def read_property(self, *args, **kwargs) -> ...:
    """See base class."""
    return self._env.read_property(*args, **kwargs)
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/base.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper that converts the DMLab2D specs into lists of action/observation."""
from collections.abc import Collection, Iterator, Mapping, Sequence
from typing import TypeVar
import dm_env
from meltingpot.utils.substrates.wrappers import observables
import numpy as np
T = TypeVar("T")
def _player_observations(observations: Mapping[str, T], suffix: str,
                         num_players: int) -> Iterator[tuple[int, T]]:
  """Yields (player_index, value) pairs for players that have the given key.

  Args:
    observations: dmlab2d observations source to check.
    suffix: suffix of player key to return.
    num_players: the number of players.

  Yields:
    Pairs of (zero-based player index, observation value). Players whose
    "{lua_index}.{suffix}" key is absent are skipped. Array specs are renamed
    to the bare suffix.
  """
  for player_index in range(num_players):
    try:
      # dmlab2d keys are 1-indexed ("1.RGB"), player indices are 0-indexed.
      value = observations[f"{player_index + 1}.{suffix}"]
    except KeyError:
      pass
    else:
      if isinstance(value, dm_env.specs.Array):
        # Strip the "N." player prefix from spec names.
        value = value.replace(name=suffix)
      yield player_index, value
class Wrapper(observables.ObservableLab2dWrapper):
  """Wrapper that converts the environment to multiplayer lists.

  Ensures:
  - observations are returned as lists of dictionary observations
  - rewards are returned as lists of scalars
  - actions are received as lists of dictionary observations
  - discounts are never None
  """

  def __init__(self, env,
               individual_observation_names: Collection[str],
               global_observation_names: Collection[str]):
    """Constructor.

    Args:
      env: environment to wrap. When this wrapper closes env will also be
        closed.
      individual_observation_names: the per-player observations to make
        available to the players.
      global_observation_names: the observations that are available to all
        players and analytics.
    """
    super().__init__(env)
    self._num_players = self._get_num_players()
    self._individual_observation_suffixes = set(individual_observation_names)
    self._global_observation_names = set(global_observation_names)

  def _get_num_players(self) -> int:
    """Returns maximum player index in dmlab2d action spec."""
    # dmlab2d action keys look like "{lua_player_index}.{action_name}" with
    # 1-indexed players, so the maximum prefix is the player count.
    action_spec_keys = super().action_spec().keys()
    lua_player_indices = (int(key.split(".", 1)[0]) for key in action_spec_keys)
    return max(lua_player_indices)

  def _get_observations(
      self, source: Mapping[str, T]) -> Sequence[Mapping[str, T]]:
    """Returns multiplayer observations from dmlab2d observations.

    Args:
      source: dmlab2d observations source to check.

    Returns:
      One observation dict per player, containing that player's individual
      observations plus all global observations (shared, not copied).
    """
    player_observations = [{} for i in range(self._num_players)]
    for suffix in self._individual_observation_suffixes:
      for i, value in _player_observations(source, suffix, self._num_players):
        player_observations[i][suffix] = value
    for name in self._global_observation_names:
      value = source[name]
      for i in range(self._num_players):
        player_observations[i][name] = value
    return player_observations

  def _get_rewards(self, source: Mapping[str, T]) -> Sequence[T]:
    """Returns multiplayer rewards from dmlab2d observations.

    Args:
      source: dmlab2d observations source to check.

    Returns:
      One reward per player, taken from the per-player "REWARD" observation
      (None for players missing that key).
    """
    rewards = [None] * self._num_players
    for i, value in _player_observations(source, "REWARD", self._num_players):
      rewards[i] = value
    return rewards

  def _get_timestep(self, source: dm_env.TimeStep) -> dm_env.TimeStep:
    """Returns multiplayer timestep from dmlab2d observations.

    Args:
      source: dmlab2d observations source to check.
    """
    return dm_env.TimeStep(
        step_type=source.step_type,
        reward=self._get_rewards(source.observation),
        # Guarantees discounts are never None (see class docstring).
        discount=0. if source.discount is None else source.discount,
        observation=self._get_observations(source.observation))

  def _get_action(self, source: Sequence[Mapping[str, T]]) -> Mapping[str, T]:
    """Returns dmlab2 action from multiplayer actions.

    Args:
      source: multiplayer actions.
    """
    dmlab2d_actions = {}
    for player_index, action in enumerate(source):
      for key, value in action.items():
        # Re-attach the 1-indexed lua player prefix expected by dmlab2d.
        dmlab2d_actions[f"{player_index + 1}.{key}"] = value
    return dmlab2d_actions

  def reset(self) -> dm_env.TimeStep:
    """See base class."""
    timestep = super().reset()
    return self._get_timestep(timestep)

  def step(
      self, actions: Sequence[Mapping[str, np.ndarray]]) -> dm_env.TimeStep:
    """See base class."""
    action = self._get_action(actions)
    timestep = super().step(action)
    return self._get_timestep(timestep)

  def observation(self) -> Sequence[Mapping[str, np.ndarray]]:
    """See base class."""
    observation = super().observation()
    return self._get_observations(observation)

  def action_spec(self) -> Sequence[Mapping[str, dm_env.specs.Array]]:
    """See base class."""
    source = super().action_spec()
    action_spec = [{} for _ in range(self._num_players)]
    for key, spec in source.items():
      lua_player_index, suffix = key.split(".", 1)
      player_index = int(lua_player_index) - 1
      action_spec[player_index][suffix] = spec.replace(name=suffix)
    return action_spec

  def observation_spec(self) -> Sequence[Mapping[str, dm_env.specs.Array]]:
    """See base class."""
    # Specs are keyed identically to observations, so reuse the same mapping.
    source = super().observation_spec()
    return self._get_observations(source)

  def reward_spec(self) -> Sequence[dm_env.specs.Array]:
    """See base class."""
    # dmlab2d exposes rewards as per-player "REWARD" observations, so the
    # reward spec is derived from the observation spec.
    source = super().observation_spec()
    return self._get_rewards(source)
|
meltingpot-main
|
meltingpot/utils/substrates/wrappers/multiplayer_wrapper.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation utilities."""
import collections
from collections.abc import Collection, Iterator, Mapping
import contextlib
import os
from typing import Optional, TypeVar
import uuid
from absl import logging
import cv2
import dm_env
import meltingpot
from meltingpot.utils.policies import policy as policy_lib
from meltingpot.utils.policies import saved_model_policy
from meltingpot.utils.scenarios import population as population_lib
from meltingpot.utils.scenarios import scenario as scenario_lib
from meltingpot.utils.substrates import substrate as substrate_lib
import numpy as np
import pandas as pd
from reactivex import operators as ops
from reactivex import subject
T = TypeVar('T')
def run_episode(
    population: population_lib.Population,
    substrate: substrate_lib.Substrate,
) -> None:
  """Runs a population on a substrate for one episode.

  Args:
    population: the players' policies, stepped in lockstep with the substrate.
    substrate: the environment the population plays in.
  """
  population.reset()
  timestep = substrate.reset()
  while True:
    population.send_timestep(timestep)
    actions = population.await_action()
    if timestep.step_type.last():
      break
    timestep = substrate.step(actions)
class VideoSubject(subject.Subject):
  """Subject that emits a video at the end of each episode.

  Receives timesteps via on_next and writes each episode's 'WORLD.RGB' frames
  to a new uniquely-named video file; emits the file path when the episode
  ends.
  """

  def __init__(
      self,
      root: str,
      *,
      extension: str = 'webm',
      codec: str = 'vp90',
      fps: int = 30,
  ) -> None:
    """Initializes the instance.

    Args:
      root: directory to write videos in.
      extension: file extention of file.
      codec: codex to write with.
      fps: frames-per-second for videos.
    """
    super().__init__()
    self._root = root
    self._extension = extension
    self._codec = codec
    self._fps = fps
    # Both are None between episodes; set on the first timestep of each one.
    self._path = None
    self._writer = None

  def on_next(self, timestep: dm_env.TimeStep) -> None:
    """Called on each timestep.

    Args:
      timestep: the most recent timestep.

    Raises:
      ValueError: if the first timestep received is not StepType.FIRST.
    """
    # Frames come from player 0's global 'WORLD.RGB' observation.
    rgb_frame = timestep.observation[0]['WORLD.RGB']
    if timestep.step_type.first():
      # Start a new video file, sized from the first frame.
      self._path = os.path.join(
          self._root, f'{uuid.uuid4().hex}.{self._extension}')
      height, width, _ = rgb_frame.shape
      self._writer = cv2.VideoWriter(
          filename=self._path,
          fourcc=cv2.VideoWriter_fourcc(*self._codec),
          fps=self._fps,
          frameSize=(width, height),
          isColor=True)
    elif self._writer is None:
      raise ValueError('First timestep must be StepType.FIRST.')

    # cv2 expects BGR channel order.
    bgr_frame = cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2BGR)
    assert self._writer.isOpened()  # Catches any cv2 usage errors.
    self._writer.write(bgr_frame)

    if timestep.step_type.last():
      # Finalize the file and emit its path to subscribers.
      self._writer.release()
      super().on_next(self._path)
      self._path = None
      self._writer = None

  def dispose(self):
    """See base class."""
    # Release any writer left open by a partially-observed episode.
    if self._writer is not None:
      self._writer.release()
    super().dispose()
class ReturnSubject(subject.Subject):
  """Subject that emits the player returns at the end of each episode."""

  _return = None  # Running per-player return; None outside an episode.

  def on_next(self, timestep: dm_env.TimeStep):
    """Called on each timestep.

    Args:
      timestep: the most recent timestep.

    Raises:
      ValueError: if the first timestep received is not StepType.FIRST.
    """
    if timestep.step_type.first():
      self._return = np.zeros_like(timestep.reward)
    elif self._return is None:
      # Fail fast (mirroring VideoSubject) on streams that start mid-episode,
      # instead of raising an opaque error on the accumulation below.
      raise ValueError('First timestep must be StepType.FIRST.')
    self._return += timestep.reward
    if timestep.step_type.last():
      # Emit the accumulated episode return and reset for the next episode.
      super().on_next(self._return)
      self._return = None
def run_and_observe_episodes(
    population: population_lib.Population,
    substrate: substrate_lib.Substrate,
    num_episodes: int,
    video_root: Optional[str] = None,
) -> pd.DataFrame:
  """Runs a population on a substrate and returns results.

  Args:
    population: the population to run.
    substrate: the substrate to run on.
    num_episodes: the number of episodes to gather data for.
    video_root: path to directory to save videos in.

  Returns:
    A dataframe of results. One row for each episode with columns:
      background_player_names: the names of each background player.
      background_player_returns: the episode returns for each background player.
      focal_player_names: the names of each focal player.
      focal_player_returns: the episode returns for each focal player.
      video_path: a path to a video of the episode.
  """
  focal_observables = population.observables()
  if isinstance(substrate, scenario_lib.Scenario):
    substrate_observables = substrate.observables().substrate
    background_observables = substrate.observables().background
  else:
    # A bare substrate has no background population: synthesize empty
    # background observables from the focal ones so the aggregation below
    # works the same way in both cases.
    substrate_observables = substrate.observables()
    background_observables = population_lib.PopulationObservables(
        names=focal_observables.names.pipe(ops.map(lambda x: ())),
        action=focal_observables.action.pipe(ops.map(lambda x: ())),
        timestep=focal_observables.timestep.pipe(
            ops.map(lambda t: t._replace(observation=(), reward=()))))

  data = collections.defaultdict(list)
  with contextlib.ExitStack() as stack:

    def subscribe(observable, *args, **kwargs):
      # Subscribe and register disposal for when the stack unwinds.
      disposable = observable.subscribe(*args, **kwargs)  # pytype: disable=wrong-keyword-args
      stack.callback(disposable.dispose)

    if video_root:
      video_subject = VideoSubject(video_root)
      subscribe(substrate_observables.timestep, video_subject)
      subscribe(video_subject, on_next=data['video_path'].append)

    # Accumulate per-episode focal returns (per-player and per-capita).
    focal_return_subject = ReturnSubject()
    subscribe(focal_observables.timestep, focal_return_subject)
    subscribe(focal_return_subject, on_next=data['focal_player_returns'].append)
    subscribe(focal_return_subject.pipe(ops.map(np.mean)),
              on_next=data['focal_per_capita_return'].append)
    subscribe(focal_observables.names,
              on_next=data['focal_player_names'].append)

    # Accumulate per-episode background returns (per-player and per-capita).
    background_return_subject = ReturnSubject()
    subscribe(background_observables.timestep, background_return_subject)
    subscribe(background_return_subject,
              on_next=data['background_player_returns'].append)
    subscribe(background_return_subject.pipe(ops.map(np.mean)),
              on_next=data['background_per_capita_return'].append)
    subscribe(background_observables.names,
              on_next=data['background_player_names'].append)

    for n in range(num_episodes):
      run_episode(population, substrate)
      logging.info('%4d / %4d episodes completed...', n + 1, num_episodes)

  return pd.DataFrame(data).sort_index(axis=1)
def evaluate_population_on_scenario(
    population: Mapping[str, policy_lib.Policy],
    names_by_role: Mapping[str, Collection[str]],
    scenario: str,
    num_episodes: int = 100,
    video_root: Optional[str] = None,
) -> pd.DataFrame:
  """Evaluates a population on a scenario.

  Args:
    population: the population to evaluate.
    names_by_role: the names of the policies that support specific roles.
    scenario: the scenario to evaluate on.
    num_episodes: the number of episodes to run.
    video_root: path to directory to save videos in.

  Returns:
    A dataframe of results. One row for each episode with columns:
      background_player_names: the names of each background player.
      background_player_returns: the episode returns for each background player.
      focal_player_names: the names of each focal player.
      focal_player_returns: the episode returns for each focal player.
      video_path: a path to a video of the episode.
  """
  scenario_factory = meltingpot.scenario.get_factory(scenario)
  focal_roles = scenario_factory.focal_player_roles()
  focal_population = population_lib.Population(
      policies=population,
      names_by_role=names_by_role,
      roles=focal_roles)
  with scenario_factory.build() as env:
    return run_and_observe_episodes(
        population=focal_population,
        substrate=env,
        num_episodes=num_episodes,
        video_root=video_root)
def evaluate_population_on_substrate(
    population: Mapping[str, policy_lib.Policy],
    names_by_role: Mapping[str, Collection[str]],
    substrate: str,
    num_episodes: int = 100,
    video_root: Optional[str] = None,
) -> pd.DataFrame:
  """Evaluates a population on a substrate.

  Args:
    population: the population to evaluate.
    names_by_role: the names of the policies that support specific roles.
    substrate: the substrate to evaluate on.
    num_episodes: the number of episodes to run.
    video_root: path to directory to save videos in.

  Returns:
    A dataframe of results. One row for each episode with columns:
      background_player_names: the names of each background player.
      background_player_returns: the episode returns for each background player.
      focal_player_names: the names of each focal player.
      focal_player_returns: the episode returns for each focal player.
      video_path: a path to a video of the episode.
  """
  substrate_factory = meltingpot.substrate.get_factory(substrate)
  player_roles = substrate_factory.default_player_roles()
  focal_population = population_lib.Population(
      policies=population,
      names_by_role=names_by_role,
      roles=player_roles)
  with substrate_factory.build(player_roles) as env:
    return run_and_observe_episodes(
        population=focal_population,
        substrate=env,
        num_episodes=num_episodes,
        video_root=video_root)
def evaluate_population(
    population: Mapping[str, policy_lib.Policy],
    names_by_role: Mapping[str, Collection[str]],
    scenario: str,
    num_episodes: int = 100,
    video_root: Optional[str] = None,
) -> pd.DataFrame:
  """Evaluates a population on a scenario (or a substrate).

  Args:
    population: the population to evaluate.
    names_by_role: the names of the policies that support specific roles.
    scenario: the scenario (or substrate) to evaluate on.
    num_episodes: the number of episodes to run.
    video_root: path to directory to save videos under.

  Returns:
    A dataframe of results. One row for each episode with columns:
      background_player_names: the names of each background player.
      background_player_returns: the episode returns for each background player.
      focal_player_names: the names of each focal player.
      focal_player_returns: the episode returns for each focal player.
      video_path: a path to a video of the episode.

  Raises:
    ValueError: if the name is neither a known scenario nor substrate.
  """
  # Dispatch on whether the name identifies a scenario or a substrate.
  if scenario in meltingpot.scenario.SCENARIOS:
    evaluate = evaluate_population_on_scenario
  elif scenario in meltingpot.substrate.SUBSTRATES:
    evaluate = evaluate_population_on_substrate
  else:
    raise ValueError(f'Unknown substrate or scenario: {scenario!r}')
  return evaluate(population, names_by_role, scenario, num_episodes,
                  video_root)
@contextlib.contextmanager
def build_saved_model_population(
    saved_models: Mapping[str, str],
) -> Iterator[Mapping[str, policy_lib.Policy]]:
  """Builds a population from the specified saved models.

  Args:
    saved_models: a mapping from name to saved model path.

  Yields:
    A mapping from name to policy. All policies are closed on exit.
  """
  with contextlib.ExitStack() as stack:
    policies = {}
    for name, path in saved_models.items():
      # Register each policy with the stack so it is closed on exit.
      policy = saved_model_policy.SavedModelPolicy(path)
      policies[name] = stack.enter_context(policy)
    yield policies
def evaluate_saved_models_on_scenario(
    saved_models: Mapping[str, str],
    names_by_role: Mapping[str, Collection[str]],
    scenario: str,
    num_episodes: int = 100,
    video_root: Optional[str] = None,
) -> pd.DataFrame:
  """Evaluates saved models on a scenario.

  Args:
    saved_models: names and paths of the saved_models to evaluate.
    names_by_role: the names of the policies that support specific roles.
    scenario: the scenario to evaluate on.
    num_episodes: the number of episodes to run.
    video_root: path to directory to save videos in.

  Returns:
    A dataframe of results. One row for each episode with columns:
      background_player_names: the names of each background player.
      background_player_returns: the episode returns for each background player.
      focal_player_names: the names of each focal player.
      focal_player_returns: the episode returns for each focal player.
      video_path: a path to a video of the episode.
  """
  # Load the saved models as policies, then delegate to the population API.
  with build_saved_model_population(saved_models) as policies:
    return evaluate_population_on_scenario(
        population=policies,
        names_by_role=names_by_role,
        scenario=scenario,
        num_episodes=num_episodes,
        video_root=video_root)
def evaluate_saved_models_on_substrate(
    saved_models: Mapping[str, str],
    names_by_role: Mapping[str, Collection[str]],
    substrate: str,
    num_episodes: int = 100,
    video_root: Optional[str] = None,
) -> pd.DataFrame:
  """Evaluates saved models on a substrate.

  Args:
    saved_models: names and paths of the saved_models to evaluate.
    names_by_role: the names of the policies that support specific roles.
    substrate: the substrate to evaluate on.
    num_episodes: the number of episodes to run.
    video_root: path to directory to save videos in.

  Returns:
    A dataframe of results. One row for each episode with columns:
      background_player_names: the names of each background player.
      background_player_returns: the episode returns for each background player.
      focal_player_names: the names of each focal player.
      focal_player_returns: the episode returns for each focal player.
      video_path: a path to a video of the episode.
  """
  # Load the saved models as policies, then delegate to the population API.
  with build_saved_model_population(saved_models) as policies:
    return evaluate_population_on_substrate(
        population=policies,
        names_by_role=names_by_role,
        substrate=substrate,
        num_episodes=num_episodes,
        video_root=video_root)
def evaluate_saved_models(
    saved_models: Mapping[str, str],
    names_by_role: Mapping[str, Collection[str]],
    scenario: str,
    num_episodes: int = 100,
    video_root: Optional[str] = None,
) -> pd.DataFrame:
  """Evaluates saved models on a scenario (or a substrate).

  Args:
    saved_models: names and paths of the saved_models to evaluate.
    names_by_role: the names of the policies that support specific roles.
    scenario: the scenario (or substrate) to evaluate on.
    num_episodes: the number of episodes to run.
    video_root: path to directory to save videos under.

  Returns:
    A dataframe of results. One row for each episode with columns:
      background_player_names: the names of each background player.
      background_player_returns: the episode returns for each background player.
      focal_player_names: the names of each focal player.
      focal_player_returns: the episode returns for each focal player.
      video_path: a path to a video of the episode.
  """
  # Load the saved models as policies, then delegate to the population API.
  with build_saved_model_population(saved_models) as policies:
    return evaluate_population(
        population=policies,
        names_by_role=names_by_role,
        scenario=scenario,
        num_episodes=num_episodes,
        video_root=video_root)
|
meltingpot-main
|
meltingpot/utils/evaluation/evaluation.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
meltingpot-main
|
meltingpot/utils/evaluation/__init__.py
|
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from absl.testing import absltest
import cv2
import dm_env
from meltingpot.utils.evaluation import evaluation
import numpy as np
def _as_timesteps(frames):
  """Yields a single-player episode observing the given frames in order."""
  head, *middle, tail = frames
  yield dm_env.restart(observation=[{'WORLD.RGB': head}])
  for frame in middle:
    yield dm_env.transition(observation=[{'WORLD.RGB': frame}], reward=0)
  yield dm_env.termination(observation=[{'WORLD.RGB': tail}], reward=0)
def _get_frames(path):
  """Yields RGB frames decoded from the video at path.

  Args:
    path: path of the video file to read.

  Yields:
    One numpy RGB frame per video frame, in order.
  """
  capture = cv2.VideoCapture(path)
  # try/finally ensures the capture is released even if the generator is
  # abandoned before exhaustion or an exception escapes the loop (the
  # original only released on normal loop exit).
  try:
    while capture.isOpened():
      ret, bgr_frame = capture.read()
      if not ret:
        break
      # cv2 decodes to BGR; convert to RGB for comparison with test frames.
      yield cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
  finally:
    capture.release()
# (rows, cols) of the synthetic single-channel test frames.
FRAME_SHAPE = (4, 8)
ZERO = np.zeros(FRAME_SHAPE, np.uint8)
# Diagonal ramp at full intensity.
EYE = np.eye(*FRAME_SHAPE, dtype=np.uint8) * 255
# Pure red/green/blue diagonal frames of shape (rows, cols, 3).
RED_EYE = np.stack([EYE, ZERO, ZERO], axis=-1)
GREEN_EYE = np.stack([ZERO, EYE, ZERO], axis=-1)
BLUE_EYE = np.stack([ZERO, ZERO, EYE], axis=-1)
class EvaluationTest(absltest.TestCase):
  """Tests for the evaluation subjects."""

  def test_video_subject(self):
    # Feed a three-frame episode into the subject and check that a video is
    # written only on the final timestep and round-trips losslessly.
    video_path = None
    step_written = None

    def save_path(path):
      nonlocal video_path
      video_path = path

    tempdir = tempfile.mkdtemp()
    assert os.path.exists(tempdir)
    # Use lossless compression for test.
    subject = evaluation.VideoSubject(tempdir, extension='avi', codec='png ')
    subject.subscribe(on_next=save_path)

    frames = [RED_EYE, GREEN_EYE, BLUE_EYE]
    for n, timestep in enumerate(_as_timesteps(frames)):
      subject.on_next(timestep)
      if step_written is None and video_path is not None:
        step_written = n

    with self.subTest('video_exists'):
      self.assertTrue(video_path and os.path.exists(video_path))
    with self.subTest('written_on_final_step'):
      self.assertEqual(step_written, 2)
    with self.subTest('contents'):
      written = list(_get_frames(video_path))
      np.testing.assert_equal(written, frames)

  def test_return_subject(self):
    # Feed a two-player episode and check that per-player returns are summed
    # and emitted only on the final timestep.
    episode_return = None
    step_written = None

    def save_return(ret):
      nonlocal episode_return
      episode_return = ret

    subject = evaluation.ReturnSubject()
    subject.subscribe(on_next=save_return)

    timesteps = [
        dm_env.restart(observation=[{}])._replace(reward=[0, 0]),
        dm_env.transition(observation=[{}], reward=[2, 4]),
        dm_env.termination(observation=[{}], reward=[1, 3]),
    ]
    for n, timestep in enumerate(timesteps):
      subject.on_next(timestep)
      if step_written is None and episode_return is not None:
        step_written = n

    with self.subTest('written_on_final_step'):
      self.assertEqual(step_written, 2)
    with self.subTest('contents'):
      np.testing.assert_equal(episode_return, [3, 7])
if __name__ == '__main__':
  absltest.main()  # Runs all tests in this module.
|
meltingpot-main
|
meltingpot/utils/evaluation/evaluation_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utilities for substrates."""
from absl.testing import parameterized
class SubstrateTestCase(parameterized.TestCase):
  """Base class for tests of substrates."""

  def assert_step_matches_specs(self, env):
    """Asserts that env accepts an action permitted by its spec.

    Args:
      env: environment to check.

    Raises:
      AssertionError: the env doesn't match its spec.
    """
    env.reset()
    # Probe with the largest action each player's spec allows.
    action = [spec.maximum for spec in env.action_spec()]
    try:
      timestep = env.step(action)
    except Exception:  # pylint: disable=broad-except
      self.fail(f'Failure when passing action {action!r}.')
    try:
      env.discount_spec().validate(timestep.discount)
    except ValueError:
      self.fail('Discount does not match spec.')
    reward_spec = env.reward_spec()
    if len(reward_spec) != len(timestep.reward):
      self.fail(f'Spec is length {len(reward_spec)} but reward is length '
                f'{len(timestep.reward)}.')
    for n, spec in enumerate(reward_spec):
      try:
        spec.validate(timestep.reward[n])
      except ValueError:
        self.fail(f'Reward {n} does not match spec.')
    observations = timestep.observation
    observation_specs = env.observation_spec()
    if len(observation_specs) != len(observations):
      self.fail(f'Spec is length {len(observation_specs)} but observations '
                f'are length {len(observations)}')
    for n, (observation, spec) in enumerate(
        zip(observations, observation_specs)):
      if set(spec) != set(observation):
        # Bug fix: the message previously interpolated the observation keys in
        # both placeholders, making key mismatches impossible to diagnose.
        self.fail(f'Observation {n} keys {set(observation)!r} do not match '
                  f'spec keys {set(spec)!r}.')
      for key in spec:
        try:
          spec[key].validate(observation[key])
        except ValueError:
          self.fail(f'Observation {n} key {key!r} does not match spec.')
|
meltingpot-main
|
meltingpot/testing/substrates.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing bots."""
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
from meltingpot.utils.policies import policy as policy_lib
import tree
class BotTestCase(parameterized.TestCase):
  """Base test case for bots."""

  def assert_compatible(
      self,
      policy: policy_lib.Policy,
      timestep_spec: dm_env.TimeStep,
      action_spec: dm_env.specs.DiscreteArray,
  ) -> None:
    """Asserts that policy matches the given spec.

    Args:
      policy: policy to check.
      timestep_spec: the timestep spec to check the policy against.
      action_spec: the action spec to check the policy against.

    Raises:
      AssertionError: the env doesn't match its spec.
    """
    # Build a concrete timestep by generating a value from every leaf spec.
    probe_timestep = tree.map_structure(
        lambda spec: spec.generate_value(), timestep_spec)
    state = policy.initial_state()
    try:
      chosen_action, _ = policy.step(probe_timestep, state)
    except Exception:  # pylint: disable=broad-except
      self.fail(f'Failed step with timestep matching spec {timestep_spec!r}.')
    try:
      action_spec.validate(chosen_action)
    except ValueError:
      self.fail(f'Returned action {chosen_action!r} does not match '
                f'action_spec {action_spec!r}.')
if __name__ == '__main__':
  absltest.main()  # Runs all tests in this module.
|
meltingpot-main
|
meltingpot/testing/bots.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
meltingpot-main
|
meltingpot/testing/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Puppeteer test utilities."""
from typing import Any, Iterable, Iterator, Mapping, Optional, Sequence, TypeVar
import dm_env
from meltingpot.utils.puppeteers import puppeteer as puppeteer_lib
# Re-exported for tests: observation key under which puppeteers write goals.
GOAL_KEY = puppeteer_lib._GOAL_OBSERVATION_KEY  # pylint: disable=protected-access
# Type variable for the puppeteer's recurrent state.
State = TypeVar('State')
def step_many(
    puppeteer: puppeteer_lib.Puppeteer[State],
    timesteps: Iterable[dm_env.TimeStep],
    state: Optional[State] = None,
) -> Iterator[tuple[dm_env.TimeStep, State]]:
  """Steps the puppeteer over timesteps, yielding (timestep, state) pairs.

  Args:
    puppeteer: the puppeteer to step.
    timesteps: the timesteps to feed it.
    state: optional initial state; defaults to puppeteer.initial_state().

  Yields:
    The transformed timestep and the puppeteer state after each step.
  """
  current = puppeteer.initial_state() if state is None else state
  for timestep in timesteps:
    transformed, current = puppeteer.step(timestep, current)
    yield transformed, current
def goals_from_timesteps(
    puppeteer: puppeteer_lib.Puppeteer[State],
    timesteps: Iterable[dm_env.TimeStep],
    state: Optional[State] = None,
) -> tuple[Sequence[puppeteer_lib.PuppetGoal], State]:
  """Returns the goal emitted for each timestep plus the final state.

  Args:
    puppeteer: the puppeteer to step.
    timesteps: the timesteps to feed it.
    state: optional initial state; defaults to puppeteer.initial_state().
  """
  collected = []
  final_state = state
  for transformed, final_state in step_many(puppeteer, timesteps, state):
    collected.append(transformed.observation[GOAL_KEY])
  return collected, final_state
def episode_timesteps(
    observations: Sequence[Mapping[str, Any]]) -> Iterator[dm_env.TimeStep]:
  """Yields a full episode of timesteps, one per observation.

  The first observation becomes a FIRST (restart) timestep, the last a LAST
  (termination) timestep, and all others MID transitions with zero reward.
  """
  last_index = len(observations) - 1
  for index, observation in enumerate(observations):
    if index == 0:
      yield dm_env.restart(observation=observation)
    elif index == last_index:
      yield dm_env.termination(observation=observation, reward=0)
    else:
      yield dm_env.transition(observation=observation, reward=0)
def goals_from_observations(
    puppeteer: puppeteer_lib.Puppeteer[State],
    observations: Sequence[Mapping[str, Any]],
    state: Optional[State] = None,
) -> tuple[Sequence[puppeteer_lib.PuppetGoal], State]:
  """Returns puppet goals from an episode of the provided observations."""
  # Wrap the observations into an episode, then delegate goal extraction.
  return goals_from_timesteps(
      puppeteer, episode_timesteps(observations), state)
|
meltingpot-main
|
meltingpot/testing/puppeteers.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import dm_env
from meltingpot.testing import mocks
from meltingpot.testing import substrates
from meltingpot.utils.substrates import specs as meltingpot_specs
from meltingpot.utils.substrates import substrate
import numpy as np
class MocksTest(substrates.SubstrateTestCase):
  """Tests for the mock substrate/scenario builders."""

  def test_value_from_specs(self):
    """Player n's generated observation values are filled with n."""
    specs = (
        {'a': dm_env.specs.Array([1, 2, 3], dtype=np.uint8)},
        {'b': dm_env.specs.Array([1, 2, 3], dtype=np.uint8)},
    )
    actual = mocks._values_from_specs(specs)
    expected = (
        {'a': np.zeros([1, 2, 3], dtype=np.uint8)},
        {'b': np.ones([1, 2, 3], dtype=np.uint8)},
    )
    np.testing.assert_equal(actual, expected)

  def test_mock_substrate(self):
    """Mock substrate conforms to the Substrate API and its declared specs."""
    num_players = 2
    num_actions = 3
    observation_spec = {'a': dm_env.specs.Array([], dtype=np.uint8)}
    mock = mocks.build_mock_substrate(
        num_players=num_players,
        num_actions=num_actions,
        observation_spec=observation_spec)
    # Player n's observation values are filled with n.
    expected_observation = (
        {'a': np.zeros([], dtype=np.uint8)},
        {'a': np.ones([], dtype=np.uint8)},
    )
    # Player n receives reward float(n) on step.
    expected_reward = tuple(float(n) for n in range(num_players))
    with self.subTest('is_substrate'):
      self.assertIsInstance(mock, substrate.Substrate)
    with self.subTest('error_getting_invalid'):
      # spec_set autospec rejects attributes Substrate doesn't define.
      with self.assertRaises(AttributeError):
        mock.no_such_method()  # pytype: disable=attribute-error
    with self.subTest('error_setting_invalid'):
      with self.assertRaises(AttributeError):
        mock.no_such_method = None
    with self.subTest('can_enter_context'):
      with mock as c:
        self.assertEqual(c.discount_spec(), mock.discount_spec())
    with self.subTest('action_spec'):
      self.assertEqual(mock.action_spec(), (meltingpot_specs.action(3),) * 2)
    with self.subTest('reward_spec'):
      self.assertLen(mock.reward_spec(), num_players)
    with self.subTest('observation_spec'):
      self.assertLen(mock.observation_spec(), num_players)
    with self.subTest('reset'):
      expected = dm_env.TimeStep(
          step_type=dm_env.StepType.FIRST,
          observation=expected_observation,
          reward=(0.,) * num_players,
          discount=0.,
      )
      self.assertEqual(mock.reset(), expected)
    with self.subTest('step'):
      expected = dm_env.transition(expected_reward, expected_observation)
      self.assertEqual(mock.step([0, 0]), expected)
    with self.subTest('events'):
      self.assertEmpty(mock.events())
    with self.subTest('observation'):
      self.assertEqual(mock.observation(), expected_observation)

  def test_mock_substrate_like(self):
    """Mock of a named substrate steps consistently with its factory specs."""
    mock = mocks.build_mock_substrate_like('clean_up')
    self.assert_step_matches_specs(mock)

  def test_mock_scenario_like(self):
    """Mock of a named scenario steps consistently with its factory specs."""
    mock = mocks.build_mock_scenario_like('clean_up_0')
    self.assert_step_matches_specs(mock)
if __name__ == '__main__':
  absltest.main()  # Runs all tests in this module.
|
meltingpot-main
|
meltingpot/testing/mocks_test.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mocks of various Melting Pot classes for use in testing."""
from collections.abc import Mapping, Sequence
from typing import Optional, Type, TypeVar
from unittest import mock
import dm_env
import immutabledict
import meltingpot
from meltingpot.utils.scenarios import scenario
from meltingpot.utils.substrates import specs as meltingpot_specs
from meltingpot.utils.substrates import substrate
import numpy as np
import tree
# Default per-player observation spec used when mocking a generic substrate.
SUBSTRATE_OBSERVATION_SPEC = immutabledict.immutabledict({
    # Observations present in all substrates. Sizes may vary.
    'RGB': meltingpot_specs.OBSERVATION['RGB'],
    'WORLD.RGB': meltingpot_specs.rgb(128, 256, name='WORLD.RGB'),
})
# Default per-player observation spec used when mocking a generic scenario.
# Note: unlike substrates, no 'WORLD.RGB' entry here.
SCENARIO_OBSERVATION_SPEC = immutabledict.immutabledict({
    # Observations present in all scenarios.
    'RGB': meltingpot_specs.OBSERVATION['RGB'],
})
def _values_from_specs(
    specs: Sequence[tree.Structure[dm_env.specs.Array]]
) -> tree.Structure[np.ndarray]:
  """Returns one structure of values per spec, filled with the spec's index."""
  generated = tree.map_structure(lambda spec: spec.generate_value(), specs)
  result = []
  for player_index, structure in enumerate(generated):
    # Bind the index as a default so each lambda captures its own value.
    result.append(tree.map_structure(
        lambda value, i=player_index: np.full_like(value, i), structure))
  return tuple(result)
# Any concrete Substrate subclass; types the `spec` argument of the builder.
_AnySubstrate = TypeVar('_AnySubstrate', bound=substrate.Substrate)
def _build_mock_substrate(
    *,
    spec: Type[_AnySubstrate],
    num_players: int,
    timestep_spec: dm_env.TimeStep,
    action_spec: dm_env.specs.DiscreteArray,
) -> ...:
  """Returns a mock Substrate for use in testing.

  Args:
    spec: the Substrate class to use as a spec.
    num_players: the number of players in the substrate.
    timestep_spec: the timestep spec for a single player.
    action_spec: the action spec for a single player.
  """
  def per_player(value):
    # Replicates a single-player value once per player.
    return (value,) * num_players

  stub = mock.create_autospec(spec=spec, instance=True, spec_set=True)
  # Support use as a context manager.
  stub.__enter__.return_value = stub
  stub.__exit__.return_value = None
  # Specs are the single-player specs replicated for every player.
  stub.observation_spec.return_value = per_player(timestep_spec.observation)
  stub.reward_spec.return_value = per_player(timestep_spec.reward)
  stub.discount_spec.return_value = timestep_spec.discount
  stub.action_spec.return_value = per_player(action_spec)
  stub.events.return_value = ()
  # Player n observes spec-generated values filled with n.
  observation = _values_from_specs(per_player(timestep_spec.observation))
  stub.observation.return_value = observation
  stub.reset.return_value = dm_env.TimeStep(
      step_type=dm_env.StepType.FIRST,
      reward=per_player(timestep_spec.reward.generate_value()),
      discount=0.,
      observation=observation,
  )
  # Player n receives reward float(n) on every step.
  stub.step.return_value = dm_env.transition(
      reward=tuple(float(i) for i in range(num_players)),
      observation=observation,
  )
  return stub
def build_mock_substrate(
    *,
    num_players: int = 8,
    num_actions: int = 8,
    observation_spec: Mapping[str,
                              dm_env.specs.Array] = SUBSTRATE_OBSERVATION_SPEC,
) -> ...:
  """Returns a mock Substrate for use in testing.

  Args:
    num_players: the number of players in the substrate.
    num_actions: the number of actions supported by the substrate.
    observation_spec: the observation spec for a single player.
  """
  timestep_spec = meltingpot_specs.timestep(observation_spec)
  action_spec = meltingpot_specs.action(num_actions)
  return _build_mock_substrate(
      spec=substrate.Substrate,
      num_players=num_players,
      action_spec=action_spec,
      timestep_spec=timestep_spec,
  )
def build_mock_substrate_like(name: str, *,
                              num_players: Optional[int] = None) -> ...:
  """Returns a mock of a specific Substrate for use in testing.

  Args:
    name: substrate to mock.
    num_players: number of players to support; defaults to the number of
      default player roles of the named substrate.
  """
  factory = meltingpot.substrate.get_factory(name)
  player_count = (
      len(factory.default_player_roles()) if num_players is None
      else num_players)
  return _build_mock_substrate(
      spec=substrate.Substrate,
      num_players=player_count,
      action_spec=factory.action_spec(),
      timestep_spec=factory.timestep_spec(),
  )
def build_mock_scenario(
    *,
    num_players: int = 8,
    num_actions: int = 8,
    observation_spec: Mapping[str,
                              dm_env.specs.Array] = SCENARIO_OBSERVATION_SPEC,
) -> ...:
  """Returns a mock Scenario for use in testing.

  Args:
    num_players: the number of focal players in the scenario.
    num_actions: the number of actions supported by the scenario.
    observation_spec: the observation spec for a single focal player.
  """
  timestep_spec = meltingpot_specs.timestep(observation_spec)
  action_spec = meltingpot_specs.action(num_actions)
  return _build_mock_substrate(
      spec=scenario.Scenario,
      num_players=num_players,
      action_spec=action_spec,
      timestep_spec=timestep_spec,
  )
def build_mock_scenario_like(name: str) -> ...:
  """Returns a mock of a specific Scenario for use in testing.

  Args:
    name: scenario to mock.
  """
  factory = meltingpot.scenario.get_factory(name)
  return _build_mock_substrate(
      spec=scenario.Scenario,
      timestep_spec=factory.timestep_spec(),
      action_spec=factory.action_spec(),
      num_players=factory.num_focal_players(),
  )
|
meltingpot-main
|
meltingpot/testing/mocks.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `coop_mining`.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire the gift beam.
Use `1` to consume tokens.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import coop_mining
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Maps each playable level name to its substrate config module.
environment_configs = {
    'coop_mining': coop_mining,
}
# Maps each substrate action name to the function polling its key state.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'mine': level_playing_utils.get_space_key_pressed,
}
def verbose_fn(unused_env, unused_player_index, unused_current_player_index):
  """Per-step debug callback for --verbose; intentionally a no-op here."""
  pass
def main():
  """Parses command-line flags, builds the level, and runs an episode."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      '--level_name', type=str, default='coop_mining',
      choices=environment_configs.keys(),
      help='Level name to load')
  parser.add_argument(
      '--observation', type=str, default='RGB', help='Observation to render')
  parser.add_argument(
      '--settings', type=json.loads, default={}, help='Settings as JSON string')
  # Activate verbose mode with --verbose=True.
  parser.add_argument(
      '--verbose', type=bool, default=False, help='Print debug information')
  # Activate events printing mode with --print_events=True.
  parser.add_argument(
      '--print_events', type=bool, default=False, help='Print events')
  args = parser.parse_args()

  config_module = environment_configs[args.level_name]
  config = config_module.get_config()
  # Unlock the config to attach the lab2d settings built for the default roles.
  with config_dict.ConfigDict(config).unlocked() as config:
    config.lab2d_settings = config_module.build(
        config.default_player_roles, config)

  level_playing_utils.run_episode(
      args.observation, args.settings, _ACTION_MAP,
      config, level_playing_utils.RenderType.PYGAME,
      verbose_fn=verbose_fn if args.verbose else None,
      print_events=args.print_events)
if __name__ == '__main__':
  main()  # Entry point when run as a script.
|
meltingpot-main
|
meltingpot/human_players/play_coop_mining.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing any `territory` substrate.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire the zapper.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import territory__inside_out
from meltingpot.configs.substrates import territory__open
from meltingpot.configs.substrates import territory__rooms
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Maps each playable level name to its substrate config module.
environment_configs = {
    'territory__open': territory__open,
    'territory__rooms': territory__rooms,
    'territory__inside_out': territory__inside_out,
}
# Maps each substrate action name to the function polling its key state.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'fireZap': level_playing_utils.get_space_key_pressed,
    'fireClaim': level_playing_utils.get_left_shift_pressed,
}
def verbose_fn(unused_env, unused_player_index, unused_current_player_index):
  """Per-step debug callback for --verbose; intentionally a no-op here."""
  pass
def main():
  """Parses command-line flags, builds the level, and runs an episode."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      '--level_name', type=str, default='territory__rooms',
      choices=environment_configs.keys(),
      help='Level name to load')
  parser.add_argument(
      '--observation', type=str, default='RGB', help='Observation to render')
  parser.add_argument(
      '--settings', type=json.loads, default={}, help='Settings as JSON string')
  # Activate verbose mode with --verbose=True.
  parser.add_argument(
      '--verbose', type=bool, default=False, help='Print debug information')
  # Activate events printing mode with --print_events=True.
  parser.add_argument(
      '--print_events', type=bool, default=False, help='Print events')
  args = parser.parse_args()

  module = environment_configs[args.level_name]
  cfg = module.get_config()
  # Unlock the config to attach the lab2d settings built for the default roles.
  with config_dict.ConfigDict(cfg).unlocked() as cfg:
    cfg.lab2d_settings = module.build(cfg.default_player_roles, cfg)

  debug_callback = verbose_fn if args.verbose else None
  level_playing_utils.run_episode(
      args.observation, args.settings, _ACTION_MAP,
      cfg, level_playing_utils.RenderType.PYGAME,
      verbose_fn=debug_callback,
      print_events=args.print_events)
if __name__ == '__main__':
  main()  # Entry point when run as a script.
|
meltingpot-main
|
meltingpot/human_players/play_territory.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `*_in_the_matrix`.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire the interaction beam.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import bach_or_stravinsky_in_the_matrix__arena as bach_or_stravinsky_itm
from meltingpot.configs.substrates import bach_or_stravinsky_in_the_matrix__repeated as bach_or_stravinsky_itm__repeated
from meltingpot.configs.substrates import chicken_in_the_matrix__arena as chicken_itm
from meltingpot.configs.substrates import chicken_in_the_matrix__repeated as chicken_itm__repeated
from meltingpot.configs.substrates import prisoners_dilemma_in_the_matrix__arena as prisoners_dilemma_itm
from meltingpot.configs.substrates import prisoners_dilemma_in_the_matrix__repeated as prisoners_dilemma_itm__repeated
from meltingpot.configs.substrates import pure_coordination_in_the_matrix__arena as pure_coord_itm
from meltingpot.configs.substrates import pure_coordination_in_the_matrix__repeated as pure_coord_itm__repeated
from meltingpot.configs.substrates import rationalizable_coordination_in_the_matrix__arena as rational_coord_itm
from meltingpot.configs.substrates import rationalizable_coordination_in_the_matrix__repeated as rational_coord_itm__repeated
from meltingpot.configs.substrates import running_with_scissors_in_the_matrix__arena as rws_itm__arena
from meltingpot.configs.substrates import running_with_scissors_in_the_matrix__one_shot as rws_itm
from meltingpot.configs.substrates import running_with_scissors_in_the_matrix__repeated as rws_itm__repeated
from meltingpot.configs.substrates import stag_hunt_in_the_matrix__arena as stag_hunt_itm
from meltingpot.configs.substrates import stag_hunt_in_the_matrix__repeated as stag_hunt_itm__repeated
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Maps each playable *_in_the_matrix level name to its config module.
environment_configs = {
    'bach_or_stravinsky_in_the_matrix__arena': bach_or_stravinsky_itm,
    'bach_or_stravinsky_in_the_matrix__repeated':
        bach_or_stravinsky_itm__repeated,
    'chicken_in_the_matrix__arena': chicken_itm,
    'chicken_in_the_matrix__repeated': chicken_itm__repeated,
    'prisoners_dilemma_in_the_matrix__arena': prisoners_dilemma_itm,
    'prisoners_dilemma_in_the_matrix__repeated':
        prisoners_dilemma_itm__repeated,
    'pure_coordination_in_the_matrix__arena': pure_coord_itm,
    'pure_coordination_in_the_matrix__repeated': pure_coord_itm__repeated,
    'rationalizable_coordination_in_the_matrix__arena': rational_coord_itm,
    'rationalizable_coordination_in_the_matrix__repeated':
        rational_coord_itm__repeated,
    'running_with_scissors_in_the_matrix__arena': rws_itm__arena,
    'running_with_scissors_in_the_matrix__one_shot': rws_itm,
    'running_with_scissors_in_the_matrix__repeated': rws_itm__repeated,
    'stag_hunt_in_the_matrix__arena': stag_hunt_itm,
    'stag_hunt_in_the_matrix__repeated': stag_hunt_itm__repeated,
}
# Maps each substrate action name to the function polling its key state.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'interact': level_playing_utils.get_space_key_pressed,
}
def verbose_fn(env_timestep, player_index, current_player_index):
  """Print using this function once enabling the option --verbose=True.

  Args:
    env_timestep: the environment timestep whose observations are printed.
    player_index: index of the player whose observations are read.
    current_player_index: index of the currently-controlled player; output is
      only printed when it equals player_index.
  """
  lua_index = player_index + 1  # Observation keys are 1-indexed (Lua side).
  collected_resource_1 = env_timestep.observation[
      f'{lua_index}.COLLECTED_RESOURCE_1']
  collected_resource_2 = env_timestep.observation[
      f'{lua_index}.COLLECTED_RESOURCE_2']
  destroyed_resource_1 = env_timestep.observation[
      f'{lua_index}.DESTROYED_RESOURCE_1']
  destroyed_resource_2 = env_timestep.observation[
      f'{lua_index}.DESTROYED_RESOURCE_2']
  interacted_this_step = env_timestep.observation[
      f'{lua_index}.INTERACTED_THIS_STEP']
  argmax_interact_inventory_1 = env_timestep.observation[
      f'{lua_index}.ARGMAX_INTERACTION_INVENTORY_WAS_1']
  argmax_interact_inventory_2 = env_timestep.observation[
      f'{lua_index}.ARGMAX_INTERACTION_INVENTORY_WAS_2']
  # Only print observations from current player.
  if player_index == current_player_index:
    print(
        f'player: {player_index} --- \n' +
        f' collected_resource_1: {collected_resource_1} \n' +
        f' collected_resource_2: {collected_resource_2} \n' +
        f' destroyed_resource_1: {destroyed_resource_1} \n' +
        # Bug fix: this line was labelled 'destroyed_resource_1' while
        # printing destroyed_resource_2's value.
        f' destroyed_resource_2: {destroyed_resource_2} \n' +
        f' interacted_this_step: {interacted_this_step} \n' +
        f' argmax_interaction_inventory_1: {argmax_interact_inventory_1} \n' +
        f' argmax_interaction_inventory_2: {argmax_interact_inventory_2} \n'
    )
def main():
  """Parses command-line flags, builds the level, and runs an episode."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      '--level_name', type=str,
      default='prisoners_dilemma_in_the_matrix__repeated',
      choices=environment_configs.keys(),
      help='Level name to load')
  parser.add_argument(
      '--observation', type=str, default='RGB', help='Observation to render')
  parser.add_argument(
      '--settings', type=json.loads, default={}, help='Settings as JSON string')
  # Activate verbose mode with --verbose=True.
  parser.add_argument(
      '--verbose', type=bool, default=False, help='Print debug information')
  # Activate events printing mode with --print_events=True.
  parser.add_argument(
      '--print_events', type=bool, default=False, help='Print events')
  args = parser.parse_args()

  config_module = environment_configs[args.level_name]
  config = config_module.get_config()
  # Unlock the config to attach the lab2d settings built for the default roles.
  with config_dict.ConfigDict(config).unlocked() as config:
    config.lab2d_settings = config_module.build(
        config.default_player_roles, config)

  debug_callback = verbose_fn if args.verbose else None
  level_playing_utils.run_episode(
      args.observation, args.settings, _ACTION_MAP,
      config, level_playing_utils.RenderType.PYGAME,
      verbose_fn=debug_callback,
      print_events=args.print_events)
if __name__ == '__main__':
  main()  # Entry point when run as a script.
|
meltingpot-main
|
meltingpot/human_players/play_anything_in_the_matrix.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing any `paintball__*` substrate.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire the zapper.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import paintball__capture_the_flag
from meltingpot.configs.substrates import paintball__king_of_the_hill
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
def get_zap() -> int:
  """Sets zap to either 0, 1, or 2."""
  # Right shift takes precedence over space.
  if level_playing_utils.get_right_shift_pressed():
    return 2
  return 1 if level_playing_utils.get_space_key_pressed() else 0
# Maps each playable level name to its substrate config module.
environment_configs = {
    'paintball__capture_the_flag': paintball__capture_the_flag,
    'paintball__king_of_the_hill': paintball__king_of_the_hill,
}
# Maps each substrate action name to the function polling its key state.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'fireZap': get_zap,
}
def verbose_fn(unused_env, unused_player_index, unused_current_player_index):
  """Per-step debug callback for --verbose; intentionally a no-op here."""
  pass
def main():
  """Parses command-line flags, builds the level, and runs an episode."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      '--level_name', type=str, default='paintball__capture_the_flag',
      choices=environment_configs.keys(),
      help='Level name to load')
  parser.add_argument(
      '--observation', type=str, default='RGB', help='Observation to render')
  parser.add_argument(
      '--settings', type=json.loads, default={}, help='Settings as JSON string')
  # Activate verbose mode with --verbose=True.
  parser.add_argument(
      '--verbose', type=bool, default=False, help='Print debug information')
  # Activate events printing mode with --print_events=True.
  parser.add_argument(
      '--print_events', type=bool, default=False, help='Print events')
  args = parser.parse_args()

  module = environment_configs[args.level_name]
  cfg = module.get_config()
  # Unlock the config to attach the lab2d settings built for the default roles.
  with config_dict.ConfigDict(cfg).unlocked() as cfg:
    cfg.lab2d_settings = module.build(cfg.default_player_roles, cfg)

  level_playing_utils.run_episode(
      args.observation, args.settings, _ACTION_MAP,
      cfg, level_playing_utils.RenderType.PYGAME,
      verbose_fn=verbose_fn if args.verbose else None,
      print_events=args.print_events)
if __name__ == '__main__':
  main()  # Entry point when run as a script.
|
meltingpot-main
|
meltingpot/human_players/play_paintball.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `boat_race`.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use ` ` to row (effectively, but needs coordinated stroke).
Use `x` to flail (row ineffectively, but with safe steady progress).
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import boat_race__eight_races
from meltingpot.human_players import level_playing_utils
from meltingpot.utils.substrates import game_object_utils
from ml_collections import config_dict
# Rendering window limits and frame rate for the pygame display.
MAX_SCREEN_WIDTH = 600
MAX_SCREEN_HEIGHT = 800
FRAMES_PER_SECOND = 8
# Maps playable level names to their substrate config modules.
environment_configs = {
    'boat_race__eight_races': boat_race__eight_races,
}
# Maps each substrate action name to the function polling its key binding.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'row': level_playing_utils.get_space_key_pressed,
    'flail': level_playing_utils.get_key_x_pressed,
}
def verbose_fn(env_timestep, player_index, current_player_index):
  """Prints race-start and nonzero rowing observations for this player."""
  lua_index = player_index + 1
  race_start = env_timestep.observation['WORLD.RACE_START']
  if race_start.any() and player_index == current_player_index:
    print('WORLD.RACE_START', race_start)
  for obs_name in (f'{lua_index}.PADDLES', f'{lua_index}.FLAILS'):
    value = env_timestep.observation[obs_name]
    if value:
      print(obs_name, value)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='boat_race__eight_races',
choices=environment_configs.keys(),
help='Level name to load')
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
# Activate verbose mode with --verbose=True.
parser.add_argument(
'--verbose', type=bool, default=False, help='Print debug information')
# Activate events printing mode with --print_events=True.
parser.add_argument(
'--print_events', type=bool, default=False, help='Print events')
parser.add_argument(
'--override_flail_effectiveness', type=float, default=0.1,
help='Override flail effectiveness to make debugging easier.')
args = parser.parse_args()
env_module = environment_configs[args.level_name]
env_config = env_module.get_config()
with config_dict.ConfigDict(env_config).unlocked() as env_config:
roles = env_config.default_player_roles
env_config.lab2d_settings = env_module.build(roles, env_config)
# For easier debug, override the flailEffectiveness
game_object_utils.get_first_named_component(
env_config.lab2d_settings['simulation']['prefabs']['seat_L'],
'BoatManager'
)['kwargs']['flailEffectiveness'] = args.override_flail_effectiveness
level_playing_utils.run_episode(
args.observation, args.settings, _ACTION_MAP, env_config,
level_playing_utils.RenderType.PYGAME, MAX_SCREEN_WIDTH,
MAX_SCREEN_HEIGHT, FRAMES_PER_SECOND,
verbose_fn if args.verbose else None,
print_events=args.print_events)
if __name__ == '__main__':
main()
|
meltingpot-main
|
meltingpot/human_players/play_boat_race.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `coins`.
Use `WASD` keys to move the character around.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import coins
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Rendering window limits and frame rate for the pygame display.
MAX_SCREEN_WIDTH = 600
MAX_SCREEN_HEIGHT = 450
FRAMES_PER_SECOND = 8
# Maps playable level names to their substrate config modules.
environment_configs = {
    'coins': coins,
}
def no_op() -> int:
  """Returns the 'do nothing' movement action code."""
  none_action = level_playing_utils.MOVEMENT_MAP['NONE']
  return none_action
# Maps each substrate action name to the function polling its key binding.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
}
def verbose_fn(env_timestep, player_index, current_player_index):
  """No-op verbose callback; `coins` has nothing extra to print."""
  del env_timestep, player_index, current_player_index
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='coins',
choices=environment_configs.keys(),
help='Level name to load')
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
# Activate verbose mode with --verbose=True.
parser.add_argument(
'--verbose', type=bool, default=False, help='Print debug information')
# Activate events printing mode with --print_events=True.
parser.add_argument(
'--print_events', type=bool, default=False, help='Print events')
args = parser.parse_args()
env_module = environment_configs[args.level_name]
env_config = env_module.get_config()
with config_dict.ConfigDict(env_config).unlocked() as env_config:
roles = env_config.default_player_roles
env_config.lab2d_settings = env_module.build(roles, env_config)
level_playing_utils.run_episode(
args.observation, args.settings, _ACTION_MAP, env_config,
level_playing_utils.RenderType.PYGAME, MAX_SCREEN_WIDTH,
MAX_SCREEN_HEIGHT, FRAMES_PER_SECOND,
verbose_fn if args.verbose else None,
print_events=args.print_events)
if __name__ == '__main__':
main()
|
meltingpot-main
|
meltingpot/human_players/play_coins.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing factory_commons.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `Z` to pick-up pickuppable objects.
Use `SPACE` to grasp a movable block.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import factory_commons__either_or
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
def get_push_pull() -> int:
  """Maps grappling keys to a shove value: +1 push, -1 pull, 0 neither."""
  # Right shift (push) takes precedence over left control (pull).
  if level_playing_utils.get_right_shift_pressed():
    return 1
  return -1 if level_playing_utils.get_left_control_pressed() else 0
# Maps playable level names to their substrate config modules.
environment_configs = {
    'factory_commons__either_or': factory_commons__either_or,
}
# Maps each substrate action name to the function polling its key binding.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'pickup': level_playing_utils.get_key_z_pressed,
    'grasp': level_playing_utils.get_key_x_pressed,
    # Grappling actions
    'hold': level_playing_utils.get_space_key_pressed,
    'shove': get_push_pull,
}
def verbose_fn(unused_env, unused_player_index, unused_current_player_index):
  """No-op verbose callback; nothing substrate-specific to report."""
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='factory_commons__either_or',
choices=environment_configs.keys(),
help='Level name to load')
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
# Activate verbose mode with --verbose=True.
parser.add_argument(
'--verbose', type=bool, default=False, help='Print debug information')
# Activate events printing mode with --print_events=True.
parser.add_argument(
'--print_events', type=bool, default=False, help='Print events')
args = parser.parse_args()
env_module = environment_configs[args.level_name]
env_config = env_module.get_config()
with config_dict.ConfigDict(env_config).unlocked() as env_config:
roles = env_config.default_player_roles
env_config.lab2d_settings = env_module.build(roles, env_config)
level_playing_utils.run_episode(
args.observation, args.settings, _ACTION_MAP,
env_config, level_playing_utils.RenderType.PYGAME,
verbose_fn=verbose_fn if args.verbose else None,
print_events=args.print_events)
if __name__ == '__main__':
main()
|
meltingpot-main
|
meltingpot/human_players/play_factory_commons.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `externality_mushrooms`.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire the zapper.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import externality_mushrooms__dense
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Maps playable level names to their substrate config modules.
environment_configs = {
    'externality_mushrooms__dense': externality_mushrooms__dense,
}
# Maps each substrate action name to the function polling its key binding.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'fireZap': level_playing_utils.get_space_key_pressed,
}
def verbose_fn(env_timestep, player_index, current_player_index):
  """Print using this function once enabling the option --verbose=True."""
  lua_index = player_index + 1
  # Per-step mushroom-eating observations for this player, by mushroom type.
  ate_hihe = env_timestep.observation[f'{lua_index}.ATE_MUSHROOM_HIHE']
  ate_fize = env_timestep.observation[f'{lua_index}.ATE_MUSHROOM_FIZE']
  ate_zife = env_timestep.observation[f'{lua_index}.ATE_MUSHROOM_ZIFE']
  # Per-step mushroom-destruction observations for this player, by type.
  destroyed_hihe = env_timestep.observation[
      f'{lua_index}.DESTROYED_MUSHROOM_HIHE']
  destroyed_fize = env_timestep.observation[
      f'{lua_index}.DESTROYED_MUSHROOM_FIZE']
  destroyed_zife = env_timestep.observation[
      f'{lua_index}.DESTROYED_MUSHROOM_ZIFE']
  at_least_one_nonzero = (ate_hihe + ate_fize + ate_zife +
                          destroyed_hihe + destroyed_fize + destroyed_zife)
  # Only print observations for the currently controlled player, and only
  # when at least one mushroom event happened this step.
  if player_index == current_player_index and at_least_one_nonzero > 0:
    print(
        f'player: {player_index} --- \n' +
        f'    ate_hihe: {ate_hihe} \n' +
        f'    ate_fize: {ate_fize} \n' +
        f'    ate_zife: {ate_zife} \n' +
        f'    destroyed_hihe: {destroyed_hihe} \n' +
        f'    destroyed_fize: {destroyed_fize} \n' +
        f'    destroyed_zife: {destroyed_zife} \n'
    )
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='externality_mushrooms__dense',
choices=environment_configs.keys(),
help='Level name to load')
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
# Activate verbose mode with --verbose=True.
parser.add_argument(
'--verbose', type=bool, default=False, help='Print debug information')
# Activate events printing mode with --print_events=True.
parser.add_argument(
'--print_events', type=bool, default=False, help='Print events')
args = parser.parse_args()
env_module = environment_configs[args.level_name]
env_config = env_module.get_config()
with config_dict.ConfigDict(env_config).unlocked() as env_config:
roles = env_config.default_player_roles
env_config.lab2d_settings = env_module.build(roles, env_config)
level_playing_utils.run_episode(
args.observation, args.settings, _ACTION_MAP,
env_config, level_playing_utils.RenderType.PYGAME,
verbose_fn=verbose_fn if args.verbose else None,
print_events=args.print_events)
if __name__ == '__main__':
main()
|
meltingpot-main
|
meltingpot/human_players/play_externality_mushrooms.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for playing the `Hidden Agenda` level interactively.
Use `WASD` keys to move the character around. `Q` and `E` to turn.
Use 'Space' key for the impostor to fire a beam.
Use numerical keys to vote.
Use 'Tab' to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import hidden_agenda
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Rendering window limits and frame rate for the pygame display.
MAX_SCREEN_WIDTH = 800
MAX_SCREEN_HEIGHT = 600
FRAMES_PER_SECOND = 8
# Maps each substrate action name to the function polling its key binding.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'tag': level_playing_utils.get_space_key_pressed,
    'vote': level_playing_utils.get_key_number_pressed,
}
# Maps playable level names to their substrate config modules.
environment_configs = {
    'hidden_agenda': hidden_agenda,
}
def verbose_fn(env_timestep, player_index, current_player_index):
  """Prints any nonzero VOTING observation for the given player."""
  del current_player_index
  voting_key = f'{player_index + 1}.VOTING'
  voting = env_timestep.observation[voting_key]
  if voting.any():
    print(voting_key, voting)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='hidden_agenda',
choices=environment_configs.keys(),
help='Level name to load')
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
# Activate verbose mode with --verbose=True.
parser.add_argument(
'--verbose', type=bool, default=False, help='Print debug information')
# Activate events printing mode with --print_events=True.
parser.add_argument(
'--print_events', type=bool, default=False, help='Print events')
args = parser.parse_args()
env_module = environment_configs[args.level_name]
env_config = env_module.get_config()
with config_dict.ConfigDict(env_config).unlocked() as env_config:
roles = env_config.default_player_roles
env_config.lab2d_settings = env_module.build(roles, env_config)
level_playing_utils.run_episode(
args.observation, args.settings, _ACTION_MAP, env_config,
level_playing_utils.RenderType.PYGAME, MAX_SCREEN_WIDTH,
MAX_SCREEN_HEIGHT, FRAMES_PER_SECOND,
verbose_fn if args.verbose else None,
print_events=args.print_events)
if __name__ == '__main__':
main()
|
meltingpot-main
|
meltingpot/human_players/play_hidden_agenda.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
meltingpot-main
|
meltingpot/human_players/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `gift_refinements`.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire the gift beam.
Use `1` to consume tokens.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import gift_refinements
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Maps playable level names to their substrate config modules.
environment_configs = {
    'gift_refinements': gift_refinements,
}
# Maps each substrate action name to the function polling its key binding.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'refineAndGift': level_playing_utils.get_space_key_pressed,
    'consumeTokens': level_playing_utils.get_key_number_one_pressed,
}
def verbose_fn(unused_env, unused_player_index, unused_current_player_index):
  """No-op verbose callback; nothing substrate-specific to report."""
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='gift_refinements',
choices=environment_configs.keys(),
help='Level name to load')
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
# Activate verbose mode with --verbose=True.
parser.add_argument(
'--verbose', type=bool, default=False, help='Print debug information')
# Activate events printing mode with --print_events=True.
parser.add_argument(
'--print_events', type=bool, default=False, help='Print events')
args = parser.parse_args()
env_module = environment_configs[args.level_name]
env_config = env_module.get_config()
with config_dict.ConfigDict(env_config).unlocked() as env_config:
roles = env_config.default_player_roles
env_config.lab2d_settings = env_module.build(roles, env_config)
level_playing_utils.run_episode(
args.observation, args.settings, _ACTION_MAP,
env_config, level_playing_utils.RenderType.PYGAME,
verbose_fn=verbose_fn if args.verbose else None,
print_events=args.print_events)
if __name__ == '__main__':
main()
|
meltingpot-main
|
meltingpot/human_players/play_gift_refinements.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `commons_harvest`.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire the zapper.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import commons_harvest__closed
from meltingpot.configs.substrates import commons_harvest__open
from meltingpot.configs.substrates import commons_harvest__partnership
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Maps playable level names to their substrate config modules.
environment_configs = {
    'commons_harvest__closed': commons_harvest__closed,
    'commons_harvest__open': commons_harvest__open,
    'commons_harvest__partnership': commons_harvest__partnership,
}
# Maps each substrate action name to the function polling its key binding.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'fireZap': level_playing_utils.get_space_key_pressed,
}
def verbose_fn(unused_env, unused_player_index, unused_current_player_index):
  """No-op verbose callback; nothing substrate-specific to report."""
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='commons_harvest__closed',
choices=environment_configs.keys(),
help='Level name to load')
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
# Activate verbose mode with --verbose=True.
parser.add_argument(
'--verbose', type=bool, default=False, help='Print debug information')
# Activate events printing mode with --print_events=True.
parser.add_argument(
'--print_events', type=bool, default=False, help='Print events')
args = parser.parse_args()
env_module = environment_configs[args.level_name]
env_config = env_module.get_config()
with config_dict.ConfigDict(env_config).unlocked() as env_config:
roles = env_config.default_player_roles
env_config.lab2d_settings = env_module.build(roles, env_config)
level_playing_utils.run_episode(
args.observation, args.settings, _ACTION_MAP,
env_config, level_playing_utils.RenderType.PYGAME,
verbose_fn=verbose_fn if args.verbose else None,
print_events=args.print_events)
if __name__ == '__main__':
main()
|
meltingpot-main
|
meltingpot/human_players/play_commons_harvest.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A human player for testing fruit_market.
Note: The real agents can make and accept offers up to size 3 (up to 3 apples
for up to 3 bananas). However this human player script only allows offers up to
size 1. The reason is just that we started to run out of keys on the keyboard to
represent higher offers.
Use `WASD` keys to move the player around.
Use `Q and E` to turn the player.
Use `TAB` to switch which player you are controlling.
Use 'Z' to eat an apple from your inventory.
Use 'X' to eat a banana from your inventory.
"""
import argparse
import json
from meltingpot.configs.substrates import fruit_market__concentric_rivers
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
import pygame
def get_offer_apple_pressed() -> int:
  """Returns the apple-offer delta: -1 for key 1, +1 for key 2, else 0."""
  pressed = pygame.key.get_pressed()
  if pressed[pygame.K_1]:
    return -1
  return 1 if pressed[pygame.K_2] else 0
def get_offer_banana_pressed() -> int:
  """Returns the banana-offer delta: -1 for key 3, +1 for key 4, else 0."""
  pressed = pygame.key.get_pressed()
  if pressed[pygame.K_3]:
    return -1
  return 1 if pressed[pygame.K_4] else 0
def get_push_pull() -> int:
  """Maps grappling keys to a shove value: +1 push, -1 pull, 0 neither."""
  # Right shift (push) takes precedence over left control (pull).
  if level_playing_utils.get_right_shift_pressed():
    return 1
  return -1 if level_playing_utils.get_left_control_pressed() else 0
# Maps playable level names to their substrate config modules.
environment_configs = {
    'fruit_market__concentric_rivers': fruit_market__concentric_rivers,
}
# Maps each substrate action name to the function polling its key binding.
_ACTION_MAP = {
    # Basic movement actions
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    # Trade actions
    'eat_apple': level_playing_utils.get_key_z_pressed,
    'eat_banana': level_playing_utils.get_key_x_pressed,
    'offer_apple': get_offer_apple_pressed,  # 1 and 2
    'offer_banana': get_offer_banana_pressed,  # 3 and 4
    'offer_cancel': level_playing_utils.get_key_number_five_pressed,
    # Grappling actions
    'hold': level_playing_utils.get_space_key_pressed,
    'shove': get_push_pull,
}
def verbose_fn(env_timestep, player_index, current_player_index):
  """Print using this function once enabling the option --verbose=True."""
  lua_index = player_index + 1
  inventory = env_timestep.observation[f'{lua_index}.INVENTORY']
  hunger = env_timestep.observation[f'{lua_index}.HUNGER']
  my_offer = env_timestep.observation[f'{lua_index}.MY_OFFER']
  offers = env_timestep.observation[f'{lua_index}.OFFERS']
  # Only print offer observations for the currently controlled player.
  # NOTE(review): the messages below say 'player 0' but actually show the
  # controlled player's view -- confirm whether the label is intentional.
  if player_index == current_player_index:
    print(
        f'player: {player_index} --- inventory: {inventory}, hunger: {hunger}')
    print(f'**player 0 view of offers:\n{offers}')
    print(f'**player 0 view of own offer: {my_offer}')
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name',
type=str,
default='fruit_market__concentric_rivers',
choices=environment_configs.keys(),
help='Level name to load')
parser.add_argument(
'--observation', type=str, default='RGB', help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
# Activate verbose mode with --verbose=True.
parser.add_argument(
'--verbose', type=bool, default=False, help='Print debug information')
# Activate events printing mode with --print_events=True.
parser.add_argument(
'--print_events', type=bool, default=False, help='Print events')
args = parser.parse_args()
env_module = environment_configs[args.level_name]
env_config = env_module.get_config()
with config_dict.ConfigDict(env_config).unlocked() as env_config:
roles = env_config.default_player_roles
env_config.lab2d_settings = env_module.build(roles, env_config)
level_playing_utils.run_episode(
args.observation, args.settings, _ACTION_MAP,
env_config, level_playing_utils.RenderType.PYGAME,
verbose_fn=verbose_fn if args.verbose else None,
print_events=args.print_events)
if __name__ == '__main__':
main()
|
meltingpot-main
|
meltingpot/human_players/play_fruit_market.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for human_players."""
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from meltingpot.configs.substrates import allelopathic_harvest__open
from meltingpot.configs.substrates import boat_race__eight_races
from meltingpot.configs.substrates import chemistry__three_metabolic_cycles
from meltingpot.configs.substrates import chemistry__three_metabolic_cycles_with_plentiful_distractors
from meltingpot.configs.substrates import chemistry__two_metabolic_cycles
from meltingpot.configs.substrates import chemistry__two_metabolic_cycles_with_distractors
from meltingpot.configs.substrates import clean_up
from meltingpot.configs.substrates import coins
from meltingpot.configs.substrates import collaborative_cooking__asymmetric
from meltingpot.configs.substrates import commons_harvest__closed
from meltingpot.configs.substrates import coop_mining
from meltingpot.configs.substrates import daycare
from meltingpot.configs.substrates import externality_mushrooms__dense
from meltingpot.configs.substrates import factory_commons__either_or
from meltingpot.configs.substrates import fruit_market__concentric_rivers
from meltingpot.configs.substrates import gift_refinements
from meltingpot.configs.substrates import paintball__capture_the_flag
from meltingpot.configs.substrates import paintball__king_of_the_hill
from meltingpot.configs.substrates import predator_prey__alley_hunt
from meltingpot.configs.substrates import predator_prey__orchard
from meltingpot.configs.substrates import prisoners_dilemma_in_the_matrix__arena
from meltingpot.configs.substrates import territory__rooms
from meltingpot.human_players import level_playing_utils
from meltingpot.human_players import play_allelopathic_harvest
from meltingpot.human_players import play_anything_in_the_matrix
from meltingpot.human_players import play_boat_race
from meltingpot.human_players import play_chemistry
from meltingpot.human_players import play_clean_up
from meltingpot.human_players import play_coins
from meltingpot.human_players import play_collaborative_cooking
from meltingpot.human_players import play_commons_harvest
from meltingpot.human_players import play_coop_mining
from meltingpot.human_players import play_daycare
from meltingpot.human_players import play_externality_mushrooms
from meltingpot.human_players import play_factory_commons
from meltingpot.human_players import play_fruit_market
from meltingpot.human_players import play_gift_refinements
from meltingpot.human_players import play_paintball
from meltingpot.human_players import play_predator_and_prey
from meltingpot.human_players import play_territory
from ml_collections import config_dict
import pygame
class PlayLevelTest(parameterized.TestCase):
  """Smoke-tests that each human-player script can run a short episode."""

  # One named parameter set per (substrate config, play script) pair.
  @parameterized.named_parameters(
      ('allelopathic_harvest__open', allelopathic_harvest__open,
       play_allelopathic_harvest),
      ('boat_race__eight_races', boat_race__eight_races, play_boat_race),
      ('chemistry__three_metabolic_cycles', chemistry__three_metabolic_cycles,
       play_chemistry),
      ('chemistry__three_metabolic_cycles_with_plentiful_distractors',
       chemistry__three_metabolic_cycles_with_plentiful_distractors,
       play_chemistry),
      ('chemistry__two_metabolic_cycles', chemistry__two_metabolic_cycles,
       play_chemistry),
      ('chemistry__two_metabolic_cycles_with_distractors',
       chemistry__two_metabolic_cycles_with_distractors, play_chemistry),
      ('clean_up', clean_up, play_clean_up),
      ('coins', coins, play_coins),
      ('collaborative_cooking__asymmetric', collaborative_cooking__asymmetric,
       play_collaborative_cooking),
      ('commons_harvest__closed', commons_harvest__closed,
       play_commons_harvest),
      ('coop_mining', coop_mining, play_coop_mining),
      ('daycare', daycare, play_daycare),
      ('externality_mushrooms__dense', externality_mushrooms__dense,
       play_externality_mushrooms),
      ('factory_commons__either_or', factory_commons__either_or,
       play_factory_commons),
      ('fruit_market__concentric_rivers', fruit_market__concentric_rivers,
       play_fruit_market),
      ('gift_refinements', gift_refinements, play_gift_refinements),
      ('paintball__capture_the_flag', paintball__capture_the_flag,
       play_paintball),
      ('paintball__king_of_the_hill', paintball__king_of_the_hill,
       play_paintball),
      ('predator_prey__alley_hunt', predator_prey__alley_hunt,
       play_predator_and_prey),
      ('predator_prey__orchard', predator_prey__orchard,
       play_predator_and_prey),
      ('prisoners_dilemma_in_the_matrix__arena',
       prisoners_dilemma_in_the_matrix__arena, play_anything_in_the_matrix),
      ('territory__rooms', territory__rooms, play_territory),
  )
  # Mock out pygame's key/display/event/time so the episode runs headless
  # without a real window or keyboard.
  @mock.patch.object(pygame, 'key')
  @mock.patch.object(pygame, 'display')
  @mock.patch.object(pygame, 'event')
  @mock.patch.object(pygame, 'time')
  def test_run_level(
      self, config_module, play_module, unused_k, unused_d, unused_e, unused_t):
    """Builds the substrate, caps it at 10 frames, and runs one episode."""
    env_module = config_module
    env_config = env_module.get_config()
    with config_dict.ConfigDict(env_config).unlocked() as env_config:
      roles = env_config.default_player_roles
      env_config.lab2d_settings = env_module.build(roles, env_config)
      # Keep the test fast: truncate the episode to 10 frames.
      env_config['lab2d_settings']['maxEpisodeLengthFrames'] = 10
    level_playing_utils.run_episode(
        'RGB', {}, play_module._ACTION_MAP, env_config)


if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/human_players/play_level_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing a Melting Pot level.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire the zapper.
Use `TAB` to switch between players.
"""
import collections
import enum
from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Tuple
import dm_env
import dmlab2d
from meltingpot.utils.substrates import builder
from ml_collections import config_dict
import numpy as np
import pygame
# RGB color used for onscreen text rendered by `run_episode`.
WHITE = (255, 255, 255)

# Maps human-readable movement names to the discrete values of the
# substrates' `move` action.
MOVEMENT_MAP = {
    'NONE': 0,
    'FORWARD': 1,
    'RIGHT': 2,
    'BACKWARD': 3,
    'LEFT': 4,
}

# Factory that builds a dmlab2d.Environment from keyword arguments.
EnvBuilder = Callable[..., dmlab2d.Environment]  # Only supporting kwargs.

# Maps a discrete action name to a zero-argument callable that returns the
# action value currently selected by the human player.
ActionMap = Mapping[str, Callable[[], int]]
class RenderType(enum.Enum):
  """How (and whether) an episode is rendered to a human player."""
  NONE = 0    # Headless: no window and no human input polling.
  PYGAME = 1  # Interactive rendering through a PyGame window.
def get_random_direction() -> int:
  """Gets a random direction (uniform over the MOVEMENT_MAP values)."""
  candidate_values = list(MOVEMENT_MAP.values())
  return np.random.choice(candidate_values)
def get_random_turn() -> int:
  """Gets a random turn increment (uniform over -1, 0, +1)."""
  turn_options = [-1, 0, 1]
  return np.random.choice(turn_options)
def get_random_fire() -> int:
  """Gets a random fire decision (uniform over 0 and 1)."""
  fire_options = [0, 1]
  return np.random.choice(fire_options)
def get_direction_pressed() -> int:
  """Gets the movement direction currently pressed on the keyboard.

  Arrow keys and WASD are both supported; the first matching direction in
  (FORWARD, RIGHT, BACKWARD, LEFT) order wins, otherwise NONE is returned.
  """
  pressed = pygame.key.get_pressed()
  bindings = (
      ((pygame.K_UP, pygame.K_w), 'FORWARD'),
      ((pygame.K_RIGHT, pygame.K_d), 'RIGHT'),
      ((pygame.K_DOWN, pygame.K_s), 'BACKWARD'),
      ((pygame.K_LEFT, pygame.K_a), 'LEFT'),
  )
  for keys, direction in bindings:
    if any(pressed[key] for key in keys):
      return MOVEMENT_MAP[direction]
  return MOVEMENT_MAP['NONE']
def get_turn_pressed() -> int:
  """Calculates the turn increment from the keyboard (Q/Delete left, E/PageDown right)."""
  pressed = pygame.key.get_pressed()
  bindings = (
      ((pygame.K_DELETE, pygame.K_q), -1),
      ((pygame.K_PAGEDOWN, pygame.K_e), 1),
  )
  for keys, increment in bindings:
    if any(pressed[key] for key in keys):
      return increment
  return 0
def get_space_key_pressed() -> int:
  """Returns 1 while the space bar is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_SPACE]))
def get_key_number_pressed() -> int:
  """Returns the lowest digit key (0-9) currently pressed, or -1 if none.

  Returns:
    The digit corresponding to the pressed number key, or -1 when no
    number key is pressed.
  """
  number_keys = (pygame.K_0, pygame.K_1, pygame.K_2, pygame.K_3, pygame.K_4,
                 pygame.K_5, pygame.K_6, pygame.K_7, pygame.K_8, pygame.K_9)
  # Poll the keyboard state once, rather than once per candidate key.
  pressed = pygame.key.get_pressed()
  for num, key in enumerate(number_keys):
    if pressed[key]:
      return num
  return -1
def get_key_number_one_pressed() -> int:
  """Returns 1 while the '1' key is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_1]))
def get_key_number_two_pressed() -> int:
  """Returns 1 while the '2' key is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_2]))
def get_key_number_three_pressed() -> int:
  """Returns 1 while the '3' key is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_3]))
def get_key_number_four_pressed() -> int:
  """Returns 1 while the '4' key is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_4]))
def get_key_number_five_pressed() -> int:
  """Returns 1 while the '5' key is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_5]))
def get_left_control_pressed() -> int:
  """Returns 1 while the left Ctrl key is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_LCTRL]))
def get_left_shift_pressed() -> int:
  """Returns 1 while the left Shift key is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_LSHIFT]))
def get_right_shift_pressed() -> int:
  """Returns 1 while the right Shift key is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_RSHIFT]))
def get_key_c_pressed() -> int:
  """Returns 1 while the 'c' key is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_c]))
def get_key_z_pressed() -> int:
  """Returns 1 while the 'z' key is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_z]))
def get_key_x_pressed() -> int:
  """Returns 1 while the 'x' key is held down, otherwise 0."""
  return int(bool(pygame.key.get_pressed()[pygame.K_x]))
def _split_key(key: str) -> Tuple[str, str]:
"""Splits the key into player index and name."""
return tuple(key.split('.', maxsplit=1))
def _get_rewards(timestep: dm_env.TimeStep) -> Mapping[str, float]:
  """Gets the per-player rewards, keyed by player prefix.

  Only top-level "<prefix>.REWARD" observations are collected; keys with
  further nesting (e.g. "<prefix>.FOO.REWARD") are ignored.
  """
  rewards = {}
  for key, value in timestep.observation.items():
    prefix, _, suffix = key.partition('.')
    if suffix == 'REWARD':
      rewards[prefix] = value
  return rewards
class ActionReader(object):
  """Convert keyboard actions to environment actions."""

  def __init__(self, env: dmlab2d.Environment, action_map: ActionMap):
    # Environment actions are named "<player_prefix>.<action_name>".
    self._action_map = action_map
    self._action_spec = env.action_spec()
    assert isinstance(self._action_spec, dict)
    # Collect the distinct per-player action names (without prefixes).
    self._action_names = set()
    for action_key in self._action_spec:
      self._action_names.add(_split_key(action_key)[1])

  def step(self, player_prefix: str) -> Mapping[str, int]:
    """Update the actions of player `player_prefix`; all others get 0."""
    actions = dict.fromkeys(self._action_spec, 0)
    for action_name in self._action_names:
      reader = self._action_map[action_name]
      actions[f'{player_prefix}.{action_name}'] = reader()
    return actions
def run_episode(
    render_observation: str,
    config_overrides: Dict[str, Any],
    action_map: ActionMap,
    full_config: config_dict.ConfigDict,
    interactive: RenderType = RenderType.PYGAME,
    screen_width: int = 800,
    screen_height: int = 600,
    fps: int = 8,
    verbose_fn: Optional[Callable[[dm_env.TimeStep, int, int], None]] = None,
    text_display_fn: Optional[Callable[[dm_env.TimeStep, int], str]] = None,
    text_font_size: int = 36,
    text_x_pos: int = 20,
    text_y_pos: int = 20,
    text_color: Tuple[int, ...] = WHITE,
    env_builder: EnvBuilder = builder.builder,
    print_events: Optional[bool] = False,
    player_prefixes: Optional[Sequence[str]] = None,
    default_observation: str = 'WORLD.RGB',
    reset_env_when_done: bool = False,
    initial_player_index: int = 0,
) -> None:
  """Run multiplayer environment, with per player rendering and actions.

  This function initialises a Melting Pot environment with the given
  configuration (including possible config overrides), and optionally launches
  the episode as an interactive game using pygame. The controls are described
  in the action_map, whose keys correspond to discrete actions of the
  environment.

  Args:
    render_observation: A string consisting of the observation name to render.
      Usually 'RGB' for the third person world view.
    config_overrides: A dictionary of settings to override from the original
      `full_config.lab2d_settings`. Typically these are used to set the number
      of players.
    action_map: A dictionary of (discrete) action names to functions that
      detect the keys that correspond to its possible action values. For
      example, for movement, we might want to have WASD navigation tied to the
      'move' action name using `get_direction_pressed`. See examples in the
      various play_*.py scripts.
    full_config: The full configuration for the Melting Pot environment. These
      usually come from meltingpot/python/configs/environments.
    interactive: A RenderType representing whether the episode should be run
      with PyGame, or without any interface. Setting interactive to false
      enables running e.g. a random agent via the action_map returning actions
      without polling PyGame (or any human input). Non interactive runs
      ignore the screen_width, screen_height and fps parameters.
    screen_width: Width, in pixels, of the window to render the game.
    screen_height: Height, in pixels, of the window to render the game.
    fps: Frames per second of the game.
    verbose_fn: An optional function that will be executed for every step of
      the environment. It receives the environment timestep, a player index
      (will be called for every index), and the current player index. This is
      typically used to print extra information that would be useful for
      debugging a running episode.
    text_display_fn: An optional function for displaying text on screen. It
      receives the environment and the player index, and returns a string to
      display on the pygame screen.
    text_font_size: the font size of onscreen text (from `text_display_fn`)
    text_x_pos: the x position of onscreen text (from `text_display_fn`)
    text_y_pos: the x position of onscreen text (from `text_display_fn`)
    text_color: RGB color of onscreen text (from `text_display_fn`)
    env_builder: The environment builder function to use. By default it is
      meltingpot.builder.
    print_events: An optional bool that if enabled will print events captured
      from the dmlab2d events API on any timestep where they occur.
    player_prefixes: If given, use these as the prefixes of player actions.
      Pressing TAB will cycle through these. If not given, use the standard
      ('1', '2', ..., numPlayers).
    default_observation: Default observation to render if 'render_observation'
      or '{player_prefix}.{render_observation}' is not found in the dict.
    reset_env_when_done: if True, reset the environment once the episode has
      terminated; useful for playing multiple episodes in a row. Note this
      will cause this function to loop infinitely.
    initial_player_index: Initial index of the player to play as. Defaults to
      0. (Players are always switchable via the tab key.)
  """
  # Merge caller-provided overrides (e.g. numPlayers) into the base settings.
  full_config.lab2d_settings.update(config_overrides)
  if player_prefixes is None:
    player_count = full_config.lab2d_settings.get('numPlayers', 1)
    # By default, we use lua indices (which start at 1) as player prefixes.
    player_prefixes = [f'{i+1}' for i in range(player_count)]
  else:
    player_count = len(player_prefixes)
  print(f'Running an episode with {player_count} players: {player_prefixes}.')
  with env_builder(**full_config) as env:
    if len(player_prefixes) != player_count:
      raise ValueError('Player prefixes, when specified, must be of the same '
                       'length as the number of players.')
    player_index = initial_player_index
    timestep = env.reset()
    score = collections.defaultdict(float)
    action_reader = ActionReader(env, action_map)
    if interactive == RenderType.PYGAME:
      pygame.init()
      pygame.display.set_caption('Melting Pot: {}'.format(
          full_config.lab2d_settings.levelName))
      font = pygame.font.SysFont(None, text_font_size)
    scale = 1
    observation_spec = env.observation_spec()
    # Find the spec of the observation to render, so we can size the window.
    if render_observation in observation_spec:
      obs_spec = observation_spec[render_observation]
    elif f'1.{render_observation}' in observation_spec:
      # This assumes all players have the same observation, which is true for
      # MeltingPot environments.
      obs_spec = observation_spec[f'1.{render_observation}']
    else:
      # Falls back to 'default_observation.'
      obs_spec = observation_spec[default_observation]
    observation_shape = obs_spec.shape
    observation_height = observation_shape[0]
    observation_width = observation_shape[1]
    # Integer upscaling factor that fits the observation into the window.
    scale = min(screen_height // observation_height,
                screen_width // observation_width)
    if interactive == RenderType.PYGAME:
      game_display = pygame.display.set_mode(
          (observation_width * scale, observation_height * scale))
      clock = pygame.time.Clock()
    stop = False
    # Game loop
    while True:
      # Check for pygame controls
      if interactive == RenderType.PYGAME:
        for event in pygame.event.get():
          if event.type == pygame.QUIT:
            stop = True
          if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_TAB:
              player_index = (player_index + 1) % player_count
            # Stop processing queued events after the first key press.
            break
      player_prefix = player_prefixes[player_index] if player_prefixes else ''
      if stop:
        break
      # Compute next timestep
      actions = action_reader.step(player_prefix) if player_count else []
      timestep = env.step(actions)
      if timestep.step_type == dm_env.StepType.LAST:
        if reset_env_when_done:
          timestep = env.reset()
        else:
          break
      rewards = _get_rewards(timestep)
      for i, prefix in enumerate(player_prefixes):
        if verbose_fn:
          verbose_fn(timestep, i, player_index)
        score[prefix] += rewards[prefix]
        # Only echo non-zero rewards for the player currently being played.
        if i == player_index and rewards[prefix] != 0:
          print(f'Player {prefix} Score: {score[prefix]}')
      # Print events if applicable
      if print_events and hasattr(env, 'events'):
        events = env.events()
        # Only print events on timesteps when there are events to print.
        if events:
          print(events)
      # pygame display
      if interactive == RenderType.PYGAME:
        # show visual observation
        if render_observation in timestep.observation:
          obs = timestep.observation[render_observation]
        elif f'{player_prefix}.{render_observation}' in timestep.observation:
          obs = timestep.observation[f'{player_prefix}.{render_observation}']
        else:
          # Fall back to default_observation.
          obs = timestep.observation[default_observation]
        obs = np.transpose(obs, (1, 0, 2))  # PyGame is column major!
        surface = pygame.surfarray.make_surface(obs)
        rect = surface.get_rect()
        surf = pygame.transform.scale(
            surface, (rect[2] * scale, rect[3] * scale))
        game_display.blit(surf, dest=(0, 0))
        # show text
        if text_display_fn:
          if player_count == 1:
            text_str = text_display_fn(timestep, 0)
          else:
            text_str = text_display_fn(timestep, player_index)
          img = font.render(text_str, True, text_color)
          game_display.blit(img, (text_x_pos, text_y_pos))
        # tick
        pygame.display.update()
        clock.tick(fps)
    if interactive == RenderType.PYGAME:
      pygame.quit()
    # Final per-player score summary.
    for prefix in player_prefixes:
      print('Player %s: score is %g' % (prefix, score[prefix]))
|
meltingpot-main
|
meltingpot/human_players/level_playing_utils.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `collaborative_cooking`.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to use the interact action.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import collaborative_cooking__asymmetric
from meltingpot.configs.substrates import collaborative_cooking__circuit
from meltingpot.configs.substrates import collaborative_cooking__cramped
from meltingpot.configs.substrates import collaborative_cooking__crowded
from meltingpot.configs.substrates import collaborative_cooking__figure_eight
from meltingpot.configs.substrates import collaborative_cooking__forced
from meltingpot.configs.substrates import collaborative_cooking__ring
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Maximum rendering window size, in pixels.
MAX_SCREEN_WIDTH = 800
MAX_SCREEN_HEIGHT = 600
# Interactive rendering speed.
FRAMES_PER_SECOND = 8

# Maps each discrete action of the substrate to the key-polling function
# that supplies its value every frame.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'interact': level_playing_utils.get_space_key_pressed,
}

# Substrate config modules selectable via the --level_name flag.
environment_configs = {
    'collaborative_cooking__asymmetric': collaborative_cooking__asymmetric,
    'collaborative_cooking__circuit': collaborative_cooking__circuit,
    'collaborative_cooking__cramped': collaborative_cooking__cramped,
    'collaborative_cooking__crowded': collaborative_cooking__crowded,
    'collaborative_cooking__figure_eight': collaborative_cooking__figure_eight,
    'collaborative_cooking__forced': collaborative_cooking__forced,
    'collaborative_cooking__ring': collaborative_cooking__ring,
}
def verbose_fn(env_timestep, player_index, current_player_index):
  """Prints cooking-pot events for the player currently being controlled."""
  if player_index != current_player_index:
    return
  lua_index = player_index + 1
  tracked_observations = ('ADDED_INGREDIENT_TO_COOKING_POT',
                          'COLLECTED_SOUP_FROM_COOKING_POT')
  for obs in tracked_observations:
    value = env_timestep.observation[f'{lua_index}.{obs}']
    if value:
      print(obs, value)
def main():
  """Parses command-line flags and runs an interactive episode."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      '--level_name',
      type=str,
      default='collaborative_cooking__cramped',
      choices=environment_configs.keys(),
      help='Level name to load')
  parser.add_argument(
      '--observation', type=str, default='RGB', help='Observation to render')
  parser.add_argument(
      '--settings', type=json.loads, default={}, help='Settings as JSON string')
  # Pass --verbose=True to enable the per-step debug callback.
  parser.add_argument(
      '--verbose', type=bool, default=False, help='Print debug information')
  # Pass --print_events=True to echo dmlab2d events.
  parser.add_argument(
      '--print_events', type=bool, default=False, help='Print events')
  args = parser.parse_args()

  config_module = environment_configs[args.level_name]
  config = config_module.get_config()
  with config_dict.ConfigDict(config).unlocked() as config:
    roles = config.default_player_roles
    config.lab2d_settings = config_module.build(roles, config)
  debug_callback = verbose_fn if args.verbose else None
  level_playing_utils.run_episode(
      args.observation, args.settings, _ACTION_MAP, config,
      level_playing_utils.RenderType.PYGAME, MAX_SCREEN_WIDTH,
      MAX_SCREEN_HEIGHT, FRAMES_PER_SECOND,
      debug_callback,
      print_events=args.print_events)


if __name__ == '__main__':
  main()
|
meltingpot-main
|
meltingpot/human_players/play_collaborative_cooking.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `allelopathic_harvest`.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire the zapper.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import allelopathic_harvest__open
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Substrate config modules selectable via the --level_name flag.
environment_configs = {
    'allelopathic_harvest__open': allelopathic_harvest__open,
}

# Maps each discrete action of the substrate to the key-polling function
# that supplies its value every frame. Keys 1-3 fire the three berry colors.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'fireZap': level_playing_utils.get_space_key_pressed,
    'fire_1': level_playing_utils.get_key_number_one_pressed,
    'fire_2': level_playing_utils.get_key_number_two_pressed,
    'fire_3': level_playing_utils.get_key_number_three_pressed,
}
def verbose_fn(unused_env, unused_player_index, unused_current_player_index):
  """No-op verbose callback for this substrate."""
  del unused_env, unused_player_index, unused_current_player_index
def main():
  """Parses command-line flags and runs an interactive episode."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      '--level_name', type=str, default='allelopathic_harvest__open',
      choices=environment_configs.keys(),
      help='Level name to load')
  parser.add_argument(
      '--observation', type=str, default='RGB', help='Observation to render')
  parser.add_argument(
      '--settings', type=json.loads, default={}, help='Settings as JSON string')
  # Pass --verbose=True to enable the per-step debug callback.
  parser.add_argument(
      '--verbose', type=bool, default=False, help='Print debug information')
  # Pass --print_events=True to echo dmlab2d events.
  parser.add_argument(
      '--print_events', type=bool, default=False, help='Print events')
  args = parser.parse_args()

  config_module = environment_configs[args.level_name]
  config = config_module.get_config()
  with config_dict.ConfigDict(config).unlocked() as config:
    roles = config.default_player_roles
    config.lab2d_settings = config_module.build(roles, config)
  debug_callback = verbose_fn if args.verbose else None
  level_playing_utils.run_episode(
      args.observation, args.settings, _ACTION_MAP,
      config, level_playing_utils.RenderType.PYGAME,
      verbose_fn=debug_callback,
      print_events=args.print_events)


if __name__ == '__main__':
  main()
|
meltingpot-main
|
meltingpot/human_players/play_allelopathic_harvest.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing the `predator_prey__*` substrates.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `TAB` to switch between players.
Use `space bar` to select the 'eat' (i.e. the `interact` action).
"""
import argparse
import json
from meltingpot.configs.substrates import predator_prey__alley_hunt
from meltingpot.configs.substrates import predator_prey__open
from meltingpot.configs.substrates import predator_prey__orchard
from meltingpot.configs.substrates import predator_prey__random_forest
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Maximum rendering window size, in pixels.
MAX_SCREEN_WIDTH = 800
MAX_SCREEN_HEIGHT = 600
# Interactive rendering speed.
FRAMES_PER_SECOND = 8

# Substrate config modules selectable via the --level_name flag.
environment_configs = {
    'predator_prey__alley_hunt': predator_prey__alley_hunt,
    'predator_prey__open': predator_prey__open,
    'predator_prey__orchard': predator_prey__orchard,
    'predator_prey__random_forest': predator_prey__random_forest,
}

# Maps each discrete action of the substrate to the key-polling function
# that supplies its value every frame.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    # 'interact' is the 'eat' action for this substrate.
    'interact': level_playing_utils.get_space_key_pressed,
}
def verbose_fn(env_timestep, player_index, current_player_index):
  """Print using this function once enabling the option --verbose=True."""
  lua_index = player_index + 1
  stamina = env_timestep.observation[f'{lua_index}.STAMINA']
  # Only print observations from current player.
  if player_index != current_player_index:
    return
  print(f'player: {player_index} --- stamina: {stamina}')
def main():
  """Parses command-line flags and runs an interactive episode."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      '--level_name', type=str,
      default='predator_prey__alley_hunt',
      choices=environment_configs.keys(),
      help='Level name to load')
  parser.add_argument(
      '--observation', type=str, default='RGB', help='Observation to render')
  parser.add_argument(
      '--settings', type=json.loads, default={}, help='Settings as JSON string')
  # Pass --verbose=True to enable the per-step debug callback.
  parser.add_argument(
      '--verbose', type=bool, default=False, help='Print debug information')
  # Pass --print_events=True to echo dmlab2d events.
  parser.add_argument(
      '--print_events', type=bool, default=False, help='Print events')
  args = parser.parse_args()

  config_module = environment_configs[args.level_name]
  config = config_module.get_config()
  with config_dict.ConfigDict(config).unlocked() as config:
    roles = config.default_player_roles
    config.lab2d_settings = config_module.build(roles, config)
  debug_callback = verbose_fn if args.verbose else None
  level_playing_utils.run_episode(
      args.observation, args.settings, _ACTION_MAP, config,
      level_playing_utils.RenderType.PYGAME, MAX_SCREEN_WIDTH,
      MAX_SCREEN_HEIGHT, FRAMES_PER_SECOND,
      debug_callback,
      print_events=args.print_events)


if __name__ == '__main__':
  main()
|
meltingpot-main
|
meltingpot/human_players/play_predator_and_prey.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `clean_up`.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to fire the zapper.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import clean_up
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Substrate config modules selectable via the --level_name flag.
environment_configs = {
    'clean_up': clean_up,
}

# Maps each discrete action of the substrate to the key-polling function
# that supplies its value every frame. Here '1' zaps and '2' cleans.
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'fireZap': level_playing_utils.get_key_number_one_pressed,
    'fireClean': level_playing_utils.get_key_number_two_pressed,
}
def verbose_fn(env_timestep, player_index, current_player_index):
  """Print using this function once enabling the option --verbose=True."""
  lua_index = player_index + 1
  observation = env_timestep.observation
  cleaned = observation[f'{lua_index}.PLAYER_CLEANED']
  ate = observation[f'{lua_index}.PLAYER_ATE_APPLE']
  num_zapped_this_step = observation[
      f'{lua_index}.NUM_OTHERS_PLAYER_ZAPPED_THIS_STEP']
  num_others_cleaned = observation[
      f'{lua_index}.NUM_OTHERS_WHO_CLEANED_THIS_STEP']
  num_others_ate = observation[
      f'{lua_index}.NUM_OTHERS_WHO_ATE_THIS_STEP']
  # Only print observations from current player.
  if player_index != current_player_index:
    return
  print(f'player: {player_index} --- player_cleaned: {cleaned} --- '
        f'player_ate_apple: {ate} --- num_others_cleaned: '
        f'{num_others_cleaned} --- num_others_ate: {num_others_ate} '
        f'---num_others_player_zapped_this_step: {num_zapped_this_step}')
def main():
  """Parses command-line flags and runs an interactive episode."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      '--level_name', type=str, default='clean_up',
      choices=environment_configs.keys(),
      help='Level name to load')
  parser.add_argument(
      '--observation', type=str, default='RGB', help='Observation to render')
  parser.add_argument(
      '--settings', type=json.loads, default={}, help='Settings as JSON string')
  # Pass --verbose=True to enable the per-step debug callback.
  parser.add_argument(
      '--verbose', type=bool, default=False, help='Print debug information')
  # Pass --print_events=True to echo dmlab2d events.
  parser.add_argument(
      '--print_events', type=bool, default=False, help='Print events')
  args = parser.parse_args()

  config_module = environment_configs[args.level_name]
  config = config_module.get_config()
  with config_dict.ConfigDict(config).unlocked() as config:
    roles = config.default_player_roles
    config.lab2d_settings = config_module.build(roles, config)
  debug_callback = verbose_fn if args.verbose else None
  level_playing_utils.run_episode(
      args.observation, args.settings, _ACTION_MAP,
      config, level_playing_utils.RenderType.PYGAME,
      verbose_fn=debug_callback,
      print_events=args.print_events)


if __name__ == '__main__':
  main()
|
meltingpot-main
|
meltingpot/human_players/play_clean_up.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `daycare`.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import daycare
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Maximum rendering window size, in pixels.
MAX_SCREEN_WIDTH = 800
MAX_SCREEN_HEIGHT = 600
# Interactive rendering speed.
FRAMES_PER_SECOND = 8

# Substrate config modules selectable via the --level_name flag.
environment_configs = {
    'daycare': daycare,
}

# Maps each discrete action of the substrate to the key-polling function
# that supplies its value every frame ('z' eats, space grasps).
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'eat': level_playing_utils.get_key_z_pressed,
    'grasp': level_playing_utils.get_space_key_pressed,
}
def verbose_fn(env_timestep, player_index, current_player_index):
  """No-op verbose callback; this substrate prints nothing per step."""
  del env_timestep, player_index, current_player_index
def main():
  """Parses command-line flags and runs an interactive episode."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      '--level_name', type=str,
      default='daycare',
      choices=environment_configs.keys(),
      help='Level name to load')
  parser.add_argument(
      '--observation', type=str, default='RGB', help='Observation to render')
  parser.add_argument(
      '--settings', type=json.loads, default={}, help='Settings as JSON string')
  # Pass --verbose=True to enable the per-step debug callback.
  parser.add_argument(
      '--verbose', type=bool, default=False, help='Print debug information')
  # Pass --print_events=True to echo dmlab2d events.
  parser.add_argument(
      '--print_events', type=bool, default=False, help='Print events')
  args = parser.parse_args()

  config_module = environment_configs[args.level_name]
  config = config_module.get_config()
  with config_dict.ConfigDict(config).unlocked() as config:
    roles = config.default_player_roles
    config.lab2d_settings = config_module.build(roles, config)
  debug_callback = verbose_fn if args.verbose else None
  level_playing_utils.run_episode(
      args.observation, args.settings, _ACTION_MAP, config,
      level_playing_utils.RenderType.PYGAME, MAX_SCREEN_WIDTH,
      MAX_SCREEN_HEIGHT, FRAMES_PER_SECOND,
      debug_callback,
      print_events=args.print_events)


if __name__ == '__main__':
  main()
|
meltingpot-main
|
meltingpot/human_players/play_daycare.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple human player for testing `chemistry`.
Use `WASD` keys to move the character around.
Use `Q and E` to turn the character.
Use `SPACE` to select the `endocytose` action.
Use `TAB` to switch between players.
"""
import argparse
import json
from meltingpot.configs.substrates import chemistry__three_metabolic_cycles
from meltingpot.configs.substrates import chemistry__three_metabolic_cycles_with_plentiful_distractors
from meltingpot.configs.substrates import chemistry__two_metabolic_cycles
from meltingpot.configs.substrates import chemistry__two_metabolic_cycles_with_distractors
from meltingpot.human_players import level_playing_utils
from ml_collections import config_dict
# Maximum rendering window size, in pixels.
MAX_SCREEN_WIDTH = 800
MAX_SCREEN_HEIGHT = 600
# Interactive rendering speed.
FRAMES_PER_SECOND = 8

# Maps each discrete action of the substrate to the key-polling function
# that supplies its value every frame (space triggers `ioAction`).
_ACTION_MAP = {
    'move': level_playing_utils.get_direction_pressed,
    'turn': level_playing_utils.get_turn_pressed,
    'ioAction': level_playing_utils.get_space_key_pressed,
}

# Substrate config modules selectable via the --level_name flag.
environment_configs = {
    'chemistry__three_metabolic_cycles': (
        chemistry__three_metabolic_cycles),
    'chemistry__three_metabolic_cycles_with_plentiful_distractors': (
        chemistry__three_metabolic_cycles_with_plentiful_distractors),
    'chemistry__two_metabolic_cycles': chemistry__two_metabolic_cycles,
    'chemistry__two_metabolic_cycles_with_distractors': (
        chemistry__two_metabolic_cycles_with_distractors),
}
def verbose_fn(unused_env, unused_player_index, unused_current_player_index):
  """Activate verbose printing with --verbose=True (no-op for this substrate)."""
  del unused_env, unused_player_index, unused_current_player_index
def main():
  """Parses command-line flags and runs an interactive episode."""
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument(
      '--level_name', type=str, default='chemistry__two_metabolic_cycles',
      choices=environment_configs.keys(),
      help='Level name to load')
  parser.add_argument(
      '--observation', type=str, default='RGB', help='Observation to render')
  parser.add_argument(
      '--settings', type=json.loads, default={}, help='Settings as JSON string')
  # Pass --verbose=True to enable the per-step debug callback.
  parser.add_argument(
      '--verbose', type=bool, default=False, help='Print debug information')
  # Pass --print_events=True to echo dmlab2d events.
  parser.add_argument(
      '--print_events', type=bool, default=False, help='Print events')
  args = parser.parse_args()

  config_module = environment_configs[args.level_name]
  config = config_module.get_config()
  with config_dict.ConfigDict(config).unlocked() as config:
    roles = config.default_player_roles
    config.lab2d_settings = config_module.build(roles, config)
  debug_callback = verbose_fn if args.verbose else None
  level_playing_utils.run_episode(
      args.observation, args.settings, _ACTION_MAP,
      config, level_playing_utils.RenderType.PYGAME,
      verbose_fn=debug_callback,
      print_events=args.print_events)


if __name__ == '__main__':
  main()
|
meltingpot-main
|
meltingpot/human_players/play_chemistry.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
meltingpot-main
|
meltingpot/configs/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of the bot configs."""
import collections
import os
from absl.testing import absltest
from absl.testing import parameterized
from meltingpot.configs import bots
from meltingpot.configs import substrates
def _subdirs(root):
for file in os.listdir(root):
if os.path.isdir(os.path.join(root, file)):
yield file
def _models(models_root=bots.MODELS_ROOT):
  """Yields the path of every model directory under `models_root`.

  Models are laid out as `<models_root>/<substrate>/<model>`.
  """
  for substrate_name in _subdirs(models_root):
    substrate_path = os.path.join(models_root, substrate_name)
    for model_name in _subdirs(substrate_path):
      yield os.path.join(substrate_path, model_name)
# All configured bots, keyed by bot name.
BOT_CONFIGS = bots.BOT_CONFIGS
# Every saved-model directory actually present on disk under MODELS_ROOT.
AVAILABLE_MODELS = frozenset(_models())
# Names of all substrates shipped with the package.
AVAILABLE_SUBSTRATES = frozenset(substrates.SUBSTRATES)
class BotConfigTest(parameterized.TestCase):
  """Validates the contents of `bots.BOT_CONFIGS` against models on disk."""

  @parameterized.named_parameters(BOT_CONFIGS.items())
  def test_has_valid_substrate(self, bot):
    """Every bot must reference a known substrate."""
    self.assertIn(bot.substrate, AVAILABLE_SUBSTRATES)

  @parameterized.named_parameters(BOT_CONFIGS.items())
  def test_model_exists(self, bot):
    """Every bot's saved-model directory must exist on disk."""
    self.assertTrue(
        os.path.isdir(bot.model_path), f'Missing model {bot.model_path!r}.')

  @parameterized.named_parameters(BOT_CONFIGS.items())
  def test_substrate_matches_model(self, bot):
    """The model path's parent directory must match the bot's substrate."""
    substrate = os.path.basename(os.path.dirname(bot.model_path))
    self.assertEqual(bot.substrate, substrate,
                     f'{bot} substrate does not match model path.')

  def test_no_duplicates(self):
    """No two bot names may map to an identical config."""
    seen = collections.defaultdict(set)
    for name, config in BOT_CONFIGS.items():
      seen[config].add(name)
    # Collect into a list: the name-sets are themselves unhashable, so the
    # previous set comprehension `{names for _, names in seen.items() ...}`
    # raised TypeError whenever a duplicate was actually found, instead of
    # reporting it through assertEmpty.
    duplicates = [names for names in seen.values() if len(names) > 1]
    self.assertEmpty(duplicates, f'Duplicate configs found: {duplicates!r}.')

  def test_models_used_by_bots(self):
    """Every model on disk must be referenced by at least one bot."""
    used = {bot.model_path for bot in BOT_CONFIGS.values()}
    unused = AVAILABLE_MODELS - used
    self.assertEmpty(unused, f'Models not used by any bot: {unused!r}')
# Run the tests when executed directly (absltest handles flags/CLI).
if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/configs/bots/bot_configs_test.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library of stored bots for MeltingPot scenarios."""
import dataclasses
import functools
import os
from typing import AbstractSet, Callable, Iterable, Literal, Mapping, Optional, Sequence
import immutabledict
from meltingpot.utils.puppeteers import alternator
from meltingpot.utils.puppeteers import clean_up
from meltingpot.utils.puppeteers import coins
from meltingpot.utils.puppeteers import coordination_in_the_matrix
from meltingpot.utils.puppeteers import fixed_goal
from meltingpot.utils.puppeteers import gift_refinements
from meltingpot.utils.puppeteers import in_the_matrix
from meltingpot.utils.puppeteers import puppeteer
from meltingpot.utils.puppeteers import running_with_scissors_in_the_matrix
def _find_models_root() -> str:
import re # pylint: disable=g-import-not-at-top
return re.sub('^(.*)/meltingpot/.*?$', r'\1/meltingpot/assets/saved_models/', __file__)
# Root directory holding all saved bot models, derived from this file's path.
MODELS_ROOT = _find_models_root()
# pylint: disable=line-too-long
# Ordered puppet goals must match the order used in bot training.
# Goal flags for each puppet-compatible substrate, keyed by substrate name.
# Within each substrate, the listing order matches the order used in bot
# training (see note above), so entries must NOT be reordered.
_PUPPET_GOALS = immutabledict.immutabledict(
    # keep-sorted start numeric=yes block=yes
    bach_or_stravinsky_in_the_matrix__arena=puppeteer.puppet_goals([
        'COLLECT_BACH',
        'COLLECT_STRAVINSKY',
        'INTERACT_PLAYING_BACH',
        'INTERACT_PLAYING_STRAVINSKY',
    ]),
    bach_or_stravinsky_in_the_matrix__repeated=puppeteer.puppet_goals([
        'COLLECT_BACH',
        'COLLECT_STRAVINSKY',
        'INTERACT_PLAYING_BACH',
        'INTERACT_PLAYING_STRAVINSKY',
    ]),
    chicken_in_the_matrix__arena=puppeteer.puppet_goals([
        'COLLECT_DOVE',
        'COLLECT_HAWK',
        'INTERACT_PLAYING_DOVE',
        'INTERACT_PLAYING_HAWK',
    ]),
    chicken_in_the_matrix__repeated=puppeteer.puppet_goals([
        'COLLECT_DOVE',
        'COLLECT_HAWK',
        'INTERACT_PLAYING_DOVE',
        'INTERACT_PLAYING_HAWK',
    ]),
    clean_up=puppeteer.puppet_goals([
        'EAT',
        'CLEAN',
    ]),
    coins=puppeteer.puppet_goals([
        'COOPERATE',
        'DEFECT',
        'SPITE',
    ]),
    coop_mining=puppeteer.puppet_goals([
        'EXTRACT_IRON',
        'MINE_GOLD',
        'EXTRACT_GOLD',
        'EXTRACT_ALL',
    ]),
    externality_mushrooms__dense=puppeteer.puppet_goals([
        'COLLECT_MUSHROOM_HIHE',
        'COLLECT_MUSHROOM_FIZE',
        'COLLECT_MUSHROOM_ZIFE',
        'COLLECT_MUSHROOM_NINE',
        'DESTROY_MUSHROOM_HIHE',
        'DESTROY_MUSHROOM_FIZE',
        'DESTROY_MUSHROOM_ZIFE',
    ]),
    gift_refinements=puppeteer.puppet_goals([
        'COLLECT_TOKENS',
        'GIFT',
        'CONSUME_SIMPLE_TOKENS',
        'CONSUME_TOKENS',
        'FORAGE',
    ]),
    prisoners_dilemma_in_the_matrix__arena=puppeteer.puppet_goals([
        'COLLECT_COOPERATE',
        'COLLECT_DEFECT',
        'INTERACT_COOPERATE',
        'INTERACT_DEFECT',
    ]),
    prisoners_dilemma_in_the_matrix__repeated=puppeteer.puppet_goals([
        'COLLECT_COOPERATE',
        'COLLECT_DEFECT',
        'INTERACT_COOPERATE',
        'INTERACT_DEFECT',
    ]),
    pure_coordination_in_the_matrix__arena=puppeteer.puppet_goals([
        'COLLECT_RED',
        'COLLECT_GREEN',
        'COLLECT_BLUE',
        'INTERACT_PLAYING_RED',
        'INTERACT_PLAYING_GREEN',
        'INTERACT_PLAYING_BLUE',
        'COLLECT_RED_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_GREEN_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_BLUE_IGNORING_OTHER_CONSIDERATIONS',
    ]),
    pure_coordination_in_the_matrix__repeated=puppeteer.puppet_goals([
        'COLLECT_RED',
        'COLLECT_GREEN',
        'COLLECT_BLUE',
        'INTERACT_PLAYING_RED',
        'INTERACT_PLAYING_GREEN',
        'INTERACT_PLAYING_BLUE',
        'COLLECT_RED_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_GREEN_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_BLUE_IGNORING_OTHER_CONSIDERATIONS',
    ]),
    rationalizable_coordination_in_the_matrix__arena=puppeteer.puppet_goals([
        'COLLECT_YELLOW',
        'COLLECT_VIOLET',
        'COLLECT_CYAN',
        'INTERACT_PLAYING_YELLOW',
        'INTERACT_PLAYING_VIOLET',
        'INTERACT_PLAYING_CYAN',
        'COLLECT_YELLOW_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_VIOLET_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_CYAN_IGNORING_OTHER_CONSIDERATIONS',
    ]),
    rationalizable_coordination_in_the_matrix__repeated=puppeteer.puppet_goals([
        'COLLECT_YELLOW',
        'COLLECT_VIOLET',
        'COLLECT_CYAN',
        'INTERACT_PLAYING_YELLOW',
        'INTERACT_PLAYING_VIOLET',
        'INTERACT_PLAYING_CYAN',
        'COLLECT_YELLOW_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_VIOLET_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_CYAN_IGNORING_OTHER_CONSIDERATIONS',
    ]),
    running_with_scissors_in_the_matrix__arena=puppeteer.puppet_goals([
        'COLLECT_ROCK',
        'COLLECT_PAPER',
        'COLLECT_SCISSORS',
        'INTERACT_PLAYING_ROCK',
        'INTERACT_PLAYING_PAPER',
        'INTERACT_PLAYING_SCISSORS',
        'COLLECT_ROCK_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_PAPER_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_SCISSORS_IGNORING_OTHER_CONSIDERATIONS',
    ]),
    running_with_scissors_in_the_matrix__one_shot=puppeteer.puppet_goals([
        'COLLECT_ROCK',
        'COLLECT_PAPER',
        'COLLECT_SCISSORS',
        'INTERACT_PLAYING_ROCK',
        'INTERACT_PLAYING_PAPER',
        'INTERACT_PLAYING_SCISSORS',
        'COLLECT_ROCK_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_PAPER_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_SCISSORS_IGNORING_OTHER_CONSIDERATIONS',
    ]),
    running_with_scissors_in_the_matrix__repeated=puppeteer.puppet_goals([
        'COLLECT_ROCK',
        'COLLECT_PAPER',
        'COLLECT_SCISSORS',
        'INTERACT_PLAYING_ROCK',
        'INTERACT_PLAYING_PAPER',
        'INTERACT_PLAYING_SCISSORS',
        'COLLECT_ROCK_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_PAPER_IGNORING_OTHER_CONSIDERATIONS',
        'COLLECT_SCISSORS_IGNORING_OTHER_CONSIDERATIONS',
    ]),
    stag_hunt_in_the_matrix__arena=puppeteer.puppet_goals([
        'COLLECT_STAG',
        'COLLECT_HARE',
        'INTERACT_PLAYING_STAG',
        'INTERACT_PLAYING_HARE',
    ]),
    stag_hunt_in_the_matrix__repeated=puppeteer.puppet_goals([
        'COLLECT_STAG',
        'COLLECT_HARE',
        'INTERACT_PLAYING_STAG',
        'INTERACT_PLAYING_HARE',
    ]),
    # keep-sorted end
)
# Resource definitions for the *_in_the_matrix substrates, keyed by substrate
# name. Each resource pairs an index with the puppet goal used to collect it
# and the goal used to interact while holding it (goals from _PUPPET_GOALS).
_RESOURCES = immutabledict.immutabledict(
    # keep-sorted start numeric=yes block=yes
    bach_or_stravinsky_in_the_matrix__arena=immutabledict.immutabledict({
        'BACH': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['bach_or_stravinsky_in_the_matrix__arena']['COLLECT_BACH'],
            interact_goal=_PUPPET_GOALS['bach_or_stravinsky_in_the_matrix__arena']['INTERACT_PLAYING_BACH'],
        ),
        'STRAVINSKY': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['bach_or_stravinsky_in_the_matrix__arena']['COLLECT_STRAVINSKY'],
            interact_goal=_PUPPET_GOALS['bach_or_stravinsky_in_the_matrix__arena']['INTERACT_PLAYING_STRAVINSKY'],
        ),
    }),
    bach_or_stravinsky_in_the_matrix__repeated=immutabledict.immutabledict({
        'BACH': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['bach_or_stravinsky_in_the_matrix__repeated']['COLLECT_BACH'],
            interact_goal=_PUPPET_GOALS['bach_or_stravinsky_in_the_matrix__repeated']['INTERACT_PLAYING_BACH'],
        ),
        'STRAVINSKY': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['bach_or_stravinsky_in_the_matrix__repeated']['COLLECT_STRAVINSKY'],
            interact_goal=_PUPPET_GOALS['bach_or_stravinsky_in_the_matrix__repeated']['INTERACT_PLAYING_STRAVINSKY'],
        ),
    }),
    chicken_in_the_matrix__arena=immutabledict.immutabledict({
        'DOVE': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['chicken_in_the_matrix__arena']['COLLECT_DOVE'],
            interact_goal=_PUPPET_GOALS['chicken_in_the_matrix__arena']['INTERACT_PLAYING_DOVE'],
        ),
        'HAWK': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['chicken_in_the_matrix__arena']['COLLECT_HAWK'],
            interact_goal=_PUPPET_GOALS['chicken_in_the_matrix__arena']['INTERACT_PLAYING_HAWK'],
        ),
    }),
    chicken_in_the_matrix__repeated=immutabledict.immutabledict({
        'DOVE': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['chicken_in_the_matrix__repeated']['COLLECT_DOVE'],
            interact_goal=_PUPPET_GOALS['chicken_in_the_matrix__repeated']['INTERACT_PLAYING_DOVE'],
        ),
        'HAWK': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['chicken_in_the_matrix__repeated']['COLLECT_HAWK'],
            interact_goal=_PUPPET_GOALS['chicken_in_the_matrix__repeated']['INTERACT_PLAYING_HAWK'],
        ),
    }),
    prisoners_dilemma_in_the_matrix__arena=immutabledict.immutabledict({
        'COOPERATE': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['prisoners_dilemma_in_the_matrix__arena']['COLLECT_COOPERATE'],
            interact_goal=_PUPPET_GOALS['prisoners_dilemma_in_the_matrix__arena']['INTERACT_COOPERATE'],
        ),
        'DEFECT': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['prisoners_dilemma_in_the_matrix__arena']['COLLECT_DEFECT'],
            interact_goal=_PUPPET_GOALS['prisoners_dilemma_in_the_matrix__arena']['INTERACT_DEFECT'],
        ),
    }),
    prisoners_dilemma_in_the_matrix__repeated=immutabledict.immutabledict({
        'COOPERATE': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['prisoners_dilemma_in_the_matrix__repeated']['COLLECT_COOPERATE'],
            interact_goal=_PUPPET_GOALS['prisoners_dilemma_in_the_matrix__repeated']['INTERACT_COOPERATE'],
        ),
        'DEFECT': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['prisoners_dilemma_in_the_matrix__repeated']['COLLECT_DEFECT'],
            interact_goal=_PUPPET_GOALS['prisoners_dilemma_in_the_matrix__repeated']['INTERACT_DEFECT'],
        ),
    }),
    pure_coordination_in_the_matrix__arena=immutabledict.immutabledict({
        'RED': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__arena']['COLLECT_RED'],
            interact_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__arena']['INTERACT_PLAYING_RED'],
        ),
        'GREEN': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__arena']['COLLECT_GREEN'],
            interact_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__arena']['INTERACT_PLAYING_GREEN'],
        ),
        'BLUE': in_the_matrix.Resource(
            index=2,
            collect_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__arena']['COLLECT_BLUE'],
            interact_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__arena']['INTERACT_PLAYING_BLUE'],
        ),
    }),
    pure_coordination_in_the_matrix__repeated=immutabledict.immutabledict({
        'RED': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__repeated']['COLLECT_RED'],
            interact_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__repeated']['INTERACT_PLAYING_RED'],
        ),
        'GREEN': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__repeated']['COLLECT_GREEN'],
            interact_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__repeated']['INTERACT_PLAYING_GREEN'],
        ),
        'BLUE': in_the_matrix.Resource(
            index=2,
            collect_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__repeated']['COLLECT_BLUE'],
            interact_goal=_PUPPET_GOALS['pure_coordination_in_the_matrix__repeated']['INTERACT_PLAYING_BLUE'],
        ),
    }),
    rationalizable_coordination_in_the_matrix__arena=immutabledict.immutabledict({
        'YELLOW': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__arena']['COLLECT_YELLOW'],
            interact_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__arena']['INTERACT_PLAYING_YELLOW'],
        ),
        'VIOLET': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__arena']['COLLECT_VIOLET'],
            interact_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__arena']['INTERACT_PLAYING_VIOLET'],
        ),
        'CYAN': in_the_matrix.Resource(
            index=2,
            collect_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__arena']['COLLECT_CYAN'],
            interact_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__arena']['INTERACT_PLAYING_CYAN'],
        ),
    }),
    rationalizable_coordination_in_the_matrix__repeated=immutabledict.immutabledict({
        'YELLOW': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__repeated']['COLLECT_YELLOW'],
            interact_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__repeated']['INTERACT_PLAYING_YELLOW'],
        ),
        'VIOLET': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__repeated']['COLLECT_VIOLET'],
            interact_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__repeated']['INTERACT_PLAYING_VIOLET'],
        ),
        'CYAN': in_the_matrix.Resource(
            index=2,
            collect_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__repeated']['COLLECT_CYAN'],
            interact_goal=_PUPPET_GOALS['rationalizable_coordination_in_the_matrix__repeated']['INTERACT_PLAYING_CYAN'],
        ),
    }),
    running_with_scissors_in_the_matrix__arena=immutabledict.immutabledict({
        'ROCK': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__arena']['COLLECT_ROCK'],
            interact_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__arena']['INTERACT_PLAYING_ROCK'],
        ),
        'PAPER': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__arena']['COLLECT_PAPER'],
            interact_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__arena']['INTERACT_PLAYING_PAPER'],
        ),
        'SCISSORS': in_the_matrix.Resource(
            index=2,
            collect_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__arena']['COLLECT_SCISSORS'],
            interact_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__arena']['INTERACT_PLAYING_SCISSORS'],
        ),
    }),
    running_with_scissors_in_the_matrix__one_shot=immutabledict.immutabledict({
        'ROCK': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__one_shot']['COLLECT_ROCK'],
            interact_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__one_shot']['INTERACT_PLAYING_ROCK'],
        ),
        'PAPER': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__one_shot']['COLLECT_PAPER'],
            interact_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__one_shot']['INTERACT_PLAYING_PAPER'],
        ),
        'SCISSORS': in_the_matrix.Resource(
            index=2,
            collect_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__one_shot']['COLLECT_SCISSORS'],
            interact_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__one_shot']['INTERACT_PLAYING_SCISSORS'],
        ),
    }),
    running_with_scissors_in_the_matrix__repeated=immutabledict.immutabledict({
        'ROCK': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__repeated']['COLLECT_ROCK'],
            interact_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__repeated']['INTERACT_PLAYING_ROCK'],
        ),
        'PAPER': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__repeated']['COLLECT_PAPER'],
            interact_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__repeated']['INTERACT_PLAYING_PAPER'],
        ),
        'SCISSORS': in_the_matrix.Resource(
            index=2,
            collect_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__repeated']['COLLECT_SCISSORS'],
            interact_goal=_PUPPET_GOALS['running_with_scissors_in_the_matrix__repeated']['INTERACT_PLAYING_SCISSORS'],
        ),
    }),
    stag_hunt_in_the_matrix__arena=immutabledict.immutabledict({
        'STAG': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['stag_hunt_in_the_matrix__arena']['COLLECT_STAG'],
            interact_goal=_PUPPET_GOALS['stag_hunt_in_the_matrix__arena']['INTERACT_PLAYING_STAG'],
        ),
        'HARE': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['stag_hunt_in_the_matrix__arena']['COLLECT_HARE'],
            interact_goal=_PUPPET_GOALS['stag_hunt_in_the_matrix__arena']['INTERACT_PLAYING_HARE'],
        ),
    }),
    stag_hunt_in_the_matrix__repeated=immutabledict.immutabledict({
        'STAG': in_the_matrix.Resource(
            index=0,
            collect_goal=_PUPPET_GOALS['stag_hunt_in_the_matrix__repeated']['COLLECT_STAG'],
            interact_goal=_PUPPET_GOALS['stag_hunt_in_the_matrix__repeated']['INTERACT_PLAYING_STAG'],
        ),
        'HARE': in_the_matrix.Resource(
            index=1,
            collect_goal=_PUPPET_GOALS['stag_hunt_in_the_matrix__repeated']['COLLECT_HARE'],
            interact_goal=_PUPPET_GOALS['stag_hunt_in_the_matrix__repeated']['INTERACT_PLAYING_HARE'],
        ),
    }),
    # keep-sorted end
)
@dataclasses.dataclass(frozen=True)
class BotConfig:
  """Bot config.

  Attributes:
    substrate: the substrate the bot was trained for.
    roles: the roles the bot was trained for.
    model_path: the path to the bot's saved model.
    puppeteer_builder: returns the puppeteer used to control the bot, or None
      for a plain (non-puppet) saved-model bot.
  """
  substrate: str
  roles: AbstractSet[str]
  model_path: str
  puppeteer_builder: Optional[Callable[[], puppeteer.Puppeteer]]

  def __post_init__(self):
    # Normalize roles to a frozenset so the config is hashable and immutable;
    # object.__setattr__ is needed because the dataclass is frozen.
    object.__setattr__(self, 'roles', frozenset(self.roles))
def saved_model(*,
                substrate: str,
                roles: Iterable[str] = ('default',),
                model: str,
                models_root: str = MODELS_ROOT) -> BotConfig:
  """Returns the config for a plain saved-model bot (no puppeteer).

  Args:
    substrate: the substrate on which the bot was trained.
    roles: the roles the bot was trained for.
    model: the name of the model.
    models_root: the path to the directory containing the saved_models.
  """
  return BotConfig(
      substrate=substrate,
      roles=frozenset(roles),
      model_path=os.path.join(models_root, substrate, model),
      puppeteer_builder=None,
  )
def puppet(*,
           substrate: str,
           roles: Iterable[str] = ('default',),
           model: str,
           puppeteer_builder: Callable[[], puppeteer.Puppeteer],
           models_root: str = MODELS_ROOT) -> BotConfig:
  """Returns the config for a puppet bot driven by a puppeteer.

  Args:
    substrate: the substrate on which the bot was trained.
    roles: the roles the bot was trained for.
    model: the name of the model.
    puppeteer_builder: returns the puppeteer used to control the bot.
    models_root: the path to the directory containing the saved_models.
  """
  return BotConfig(
      substrate=substrate,
      roles=frozenset(roles),
      model_path=os.path.join(models_root, substrate, model),
      puppeteer_builder=puppeteer_builder,
  )
BOT_CONFIGS: Mapping[str, BotConfig] = immutabledict.immutabledict(
# keep-sorted start numeric=yes block=yes
allelopathic_harvest__open__bot_that_supports_green_0=saved_model(
substrate='allelopathic_harvest__open',
model='bot_that_loves_green_0',
roles=('default', 'player_who_likes_red', 'player_who_likes_green',),
),
allelopathic_harvest__open__bot_that_supports_green_1=saved_model(
substrate='allelopathic_harvest__open',
model='bot_that_loves_green_1',
roles=('default', 'player_who_likes_red', 'player_who_likes_green',),
),
allelopathic_harvest__open__bot_that_supports_green_2=saved_model(
substrate='allelopathic_harvest__open',
model='bot_that_loves_green_2',
roles=('default', 'player_who_likes_red', 'player_who_likes_green',),
),
allelopathic_harvest__open__bot_that_supports_green_3=saved_model(
substrate='allelopathic_harvest__open',
model='bot_that_loves_green_3',
roles=('default', 'player_who_likes_red', 'player_who_likes_green',),
),
allelopathic_harvest__open__bot_that_supports_red_0=saved_model(
substrate='allelopathic_harvest__open',
model='bot_that_loves_red_0',
roles=('default', 'player_who_likes_red', 'player_who_likes_green',),
),
allelopathic_harvest__open__bot_that_supports_red_1=saved_model(
substrate='allelopathic_harvest__open',
model='bot_that_loves_red_1',
roles=('default', 'player_who_likes_red', 'player_who_likes_green',),
),
allelopathic_harvest__open__bot_that_supports_red_2=saved_model(
substrate='allelopathic_harvest__open',
model='bot_that_loves_red_2',
roles=('default', 'player_who_likes_red', 'player_who_likes_green',),
),
allelopathic_harvest__open__bot_that_supports_red_3=saved_model(
substrate='allelopathic_harvest__open',
model='bot_that_loves_red_3',
roles=('default', 'player_who_likes_red', 'player_who_likes_green',),
),
bach_or_stravinsky_in_the_matrix__arena__bach_picker_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__arena',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['bach_or_stravinsky_in_the_matrix__arena']['BACH'],
margin=3,
),
),
bach_or_stravinsky_in_the_matrix__arena__stravinsky_picker_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__arena',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['bach_or_stravinsky_in_the_matrix__arena']['STRAVINSKY'],
margin=3,
),
),
bach_or_stravinsky_in_the_matrix__arena__turn_taking_initial_bach_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__arena',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.AlternatingSpecialist,
targets=[
_RESOURCES['bach_or_stravinsky_in_the_matrix__arena']['BACH'],
_RESOURCES['bach_or_stravinsky_in_the_matrix__arena']['STRAVINSKY'],
],
interactions_per_target=2,
margin=2,
),
),
bach_or_stravinsky_in_the_matrix__arena__turn_taking_initial_stravinsky_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__arena',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.AlternatingSpecialist,
targets=[
_RESOURCES['bach_or_stravinsky_in_the_matrix__arena']['STRAVINSKY'],
_RESOURCES['bach_or_stravinsky_in_the_matrix__arena']['BACH'],
],
interactions_per_target=2,
margin=2,
),
),
bach_or_stravinsky_in_the_matrix__repeated__bach_picker_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__repeated',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['BACH'],
margin=5,
),
),
bach_or_stravinsky_in_the_matrix__repeated__bach_tft_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__repeated',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['BACH'],
defect_resource=_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['STRAVINSKY'],
tremble_probability=0,
margin=5,
),
),
bach_or_stravinsky_in_the_matrix__repeated__bach_tft_tremble_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__repeated',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['BACH'],
defect_resource=_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['STRAVINSKY'],
tremble_probability=0.25,
margin=5,
),
),
bach_or_stravinsky_in_the_matrix__repeated__stravinsky_picker_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__repeated',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['STRAVINSKY'],
margin=5,
),
),
bach_or_stravinsky_in_the_matrix__repeated__stravinsky_tft_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__repeated',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['STRAVINSKY'],
defect_resource=_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['BACH'],
tremble_probability=0,
margin=5,
),
),
bach_or_stravinsky_in_the_matrix__repeated__stravinsky_tft_tremble_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__repeated',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['STRAVINSKY'],
defect_resource=_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['BACH'],
tremble_probability=0.25,
margin=5,
),
),
bach_or_stravinsky_in_the_matrix__repeated__turn_taking_initial_bach_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__repeated',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.AlternatingSpecialist,
targets=[
_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['BACH'],
_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['STRAVINSKY'],
],
interactions_per_target=1,
margin=5,
),
),
bach_or_stravinsky_in_the_matrix__repeated__turn_taking_initial_bach_1=puppet(
substrate='bach_or_stravinsky_in_the_matrix__repeated',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.AlternatingSpecialist,
targets=[
_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['BACH'],
_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['STRAVINSKY'],
],
interactions_per_target=3,
margin=5,
),
),
bach_or_stravinsky_in_the_matrix__repeated__turn_taking_initial_stravinsky_0=puppet(
substrate='bach_or_stravinsky_in_the_matrix__repeated',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.AlternatingSpecialist,
targets=[
_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['STRAVINSKY'],
_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['BACH'],
],
interactions_per_target=1,
margin=5,
),
),
bach_or_stravinsky_in_the_matrix__repeated__turn_taking_initial_stravinsky_1=puppet(
substrate='bach_or_stravinsky_in_the_matrix__repeated',
model='puppet_0',
roles=('default', 'bach_fan', 'stravinsky_fan',),
puppeteer_builder=functools.partial(
in_the_matrix.AlternatingSpecialist,
targets=[
_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['STRAVINSKY'],
_RESOURCES['bach_or_stravinsky_in_the_matrix__repeated']['BACH'],
],
interactions_per_target=3,
margin=5,
),
),
boat_race__eight_races__cooperator_0=saved_model(
substrate='boat_race__eight_races',
model='cooperator_0',
roles=('default', 'target'),
),
boat_race__eight_races__defector_0=saved_model(
substrate='boat_race__eight_races',
model='defector_0',
roles=('default',),
),
chemistry__three_metabolic_cycles__blue_0=saved_model(
substrate='chemistry__three_metabolic_cycles',
model='blue_0',
),
chemistry__three_metabolic_cycles__green_0=saved_model(
substrate='chemistry__three_metabolic_cycles',
model='green_0',
),
chemistry__three_metabolic_cycles__yellow_0=saved_model(
substrate='chemistry__three_metabolic_cycles',
model='yellow_0',
),
chemistry__three_metabolic_cycles_with_plentiful_distractors__blue_0=saved_model(
substrate='chemistry__three_metabolic_cycles_with_plentiful_distractors',
model='blue_0',
),
chemistry__three_metabolic_cycles_with_plentiful_distractors__green_0=saved_model(
substrate='chemistry__three_metabolic_cycles_with_plentiful_distractors',
model='green_0',
),
chemistry__three_metabolic_cycles_with_plentiful_distractors__yellow_0=saved_model(
substrate='chemistry__three_metabolic_cycles_with_plentiful_distractors',
model='yellow_0',
),
chemistry__two_metabolic_cycles__blue_0=saved_model(
substrate='chemistry__two_metabolic_cycles',
model='blue_0',
),
chemistry__two_metabolic_cycles__green_0=saved_model(
substrate='chemistry__two_metabolic_cycles',
model='green_0',
),
chemistry__two_metabolic_cycles_with_distractors__blue_0=saved_model(
substrate='chemistry__two_metabolic_cycles_with_distractors',
model='blue_0',
),
chemistry__two_metabolic_cycles_with_distractors__green_0=saved_model(
substrate='chemistry__two_metabolic_cycles_with_distractors',
model='green_0',
),
chicken_in_the_matrix__arena__puppet_dove_0=puppet(
substrate='chicken_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['chicken_in_the_matrix__arena']['DOVE'],
margin=1,
),
),
chicken_in_the_matrix__arena__puppet_dove_margin_0=puppet(
substrate='chicken_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['chicken_in_the_matrix__arena']['DOVE'],
margin=5,
),
),
chicken_in_the_matrix__arena__puppet_grim_one_strike_0=puppet(
substrate='chicken_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__arena']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__arena']['HAWK'],
threshold=1,
margin=1,
),
),
chicken_in_the_matrix__arena__puppet_grim_one_strike_margin_0=puppet(
substrate='chicken_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__arena']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__arena']['HAWK'],
threshold=1,
margin=5,
),
),
chicken_in_the_matrix__arena__puppet_grim_three_strikes_0=puppet(
substrate='chicken_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__arena']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__arena']['HAWK'],
threshold=3,
margin=1,
),
),
chicken_in_the_matrix__arena__puppet_grim_three_strikes_margin_0=puppet(
substrate='chicken_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__arena']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__arena']['HAWK'],
threshold=3,
margin=5,
),
),
chicken_in_the_matrix__arena__puppet_grim_two_strikes_0=puppet(
substrate='chicken_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__arena']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__arena']['HAWK'],
threshold=2,
margin=1,
),
),
chicken_in_the_matrix__arena__puppet_grim_two_strikes_margin_0=puppet(
substrate='chicken_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__arena']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__arena']['HAWK'],
threshold=2,
margin=5,
),
),
chicken_in_the_matrix__arena__puppet_hawk_0=puppet(
substrate='chicken_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['chicken_in_the_matrix__arena']['HAWK'],
margin=1,
),
),
chicken_in_the_matrix__arena__puppet_hawk_margin_0=puppet(
substrate='chicken_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['chicken_in_the_matrix__arena']['HAWK'],
margin=5,
),
),
chicken_in_the_matrix__repeated__puppet_corrigible_0=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Corrigible,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
threshold=3,
margin=5,
tremble_probability=0,
),
),
chicken_in_the_matrix__repeated__puppet_corrigible_tremble_0=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Corrigible,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
threshold=3,
margin=5,
tremble_probability=0.15,
),
),
chicken_in_the_matrix__repeated__puppet_dove_margin_0=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
margin=5,
),
),
chicken_in_the_matrix__repeated__puppet_dove_margin_1=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
margin=7,
),
),
chicken_in_the_matrix__repeated__puppet_flip_0=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
initial_target=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
final_target=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
threshold=3,
initial_margin=1,
final_margin=5,
),
),
chicken_in_the_matrix__repeated__puppet_grim_one_strike_margin_0=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
threshold=1,
margin=5,
),
),
chicken_in_the_matrix__repeated__puppet_grim_one_strike_margin_1=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
threshold=1,
margin=7,
),
),
chicken_in_the_matrix__repeated__puppet_grim_two_strikes_margin_0=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
threshold=2,
margin=5,
),
),
chicken_in_the_matrix__repeated__puppet_grim_two_strikes_margin_1=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
threshold=2,
margin=7,
),
),
chicken_in_the_matrix__repeated__puppet_hawk_margin_0=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
margin=5,
),
),
chicken_in_the_matrix__repeated__puppet_hawk_margin_1=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
margin=7,
),
),
chicken_in_the_matrix__repeated__puppet_tft_margin_0=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
tremble_probability=0,
margin=5,
),
),
chicken_in_the_matrix__repeated__puppet_tft_margin_1=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
tremble_probability=0,
margin=7,
),
),
chicken_in_the_matrix__repeated__puppet_tft_tremble_margin_0=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
tremble_probability=0.15,
margin=5,
),
),
chicken_in_the_matrix__repeated__puppet_tft_tremble_margin_1=puppet(
substrate='chicken_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['chicken_in_the_matrix__repeated']['DOVE'],
defect_resource=_RESOURCES['chicken_in_the_matrix__repeated']['HAWK'],
tremble_probability=0.15,
margin=7,
),
),
clean_up__cleaner_0=saved_model(
substrate='clean_up',
model='cleaner_0',
),
clean_up__cleaner_1=saved_model(
substrate='clean_up',
model='cleaner_1',
),
clean_up__consumer_0=saved_model(
substrate='clean_up',
model='consumer_0',
),
clean_up__consumer_1=saved_model(
substrate='clean_up',
model='consumer_1',
),
clean_up__puppet_alternator_first_cleans_0=puppet(
    substrate='clean_up',
    model='puppet_0',
    roles=('default',),
    puppeteer_builder=functools.partial(
        alternator.Alternator,
        goals=[
            # Starts by cleaning, then alternates with eating every
            # `steps_per_goal` steps.
            _PUPPET_GOALS['clean_up']['CLEAN'],
            _PUPPET_GOALS['clean_up']['EAT'],
        ],
        steps_per_goal=200,
    ),
),
clean_up__puppet_alternator_first_eats_0=puppet(
substrate='clean_up',
model='puppet_0',
roles=('default',),
puppeteer_builder=functools.partial(
alternator.Alternator,
goals=[
_PUPPET_GOALS['clean_up']['EAT'],
_PUPPET_GOALS['clean_up']['CLEAN'],
],
steps_per_goal=200,
),
),
clean_up__puppet_high_threshold_reciprocator_0=puppet(
substrate='clean_up',
model='puppet_0',
roles=('default',),
puppeteer_builder=functools.partial(
clean_up.ConditionalCleaner,
clean_goal=_PUPPET_GOALS['clean_up']['CLEAN'],
eat_goal=_PUPPET_GOALS['clean_up']['EAT'],
coplayer_cleaning_signal='NUM_OTHERS_WHO_CLEANED_THIS_STEP',
threshold=3,
recency_window=5,
reciprocation_period=75,
niceness_period=0,
),
),
clean_up__puppet_low_threshold_reciprocator_0=puppet(
substrate='clean_up',
model='puppet_0',
roles=('default',),
puppeteer_builder=functools.partial(
clean_up.ConditionalCleaner,
clean_goal=_PUPPET_GOALS['clean_up']['CLEAN'],
eat_goal=_PUPPET_GOALS['clean_up']['EAT'],
coplayer_cleaning_signal='NUM_OTHERS_WHO_CLEANED_THIS_STEP',
threshold=2,
recency_window=5,
reciprocation_period=75,
niceness_period=0,
),
),
clean_up__puppet_nice_low_threshold_reciprocator_0=puppet(
substrate='clean_up',
model='puppet_0',
roles=('default',),
puppeteer_builder=functools.partial(
clean_up.ConditionalCleaner,
clean_goal=_PUPPET_GOALS['clean_up']['CLEAN'],
eat_goal=_PUPPET_GOALS['clean_up']['EAT'],
coplayer_cleaning_signal='NUM_OTHERS_WHO_CLEANED_THIS_STEP',
threshold=2,
recency_window=5,
reciprocation_period=75,
niceness_period=200,
),
),
coins__puppet_cooperator_0=puppet(
substrate='coins',
model='puppet_1',
puppeteer_builder=functools.partial(
fixed_goal.FixedGoal,
goal=_PUPPET_GOALS['coins']['COOPERATE'],
),
),
coins__puppet_defector_0=puppet(
substrate='coins',
model='puppet_1',
puppeteer_builder=functools.partial(
fixed_goal.FixedGoal,
goal=_PUPPET_GOALS['coins']['DEFECT'],
),
),
coins__puppet_one_strike_reciprocator_0=puppet(
substrate='coins',
model='puppet_1',
puppeteer_builder=functools.partial(
coins.Reciprocator,
cooperate_goal=_PUPPET_GOALS['coins']['COOPERATE'],
defect_goal=_PUPPET_GOALS['coins']['DEFECT'],
spite_goal=_PUPPET_GOALS['coins']['SPITE'],
partner_defection_signal='MISMATCHED_COIN_COLLECTED_BY_PARTNER',
recency_window=100,
threshold=1,
frames_to_punish=100,
spiteful_punishment_window=0,
),
),
coins__puppet_one_strike_strong_reciprocator_0=puppet(
substrate='coins',
model='puppet_1',
puppeteer_builder=functools.partial(
coins.Reciprocator,
cooperate_goal=_PUPPET_GOALS['coins']['COOPERATE'],
defect_goal=_PUPPET_GOALS['coins']['DEFECT'],
spite_goal=_PUPPET_GOALS['coins']['SPITE'],
partner_defection_signal='MISMATCHED_COIN_COLLECTED_BY_PARTNER',
recency_window=100,
threshold=1,
frames_to_punish=100,
spiteful_punishment_window=50,
),
),
coins__puppet_three_strikes_reciprocator_0=puppet(
substrate='coins',
model='puppet_1',
puppeteer_builder=functools.partial(
coins.Reciprocator,
cooperate_goal=_PUPPET_GOALS['coins']['COOPERATE'],
defect_goal=_PUPPET_GOALS['coins']['DEFECT'],
spite_goal=_PUPPET_GOALS['coins']['SPITE'],
partner_defection_signal='MISMATCHED_COIN_COLLECTED_BY_PARTNER',
recency_window=150,
threshold=3,
frames_to_punish=150,
spiteful_punishment_window=0,
),
),
coins__puppet_three_strikes_strong_reciprocator_0=puppet(
substrate='coins',
model='puppet_1',
puppeteer_builder=functools.partial(
coins.Reciprocator,
cooperate_goal=_PUPPET_GOALS['coins']['COOPERATE'],
defect_goal=_PUPPET_GOALS['coins']['DEFECT'],
spite_goal=_PUPPET_GOALS['coins']['SPITE'],
partner_defection_signal='MISMATCHED_COIN_COLLECTED_BY_PARTNER',
recency_window=150,
threshold=3,
frames_to_punish=150,
spiteful_punishment_window=75,
),
),
collaborative_cooking__asymmetric__apprentice_0=saved_model(
substrate='collaborative_cooking__asymmetric',
model='apprentice_0',
),
collaborative_cooking__asymmetric__apprentice_1=saved_model(
substrate='collaborative_cooking__asymmetric',
model='apprentice_1',
),
collaborative_cooking__asymmetric__chef_0=saved_model(
substrate='collaborative_cooking__asymmetric',
model='chef_0',
),
collaborative_cooking__asymmetric__chef_1=saved_model(
substrate='collaborative_cooking__asymmetric',
model='chef_1',
),
collaborative_cooking__circuit__apprentice_0=saved_model(
substrate='collaborative_cooking__circuit',
model='apprentice_0',
),
collaborative_cooking__circuit__apprentice_1=saved_model(
substrate='collaborative_cooking__circuit',
model='apprentice_1',
),
collaborative_cooking__circuit__chef_0=saved_model(
substrate='collaborative_cooking__circuit',
model='chef_0',
),
collaborative_cooking__circuit__chef_1=saved_model(
substrate='collaborative_cooking__circuit',
model='chef_1',
),
collaborative_cooking__cramped__apprentice_0=saved_model(
substrate='collaborative_cooking__cramped',
model='apprentice_0',
),
collaborative_cooking__cramped__apprentice_1=saved_model(
substrate='collaborative_cooking__cramped',
model='apprentice_1',
),
collaborative_cooking__cramped__chef_0=saved_model(
substrate='collaborative_cooking__cramped',
model='chef_0',
),
collaborative_cooking__cramped__chef_1=saved_model(
substrate='collaborative_cooking__cramped',
model='chef_1',
),
collaborative_cooking__crowded__independent_chef_0=saved_model(
substrate='collaborative_cooking__crowded',
model='independent_chef_0',
),
collaborative_cooking__crowded__robust_chef_0=saved_model(
substrate='collaborative_cooking__crowded',
model='robust_chef_0',
),
collaborative_cooking__figure_eight__independent_chef_0=saved_model(
substrate='collaborative_cooking__figure_eight',
model='independent_chef_0',
),
collaborative_cooking__figure_eight__robust_chef_0=saved_model(
substrate='collaborative_cooking__figure_eight',
model='robust_chef_0',
),
collaborative_cooking__forced__apprentice_0=saved_model(
substrate='collaborative_cooking__forced',
model='apprentice_0',
),
collaborative_cooking__forced__apprentice_1=saved_model(
substrate='collaborative_cooking__forced',
model='apprentice_1',
),
collaborative_cooking__forced__chef_0=saved_model(
substrate='collaborative_cooking__forced',
model='chef_0',
),
collaborative_cooking__forced__chef_1=saved_model(
substrate='collaborative_cooking__forced',
model='chef_1',
),
collaborative_cooking__ring__apprentice_0=saved_model(
substrate='collaborative_cooking__ring',
model='apprentice_0',
),
collaborative_cooking__ring__apprentice_1=saved_model(
substrate='collaborative_cooking__ring',
model='apprentice_1',
),
collaborative_cooking__ring__chef_0=saved_model(
substrate='collaborative_cooking__ring',
model='chef_0',
),
collaborative_cooking__ring__chef_1=saved_model(
substrate='collaborative_cooking__ring',
model='chef_1',
),
commons_harvest__closed__free_0=saved_model(
substrate='commons_harvest__closed',
model='free_0',
),
commons_harvest__closed__free_1=saved_model(
substrate='commons_harvest__closed',
model='free_1',
),
commons_harvest__closed__free_2=saved_model(
substrate='commons_harvest__closed',
model='free_2',
),
commons_harvest__closed__free_3=saved_model(
substrate='commons_harvest__closed',
model='free_3',
),
commons_harvest__closed__pacifist_0=saved_model(
substrate='commons_harvest__closed',
model='pacifist_0',
),
commons_harvest__closed__pacifist_1=saved_model(
substrate='commons_harvest__closed',
model='pacifist_1',
),
commons_harvest__closed__pacifist_2=saved_model(
substrate='commons_harvest__closed',
model='pacifist_2',
),
commons_harvest__open__free_0=saved_model(
substrate='commons_harvest__open',
model='free_0',
),
commons_harvest__open__free_1=saved_model(
substrate='commons_harvest__open',
model='free_1',
),
commons_harvest__open__pacifist_0=saved_model(
substrate='commons_harvest__open',
model='pacifist_0',
),
commons_harvest__open__pacifist_1=saved_model(
substrate='commons_harvest__open',
model='pacifist_1',
),
commons_harvest__partnership__free_0=saved_model(
substrate='commons_harvest__partnership',
model='free_0',
),
commons_harvest__partnership__free_1=saved_model(
substrate='commons_harvest__partnership',
model='free_1',
),
commons_harvest__partnership__free_2=saved_model(
substrate='commons_harvest__partnership',
model='free_2',
),
commons_harvest__partnership__good_partner_0=saved_model(
substrate='commons_harvest__partnership',
model='good_partner_0',
),
commons_harvest__partnership__good_partner_1=saved_model(
substrate='commons_harvest__partnership',
model='good_partner_1',
),
commons_harvest__partnership__good_partner_2=saved_model(
substrate='commons_harvest__partnership',
model='good_partner_2',
),
commons_harvest__partnership__pacifist_0=saved_model(
substrate='commons_harvest__partnership',
model='pacifist_0',
),
commons_harvest__partnership__pacifist_1=saved_model(
substrate='commons_harvest__partnership',
model='pacifist_1',
),
commons_harvest__partnership__pacifist_2=saved_model(
substrate='commons_harvest__partnership',
model='pacifist_2',
),
commons_harvest__partnership__sustainable_fighter_0=saved_model(
substrate='commons_harvest__partnership',
model='sustainable_fighter_0',
),
commons_harvest__partnership__sustainable_fighter_1=saved_model(
substrate='commons_harvest__partnership',
model='sustainable_fighter_1',
),
coop_mining__cooperator_0=puppet(
    substrate='coop_mining',
    model='puppet_0',
    roles=('default', 'target'),
    puppeteer_builder=functools.partial(
        fixed_goal.FixedGoal,
        # Always pursue gold extraction (the cooperative choice). Passed by
        # keyword for consistency with the other FixedGoal configurations.
        goal=_PUPPET_GOALS['coop_mining']['EXTRACT_GOLD'],
    ),
),
coop_mining__defector_0=puppet(
    substrate='coop_mining',
    model='puppet_0',
    roles=('default',),
    puppeteer_builder=functools.partial(
        fixed_goal.FixedGoal,
        # Always pursue iron extraction (the defecting choice). Passed by
        # keyword for consistency with the other FixedGoal configurations.
        goal=_PUPPET_GOALS['coop_mining']['EXTRACT_IRON'],
    ),
),
coop_mining__mixed_0=puppet(
substrate='coop_mining',
model='puppet_0',
roles=('default', 'target'),
puppeteer_builder=functools.partial(
alternator.Alternator,
goals=[
_PUPPET_GOALS['coop_mining']['EXTRACT_IRON'],
_PUPPET_GOALS['coop_mining']['EXTRACT_GOLD'],
],
steps_per_goal=100,
),
),
daycare__foraging_child_0=saved_model(
substrate='daycare',
model='foraging_child_0',
roles=('child',),
),
daycare__foraging_parent_0=saved_model(
substrate='daycare',
model='foraging_parent_0',
roles=('parent',),
),
daycare__helping_parent_0=saved_model(
substrate='daycare',
model='helping_parent_0',
roles=('parent',),
),
daycare__pointing_child_0=saved_model(
substrate='daycare',
model='pointing_child_0',
roles=('child',),
),
externality_mushrooms__dense__puppet_fize_0=puppet(
    substrate='externality_mushrooms__dense',
    model='puppet_0',
    roles=('default',),
    puppeteer_builder=functools.partial(
        fixed_goal.FixedGoal,
        # Always collect "fize" mushrooms. Passed by keyword and on its own
        # line for consistency with the other FixedGoal configurations.
        goal=_PUPPET_GOALS['externality_mushrooms__dense'][
            'COLLECT_MUSHROOM_FIZE'],
    ),
),
externality_mushrooms__dense__puppet_hihe_0=puppet(
    substrate='externality_mushrooms__dense',
    model='puppet_0',
    roles=('default',),
    puppeteer_builder=functools.partial(
        fixed_goal.FixedGoal,
        # Always collect "hihe" mushrooms. Passed by keyword and on its own
        # line for consistency with the other FixedGoal configurations.
        goal=_PUPPET_GOALS['externality_mushrooms__dense'][
            'COLLECT_MUSHROOM_HIHE'],
    ),
),
factory_commons__either_or__sustainable_0=saved_model(
substrate='factory_commons__either_or',
model='sustainable_0',
roles=('default',),
),
factory_commons__either_or__sustainable_1=saved_model(
substrate='factory_commons__either_or',
model='sustainable_1',
roles=('default',),
),
factory_commons__either_or__sustainable_2=saved_model(
substrate='factory_commons__either_or',
model='sustainable_2',
roles=('default',),
),
factory_commons__either_or__unsustainable_0=saved_model(
substrate='factory_commons__either_or',
model='unsustainable_0',
roles=('default',),
),
factory_commons__either_or__unsustainable_1=saved_model(
substrate='factory_commons__either_or',
model='unsustainable_1',
roles=('default',),
),
factory_commons__either_or__unsustainable_2=saved_model(
substrate='factory_commons__either_or',
model='unsustainable_2',
roles=('default',),
),
fruit_market__concentric_rivers__apple_farmer_0=saved_model(
substrate='fruit_market__concentric_rivers',
model='apple_farmer_0',
roles=('apple_farmer',),
),
fruit_market__concentric_rivers__apple_farmer_1=saved_model(
substrate='fruit_market__concentric_rivers',
model='apple_farmer_1',
roles=('apple_farmer',),
),
fruit_market__concentric_rivers__apple_farmer_2=saved_model(
substrate='fruit_market__concentric_rivers',
model='apple_farmer_2',
roles=('apple_farmer',),
),
fruit_market__concentric_rivers__banana_farmer_0=saved_model(
substrate='fruit_market__concentric_rivers',
model='banana_farmer_0',
roles=('banana_farmer',),
),
fruit_market__concentric_rivers__banana_farmer_1=saved_model(
substrate='fruit_market__concentric_rivers',
model='banana_farmer_1',
roles=('banana_farmer',),
),
fruit_market__concentric_rivers__banana_farmer_2=saved_model(
substrate='fruit_market__concentric_rivers',
model='banana_farmer_2',
roles=('banana_farmer',),
),
gift_refinements__cooperator_0=puppet(
substrate='gift_refinements',
roles=('default', 'target'),
model='puppet_0',
puppeteer_builder=functools.partial(
gift_refinements.GiftRefinementsCooperator,
collect_goal=_PUPPET_GOALS['gift_refinements']['COLLECT_TOKENS'],
consume_goal=_PUPPET_GOALS['gift_refinements']['CONSUME_TOKENS'],
gift_goal=_PUPPET_GOALS['gift_refinements']['GIFT'],
),
),
gift_refinements__defector_0=puppet(
substrate='gift_refinements',
roles=('default', 'target'),
model='puppet_0',
puppeteer_builder=functools.partial(
fixed_goal.FixedGoal,
goal=_PUPPET_GOALS['gift_refinements']['FORAGE'],
),
),
gift_refinements__extreme_cooperator_0=puppet(
substrate='gift_refinements',
roles=('default', 'target'),
model='puppet_0',
puppeteer_builder=functools.partial(
gift_refinements.GiftRefinementsExtremeCooperator,
collect_goal=_PUPPET_GOALS['gift_refinements']['COLLECT_TOKENS'],
consume_goal=_PUPPET_GOALS['gift_refinements']['CONSUME_TOKENS'],
gift_goal=_PUPPET_GOALS['gift_refinements']['GIFT'],
),
),
hidden_agenda__collector_crew_0=saved_model(
substrate='hidden_agenda',
model='collector_crew_0',
roles=('crewmate',),
),
hidden_agenda__collector_crew_1=saved_model(
substrate='hidden_agenda',
model='collector_crew_1',
roles=('crewmate',),
),
hidden_agenda__hunter_impostor_0=saved_model(
substrate='hidden_agenda',
model='hunter_impostor_0',
roles=('impostor',),
),
paintball__capture_the_flag__shaped_bot_0=saved_model(
substrate='paintball__capture_the_flag',
model='shaped_0',
roles=('default',),
),
paintball__capture_the_flag__shaped_bot_1=saved_model(
substrate='paintball__capture_the_flag',
model='shaped_1',
roles=('default',),
),
paintball__capture_the_flag__shaped_bot_2=saved_model(
substrate='paintball__capture_the_flag',
model='shaped_2',
roles=('default',),
),
paintball__capture_the_flag__shaped_bot_3=saved_model(
substrate='paintball__capture_the_flag',
model='shaped_3',
roles=('default',),
),
paintball__king_of_the_hill__free_0=saved_model(
substrate='paintball__king_of_the_hill',
model='free_bot_0',
roles=('default',),
),
paintball__king_of_the_hill__free_1=saved_model(
substrate='paintball__king_of_the_hill',
model='free_bot_1',
roles=('default',),
),
paintball__king_of_the_hill__free_2=saved_model(
substrate='paintball__king_of_the_hill',
model='free_bot_2',
roles=('default',),
),
paintball__king_of_the_hill__spawn_camper_0=saved_model(
substrate='paintball__king_of_the_hill',
model='spawn_camper_0',
roles=('default',),
),
paintball__king_of_the_hill__spawn_camper_1=saved_model(
substrate='paintball__king_of_the_hill',
model='spawn_camper_1',
roles=('default',),
),
paintball__king_of_the_hill__spawn_camper_2=saved_model(
substrate='paintball__king_of_the_hill',
model='spawn_camper_2',
roles=('default',),
),
paintball__king_of_the_hill__spawn_camper_3=saved_model(
substrate='paintball__king_of_the_hill',
model='spawn_camper_3',
roles=('default',),
),
predator_prey__alley_hunt__predator_0=saved_model(
substrate='predator_prey__alley_hunt',
model='basic_predator_0',
roles=('predator',),
),
predator_prey__alley_hunt__predator_1=saved_model(
substrate='predator_prey__alley_hunt',
model='basic_predator_1',
roles=('predator',),
),
predator_prey__alley_hunt__predator_2=saved_model(
substrate='predator_prey__alley_hunt',
model='basic_predator_2',
roles=('predator',),
),
predator_prey__alley_hunt__prey_0=saved_model(
substrate='predator_prey__alley_hunt',
model='basic_prey_0',
roles=('prey',),
),
predator_prey__alley_hunt__prey_1=saved_model(
substrate='predator_prey__alley_hunt',
model='basic_prey_1',
roles=('prey',),
),
predator_prey__alley_hunt__prey_2=saved_model(
substrate='predator_prey__alley_hunt',
model='basic_prey_2',
roles=('prey',),
),
predator_prey__open__basic_predator_0=saved_model(
substrate='predator_prey__open',
model='basic_predator_0',
roles=('predator',),
),
predator_prey__open__basic_predator_1=saved_model(
substrate='predator_prey__open',
model='basic_predator_1',
roles=('predator',),
),
predator_prey__open__basic_prey_0=saved_model(
substrate='predator_prey__open',
model='basic_prey_0',
roles=('prey',),
),
predator_prey__open__basic_prey_1=saved_model(
substrate='predator_prey__open',
model='basic_prey_1',
roles=('prey',),
),
predator_prey__open__basic_prey_2=saved_model(
substrate='predator_prey__open',
model='basic_prey_2',
roles=('prey',),
),
predator_prey__open__smart_prey_0=saved_model(
substrate='predator_prey__open',
model='smart_prey_0',
roles=('prey',),
),
predator_prey__open__smart_prey_1=saved_model(
substrate='predator_prey__open',
model='smart_prey_1',
roles=('prey',),
),
predator_prey__open__smart_prey_2=saved_model(
substrate='predator_prey__open',
model='smart_prey_2',
roles=('prey',),
),
predator_prey__orchard__acorn_specialist_prey_0=saved_model(
substrate='predator_prey__orchard',
model='acorn_specialist_prey_0',
roles=('prey',),
),
predator_prey__orchard__acorn_specialist_prey_1=saved_model(
substrate='predator_prey__orchard',
model='acorn_specialist_prey_1',
roles=('prey',),
),
predator_prey__orchard__acorn_specialist_prey_2=saved_model(
substrate='predator_prey__orchard',
model='acorn_specialist_prey_2',
roles=('prey',),
),
predator_prey__orchard__acorn_specialist_prey_3=saved_model(
substrate='predator_prey__orchard',
model='acorn_specialist_prey_3',
roles=('prey',),
),
predator_prey__orchard__acorn_specialist_prey_4=saved_model(
substrate='predator_prey__orchard',
model='acorn_specialist_prey_4',
roles=('prey',),
),
predator_prey__orchard__basic_predator_0=saved_model(
substrate='predator_prey__orchard',
model='basic_predator_0',
roles=('predator',),
),
predator_prey__orchard__basic_predator_1=saved_model(
substrate='predator_prey__orchard',
model='basic_predator_1',
roles=('predator',),
),
predator_prey__orchard__basic_predator_2=saved_model(
substrate='predator_prey__orchard',
model='basic_predator_2',
roles=('predator',),
),
predator_prey__orchard__basic_prey_0=saved_model(
substrate='predator_prey__orchard',
model='basic_prey_0',
roles=('prey',),
),
predator_prey__orchard__basic_prey_1=saved_model(
substrate='predator_prey__orchard',
model='basic_prey_1',
roles=('prey',),
),
predator_prey__orchard__basic_prey_2=saved_model(
substrate='predator_prey__orchard',
model='basic_prey_2',
roles=('prey',),
),
predator_prey__orchard__basic_prey_3=saved_model(
substrate='predator_prey__orchard',
model='basic_prey_3',
roles=('prey',),
),
predator_prey__orchard__basic_prey_4=saved_model(
substrate='predator_prey__orchard',
model='basic_prey_4',
roles=('prey',),
),
predator_prey__orchard__basic_prey_5=saved_model(
substrate='predator_prey__orchard',
model='basic_prey_5',
roles=('prey',),
),
predator_prey__random_forest__basic_predator_0=saved_model(
substrate='predator_prey__random_forest',
model='basic_predator_0',
roles=('predator',),
),
predator_prey__random_forest__basic_predator_1=saved_model(
substrate='predator_prey__random_forest',
model='basic_predator_1',
roles=('predator',),
),
predator_prey__random_forest__basic_predator_2=saved_model(
substrate='predator_prey__random_forest',
model='basic_predator_2',
roles=('predator',),
),
predator_prey__random_forest__basic_prey_0=saved_model(
substrate='predator_prey__random_forest',
model='basic_prey_0',
roles=('prey',),
),
predator_prey__random_forest__basic_prey_1=saved_model(
substrate='predator_prey__random_forest',
model='basic_prey_1',
roles=('prey',),
),
predator_prey__random_forest__basic_prey_2=saved_model(
substrate='predator_prey__random_forest',
model='basic_prey_2',
roles=('prey',),
),
prisoners_dilemma_in_the_matrix__arena__puppet_cooperator_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['COOPERATE'],
margin=1,
),
),
prisoners_dilemma_in_the_matrix__arena__puppet_cooperator_margin_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['COOPERATE'],
margin=5,
),
),
prisoners_dilemma_in_the_matrix__arena__puppet_defector_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['DEFECT'],
margin=1,
),
),
prisoners_dilemma_in_the_matrix__arena__puppet_defector_margin_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['DEFECT'],
margin=5,
),
),
prisoners_dilemma_in_the_matrix__arena__puppet_grim_one_strike_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['DEFECT'],
threshold=1,
margin=1,
),
),
prisoners_dilemma_in_the_matrix__arena__puppet_grim_one_strike_margin_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['DEFECT'],
threshold=1,
margin=5,
),
),
prisoners_dilemma_in_the_matrix__arena__puppet_grim_three_strikes_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['DEFECT'],
threshold=3,
margin=1,
),
),
prisoners_dilemma_in_the_matrix__arena__puppet_grim_three_strikes_margin_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['DEFECT'],
threshold=3,
margin=5,
),
),
prisoners_dilemma_in_the_matrix__arena__puppet_grim_two_strikes_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['DEFECT'],
threshold=2,
margin=1,
),
),
prisoners_dilemma_in_the_matrix__arena__puppet_grim_two_strikes_margin_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__arena']['DEFECT'],
threshold=2,
margin=5,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_cooperator_margin_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
margin=5,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_cooperator_margin_1=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
margin=7,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_corrigible_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Corrigible,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
threshold=3,
margin=5,
tremble_probability=0,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_corrigible_tremble_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Corrigible,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
threshold=3,
margin=5,
tremble_probability=0.15,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_defector_margin_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
margin=5,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_defector_margin_1=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
margin=7,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_flip_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
initial_target=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
final_target=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
threshold=3,
initial_margin=1,
final_margin=5,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_grim_one_strike_margin_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
threshold=1,
margin=5,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_grim_one_strike_margin_1=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
threshold=1,
margin=7,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_grim_two_strikes_margin_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
threshold=2,
margin=5,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_grim_two_strikes_margin_1=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
threshold=2,
margin=7,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_tft_margin_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
tremble_probability=0,
margin=5,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_tft_margin_1=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
tremble_probability=0,
margin=7,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_tft_tremble_margin_0=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
tremble_probability=0.15,
margin=5,
),
),
prisoners_dilemma_in_the_matrix__repeated__puppet_tft_tremble_margin_1=puppet(
substrate='prisoners_dilemma_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['COOPERATE'],
defect_resource=_RESOURCES['prisoners_dilemma_in_the_matrix__repeated']['DEFECT'],
tremble_probability=0.15,
margin=7,
),
),
pure_coordination_in_the_matrix__arena__flip_a2b_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['RED'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['GREEN'],
initial_margin=1,
final_margin=1,
),
),
pure_coordination_in_the_matrix__arena__flip_a2c_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['RED'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['BLUE'],
initial_margin=1,
final_margin=1,
),
),
pure_coordination_in_the_matrix__arena__flip_b2a_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['GREEN'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['RED'],
initial_margin=1,
final_margin=1,
),
),
pure_coordination_in_the_matrix__arena__flip_b2c_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['GREEN'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['BLUE'],
initial_margin=1,
final_margin=1,
),
),
pure_coordination_in_the_matrix__arena__flip_c2a_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['BLUE'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['RED'],
initial_margin=1,
final_margin=1,
),
),
pure_coordination_in_the_matrix__arena__flip_c2b_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['BLUE'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__arena']['GREEN'],
initial_margin=1,
final_margin=1,
),
),
pure_coordination_in_the_matrix__arena__pure_a_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['pure_coordination_in_the_matrix__arena']['RED'],
margin=1,
),
),
pure_coordination_in_the_matrix__arena__pure_b_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['pure_coordination_in_the_matrix__arena']['GREEN'],
margin=1,
),
),
pure_coordination_in_the_matrix__arena__pure_c_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['pure_coordination_in_the_matrix__arena']['BLUE'],
margin=1,
),
),
pure_coordination_in_the_matrix__arena__pure_greedy_a_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['pure_coordination_in_the_matrix__arena']['RED'],
margin=6,
),
),
pure_coordination_in_the_matrix__arena__pure_greedy_b_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['pure_coordination_in_the_matrix__arena']['GREEN'],
margin=6,
),
),
pure_coordination_in_the_matrix__arena__pure_greedy_c_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['pure_coordination_in_the_matrix__arena']['BLUE'],
margin=6,
),
),
pure_coordination_in_the_matrix__arena__resp2prev_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
coordination_in_the_matrix.CoordinateWithPrevious,
resources=(
_RESOURCES['pure_coordination_in_the_matrix__arena']['RED'],
_RESOURCES['pure_coordination_in_the_matrix__arena']['GREEN'],
_RESOURCES['pure_coordination_in_the_matrix__arena']['BLUE'],
),
margin=1,
),
),
pure_coordination_in_the_matrix__arena__resp2prev_greedy_0=puppet(
substrate='pure_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
coordination_in_the_matrix.CoordinateWithPrevious,
resources=(
_RESOURCES['pure_coordination_in_the_matrix__arena']['RED'],
_RESOURCES['pure_coordination_in_the_matrix__arena']['GREEN'],
_RESOURCES['pure_coordination_in_the_matrix__arena']['BLUE'],
),
margin=6,
),
),
pure_coordination_in_the_matrix__repeated__flip_a2b_0=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['RED'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['GREEN'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__flip_a2b_1=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['RED'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['GREEN'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__flip_a2c_0=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['RED'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['BLUE'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__flip_a2c_1=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['RED'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['BLUE'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__flip_b2a_0=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['GREEN'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['RED'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__flip_b2a_1=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['GREEN'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['RED'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__flip_b2c_0=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['GREEN'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['BLUE'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__flip_b2c_1=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['GREEN'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['BLUE'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__flip_c2a_0=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['BLUE'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['RED'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__flip_c2a_1=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['BLUE'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['RED'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__flip_c2b_0=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['BLUE'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['GREEN'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__flip_c2b_1=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['BLUE'],
final_target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['GREEN'],
initial_margin=5,
final_margin=5,
),
),
pure_coordination_in_the_matrix__repeated__pure_a_margin_0=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['RED'],
margin=5,
),
),
pure_coordination_in_the_matrix__repeated__pure_b_margin_0=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['GREEN'],
margin=5,
),
),
pure_coordination_in_the_matrix__repeated__pure_c_margin_0=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['pure_coordination_in_the_matrix__repeated']['BLUE'],
margin=5,
),
),
pure_coordination_in_the_matrix__repeated__resp2prev_margin_0=puppet(
substrate='pure_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
coordination_in_the_matrix.CoordinateWithPrevious,
resources=(
_RESOURCES['pure_coordination_in_the_matrix__repeated']['RED'],
_RESOURCES['pure_coordination_in_the_matrix__repeated']['GREEN'],
_RESOURCES['pure_coordination_in_the_matrix__repeated']['BLUE'],
),
margin=5,
),
),
rationalizable_coordination_in_the_matrix__arena__flip_a2b_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['YELLOW'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['VIOLET'],
initial_margin=1,
final_margin=1,
),
),
rationalizable_coordination_in_the_matrix__arena__flip_a2c_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['YELLOW'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['CYAN'],
initial_margin=1,
final_margin=1,
),
),
rationalizable_coordination_in_the_matrix__arena__flip_b2a_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['VIOLET'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['YELLOW'],
initial_margin=1,
final_margin=1,
),
),
rationalizable_coordination_in_the_matrix__arena__flip_b2c_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['VIOLET'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['CYAN'],
initial_margin=1,
final_margin=1,
),
),
rationalizable_coordination_in_the_matrix__arena__flip_c2a_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['CYAN'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['YELLOW'],
initial_margin=1,
final_margin=1,
),
),
rationalizable_coordination_in_the_matrix__arena__flip_c2b_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=5,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['CYAN'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['VIOLET'],
initial_margin=1,
final_margin=1,
),
),
rationalizable_coordination_in_the_matrix__arena__pure_a_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['YELLOW'],
margin=1,
),
),
rationalizable_coordination_in_the_matrix__arena__pure_b_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['VIOLET'],
margin=1,
),
),
rationalizable_coordination_in_the_matrix__arena__pure_c_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['CYAN'],
margin=1,
),
),
rationalizable_coordination_in_the_matrix__arena__pure_greedy_a_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['YELLOW'],
margin=6,
),
),
rationalizable_coordination_in_the_matrix__arena__pure_greedy_b_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['VIOLET'],
margin=6,
),
),
rationalizable_coordination_in_the_matrix__arena__pure_greedy_c_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['CYAN'],
margin=6,
),
),
rationalizable_coordination_in_the_matrix__arena__resp2prev_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
coordination_in_the_matrix.CoordinateWithPrevious,
resources=(
_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['YELLOW'],
_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['VIOLET'],
_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['CYAN'],
),
margin=1,
),
),
rationalizable_coordination_in_the_matrix__arena__resp2prev_greedy_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__arena',
model='puppet_0',
puppeteer_builder=functools.partial(
coordination_in_the_matrix.CoordinateWithPrevious,
resources=(
_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['YELLOW'],
_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['VIOLET'],
_RESOURCES['rationalizable_coordination_in_the_matrix__arena']['CYAN'],
),
margin=6,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_a2b_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['YELLOW'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['VIOLET'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_a2b_1=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['YELLOW'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['VIOLET'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_a2c_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['YELLOW'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['CYAN'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_a2c_1=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['YELLOW'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['CYAN'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_b2a_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['VIOLET'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['YELLOW'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_b2a_1=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['VIOLET'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['YELLOW'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_b2c_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['VIOLET'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['CYAN'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_b2c_1=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['VIOLET'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['CYAN'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_c2a_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['CYAN'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['YELLOW'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_c2a_1=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['CYAN'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['YELLOW'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_c2b_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=4,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['CYAN'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['VIOLET'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__flip_c2b_1=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=12,
initial_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['CYAN'],
final_target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['VIOLET'],
initial_margin=5,
final_margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__pure_a_margin_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['YELLOW'],
margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__pure_b_margin_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['VIOLET'],
margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__pure_c_margin_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['CYAN'],
margin=5,
),
),
rationalizable_coordination_in_the_matrix__repeated__resp2prev_margin_0=puppet(
substrate='rationalizable_coordination_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
coordination_in_the_matrix.CoordinateWithPrevious,
resources=(
_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['YELLOW'],
_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['VIOLET'],
_RESOURCES['rationalizable_coordination_in_the_matrix__repeated']['CYAN'],
),
margin=5,
),
),
running_with_scissors_in_the_matrix__arena__flip_p2r_0=puppet(
substrate='running_with_scissors_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=3,
initial_target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['PAPER'],
# NOTE(review): the key says "p2r" (paper-to-rock) but final_target is
# SCISSORS. The analogous __repeated__flip_p2r_* entries flip PAPER -> ROCK.
# Confirm whether this entry's name or its final_target is the typo before
# relying on the name to describe the bot's behavior.
final_target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['SCISSORS'],
initial_margin=1,
final_margin=5,
),
),
running_with_scissors_in_the_matrix__arena__flip_r2s_0=puppet(
substrate='running_with_scissors_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=3,
initial_target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['ROCK'],
final_target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['SCISSORS'],
initial_margin=1,
final_margin=5,
),
),
running_with_scissors_in_the_matrix__arena__flip_s2p_0=puppet(
substrate='running_with_scissors_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=3,
initial_target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['SCISSORS'],
final_target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['PAPER'],
initial_margin=1,
final_margin=5,
),
),
running_with_scissors_in_the_matrix__arena__free_0=saved_model(
substrate='running_with_scissors_in_the_matrix__arena',
model='free_0',
),
running_with_scissors_in_the_matrix__arena__paper_margin_0=puppet(
substrate='running_with_scissors_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['PAPER'],
margin=3,
),
),
running_with_scissors_in_the_matrix__arena__paper_margin_1=puppet(
substrate='running_with_scissors_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['PAPER'],
margin=5,
),
),
running_with_scissors_in_the_matrix__arena__rock_margin_0=puppet(
substrate='running_with_scissors_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['ROCK'],
margin=3,
),
),
running_with_scissors_in_the_matrix__arena__rock_margin_1=puppet(
substrate='running_with_scissors_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['ROCK'],
margin=5,
),
),
running_with_scissors_in_the_matrix__arena__scissors_margin_0=puppet(
substrate='running_with_scissors_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['SCISSORS'],
margin=3,
),
),
running_with_scissors_in_the_matrix__arena__scissors_margin_1=puppet(
substrate='running_with_scissors_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__arena']['SCISSORS'],
margin=5,
),
),
running_with_scissors_in_the_matrix__one_shot__paper_margin_0=puppet(
substrate='running_with_scissors_in_the_matrix__one_shot',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__one_shot']['PAPER'],
margin=3,
),
),
running_with_scissors_in_the_matrix__one_shot__rock_margin_0=puppet(
substrate='running_with_scissors_in_the_matrix__one_shot',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__one_shot']['ROCK'],
margin=3,
),
),
running_with_scissors_in_the_matrix__one_shot__scissors_margin_0=puppet(
substrate='running_with_scissors_in_the_matrix__one_shot',
model='puppet_0',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__one_shot']['SCISSORS'],
margin=3,
),
),
running_with_scissors_in_the_matrix__repeated__flip_p2r_0=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=3,
initial_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['PAPER'],
final_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['ROCK'],
initial_margin=1,
final_margin=5,
),
),
running_with_scissors_in_the_matrix__repeated__flip_p2r_1=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=2,
initial_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['PAPER'],
final_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['ROCK'],
initial_margin=5,
final_margin=5,
),
),
running_with_scissors_in_the_matrix__repeated__flip_r2s_0=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=3,
initial_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['ROCK'],
final_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['SCISSORS'],
initial_margin=1,
final_margin=5,
),
),
running_with_scissors_in_the_matrix__repeated__flip_r2s_1=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=2,
initial_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['ROCK'],
final_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['SCISSORS'],
initial_margin=5,
final_margin=5,
),
),
running_with_scissors_in_the_matrix__repeated__flip_s2p_0=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=3,
initial_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['SCISSORS'],
final_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['PAPER'],
initial_margin=1,
final_margin=5,
),
),
running_with_scissors_in_the_matrix__repeated__flip_s2p_1=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
threshold=2,
initial_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['SCISSORS'],
final_target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['PAPER'],
initial_margin=5,
final_margin=5,
),
),
running_with_scissors_in_the_matrix__repeated__free_0=saved_model(
substrate='running_with_scissors_in_the_matrix__repeated',
model='free_0',
),
running_with_scissors_in_the_matrix__repeated__paper_0=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['PAPER'],
margin=1,
),
),
running_with_scissors_in_the_matrix__repeated__paper_margin_0=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['PAPER'],
margin=5,
),
),
running_with_scissors_in_the_matrix__repeated__resp2prev_margin_0=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
running_with_scissors_in_the_matrix.CounterPrevious,
rock_resource=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['ROCK'],
paper_resource=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['PAPER'],
scissors_resource=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['SCISSORS'],
margin=5,
),
),
running_with_scissors_in_the_matrix__repeated__rock_0=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['ROCK'],
margin=1,
),
),
running_with_scissors_in_the_matrix__repeated__rock_margin_0=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['ROCK'],
margin=5,
),
),
running_with_scissors_in_the_matrix__repeated__scissors_0=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['SCISSORS'],
margin=1,
),
),
running_with_scissors_in_the_matrix__repeated__scissors_margin_0=puppet(
substrate='running_with_scissors_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['running_with_scissors_in_the_matrix__repeated']['SCISSORS'],
margin=5,
),
),
stag_hunt_in_the_matrix__arena__puppet_grim_one_strike_0=puppet(
substrate='stag_hunt_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['HARE'],
threshold=1,
margin=1,
),
),
stag_hunt_in_the_matrix__arena__puppet_grim_one_strike_margin_0=puppet(
substrate='stag_hunt_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['HARE'],
threshold=1,
margin=5,
),
),
stag_hunt_in_the_matrix__arena__puppet_grim_three_strikes_0=puppet(
substrate='stag_hunt_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['HARE'],
threshold=3,
margin=1,
),
),
stag_hunt_in_the_matrix__arena__puppet_grim_three_strikes_margin_0=puppet(
substrate='stag_hunt_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['HARE'],
threshold=3,
margin=5,
),
),
stag_hunt_in_the_matrix__arena__puppet_grim_two_strikes_0=puppet(
substrate='stag_hunt_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['HARE'],
threshold=2,
margin=1,
),
),
stag_hunt_in_the_matrix__arena__puppet_grim_two_strikes_margin_0=puppet(
substrate='stag_hunt_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__arena']['HARE'],
threshold=2,
margin=5,
),
),
stag_hunt_in_the_matrix__arena__puppet_hare_0=puppet(
substrate='stag_hunt_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['stag_hunt_in_the_matrix__arena']['HARE'],
margin=1,
),
),
stag_hunt_in_the_matrix__arena__puppet_hare_margin_0=puppet(
substrate='stag_hunt_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['stag_hunt_in_the_matrix__arena']['HARE'],
margin=5,
),
),
stag_hunt_in_the_matrix__arena__puppet_stag_0=puppet(
substrate='stag_hunt_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['stag_hunt_in_the_matrix__arena']['STAG'],
margin=1,
),
),
stag_hunt_in_the_matrix__arena__puppet_stag_margin_0=puppet(
substrate='stag_hunt_in_the_matrix__arena',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['stag_hunt_in_the_matrix__arena']['STAG'],
margin=5,
),
),
stag_hunt_in_the_matrix__repeated__puppet_corrigible_0=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Corrigible,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
threshold=3,
margin=5,
tremble_probability=0,
),
),
stag_hunt_in_the_matrix__repeated__puppet_corrigible_tremble_0=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Corrigible,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
threshold=3,
margin=5,
tremble_probability=0.15,
),
),
stag_hunt_in_the_matrix__repeated__puppet_flip_0=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.ScheduledFlip,
initial_target=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
final_target=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
threshold=3,
initial_margin=1,
final_margin=5,
),
),
stag_hunt_in_the_matrix__repeated__puppet_grim_one_strike_margin_0=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
threshold=1,
margin=5,
),
),
stag_hunt_in_the_matrix__repeated__puppet_grim_one_strike_margin_1=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
threshold=1,
margin=7,
),
),
stag_hunt_in_the_matrix__repeated__puppet_grim_two_strikes_margin_0=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
threshold=2,
margin=5,
),
),
stag_hunt_in_the_matrix__repeated__puppet_grim_two_strikes_margin_1=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.GrimTrigger,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
threshold=2,
margin=7,
),
),
stag_hunt_in_the_matrix__repeated__puppet_hare_margin_0=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
margin=5,
),
),
stag_hunt_in_the_matrix__repeated__puppet_hare_margin_1=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
margin=7,
),
),
stag_hunt_in_the_matrix__repeated__puppet_stag_margin_0=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
margin=5,
),
),
stag_hunt_in_the_matrix__repeated__puppet_stag_margin_1=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.Specialist,
target=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
margin=7,
),
),
stag_hunt_in_the_matrix__repeated__puppet_tft_margin_0=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
tremble_probability=0,
margin=5,
),
),
stag_hunt_in_the_matrix__repeated__puppet_tft_margin_1=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
tremble_probability=0,
margin=7,
),
),
stag_hunt_in_the_matrix__repeated__puppet_tft_tremble_margin_0=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
tremble_probability=0.15,
margin=5,
),
),
stag_hunt_in_the_matrix__repeated__puppet_tft_tremble_margin_1=puppet(
substrate='stag_hunt_in_the_matrix__repeated',
model='puppet_1',
puppeteer_builder=functools.partial(
in_the_matrix.TitForTat,
cooperate_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['STAG'],
defect_resource=_RESOURCES['stag_hunt_in_the_matrix__repeated']['HARE'],
tremble_probability=0.15,
margin=7,
),
),
territory__inside_out__aggressor_0=saved_model(
substrate='territory__inside_out',
model='aggressor_0',
),
territory__inside_out__aggressor_1=saved_model(
substrate='territory__inside_out',
model='aggressor_1',
),
territory__inside_out__aggressor_2=saved_model(
substrate='territory__inside_out',
model='aggressor_2',
),
territory__inside_out__aggressor_3=saved_model(
substrate='territory__inside_out',
model='aggressor_3',
),
territory__inside_out__aggressor_with_extra_training_0=saved_model(
substrate='territory__inside_out',
model='aggressor_with_extra_training_0',
),
territory__inside_out__somewhat_tolerant_bot_0=saved_model(
substrate='territory__inside_out',
model='somewhat_tolerant_bot_0',
),
territory__inside_out__somewhat_tolerant_bot_1=saved_model(
substrate='territory__inside_out',
model='somewhat_tolerant_bot_1',
),
territory__open__aggressor_0=saved_model(
substrate='territory__open',
model='aggressor_0',
),
territory__open__aggressor_1=saved_model(
substrate='territory__open',
model='aggressor_1',
),
territory__open__aggressor_2=saved_model(
substrate='territory__open',
model='aggressor_2',
),
territory__open__aggressor_3=saved_model(
substrate='territory__open',
model='aggressor_3',
),
territory__open__aggressor_with_extra_training_0=saved_model(
substrate='territory__open',
model='aggressor_with_extra_training_0',
),
territory__rooms__aggressor_0=saved_model(
substrate='territory__rooms',
model='aggressor_0',
),
territory__rooms__aggressor_1=saved_model(
substrate='territory__rooms',
model='aggressor_1',
),
territory__rooms__aggressor_2=saved_model(
substrate='territory__rooms',
model='aggressor_2',
),
territory__rooms__aggressor_3=saved_model(
substrate='territory__rooms',
model='aggressor_3',
),
territory__rooms__aggressor_with_extra_training_0=saved_model(
substrate='territory__rooms',
model='aggressor_with_extra_training_0',
),
# keep-sorted end
)
|
meltingpot-main
|
meltingpot/configs/bots/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of the scenario configs."""
import collections
from absl.testing import absltest
from absl.testing import parameterized
from meltingpot import bot as bot_factory
from meltingpot.configs import bots
from meltingpot.configs import scenarios
from meltingpot.configs import substrates
# All scenario configs under test, keyed by scenario name.
SCENARIO_CONFIGS = scenarios.SCENARIO_CONFIGS
# Names of every bot the factory can build; scenarios may only reference these.
AVAILABLE_BOTS = bot_factory.BOTS
# Names of every substrate; scenarios may only be based on these.
AVAILABLE_SUBSTRATES = frozenset(substrates.SUBSTRATES)
def _is_compatible(bot_name, substrate, role):
  """Returns True if the named bot can fill `role` on `substrate`."""
  # The noop bot is a universal stand-in: compatible with every slot.
  if bot_name == bot_factory.NOOP_BOT_NAME:
    return True
  config = bots.BOT_CONFIGS[bot_name]
  return config.substrate == substrate and role in config.roles
class ScenarioConfigTest(parameterized.TestCase):
  """Validates the structural integrity of every entry in SCENARIO_CONFIGS."""

  @parameterized.named_parameters(SCENARIO_CONFIGS.items())
  def test_has_description(self, scenario):
    # Every scenario must carry a human-readable description.
    self.assertNotEmpty(scenario.description)

  @parameterized.named_parameters(SCENARIO_CONFIGS.items())
  def test_has_tags(self, scenario):
    # Every scenario must be tagged (e.g. 'resident', 'visitor').
    self.assertNotEmpty(scenario.tags)

  @parameterized.named_parameters(SCENARIO_CONFIGS.items())
  def test_has_valid_substrate(self, scenario):
    self.assertIn(scenario.substrate, AVAILABLE_SUBSTRATES)

  @parameterized.named_parameters(
      (name, name, scenario) for name, scenario in SCENARIO_CONFIGS.items())
  def test_name_starts_with_substrate_name(self, name, scenario):
    # Convention: scenario names are '<substrate>_<n>'.
    self.assertStartsWith(name, scenario.substrate)

  @parameterized.named_parameters(SCENARIO_CONFIGS.items())
  def test_has_focal_players(self, scenario):
    # At least one slot must be reserved for the focal population.
    self.assertTrue(any(scenario.is_focal))

  @parameterized.named_parameters(SCENARIO_CONFIGS.items())
  def test_has_matching_sizes(self, scenario):
    # roles and is_focal describe the same player slots, so lengths agree.
    self.assertLen(scenario.is_focal, len(scenario.roles))

  @parameterized.named_parameters(SCENARIO_CONFIGS.items())
  def test_has_valid_roles(self, scenario):
    valid_roles = substrates.get_config(scenario.substrate).valid_roles
    self.assertContainsSubset(scenario.roles, valid_roles)

  @parameterized.named_parameters(SCENARIO_CONFIGS.items())
  def test_has_valid_bots(self, scenario):
    # Union of all bots referenced by any role in this scenario.
    scenario_bots = set().union(*scenario.bots_by_role.values())
    self.assertContainsSubset(scenario_bots, AVAILABLE_BOTS)

  @parameterized.named_parameters(SCENARIO_CONFIGS.items())
  def test_bots_compatible(self, scenario):
    # Each bot assigned to a role must support that role on this substrate.
    for role, bot_names in scenario.bots_by_role.items():
      incompatible = {
          bot_name for bot_name in bot_names
          if not _is_compatible(bot_name, scenario.substrate, role)
      }
      with self.subTest(role):
        self.assertEmpty(
            incompatible,
            f'Substrate {scenario.substrate!r}, role {role!r} not supported '
            f'by: {incompatible!r}.')

  # NOTE: method name keeps the historical 'assigments' typo so that
  # recorded test IDs remain stable.
  @parameterized.named_parameters(SCENARIO_CONFIGS.items())
  def test_no_missing_role_assigments(self, scenario):
    # Every role played by a non-focal slot must have at least one bot.
    background_roles = {role for n, role in enumerate(scenario.roles)
                        if not scenario.is_focal[n]}
    supported_roles = {
        role for role, bots in scenario.bots_by_role.items() if bots}
    unsupported_roles = background_roles - supported_roles
    self.assertEmpty(unsupported_roles,
                     f'Background roles {unsupported_roles!r} have not been '
                     f'assigned bots.')

  @parameterized.named_parameters(SCENARIO_CONFIGS.items())
  def test_no_unused_role_assignments(self, scenario):
    # Conversely, bots must not be assigned to roles no background player has.
    background_roles = {role for n, role in enumerate(scenario.roles)
                        if not scenario.is_focal[n]}
    redundant_roles = set(scenario.bots_by_role) - background_roles
    self.assertEmpty(redundant_roles,
                     f'Bots assigned to {redundant_roles!r} are unused.')

  def test_no_duplicates(self):
    """Checks that no two scenario names map to identical configs."""
    names_by_config = collections.defaultdict(set)
    for name, config in SCENARIO_CONFIGS.items():
      names_by_config[config].add(name)
    # Collect duplicate name-groups in a list: the previous set comprehension
    # ({names for ...}) raised TypeError on the failure path, because `names`
    # is a mutable set and therefore unhashable.
    duplicates = [
        names for names in names_by_config.values() if len(names) > 1]
    self.assertEmpty(duplicates, f'Duplicate configs found: {duplicates!r}.')

  def test_all_substrates_used_by_scenarios(self):
    used = {scenario.substrate for scenario in SCENARIO_CONFIGS.values()}
    unused = AVAILABLE_SUBSTRATES - used
    self.assertEmpty(unused, f'Substrates not used by any scenario: {unused!r}')

  def test_all_bots_used_by_scenarios(self):
    used = set()
    for scenario in SCENARIO_CONFIGS.values():
      used.update(*scenario.bots_by_role.values())
    unused = AVAILABLE_BOTS - used
    self.assertEmpty(unused, f'Bots not used by any scenario: {unused!r}')
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  absltest.main()
|
meltingpot-main
|
meltingpot/configs/scenarios/scenario_configs_test.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test scenario configurations."""
import collections
import dataclasses
from typing import AbstractSet, Collection, Mapping, Optional, Sequence
import immutabledict
@dataclasses.dataclass(frozen=True)
class ScenarioConfig:
  """Scenario config.

  Attributes:
    description: a description of the scenario.
    tags: tags for the scenario.
    substrate: the substrate the scenario is based on.
    roles: indicates what role the player in the corresponding player slot has.
    is_focal: indicates whether the corresponding player slot is to be filled by
      a focal player or a bot.
    bots_by_role: names of the bots to sample from to fill the bot slots with
      the corresponding role.
  """
  description: str
  tags: AbstractSet[str]
  substrate: str
  roles: Sequence[str]
  is_focal: Sequence[bool]
  bots_by_role: Mapping[str, AbstractSet[str]]

  def __post_init__(self):
    # Normalize every field into an immutable, hashable form so that frozen
    # instances can be compared and used as dict keys. The dataclass is
    # frozen, so assignment must go through object.__setattr__.
    normalized_bots = immutabledict.immutabledict({
        role: frozenset(bots) for role, bots in self.bots_by_role.items()
    })
    for field_name, value in (
        ('tags', frozenset(self.tags)),
        ('roles', tuple(self.roles)),
        ('is_focal', tuple(self.is_focal)),
        ('bots_by_role', normalized_bots),
    ):
      object.__setattr__(self, field_name, value)
# Local additions/overrides.
SCENARIO_CONFIGS: Mapping[str, ScenarioConfig] = immutabledict.immutabledict(
# keep-sorted start numeric=yes block=yes
allelopathic_harvest__open_0=ScenarioConfig(
description=(
'visiting a population where planting green berries is the ' +
'prevailing convention'),
tags={
'visitor',
'convention_following',
},
substrate='allelopathic_harvest__open',
roles=['player_who_likes_red',] * 8 + ['player_who_likes_green',] * 8,
is_focal=(True,) * 4 + (False,) * 12,
bots_by_role={
# The same bots can play both roles.
'player_who_likes_red': {
'allelopathic_harvest__open__bot_that_supports_green_0',
'allelopathic_harvest__open__bot_that_supports_green_1',
'allelopathic_harvest__open__bot_that_supports_green_2',
'allelopathic_harvest__open__bot_that_supports_green_3',
},
'player_who_likes_green': {
'allelopathic_harvest__open__bot_that_supports_green_0',
'allelopathic_harvest__open__bot_that_supports_green_1',
'allelopathic_harvest__open__bot_that_supports_green_2',
'allelopathic_harvest__open__bot_that_supports_green_3',
},
},
),
allelopathic_harvest__open_1=ScenarioConfig(
description=(
'visiting a population where planting red berries is the ' +
'prevailing convention'),
tags={
'visitor',
'convention_following',
},
substrate='allelopathic_harvest__open',
roles=['player_who_likes_red',] * 8 + ['player_who_likes_green',] * 8,
is_focal=(True,) * 4 + (False,) * 12,
bots_by_role={
# The same bots can play both roles.
'player_who_likes_red': {
'allelopathic_harvest__open__bot_that_supports_red_0',
'allelopathic_harvest__open__bot_that_supports_red_1',
'allelopathic_harvest__open__bot_that_supports_red_2',
'allelopathic_harvest__open__bot_that_supports_red_3',
},
'player_who_likes_green': {
'allelopathic_harvest__open__bot_that_supports_red_0',
'allelopathic_harvest__open__bot_that_supports_red_1',
'allelopathic_harvest__open__bot_that_supports_red_2',
'allelopathic_harvest__open__bot_that_supports_red_3',
},
},
),
allelopathic_harvest__open_2=ScenarioConfig(
description=(
'focals are resident and visited by bots who plant either red or ' +
'green'),
tags={
'resident',
},
substrate='allelopathic_harvest__open',
roles=['player_who_likes_red',] * 8 + ['player_who_likes_green',] * 8,
is_focal=(True,) * 14 + (False,) * 2,
bots_by_role={
'player_who_likes_green': {
'allelopathic_harvest__open__bot_that_supports_red_0',
'allelopathic_harvest__open__bot_that_supports_red_1',
'allelopathic_harvest__open__bot_that_supports_red_2',
'allelopathic_harvest__open__bot_that_supports_red_3',
'allelopathic_harvest__open__bot_that_supports_green_0',
'allelopathic_harvest__open__bot_that_supports_green_1',
'allelopathic_harvest__open__bot_that_supports_green_2',
'allelopathic_harvest__open__bot_that_supports_green_3',
},
},
),
bach_or_stravinsky_in_the_matrix__arena_0=ScenarioConfig(
description='visiting background population who picks bach',
tags={
'convention_following',
'versus_pure_bach',
'visitor',
},
substrate='bach_or_stravinsky_in_the_matrix__arena',
roles=('bach_fan',) * 4 + ('stravinsky_fan',) * 4,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role=immutabledict.immutabledict(
bach_fan=(
'bach_or_stravinsky_in_the_matrix__arena__bach_picker_0',
),
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__arena__bach_picker_0',
),
),
),
bach_or_stravinsky_in_the_matrix__arena_1=ScenarioConfig(
description='visiting background population who picks stravinsky',
tags={
'convention_following',
'versus_pure_stravinsky',
'visitor',
},
substrate='bach_or_stravinsky_in_the_matrix__arena',
roles=('bach_fan',) * 4 + ('stravinsky_fan',) * 4,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role=immutabledict.immutabledict(
bach_fan=(
'bach_or_stravinsky_in_the_matrix__arena__stravinsky_picker_0',
),
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__arena__stravinsky_picker_0',
),
),
),
bach_or_stravinsky_in_the_matrix__arena_2=ScenarioConfig(
description='visited by a pure bot',
tags={
'resident',
'versus_pure_all'
},
substrate='bach_or_stravinsky_in_the_matrix__arena',
roles=('bach_fan',) * 4 + ('stravinsky_fan',) * 4,
is_focal=(True,) * 7 + (False,) * 1,
bots_by_role=immutabledict.immutabledict(
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__arena__bach_picker_0',
'bach_or_stravinsky_in_the_matrix__arena__stravinsky_picker_0',
),
),
),
bach_or_stravinsky_in_the_matrix__arena_3=ScenarioConfig(
description='visited by three pure bach pickers',
tags={
'resident',
'versus_pure_bach'
},
substrate='bach_or_stravinsky_in_the_matrix__arena',
roles=('bach_fan',) * 4 + ('stravinsky_fan',) * 4,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role=immutabledict.immutabledict(
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__arena__bach_picker_0',
),
),
),
bach_or_stravinsky_in_the_matrix__arena_4=ScenarioConfig(
description='visited by three pure stravinsky pickers',
tags={
'resident',
'versus_pure_stravinsky'
},
substrate='bach_or_stravinsky_in_the_matrix__arena',
roles=('bach_fan',) * 4 + ('stravinsky_fan',) * 4,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role=immutabledict.immutabledict(
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__arena__stravinsky_picker_0',
),
),
),
bach_or_stravinsky_in_the_matrix__arena_5=ScenarioConfig(
description=('visiting background population who alternates, ' +
'starting from stravinsky, repeating each twice'),
tags={
'visitor',
'turn_taking',
'convention_following',
},
substrate='bach_or_stravinsky_in_the_matrix__arena',
roles=('bach_fan',) * 4 + ('stravinsky_fan',) * 4,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role=immutabledict.immutabledict(
bach_fan=(
'bach_or_stravinsky_in_the_matrix__arena__turn_taking_initial_stravinsky_0',
),
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__arena__turn_taking_initial_stravinsky_0',
),
),
),
bach_or_stravinsky_in_the_matrix__arena_6=ScenarioConfig(
description=('visiting background population who alternates, ' +
'starting from bach, repeating each twice'),
tags={
'visitor',
'turn_taking',
'convention_following',
},
substrate='bach_or_stravinsky_in_the_matrix__arena',
roles=('bach_fan',) * 4 + ('stravinsky_fan',) * 4,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role=immutabledict.immutabledict(
bach_fan=(
'bach_or_stravinsky_in_the_matrix__arena__turn_taking_initial_bach_0',
),
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__arena__turn_taking_initial_bach_0',
),
),
),
bach_or_stravinsky_in_the_matrix__repeated_0=ScenarioConfig(
description='meeting a stubborn bach picker',
tags={
'convention_following',
'versus_pure_bach',
'half_and_half',
},
substrate='bach_or_stravinsky_in_the_matrix__repeated',
roles=('stravinsky_fan',) + ('bach_fan',),
is_focal=(True,) + (False,),
bots_by_role=immutabledict.immutabledict(
bach_fan=(
'bach_or_stravinsky_in_the_matrix__repeated__bach_picker_0',
),
),
),
bach_or_stravinsky_in_the_matrix__repeated_1=ScenarioConfig(
description='meeting a bot who plays bach despite not being a fan',
tags={
'convention_following',
'versus_pure_bach',
'half_and_half',
},
substrate='bach_or_stravinsky_in_the_matrix__repeated',
roles=('bach_fan',) + ('stravinsky_fan',),
is_focal=(True,) + (False,),
bots_by_role=immutabledict.immutabledict(
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__repeated__bach_picker_0',
),
),
),
bach_or_stravinsky_in_the_matrix__repeated_2=ScenarioConfig(
description=('meeting a bot who plays stravinsky despite not being a ' +
'fan'),
tags={
'convention_following',
'versus_pure_stravinsky',
'half_and_half',
},
substrate='bach_or_stravinsky_in_the_matrix__repeated',
roles=('stravinsky_fan',) + ('bach_fan',),
is_focal=(True,) + (False,),
bots_by_role=immutabledict.immutabledict(
bach_fan=(
'bach_or_stravinsky_in_the_matrix__repeated__stravinsky_picker_0',
),
),
),
bach_or_stravinsky_in_the_matrix__repeated_3=ScenarioConfig(
description='meeting a stubborn stravinsky picker',
tags={
'convention_following',
'versus_pure_stravinsky',
'half_and_half',
},
substrate='bach_or_stravinsky_in_the_matrix__repeated',
roles=('bach_fan',) + ('stravinsky_fan',),
is_focal=(True,) + (False,),
bots_by_role=immutabledict.immutabledict(
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__repeated__stravinsky_picker_0',
),
),
),
bach_or_stravinsky_in_the_matrix__repeated_4=ScenarioConfig(
description='bach fan focal agent meets an imperfectly copying partner',
tags={
'versus_tft',
'half_and_half',
},
substrate='bach_or_stravinsky_in_the_matrix__repeated',
roles=('bach_fan',) + ('stravinsky_fan',),
is_focal=(True,) + (False,),
bots_by_role=immutabledict.immutabledict(
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__repeated__bach_tft_0',
'bach_or_stravinsky_in_the_matrix__repeated__bach_tft_tremble_0',
'bach_or_stravinsky_in_the_matrix__repeated__stravinsky_tft_0',
'bach_or_stravinsky_in_the_matrix__repeated__stravinsky_tft_tremble_0',
),
),
),
bach_or_stravinsky_in_the_matrix__repeated_5=ScenarioConfig(
description=('stravinsky fan focal agent meets an imperfectly ' +
'copying partner'),
tags={
'versus_tft',
'half_and_half',
},
substrate='bach_or_stravinsky_in_the_matrix__repeated',
roles=('stravinsky_fan',) + ('bach_fan',),
is_focal=(True,) + (False,),
bots_by_role=immutabledict.immutabledict(
bach_fan=(
'bach_or_stravinsky_in_the_matrix__repeated__bach_tft_0',
'bach_or_stravinsky_in_the_matrix__repeated__bach_tft_tremble_0',
'bach_or_stravinsky_in_the_matrix__repeated__stravinsky_tft_0',
'bach_or_stravinsky_in_the_matrix__repeated__stravinsky_tft_tremble_0',
),
),
),
bach_or_stravinsky_in_the_matrix__repeated_6=ScenarioConfig(
description=('bach fan focal agent meets a turn-taking partner'),
tags={
'turn_taking',
'half_and_half',
},
substrate='bach_or_stravinsky_in_the_matrix__repeated',
roles=('bach_fan',) + ('stravinsky_fan',),
is_focal=(True,) + (False,),
bots_by_role=immutabledict.immutabledict(
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__repeated__turn_taking_initial_bach_0',
'bach_or_stravinsky_in_the_matrix__repeated__turn_taking_initial_stravinsky_0',
),
),
),
bach_or_stravinsky_in_the_matrix__repeated_7=ScenarioConfig(
description=('bach fan focal agent meets a turn-taking partner who ' +
'repeats each goal/resource three times before switching'),
tags={
'turn_taking',
'half_and_half',
},
substrate='bach_or_stravinsky_in_the_matrix__repeated',
roles=('bach_fan',) + ('stravinsky_fan',),
is_focal=(True,) + (False,),
bots_by_role=immutabledict.immutabledict(
stravinsky_fan=(
'bach_or_stravinsky_in_the_matrix__repeated__turn_taking_initial_bach_1',
'bach_or_stravinsky_in_the_matrix__repeated__turn_taking_initial_stravinsky_1',
),
),
),
boat_race__eight_races_0=ScenarioConfig(
description='visiting cooperators',
tags={
'visitor',
},
substrate='boat_race__eight_races',
roles=('default',) * 6,
is_focal=(True,) * 1 + (False,) * 5,
bots_by_role=immutabledict.immutabledict(
default=('boat_race__eight_races__cooperator_0',),
),
),
boat_race__eight_races_1=ScenarioConfig(
description='visiting defectors',
tags={
'visitor',
},
substrate='boat_race__eight_races',
roles=('default',) * 6,
is_focal=(True,) * 1 + (False,) * 5,
bots_by_role=immutabledict.immutabledict(
default=('boat_race__eight_races__defector_0',),
),
),
boat_race__eight_races_2=ScenarioConfig(
description='visited by a population of cooperators',
tags={
'resident',
},
substrate='boat_race__eight_races',
roles=('default',) * 6,
is_focal=(True,) * 5 + (False,) * 1,
bots_by_role=immutabledict.immutabledict(
default=('boat_race__eight_races__cooperator_0',),
),
),
boat_race__eight_races_3=ScenarioConfig(
description='visited by a population of defectors',
tags={
'resident',
},
substrate='boat_race__eight_races',
roles=('default',) * 6,
is_focal=(True,) * 5 + (False,) * 1,
bots_by_role=immutabledict.immutabledict(
default=('boat_race__eight_races__defector_0',),
),
),
boat_race__eight_races_4=ScenarioConfig(
description='find the cooperator partner',
tags={
'partner_choice',
},
substrate='boat_race__eight_races',
roles=('default',) * 5 + ('target',),
is_focal=(True,) * 1 + (False,) * 5,
bots_by_role=immutabledict.immutabledict(
default=('boat_race__eight_races__defector_0',),
target=('boat_race__eight_races__cooperator_0',),
),
),
chemistry__three_metabolic_cycles_0=ScenarioConfig(
description=('resident focal population meets a small mixture of ' +
'background bots'),
tags={
'resident',
},
substrate='chemistry__three_metabolic_cycles',
roles=('default',) * 8,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role={
'default': {
'chemistry__three_metabolic_cycles__blue_0',
'chemistry__three_metabolic_cycles__green_0',
'chemistry__three_metabolic_cycles__yellow_0',
},
},
),
chemistry__three_metabolic_cycles_1=ScenarioConfig(
description='meeting bots running blue and yellow',
tags={
'half_and_half',
},
substrate='chemistry__three_metabolic_cycles',
roles=('default',) * 8,
is_focal=(True,) * 4 + (False,) * 4,
bots_by_role={
'default': {
'chemistry__three_metabolic_cycles__blue_0',
'chemistry__three_metabolic_cycles__yellow_0',
},
},
),
chemistry__three_metabolic_cycles_2=ScenarioConfig(
description='meeting one-sided bots running green',
tags={
'half_and_half',
},
substrate='chemistry__three_metabolic_cycles',
roles=('default',) * 8,
is_focal=(True,) * 4 + (False,) * 4,
bots_by_role={
'default': {
'chemistry__three_metabolic_cycles__green_0',
},
},
),
chemistry__three_metabolic_cycles_3=ScenarioConfig(
description='visit a resident population with mixed specialties',
tags={
'visitor',
},
substrate='chemistry__three_metabolic_cycles',
roles=('default',) * 8,
is_focal=(True,) * 2 + (False,) * 6,
bots_by_role={
'default': {
'chemistry__three_metabolic_cycles__blue_0',
'chemistry__three_metabolic_cycles__yellow_0',
'chemistry__three_metabolic_cycles__green_0',
},
},
),
chemistry__three_metabolic_cycles_with_plentiful_distractors_0=ScenarioConfig(
description=('resident focal population meets a small mixture of ' +
'background bots, must avoid distractor molecules'),
tags={
'resident',
},
substrate='chemistry__three_metabolic_cycles_with_plentiful_distractors',
roles=('default',) * 8,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role={
'default': {
'chemistry__three_metabolic_cycles_with_plentiful_distractors__blue_0',
'chemistry__three_metabolic_cycles_with_plentiful_distractors__green_0',
'chemistry__three_metabolic_cycles_with_plentiful_distractors__yellow_0',
},
},
),
chemistry__three_metabolic_cycles_with_plentiful_distractors_1=ScenarioConfig(
description='meeting bots running blue, avoid distractors',
tags={
'half_and_half',
},
substrate='chemistry__three_metabolic_cycles_with_plentiful_distractors',
roles=('default',) * 8,
is_focal=(True,) * 4 + (False,) * 4,
bots_by_role={
'default': {
'chemistry__three_metabolic_cycles_with_plentiful_distractors__blue_0',
},
},
),
chemistry__three_metabolic_cycles_with_plentiful_distractors_2=ScenarioConfig(
description='meeting bots running green and yellow, avoid distractors',
tags={
'half_and_half',
},
substrate='chemistry__three_metabolic_cycles_with_plentiful_distractors',
roles=('default',) * 8,
is_focal=(True,) * 4 + (False,) * 4,
bots_by_role={
'default': {
'chemistry__three_metabolic_cycles_with_plentiful_distractors__green_0',
'chemistry__three_metabolic_cycles_with_plentiful_distractors__yellow_0',
},
},
),
chemistry__three_metabolic_cycles_with_plentiful_distractors_3=ScenarioConfig(
description=('visit a resident population with mixed specialties and ' +
'avoid distractor molecules'),
tags={
'visitor',
},
substrate='chemistry__three_metabolic_cycles_with_plentiful_distractors',
roles=('default',) * 8,
is_focal=(True,) * 2 + (False,) * 6,
bots_by_role={
'default': {
'chemistry__three_metabolic_cycles_with_plentiful_distractors__blue_0',
'chemistry__three_metabolic_cycles_with_plentiful_distractors__yellow_0',
'chemistry__three_metabolic_cycles_with_plentiful_distractors__green_0',
},
},
),
chemistry__two_metabolic_cycles_0=ScenarioConfig(
description=('resident focal population meets a small mixture of ' +
'background bots'),
tags={
'resident',
},
substrate='chemistry__two_metabolic_cycles',
roles=('default',) * 8,
is_focal=(True,) * 6 + (False,) * 2,
bots_by_role={
'default': {
'chemistry__two_metabolic_cycles__blue_0',
'chemistry__two_metabolic_cycles__green_0',
},
},
),
chemistry__two_metabolic_cycles_1=ScenarioConfig(
description='meeting one-sided bots running blue',
tags={
'half_and_half',
},
substrate='chemistry__two_metabolic_cycles',
roles=('default',) * 8,
is_focal=(True,) * 4 + (False,) * 4,
bots_by_role={
'default': {
'chemistry__two_metabolic_cycles__blue_0',
},
},
),
chemistry__two_metabolic_cycles_2=ScenarioConfig(
description='meeting one-sided bots running green',
tags={
'half_and_half',
},
substrate='chemistry__two_metabolic_cycles',
roles=('default',) * 8,
is_focal=(True,) * 4 + (False,) * 4,
bots_by_role={
'default': {
'chemistry__two_metabolic_cycles__green_0',
},
},
),
chemistry__two_metabolic_cycles_3=ScenarioConfig(
description=('visit a resident background population with mixed ' +
'specialties'),
tags={
'visitor',
},
substrate='chemistry__two_metabolic_cycles',
roles=('default',) * 8,
is_focal=(True,) * 2 + (False,) * 6,
bots_by_role={
'default': {
'chemistry__two_metabolic_cycles__blue_0',
'chemistry__two_metabolic_cycles__green_0',
},
},
),
chemistry__two_metabolic_cycles_with_distractors_0=ScenarioConfig(
description=('resident focal population meets a small mixture of ' +
'background bots, must avoid distractor molecules'),
tags={
'resident',
},
substrate='chemistry__two_metabolic_cycles_with_distractors',
roles=('default',) * 8,
is_focal=(True,) * 6 + (False,) * 2,
bots_by_role={
'default': {
'chemistry__two_metabolic_cycles_with_distractors__blue_0',
'chemistry__two_metabolic_cycles_with_distractors__green_0',
},
},
),
chemistry__two_metabolic_cycles_with_distractors_1=ScenarioConfig(
description=('meeting one-sided bots running blue and avoid ' +
'distractor molecules'),
tags={
'half_and_half',
},
substrate='chemistry__two_metabolic_cycles_with_distractors',
roles=('default',) * 8,
is_focal=(True,) * 4 + (False,) * 4,
bots_by_role={
'default': {
'chemistry__two_metabolic_cycles_with_distractors__blue_0',
},
},
),
chemistry__two_metabolic_cycles_with_distractors_2=ScenarioConfig(
description=('meeting one-sided bots running green and avoid ' +
'distractor molecules'),
tags={
'half_and_half',
},
substrate='chemistry__two_metabolic_cycles_with_distractors',
roles=('default',) * 8,
is_focal=(True,) * 4 + (False,) * 4,
bots_by_role={
'default': {
'chemistry__two_metabolic_cycles_with_distractors__green_0',
},
},
),
chemistry__two_metabolic_cycles_with_distractors_3=ScenarioConfig(
description=('visit a resident background population with mixed ' +
'specialties and avoid distractor molecules'),
tags={
'visitor',
},
substrate='chemistry__two_metabolic_cycles_with_distractors',
roles=('default',) * 8,
is_focal=(True,) * 2 + (False,) * 6,
bots_by_role={
'default': {
'chemistry__two_metabolic_cycles_with_distractors__blue_0',
'chemistry__two_metabolic_cycles_with_distractors__green_0',
},
},
),
chicken_in_the_matrix__arena_0=ScenarioConfig(
description='visiting unconditional dove players',
tags={
'visitor',
'versus_pure_dove_players',
},
substrate='chicken_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'chicken_in_the_matrix__arena__puppet_dove_0',
'chicken_in_the_matrix__arena__puppet_dove_margin_0',
},
},
),
chicken_in_the_matrix__arena_1=ScenarioConfig(
description=('focals are resident and visitors are unconditional ' +
'dove players'),
tags={
'resident',
'versus_pure_dove_players',
},
substrate='chicken_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role={
'default': {
'chicken_in_the_matrix__arena__puppet_dove_0',
'chicken_in_the_matrix__arena__puppet_dove_margin_0',
},
},
),
chicken_in_the_matrix__arena_2=ScenarioConfig(
description=('focals are resident and visitors are unconditional' +
'hawk players'),
tags={
'resident',
'versus_pure_hawk_players',
},
substrate='chicken_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role={
'default': {
'chicken_in_the_matrix__arena__puppet_hawk_0',
'chicken_in_the_matrix__arena__puppet_hawk_margin_0',
},
},
),
chicken_in_the_matrix__arena_3=ScenarioConfig(
description=('visiting a population of hair-trigger grim ' +
'reciprocator bots who initially cooperate but, if ' +
'defected on once, will retaliate by defecting in all ' +
'future interactions'),
tags={
'visitor',
'reciprocity',
},
substrate='chicken_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'chicken_in_the_matrix__arena__puppet_grim_one_strike_0',
'chicken_in_the_matrix__arena__puppet_grim_one_strike_margin_0',
},
},
),
chicken_in_the_matrix__arena_4=ScenarioConfig(
description=('visiting a population of two-strikes grim ' +
'reciprocator bots who initially cooperate but, if ' +
'defected on twice, will retaliate by defecting in all ' +
'future interactions'),
tags={
'visitor',
'reciprocity',
},
substrate='chicken_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'chicken_in_the_matrix__arena__puppet_grim_two_strikes_0',
'chicken_in_the_matrix__arena__puppet_grim_two_strikes_margin_0',
},
},
),
chicken_in_the_matrix__arena_5=ScenarioConfig(
description=(
'visiting a mixed population of k-strikes grim reciprocator bots ' +
'with k values from 1 to 3, they initially cooperate but, if ' +
'defected on k times, they retaliate in all future interactions'
),
tags={
'visitor',
'reciprocity',
},
substrate='chicken_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 3 + (False,) * 5,
bots_by_role={
'default': {
'chicken_in_the_matrix__arena__puppet_grim_one_strike_0',
'chicken_in_the_matrix__arena__puppet_grim_one_strike_margin_0',
'chicken_in_the_matrix__arena__puppet_grim_three_strikes_0',
'chicken_in_the_matrix__arena__puppet_grim_three_strikes_margin_0',
'chicken_in_the_matrix__arena__puppet_grim_two_strikes_0',
'chicken_in_the_matrix__arena__puppet_grim_two_strikes_margin_0',
},
},
),
chicken_in_the_matrix__arena_6=ScenarioConfig(
description='visiting a mixture of pure hawk and pure dove players',
tags={
'visitor',
'versus_pure_all',
},
substrate='chicken_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 3 + (False,) * 5,
bots_by_role={
'default': {
'chicken_in_the_matrix__arena__puppet_dove_0',
'chicken_in_the_matrix__arena__puppet_dove_margin_0',
'chicken_in_the_matrix__arena__puppet_hawk_0',
'chicken_in_the_matrix__arena__puppet_hawk_margin_0',
},
},
),
chicken_in_the_matrix__repeated_0=ScenarioConfig(
description='partner may play either hawk or dove',
tags={
'half_and_half',
'versus_pure_all',
},
substrate='chicken_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'chicken_in_the_matrix__repeated__puppet_dove_margin_0',
'chicken_in_the_matrix__repeated__puppet_dove_margin_1',
'chicken_in_the_matrix__repeated__puppet_hawk_margin_0',
'chicken_in_the_matrix__repeated__puppet_hawk_margin_1',
},
},
),
chicken_in_the_matrix__repeated_1=ScenarioConfig(
description='partner typically plays dove',
tags={
'half_and_half',
'versus_pure_dove',
},
substrate='chicken_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'chicken_in_the_matrix__repeated__puppet_dove_margin_0',
'chicken_in_the_matrix__repeated__puppet_dove_margin_1',
},
},
),
chicken_in_the_matrix__repeated_2=ScenarioConfig(
description='partner typically plays hawk',
tags={
'half_and_half',
'versus_pure_hawk',
},
substrate='chicken_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'chicken_in_the_matrix__repeated__puppet_hawk_margin_0',
'chicken_in_the_matrix__repeated__puppet_hawk_margin_1',
},
},
),
chicken_in_the_matrix__repeated_3=ScenarioConfig(
description=('partner is a hair-trigger grim reciprocator, i.e. one ' +
'who initially cooperates but, if defected on once, will' +
' retaliate by defecting forever after'),
tags={
'half_and_half',
'reciprocity',
},
substrate='chicken_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'chicken_in_the_matrix__repeated__puppet_grim_one_strike_margin_0',
'chicken_in_the_matrix__repeated__puppet_grim_one_strike_margin_1',
},
},
),
chicken_in_the_matrix__repeated_4=ScenarioConfig(
description=('partner is a two-strikes grim reciprocator, i.e. one ' +
'who initially cooperates, but if defected on twice, ' +
'will retaliate by defecting forever after'),
tags={
'half_and_half',
'reciprocity',
},
substrate='chicken_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'chicken_in_the_matrix__repeated__puppet_grim_two_strikes_margin_0',
'chicken_in_the_matrix__repeated__puppet_grim_two_strikes_margin_1',
},
},
),
chicken_in_the_matrix__repeated_5=ScenarioConfig(
description='partner is a tit-for-tat conditional cooperator',
tags={
'half_and_half',
'reciprocity',
},
substrate='chicken_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'chicken_in_the_matrix__repeated__puppet_tft_margin_0',
'chicken_in_the_matrix__repeated__puppet_tft_margin_1',
},
},
),
chicken_in_the_matrix__repeated_6=ScenarioConfig(
description=('partner is a tit-for-tat conditional cooperator who ' +
'occasionally plays hawk instead of dove'),
tags={
'half_and_half',
'reciprocity',
'forgiveness',
},
substrate='chicken_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'chicken_in_the_matrix__repeated__puppet_tft_tremble_margin_0',
'chicken_in_the_matrix__repeated__puppet_tft_tremble_margin_1',
},
},
),
chicken_in_the_matrix__repeated_7=ScenarioConfig(
description='partner plays dove for a while then switches to hawk',
tags={
'half_and_half',
'flexibility',
},
substrate='chicken_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'chicken_in_the_matrix__repeated__puppet_flip_0',
},
},
),
chicken_in_the_matrix__repeated_8=ScenarioConfig(
description=('partner tries to take advantage of the focal player ' +
'by playing hawk, but if punished, partner then ' +
'switches to tit-for-tat conditional cooperation'),
tags={
'half_and_half',
'teaching',
'reciprocity',
'forgiveness',
},
substrate='chicken_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'chicken_in_the_matrix__repeated__puppet_corrigible_0',
},
},
),
chicken_in_the_matrix__repeated_9=ScenarioConfig(
description=('partner tries to take advantage of the focal player ' +
'by playing hawk, but if punished, partner then ' +
'switches to noisy tit-for-tat conditional cooperation'),
tags={
'half_and_half',
'teaching',
'reciprocity',
'forgiveness',
},
substrate='chicken_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'chicken_in_the_matrix__repeated__puppet_corrigible_tremble_0',
},
},
),
clean_up_0=ScenarioConfig(
description='visiting an altruistic population',
tags={
'versus_cleaners',
'visitor',
},
substrate='clean_up',
roles=('default',) * 7,
is_focal=(True,) * 3 + (False,) * 4,
bots_by_role={
'default': {
'clean_up__cleaner_0',
'clean_up__cleaner_1',
},
},
),
clean_up_1=ScenarioConfig(
description='focals are resident and visitors ride free',
tags={
'resident',
'versus_consumers',
},
substrate='clean_up',
roles=('default',) * 7,
is_focal=(True,) * 4 + (False,) * 3,
bots_by_role={
'default': {
'clean_up__consumer_0',
'clean_up__consumer_1',
},
},
),
clean_up_2=ScenarioConfig(
description='visiting a turn-taking population that cleans first',
tags={
'turn_taking',
'versus_puppet',
'visitor',
},
substrate='clean_up',
roles=('default',) * 7,
is_focal=(True,) * 3 + (False,) * 4,
bots_by_role={
'default': {'clean_up__puppet_alternator_first_cleans_0',},
},
),
clean_up_3=ScenarioConfig(
description='visiting a turn-taking population that eats first',
tags={
'turn_taking',
'versus_puppet',
'visitor',
},
substrate='clean_up',
roles=('default',) * 7,
is_focal=(True,) * 3 + (False,) * 4,
bots_by_role={
'default': {'clean_up__puppet_alternator_first_eats_0',},
},
),
clean_up_4=ScenarioConfig(
description='focals are visited by one reciprocator',
tags={
'resident',
'versus_puppet',
},
substrate='clean_up',
roles=('default',) * 7,
is_focal=(True,) * 6 + (False,) * 1,
bots_by_role={
'default': {'clean_up__puppet_low_threshold_reciprocator_0',},
},
),
clean_up_5=ScenarioConfig(
description='focals are visited by two suspicious reciprocators',
tags={
'resident',
'versus_puppet',
},
substrate='clean_up',
roles=('default',) * 7,
is_focal=(True,) * 5 + (False,) * 2,
bots_by_role={
'default': {'clean_up__puppet_high_threshold_reciprocator_0',},
},
),
clean_up_6=ScenarioConfig(
description='focals are visited by one suspicious reciprocator',
tags={
'resident',
'versus_puppet',
},
substrate='clean_up',
roles=('default',) * 7,
is_focal=(True,) * 6 + (False,) * 1,
bots_by_role={
'default': {'clean_up__puppet_high_threshold_reciprocator_0',},
},
),
clean_up_7=ScenarioConfig(
description='focals visit resident group of suspicious reciprocators',
tags={
'visitor',
'versus_puppet',
},
substrate='clean_up',
roles=('default',) * 7,
is_focal=(True,) * 2 + (False,) * 5,
bots_by_role={
'default': {'clean_up__puppet_high_threshold_reciprocator_0',},
},
),
clean_up_8=ScenarioConfig(
description='focals are visited by one nice reciprocator',
tags={
'resident',
'versus_puppet',
},
substrate='clean_up',
roles=('default',) * 7,
is_focal=(True,) * 6 + (False,) * 1,
bots_by_role={
'default': {'clean_up__puppet_nice_low_threshold_reciprocator_0',},
},
),
coins_0=ScenarioConfig(
description='partner is either a pure cooperator or a pure defector',
tags={
'versus_pure_all',
'half_and_half',
},
substrate='coins',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {'coins__puppet_cooperator_0',
'coins__puppet_defector_0',},
},
),
coins_1=ScenarioConfig(
description=('partner is a high-threshold (generous) reciprocator'),
tags={
'versus_reciprocator',
'half_and_half',
},
substrate='coins',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {'coins__puppet_three_strikes_reciprocator_0',},
},
),
coins_2=ScenarioConfig(
description=('partner is a low-threshold (harsh) reciprocator'),
tags={
'versus_reciprocator',
'half_and_half',
},
substrate='coins',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {'coins__puppet_one_strike_reciprocator_0',},
},
),
coins_3=ScenarioConfig(
description=('partner is a high-threshold (generous) strong ' +
'reciprocator'),
tags={
'versus_reciprocator',
'half_and_half',
},
substrate='coins',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {'coins__puppet_three_strikes_strong_reciprocator_0',},
},
),
coins_4=ScenarioConfig(
description=('partner is a low-threshold (harsh) strong reciprocator'),
tags={
'versus_reciprocator',
'half_and_half',
},
substrate='coins',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {'coins__puppet_one_strike_strong_reciprocator_0',},
},
),
coins_5=ScenarioConfig(
description='partner is a cooperator',
tags={
'versus_pure_cooperator',
'half_and_half',
},
substrate='coins',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {'coins__puppet_cooperator_0',},
},
),
coins_6=ScenarioConfig(
description='partner is a defector',
tags={
'versus_pure_defector',
'half_and_half',
},
substrate='coins',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {'coins__puppet_defector_0',},
},
),
collaborative_cooking__asymmetric_0=ScenarioConfig(
description='collaborate with a skilled chef',
tags={
'half_and_half',
},
substrate='collaborative_cooking__asymmetric',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'collaborative_cooking__asymmetric__chef_0',
'collaborative_cooking__asymmetric__chef_1',
},
},
),
collaborative_cooking__asymmetric_1=ScenarioConfig(
description='collaborate with a semi-skilled apprentice chef',
tags={
'half_and_half',
},
substrate='collaborative_cooking__asymmetric',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'collaborative_cooking__asymmetric__apprentice_0',
'collaborative_cooking__asymmetric__apprentice_1',
},
},
),
collaborative_cooking__asymmetric_2=ScenarioConfig(
description='succeed despite an unhelpful partner',
tags={
'half_and_half',
'versus_noop',
},
substrate='collaborative_cooking__asymmetric',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={'default': {'noop_bot'}},
),
collaborative_cooking__circuit_0=ScenarioConfig(
description='collaborate with a skilled chef',
tags={
'half_and_half',
},
substrate='collaborative_cooking__circuit',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'collaborative_cooking__circuit__chef_0',
'collaborative_cooking__circuit__chef_1',
},
},
),
collaborative_cooking__circuit_1=ScenarioConfig(
description='collaborate with a semi-skilled apprentice chef',
tags={
'half_and_half',
},
substrate='collaborative_cooking__circuit',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'collaborative_cooking__circuit__apprentice_0',
'collaborative_cooking__circuit__apprentice_1',
},
},
),
collaborative_cooking__circuit_2=ScenarioConfig(
description='succeed despite an unhelpful partner',
tags={
'half_and_half',
'versus_noop',
},
substrate='collaborative_cooking__circuit',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={'default': {'noop_bot'}},
),
collaborative_cooking__cramped_0=ScenarioConfig(
description='collaborate with a skilled chef',
tags={
'half_and_half',
},
substrate='collaborative_cooking__cramped',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'collaborative_cooking__cramped__chef_0',
'collaborative_cooking__cramped__chef_1',
},
},
),
collaborative_cooking__cramped_1=ScenarioConfig(
description='collaborate with a semi-skilled apprentice chef',
tags={
'half_and_half',
},
substrate='collaborative_cooking__cramped',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'collaborative_cooking__cramped__apprentice_0',
'collaborative_cooking__cramped__apprentice_1',
},
},
),
collaborative_cooking__cramped_2=ScenarioConfig(
description='succeed despite an unhelpful partner',
tags={
'half_and_half',
'versus_noop',
},
substrate='collaborative_cooking__cramped',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={'default': {'noop_bot'}},
),
collaborative_cooking__crowded_0=ScenarioConfig(
description=(
'collaborate with an independent chef who expects others to get ' +
'out of their way'),
tags={
'resident',
},
substrate='collaborative_cooking__crowded',
roles=('default',) * 9,
is_focal=(True,) * 8 + (False,),
bots_by_role={
'default': {
'collaborative_cooking__crowded__independent_chef_0',
},
},
),
collaborative_cooking__crowded_1=ScenarioConfig(
description=(
'collaborate with several chefs who can work together, but are ' +
'not very good at doing so'),
tags={
'resident',
},
substrate='collaborative_cooking__crowded',
roles=('default',) * 9,
is_focal=(True,) * 6 + (False,) * 3,
bots_by_role={
'default': {
'collaborative_cooking__crowded__robust_chef_0',
},
},
),
collaborative_cooking__crowded_2=ScenarioConfig(
description=(
'no assistance from an unhelpful visiting noop bot'),
tags={
'resident',
'versus_noop',
},
substrate='collaborative_cooking__crowded',
roles=('default',) * 9,
is_focal=(True,) * 8 + (False,),
bots_by_role={'default': {'noop_bot'}},
),
collaborative_cooking__figure_eight_0=ScenarioConfig(
description=(
'collaborate with an independent chef who expects others to get ' +
'out of their way'),
tags={
'resident',
},
substrate='collaborative_cooking__figure_eight',
roles=('default',) * 6,
is_focal=(True,) * 5 + (False,),
bots_by_role={
'default': {
'collaborative_cooking__figure_eight__independent_chef_0',
},
},
),
collaborative_cooking__figure_eight_1=ScenarioConfig(
description=(
'collaborate with two chefs who can work together, but are ' +
'not very good at doing so'),
tags={
'resident',
},
substrate='collaborative_cooking__figure_eight',
roles=('default',) * 6,
is_focal=(True,) * 4 + (False,) * 2,
bots_by_role={
'default': {
'collaborative_cooking__figure_eight__robust_chef_0',
},
},
),
collaborative_cooking__figure_eight_2=ScenarioConfig(
description=(
'no assistance from am unhelpful visiting noop bot'),
tags={
'resident',
'versus_noop',
},
substrate='collaborative_cooking__figure_eight',
roles=('default',) * 6,
is_focal=(True,) * 5 + (False,),
bots_by_role={'default': {'noop_bot'}},
),
collaborative_cooking__forced_0=ScenarioConfig(
description='collaborate with a skilled chef',
tags={
'half_and_half',
},
substrate='collaborative_cooking__forced',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'collaborative_cooking__forced__chef_0',
'collaborative_cooking__forced__chef_1',
},
},
),
collaborative_cooking__forced_1=ScenarioConfig(
description='collaborate with a semi-skilled apprentice chef',
tags={
'half_and_half',
},
substrate='collaborative_cooking__forced',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'collaborative_cooking__forced__apprentice_0',
'collaborative_cooking__forced__apprentice_1',
},
},
),
collaborative_cooking__ring_0=ScenarioConfig(
description='collaborate with a skilled chef',
tags={
'half_and_half',
},
substrate='collaborative_cooking__ring',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'collaborative_cooking__ring__chef_0',
'collaborative_cooking__ring__chef_1',
},
},
),
collaborative_cooking__ring_1=ScenarioConfig(
description='collaborate with a semi-skilled apprentice chef',
tags={
'half_and_half',
},
substrate='collaborative_cooking__ring',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'collaborative_cooking__ring__apprentice_0',
'collaborative_cooking__ring__apprentice_1',
},
},
),
commons_harvest__closed_0=ScenarioConfig(
description='focals visit pacifist bots who harvest unsustainably',
tags={
'visitor',
'ownership',
},
substrate='commons_harvest__closed',
roles=('default',) * 7,
is_focal=(True,) * 2 + (False,) * 5,
bots_by_role={
'default': {'commons_harvest__closed__pacifist_0',
'commons_harvest__closed__pacifist_1',
'commons_harvest__closed__pacifist_2',},
},
),
commons_harvest__closed_1=ScenarioConfig(
description=('focals are resident and visited by pacifist bots who ' +
'harvest unsustainably'),
tags={
'resident',
'ownership',
},
substrate='commons_harvest__closed',
roles=('default',) * 7,
is_focal=(True,) * 5 + (False,) * 2,
bots_by_role={
'default': {'commons_harvest__closed__pacifist_0',
'commons_harvest__closed__pacifist_1',
'commons_harvest__closed__pacifist_2',},
},
),
commons_harvest__closed_2=ScenarioConfig(
description=('focals visit bots who zap and harvest sustainably if ' +
'they get a chance'),
tags={
'visitor',
'ownership',
},
substrate='commons_harvest__closed',
roles=('default',) * 7,
is_focal=(True,) * 2 + (False,) * 5,
bots_by_role={
'default': {'commons_harvest__closed__free_0',
'commons_harvest__closed__free_1',
'commons_harvest__closed__free_2',
'commons_harvest__closed__free_3'},
},
),
commons_harvest__closed_3=ScenarioConfig(
description=('focals are resident, and are visited by bots who zap ' +
'and harvest sustainably if they get a chance'),
tags={
'resident',
'ownership',
},
substrate='commons_harvest__closed',
roles=('default',) * 7,
is_focal=(True,) * 5 + (False,) * 2,
bots_by_role={
'default': {'commons_harvest__closed__free_0',
'commons_harvest__closed__free_1',
'commons_harvest__closed__free_2',
'commons_harvest__closed__free_3'},
},
),
commons_harvest__open_0=ScenarioConfig(
description=('focals are resident and visited by two bots who zap ' +
'and harvest unsustainably'),
tags={
'resident',
},
substrate='commons_harvest__open',
roles=('default',) * 7,
is_focal=(True,) * 5 + (False,) * 2,
bots_by_role={
'default': {'commons_harvest__open__free_0',
'commons_harvest__open__free_1',},
},
),
commons_harvest__open_1=ScenarioConfig(
description=('focals are resident and visited by two pacifists who ' +
'harvest unsustainably'),
tags={
'resident',
},
substrate='commons_harvest__open',
roles=('default',) * 7,
is_focal=(True,) * 5 + (False,) * 2,
bots_by_role={
'default': {'commons_harvest__open__pacifist_0',
'commons_harvest__open__pacifist_1'},
},
),
commons_harvest__partnership_0=ScenarioConfig(
description='meeting good partners',
tags={
'visitor',
'dyadic_trust',
'ownership',
},
substrate='commons_harvest__partnership',
roles=('default',) * 7,
is_focal=(True,) * 1 + (False,) * 6,
bots_by_role={
'default': {'commons_harvest__partnership__good_partner_0',
'commons_harvest__partnership__good_partner_1',
'commons_harvest__partnership__good_partner_2',},
},
),
commons_harvest__partnership_1=ScenarioConfig(
description='focals are resident and visitors are good partners',
tags={
'resident',
'dyadic_trust',
'ownership',
},
substrate='commons_harvest__partnership',
roles=('default',) * 7,
is_focal=(True,) * 5 + (False,) * 2,
bots_by_role={
'default': {'commons_harvest__partnership__good_partner_0',
'commons_harvest__partnership__good_partner_1',
'commons_harvest__partnership__good_partner_2',},
},
),
commons_harvest__partnership_2=ScenarioConfig(
description=('focals visit zappers who harvest sustainably but lack ' +
'trust'),
tags={
'visitor',
'dyadic_trust',
'ownership',
},
substrate='commons_harvest__partnership',
roles=('default',) * 7,
is_focal=(True,) * 1 + (False,) * 6,
bots_by_role={
'default': {'commons_harvest__partnership__sustainable_fighter_0',
'commons_harvest__partnership__sustainable_fighter_1',},
},
),
commons_harvest__partnership_3=ScenarioConfig(
description=('focals are resident and visited by zappers who harvest ' +
'sustainably but lack trust'),
tags={
'resident',
'dyadic_trust',
'ownership',
},
substrate='commons_harvest__partnership',
roles=('default',) * 7,
is_focal=(True,) * 5 + (False,) * 2,
bots_by_role={
'default': {'commons_harvest__partnership__sustainable_fighter_0',
'commons_harvest__partnership__sustainable_fighter_1',},
},
),
commons_harvest__partnership_4=ScenarioConfig(
description='focals visit pacifists who do not harvest sustainably',
tags={
'visitor',
'dyadic_trust',
'ownership',
},
substrate='commons_harvest__partnership',
roles=('default',) * 7,
is_focal=(True,) * 2 + (False,) * 5,
bots_by_role={
'default': {'commons_harvest__partnership__pacifist_0',
'commons_harvest__partnership__pacifist_1',
'commons_harvest__partnership__pacifist_2',},
},
),
commons_harvest__partnership_5=ScenarioConfig(
description='focals visit zappers who do not harvest sustainably',
tags={
'visitor',
'dyadic_trust',
'ownership',
},
substrate='commons_harvest__partnership',
roles=('default',) * 7,
is_focal=(True,) * 2 + (False,) * 5,
bots_by_role={
'default': {'commons_harvest__partnership__free_0',
'commons_harvest__partnership__free_1',
'commons_harvest__partnership__free_2',},
},
),
coop_mining_0=ScenarioConfig(
description='visiting cooperators',
tags={
'visitor',
},
substrate='coop_mining',
roles=('default',) * 6,
is_focal=(True,) * 1 + (False,) * 5,
bots_by_role={
'default': {'coop_mining__cooperator_0',},
},
),
coop_mining_1=ScenarioConfig(
description='visiting residents that extract both ores',
tags={
'visitor',
},
substrate='coop_mining',
roles=('default',) * 6,
is_focal=(True,) * 1 + (False,) * 5,
bots_by_role={
'default': {'coop_mining__mixed_0',},
},
),
coop_mining_2=ScenarioConfig(
description='visiting defectors',
tags={
'visitor',
},
substrate='coop_mining',
roles=('default',) * 6,
is_focal=(True,) * 1 + (False,) * 5,
bots_by_role={
'default': {'coop_mining__defector_0'},
},
),
coop_mining_3=ScenarioConfig(
description='residents visited by a cooperator',
tags={
'resident',
},
substrate='coop_mining',
roles=('default',) * 6,
is_focal=(True,) * 5 + (False,) * 1,
bots_by_role={
'default': {'coop_mining__cooperator_0',},
},
),
coop_mining_4=ScenarioConfig(
description='residents visited by a defector',
tags={
'resident',
},
substrate='coop_mining',
roles=('default',) * 6,
is_focal=(True,) * 5 + (False,) * 1,
bots_by_role={
'default': {'coop_mining__defector_0',},
},
),
coop_mining_5=ScenarioConfig(
description='find the cooperator partner',
tags={
'partner_choice',
'visitor',
},
substrate='coop_mining',
roles=('default',) * 5 + ('target',),
is_focal=(True,) * 1 + (False,) * 5,
bots_by_role={
'default': {'coop_mining__defector_0',},
'target': {'coop_mining__cooperator_0',
'coop_mining__mixed_0',},
},
),
daycare_0=ScenarioConfig(
description='meeting a helpful parent',
tags={
'half_and_half',
},
substrate='daycare',
roles=('child',) + ('parent',),
is_focal=(True,) + (False,),
bots_by_role={
'parent': {'daycare__helping_parent_0',},
},
),
daycare_1=ScenarioConfig(
description='meeting a child who points to what they want',
tags={
'half_and_half',
},
substrate='daycare',
roles=('child',) + ('parent',),
is_focal=(False,) + (True,),
bots_by_role={
'child': {'daycare__pointing_child_0',},
},
),
daycare_2=ScenarioConfig(
description='meeting an unhelpful parent',
tags={
'half_and_half',
},
substrate='daycare',
roles=('child',) + ('parent',),
is_focal=(True,) + (False,),
bots_by_role={
'parent': {'daycare__foraging_parent_0',},
},
),
daycare_3=ScenarioConfig(
description='meeting an independent child',
tags={
'half_and_half',
},
substrate='daycare',
roles=('child',) + ('parent',),
is_focal=(False,) + (True,),
bots_by_role={
'child': {'daycare__foraging_child_0',},
},
),
externality_mushrooms__dense_0=ScenarioConfig(
description='visiting unconditional hihe (cooperator) players',
tags={
'visitor',
},
substrate='externality_mushrooms__dense',
roles=('default',) * 5,
is_focal=(True,) + (False,) * 4,
bots_by_role={
'default': {'externality_mushrooms__dense__puppet_hihe_0',},
},
),
externality_mushrooms__dense_1=ScenarioConfig(
description='visiting unconditional fize (defector) players',
tags={
'visitor',
},
substrate='externality_mushrooms__dense',
roles=('default',) * 5,
is_focal=(True,) + (False,) * 4,
bots_by_role={
'default': {'externality_mushrooms__dense__puppet_fize_0',},
},
),
externality_mushrooms__dense_2=ScenarioConfig(
description=('focals are resident and joined by two unconditional ' +
'hihe (cooperator) players'),
tags={
'resident',
},
substrate='externality_mushrooms__dense',
roles=('default',) * 5,
is_focal=(True,) * 3 + (False,) * 2,
bots_by_role={
'default': {'externality_mushrooms__dense__puppet_hihe_0',},
},
),
externality_mushrooms__dense_3=ScenarioConfig(
description=('focals are resident and joined by two unconditional ' +
'fize (defector) players'),
tags={
'resident',
},
substrate='externality_mushrooms__dense',
roles=('default',) * 5,
is_focal=(True,) * 3 + (False,) * 2,
bots_by_role={
'default': {'externality_mushrooms__dense__puppet_fize_0',},
},
),
factory_commons__either_or_0=ScenarioConfig(
description='visiting a sustainable background population',
tags={
'visitor',
},
substrate='factory_commons__either_or',
roles=('default',) * 3,
is_focal=(True,) * 1 + (False,) * 2,
bots_by_role={
'default': {'factory_commons__either_or__sustainable_0',
'factory_commons__either_or__sustainable_1',
'factory_commons__either_or__sustainable_2',},
},
),
factory_commons__either_or_1=ScenarioConfig(
description='visiting an unsustainable background population',
tags={
'visitor',
},
substrate='factory_commons__either_or',
roles=('default',) * 3,
is_focal=(True,) * 1 + (False,) * 2,
bots_by_role={
'default': {'factory_commons__either_or__unsustainable_0',
'factory_commons__either_or__unsustainable_1',
'factory_commons__either_or__unsustainable_2',},
},
),
factory_commons__either_or_2=ScenarioConfig(
description='resident focal agents are joined by a sustainable visitor',
tags={
'resident',
},
substrate='factory_commons__either_or',
roles=('default',) * 3,
is_focal=(True,) * 2 + (False,) * 1,
bots_by_role={
'default': {'factory_commons__either_or__sustainable_0',
'factory_commons__either_or__sustainable_1',
'factory_commons__either_or__sustainable_2',},
},
),
factory_commons__either_or_3=ScenarioConfig(
description=('resident focal agents are joined by an unsustainable ' +
'visitor'),
tags={
'resident',
},
substrate='factory_commons__either_or',
roles=('default',) * 3,
is_focal=(True,) * 2 + (False,) * 1,
bots_by_role={
'default': {'factory_commons__either_or__unsustainable_0',
'factory_commons__either_or__unsustainable_1',
'factory_commons__either_or__unsustainable_2',},
},
),
fruit_market__concentric_rivers_0=ScenarioConfig(
description='all apple farmers are focal',
tags={
'half_and_half',
},
substrate='fruit_market__concentric_rivers',
roles=('apple_farmer',) * 8 + ('banana_farmer',) * 8,
is_focal=(True,) * 8 + (False,) * 8,
bots_by_role={
'banana_farmer': {
'fruit_market__concentric_rivers__banana_farmer_0',
'fruit_market__concentric_rivers__banana_farmer_1',
'fruit_market__concentric_rivers__banana_farmer_2',
},
},
),
fruit_market__concentric_rivers_1=ScenarioConfig(
description='all banana farmers are focal',
tags={
'half_and_half',
},
substrate='fruit_market__concentric_rivers',
roles=('apple_farmer',) * 8 + ('banana_farmer',) * 8,
is_focal=(False,) * 8 + (True,) * 8,
bots_by_role={
'apple_farmer': {
'fruit_market__concentric_rivers__apple_farmer_0',
'fruit_market__concentric_rivers__apple_farmer_1',
'fruit_market__concentric_rivers__apple_farmer_2',
},
},
),
fruit_market__concentric_rivers_2=ScenarioConfig(
description='one focal apple farmer visits a background economy',
tags={
'visitor',
},
substrate='fruit_market__concentric_rivers',
roles=('apple_farmer',) * 8 + ('banana_farmer',) * 8,
is_focal=(True,) * 1 + (False,) * 15,
bots_by_role={
'apple_farmer': {
'fruit_market__concentric_rivers__apple_farmer_0',
'fruit_market__concentric_rivers__apple_farmer_1',
'fruit_market__concentric_rivers__apple_farmer_2',
},
'banana_farmer': {
'fruit_market__concentric_rivers__banana_farmer_0',
'fruit_market__concentric_rivers__banana_farmer_1',
'fruit_market__concentric_rivers__banana_farmer_2',
},
},
),
fruit_market__concentric_rivers_3=ScenarioConfig(
description='one focal banana farmer visits a background economy',
tags={
'visitor',
},
substrate='fruit_market__concentric_rivers',
roles=('banana_farmer',) * 8 + ('apple_farmer',) * 8,
is_focal=(True,) * 1 + (False,) * 15,
bots_by_role={
'apple_farmer': {
'fruit_market__concentric_rivers__apple_farmer_0',
'fruit_market__concentric_rivers__apple_farmer_1',
'fruit_market__concentric_rivers__apple_farmer_2',
},
'banana_farmer': {
'fruit_market__concentric_rivers__banana_farmer_0',
'fruit_market__concentric_rivers__banana_farmer_1',
'fruit_market__concentric_rivers__banana_farmer_2',
},
},
),
gift_refinements_0=ScenarioConfig(
description='visiting cooperators',
tags={
'visitor',
},
substrate='gift_refinements',
roles=('default',) * 6,
is_focal=(True,) * 1 + (False,) * 5,
bots_by_role=immutabledict.immutabledict(
default=('gift_refinements__cooperator_0',),
),
),
gift_refinements_1=ScenarioConfig(
description='visiting defectors',
tags={
'visitor',
},
substrate='gift_refinements',
roles=('default',) * 6,
is_focal=(True,) * 1 + (False,) * 5,
bots_by_role=immutabledict.immutabledict(
default=('gift_refinements__defector_0',),
),
),
gift_refinements_2=ScenarioConfig(
description='visited by a cooperator',
tags={
'resident',
},
substrate='gift_refinements',
roles=('default',) * 6,
is_focal=(True,) * 5 + (False,) * 1,
bots_by_role=immutabledict.immutabledict(
default=('gift_refinements__cooperator_0',),
),
),
gift_refinements_3=ScenarioConfig(
description='visited by a defector',
tags={
'resident',
},
substrate='gift_refinements',
roles=('default',) * 6,
is_focal=(True,) * 5 + (False,) * 1,
bots_by_role=immutabledict.immutabledict(
default=('gift_refinements__defector_0',),
),
),
gift_refinements_4=ScenarioConfig(
description='find the cooperator partner',
tags={
'partner_choice',
},
substrate='gift_refinements',
roles=('default',) * 5 + ('target',),
is_focal=(True,) * 1 + (False,) * 5,
bots_by_role=immutabledict.immutabledict(
default=('gift_refinements__defector_0',),
target=('gift_refinements__cooperator_0',),
),
),
gift_refinements_5=ScenarioConfig(
description='visiting extreme cooperators',
tags={
'visitor',
},
substrate='gift_refinements',
roles=('default',) * 6,
is_focal=(True,) * 1 + (False,) * 5,
bots_by_role=immutabledict.immutabledict(
default=('gift_refinements__extreme_cooperator_0',),
),
),
gift_refinements_6=ScenarioConfig(
description='visited by an extreme cooperator',
tags={
'resident',
},
substrate='gift_refinements',
roles=('default',) * 6,
is_focal=(True,) * 5 + (False,) * 1,
bots_by_role=immutabledict.immutabledict(
default=('gift_refinements__extreme_cooperator_0',),
),
),
hidden_agenda_0=ScenarioConfig(
description=(
'A focal population is visited by impostor which hunts crewmates'),
tags={
'resident',
},
substrate='hidden_agenda',
roles=('crewmate',) * 4 + ('impostor',),
is_focal=(True,) * 4 + (False,) * 1,
bots_by_role=immutabledict.immutabledict(
impostor=('hidden_agenda__hunter_impostor_0',),
),
),
hidden_agenda_1=ScenarioConfig(
description='A focal impostor visits background crew who collect gems',
tags={
'visitor',
'learned_teamwork',
},
substrate='hidden_agenda',
roles=('crewmate',) * 4 + ('impostor',),
is_focal=(False,) * 4 + (True,) * 1,
bots_by_role=immutabledict.immutabledict(
crewmate=('hidden_agenda__collector_crew_0',
'hidden_agenda__collector_crew_1'),
),
),
hidden_agenda_2=ScenarioConfig(
description=(
'Focal crew visits background impostor, and crew who collect gems'),
tags={
'ad_hoc_teamwork',
'half_and_half',
},
substrate='hidden_agenda',
roles=('crewmate',) * 4 + ('impostor',),
is_focal=(True,) * 2 + (False,) * 3,
bots_by_role=immutabledict.immutabledict(
crewmate=('hidden_agenda__collector_crew_0',
'hidden_agenda__collector_crew_1'),
impostor=('hidden_agenda__hunter_impostor_0',),
),
),
paintball__capture_the_flag_0=ScenarioConfig(
description='focal team versus shaped bot team',
tags={
'half_and_half',
'learned_teamwork',
},
substrate='paintball__capture_the_flag',
roles=('default',) * 8,
is_focal=(True, False) * 4,
bots_by_role={
'default': {'paintball__capture_the_flag__shaped_bot_0',
'paintball__capture_the_flag__shaped_bot_1',
'paintball__capture_the_flag__shaped_bot_2',
'paintball__capture_the_flag__shaped_bot_3',},
},
),
paintball__capture_the_flag_1=ScenarioConfig(
description='ad hoc teamwork with shaped bots',
tags={
'ad_hoc_teamwork',
'visitor',
},
substrate='paintball__capture_the_flag',
roles=('default',) * 8,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role={
'default': {'paintball__capture_the_flag__shaped_bot_0',
'paintball__capture_the_flag__shaped_bot_1',
'paintball__capture_the_flag__shaped_bot_2',
'paintball__capture_the_flag__shaped_bot_3',},
},
),
paintball__king_of_the_hill_0=ScenarioConfig(
description='focal team versus default bot team',
tags={
'half_and_half',
'learned_teamwork',
},
substrate='paintball__king_of_the_hill',
roles=('default',) * 8,
is_focal=(True, False) * 4,
bots_by_role={
'default': {'paintball__king_of_the_hill__free_0',
'paintball__king_of_the_hill__free_1',
'paintball__king_of_the_hill__free_2',},
},
),
paintball__king_of_the_hill_1=ScenarioConfig(
description='focal team versus shaped bot team',
tags={
'half_and_half',
'learned_teamwork',
},
substrate='paintball__king_of_the_hill',
roles=('default',) * 8,
is_focal=(True, False) * 4,
bots_by_role={
'default': {'paintball__king_of_the_hill__spawn_camper_0',
'paintball__king_of_the_hill__spawn_camper_1',
'paintball__king_of_the_hill__spawn_camper_2',
'paintball__king_of_the_hill__spawn_camper_3',},
},
),
paintball__king_of_the_hill_2=ScenarioConfig(
description='ad hoc teamwork with default bots',
tags={
'ad_hoc_teamwork',
'visitor',
},
substrate='paintball__king_of_the_hill',
roles=('default',) * 8,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role={
'default': {'paintball__king_of_the_hill__free_0',
'paintball__king_of_the_hill__free_1',
'paintball__king_of_the_hill__free_2',},
},
),
paintball__king_of_the_hill_3=ScenarioConfig(
description='ad hoc teamwork with shaped bots',
tags={
'ad_hoc_teamwork',
'visitor',
},
substrate='paintball__king_of_the_hill',
roles=('default',) * 8,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role={
'default': {'paintball__king_of_the_hill__spawn_camper_0',
'paintball__king_of_the_hill__spawn_camper_1',
'paintball__king_of_the_hill__spawn_camper_2',
'paintball__king_of_the_hill__spawn_camper_3',},
},
),
predator_prey__alley_hunt_0=ScenarioConfig(
description='focal prey visited by background predators',
tags={
'resident',
},
substrate='predator_prey__alley_hunt',
roles=('predator',) * 5 + ('prey',) * 8,
is_focal=(False,) * 5 + (True,) * 8,
bots_by_role={
'predator': {'predator_prey__alley_hunt__predator_0',
'predator_prey__alley_hunt__predator_1',
'predator_prey__alley_hunt__predator_2',},
},
),
predator_prey__alley_hunt_1=ScenarioConfig(
description=(
'focal predators aim to eat resident prey'),
tags={
'visitor',
},
substrate='predator_prey__alley_hunt',
roles=('predator',) * 5 + ('prey',) * 8,
is_focal=(True,) * 5 + (False,) * 8,
bots_by_role={
'prey': {'predator_prey__alley_hunt__prey_0',
'predator_prey__alley_hunt__prey_1',
'predator_prey__alley_hunt__prey_2',},
},
),
predator_prey__alley_hunt_2=ScenarioConfig(
description=(
'a focal predator competes with background predators to eat prey'),
tags={
'visitor',
},
substrate='predator_prey__alley_hunt',
roles=('predator',) * 5 + ('prey',) * 8,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__alley_hunt__prey_0',
'predator_prey__alley_hunt__prey_1',
'predator_prey__alley_hunt__prey_2',},
'predator': {'predator_prey__alley_hunt__predator_0',
'predator_prey__alley_hunt__predator_1',
'predator_prey__alley_hunt__predator_2',},
},
),
predator_prey__alley_hunt_3=ScenarioConfig(
description=(
'one focal prey ad hoc cooperates with background prey to avoid ' +
'predation'),
tags={
'visitor',
},
substrate='predator_prey__alley_hunt',
roles=('prey',) * 8 + ('predator',) * 5,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__alley_hunt__prey_0',
'predator_prey__alley_hunt__prey_1',
'predator_prey__alley_hunt__prey_2',},
'predator': {'predator_prey__alley_hunt__predator_0',
'predator_prey__alley_hunt__predator_1',
'predator_prey__alley_hunt__predator_2',},
},
),
predator_prey__open_0=ScenarioConfig(
description='focal prey visited by background predators',
tags={
'resident',
},
substrate='predator_prey__open',
roles=('predator',) * 3 + ('prey',) * 10,
is_focal=(False,) * 3 + (True,) * 10,
bots_by_role={
'predator': {'predator_prey__open__basic_predator_0',
'predator_prey__open__basic_predator_1',},
},
),
predator_prey__open_1=ScenarioConfig(
description=(
'focal predators aim to eat basic resident prey'),
tags={
'visitor',
},
substrate='predator_prey__open',
roles=('predator',) * 3 + ('prey',) * 10,
is_focal=(True,) * 3 + (False,) * 10,
bots_by_role={
'prey': {'predator_prey__open__basic_prey_0',
'predator_prey__open__basic_prey_1',
'predator_prey__open__basic_prey_2',},
},
),
predator_prey__open_2=ScenarioConfig(
description=(
'a focal predator competes with background predators to hunt prey'),
tags={
'visitor',
},
substrate='predator_prey__open',
roles=('predator',) * 3 + ('prey',) * 10,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__open__basic_prey_0',
'predator_prey__open__basic_prey_1',
'predator_prey__open__basic_prey_2',},
'predator': {'predator_prey__open__basic_predator_0',
'predator_prey__open__basic_predator_1',},
},
),
predator_prey__open_3=ScenarioConfig(
description=(
'one focal prey ad hoc cooperates with background prey to avoid ' +
'predation'),
tags={
'visitor',
},
substrate='predator_prey__open',
roles=('prey',) * 10 + ('predator',) * 3,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__open__basic_prey_0',
'predator_prey__open__basic_prey_1',
'predator_prey__open__basic_prey_2',},
'predator': {'predator_prey__open__basic_predator_0',
'predator_prey__open__basic_predator_1',},
},
),
predator_prey__open_4=ScenarioConfig(
description=(
'focal predators hunt smarter resident prey'),
tags={
'visitor',
},
substrate='predator_prey__open',
roles=('predator',) * 3 + ('prey',) * 10,
is_focal=(True,) * 3 + (False,) * 10,
bots_by_role={
'prey': {'predator_prey__open__smart_prey_0',
'predator_prey__open__smart_prey_1',
'predator_prey__open__smart_prey_2',},
},
),
predator_prey__open_5=ScenarioConfig(
description=(
'a focal predator competes with background predators to hunt ' +
'smarter prey'),
tags={
'visitor',
},
substrate='predator_prey__open',
roles=('predator',) * 3 + ('prey',) * 10,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__open__smart_prey_0',
'predator_prey__open__smart_prey_1',
'predator_prey__open__smart_prey_2',},
'predator': {'predator_prey__open__basic_predator_0',
'predator_prey__open__basic_predator_1',},
},
),
predator_prey__open_6=ScenarioConfig(
description=(
'one focal prey ad hoc cooperates with background smart prey to ' +
'avoid predation'),
tags={
'visitor',
},
substrate='predator_prey__open',
roles=('prey',) * 10 + ('predator',) * 3,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__open__smart_prey_0',
'predator_prey__open__smart_prey_1',
'predator_prey__open__smart_prey_2',},
'predator': {'predator_prey__open__basic_predator_0',
'predator_prey__open__basic_predator_1',},
},
),
predator_prey__orchard_0=ScenarioConfig(
description='focal prey visited by background predators',
tags={
'resident',
},
substrate='predator_prey__orchard',
roles=('predator',) * 5 + ('prey',) * 8,
is_focal=(False,) * 5 + (True,) * 8,
bots_by_role={
'predator': {'predator_prey__orchard__basic_predator_0',
'predator_prey__orchard__basic_predator_1',
'predator_prey__orchard__basic_predator_2',},
},
),
predator_prey__orchard_1=ScenarioConfig(
description=(
'focal predators aim to eat resident population of ' +
'unspecialized prey'),
tags={
'visitor',
},
substrate='predator_prey__orchard',
roles=('predator',) * 5 + ('prey',) * 8,
is_focal=(True,) * 5 + (False,) * 8,
bots_by_role={
'prey': {'predator_prey__orchard__basic_prey_0',
'predator_prey__orchard__basic_prey_1',
'predator_prey__orchard__basic_prey_2',
'predator_prey__orchard__basic_prey_3',
'predator_prey__orchard__basic_prey_4',
'predator_prey__orchard__basic_prey_5',},
},
),
predator_prey__orchard_2=ScenarioConfig(
description=(
'a focal predator competes with background predators to eat ' +
'unspecialized prey'),
tags={
'visitor',
},
substrate='predator_prey__orchard',
roles=('predator',) * 5 + ('prey',) * 8,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__orchard__basic_prey_0',
'predator_prey__orchard__basic_prey_1',
'predator_prey__orchard__basic_prey_2',
'predator_prey__orchard__basic_prey_3',
'predator_prey__orchard__basic_prey_4',
'predator_prey__orchard__basic_prey_5',},
'predator': {'predator_prey__orchard__basic_predator_0',
'predator_prey__orchard__basic_predator_1',
'predator_prey__orchard__basic_predator_2',},
},
),
predator_prey__orchard_3=ScenarioConfig(
description=(
'one focal prey ad hoc cooperates with unspecialized background ' +
'prey to avoid predation'),
tags={
'visitor',
},
substrate='predator_prey__orchard',
roles=('prey',) * 8 + ('predator',) * 5,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__orchard__basic_prey_0',
'predator_prey__orchard__basic_prey_1',
'predator_prey__orchard__basic_prey_2',
'predator_prey__orchard__basic_prey_3',
'predator_prey__orchard__basic_prey_4',
'predator_prey__orchard__basic_prey_5',},
'predator': {'predator_prey__orchard__basic_predator_0',
'predator_prey__orchard__basic_predator_1',
'predator_prey__orchard__basic_predator_2',},
},
),
predator_prey__orchard_4=ScenarioConfig(
description=(
'focal predators aim to eat resident population of acorn ' +
'specialist prey'),
tags={
'visitor',
},
substrate='predator_prey__orchard',
roles=('predator',) * 5 + ('prey',) * 8,
is_focal=(True,) * 5 + (False,) * 8,
bots_by_role={
'prey': {'predator_prey__orchard__acorn_specialist_prey_0',
'predator_prey__orchard__acorn_specialist_prey_1',
'predator_prey__orchard__acorn_specialist_prey_2',
'predator_prey__orchard__acorn_specialist_prey_3',
'predator_prey__orchard__acorn_specialist_prey_4',},
},
),
predator_prey__orchard_5=ScenarioConfig(
description=(
'a focal predator competes with background predators to eat ' +
'acorn specialist prey'),
tags={
'visitor',
},
substrate='predator_prey__orchard',
roles=('predator',) * 5 + ('prey',) * 8,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__orchard__acorn_specialist_prey_0',
'predator_prey__orchard__acorn_specialist_prey_1',
'predator_prey__orchard__acorn_specialist_prey_2',
'predator_prey__orchard__acorn_specialist_prey_3',
'predator_prey__orchard__acorn_specialist_prey_4',},
'predator': {'predator_prey__orchard__basic_predator_0',
'predator_prey__orchard__basic_predator_1',
'predator_prey__orchard__basic_predator_2',},
},
),
predator_prey__orchard_6=ScenarioConfig(
description=(
'one focal prey ad hoc cooperates with acorn specialized ' +
'background prey to avoid predation'),
tags={
'visitor',
},
substrate='predator_prey__orchard',
roles=('prey',) * 8 + ('predator',) * 5,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__orchard__acorn_specialist_prey_0',
'predator_prey__orchard__acorn_specialist_prey_1',
'predator_prey__orchard__acorn_specialist_prey_2',
'predator_prey__orchard__acorn_specialist_prey_3',
'predator_prey__orchard__acorn_specialist_prey_4',},
'predator': {'predator_prey__orchard__basic_predator_0',
'predator_prey__orchard__basic_predator_1',
'predator_prey__orchard__basic_predator_2',},
},
),
predator_prey__random_forest_0=ScenarioConfig(
description='focal prey visited by background predators',
tags={
'resident',
},
substrate='predator_prey__random_forest',
roles=('predator',) * 5 + ('prey',) * 8,
is_focal=(False,) * 5 + (True,) * 8,
bots_by_role={
'predator': {'predator_prey__random_forest__basic_predator_0',
'predator_prey__random_forest__basic_predator_1',
'predator_prey__random_forest__basic_predator_2',},
},
),
predator_prey__random_forest_1=ScenarioConfig(
description=(
'focal predators aim to eat resident prey'),
tags={
'visitor',
},
substrate='predator_prey__random_forest',
roles=('predator',) * 5 + ('prey',) * 8,
is_focal=(True,) * 5 + (False,) * 8,
bots_by_role={
'prey': {'predator_prey__random_forest__basic_prey_0',
'predator_prey__random_forest__basic_prey_1',
'predator_prey__random_forest__basic_prey_2',},
},
),
predator_prey__random_forest_2=ScenarioConfig(
description=(
'a focal predator competes with background predators to eat prey'),
tags={
'visitor',
},
substrate='predator_prey__random_forest',
roles=('predator',) * 5 + ('prey',) * 8,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__random_forest__basic_prey_0',
'predator_prey__random_forest__basic_prey_1',
'predator_prey__random_forest__basic_prey_2',},
'predator': {'predator_prey__random_forest__basic_predator_0',
'predator_prey__random_forest__basic_predator_1',
'predator_prey__random_forest__basic_predator_2',},
},
),
predator_prey__random_forest_3=ScenarioConfig(
description=(
'one focal prey ad hoc cooperates with background prey to avoid ' +
'predation'),
tags={
'visitor',
},
substrate='predator_prey__random_forest',
roles=('prey',) * 8 + ('predator',) * 5,
is_focal=(True,) + (False,) * 12,
bots_by_role={
'prey': {'predator_prey__random_forest__basic_prey_0',
'predator_prey__random_forest__basic_prey_1',
'predator_prey__random_forest__basic_prey_2',},
'predator': {'predator_prey__random_forest__basic_predator_0',
'predator_prey__random_forest__basic_predator_1',
'predator_prey__random_forest__basic_predator_2',},
},
),
prisoners_dilemma_in_the_matrix__arena_0=ScenarioConfig(
description='visiting unconditional cooperators',
tags={
'visitor',
'versus_pure_cooperators',
},
substrate='prisoners_dilemma_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__arena__puppet_cooperator_0',
'prisoners_dilemma_in_the_matrix__arena__puppet_cooperator_margin_0',
},
},
),
prisoners_dilemma_in_the_matrix__arena_1=ScenarioConfig(
description=('focals are resident and visited by an unconditional ' +
'cooperator'),
tags={
'resident',
'versus_pure_cooperators',
},
substrate='prisoners_dilemma_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 7 + (False,) * 1,
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__arena__puppet_cooperator_0',
'prisoners_dilemma_in_the_matrix__arena__puppet_cooperator_margin_0',
},
},
),
prisoners_dilemma_in_the_matrix__arena_2=ScenarioConfig(
description='focals are resident and visitors defect unconditionally',
tags={
'resident',
'versus_pure_defectors',
},
substrate='prisoners_dilemma_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 6 + (False,) * 2,
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__arena__puppet_defector_0',
'prisoners_dilemma_in_the_matrix__arena__puppet_defector_margin_0',
},
},
),
prisoners_dilemma_in_the_matrix__arena_3=ScenarioConfig(
description=('visiting a population of hair-trigger grim ' +
'reciprocator bots who initially cooperate but, if ' +
'defected on once, will retaliate by defecting in all ' +
'future interactions'),
tags={
'visitor',
'reciprocity',
},
substrate='prisoners_dilemma_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__arena__puppet_grim_one_strike_0',
'prisoners_dilemma_in_the_matrix__arena__puppet_grim_one_strike_margin_0',
},
},
),
prisoners_dilemma_in_the_matrix__arena_4=ScenarioConfig(
description=('visiting a population of two-strikes grim ' +
'reciprocator bots who initially cooperate but, if ' +
'defected on twice, will retaliate by defecting in all ' +
'future interactions'),
tags={
'visitor',
'reciprocity',
},
substrate='prisoners_dilemma_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__arena__puppet_grim_two_strikes_0',
'prisoners_dilemma_in_the_matrix__arena__puppet_grim_two_strikes_margin_0',
},
},
),
prisoners_dilemma_in_the_matrix__arena_5=ScenarioConfig(
description=(
'visiting a mixed population of k-strikes grim reciprocator bots ' +
'with k values from 1 to 3, they initially cooperate but, if ' +
'defected on k times, they retaliate in all future interactions'
),
tags={
'visitor',
'reciprocity',
},
substrate='prisoners_dilemma_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 3 + (False,) * 5,
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__arena__puppet_grim_one_strike_0',
'prisoners_dilemma_in_the_matrix__arena__puppet_grim_one_strike_margin_0',
'prisoners_dilemma_in_the_matrix__arena__puppet_grim_three_strikes_0',
'prisoners_dilemma_in_the_matrix__arena__puppet_grim_three_strikes_margin_0',
'prisoners_dilemma_in_the_matrix__arena__puppet_grim_two_strikes_0',
'prisoners_dilemma_in_the_matrix__arena__puppet_grim_two_strikes_margin_0',
},
},
),
prisoners_dilemma_in_the_matrix__repeated_0=ScenarioConfig(
description='partner may play either cooperate or defect',
tags={
'half_and_half',
'versus_pure_all',
},
substrate='prisoners_dilemma_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__repeated__puppet_cooperator_margin_0',
'prisoners_dilemma_in_the_matrix__repeated__puppet_cooperator_margin_1',
'prisoners_dilemma_in_the_matrix__repeated__puppet_defector_margin_0',
'prisoners_dilemma_in_the_matrix__repeated__puppet_defector_margin_1',
},
},
),
prisoners_dilemma_in_the_matrix__repeated_1=ScenarioConfig(
description='partner typically plays cooperate',
tags={
'half_and_half',
'versus_pure_cooperator',
},
substrate='prisoners_dilemma_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__repeated__puppet_cooperator_margin_0',
'prisoners_dilemma_in_the_matrix__repeated__puppet_cooperator_margin_1',
},
},
),
prisoners_dilemma_in_the_matrix__repeated_2=ScenarioConfig(
description='partner typically plays defect',
tags={
'half_and_half',
'versus_pure_defector',
},
substrate='prisoners_dilemma_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__repeated__puppet_defector_margin_0',
'prisoners_dilemma_in_the_matrix__repeated__puppet_defector_margin_1',
},
},
),
prisoners_dilemma_in_the_matrix__repeated_3=ScenarioConfig(
description=('partner is a hair-trigger grim reciprocator, i.e. one ' +
'who initially cooperates but, if defected on once, will' +
' retaliate by defecting forever after'),
tags={
'half_and_half',
'reciprocity',
},
substrate='prisoners_dilemma_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__repeated__puppet_grim_one_strike_margin_0',
'prisoners_dilemma_in_the_matrix__repeated__puppet_grim_one_strike_margin_1',
},
},
),
prisoners_dilemma_in_the_matrix__repeated_4=ScenarioConfig(
description=('partner is a two-strikes grim reciprocator, i.e. one ' +
'who initially cooperates, but if defected on twice, ' +
'will retaliate by defecting forever after'),
tags={
'half_and_half',
'reciprocity',
},
substrate='prisoners_dilemma_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__repeated__puppet_grim_two_strikes_margin_0',
'prisoners_dilemma_in_the_matrix__repeated__puppet_grim_two_strikes_margin_1',
},
},
),
prisoners_dilemma_in_the_matrix__repeated_5=ScenarioConfig(
description='partner is a tit-for-tat conditional cooperator',
tags={
'half_and_half',
'reciprocity',
},
substrate='prisoners_dilemma_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__repeated__puppet_tft_margin_0',
'prisoners_dilemma_in_the_matrix__repeated__puppet_tft_margin_1',
},
},
),
prisoners_dilemma_in_the_matrix__repeated_6=ScenarioConfig(
description=('partner is a tit-for-tat conditional cooperator who ' +
'occasionally plays defect instead of cooperate'),
tags={
'half_and_half',
'reciprocity',
'forgiveness',
},
substrate='prisoners_dilemma_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__repeated__puppet_tft_tremble_margin_0',
'prisoners_dilemma_in_the_matrix__repeated__puppet_tft_tremble_margin_1',
},
},
),
prisoners_dilemma_in_the_matrix__repeated_7=ScenarioConfig(
description=('partner plays cooperate for a while then switches to ' +
'defect'),
tags={
'half_and_half',
'flexibility',
},
substrate='prisoners_dilemma_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__repeated__puppet_flip_0',
},
},
),
prisoners_dilemma_in_the_matrix__repeated_8=ScenarioConfig(
description=('partner tries to take advantage of the focal player ' +
'by playing defect, but if punished, partner then ' +
'switches to tit-for-tat conditional cooperation'),
tags={
'half_and_half',
'teaching',
'reciprocity',
'forgiveness',
},
substrate='prisoners_dilemma_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__repeated__puppet_corrigible_0',
},
},
),
prisoners_dilemma_in_the_matrix__repeated_9=ScenarioConfig(
description=('partner tries to take advantage of the focal player ' +
'by playing defect, but if punished, partner then ' +
'switches to noisy tit-for-tat conditional cooperation'),
tags={
'half_and_half',
'teaching',
'reciprocity',
'forgiveness',
},
substrate='prisoners_dilemma_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'prisoners_dilemma_in_the_matrix__repeated__puppet_corrigible_tremble_0',
},
},
),
pure_coordination_in_the_matrix__arena_0=ScenarioConfig(
description=('focals are resident, a single visitor joins who may ' +
'prefer any option; whichever option it prefers, it ' +
'pursues it greedily'),
tags={
'resident',
'versus_pure_all',
},
substrate='pure_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 7 + (False,) * 1,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__arena__pure_greedy_a_0',
'pure_coordination_in_the_matrix__arena__pure_greedy_b_0',
'pure_coordination_in_the_matrix__arena__pure_greedy_c_0',
},
},
),
pure_coordination_in_the_matrix__arena_1=ScenarioConfig(
description=('focals are resident, three visitors join who always ' +
'select the same option as their partner in the previous' +
'interaction and do so without being too greedy'),
tags={
'resident',
'versus_best_response',
},
substrate='pure_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__arena__resp2prev_0',
},
},
),
pure_coordination_in_the_matrix__arena_2=ScenarioConfig(
description=('focals are resident, three visitors join who always ' +
'select the same option as their partner in the previous' +
'interaction and are greedy in doing so'),
tags={
'resident',
'versus_best_response',
'scarcity',
},
substrate='pure_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__arena__resp2prev_greedy_0',
},
},
),
pure_coordination_in_the_matrix__arena_3=ScenarioConfig(
description=('visiting a background population where all initially ' +
'choose option A (without greed) and then switch '+
'to either B or C after some time'),
tags={
'visitor',
'convention_following',
'flexibility',
},
substrate='pure_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__arena__flip_a2b_0',
'pure_coordination_in_the_matrix__arena__flip_a2c_0',
},
},
),
pure_coordination_in_the_matrix__arena_4=ScenarioConfig(
description=('visiting a background population where all initially ' +
'choose option B (without greed) and then switch '+
'to either A or C after some time'),
tags={
'visitor',
'convention_following',
'flexibility',
},
substrate='pure_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__arena__flip_b2a_0',
'pure_coordination_in_the_matrix__arena__flip_b2c_0',
},
},
),
pure_coordination_in_the_matrix__arena_5=ScenarioConfig(
description=('visiting a background population where all initially ' +
'choose option C (without greed) and then switch '+
'to either A or B after some time'),
tags={
'visitor',
'convention_following',
'flexibility',
},
substrate='pure_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__arena__flip_c2a_0',
'pure_coordination_in_the_matrix__arena__flip_c2b_0',
},
},
),
pure_coordination_in_the_matrix__arena_6=ScenarioConfig(
description=('visiting a background population where all typically ' +
'choose option A and do not collect it too greedily'),
tags={
'visitor',
'versus_pure_type_a',
'convention_following',
},
substrate='pure_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__arena__pure_a_0',
},
},
),
pure_coordination_in_the_matrix__arena_7=ScenarioConfig(
description=('visiting a background population where all typically ' +
'choose option B and do not collect it too greedily'),
tags={
'visitor',
'versus_pure_type_b',
'convention_following',
},
substrate='pure_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__arena__pure_b_0',
},
},
),
pure_coordination_in_the_matrix__arena_8=ScenarioConfig(
description=('visiting a background population where all typically ' +
'choose option C and do not collect it too greedily'),
tags={
'visitor',
'versus_pure_type_c',
'convention_following',
},
substrate='pure_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__arena__pure_c_0',
},
},
),
pure_coordination_in_the_matrix__repeated_0=ScenarioConfig(
description='meeting any pure strategy player',
tags={
'versus_pure_all',
'half_and_half',
},
substrate='pure_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__repeated__pure_a_margin_0',
'pure_coordination_in_the_matrix__repeated__pure_b_margin_0',
'pure_coordination_in_the_matrix__repeated__pure_c_margin_0',
},
},
),
pure_coordination_in_the_matrix__repeated_1=ScenarioConfig(
description=('meeting an agent who plays the best response to ' +
'what the focal agent did in the last round.'),
tags={
'half_and_half',
'versus_best_response',
},
substrate='pure_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__repeated__resp2prev_margin_0',
},
},
),
pure_coordination_in_the_matrix__repeated_2=ScenarioConfig(
description=('versus mixture of opponents who often flip to other ' +
'strategies after some number of interactions'),
tags={
'half_and_half',
'versus_strategy_flip',
},
substrate='pure_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__repeated__pure_a_margin_0',
'pure_coordination_in_the_matrix__repeated__flip_a2b_0',
'pure_coordination_in_the_matrix__repeated__flip_a2b_1',
'pure_coordination_in_the_matrix__repeated__flip_a2c_0',
'pure_coordination_in_the_matrix__repeated__flip_a2c_1',
'pure_coordination_in_the_matrix__repeated__pure_b_margin_0',
'pure_coordination_in_the_matrix__repeated__flip_b2c_0',
'pure_coordination_in_the_matrix__repeated__flip_b2c_1',
'pure_coordination_in_the_matrix__repeated__flip_b2a_0',
'pure_coordination_in_the_matrix__repeated__flip_b2a_1',
'pure_coordination_in_the_matrix__repeated__pure_c_margin_0',
'pure_coordination_in_the_matrix__repeated__flip_c2a_0',
'pure_coordination_in_the_matrix__repeated__flip_c2a_1',
'pure_coordination_in_the_matrix__repeated__flip_c2b_0',
'pure_coordination_in_the_matrix__repeated__flip_c2b_1'
},
},
),
pure_coordination_in_the_matrix__repeated_3=ScenarioConfig(
description='meeting an agent who almost always chooses resource a',
tags={
'versus_pure_type_a',
'half_and_half',
},
substrate='pure_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__repeated__pure_a_margin_0',
},
},
),
pure_coordination_in_the_matrix__repeated_4=ScenarioConfig(
description='meeting an agent who almost always chooses resource b',
tags={
'versus_pure_type_b',
'half_and_half',
},
substrate='pure_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__repeated__pure_b_margin_0',
},
},
),
pure_coordination_in_the_matrix__repeated_5=ScenarioConfig(
description='meeting an agent who almost always chooses resource c',
tags={
'versus_pure_type_c',
'half_and_half',
},
substrate='pure_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'pure_coordination_in_the_matrix__repeated__pure_c_margin_0',
},
},
),
rationalizable_coordination_in_the_matrix__arena_0=ScenarioConfig(
description=('focals are resident, a single visitor joins who may ' +
'prefer any option; whichever option it prefers, it ' +
'pursues it greedily'),
tags={
'resident',
'versus_pure_all',
},
substrate='rationalizable_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 7 + (False,) * 1,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__arena__pure_greedy_a_0',
'rationalizable_coordination_in_the_matrix__arena__pure_greedy_b_0',
'rationalizable_coordination_in_the_matrix__arena__pure_greedy_c_0',
},
},
),
rationalizable_coordination_in_the_matrix__arena_1=ScenarioConfig(
description=('focals are resident, three visitors join who always ' +
'select the same option as their partner in the previous' +
'interaction and do so without being too greedy'),
tags={
'resident',
'versus_best_response',
},
substrate='rationalizable_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__arena__resp2prev_0',
},
},
),
rationalizable_coordination_in_the_matrix__arena_2=ScenarioConfig(
description=('focals are resident, three visitors join who always ' +
'select the same option as their partner in the previous' +
'interaction and are greedy in doing so'),
tags={
'resident',
'versus_best_response',
'scarcity',
},
substrate='rationalizable_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__arena__resp2prev_greedy_0',
},
},
),
rationalizable_coordination_in_the_matrix__arena_3=ScenarioConfig(
description=('visiting a background population where all initially ' +
'choose option A (without greed) and then switch '+
'to either B or C after some time'),
tags={
'visitor',
'convention_following',
'flexibility',
},
substrate='rationalizable_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__arena__flip_a2b_0',
'rationalizable_coordination_in_the_matrix__arena__flip_a2c_0',
},
},
),
rationalizable_coordination_in_the_matrix__arena_4=ScenarioConfig(
description=('visiting a background population where all initially ' +
'choose option B (without greed) and then switch '+
'to either A or C after some time'),
tags={
'visitor',
'convention_following',
'flexibility',
},
substrate='rationalizable_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__arena__flip_b2a_0',
'rationalizable_coordination_in_the_matrix__arena__flip_b2c_0',
},
},
),
rationalizable_coordination_in_the_matrix__arena_5=ScenarioConfig(
description=('visiting a background population where all initially ' +
'choose option C (without greed) and then switch '+
'to either A or B after some time'),
tags={
'visitor',
'convention_following',
'flexibility',
},
substrate='rationalizable_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__arena__flip_c2a_0',
'rationalizable_coordination_in_the_matrix__arena__flip_c2b_0',
},
},
),
rationalizable_coordination_in_the_matrix__arena_6=ScenarioConfig(
description=('visiting a background population where all typically ' +
'choose option A and do not collect it too greedily'),
tags={
'visitor',
'versus_pure_type_a',
'convention_following',
},
substrate='rationalizable_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__arena__pure_a_0',
},
},
),
rationalizable_coordination_in_the_matrix__arena_7=ScenarioConfig(
description=('visiting a background population where all typically ' +
'choose option B and do not collect it too greedily'),
tags={
'visitor',
'versus_pure_type_b',
'convention_following',
},
substrate='rationalizable_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__arena__pure_b_0',
},
},
),
rationalizable_coordination_in_the_matrix__arena_8=ScenarioConfig(
description=('visiting a background population where all typically ' +
'choose option C and do not collect it too greedily'),
tags={
'visitor',
'versus_pure_type_c',
'convention_following',
},
substrate='rationalizable_coordination_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__arena__pure_c_0',
},
},
),
rationalizable_coordination_in_the_matrix__repeated_0=ScenarioConfig(
description='meeting any pure strategy player',
tags={
'versus_pure_all',
'half_and_half',
},
substrate='rationalizable_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__repeated__pure_a_margin_0',
'rationalizable_coordination_in_the_matrix__repeated__pure_b_margin_0',
'rationalizable_coordination_in_the_matrix__repeated__pure_c_margin_0',
},
},
),
rationalizable_coordination_in_the_matrix__repeated_1=ScenarioConfig(
description=('meeting an agent who plays the best response to ' +
'what the focal agent did in the last round.'),
tags={
'half_and_half',
'versus_best_response',
},
substrate='rationalizable_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__repeated__resp2prev_margin_0',
},
},
),
rationalizable_coordination_in_the_matrix__repeated_2=ScenarioConfig(
description=('versus mixture of opponents who often flip to other ' +
'strategies after some number of interactions'),
tags={
'half_and_half',
'versus_strategy_flip',
},
substrate='rationalizable_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__repeated__pure_a_margin_0',
'rationalizable_coordination_in_the_matrix__repeated__flip_a2b_0',
'rationalizable_coordination_in_the_matrix__repeated__flip_a2b_1',
'rationalizable_coordination_in_the_matrix__repeated__flip_a2c_0',
'rationalizable_coordination_in_the_matrix__repeated__flip_a2c_1',
'rationalizable_coordination_in_the_matrix__repeated__pure_b_margin_0',
'rationalizable_coordination_in_the_matrix__repeated__flip_b2c_0',
'rationalizable_coordination_in_the_matrix__repeated__flip_b2c_1',
'rationalizable_coordination_in_the_matrix__repeated__flip_b2a_0',
'rationalizable_coordination_in_the_matrix__repeated__flip_b2a_1',
'rationalizable_coordination_in_the_matrix__repeated__pure_c_margin_0',
'rationalizable_coordination_in_the_matrix__repeated__flip_c2a_0',
'rationalizable_coordination_in_the_matrix__repeated__flip_c2a_1',
'rationalizable_coordination_in_the_matrix__repeated__flip_c2b_0',
'rationalizable_coordination_in_the_matrix__repeated__flip_c2b_1'
},
},
),
rationalizable_coordination_in_the_matrix__repeated_3=ScenarioConfig(
description='meeting an agent who almost always chooses resource a',
tags={
'versus_pure_type_a',
'half_and_half',
},
substrate='rationalizable_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__repeated__pure_a_margin_0',
},
},
),
rationalizable_coordination_in_the_matrix__repeated_4=ScenarioConfig(
description='meeting an agent who almost always chooses resource b',
tags={
'versus_pure_type_b',
'half_and_half',
},
substrate='rationalizable_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__repeated__pure_b_margin_0',
},
},
),
rationalizable_coordination_in_the_matrix__repeated_5=ScenarioConfig(
description='meeting an agent who almost always chooses resource c',
tags={
'versus_pure_type_c',
'half_and_half',
},
substrate='rationalizable_coordination_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) * 1 + (False,) * 1,
bots_by_role={
'default': {
'rationalizable_coordination_in_the_matrix__repeated__pure_c_margin_0',
},
},
),
running_with_scissors_in_the_matrix__arena_0=ScenarioConfig(
description=('versus a background population containing bots ' +
'implementing all three pure strategies'),
tags={
'visitor',
'versus_pure_all',
},
substrate='running_with_scissors_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__arena__rock_margin_0',
'running_with_scissors_in_the_matrix__arena__rock_margin_1',
'running_with_scissors_in_the_matrix__arena__paper_margin_0',
'running_with_scissors_in_the_matrix__arena__paper_margin_1',
'running_with_scissors_in_the_matrix__arena__scissors_margin_0',
'running_with_scissors_in_the_matrix__arena__scissors_margin_1',
},
}
),
running_with_scissors_in_the_matrix__arena_1=ScenarioConfig(
description=('versus gullible bots'),
tags={
'deception',
'visitor',
'versus_gullible',
},
substrate='running_with_scissors_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__arena__free_0',
},
}
),
running_with_scissors_in_the_matrix__arena_2=ScenarioConfig(
description=('versus mixture of opponents who play rock and some who ' +
'flip to scissors after two interactions'),
tags={
'visitor',
'versus_strategy_flip',
},
substrate='running_with_scissors_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__arena__rock_margin_0',
'running_with_scissors_in_the_matrix__arena__rock_margin_1',
'running_with_scissors_in_the_matrix__arena__flip_r2s_0',
},
}
),
running_with_scissors_in_the_matrix__arena_3=ScenarioConfig(
description=('versus mixture of opponents who play paper and some ' +
'who flip to rock after two interactions'),
tags={
'visitor',
'versus_strategy_flip',
},
substrate='running_with_scissors_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__arena__paper_margin_0',
'running_with_scissors_in_the_matrix__arena__paper_margin_1',
'running_with_scissors_in_the_matrix__arena__flip_p2r_0',
},
}
),
running_with_scissors_in_the_matrix__arena_4=ScenarioConfig(
description=('versus mixture of opponents who play scissors and some ' +
'who flip to paper after two interactions'),
tags={
'visitor',
'versus_strategy_flip',
},
substrate='running_with_scissors_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 1 + (False,) * 7,
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__arena__scissors_margin_0',
'running_with_scissors_in_the_matrix__arena__scissors_margin_1',
'running_with_scissors_in_the_matrix__arena__flip_s2p_0',
},
}
),
running_with_scissors_in_the_matrix__arena_5=ScenarioConfig(
description=('visiting a population of pure paper bots'),
tags={
'visitor',
'versus_pure_paper',
},
substrate='running_with_scissors_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__arena__paper_margin_0',
'running_with_scissors_in_the_matrix__arena__paper_margin_1',
},
}
),
running_with_scissors_in_the_matrix__arena_6=ScenarioConfig(
description=('visiting a population of pure rock bots'),
tags={
'visitor',
'versus_pure_rock',
},
substrate='running_with_scissors_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__arena__rock_margin_0',
'running_with_scissors_in_the_matrix__arena__rock_margin_1',
},
}
),
running_with_scissors_in_the_matrix__arena_7=ScenarioConfig(
description=('visiting a population of pure scissors bots'),
tags={
'visitor',
'versus_pure_scissors',
},
substrate='running_with_scissors_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__arena__scissors_margin_0',
'running_with_scissors_in_the_matrix__arena__scissors_margin_1',
},
}
),
running_with_scissors_in_the_matrix__one_shot_0=ScenarioConfig(
description='versus mixed strategy opponent',
tags={
'half_and_half',
'versus_pure_all',
},
substrate='running_with_scissors_in_the_matrix__one_shot',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__one_shot__rock_margin_0',
'running_with_scissors_in_the_matrix__one_shot__paper_margin_0',
'running_with_scissors_in_the_matrix__one_shot__scissors_margin_0',
},
}
),
running_with_scissors_in_the_matrix__one_shot_1=ScenarioConfig(
description='versus pure rock opponent',
tags={
'half_and_half',
'versus_pure_rock',
},
substrate='running_with_scissors_in_the_matrix__one_shot',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__one_shot__rock_margin_0',
},
}
),
running_with_scissors_in_the_matrix__one_shot_2=ScenarioConfig(
description='versus pure paper opponent',
tags={
'half_and_half',
'versus_pure_paper',
},
substrate='running_with_scissors_in_the_matrix__one_shot',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__one_shot__paper_margin_0',
},
}
),
running_with_scissors_in_the_matrix__one_shot_3=ScenarioConfig(
description='versus pure scissors opponent',
tags={
'half_and_half',
'versus_pure_scissors',
},
substrate='running_with_scissors_in_the_matrix__one_shot',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__one_shot__scissors_margin_0',
},
}
),
running_with_scissors_in_the_matrix__repeated_0=ScenarioConfig(
description='versus mixed strategy opponent',
tags={
'half_and_half',
'versus_pure_all',
},
substrate='running_with_scissors_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__repeated__rock_margin_0',
'running_with_scissors_in_the_matrix__repeated__paper_margin_0',
'running_with_scissors_in_the_matrix__repeated__scissors_margin_0',
},
}
),
running_with_scissors_in_the_matrix__repeated_1=ScenarioConfig(
description=('versus opponent who plays the best response to ' +
'what the focal player did in the last round.'),
tags={
'half_and_half',
'versus_best_response',
},
substrate='running_with_scissors_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__repeated__resp2prev_margin_0',
},
}
),
running_with_scissors_in_the_matrix__repeated_2=ScenarioConfig(
description=('versus opponent who sometimes plays a pure strategy ' +
'but sometimes plays the best response to what the ' +
'focal player did in the last round'),
tags={
'half_and_half',
'versus_best_response',
},
substrate='running_with_scissors_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__repeated__resp2prev_margin_0',
'running_with_scissors_in_the_matrix__repeated__rock_margin_0',
'running_with_scissors_in_the_matrix__repeated__paper_margin_0',
'running_with_scissors_in_the_matrix__repeated__scissors_margin_0',
},
}
),
running_with_scissors_in_the_matrix__repeated_3=ScenarioConfig(
description=('versus mixture of opponents who often flip to other ' +
'strategies after two interactions'),
tags={
'half_and_half',
'versus_strategy_flip',
},
substrate='running_with_scissors_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__repeated__rock_0',
'running_with_scissors_in_the_matrix__repeated__rock_margin_0',
'running_with_scissors_in_the_matrix__repeated__flip_r2s_0',
'running_with_scissors_in_the_matrix__repeated__paper_0',
'running_with_scissors_in_the_matrix__repeated__paper_margin_0',
'running_with_scissors_in_the_matrix__repeated__flip_p2r_0',
'running_with_scissors_in_the_matrix__repeated__scissors_0',
'running_with_scissors_in_the_matrix__repeated__scissors_margin_0',
'running_with_scissors_in_the_matrix__repeated__flip_s2p_0',
},
}
),
running_with_scissors_in_the_matrix__repeated_4=ScenarioConfig(
description=('versus mixture of opponents who either flip to another ' +
'strategy after one interaction and keep it forever or ' +
'continue to change, always best responding to what ' +
'the focal player just did'),
tags={
'half_and_half',
'versus_strategy_flip',
},
substrate='running_with_scissors_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__repeated__flip_r2s_1',
'running_with_scissors_in_the_matrix__repeated__flip_p2r_1',
'running_with_scissors_in_the_matrix__repeated__flip_s2p_1',
'running_with_scissors_in_the_matrix__repeated__resp2prev_margin_0',
},
}
),
running_with_scissors_in_the_matrix__repeated_5=ScenarioConfig(
description='versus gullible opponent',
tags={
'deception',
'half_and_half',
'versus_gullible',
},
substrate='running_with_scissors_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__repeated__free_0',
},
}
),
running_with_scissors_in_the_matrix__repeated_6=ScenarioConfig(
description='versus pure rock opponent',
tags={
'half_and_half',
'versus_pure_rock',
},
substrate='running_with_scissors_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__repeated__rock_margin_0',
},
}
),
running_with_scissors_in_the_matrix__repeated_7=ScenarioConfig(
description='versus pure paper opponent',
tags={
'half_and_half',
'versus_pure_paper',
},
substrate='running_with_scissors_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__repeated__paper_margin_0',
},
}
),
running_with_scissors_in_the_matrix__repeated_8=ScenarioConfig(
description='versus pure scissors opponent',
tags={
'half_and_half',
'versus_pure_scissors',
},
substrate='running_with_scissors_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'running_with_scissors_in_the_matrix__repeated__scissors_margin_0',
},
}
),
stag_hunt_in_the_matrix__arena_0=ScenarioConfig(
description='visiting unconditional stag players',
tags={
'visitor',
'versus_pure_stag_players',
'convention_following',
},
substrate='stag_hunt_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__arena__puppet_stag_0',
'stag_hunt_in_the_matrix__arena__puppet_stag_margin_0',
},
},
),
stag_hunt_in_the_matrix__arena_1=ScenarioConfig(
description='visiting unconditional hare players',
tags={
'visitor',
'versus_pure_hare_players',
'convention_following',
},
substrate='stag_hunt_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__arena__puppet_hare_0',
'stag_hunt_in_the_matrix__arena__puppet_hare_margin_0',
},
},
),
stag_hunt_in_the_matrix__arena_2=ScenarioConfig(
description=('focals are resident and visitors are unconditional ' +
'stag players'),
tags={
'resident',
'versus_pure_stag_players',
},
substrate='stag_hunt_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__arena__puppet_stag_0',
'stag_hunt_in_the_matrix__arena__puppet_stag_margin_0',
},
},
),
stag_hunt_in_the_matrix__arena_3=ScenarioConfig(
description=('focals are resident and visitors are unconditional' +
'hare players'),
tags={
'resident',
'versus_pure_hare_players',
},
substrate='stag_hunt_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 5 + (False,) * 3,
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__arena__puppet_hare_0',
'stag_hunt_in_the_matrix__arena__puppet_hare_margin_0',
},
},
),
stag_hunt_in_the_matrix__arena_4=ScenarioConfig(
description=('visiting a population of hair-trigger grim ' +
'reciprocator bots who initially play stag but, if ' +
'any partner plays hare once, they give up on trying to ' +
'cooperate and play hare in all future interactions'),
tags={
'visitor',
'reciprocity',
},
substrate='stag_hunt_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__arena__puppet_grim_one_strike_0',
'stag_hunt_in_the_matrix__arena__puppet_grim_one_strike_margin_0',
},
},
),
stag_hunt_in_the_matrix__arena_5=ScenarioConfig(
description=('visiting a population of two-strikes grim ' +
'reciprocator bots who initially play stag but, if ' +
'their partners play hare twice, they give up on trying ' +
'to cooperate and play hare in all future interactions'),
tags={
'visitor',
'reciprocity',
},
substrate='stag_hunt_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) + (False,) * 7,
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__arena__puppet_grim_two_strikes_0',
'stag_hunt_in_the_matrix__arena__puppet_grim_two_strikes_margin_0',
},
},
),
stag_hunt_in_the_matrix__arena_6=ScenarioConfig(
description=(
'visiting a mixed population of k-strikes grim reciprocator bots ' +
'with k values from 1 to 3, they initially play stag but, if ' +
'their partners play hare k times, they then play hare in all ' +
'future interactions'
),
tags={
'visitor',
'reciprocity',
},
substrate='stag_hunt_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 3 + (False,) * 5,
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__arena__puppet_grim_one_strike_0',
'stag_hunt_in_the_matrix__arena__puppet_grim_one_strike_margin_0',
'stag_hunt_in_the_matrix__arena__puppet_grim_three_strikes_0',
'stag_hunt_in_the_matrix__arena__puppet_grim_three_strikes_margin_0',
'stag_hunt_in_the_matrix__arena__puppet_grim_two_strikes_0',
'stag_hunt_in_the_matrix__arena__puppet_grim_two_strikes_margin_0',
},
},
),
stag_hunt_in_the_matrix__arena_7=ScenarioConfig(
description='visiting a mixture of pure hare and pure stag players',
tags={
'visitor',
'versus_pure_all',
},
substrate='stag_hunt_in_the_matrix__arena',
roles=('default',) * 8,
is_focal=(True,) * 3 + (False,) * 5,
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__arena__puppet_stag_0',
'stag_hunt_in_the_matrix__arena__puppet_stag_margin_0',
'stag_hunt_in_the_matrix__arena__puppet_hare_0',
'stag_hunt_in_the_matrix__arena__puppet_hare_margin_0',
},
},
),
stag_hunt_in_the_matrix__repeated_0=ScenarioConfig(
description='partner may play either stag or hare',
tags={
'half_and_half',
'versus_pure_all',
},
substrate='stag_hunt_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__repeated__puppet_hare_margin_0',
'stag_hunt_in_the_matrix__repeated__puppet_hare_margin_1',
'stag_hunt_in_the_matrix__repeated__puppet_stag_margin_0',
'stag_hunt_in_the_matrix__repeated__puppet_stag_margin_1',
},
},
),
stag_hunt_in_the_matrix__repeated_1=ScenarioConfig(
description='partner typically plays stag',
tags={
'half_and_half',
'versus_pure_stag',
},
substrate='stag_hunt_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__repeated__puppet_stag_margin_0',
'stag_hunt_in_the_matrix__repeated__puppet_stag_margin_1',
},
},
),
stag_hunt_in_the_matrix__repeated_2=ScenarioConfig(
description='partner typically plays hare',
tags={
'half_and_half',
'versus_pure_hare',
},
substrate='stag_hunt_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__repeated__puppet_hare_margin_0',
'stag_hunt_in_the_matrix__repeated__puppet_hare_margin_1',
},
},
),
stag_hunt_in_the_matrix__repeated_3=ScenarioConfig(
description=('partner is a hair-trigger grim reciprocator, i.e. one ' +
'who initially cooperates but, if defected on once, will' +
' retaliate by defecting forever after'),
tags={
'half_and_half',
'reciprocity',
},
substrate='stag_hunt_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__repeated__puppet_grim_one_strike_margin_0',
'stag_hunt_in_the_matrix__repeated__puppet_grim_one_strike_margin_1',
},
},
),
stag_hunt_in_the_matrix__repeated_4=ScenarioConfig(
description=('partner is a two-strikes grim reciprocator, i.e. one ' +
'who initially cooperates, but if defected on twice, ' +
'will retaliate by defecting forever after'),
tags={
'half_and_half',
'reciprocity',
},
substrate='stag_hunt_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__repeated__puppet_grim_two_strikes_margin_0',
'stag_hunt_in_the_matrix__repeated__puppet_grim_two_strikes_margin_1',
},
},
),
stag_hunt_in_the_matrix__repeated_5=ScenarioConfig(
description='partner is a tit-for-tat conditional cooperator',
tags={
'half_and_half',
'reciprocity',
},
substrate='stag_hunt_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__repeated__puppet_tft_margin_0',
'stag_hunt_in_the_matrix__repeated__puppet_tft_margin_1',
},
},
),
stag_hunt_in_the_matrix__repeated_6=ScenarioConfig(
description=('partner is a tit-for-tat conditional cooperator who ' +
'occasionally plays hare instead of stag'),
tags={
'half_and_half',
'reciprocity',
'forgiveness',
},
substrate='stag_hunt_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__repeated__puppet_tft_tremble_margin_0',
'stag_hunt_in_the_matrix__repeated__puppet_tft_tremble_margin_1',
},
},
),
stag_hunt_in_the_matrix__repeated_7=ScenarioConfig(
description='partner plays stag for a while then switches to hare',
tags={
'half_and_half',
'flexibility',
},
substrate='stag_hunt_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__repeated__puppet_flip_0',
},
},
),
stag_hunt_in_the_matrix__repeated_8=ScenarioConfig(
description=('partner initially plays hare, but if punished, partner ' +
'then switches to tit-for-tat conditional cooperation'),
tags={
'half_and_half',
'teaching',
'reciprocity',
'forgiveness',
},
substrate='stag_hunt_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__repeated__puppet_corrigible_0',
},
},
),
stag_hunt_in_the_matrix__repeated_9=ScenarioConfig(
description=('partner initially plays hare, but if punished, partner ' +
'then switches to noisy tit-for-tat conditional ' +
'cooperation'),
tags={
'half_and_half',
'teaching',
'reciprocity',
'forgiveness',
},
substrate='stag_hunt_in_the_matrix__repeated',
roles=('default',) * 2,
is_focal=(True,) + (False,),
bots_by_role={
'default': {
'stag_hunt_in_the_matrix__repeated__puppet_corrigible_tremble_0',
},
},
),
territory__inside_out_0=ScenarioConfig(
description='focals are resident and visited by an aggressor',
tags={
'resident',
},
substrate='territory__inside_out',
roles=('default',) * 5,
is_focal=(True,) * 4 + (False,),
bots_by_role={
'default': {
'territory__inside_out__aggressor_0',
'territory__inside_out__aggressor_1',
'territory__inside_out__aggressor_2',
'territory__inside_out__aggressor_3',
'territory__inside_out__aggressor_with_extra_training_0',
},
},
),
territory__inside_out_1=ScenarioConfig(
description='visiting a population of aggressors',
tags={
'visitor',
},
substrate='territory__inside_out',
roles=('default',) * 5,
is_focal=(True,) + (False,) * 4,
bots_by_role={
'default': {
'territory__inside_out__aggressor_0',
'territory__inside_out__aggressor_1',
'territory__inside_out__aggressor_2',
'territory__inside_out__aggressor_3',
'territory__inside_out__aggressor_with_extra_training_0',
},
},
),
territory__inside_out_2=ScenarioConfig(
description='focals are resident, visited by a bot that does nothing',
tags={
'resident',
'versus_noop',
},
substrate='territory__inside_out',
roles=('default',) * 5,
is_focal=(True,) * 4 + (False,),
bots_by_role={'default': {'noop_bot'}},
),
territory__inside_out_3=ScenarioConfig(
description='focals visit a resident population that does nothing.',
tags={
'visitor',
'versus_noop',
},
substrate='territory__inside_out',
roles=('default',) * 5,
is_focal=(True,) + (False,) * 4,
bots_by_role={'default': {'noop_bot'}},
),
territory__inside_out_4=ScenarioConfig(
description=('focals are resident, visited by a bot that claims a ' +
'moderate size territory and mostly tolerates its ' +
'neighbors'),
tags={
'resident',
},
substrate='territory__inside_out',
roles=('default',) * 5,
is_focal=(True,) * 4 + (False,),
bots_by_role={
'default': {
'territory__inside_out__somewhat_tolerant_bot_0',
'territory__inside_out__somewhat_tolerant_bot_1',},
},
),
territory__inside_out_5=ScenarioConfig(
description=('focals visit a resident population that claims a ' +
'moderate size territory and mostly tolerates its ' +
'neighbors'),
tags={
'visitor',
},
substrate='territory__inside_out',
roles=('default',) * 5,
is_focal=(True,) + (False,) * 4,
bots_by_role={
'default': {
'territory__inside_out__somewhat_tolerant_bot_0',
'territory__inside_out__somewhat_tolerant_bot_1',},
},
),
territory__open_0=ScenarioConfig(
description='focals are resident and visited by an aggressor',
tags={
'resident',
},
substrate='territory__open',
roles=('default',) * 9,
is_focal=(True,) * 8 + (False,),
bots_by_role={
'default': {
'territory__open__aggressor_0',
'territory__open__aggressor_1',
'territory__open__aggressor_2',
'territory__open__aggressor_3',
'territory__open__aggressor_with_extra_training_0',
},
},
),
territory__open_1=ScenarioConfig(
description='visiting a population of aggressors',
tags={
'visitor',
},
substrate='territory__open',
roles=('default',) * 9,
is_focal=(True,) + (False,) * 8,
bots_by_role={
'default': {
'territory__open__aggressor_0',
'territory__open__aggressor_1',
'territory__open__aggressor_2',
'territory__open__aggressor_3',
'territory__open__aggressor_with_extra_training_0',
},
},
),
territory__open_2=ScenarioConfig(
description='focals are resident, visited by a bot that does nothing',
tags={
'resident',
'versus_noop',
},
substrate='territory__open',
roles=('default',) * 9,
is_focal=(True,) * 8 + (False,),
bots_by_role={'default': {'noop_bot'}},
),
territory__open_3=ScenarioConfig(
description='focals visit a resident population that does nothing',
tags={
'visitor',
'versus_noop',
},
substrate='territory__open',
roles=('default',) * 9,
is_focal=(True,) + (False,) * 8,
bots_by_role={'default': {'noop_bot'}},
),
territory__rooms_0=ScenarioConfig(
description='focals are resident and visited by an aggressor',
tags={
'resident',
},
substrate='territory__rooms',
roles=('default',) * 9,
is_focal=(True,) * 8 + (False,),
bots_by_role={
'default': {
'territory__rooms__aggressor_0',
'territory__rooms__aggressor_1',
'territory__rooms__aggressor_2',
'territory__rooms__aggressor_3',
'territory__rooms__aggressor_with_extra_training_0',
},
},
),
territory__rooms_1=ScenarioConfig(
description='visiting a population of aggressors',
tags={
'visitor',
},
substrate='territory__rooms',
roles=('default',) * 9,
is_focal=(True,) + (False,) * 8,
bots_by_role={
'default': {
'territory__rooms__aggressor_0',
'territory__rooms__aggressor_1',
'territory__rooms__aggressor_2',
'territory__rooms__aggressor_3',
'territory__rooms__aggressor_with_extra_training_0',
},
},
),
territory__rooms_2=ScenarioConfig(
description='focals are resident, visited by a bot that does nothing',
tags={
'resident',
'versus_noop',
},
substrate='territory__rooms',
roles=('default',) * 9,
is_focal=(True,) * 8 + (False,),
bots_by_role={'default': {'noop_bot'}},
),
territory__rooms_3=ScenarioConfig(
description='focals visit a resident population that does nothing',
tags={
'visitor',
'versus_noop',
},
substrate='territory__rooms',
roles=('default',) * 9,
is_focal=(True,) + (False,) * 8,
bots_by_role={'default': {'noop_bot'}},
),
# keep-sorted end
)
|
meltingpot-main
|
meltingpot/configs/scenarios/__init__.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for the substrate: fruit_market_concentric_rivers.
Example video: https://youtu.be/djmylRv1i_w
This substrate has three concentric rings of water that confer a small stamina
cost to players who step on them.
"""
from meltingpot.configs.substrates import fruit_market as base_config
from meltingpot.utils.substrates import specs
from ml_collections import config_dict as configdict
# Re-export the base substrate's build function; only the layout differs here.
build = base_config.build

# Map layout. Each character maps to a prefab via CHAR_PREFAB_MAP below.
# 'L' cells are river water (forming three concentric rings), 'P' cells are
# spawn points that may also grow trees, '~' cells are ground that may grow
# trees, 'x' is plain ground, and the remaining punctuation draws the
# boundary walls and their shadows.
ASCII_MAP = """
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
x/___________________________+x
x'###########################`x
x!~~~~~~~~~~~~~~~~~~~~~~~~~~~|x
x!~~~~~~~~~~~~~~~~~~~~~~~~~~~|x
x!~~~LLLLLLLLLLLLLLLLLLLLL~~~|x
x!~~~L~~~~~~~~~~~~~~~~~~~L~~~|x
x!~~~L~~~~~~~~~~~~~~~~~~~L~~~|x
x!~~~L~~LLLLLLLLLLLLLLL~~L~~~|x
x!~~~L~~L~~~~~~~~~~~~~L~~L~~~|x
x!~~~L~~L~~~~~~~~~~~~~L~~L~~~|x
x!~~~L~~L~~LLLLLLLLL~~L~~L~~~|x
x!~~~L~~L~~LP~P~P~PL~~L~~L~~~|x
x!~~~L~~L~~L~P~P~P~L~~L~~L~~~|x
x!~~~L~~L~~L~~P~P~~L~~L~~L~~~|x
x!~~~L~~L~~L~P~P~P~L~~L~~L~~~|x
x!~~~L~~L~~L~~P~P~~L~~L~~L~~~|x
x!~~~L~~L~~L~P~P~P~L~~L~~L~~~|x
x!~~~L~~L~~LP~P~P~PL~~L~~L~~~|x
x!~~~L~~L~~LLLLLLLLL~~L~~L~~~|x
x!~~~L~~L~~~~~~~~~~~~~L~~L~~~|x
x!~~~L~~L~~~~~~~~~~~~~L~~L~~~|x
x!~~~L~~LLLLLLLLLLLLLLL~~L~~~|x
x!~~~L~~~~~~~~~~~~~~~~~~~L~~~|x
x!~~~L~~~~~~~~~~~~~~~~~~~L~~~|x
x!~~~LLLLLLLLLLLLLLLLLLLLL~~~|x
x!~~~~~~~~~~~~~~~~~~~~~~~~~~~|x
x!~~~~~~~~~~~~~~~~~~~~~~~~~~~|x
x!~~~~~~~~~~~~~~~~~~~~~~~~~~~|x
x(---------------------------)x
x<###########################>x
"""
# Map a character to the prefab it represents in the ASCII map.
# NOTE(review): entries of the form {"type": "all", "list": [...]} appear to
# stack every prefab in `list` on the same cell — confirm against the map
# parser in the base substrate.
CHAR_PREFAB_MAP = {
    # wall prefabs
    "/": {"type": "all", "list": ["ground", "nw_wall_corner"]},
    "'": {"type": "all", "list": ["ground", "nw_inner_wall_corner"]},
    "+": {"type": "all", "list": ["ground", "ne_wall_corner"]},
    "`": {"type": "all", "list": ["ground", "ne_inner_wall_corner"]},
    ")": {"type": "all", "list": ["ground", "se_wall_corner"]},
    "(": {"type": "all", "list": ["ground", "sw_wall_corner"]},
    "_": {"type": "all", "list": ["ground", "wall_north"]},
    "|": {"type": "all", "list": ["ground", "wall_east"]},
    "-": {"type": "all", "list": ["ground", "wall_south"]},
    "!": {"type": "all", "list": ["ground", "wall_west"]},
    "#": {"type": "all", "list": ["ground", "wall_shadow_s"]},
    ">": {"type": "all", "list": ["ground", "wall_shadow_se"]},
    "<": {"type": "all", "list": ["ground", "wall_shadow_sw"]},
    # non-wall prefabs
    "L": "river",
    "P": {"type": "all", "list": ["ground", "potential_tree", "spawn_point"]},
    "~": {"type": "all", "list": ["ground", "potential_tree"]},
    "x": "ground",
}
def get_config():
  """Returns the configuration for this substrate.

  Starts from the base fruit_market config and overrides the map layout,
  recommended player count, and the single-agent environment specs.
  """
  config = base_config.get_config()
  # Specify the number of players to participate in each episode (optional).
  config.recommended_num_players = 16
  # Override the map layout settings.
  config.layout = configdict.ConfigDict()
  config.layout.ascii_map = ASCII_MAP
  config.layout.char_prefab_map = CHAR_PREFAB_MAP
  # The specs of the environment (from a single-agent perspective).
  config.action_spec = specs.action(len(base_config.ACTION_SET))
  config.timestep_spec = specs.timestep({
      "RGB": specs.OBSERVATION["RGB"],
      "READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
      "STAMINA": specs.float64(),
      "INVENTORY": specs.int64(2),
      "MY_OFFER": specs.int64(2),
      "OFFERS": specs.int64(102),
      "HUNGER": specs.float64(),
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(248, 248,),
  })
  # The roles assigned to each player: half apple farmers, half banana farmers.
  config.valid_roles = frozenset({"apple_farmer", "banana_farmer"})
  config.default_player_roles = ("apple_farmer",) * 8 + ("banana_farmer",) * 8
  return config
|
meltingpot-main
|
meltingpot/configs/substrates/fruit_market__concentric_rivers.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Rationalizable Coordination in the Matrix.
Example video: https://youtu.be/IXakuZhvrxo
See _Running with Scissors in the Matrix_ for a general description of the
game dynamics. Here the payoff matrix represents a coordination game with
`K = 3` different ways to coordinate. Coordinating on one of the three options
yields a reward of 1 for both players, another yields a reward of 2, and the
third yields a reward of 3.
Players have the default `11 x 11` (off center) observation window.
Both players are removed and their inventories are reset after each interaction.
"""
from typing import Any, Dict, Mapping, Sequence
from meltingpot.configs.substrates import the_matrix
from meltingpot.utils.substrates import colors
from meltingpot.utils.substrates import game_object_utils
from meltingpot.utils.substrates import shapes
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
PrefabConfig = game_object_utils.PrefabConfig
# Warning: setting `_ENABLE_DEBUG_OBSERVATIONS = True` may cause slowdown.
_ENABLE_DEBUG_OBSERVATIONS = False

# The number of resources must match the (square) size of the matrix.
NUM_RESOURCES = 3

# RGBA color data for each resource class. Each *_COLOR_DATA pair is
# (base color, highlight color) and is consumed by `create_resource_prefab`.
# This color is yellow.
RESOURCE1_COLOR = (255, 227, 11, 255)
RESOURCE1_HIGHLIGHT_COLOR = (255, 214, 91, 255)
RESOURCE1_COLOR_DATA = (RESOURCE1_COLOR, RESOURCE1_HIGHLIGHT_COLOR)
# This color is violet.
RESOURCE2_COLOR = (109, 42, 255, 255)
RESOURCE2_HIGHLIGHT_COLOR = (132, 91, 255, 255)
RESOURCE2_COLOR_DATA = (RESOURCE2_COLOR, RESOURCE2_HIGHLIGHT_COLOR)
# This color is cyan.
RESOURCE3_COLOR = (42, 188, 255, 255)
RESOURCE3_HIGHLIGHT_COLOR = (91, 214, 255, 255)
RESOURCE3_COLOR_DATA = (RESOURCE3_COLOR, RESOURCE3_HIGHLIGHT_COLOR)
# The procedural generator replaces all 'a' chars in the default map with chars
# representing specific resources, i.e. with '1', '2', or '3' (see the
# "choice" entry for 'a' in CHAR_PREFAB_MAP below).
ASCII_MAP = """
WWWWWWWWWWWWWWWWWWWWWWWWW
WPPPP W W PPPPW
WPPPP PPPPW
WPPPP PPPPW
WPPPP PPPPW
W aa W
W 11 aa W
W 11 W
W 11 W
W WW W 222 W
WW 33 W 222 W
WWW 33 WWWWWWWWW W
W 33 111 WWW
W 111 W
W 22 W W
W 22 W WW W
W 22 W333 W
W 333 W
W aa W
WPPPP aa PPPPW
WPPPP PPPPW
WPPPP PPPPW
WPPPP W PPPPW
WWWWWWWWWWWWWWWWWWWWWWWWW
"""
# Prefab names for the three resource classes; index i holds class i+1.
_resource_names = [
    "resource_class1",
    "resource_class2",
    "resource_class3",
]

# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
    # 'a' cells get one of the three resource classes, selected by the
    # procedural generator (see the comment above ASCII_MAP).
    "a": {"type": "choice", "list": _resource_names},
    "1": _resource_names[0],
    "2": _resource_names[1],
    "3": _resource_names[2],
    "P": "spawn_point",
    "W": "wall",
}
_COMPASS = ["N", "E", "S", "W"]
WALL = {
"name": "wall",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall",
"stateConfigs": [{
"state": "wall",
"layer": "upperPhysical",
"sprite": "Wall",
}],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Wall"],
"spriteShapes": [shapes.WALL],
"palettes": [{"*": (95, 95, 95, 255),
"&": (100, 100, 100, 255),
"@": (109, 109, 109, 255),
"#": (152, 152, 152, 255)}],
"noRotates": [False]
}
},
{
"component": "BeamBlocker",
"kwargs": {
"beamType": "gameInteraction"
}
},
]
}
SPAWN_POINT = {
"name": "spawnPoint",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "spawnPoint",
"stateConfigs": [{
"state": "spawnPoint",
"layer": "alternateLogic",
"groups": ["spawnPoints"]
}],
}
},
{
"component": "Transform",
},
]
}
# PLAYER_COLOR_PALETTES is a list with each entry specifying the color to use
# for the player at the corresponding index.
NUM_PLAYERS_UPPER_BOUND = 32
# Use a comprehension rather than a module-level append loop: it is the
# idiomatic form and avoids leaking the loop variable into module scope.
PLAYER_COLOR_PALETTES = [
    shapes.get_palette(colors.palette[idx])
    for idx in range(NUM_PLAYERS_UPPER_BOUND)
]
# Primitive action components. Each action assigns a value to every entry of
# the avatar's "actionOrder" ("move", "turn", "interact").
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP       = {"move": 0, "turn":  0, "interact": 0}
FORWARD    = {"move": 1, "turn":  0, "interact": 0}
STEP_RIGHT = {"move": 2, "turn":  0, "interact": 0}
BACKWARD   = {"move": 3, "turn":  0, "interact": 0}
STEP_LEFT  = {"move": 4, "turn":  0, "interact": 0}
TURN_LEFT  = {"move": 0, "turn": -1, "interact": 0}
TURN_RIGHT = {"move": 0, "turn":  1, "interact": 0}
INTERACT   = {"move": 0, "turn":  0, "interact": 1}
# pyformat: enable
# pylint: enable=bad-whitespace

# The discrete action set exposed to agents, in index order.
ACTION_SET = (
    NOOP,
    FORWARD,
    BACKWARD,
    STEP_LEFT,
    STEP_RIGHT,
    TURN_LEFT,
    TURN_RIGHT,
    INTERACT,
)

# Sprite used to render the observing player's own avatar (blue palette).
TARGET_SPRITE_SELF = {
    "name": "Self",
    "shape": shapes.CUTE_AVATAR,
    "palette": shapes.get_palette((50, 100, 200)),
    "noRotate": True,
}

# Sprite used to render every other player's avatar (orange palette).
TARGET_SPRITE_OTHER = {
    "name": "Other",
    "shape": shapes.CUTE_AVATAR,
    "palette": shapes.get_palette((200, 100, 50)),
    "noRotate": True,
}
def create_scene():
  """Creates the global scene.

  The scene carries global (non-avatar) logic: the interaction payoff matrix
  and the stochastic episode-ending schedule.

  Returns:
    A game object config (dict) for the scene.
  """
  scene = {
      "name": "scene",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": "scene",
                  "stateConfigs": [{
                      "state": "scene",
                  }],
              }
          },
          {
              "component": "Transform",
          },
          {
              "component": "TheMatrix",
              "kwargs": {
                  # Prevent interaction before both interactors have collected
                  # at least one resource.
                  "disallowUnreadyInteractions": True,
                  # Pure coordination payoffs: nonzero reward only on the
                  # diagonal, worth 1, 2, or 3 depending on the option.
                  "matrix": [
                      # 1  2  3
                      [1, 0, 0],  # 1
                      [0, 2, 0],  # 2
                      [0, 0, 3]   # 3
                  ],
                  "resultIndicatorColorIntervals": [
                      #  red       # yellow    # green     # blue
                      (0.0, 0.5), (0.5, 1.5), (1.5, 2.5), (2.5, 3.0)
                  ],
              }
          },
          {
              "component": "StochasticIntervalEpisodeEnding",
              "kwargs": {
                  "minimumFramesPerEpisode": 1000,
                  "intervalLength": 100,  # Set equal to unroll length.
                  "probabilityTerminationPerInterval": 0.2
              }
          }
      ]
  }
  return scene
def create_resource_prefab(resource_id, color_data):
  """Creates resource prefab with provided `resource_id` (num) and color.

  Args:
    resource_id: numeric resource class (1, 2, or 3); also used to derive the
      prefab, state, and sprite names.
    color_data: pair of RGBA tuples (base color, highlight color).

  Returns:
    A prefab (dict) for a resource of the given class.
  """
  resource_name = "resource_class{}".format(resource_id)
  resource_prefab = {
      "name": resource_name,
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": resource_name,
                  "stateConfigs": [
                      # The "_wait" state is used while the resource is absent
                      # pending regeneration (see the Resource component).
                      {"state": resource_name + "_wait",
                       "groups": ["resourceWaits"]},
                      {"state": resource_name,
                       "layer": "lowerPhysical",
                       "sprite": resource_name + "_sprite"},
                  ]
              },
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": [resource_name + "_sprite"],
                  "spriteShapes": [shapes.BUTTON],
                  "palettes": [{"*": color_data[0],
                                "#": color_data[1],
                                "x": (0, 0, 0, 0)}],
                  "noRotates": [False]
              },
          },
          {
              "component": "Resource",
              "kwargs": {
                  "resourceClass": resource_id,
                  "visibleType": resource_name,
                  "waitState": resource_name + "_wait",
                  "regenerationRate": 0.04,
                  "regenerationDelay": 10,
              },
          },
          {
              "component": "Destroyable",
              "kwargs": {
                  "waitState": resource_name + "_wait",
                  # It is possible to destroy resources but takes concerted
                  # effort to do so by zapping them `initialHealth` times.
                  "initialHealth": 3,
              },
          },
      ]
  }
  return resource_prefab
def create_prefabs() -> PrefabConfig:
  """Returns the prefabs for this substrate.

  Prefabs are a dictionary mapping names to template game objects that can
  be cloned and placed in multiple locations according to an ascii map.
  """
  resource_color_data = (
      RESOURCE1_COLOR_DATA,
      RESOURCE2_COLOR_DATA,
      RESOURCE3_COLOR_DATA,
  )
  prefabs = {"wall": WALL, "spawn_point": SPAWN_POINT}
  # Resource classes are 1-indexed, matching the payoff matrix rows/columns.
  for resource_id, color_data in enumerate(resource_color_data, start=1):
    prefabs["resource_class{}".format(resource_id)] = create_resource_prefab(
        resource_id, color_data)
  return prefabs
def create_avatar_object(player_idx: int,
                         all_source_sprite_names: Sequence[str],
                         target_sprite_self: Dict[str, Any],
                         target_sprite_other: Dict[str, Any]) -> Dict[str, Any]:
  """Create an avatar object given self vs other sprite data.

  Args:
    player_idx: 0-based index of the player this avatar belongs to.
    all_source_sprite_names: source sprite names ("Avatar1", ...) for every
      player; all but this player's own sprite are remapped to the "other"
      sprite.
    target_sprite_self: sprite data used to render this player's own avatar.
    target_sprite_other: sprite data used to render all other avatars.

  Returns:
    A game object config (dict) for the avatar.
  """
  # Lua is 1-indexed.
  lua_index = player_idx + 1
  # Setup the self vs other sprite mapping.
  source_sprite_self = "Avatar" + str(lua_index)
  custom_sprite_map = {source_sprite_self: target_sprite_self["name"]}
  for name in all_source_sprite_names:
    if name != source_sprite_self:
      custom_sprite_map[name] = target_sprite_other["name"]
  live_state_name = "player{}".format(lua_index)
  avatar_object = {
      "name": "avatar",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": live_state_name,
                  "stateConfigs": [
                      {"state": live_state_name,
                       "layer": "upperPhysical",
                       "sprite": source_sprite_self,
                       "contact": "avatar",
                       "groups": ["players"]},
                      # "playerWait" is the state used while the avatar is
                      # removed from the map (e.g. after an interaction).
                      {"state": "playerWait",
                       "groups": ["playerWaits"]},
                  ]
              }
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "colored_square",
                  "spriteNames": [source_sprite_self],
                  # A white square should never be displayed. It will always be
                  # remapped since this is self vs other observation mode.
                  "spriteRGBColors": [(255, 255, 255, 255)],
              }
          },
          {
              "component": "AdditionalSprites",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "customSpriteNames": [target_sprite_self["name"],
                                        target_sprite_other["name"]],
                  "customSpriteShapes": [target_sprite_self["shape"],
                                         target_sprite_other["shape"]],
                  "customPalettes": [target_sprite_self["palette"],
                                     target_sprite_other["palette"]],
                  "customNoRotates": [target_sprite_self["noRotate"],
                                      target_sprite_other["noRotate"]],
              }
          },
          {
              "component": "Avatar",
              "kwargs": {
                  "index": lua_index,
                  "aliveState": live_state_name,
                  "waitState": "playerWait",
                  "speed": 1.0,
                  "spawnGroup": "spawnPoints",
                  "actionOrder": ["move", "turn", "interact"],
                  "actionSpec": {
                      "move": {"default": 0, "min": 0, "max": len(_COMPASS)},
                      "turn": {"default": 0, "min": -1, "max": 1},
                      "interact": {"default": 0, "min": 0, "max": 1},
                  },
                  # Default (off center) 11 x 11 observation window.
                  "view": {
                      "left": 5,
                      "right": 5,
                      "forward": 9,
                      "backward": 1,
                      "centered": False
                  },
                  "spriteMap": custom_sprite_map,
                  # The following kwarg makes it possible to get rewarded even
                  # on frames when an avatar is "dead". It is needed for
                  # in-the-matrix games in order to correctly handle the case
                  # of two players getting hit simultaneously by the same beam.
                  "skipWaitStateRewards": False,
              }
          },
          {
              "component": "GameInteractionZapper",
              "kwargs": {
                  "cooldownTime": 2,
                  "beamLength": 3,
                  "beamRadius": 1,
                  "framesTillRespawn": 50,
                  "numResources": NUM_RESOURCES,
                  "endEpisodeOnFirstInteraction": False,
                  # Reset both players' inventories after each interaction.
                  "reset_winner_inventory": True,
                  "reset_loser_inventory": True,
                  # Both players get removed after each interaction.
                  "losingPlayerDies": True,
                  "winningPlayerDies": True,
                  # `freezeOnInteraction` is the number of frames to display the
                  # interaction result indicator, freeze, and delay delivering
                  # all results of interacting.
                  "freezeOnInteraction": 16,
              }
          },
          {
              "component": "ReadyToShootObservation",
              "kwargs": {
                  "zapperComponent": "GameInteractionZapper",
              }
          },
          {
              "component": "InventoryObserver",
              "kwargs": {
              }
          },
          {
              "component": "Taste",
              "kwargs": {
                  "mostTastyResourceClass": -1,  # -1 indicates no preference.
                  # No resource is most tasty when mostTastyResourceClass == -1.
                  "mostTastyReward": 0.1,
              }
          },
          {
              "component": "InteractionTaste",
              "kwargs": {
                  "mostTastyResourceClass": -1,  # -1 indicates no preference.
                  "zeroDefaultInteractionReward": False,
                  "extraReward": 1.0,
              }
          },
          {
              "component": "AvatarMetricReporter",
              "kwargs": {
                  "metrics": [
                      {
                          # Report the inventories of both players involved in
                          # an interaction on this frame formatted as
                          # (self inventory, partner inventory).
                          "name": "INTERACTION_INVENTORIES",
                          "type": "tensor.DoubleTensor",
                          "shape": (2, NUM_RESOURCES),
                          "component": "GameInteractionZapper",
                          "variable": "latest_interaction_inventories",
                      },
                      *the_matrix.get_cumulant_metric_configs(NUM_RESOURCES),
                  ]
              }
          },
      ]
  }
  # Optionally add debug-only observations (may slow the substrate down).
  if _ENABLE_DEBUG_OBSERVATIONS:
    avatar_object["components"].append({
        "component": "LocationObserver",
        "kwargs": {"objectIsAvatar": True, "alsoReportOrientation": True},
    })
  return avatar_object
def get_all_source_sprite_names(num_players):
  """Returns the source sprite name for each player: ["Avatar1", ...].

  Args:
    num_players: number of players in the episode.

  Returns:
    A list of length `num_players` with one sprite name per player. Names are
    numbered from 1 because Lua is 1-indexed.
  """
  # A comprehension replaces the original manual append loop (same output).
  return ["Avatar" + str(player_idx + 1) for player_idx in range(num_players)]
def create_avatar_objects(num_players):
  """Builds one avatar object plus one readiness marker per player.

  Args:
    num_players: number of players to build avatars for.

  Returns:
    A list containing, for each player in order, its avatar game object
    followed by its ready-to-interact marker.
  """
  sprite_names = get_all_source_sprite_names(num_players)
  objects = []
  for idx in range(num_players):
    objects.append(
        create_avatar_object(idx,
                             sprite_names,
                             TARGET_SPRITE_SELF,
                             TARGET_SPRITE_OTHER))
    objects.append(the_matrix.create_ready_to_interact_marker(idx))
  return objects
def create_world_sprite_map(
    num_players: int, target_sprite_other: Dict[str, Any]) -> Dict[str, str]:
  """Returns a sprite map remapping every avatar sprite to the "other" sprite.

  Used for the global (WORLD.RGB) view so that videos remain informative even
  though individual avatar views remap sprites (self vs other mode).

  Args:
    num_players: number of players whose sprites should be remapped.
    target_sprite_other: sprite data; only its "name" entry is used.

  Returns:
    Mapping from each source sprite name ("Avatar1", ...) to the name of the
    "other" sprite.
  """
  # A dict comprehension replaces the original manual loop (same output).
  return {
      name: target_sprite_other["name"]
      for name in get_all_source_sprite_names(num_players)
  }
def get_config():
  """Returns the default configuration for this substrate.

  Covers the action set, observation names, single-agent specs, and the
  default role assignment (8 players, all "default").
  """
  config = config_dict.ConfigDict()
  # Action set configuration.
  config.action_set = ACTION_SET
  # Observation format configuration.
  config.individual_observation_names = [
      "RGB",
      "INVENTORY",
      "READY_TO_SHOOT",
      # Debug only (do not use the following observations in policies).
      "INTERACTION_INVENTORIES",
  ]
  config.global_observation_names = [
      "WORLD.RGB",
  ]
  # The specs of the environment (from a single-agent perspective).
  config.action_spec = specs.action(len(ACTION_SET))
  config.timestep_spec = specs.timestep({
      "RGB": specs.OBSERVATION["RGB"],
      "INVENTORY": specs.inventory(3),
      "READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
      # Debug only (do not use the following observations in policies).
      "INTERACTION_INVENTORIES": specs.interaction_inventories(3),
      "WORLD.RGB": specs.rgb(192, 200),
  })
  # The roles assigned to each player.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 8
  return config
def build(
    roles: Sequence[str],
    config: config_dict.ConfigDict,
) -> Mapping[str, Any]:
  """Build substrate definition given roles.

  Args:
    roles: the role for each player; only the number of players is used here.
    config: substrate configuration (unused).

  Returns:
    The substrate definition (dict) consumed by the engine.
  """
  del config
  num_players = len(roles)
  # Build the rest of the substrate definition.
  substrate_definition = dict(
      levelName="the_matrix",
      levelDirectory="meltingpot/lua/levels",
      numPlayers=num_players,
      # Define upper bound of episode length since episodes end stochastically.
      maxEpisodeLengthFrames=5000,
      spriteSize=8,
      topology="BOUNDED",  # Choose from ["BOUNDED", "TORUS"],
      simulation={
          "map": ASCII_MAP,
          "gameObjects": create_avatar_objects(num_players=num_players),
          "scene": create_scene(),
          "prefabs": create_prefabs(),
          "charPrefabMap": CHAR_PREFAB_MAP,
          # worldSpriteMap is needed to make the global view used in videos
          # be informative in cases where individual avatar views have had
          # sprites remapped to one another (example: self vs other mode).
          "worldSpriteMap": create_world_sprite_map(num_players,
                                                    TARGET_SPRITE_OTHER),
      }
  )
  return substrate_definition
|
meltingpot-main
|
meltingpot/configs/substrates/rationalizable_coordination_in_the_matrix__arena.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Collaborative Cooking: Crowded.
Example video: https://youtu.be/_6j3yYbf434
The recipe they must follow is for tomato soup:
1. Add three tomatoes to the cooking pot.
2. Wait for the soup to cook (status bar completion).
3. Bring a bowl to the pot and pour the soup from the pot into the bowl.
4. Deliver the bowl of soup at the goal location.
This substrate is a pure common interest game. All players share all rewards.
Players have a `5 x 5` observation window.
Map:
Crowded: here players can pass each other in the kitchen, allowing less
coordinated yet inefficient strategies by individual players. The
most efficient strategies involve passing ingredients over the central counter.
There is a choke point where it is likely that players who do not work as a
team will get in one another's way.
"""
from meltingpot.configs.substrates import collaborative_cooking as base_config
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
# Re-export the base substrate's build function; only the layout differs here.
build = base_config.build

# Crowded: notice that there are more spawn points than the recommended number
# of players. Since players are spawned randomly this means the numbers
# starting on either side of the divider will vary from episode to episode and
# generally be imbalanced.
ASCII_MAP = """
###D###O#O###
#P P# P ##
# # P ##
C P #P ##
# #P T
C P# P ##
# P # P ##
#P ##
#############
"""
def get_config():
  """Build the default configuration for the crowded kitchen layout."""
  config = base_config.get_config()

  # Swap in this substrate's map, replacing the base layout entirely.
  layout = config_dict.ConfigDict()
  layout.ascii_map = ASCII_MAP
  config.layout = layout

  # The specs of the environment (from a single-agent perspective).
  config.timestep_spec = specs.timestep({
      "RGB": specs.rgb(40, 40),
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(72, 104),
  })

  # All nine players share the single interchangeable role.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 9
  return config
|
meltingpot-main
|
meltingpot/configs/substrates/collaborative_cooking__crowded.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Externality Mushrooms: Dense.
Example video: https://youtu.be/MwHhg7sa0xs
See base config: externality_mushrooms.py. Here the map is such that mushrooms
may grow anywhere on the map and most of the map can become full of mushrooms.
This may sometimes make it necessary to actively avoid or destroy undesirable
mushrooms.
"""
from meltingpot.configs.substrates import externality_mushrooms as base_config
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
build = base_config.build
ASCII_MAP = """
/_____________________+
'#####################`
! |
! R G |
! R |
! |
! G |
! B O |
! B |
! R |
! |
! B G |
! |
(---------------------)
"""
# Map a character to the prefab it represents in the ASCII map.
CHAR_PREFAB_MAP = {
" ": {"type": "all", "list": ["dirt", "spawn_point", "potential_mushroom"]},
"R": {"type": "all", "list": ["dirt", "red_mushroom"]},
"G": {"type": "all", "list": ["dirt", "green_mushroom"]},
"B": {"type": "all", "list": ["dirt", "blue_mushroom"]},
"O": {"type": "all", "list": ["dirt", "orange_mushroom"]},
# fence prefabs
"/": {"type": "all", "list": ["dirt", "nw_wall_corner"]},
"'": {"type": "all", "list": ["dirt", "nw_inner_wall_corner"]},
"+": {"type": "all", "list": ["dirt", "ne_wall_corner"]},
"`": {"type": "all", "list": ["dirt", "ne_inner_wall_corner"]},
")": {"type": "all", "list": ["dirt", "se_wall_corner"]},
"(": {"type": "all", "list": ["dirt", "sw_wall_corner"]},
"_": {"type": "all", "list": ["dirt", "wall_north"]},
"|": {"type": "all", "list": ["dirt", "wall_east"]},
"-": {"type": "all", "list": ["dirt", "wall_south"]},
"!": {"type": "all", "list": ["dirt", "wall_west"]},
"#": {"type": "all", "list": ["dirt", "wall_shadow_s"]},
">": {"type": "all", "list": ["dirt", "wall_shadow_se"]},
"<": {"type": "all", "list": ["dirt", "wall_shadow_sw"]},
}
def get_config():
  """Build the default configuration for the dense mushrooms layout."""
  config = base_config.get_config()

  # Replace the base layout with this substrate's map and prefab legend.
  layout = config_dict.ConfigDict()
  layout.ascii_map = ASCII_MAP
  layout.char_prefab_map = CHAR_PREFAB_MAP
  config.layout = layout

  # The specs of the environment (from a single-agent perspective).
  config.timestep_spec = specs.timestep({
      "RGB": specs.OBSERVATION["RGB"],
      "READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(112, 184),
  })

  # Five interchangeable players by default.
  config.default_player_roles = ("default",) * 5
  return config
|
meltingpot-main
|
meltingpot/configs/substrates/externality_mushrooms__dense.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Running with Scissors in the Matrix (two player, repeated).
Example video: https://youtu.be/rZH9nUKefcU
Players can move around the map and collect resources of `K` discrete types. In
addition to movement, the agents have an action to fire an "interaction" beam.
All players carry an inventory with the count of resources picked up since last
respawn.
Players can observe their own inventory but not the inventories of their
coplayers. When another agent is zapped with the interaction beam, an
interaction occurs. The resolution of the interactions is determined by a
traditional matrix game, where there is a `K x K` payoff matrix describing the
reward produced by the pure strategies available to the two players. The
resources map one-to-one to the pure strategies of the matrix game. Unless
stated otherwise, for the purposes of resolving the interaction, the zapping
agent is considered the row player, and the zapped agent the column player. The
actual strategy played depends on the resources picked up before the
interaction. The more resources of a given type an agent picks up, the more
committed the agent becomes to the pure strategy corresponding to that resource.
In the case of running with scissors, `K = 3`, corresponding to rock, paper, and
scissors pure strategies respectively.
The payoff matrix is the traditional rock-paper-scissors game matrix.
Running with scissors was first described in Vezhnevets et al. (2020). Two
players gather rock, paper or scissor resources in the environment and can
challenge one another to a 'rock, paper scissor' game, the outcome of which
depends on the resources they collected. It is possible to observe the policy
that one's partner is starting to implement, either by watching them pick up
resources or by noting which resources are missing, and then take
countermeasures. This induces a wealth of possible feinting strategies.
Players can also zap resources with their interaction beam to destroy them. This
creates additional scope for feinting strategies.
Players have a `5 x 5` observation window.
The episode has a chance of ending stochastically on every 100 step interval
after step 1000. This usually allows time for 8 or more interactions.
Vezhnevets, A., Wu, Y., Eckstein, M., Leblond, R. and Leibo, J.Z., 2020. OPtions
as REsponses: Grounding behavioural hierarchies in multi-agent reinforcement
learning. In International Conference on Machine Learning (pp. 9733-9742). PMLR.
"""
from typing import Any, Dict, Mapping, Sequence, Tuple
from meltingpot.configs.substrates import the_matrix
from meltingpot.utils.substrates import colors
from meltingpot.utils.substrates import shapes
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
# Warning: setting `_ENABLE_DEBUG_OBSERVATIONS = True` may cause slowdown.
_ENABLE_DEBUG_OBSERVATIONS = False
# The number of resources must match the (square) size of the matrix.
NUM_RESOURCES = 3
# This color is yellow.
RESOURCE1_COLOR = (255, 227, 11, 255)
RESOURCE1_HIGHLIGHT_COLOR = (255, 214, 91, 255)
RESOURCE1_COLOR_DATA = (RESOURCE1_COLOR, RESOURCE1_HIGHLIGHT_COLOR)
# This color is violet.
RESOURCE2_COLOR = (109, 42, 255, 255)
RESOURCE2_HIGHLIGHT_COLOR = (132, 91, 255, 255)
RESOURCE2_COLOR_DATA = (RESOURCE2_COLOR, RESOURCE2_HIGHLIGHT_COLOR)
# This color is cyan.
RESOURCE3_COLOR = (42, 188, 255, 255)
RESOURCE3_HIGHLIGHT_COLOR = (91, 214, 255, 255)
RESOURCE3_COLOR_DATA = (RESOURCE3_COLOR, RESOURCE3_HIGHLIGHT_COLOR)
ASCII_MAP = """
WWWWWWWWWWWWWWWWWWWWWWW
Wn n nW
W WWW W W WW W
W W rra app W W
Wn WW rra app WW nW
W rra app W
W W
Wn WW n nW
W WWWW W
W ssa W W
Wn W ssa W aaa W nW
W W ssa W aaa WW W
W WWWW W W W WWW W
Wn n nW
WWWWWWWWWWWWWWWWWWWWWWW
"""
# Prefab names for the three resource classes. Per the module docstring, they
# correspond to the rock, paper, and scissors pure strategies respectively.
_resource_names = [
    "resource_class1",
    "resource_class2",
    "resource_class3",
]
# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
    "a": {"type": "choice", "list": _resource_names},  # randomly chosen class
    "r": _resource_names[0],  # rock
    "p": _resource_names[1],  # paper
    "s": _resource_names[2],  # scissors
    "n": "spawn_point",
    "W": "wall",
}
# Cardinal directions; len(_COMPASS) bounds the "move" action range below.
_COMPASS = ["N", "E", "S", "W"]
# Wall prefab: occupies the upperPhysical layer and blocks the interaction
# beam via the BeamBlocker component.
WALL = {
    "name": "wall",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "wall",
                "stateConfigs": [{
                    "state": "wall",
                    "layer": "upperPhysical",
                    "sprite": "Wall",
                }],
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["Wall"],
                "spriteShapes": [shapes.WALL],
                # Grey palette for the wall sprite.
                "palettes": [{"*": (95, 95, 95, 255),
                              "&": (100, 100, 100, 255),
                              "@": (109, 109, 109, 255),
                              "#": (152, 152, 152, 255)}],
                "noRotates": [False]
            }
        },
        {
            # Walls stop the "gameInteraction" beam used to start interactions.
            "component": "BeamBlocker",
            "kwargs": {
                "beamType": "gameInteraction"
            }
        },
    ]
}
# Invisible spawn-point prefab. Avatars are placed on members of the
# "spawnPoints" group (see the Avatar component's `spawnGroup` kwarg below).
SPAWN_POINT = {
    "name": "spawnPoint",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "spawnPoint",
                # Lives on the alternateLogic layer, so it does not collide
                # with avatars or resources.
                "stateConfigs": [{
                    "state": "spawnPoint",
                    "layer": "alternateLogic",
                    "groups": ["spawnPoints"]
                }],
            }
        },
        {
            "component": "Transform",
        },
    ]
}
# PLAYER_COLOR_PALETTES is a list with each entry specifying the color to use
# for the player at the corresponding index.
NUM_PLAYERS_UPPER_BOUND = 8
PLAYER_COLOR_PALETTES = [
    shapes.get_palette(colors.palette[idx])
    for idx in range(NUM_PLAYERS_UPPER_BOUND)
]
# Primitive action components.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "interact": 0}
FORWARD = {"move": 1, "turn": 0, "interact": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "interact": 0}
BACKWARD = {"move": 3, "turn": 0, "interact": 0}
STEP_LEFT = {"move": 4, "turn": 0, "interact": 0}
TURN_LEFT = {"move": 0, "turn": -1, "interact": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "interact": 0}
INTERACT = {"move": 0, "turn": 0, "interact": 1}
# pyformat: enable
# pylint: enable=bad-whitespace
ACTION_SET = (
NOOP,
FORWARD,
BACKWARD,
STEP_LEFT,
STEP_RIGHT,
TURN_LEFT,
TURN_RIGHT,
INTERACT,
)
TARGET_SPRITE_SELF = {
"name": "Self",
"shape": shapes.CUTE_AVATAR,
"palette": shapes.get_palette((50, 100, 200)),
"noRotate": True,
}
TARGET_SPRITE_OTHER = {
"name": "Other",
"shape": shapes.CUTE_AVATAR,
"palette": shapes.get_palette((200, 100, 50)),
"noRotate": True,
}
def create_scene():
  """Builds the global scene object holding cross-player game logic."""
  state_manager = {
      "component": "StateManager",
      "kwargs": {
          "initialState": "scene",
          "stateConfigs": [{
              "state": "scene",
          }],
      }
  }
  the_matrix = {
      "component": "TheMatrix",
      "kwargs": {
          # Prevent interaction before both interactors have collected
          # at least one resource.
          "disallowUnreadyInteractions": True,
          # Rock-paper-scissors payoff matrix (row player perspective).
          "matrix": [
              [0, -10, 10],
              [10, 0, -10],
              [-10, 10, 0]
          ],
          "resultIndicatorColorIntervals": [
              (-10.0, -5.0),  # red
              (-5.0, -2.5),  # yellow
              (-2.5, 2.5),  # green
              (2.5, 5.0),  # blue
              (5.0, 10.0)  # violet
          ],
      }
  }
  episode_ending = {
      "component": "StochasticIntervalEpisodeEnding",
      "kwargs": {
          "minimumFramesPerEpisode": 1000,
          "intervalLength": 100,  # Set equal to unroll length.
          "probabilityTerminationPerInterval": 0.2
      }
  }
  return {
      "name": "scene",
      "components": [
          state_manager,
          {"component": "Transform"},
          the_matrix,
          episode_ending,
      ],
  }
def create_resource_prefab(
    resource_id: int,
    resource_shape: str,
    resource_palette: Dict[str, Tuple[int, int, int, int]]):
  """Builds the prefab for one resource class.

  Args:
    resource_id: integer id of the resource class (1-based).
    resource_palette: mapping from palette characters to RGBA tuples.

  Returns:
    A prefab-style dictionary defining the resource game object.
  """
  name = "resource_class{}".format(resource_id)
  wait_state = name + "_wait"
  sprite = name + "_sprite"
  return {
      "name": name,
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": name,
                  # Two states: waiting to regenerate, and visible on the map.
                  "stateConfigs": [
                      {"state": wait_state,
                       "groups": ["resourceWaits"]},
                      {"state": name,
                       "layer": "lowerPhysical",
                       "sprite": sprite},
                  ]
              },
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": [sprite],
                  "spriteShapes": [resource_shape],
                  "palettes": [resource_palette],
                  "noRotates": [True]
              },
          },
          {
              "component": "Resource",
              "kwargs": {
                  "resourceClass": resource_id,
                  "visibleType": name,
                  "waitState": wait_state,
                  "regenerationRate": 0.02,
                  "regenerationDelay": 15,
              },
          },
          {
              "component": "Destroyable",
              "kwargs": {
                  "waitState": wait_state,
                  # It is possible to destroy resources but takes concerted
                  # effort to do so by zapping them `initialHealth` times.
                  "initialHealth": 3,
              },
          },
      ],
  }
def create_avatar_object(
    player_idx: int,
    all_source_sprite_names: Sequence[str],
    target_sprite_self: Dict[str, Any],
    target_sprite_other: Dict[str, Any],
    turn_off_default_reward: bool = False) -> Dict[str, Any]:
  """Create an avatar object given self vs other sprite data.

  Args:
    player_idx: zero-based index of this player.
    all_source_sprite_names: sprite names for all avatars; every name other
      than this avatar's own is remapped to the "other" target sprite.
    target_sprite_self: sprite data (name/shape/palette/noRotate) shown to
      this avatar for itself.
    target_sprite_other: sprite data shown to this avatar for all coplayers.
    turn_off_default_reward: if True, zero the default interaction reward
      (forwarded to the InteractionTaste component's
      `zeroDefaultInteractionReward` kwarg).

  Returns:
    A prefab-style dictionary defining this player's avatar game object.
  """
  # Lua is 1-indexed.
  lua_index = player_idx + 1
  # Setup the self vs other sprite mapping.
  source_sprite_self = "Avatar" + str(lua_index)
  custom_sprite_map = {source_sprite_self: target_sprite_self["name"]}
  for name in all_source_sprite_names:
    if name != source_sprite_self:
      custom_sprite_map[name] = target_sprite_other["name"]
  live_state_name = "player{}".format(lua_index)
  avatar_object = {
      "name": "avatar",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": live_state_name,
                  # Two states: alive on the map, and waiting to respawn.
                  "stateConfigs": [
                      {"state": live_state_name,
                       "layer": "upperPhysical",
                       "sprite": source_sprite_self,
                       "contact": "avatar",
                       "groups": ["players"]},
                      {"state": "playerWait",
                       "groups": ["playerWaits"]},
                  ]
              }
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "colored_square",
                  "spriteNames": [source_sprite_self],
                  # A white square should never be displayed. It will always be
                  # remapped since this is self vs other observation mode.
                  "spriteRGBColors": [(255, 255, 255, 255)],
              }
          },
          {
              # Registers the self/other sprites referenced by
              # `custom_sprite_map` below.
              "component": "AdditionalSprites",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "customSpriteNames": [target_sprite_self["name"],
                                        target_sprite_other["name"]],
                  "customSpriteShapes": [target_sprite_self["shape"],
                                         target_sprite_other["shape"]],
                  "customPalettes": [target_sprite_self["palette"],
                                     target_sprite_other["palette"]],
                  "customNoRotates": [target_sprite_self["noRotate"],
                                      target_sprite_other["noRotate"]],
              }
          },
          {
              "component": "Avatar",
              "kwargs": {
                  "index": lua_index,
                  "aliveState": live_state_name,
                  "waitState": "playerWait",
                  "speed": 1.0,
                  # Avatars respawn on objects in the "spawnPoints" group
                  # (see the SPAWN_POINT prefab).
                  "spawnGroup": "spawnPoints",
                  "actionOrder": ["move", "turn", "interact"],
                  "actionSpec": {
                      "move": {"default": 0, "min": 0, "max": len(_COMPASS)},
                      "turn": {"default": 0, "min": -1, "max": 1},
                      "interact": {"default": 0, "min": 0, "max": 1},
                  },
                  # 5 x 5 off-center observation window (2 + 1 + 2 wide,
                  # 3 forward + self + 1 backward tall).
                  "view": {
                      "left": 2,
                      "right": 2,
                      "forward": 3,
                      "backward": 1,
                      "centered": False
                  },
                  "spriteMap": custom_sprite_map,
                  # The following kwarg makes it possible to get rewarded even
                  # on frames when an avatar is "dead". It is needed for in the
                  # matrix games in order to correctly handle the case of two
                  # players getting hit simultaneously by the same beam.
                  "skipWaitStateRewards": False,
              }
          },
          {
              "component": "GameInteractionZapper",
              "kwargs": {
                  "cooldownTime": 2,
                  "beamLength": 3,
                  "beamRadius": 1,
                  "framesTillRespawn": 5,
                  "numResources": NUM_RESOURCES,
                  "endEpisodeOnFirstInteraction": False,
                  # Reset both players' inventories after each interaction.
                  "reset_winner_inventory": True,
                  "reset_loser_inventory": True,
                  # Both players get removed after each interaction.
                  "losingPlayerDies": True,
                  "winningPlayerDies": True,
                  # `freezeOnInteraction` is the number of frames to display the
                  # interaction result indicator, freeze, and delay delivering
                  # all results of interacting.
                  "freezeOnInteraction": 16,
              }
          },
          {
              "component": "ReadyToShootObservation",
              "kwargs": {
                  "zapperComponent": "GameInteractionZapper",
              }
          },
          {
              "component": "InventoryObserver",
              "kwargs": {
              }
          },
          {
              "component": "SpawnResourcesWhenAllPlayersZapped",
          },
          {
              "component": "Taste",
              "kwargs": {
                  "mostTastyResourceClass": -1,  # -1 indicates no preference.
                  # No resource is most tasty when mostTastyResourceClass == -1.
                  "mostTastyReward": 0.1,
              }
          },
          {
              "component": "InteractionTaste",
              "kwargs": {
                  "mostTastyResourceClass": -1,  # -1 indicates no preference.
                  "zeroDefaultInteractionReward": turn_off_default_reward,
                  "extraReward": 1.0,
              }
          },
          {
              "component": "AvatarMetricReporter",
              "kwargs": {
                  "metrics": [
                      {
                          # Report the inventories of both players involved in
                          # an interaction on this frame formatted as
                          # (self inventory, partner inventory).
                          "name": "INTERACTION_INVENTORIES",
                          "type": "tensor.DoubleTensor",
                          "shape": (2, NUM_RESOURCES),
                          "component": "GameInteractionZapper",
                          "variable": "latest_interaction_inventories",
                      },
                      *the_matrix.get_cumulant_metric_configs(NUM_RESOURCES),
                  ]
              }
          },
      ]
  }
  if _ENABLE_DEBUG_OBSERVATIONS:
    avatar_object["components"].append({
        "component": "LocationObserver",
        "kwargs": {"objectIsAvatar": True, "alsoReportOrientation": True},
    })
  return avatar_object
def create_prefabs():
  """Returns a dictionary mapping names to template game objects.

  Includes the wall and spawn-point prefabs plus one prefab per resource
  class, keyed "resource_class1" ... "resource_class3".
  """
  prefabs = {
      "wall": WALL,
      "spawn_point": SPAWN_POINT,
  }
  # The three resource prefabs differ only in their id and colors, so build
  # them in a loop over the color data rather than repeating the call.
  color_data = (RESOURCE1_COLOR_DATA, RESOURCE2_COLOR_DATA,
                RESOURCE3_COLOR_DATA)
  for resource_id, (color, highlight) in enumerate(color_data, start=1):
    prefabs["resource_class{}".format(resource_id)] = create_resource_prefab(
        resource_id, shapes.BUTTON, {"*": color,
                                     "#": highlight,
                                     "x": (0, 0, 0, 0)})
  return prefabs
def get_all_source_sprite_names(num_players):
  """Returns the avatar sprite names "Avatar1" ... "Avatar<num_players>"."""
  # Lua is 1-indexed, so sprite numbering starts at 1.
  return ["Avatar{}".format(idx + 1) for idx in range(num_players)]
def create_avatar_objects(num_players,
                          turn_off_default_reward: bool = False):
  """Returns list of avatar objects of length 'num_players'.

  Each avatar is immediately followed by its readiness-marker object, so the
  returned list interleaves the two and has 2 * num_players entries.
  """
  sprite_names = get_all_source_sprite_names(num_players)
  avatar_objects = []
  for player_idx in range(num_players):
    avatar_objects.append(create_avatar_object(
        player_idx,
        sprite_names,
        TARGET_SPRITE_SELF,
        TARGET_SPRITE_OTHER,
        turn_off_default_reward=turn_off_default_reward))
    avatar_objects.append(
        the_matrix.create_ready_to_interact_marker(player_idx))
  return avatar_objects
def create_world_sprite_map(
    num_players: int, target_sprite_other: Dict[str, Any]) -> Dict[str, str]:
  """Maps every avatar's source sprite to the "other" target sprite name."""
  other_name = target_sprite_other["name"]
  return {
      source_name: other_name
      for source_name in get_all_source_sprite_names(num_players)
  }
def get_config():
  """Build the default substrate configuration."""
  config = config_dict.ConfigDict()

  # Useful to override in training config files.
  config.turn_off_default_reward = False

  # Action set configuration.
  config.action_set = ACTION_SET

  # Observation format configuration.
  config.individual_observation_names = [
      "RGB",
      "INVENTORY",
      "READY_TO_SHOOT",
      # Debug only (do not use the following observations in policies).
      "INTERACTION_INVENTORIES",
  ]
  config.global_observation_names = ["WORLD.RGB"]

  # The specs of the environment (from a single-agent perspective).
  config.action_spec = specs.action(len(ACTION_SET))
  config.timestep_spec = specs.timestep({
      "RGB": specs.rgb(40, 40),
      "INVENTORY": specs.inventory(3),
      "READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
      # Debug only (do not use the following observations in policies).
      "INTERACTION_INVENTORIES": specs.interaction_inventories(3),
      "WORLD.RGB": specs.rgb(120, 184),
  })

  # Two interchangeable players.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 2
  return config
def build(
    roles: Sequence[str],
    config: config_dict.ConfigDict,
) -> Mapping[str, Any]:
  """Build substrate definition given roles.

  Args:
    roles: one entry per player; only the count is used here.
    config: unused (the substrate is fully determined by this module).

  Returns:
    The substrate definition dictionary for the engine.
  """
  del config
  num_players = len(roles)
  simulation = {
      "map": ASCII_MAP,
      "gameObjects": create_avatar_objects(num_players=num_players),
      "scene": create_scene(),
      "prefabs": create_prefabs(),
      "charPrefabMap": CHAR_PREFAB_MAP,
      # worldSpriteMap is needed to make the global view used in videos be
      # be informative in cases where individual avatar views have had
      # sprites remapped to one another (example: self vs other mode).
      "worldSpriteMap": create_world_sprite_map(num_players,
                                                TARGET_SPRITE_OTHER),
  }
  return dict(
      levelName="the_matrix",
      levelDirectory="meltingpot/lua/levels",
      numPlayers=num_players,
      # Define upper bound of episode length since episodes end stochastically.
      maxEpisodeLengthFrames=5000,
      spriteSize=8,
      topology="BOUNDED",  # Choose from ["BOUNDED", "TORUS"],
      simulation=simulation,
  )
|
meltingpot-main
|
meltingpot/configs/substrates/running_with_scissors_in_the_matrix__repeated.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for predator_prey__alley_hunt.
Example video: https://youtu.be/ctVjhn7VYgo
See predator_prey.py for a detailed description applicable to all predator_prey
substrates.
In this variant prey must forage for apples in a maze with many dangerous
dead-end corridors where they could easily be trapped by predators.
"""
from meltingpot.configs.substrates import predator_prey as base_config
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
build = base_config.build
ASCII_MAP = """
;________________________,
!aa''''''''''''''''''''aa|
!a'''''''''a''=+''''''''a|
!''=~~~+''=+''|!''=~~~+''|
!''[__,!''|!''|!''[___]''|
!''''a|!''|!aa|!'''''''''|
!''=~~J!''|L~~J!'a'=~~~+'|
!''|///!''[____]'a'|///!a|
!''|///!'''''''''''[__,L~J
!''[___]'XX''''X''''<*[__,
!''''''''''a''''XX''<****|
!'aa'''X''''''a'''XX<****|
!''''''''''a''''XX''<****|
!''=~~~+'''''''X''''<*=~~J
!''|///!'XX''''''''=~~J;_,
!''|///!''=~~~~+'a'|///!a|
!''[__,!''|;__,!'a'[___]'|
!''''a|!''|!aa|!'''''''''|
!''=~~J!''|!''|!''=~~~+''|
!''[___]''[]''|!''[___]''|
!a'''''''''a''[]''''''''a|
!aa''''''''''''''''''''aa|
L~~~~~~~~~~~~~~~~~~~~~~~~J
"""
# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
"*": {"type": "all", "list": ["safe_grass", "spawn_point_prey"]},
"X": {"type": "all", "list": ["tiled_floor", "spawn_point_predator"]},
"a": {"type": "all", "list": ["tiled_floor", "apple"]},
";": "nw_wall_corner",
",": "ne_wall_corner",
"J": "se_wall_corner",
"L": "sw_wall_corner",
"_": "wall_north",
"|": "wall_east",
"~": "wall_south",
"!": "wall_west",
"=": "nw_inner_wall_corner",
"+": "ne_inner_wall_corner",
"]": "se_inner_wall_corner",
"[": "sw_inner_wall_corner",
"'": "tiled_floor",
"<": "safe_grass_w_edge",
">": "safe_grass",
"/": "fill",
}
def get_config():
  """Build the default configuration for the alley-hunt layout."""
  config = base_config.get_config()

  # Replace the base layout with this substrate's maze map and legend.
  layout = config_dict.ConfigDict()
  layout.ascii_map = ASCII_MAP
  layout.char_prefab_map = CHAR_PREFAB_MAP
  config.layout = layout

  # The specs of the environment (from a single-agent perspective).
  config.timestep_spec = specs.timestep({
      "RGB": specs.OBSERVATION["RGB"],
      "STAMINA": specs.float64(),
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(184, 208),
  })

  # Five predators and eight prey by default.
  config.default_player_roles = ("predator",) * 5 + ("prey",) * 8
  return config
|
meltingpot-main
|
meltingpot/configs/substrates/predator_prey__alley_hunt.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for predator_prey__orchard.
Example video: https://youtu.be/gtd-ziZYJRI
See predator_prey.py for a detailed description applicable to all predator_prey
substrates.
In this variant there are two areas of the map containing food: an apple-rich
region to the north of the safe tall grass and an acorn-rich region to the east.
There are two possible prey strategies focusing on either apples or acorns.
However, in this case it is clear that focusing on acorns is the better
strategy since they are relatively close to the safe tall grass. They can easily
be collected and brought back to safety for consumption.
"""
from meltingpot.configs.substrates import predator_prey as base_config
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
build = base_config.build
ASCII_MAP = """
/;__________,;_______,/
;]aa'X'XX''a|!a''''aA[,
!a''aaaaaa'X[]''aa&''A|
!X'aaAaaaaa''''aaaaa''|
!'&'aaaaaa''Aa'aaaaaa'|
!a'''X''''X'''a''''''a|
!aa''aaa''''''''''''aa|
L~+''aaa''=~~+XXXaA=~~J
;_]'''a'X'[_,L~~~~~J;_,
!XX'''a'X'''[_______]'|
!'''''a''''''XX'''''''|
!'r^^^^^^l'''X'A'''A''|
!'zv#****#^l'''''A''''|
!'''<#***##j'''A'''A''|
!''r###**#>''''''A'''X|
!''zv##***#^l''A'''A''|
!''''zvvvvvvj''''A'&''|
L+'''''''''''''''''''=J
/L~~~~~~~~~~~~~~~~~~~J/
"""
# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
"*": {"type": "all", "list": ["safe_grass", "spawn_point_prey"]},
"&": {"type": "all", "list": ["tiled_floor", "spawn_point_prey"]},
"X": {"type": "all", "list": ["tiled_floor", "spawn_point_predator"]},
"a": {"type": "all", "list": ["tiled_floor", "apple"]},
"A": {"type": "all", "list": ["tiled_floor", "floor_acorn"]},
";": "nw_wall_corner",
",": "ne_wall_corner",
"J": "se_wall_corner",
"L": "sw_wall_corner",
"_": "wall_north",
"|": "wall_east",
"~": "wall_south",
"!": "wall_west",
"=": "nw_inner_wall_corner",
"+": "ne_inner_wall_corner",
"]": "se_inner_wall_corner",
"[": "sw_inner_wall_corner",
"'": "tiled_floor",
"#": "safe_grass",
"<": "safe_grass_w_edge",
"^": "safe_grass_n_edge",
">": "safe_grass_e_edge",
"v": "safe_grass_s_edge",
"l": "safe_grass_ne_corner",
"j": "safe_grass_se_corner",
"z": "safe_grass_sw_corner",
"r": "safe_grass_nw_corner",
"/": "fill",
}
def get_config():
  """Build the default configuration for the orchard layout."""
  config = base_config.get_config()

  # Override the map layout settings with this substrate's map and legend.
  layout = config_dict.ConfigDict()
  layout.ascii_map = ASCII_MAP
  layout.char_prefab_map = CHAR_PREFAB_MAP
  config.layout = layout

  # The specs of the environment (from a single-agent perspective).
  config.timestep_spec = specs.timestep({
      "RGB": specs.OBSERVATION["RGB"],
      "STAMINA": specs.float64(),
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(152, 184),
  })

  # Five predators and eight prey by default.
  config.default_player_roles = ("predator",) * 5 + ("prey",) * 8
  return config
|
meltingpot-main
|
meltingpot/configs/substrates/predator_prey__orchard.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Allelopathic Harvest (open).
Example video: https://youtu.be/Bb0duMG0YF4
This substrate contains three different varieties of berry (red, green, & blue)
and a fixed number of berry patches, which could be replanted to grow any color
variety of berry. The growth rate of each berry variety depends linearly on the
fraction that that color comprises of the total. Players have three planting
actions with which they can replant berries in their chosen color. All players
prefer to eat red berries (reward of 2 per red berry they eat versus a reward
of 1 per other colored berry). Players can achieve higher return by selecting
just one single color of berry to plant, but which one to pick is, in principle,
difficult to coordinate (start-up problem) -- though in this case all prefer
red berries, suggesting a globally rational choice. They also always prefer to
eat berries over spending time planting (free-rider problem).
Allelopathic Harvest was first described in Koster et al. (2020).
Köster, R., McKee, K.R., Everett, R., Weidinger, L., Isaac, W.S., Hughes, E.,
Duenez-Guzman, E.A., Graepel, T., Botvinick, M. and Leibo, J.Z., 2020.
Model-free conventions in multi-agent reinforcement learning with heterogeneous
preferences. arXiv preprint arXiv:2010.09054.
"""
from meltingpot.configs.substrates import allelopathic_harvest as base_config
OPEN_ASCII_MAP = """
333PPPP12PPP322P32PPP1P13P3P3
1PPPP2PP122PPP3P232121P2PP2P1
P1P3P11PPP13PPP31PPPP23PPPPPP
PPPPP2P2P1P2P3P33P23PP2P2PPPP
P1PPPPPPP2PPP12311PP3321PPPPP
133P2PP2PPP3PPP1PPP2213P112P1
3PPPPPPPPPPPPP31PPPPPP1P3112P
PP2P21P21P33PPPPPPP3PP2PPPP1P
PPPPP1P1P32P3PPP22PP1P2PPPP2P
PPP3PP3122211PPP2113P3PPP1332
PP12132PP1PP1P321PP1PPPPPP1P3
PPP222P12PPPP1PPPP1PPP321P11P
PPP2PPPP3P2P1PPP1P23322PP1P13
23PPP2PPPP2P3PPPP3PP3PPP3PPP2
2PPPP3P3P3PP3PP3P1P3PP11P21P1
21PPP2PP331PP3PPP2PPPPP2PP3PP
P32P2PP2P1PPPPPPP12P2PPP1PPPP
P3PP3P2P21P3PP2PP11PP1323P312
2P1PPPPP1PPP1P2PPP3P32P2P331P
PPPPP1312P3P2PPPP3P32PPPP2P11
P3PPPP221PPP2PPPPPPPP1PPP311P
32P3PPPPPPPPPP31PPPP3PPP13PPP
PPP3PPPPP3PPPPPP232P13PPPPP1P
P1PP1PPP2PP3PPPPP33321PP2P3PP
P13PPPP1P333PPPP2PP213PP2P3PP
1PPPPP3PP2P1PP21P3PPPP231P2PP
1331P2P12P2PPPP2PPP3P23P21PPP
P3P131P3PPP13P1PPP222PPPP11PP
2P3PPPPPPPP2P323PPP2PPP1PPP2P
21PPPPPPP12P23P1PPPPPP13P3P11
"""
build = base_config.build
def get_config():
  """Adjust the base allelopathic-harvest configuration for the open map."""
  config = base_config.get_config()
  config.ascii_map = OPEN_ASCII_MAP
  # Split preferences 50/50: eight red-preferring and eight green-preferring
  # players.
  red_lovers = ("player_who_likes_red",) * 8
  green_lovers = ("player_who_likes_green",) * 8
  config.default_player_roles = red_lovers + green_lovers
  return config
|
meltingpot-main
|
meltingpot/configs/substrates/allelopathic_harvest__open.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Pure Coordination in the Matrix.
Example video: https://youtu.be/LG_qvqujxPU
See _Running with Scissors in the Matrix_ for a general description of the
game dynamics. Here the payoff matrix represents a pure coordination game with
`K = 3` different ways to coordinate, all equally beneficial.
Players have the default `11 x 11` (off center) observation window.
Both players are removed and their inventories are reset after each interaction.
"""
from typing import Any, Dict, Mapping, Sequence
from meltingpot.configs.substrates import the_matrix
from meltingpot.utils.substrates import colors
from meltingpot.utils.substrates import game_object_utils
from meltingpot.utils.substrates import shapes
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
PrefabConfig = game_object_utils.PrefabConfig
# Warning: setting `_ENABLE_DEBUG_OBSERVATIONS = True` may cause slowdown.
_ENABLE_DEBUG_OBSERVATIONS = False
# The number of resources must match the (square) size of the matrix.
NUM_RESOURCES = 3
# This color is red.
RESOURCE1_COLOR = (150, 0, 0, 255)
RESOURCE1_HIGHLIGHT_COLOR = (200, 0, 0, 255)
RESOURCE1_COLOR_DATA = (RESOURCE1_COLOR, RESOURCE1_HIGHLIGHT_COLOR)
# This color is green.
RESOURCE2_COLOR = (0, 150, 0, 255)
RESOURCE2_HIGHLIGHT_COLOR = (0, 200, 0, 255)
RESOURCE2_COLOR_DATA = (RESOURCE2_COLOR, RESOURCE2_HIGHLIGHT_COLOR)
# This color is blue.
RESOURCE3_COLOR = (0, 0, 150, 255)
RESOURCE3_HIGHLIGHT_COLOR = (0, 0, 200, 255)
RESOURCE3_COLOR_DATA = (RESOURCE3_COLOR, RESOURCE3_HIGHLIGHT_COLOR)
# The procedural generator replaces all 'a' chars in the default map with chars
# representing specific resources, i.e. with either '1' or '2'.
ASCII_MAP = """
WWWWWWWWWWWWWWWWWWWWWWWWW
WPPPP W W PPPPW
WPPPP PPPPW
WPPPP PPPPW
WPPPP PPPPW
W aa W
W 11 aa W
W 11 W
W 11 W
W WW W 222 W
WW 33 W 222 W
WWW 33 WWWWWWWWW W
W 33 111 WWW
W 111 W
W 22 W W
W 22 W WW W
W 22 W333 W
W 333 W
W aa W
WPPPP aa PPPPW
WPPPP PPPPW
WPPPP PPPPW
WPPPP W PPPPW
WWWWWWWWWWWWWWWWWWWWWWWWW
"""
_resource_names = [
"resource_class1",
"resource_class2",
"resource_class3",
]
# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
"a": {"type": "choice", "list": _resource_names},
"1": _resource_names[0],
"2": _resource_names[1],
"3": _resource_names[2],
"P": "spawn_point",
"W": "wall",
}
_COMPASS = ["N", "E", "S", "W"]
WALL = {
"name": "wall",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall",
"stateConfigs": [{
"state": "wall",
"layer": "upperPhysical",
"sprite": "Wall",
}],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Wall"],
"spriteShapes": [shapes.WALL],
"palettes": [{"*": (95, 95, 95, 255),
"&": (100, 100, 100, 255),
"@": (109, 109, 109, 255),
"#": (152, 152, 152, 255)}],
"noRotates": [False]
}
},
{
"component": "BeamBlocker",
"kwargs": {
"beamType": "gameInteraction"
}
},
]
}
SPAWN_POINT = {
"name": "spawnPoint",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "spawnPoint",
"stateConfigs": [{
"state": "spawnPoint",
"layer": "alternateLogic",
"groups": ["spawnPoints"]
}],
}
},
{
"component": "Transform",
},
]
}
# PLAYER_COLOR_PALETTES is a list with each entry specifying the color to use
# for the player at the corresponding index.
NUM_PLAYERS_UPPER_BOUND = 32
PLAYER_COLOR_PALETTES = []
for idx in range(NUM_PLAYERS_UPPER_BOUND):
PLAYER_COLOR_PALETTES.append(shapes.get_palette(colors.palette[idx]))
# Primitive action components.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "interact": 0}
FORWARD = {"move": 1, "turn": 0, "interact": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "interact": 0}
BACKWARD = {"move": 3, "turn": 0, "interact": 0}
STEP_LEFT = {"move": 4, "turn": 0, "interact": 0}
TURN_LEFT = {"move": 0, "turn": -1, "interact": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "interact": 0}
INTERACT = {"move": 0, "turn": 0, "interact": 1}
# pyformat: enable
# pylint: enable=bad-whitespace
ACTION_SET = (
NOOP,
FORWARD,
BACKWARD,
STEP_LEFT,
STEP_RIGHT,
TURN_LEFT,
TURN_RIGHT,
INTERACT,
)
TARGET_SPRITE_SELF = {
"name": "Self",
"shape": shapes.CUTE_AVATAR,
"palette": shapes.get_palette((50, 100, 200)),
"noRotate": True,
}
TARGET_SPRITE_OTHER = {
"name": "Other",
"shape": shapes.CUTE_AVATAR,
"palette": shapes.get_palette((200, 100, 50)),
"noRotate": True,
}
def create_scene():
  """Builds the global scene object.

  The scene carries the payoff matrix (identity, i.e. pure coordination with
  three equivalent conventions) and the stochastic episode-ending logic.

  Returns:
    A scene prefab dictionary.
  """
  state_manager = {
      "component": "StateManager",
      "kwargs": {
          "initialState": "scene",
          "stateConfigs": [{"state": "scene"}],
      },
  }
  the_matrix_component = {
      "component": "TheMatrix",
      "kwargs": {
          # Prevent interaction before both interactors have collected
          # at least one resource.
          "disallowUnreadyInteractions": True,
          # Identity payoff matrix: reward only when both pick the same
          # resource class.
          "matrix": [
              # 1  2  3
              [1, 0, 0],  # 1
              [0, 1, 0],  # 2
              [0, 0, 1]   # 3
          ],
          "resultIndicatorColorIntervals": [
              # red      # yellow    # green     # blue      # violet
              (0.0, 0.2), (0.2, 0.4), (0.4, 0.6), (0.6, 0.8), (0.8, 1.0)
          ],
      },
  }
  episode_ending = {
      "component": "StochasticIntervalEpisodeEnding",
      "kwargs": {
          "minimumFramesPerEpisode": 1000,
          "intervalLength": 100,  # Set equal to unroll length.
          "probabilityTerminationPerInterval": 0.2,
      },
  }
  return {
      "name": "scene",
      "components": [
          state_manager,
          {"component": "Transform"},
          the_matrix_component,
          episode_ending,
      ],
  }
def create_resource_prefab(resource_id, color_data):
  """Returns a resource prefab for the given class id and colors.

  Args:
    resource_id: integer id (1-based) of the resource class.
    color_data: pair of RGBA tuples (base color, highlight color).
  """
  resource_name = "resource_class{}".format(resource_id)
  sprite_name = resource_name + "_sprite"
  wait_state = resource_name + "_wait"
  state_manager = {
      "component": "StateManager",
      "kwargs": {
          "initialState": resource_name,
          "stateConfigs": [
              {"state": wait_state,
               "groups": ["resourceWaits"]},
              {"state": resource_name,
               "layer": "lowerPhysical",
               "sprite": sprite_name},
          ],
      },
  }
  appearance = {
      "component": "Appearance",
      "kwargs": {
          "renderMode": "ascii_shape",
          "spriteNames": [sprite_name],
          "spriteShapes": [shapes.BUTTON],
          "palettes": [{"*": color_data[0],
                        "#": color_data[1],
                        "x": (0, 0, 0, 0)}],
          "noRotates": [False],
      },
  }
  resource = {
      "component": "Resource",
      "kwargs": {
          "resourceClass": resource_id,
          "visibleType": resource_name,
          "waitState": wait_state,
          "regenerationRate": 0.04,
          "regenerationDelay": 10,
      },
  }
  destroyable = {
      "component": "Destroyable",
      "kwargs": {
          "waitState": wait_state,
          # It is possible to destroy resources but takes concerted
          # effort to do so by zapping them `initialHealth` times.
          "initialHealth": 3,
      },
  }
  return {
      "name": resource_name,
      "components": [
          state_manager,
          {"component": "Transform"},
          appearance,
          resource,
          destroyable,
      ],
  }
def create_prefabs() -> PrefabConfig:
  """Returns the prefabs.

  Prefabs are a dictionary mapping names to template game objects that can
  be cloned and placed in multiple locations according to an ascii map.
  """
  prefabs = {
      "wall": WALL,
      "spawn_point": SPAWN_POINT,
  }
  # One prefab per resource class, each with its own color pair.
  color_data_by_id = {
      1: RESOURCE1_COLOR_DATA,
      2: RESOURCE2_COLOR_DATA,
      3: RESOURCE3_COLOR_DATA,
  }
  for resource_id, color_data in color_data_by_id.items():
    prefabs["resource_class{}".format(resource_id)] = create_resource_prefab(
        resource_id, color_data)
  return prefabs
def create_avatar_object(player_idx: int,
                         all_source_sprite_names: Sequence[str],
                         target_sprite_self: Dict[str, Any],
                         target_sprite_other: Dict[str, Any]) -> Dict[str, Any]:
  """Create an avatar object given self vs other sprite data.

  Args:
    player_idx: 0-based index of the player this avatar belongs to.
    all_source_sprite_names: source sprite names for every player
      ("Avatar1", "Avatar2", ...); all names other than this player's own
      are remapped to the "other" sprite.
    target_sprite_self: sprite data (name/shape/palette/noRotate) used to
      render this avatar in its own view.
    target_sprite_other: sprite data used to render all other avatars in
      this avatar's view.

  Returns:
    A prefab dictionary defining the avatar game object.
  """
  # Lua is 1-indexed.
  lua_index = player_idx + 1
  # Setup the self vs other sprite mapping.
  source_sprite_self = "Avatar" + str(lua_index)
  custom_sprite_map = {source_sprite_self: target_sprite_self["name"]}
  # Every other player's sprite maps to the same "other" sprite, so this
  # avatar cannot visually distinguish between different other players.
  for name in all_source_sprite_names:
    if name != source_sprite_self:
      custom_sprite_map[name] = target_sprite_other["name"]
  live_state_name = "player{}".format(lua_index)
  avatar_object = {
      "name": "avatar",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": live_state_name,
                  "stateConfigs": [
                      {"state": live_state_name,
                       "layer": "upperPhysical",
                       "sprite": source_sprite_self,
                       "contact": "avatar",
                       "groups": ["players"]},
                      # State used while the avatar is temporarily removed.
                      {"state": "playerWait",
                       "groups": ["playerWaits"]},
                  ]
              }
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "colored_square",
                  "spriteNames": [source_sprite_self],
                  # A white square should never be displayed. It will always be
                  # remapped since this is self vs other observation mode.
                  "spriteRGBColors": [(255, 255, 255, 255)],
              }
          },
          {
              "component": "AdditionalSprites",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "customSpriteNames": [target_sprite_self["name"],
                                        target_sprite_other["name"]],
                  "customSpriteShapes": [target_sprite_self["shape"],
                                         target_sprite_other["shape"]],
                  "customPalettes": [target_sprite_self["palette"],
                                     target_sprite_other["palette"]],
                  "customNoRotates": [target_sprite_self["noRotate"],
                                      target_sprite_other["noRotate"]],
              }
          },
          {
              "component": "Avatar",
              "kwargs": {
                  "index": lua_index,
                  "aliveState": live_state_name,
                  "waitState": "playerWait",
                  "speed": 1.0,
                  "spawnGroup": "spawnPoints",
                  "actionOrder": ["move", "turn", "interact"],
                  "actionSpec": {
                      "move": {"default": 0, "min": 0, "max": len(_COMPASS)},
                      "turn": {"default": 0, "min": -1, "max": 1},
                      "interact": {"default": 0, "min": 0, "max": 1},
                  },
                  # 11 x 11 off-center observation window (9 forward, 1 back).
                  "view": {
                      "left": 5,
                      "right": 5,
                      "forward": 9,
                      "backward": 1,
                      "centered": False
                  },
                  "spriteMap": custom_sprite_map,
                  # The following kwarg makes it possible to get rewarded even
                  # on frames when an avatar is "dead". It is needed for in the
                  # matrix games in order to correctly handle the case of two
                  # players getting hit simultaneously by the same beam.
                  "skipWaitStateRewards": False,
              }
          },
          {
              "component": "GameInteractionZapper",
              "kwargs": {
                  "cooldownTime": 2,
                  "beamLength": 3,
                  "beamRadius": 1,
                  "framesTillRespawn": 50,
                  "numResources": NUM_RESOURCES,
                  "endEpisodeOnFirstInteraction": False,
                  # Reset both players' inventories after each interaction.
                  "reset_winner_inventory": True,
                  "reset_loser_inventory": True,
                  # Both players get removed after each interaction.
                  "losingPlayerDies": True,
                  "winningPlayerDies": True,
                  # `freezeOnInteraction` is the number of frames to display the
                  # interaction result indicator, freeze, and delay delivering
                  # all results of interacting.
                  "freezeOnInteraction": 16,
              }
          },
          {
              "component": "ReadyToShootObservation",
              "kwargs": {
                  "zapperComponent": "GameInteractionZapper",
              }
          },
          {
              "component": "InventoryObserver",
              "kwargs": {
              }
          },
          {
              "component": "Taste",
              "kwargs": {
                  "mostTastyResourceClass": -1,  # -1 indicates no preference.
                  # No resource is most tasty when mostTastyResourceClass == -1.
                  "mostTastyReward": 0.1,
              }
          },
          {
              "component": "InteractionTaste",
              "kwargs": {
                  "mostTastyResourceClass": -1,  # -1 indicates no preference.
                  "zeroDefaultInteractionReward": False,
                  "extraReward": 1.0,
              }
          },
          {
              "component": "AvatarMetricReporter",
              "kwargs": {
                  "metrics": [
                      {
                          # Report the inventories of both players involved in
                          # an interaction on this frame formatted as
                          # (self inventory, partner inventory).
                          "name": "INTERACTION_INVENTORIES",
                          "type": "tensor.DoubleTensor",
                          "shape": (2, NUM_RESOURCES),
                          "component": "GameInteractionZapper",
                          "variable": "latest_interaction_inventories",
                      },
                      *the_matrix.get_cumulant_metric_configs(NUM_RESOURCES),
                  ]
              }
          },
      ]
  }
  if _ENABLE_DEBUG_OBSERVATIONS:
    avatar_object["components"].append({
        "component": "LocationObserver",
        "kwargs": {"objectIsAvatar": True, "alsoReportOrientation": True},
    })
  return avatar_object
def get_all_source_sprite_names(num_players):
  """Returns the source sprite name ("Avatar1", ...) for each player.

  Args:
    num_players: number of players in the episode.

  Returns:
    List of sprite names, one per player, using 1-based (Lua) indexing.
  """
  # Lua is 1-indexed, so player 0 maps to "Avatar1".
  return ["Avatar" + str(player_idx + 1) for player_idx in range(num_players)]
def create_avatar_objects(num_players):
  """Returns avatar objects (and readiness markers) for all players."""
  all_source_sprite_names = get_all_source_sprite_names(num_players)
  avatar_objects = []
  for player_idx in range(num_players):
    avatar_objects.append(
        create_avatar_object(player_idx,
                             all_source_sprite_names,
                             TARGET_SPRITE_SELF,
                             TARGET_SPRITE_OTHER))
    # Each avatar is paired with a marker showing interaction readiness.
    avatar_objects.append(
        the_matrix.create_ready_to_interact_marker(player_idx))
  return avatar_objects
def create_world_sprite_map(
    num_players: int, target_sprite_other: Dict[str, Any]) -> Dict[str, str]:
  """Maps every avatar sprite to the "other" sprite for the global view."""
  other_sprite_name = target_sprite_other["name"]
  return {source_name: other_sprite_name
          for source_name in get_all_source_sprite_names(num_players)}
def get_config():
  """Default configuration."""
  config = config_dict.ConfigDict()

  # Action set configuration.
  config.action_set = ACTION_SET

  # Observation format configuration.
  config.individual_observation_names = [
      "RGB",
      "INVENTORY",
      "READY_TO_SHOOT",
      # Debug only (do not use the following observations in policies).
      "INTERACTION_INVENTORIES",
  ]
  config.global_observation_names = ["WORLD.RGB"]

  # The specs of the environment (from a single-agent perspective).
  config.action_spec = specs.action(len(ACTION_SET))
  timestep_fields = {
      "RGB": specs.OBSERVATION["RGB"],
      "INVENTORY": specs.inventory(3),
      "READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
      # Debug only (do not use the following observations in policies).
      "INTERACTION_INVENTORIES": specs.interaction_inventories(3),
      "WORLD.RGB": specs.rgb(192, 200),
  }
  config.timestep_spec = specs.timestep(timestep_fields)

  # The roles assigned to each player.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 8
  return config
def build(
    roles: Sequence[str],
    config: config_dict.ConfigDict,
) -> Mapping[str, Any]:
  """Build substrate definition given roles."""
  del config  # Unused: all settings below are fixed for this substrate.
  num_players = len(roles)
  simulation = {
      "map": ASCII_MAP,
      "gameObjects": create_avatar_objects(num_players=num_players),
      "scene": create_scene(),
      "prefabs": create_prefabs(),
      "charPrefabMap": CHAR_PREFAB_MAP,
      # worldSpriteMap is needed to make the global view used in videos be
      # be informative in cases where individual avatar views have had
      # sprites remapped to one another (example: self vs other mode).
      "worldSpriteMap": create_world_sprite_map(num_players,
                                                TARGET_SPRITE_OTHER),
  }
  return dict(
      levelName="the_matrix",
      levelDirectory="meltingpot/lua/levels",
      numPlayers=num_players,
      # Define upper bound of episode length since episodes end stochastically.
      maxEpisodeLengthFrames=5000,
      spriteSize=8,
      topology="BOUNDED",  # Choose from ["BOUNDED", "TORUS"].
      simulation=simulation,
  )
|
meltingpot-main
|
meltingpot/configs/substrates/pure_coordination_in_the_matrix__arena.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Commons Harvest: Partnership.
Example video: https://youtu.be/dH_0-APGKSs
See _Commons Harvest: Open_ for the general description of the mechanics at play
in this substrate.
This substrate is similar to _Commons Harvest: Closed_, except that it now
requires two players to work together to defend a room of apples (there are two
entrance corridors to defend). It requires effective cooperation both to defend
the doors and to avoid over-harvesting. It can be seen as a test of whether or
not agents can learn to trust their partners to (a) defend their shared
territory from invasion, and (b) act sustainably with regard to their shared
resources. This is the kind of trust born of mutual self interest. To be
successful, agents must recognize the alignment of their interests with those of
their partner and act accordingly.
"""
from typing import Any, Dict, Mapping, Sequence
from meltingpot.utils.substrates import colors
from meltingpot.utils.substrates import shapes
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
import numpy as np
# Warning: setting `_ENABLE_DEBUG_OBSERVATIONS = True` may cause slowdown.
_ENABLE_DEBUG_OBSERVATIONS = False
APPLE_RESPAWN_RADIUS = 2.0
REGROWTH_PROBABILITIES = [0.0, 0.001, 0.005, 0.025]
ASCII_MAP = """
WWWWWWWWWWWWWWWWWWWWWWWW
WAAA A II A AAAW
WAA AAA II AAA AAW
WA AAAAAIIAAAAA AW
W AAA II AAA W
W A II A W
W A II A W
W AAA Q WW Q AAA W
WAAAAA II AAAAAW
W AAA WWWWWWWWWW AAA W
W A WW A W
WWWWWWWWWW WW WWWWWWWWWW
W WW W
W WWWWWWWWWWWWWWWWWW W
W PPPPPPPPPPPPPPPPPP W
W PPPPPPPPPPPPPPPPPPPP W
WPPPPPPPPPPPPPPPPPPPPPPW
WWWWWWWWWWWWWWWWWWWWWWWW
"""
# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
"P": {"type": "all", "list": ["floor", "spawn_point"]},
"Q": {"type": "all", "list": ["floor", "inside_spawn_point"]},
" ": "floor",
"W": "wall",
"A": {"type": "all", "list": ["grass", "apple"]},
"I": {"type": "all", "list": [
"floor", "hidden_role_based_punishment_tile"]},
}
_COMPASS = ["N", "E", "S", "W"]
FLOOR = {
"name": "floor",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "floor",
"stateConfigs": [{
"state": "floor",
"layer": "background",
"sprite": "Floor",
}],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Floor"],
"spriteShapes": [shapes.GRAINY_FLOOR],
"palettes": [{"*": (220, 205, 185, 255),
"+": (210, 195, 175, 255),}],
"noRotates": [False]
}
},
]
}
GRASS = {
"name":
"grass",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState":
"grass",
"stateConfigs": [
{
"state": "grass",
"layer": "background",
"sprite": "Grass"
},
{
"state": "dessicated",
"layer": "background",
"sprite": "Floor"
},
],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Grass", "Floor"],
"spriteShapes": [
shapes.GRASS_STRAIGHT, shapes.GRAINY_FLOOR
],
"palettes": [{
"*": (158, 194, 101, 255),
"@": (170, 207, 112, 255)
}, {
"*": (220, 205, 185, 255),
"+": (210, 195, 175, 255),
}],
"noRotates": [False, False]
}
},
]
}
WALL = {
"name": "wall",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall",
"stateConfigs": [{
"state": "wall",
"layer": "upperPhysical",
"sprite": "Wall",
}],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Wall"],
"spriteShapes": [shapes.WALL],
"palettes": [{"*": (95, 95, 95, 255),
"&": (100, 100, 100, 255),
"@": (109, 109, 109, 255),
"#": (152, 152, 152, 255)}],
"noRotates": [False]
}
},
{
"component": "BeamBlocker",
"kwargs": {
"beamType": "zapHit"
}
},
]
}
SPAWN_POINT = {
"name": "spawnPoint",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "spawnPoint",
"stateConfigs": [{
"state": "spawnPoint",
"layer": "alternateLogic",
"groups": ["spawnPoints"]
}],
}
},
{
"component": "Transform",
},
]
}
INSIDE_SPAWN_POINT = {
"name": "spawnPoint",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "spawnPoint",
"stateConfigs": [{
"state": "spawnPoint",
"layer": "alternateLogic",
"groups": ["insideSpawnPoints"]
}],
}
},
{
"component": "Transform",
},
]
}
HIDDEN_ROLE_BASED_PUNISHMENT_TILE = {
"name": "hiddenRoleBasedRewardTile",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "active",
"stateConfigs": [{
"state": "active",
"layer": "alternateLogic",
}],
}
},
{"component": "Transform",},
{"component": "RoleBasedRewardTile",
"kwargs": {
"avatarRoleComponent": "Role",
"getRoleFunction": "getRole",
"rolesToRewards": {"putative_cooperator": -10},
}}
]
}
# Primitive action components.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "fireZap": 0}
FORWARD = {"move": 1, "turn": 0, "fireZap": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "fireZap": 0}
BACKWARD = {"move": 3, "turn": 0, "fireZap": 0}
STEP_LEFT = {"move": 4, "turn": 0, "fireZap": 0}
TURN_LEFT = {"move": 0, "turn": -1, "fireZap": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "fireZap": 0}
FIRE_ZAP = {"move": 0, "turn": 0, "fireZap": 1}
# pyformat: enable
# pylint: enable=bad-whitespace
ACTION_SET = (
NOOP,
FORWARD,
BACKWARD,
STEP_LEFT,
STEP_RIGHT,
TURN_LEFT,
TURN_RIGHT,
FIRE_ZAP,
)
TARGET_SPRITE_SELF = {
"name": "Self",
"shape": shapes.CUTE_AVATAR,
"palette": shapes.get_palette((50, 100, 200)),
"noRotate": True,
}
def create_scene():
  """Returns the global scene object.

  The scene holds the Neighborhoods component (used by the density-based
  apple regrowth logic) and the stochastic episode-ending logic.
  """
  components = [
      {
          "component": "StateManager",
          "kwargs": {
              "initialState": "scene",
              "stateConfigs": [{"state": "scene"}],
          },
      },
      {"component": "Transform"},
      {"component": "Neighborhoods", "kwargs": {}},
      {
          "component": "StochasticIntervalEpisodeEnding",
          "kwargs": {
              "minimumFramesPerEpisode": 1000,
              "intervalLength": 100,  # Set equal to unroll length.
              "probabilityTerminationPerInterval": 0.15,
          },
      },
  ]
  return {"name": "scene", "components": components}
def create_apple_prefab(regrowth_radius=-1.0,
                        regrowth_probabilities=None):
  """Creates the apple prefab with the provided settings.

  Args:
    regrowth_radius: L2 radius within which nearby uneaten apples affect the
      regrowth probability of an eaten apple.
    regrowth_probabilities: per-step regrowth probabilities indexed by the
      number of nearby uneaten apples. Defaults to all zeros (no regrowth).

  Returns:
    An apple prefab dictionary.
  """
  # Use a None sentinel instead of a mutable default list: the previous
  # `regrowth_probabilities=[0, 0.0, 0.0, 0.0]` default was a shared mutable
  # object across calls (a latent hazard flagged by pylint's
  # dangerous-default-value).
  if regrowth_probabilities is None:
    regrowth_probabilities = [0, 0.0, 0.0, 0.0]
  growth_rate_states = [
      {
          "state": "apple",
          "layer": "lowerPhysical",
          "sprite": "Apple",
          "groups": ["apples"]
      },
      {
          "state": "appleWait",
          "layer": "logic",
          "sprite": "AppleWait",
      },
  ]
  # Enumerate all possible states for a potential apple. There is one state for
  # each regrowth rate i.e., number of nearby apples.
  upper_bound_possible_neighbors = np.floor(np.pi * regrowth_radius ** 2 + 1) + 1
  for i in range(int(upper_bound_possible_neighbors)):
    growth_rate_states.append(dict(state="appleWait_{}".format(i),
                                   layer="logic",
                                   groups=["waits_{}".format(i)],
                                   sprite="AppleWait"))
  apple_prefab = {
      "name": "apple",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": "apple",
                  "stateConfigs": growth_rate_states,
              }
          },
          {
              "component": "Transform",
              "kwargs": {
                  "position": (0, 0),
                  "orientation": "N"
              }
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": ["Apple", "AppleWait"],
                  "spriteShapes": [shapes.APPLE, shapes.FILL],
                  "palettes": [
                      {"x": (0, 0, 0, 0),
                       "*": (214, 88, 88, 255),
                       "#": (194, 79, 79, 255),
                       "o": (53, 132, 49, 255),
                       "|": (102, 51, 61, 255)},
                      {"i": (0, 0, 0, 0)}],
                  "noRotates": [True, True]
              }
          },
          {
              "component": "Edible",
              "kwargs": {
                  "liveState": "apple",
                  "waitState": "appleWait",
                  "rewardForEating": 1.0,
              }
          },
          {
              "component": "DensityRegrow",
              "kwargs": {
                  "liveState": "apple",
                  "waitState": "appleWait",
                  "radius": regrowth_radius,
                  "regrowthProbabilities": regrowth_probabilities,
              }
          },
      ]
  }
  return apple_prefab
def create_prefabs(regrowth_radius=-1.0,
                   regrowth_probabilities=None):
  """Returns a dictionary mapping names to template game objects.

  Args:
    regrowth_radius: L2 radius within which nearby uneaten apples affect the
      regrowth probability of an eaten apple.
    regrowth_probabilities: per-step regrowth probabilities indexed by the
      number of nearby uneaten apples. Defaults to all zeros (no regrowth).
  """
  # None sentinel replaces the previous shared mutable default list
  # (`[0, 0.0, 0.0, 0.0]`), matching the fix in `create_apple_prefab`.
  if regrowth_probabilities is None:
    regrowth_probabilities = [0, 0.0, 0.0, 0.0]
  prefabs = {
      "floor": FLOOR,
      "grass": GRASS,
      "wall": WALL,
      "spawn_point": SPAWN_POINT,
      "inside_spawn_point": INSIDE_SPAWN_POINT,
      "hidden_role_based_punishment_tile": HIDDEN_ROLE_BASED_PUNISHMENT_TILE,
  }
  prefabs["apple"] = create_apple_prefab(
      regrowth_radius=regrowth_radius,
      regrowth_probabilities=regrowth_probabilities)
  return prefabs
def create_avatar_object(player_idx: int,
                         target_sprite_self: Dict[str, Any],
                         spawn_group: str) -> Mapping[str, Any]:
  """Create an avatar object that always sees itself as blue.

  Args:
    player_idx: 0-based index of the player this avatar belongs to.
    target_sprite_self: sprite data (name/shape/palette/noRotate) used to
      render this avatar in its own view.
    spawn_group: name of the spawn-point group used for the initial spawn.

  Returns:
    A prefab dictionary defining the avatar game object.
  """
  # Lua is 1-indexed.
  lua_index = player_idx + 1
  # Setup the self vs other sprite mapping.
  source_sprite_self = "Avatar" + str(lua_index)
  custom_sprite_map = {source_sprite_self: target_sprite_self["name"]}
  live_state_name = "player{}".format(lua_index)
  avatar_object = {
      "name": "avatar",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": live_state_name,
                  "stateConfigs": [
                      {"state": live_state_name,
                       "layer": "upperPhysical",
                       "sprite": source_sprite_self,
                       "contact": "avatar",
                       "groups": ["players"]},
                      # State used while the avatar waits to respawn.
                      {"state": "playerWait",
                       "groups": ["playerWaits"]},
                  ]
              }
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": [source_sprite_self],
                  "spriteShapes": [shapes.CUTE_AVATAR],
                  "palettes": [shapes.get_palette(
                      colors.human_readable[player_idx])],
                  "noRotates": [True]
              }
          },
          {
              "component": "AdditionalSprites",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "customSpriteNames": [target_sprite_self["name"]],
                  "customSpriteShapes": [target_sprite_self["shape"]],
                  "customPalettes": [target_sprite_self["palette"]],
                  "customNoRotates": [target_sprite_self["noRotate"]],
              }
          },
          {
              "component": "Avatar",
              "kwargs": {
                  "index": lua_index,
                  "aliveState": live_state_name,
                  "waitState": "playerWait",
                  "speed": 1.0,
                  "spawnGroup": spawn_group,
                  # Respawns after the initial one use the shared spawn group.
                  "postInitialSpawnGroup": "spawnPoints",
                  "actionOrder": ["move", "turn", "fireZap"],
                  "actionSpec": {
                      "move": {"default": 0, "min": 0, "max": len(_COMPASS)},
                      "turn": {"default": 0, "min": -1, "max": 1},
                      "fireZap": {"default": 0, "min": 0, "max": 1},
                  },
                  # 11 x 11 off-center observation window (9 forward, 1 back).
                  "view": {
                      "left": 5,
                      "right": 5,
                      "forward": 9,
                      "backward": 1,
                      "centered": False
                  },
                  "spriteMap": custom_sprite_map,
              }
          },
          {
              "component": "Zapper",
              "kwargs": {
                  "cooldownTime": 1,
                  "beamLength": 4,
                  "beamRadius": 1,
                  "framesTillRespawn": 100,
                  "penaltyForBeingZapped": 0,
                  "rewardForZapping": 0,
              }
          },
          {
              "component": "ReadyToShootObservation",
          },
          {
              "component": "Role",
              "kwargs": {
                  # Default role; NOTE(review): presumably overridden
                  # elsewhere (e.g. per-scenario) — confirm with callers.
                  "role": "none",
              }
          },
      ]
  }
  if _ENABLE_DEBUG_OBSERVATIONS:
    avatar_object["components"].append({
        "component": "LocationObserver",
        "kwargs": {"objectIsAvatar": True, "alsoReportOrientation": True},
    })
  return avatar_object
def create_avatar_objects(num_players):
  """Returns list of avatar objects of length 'num_players'."""
  avatar_objects = []
  for player_idx in range(num_players):
    # The first two player slots always spawn inside the rooms.
    spawn_group = "insideSpawnPoints" if player_idx < 2 else "spawnPoints"
    avatar_objects.append(
        create_avatar_object(player_idx,
                             TARGET_SPRITE_SELF,
                             spawn_group=spawn_group))
  return avatar_objects
def get_config():
  """Default configuration for training on the commons_harvest level."""
  config = config_dict.ConfigDict()

  # Action set configuration.
  config.action_set = ACTION_SET

  # Observation format configuration.
  config.individual_observation_names = [
      "RGB",
      "READY_TO_SHOOT",
  ]
  config.global_observation_names = ["WORLD.RGB"]

  # The specs of the environment (from a single-agent perspective).
  config.action_spec = specs.action(len(ACTION_SET))
  timestep_fields = {
      "RGB": specs.OBSERVATION["RGB"],
      "READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(144, 192),
  }
  config.timestep_spec = specs.timestep(timestep_fields)

  # The roles assigned to each player.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 7
  return config
def build(
    roles: Sequence[str],
    config: config_dict.ConfigDict,
) -> Mapping[str, Any]:
  """Build substrate definition given player roles."""
  del config  # Unused: all settings below are fixed for this substrate.
  num_players = len(roles)
  simulation = {
      "map": ASCII_MAP,
      "gameObjects": create_avatar_objects(num_players),
      "prefabs": create_prefabs(APPLE_RESPAWN_RADIUS,
                                REGROWTH_PROBABILITIES),
      "charPrefabMap": CHAR_PREFAB_MAP,
      "scene": create_scene(),
  }
  return dict(
      levelName="commons_harvest",
      levelDirectory="meltingpot/lua/levels",
      numPlayers=num_players,
      # Define upper bound of episode length since episodes end stochastically.
      maxEpisodeLengthFrames=5000,
      spriteSize=8,
      topology="BOUNDED",  # Choose from ["BOUNDED", "TORUS"].
      simulation=simulation,
  )
|
meltingpot-main
|
meltingpot/configs/substrates/commons_harvest__partnership.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Collaborative Cooking: Circuit.
Example video: https://youtu.be/2nXe5OPvJ7g
The recipe they must follow is for tomato soup:
1. Add three tomatoes to the cooking pot.
2. Wait for the soup to cook (status bar completion).
3. Bring a bowl to the pot and pour the soup from the pot into the bowl.
4. Deliver the bowl of soup at the goal location.
This substrate is a pure common interest game. All players share all rewards.
Players have a `5 x 5` observation window.
Map:
Counter Circuit: Players are able to cook and deliver soups by themselves
through walking around the entire circuit. However, there exists a more optimal
coordinated strategy whereby players pass tomatoes across the counter.
Additionally, there are the clockwise and anti-clockwise strategies as in the
Ring layout.
"""
from meltingpot.configs.substrates import collaborative_cooking as base_config
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
build = base_config.build
# Counter Circuit: Another layout where it is possible for agents to work
# independently but more efficient if they work together, with one agent passing
# tomatoes to the other.
ASCII_MAP = """
x###CC###
x#P #
xD #### T
x# P#
x###OO###
"""
def get_config():
  """Default configuration."""
  config = base_config.get_config()

  # Override the map layout settings.
  layout = config_dict.ConfigDict()
  layout.ascii_map = ASCII_MAP
  config.layout = layout

  # The specs of the environment (from a single-agent perspective).
  config.timestep_spec = specs.timestep({
      "RGB": specs.rgb(40, 40),
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(40, 72),
  })

  # The roles assigned to each player.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 2
  return config
|
meltingpot-main
|
meltingpot/configs/substrates/collaborative_cooking__circuit.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Commons Harvest: Open.
Example video: https://youtu.be/lZ-qpPP4BNE
Apples are spread around the map and can be consumed for a reward of 1. Apples
that have been consumed regrow with a per-step probability that depends on the
number of uneaten apples in a `L2` norm neighborhood of radius 2 (by default).
After an apple has been eaten and thus removed, its regrowth probability depends
on the number of uneaten apples still in its local neighborhood. With standard
parameters, the regrowth rate decreases as the number of uneaten apples in the
neighborhood decreases and when there are zero uneaten apples in the
neighborhood then the regrowth rate is zero. As a consequence, a patch of apples
that collectively doesn't have any nearby apples, can be irrevocably lost if all
apples in the patch are consumed. Therefore, agents must exercise restraint when
consuming apples within a patch. Notice that in a single agent situation, there
is no incentive to collect the last apple in a patch (except near the end of the
episode). However, in a multi-agent situation, there is an incentive for any
agent to consume the last apple rather than risk another agent consuming it.
This creates a tragedy of the commons from which the substrate derives its name.
This mechanism was first described in Janssen et al (2010) and adapted for
multi-agent reinforcement learning in Perolat et al (2017).
Janssen, M.A., Holahan, R., Lee, A. and Ostrom, E., 2010. Lab experiments for
the study of social-ecological systems. Science, 328(5978), pp.613-617.
Perolat, J., Leibo, J.Z., Zambaldi, V., Beattie, C., Tuyls, K. and Graepel, T.,
2017. A multi-agent reinforcement learning model of common-pool
resource appropriation. In Proceedings of the 31st International Conference on
Neural Information Processing Systems (pp. 3646-3655).
"""
from typing import Any, Dict, Mapping, Sequence
from meltingpot.utils.substrates import colors
from meltingpot.utils.substrates import shapes
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
import numpy as np
# Warning: setting `_ENABLE_DEBUG_OBSERVATIONS = True` may cause slowdown.
_ENABLE_DEBUG_OBSERVATIONS = False
APPLE_RESPAWN_RADIUS = 2.0
REGROWTH_PROBABILITIES = [0.0, 0.0025, 0.005, 0.025]
ASCII_MAP = """
WWWWWWWWWWWWWWWWWWWWWWWW
WAAA A A AAAW
WAA AAA AAA AAW
WA AAAAA AAAAA AW
W AAA AAA W
W A A W
W A A W
W AAA Q Q AAA W
WAAAAA AAAAAW
W AAA AAA W
W A A W
W W
W W
W W
W PPPPPPPPPPPPPPPPPP W
W PPPPPPPPPPPPPPPPPPPP W
WPPPPPPPPPPPPPPPPPPPPPPW
WWWWWWWWWWWWWWWWWWWWWWWW
"""
# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
"P": {"type": "all", "list": ["floor", "spawn_point"]},
"Q": {"type": "all", "list": ["floor", "inside_spawn_point"]},
" ": "floor",
"W": "wall",
"A": {"type": "all", "list": ["grass", "apple"]},
}
_COMPASS = ["N", "E", "S", "W"]
FLOOR = {
"name": "floor",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "floor",
"stateConfigs": [{
"state": "floor",
"layer": "background",
"sprite": "Floor",
}],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Floor"],
"spriteShapes": [shapes.GRAINY_FLOOR],
"palettes": [{"*": (220, 205, 185, 255),
"+": (210, 195, 175, 255),}],
"noRotates": [False]
}
},
]
}
GRASS = {
"name":
"grass",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState":
"grass",
"stateConfigs": [
{
"state": "grass",
"layer": "background",
"sprite": "Grass"
},
{
"state": "dessicated",
"layer": "background",
"sprite": "Floor"
},
],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Grass", "Floor"],
"spriteShapes": [
shapes.GRASS_STRAIGHT, shapes.GRAINY_FLOOR
],
"palettes": [{
"*": (158, 194, 101, 255),
"@": (170, 207, 112, 255)
}, {
"*": (220, 205, 185, 255),
"+": (210, 195, 175, 255),
}],
"noRotates": [False, False]
}
},
]
}
WALL = {
"name": "wall",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall",
"stateConfigs": [{
"state": "wall",
"layer": "upperPhysical",
"sprite": "Wall",
}],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Wall"],
"spriteShapes": [shapes.WALL],
"palettes": [{"*": (95, 95, 95, 255),
"&": (100, 100, 100, 255),
"@": (109, 109, 109, 255),
"#": (152, 152, 152, 255)}],
"noRotates": [False]
}
},
{
"component": "BeamBlocker",
"kwargs": {
"beamType": "zapHit"
}
},
]
}
SPAWN_POINT = {
"name": "spawnPoint",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "spawnPoint",
"stateConfigs": [{
"state": "spawnPoint",
"layer": "alternateLogic",
"groups": ["spawnPoints"]
}],
}
},
{
"component": "Transform",
},
]
}
INSIDE_SPAWN_POINT = {
"name": "spawnPoint",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "spawnPoint",
"stateConfigs": [{
"state": "spawnPoint",
"layer": "alternateLogic",
"groups": ["insideSpawnPoints"]
}],
}
},
{
"component": "Transform",
},
]
}
# Primitive action components.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "fireZap": 0}
FORWARD = {"move": 1, "turn": 0, "fireZap": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "fireZap": 0}
BACKWARD = {"move": 3, "turn": 0, "fireZap": 0}
STEP_LEFT = {"move": 4, "turn": 0, "fireZap": 0}
TURN_LEFT = {"move": 0, "turn": -1, "fireZap": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "fireZap": 0}
FIRE_ZAP = {"move": 0, "turn": 0, "fireZap": 1}
# pyformat: enable
# pylint: enable=bad-whitespace
ACTION_SET = (
NOOP,
FORWARD,
BACKWARD,
STEP_LEFT,
STEP_RIGHT,
TURN_LEFT,
TURN_RIGHT,
FIRE_ZAP,
)
TARGET_SPRITE_SELF = {
"name": "Self",
"shape": shapes.CUTE_AVATAR,
"palette": shapes.get_palette((50, 100, 200)),
"noRotate": True,
}
def create_scene():
  """Creates the scene with the provided args controlling apple regrowth."""
  # The scene is a global (non-spatial) game object holding level-wide logic.
  state_manager = {
      "component": "StateManager",
      "kwargs": {
          "initialState": "scene",
          "stateConfigs": [{"state": "scene"}],
      },
  }
  # Episodes last at least `minimumFramesPerEpisode` frames and then end
  # stochastically, checked once per interval.
  episode_ending = {
      "component": "StochasticIntervalEpisodeEnding",
      "kwargs": {
          "minimumFramesPerEpisode": 1000,
          "intervalLength": 100,  # Set equal to unroll length.
          "probabilityTerminationPerInterval": 0.15,
      },
  }
  return {
      "name": "scene",
      "components": [
          state_manager,
          {"component": "Transform"},
          {"component": "Neighborhoods", "kwargs": {}},
          episode_ending,
      ],
  }
def create_apple_prefab(regrowth_radius=-1.0,
                        regrowth_probabilities=(0.0, 0.0, 0.0, 0.0)):
  """Creates the apple prefab with the provided settings.

  Args:
    regrowth_radius: L2 radius of the neighborhood within which uneaten
      apples are counted when determining regrowth probability.
    regrowth_probabilities: per-step regrowth probability, indexed by the
      number of uneaten apples currently in the neighborhood.

  Returns:
    A prefab dictionary defining the apple game object.
  """
  # Fix for the former mutable-default-argument pitfall: the default is now an
  # immutable tuple; copy to a list so downstream consumers still receive a
  # list and cannot mutate shared state.
  regrowth_probabilities = list(regrowth_probabilities)
  growth_rate_states = [
      {
          "state": "apple",
          "layer": "lowerPhysical",
          "sprite": "Apple",
          "groups": ["apples"]
      },
      {
          "state": "appleWait",
          "layer": "logic",
          "sprite": "AppleWait",
      },
  ]
  # Enumerate all possible states for a potential apple. There is one state
  # for each regrowth rate i.e., number of nearby apples. The area of the
  # neighborhood disc (plus slack) upper-bounds how many neighbors can exist.
  upper_bound_possible_neighbors = np.floor(np.pi * regrowth_radius**2 + 1) + 1
  for i in range(int(upper_bound_possible_neighbors)):
    growth_rate_states.append(dict(state="appleWait_{}".format(i),
                                   layer="logic",
                                   groups=["waits_{}".format(i)],
                                   sprite="AppleWait"))
  apple_prefab = {
      "name": "apple",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": "apple",
                  "stateConfigs": growth_rate_states,
              }
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": ["Apple", "AppleWait"],
                  "spriteShapes": [shapes.APPLE, shapes.FILL],
                  "palettes": [
                      {"x": (0, 0, 0, 0),
                       "*": (214, 88, 88, 255),
                       "#": (194, 79, 79, 255),
                       "o": (53, 132, 49, 255),
                       "|": (102, 51, 61, 255)},
                      {"i": (0, 0, 0, 0)}],
                  "noRotates": [True, True]
              }
          },
          {
              # Eating an apple transitions it to the wait state and pays out.
              "component": "Edible",
              "kwargs": {
                  "liveState": "apple",
                  "waitState": "appleWait",
                  "rewardForEating": 1.0,
              }
          },
          {
              # Regrowth probability depends on the density of live apples
              # within `radius`.
              "component": "DensityRegrow",
              "kwargs": {
                  "liveState": "apple",
                  "waitState": "appleWait",
                  "radius": regrowth_radius,
                  "regrowthProbabilities": regrowth_probabilities,
              }
          },
      ]
  }
  return apple_prefab
def create_prefabs(regrowth_radius=-1.0,
                   regrowth_probabilities=(0.0, 0.0, 0.0, 0.0)):
  """Returns a dictionary mapping names to template game objects.

  Args:
    regrowth_radius: L2 radius passed through to the apple prefab.
    regrowth_probabilities: per-step regrowth probabilities (indexed by
      nearby-apple count) passed through to the apple prefab.

  Returns:
    A dictionary mapping prefab names to game-object templates.
  """
  prefabs = {
      "floor": FLOOR,
      "grass": GRASS,
      "wall": WALL,
      "spawn_point": SPAWN_POINT,
      "inside_spawn_point": INSIDE_SPAWN_POINT,
  }
  # The default is now an immutable tuple (fixes the mutable-default-argument
  # pitfall); convert to a list so the apple prefab receives a list as before.
  prefabs["apple"] = create_apple_prefab(
      regrowth_radius=regrowth_radius,
      regrowth_probabilities=list(regrowth_probabilities))
  return prefabs
def create_avatar_object(player_idx: int,
                         target_sprite_self: Dict[str, Any],
                         spawn_group: str) -> Dict[str, Any]:
  """Create an avatar object that always sees itself as blue.

  Args:
    player_idx: zero-based index of the player slot this avatar occupies.
    target_sprite_self: sprite data (keys: "name", "shape", "palette",
      "noRotate") used to render this avatar in its own observations.
    spawn_group: name of the spawn-point group this avatar initially
      spawns into.

  Returns:
    A prefab dictionary defining the avatar game object.
  """
  # Lua is 1-indexed.
  lua_index = player_idx + 1
  # Setup the self vs other sprite mapping.
  source_sprite_self = "Avatar" + str(lua_index)
  custom_sprite_map = {source_sprite_self: target_sprite_self["name"]}
  live_state_name = "player{}".format(lua_index)
  avatar_object = {
      "name": "avatar",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": live_state_name,
                  "stateConfigs": [
                      # Normal (live) avatar state.
                      {"state": live_state_name,
                       "layer": "upperPhysical",
                       "sprite": source_sprite_self,
                       "contact": "avatar",
                       "groups": ["players"]},
                      # Inactive state while waiting to respawn.
                      {"state": "playerWait",
                       "groups": ["playerWaits"]},
                  ]
              }
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": [source_sprite_self],
                  # Each player slot gets its own color from the shared list.
                  "spriteShapes": [shapes.CUTE_AVATAR],
                  "palettes": [shapes.get_palette(
                      colors.human_readable[player_idx])],
                  "noRotates": [True]
              }
          },
          {
              # Sprite override so this avatar renders as `target_sprite_self`
              # (blue) in its own view, regardless of its slot color.
              "component": "AdditionalSprites",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "customSpriteNames": [target_sprite_self["name"]],
                  "customSpriteShapes": [target_sprite_self["shape"]],
                  "customPalettes": [target_sprite_self["palette"]],
                  "customNoRotates": [target_sprite_self["noRotate"]],
              }
          },
          {
              "component": "Avatar",
              "kwargs": {
                  "index": lua_index,
                  "aliveState": live_state_name,
                  "waitState": "playerWait",
                  "speed": 1.0,
                  # First spawn uses `spawn_group`; respawns after being
                  # zapped use the general "spawnPoints" group.
                  "spawnGroup": spawn_group,
                  "postInitialSpawnGroup": "spawnPoints",
                  "actionOrder": ["move", "turn", "fireZap"],
                  "actionSpec": {
                      "move": {"default": 0, "min": 0, "max": len(_COMPASS)},
                      "turn": {"default": 0, "min": -1, "max": 1},
                      "fireZap": {"default": 0, "min": 0, "max": 1},
                  },
                  # Egocentric view window (in grid cells), facing-forward
                  # biased rather than centered.
                  "view": {
                      "left": 5,
                      "right": 5,
                      "forward": 9,
                      "backward": 1,
                      "centered": False
                  },
                  "spriteMap": custom_sprite_map,
              }
          },
          {
              # Zapping beam: short respawn time in this (open) variant.
              "component": "Zapper",
              "kwargs": {
                  "cooldownTime": 2,
                  "beamLength": 3,
                  "beamRadius": 1,
                  "framesTillRespawn": 4,
                  "penaltyForBeingZapped": 0,
                  "rewardForZapping": 0,
              }
          },
          {
              "component": "ReadyToShootObservation",
          },
      ]
  }
  if _ENABLE_DEBUG_OBSERVATIONS:
    avatar_object["components"].append({
        "component": "LocationObserver",
        "kwargs": {"objectIsAvatar": True, "alsoReportOrientation": True},
    })
  return avatar_object
def create_avatar_objects(num_players):
  """Returns list of avatar objects of length 'num_players'."""
  avatar_objects = []
  for player_idx in range(num_players):
    # The first two player slots always spawn closer to the apples.
    spawn_group = "insideSpawnPoints" if player_idx < 2 else "spawnPoints"
    avatar_objects.append(
        create_avatar_object(player_idx,
                             TARGET_SPRITE_SELF,
                             spawn_group=spawn_group))
  return avatar_objects
def get_config():
  """Default configuration for training on the commons_harvest level.

  Returns:
    A ConfigDict holding the action set, observation names, per-agent
    environment specs, and player-role defaults for this substrate.
  """
  config = config_dict.ConfigDict()
  # Action set configuration.
  config.action_set = ACTION_SET
  # Observation format configuration.
  config.individual_observation_names = [
      "RGB",
      "READY_TO_SHOOT",
  ]
  config.global_observation_names = [
      "WORLD.RGB",
  ]
  # The specs of the environment (from a single-agent perspective).
  config.action_spec = specs.action(len(ACTION_SET))
  config.timestep_spec = specs.timestep({
      "RGB": specs.OBSERVATION["RGB"],
      "READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(144, 192),
  })
  # The roles assigned to each player. Seven players by default, all with
  # the same role.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 7
  return config
def build(
    roles: Sequence[str],
    config: config_dict.ConfigDict,
) -> Mapping[str, Any]:
  """Build substrate definition given player roles."""
  # The config is unused here; player count is derived from the roles.
  del config
  num_players = len(roles)
  # Assemble the substrate definition as a plain dict literal.
  return {
      "levelName": "commons_harvest",
      "levelDirectory": "meltingpot/lua/levels",
      "numPlayers": num_players,
      # Define upper bound of episode length since episodes end stochastically.
      "maxEpisodeLengthFrames": 5000,
      "spriteSize": 8,
      "topology": "BOUNDED",  # Choose from ["BOUNDED", "TORUS"],
      "simulation": {
          "map": ASCII_MAP,
          "gameObjects": create_avatar_objects(num_players),
          "prefabs": create_prefabs(APPLE_RESPAWN_RADIUS,
                                    REGROWTH_PROBABILITIES),
          "charPrefabMap": CHAR_PREFAB_MAP,
          "scene": create_scene(),
      },
  }
|
meltingpot-main
|
meltingpot/configs/substrates/commons_harvest__open.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Commons Harvest: Closed.
Example video: https://youtu.be/WbkTSbiSOw0
See _Commons Harvest: Open_ for the general description of the mechanics at play
in this substrate.
In the case of _Commons Harvest: Closed_, agents can learn to defend naturally
enclosed regions. Once they have done that then they have an incentive to avoid
overharvesting the patches within their region. It is usually much easier to
learn sustainable strategies here than it is in _Commons Harvest: Open_.
However, they usually involve significant inequality since many agents are
excluded from any natural region.
"""
from typing import Any, Dict, Mapping, Sequence
from meltingpot.utils.substrates import colors
from meltingpot.utils.substrates import shapes
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
import numpy as np
# Warning: setting `_ENABLE_DEBUG_OBSERVATIONS = True` may cause slowdown.
_ENABLE_DEBUG_OBSERVATIONS = False
APPLE_RESPAWN_RADIUS = 2.0
REGROWTH_PROBABILITIES = [0.0, 0.001, 0.005, 0.025]
ASCII_MAP = """
WWWWWWWWWWWWWWWWWWWWWWWW
WAAA A WW A AAAW
WAA AAA WW AAA AAW
WA AAAAAWWAAAAA AW
W AAA WW AAA W
W A WW A W
W A WW A W
W AAA Q WW Q AAA W
WAAAAA WW AAAAAW
W AAA WWWWWWWWWW AAA W
W A WW A W
WWWWWWWWWW WW WWWWWWWWWW
W WW W
W WWWWWWWWWWWWWWWWWW W
W PPPPPPPPPPPPPPPPPP W
W PPPPPPPPPPPPPPPPPPPP W
WPPPPPPPPPPPPPPPPPPPPPPW
WWWWWWWWWWWWWWWWWWWWWWWW
"""
# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
"P": {"type": "all", "list": ["floor", "spawn_point"]},
"Q": {"type": "all", "list": ["floor", "inside_spawn_point"]},
" ": "floor",
"W": "wall",
"A": {"type": "all", "list": ["grass", "apple"]},
}
_COMPASS = ["N", "E", "S", "W"]
FLOOR = {
"name": "floor",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "floor",
"stateConfigs": [{
"state": "floor",
"layer": "background",
"sprite": "Floor",
}],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Floor"],
"spriteShapes": [shapes.GRAINY_FLOOR],
"palettes": [{"*": (220, 205, 185, 255),
"+": (210, 195, 175, 255),}],
"noRotates": [False]
}
},
]
}
GRASS = {
"name":
"grass",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState":
"grass",
"stateConfigs": [
{
"state": "grass",
"layer": "background",
"sprite": "Grass"
},
{
"state": "dessicated",
"layer": "background",
"sprite": "Floor"
},
],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Grass", "Floor"],
"spriteShapes": [
shapes.GRASS_STRAIGHT, shapes.GRAINY_FLOOR
],
"palettes": [{
"*": (158, 194, 101, 255),
"@": (170, 207, 112, 255)
}, {
"*": (220, 205, 185, 255),
"+": (210, 195, 175, 255),
}],
"noRotates": [False, False]
}
},
]
}
WALL = {
"name": "wall",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall",
"stateConfigs": [{
"state": "wall",
"layer": "upperPhysical",
"sprite": "Wall",
}],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Wall"],
"spriteShapes": [shapes.WALL],
"palettes": [{"*": (95, 95, 95, 255),
"&": (100, 100, 100, 255),
"@": (109, 109, 109, 255),
"#": (152, 152, 152, 255)}],
"noRotates": [False]
}
},
{
"component": "BeamBlocker",
"kwargs": {
"beamType": "zapHit"
}
},
]
}
SPAWN_POINT = {
"name": "spawnPoint",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "spawnPoint",
"stateConfigs": [{
"state": "spawnPoint",
"layer": "alternateLogic",
"groups": ["spawnPoints"]
}],
}
},
{
"component": "Transform",
},
]
}
INSIDE_SPAWN_POINT = {
"name": "spawnPoint",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "spawnPoint",
"stateConfigs": [{
"state": "spawnPoint",
"layer": "alternateLogic",
"groups": ["insideSpawnPoints"]
}],
}
},
{
"component": "Transform",
},
]
}
# Primitive action components.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "fireZap": 0}
FORWARD = {"move": 1, "turn": 0, "fireZap": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "fireZap": 0}
BACKWARD = {"move": 3, "turn": 0, "fireZap": 0}
STEP_LEFT = {"move": 4, "turn": 0, "fireZap": 0}
TURN_LEFT = {"move": 0, "turn": -1, "fireZap": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "fireZap": 0}
FIRE_ZAP = {"move": 0, "turn": 0, "fireZap": 1}
# pyformat: enable
# pylint: enable=bad-whitespace
ACTION_SET = (
NOOP,
FORWARD,
BACKWARD,
STEP_LEFT,
STEP_RIGHT,
TURN_LEFT,
TURN_RIGHT,
FIRE_ZAP,
)
TARGET_SPRITE_SELF = {
"name": "Self",
"shape": shapes.CUTE_AVATAR,
"palette": shapes.get_palette((50, 100, 200)),
"noRotate": True,
}
def create_scene():
  """Creates the scene with the provided args controlling apple regrowth."""
  # The scene is a global (non-spatial) game object holding level-wide logic.
  state_manager = {
      "component": "StateManager",
      "kwargs": {
          "initialState": "scene",
          "stateConfigs": [{"state": "scene"}],
      },
  }
  # Episodes last at least `minimumFramesPerEpisode` frames and then end
  # stochastically, checked once per interval.
  episode_ending = {
      "component": "StochasticIntervalEpisodeEnding",
      "kwargs": {
          "minimumFramesPerEpisode": 1000,
          "intervalLength": 100,  # Set equal to unroll length.
          "probabilityTerminationPerInterval": 0.15,
      },
  }
  return {
      "name": "scene",
      "components": [
          state_manager,
          {"component": "Transform"},
          {"component": "Neighborhoods", "kwargs": {}},
          episode_ending,
      ],
  }
def create_apple_prefab(regrowth_radius=-1.0,
                        regrowth_probabilities=(0.0, 0.0, 0.0, 0.0)):
  """Creates the apple prefab with the provided settings.

  Args:
    regrowth_radius: L2 radius of the neighborhood within which uneaten
      apples are counted when determining regrowth probability.
    regrowth_probabilities: per-step regrowth probability, indexed by the
      number of uneaten apples currently in the neighborhood.

  Returns:
    A prefab dictionary defining the apple game object.
  """
  # Fix for the former mutable-default-argument pitfall: the default is now an
  # immutable tuple; copy to a list so downstream consumers still receive a
  # list and cannot mutate shared state.
  regrowth_probabilities = list(regrowth_probabilities)
  growth_rate_states = [
      {
          "state": "apple",
          "layer": "lowerPhysical",
          "sprite": "Apple",
          "groups": ["apples"]
      },
      {
          "state": "appleWait",
          "layer": "logic",
          "sprite": "AppleWait",
      },
  ]
  # Enumerate all possible states for a potential apple. There is one state
  # for each regrowth rate i.e., number of nearby apples. The area of the
  # neighborhood disc (plus slack) upper-bounds how many neighbors can exist.
  upper_bound_possible_neighbors = np.floor(np.pi * regrowth_radius**2 + 1) + 1
  for i in range(int(upper_bound_possible_neighbors)):
    growth_rate_states.append(dict(state="appleWait_{}".format(i),
                                   layer="logic",
                                   groups=["waits_{}".format(i)],
                                   sprite="AppleWait"))
  apple_prefab = {
      "name": "apple",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": "apple",
                  "stateConfigs": growth_rate_states,
              }
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": ["Apple", "AppleWait"],
                  "spriteShapes": [shapes.APPLE, shapes.FILL],
                  "palettes": [
                      {"x": (0, 0, 0, 0),
                       "*": (214, 88, 88, 255),
                       "#": (194, 79, 79, 255),
                       "o": (53, 132, 49, 255),
                       "|": (102, 51, 61, 255)},
                      {"i": (0, 0, 0, 0)}],
                  "noRotates": [True, True]
              }
          },
          {
              # Eating an apple transitions it to the wait state and pays out.
              "component": "Edible",
              "kwargs": {
                  "liveState": "apple",
                  "waitState": "appleWait",
                  "rewardForEating": 1.0,
              }
          },
          {
              # Regrowth probability depends on the density of live apples
              # within `radius`.
              "component": "DensityRegrow",
              "kwargs": {
                  "liveState": "apple",
                  "waitState": "appleWait",
                  "radius": regrowth_radius,
                  "regrowthProbabilities": regrowth_probabilities,
              }
          },
      ]
  }
  return apple_prefab
def create_prefabs(regrowth_radius=-1.0,
                   regrowth_probabilities=(0.0, 0.0, 0.0, 0.0)):
  """Returns a dictionary mapping names to template game objects.

  Args:
    regrowth_radius: L2 radius passed through to the apple prefab.
    regrowth_probabilities: per-step regrowth probabilities (indexed by
      nearby-apple count) passed through to the apple prefab.

  Returns:
    A dictionary mapping prefab names to game-object templates.
  """
  prefabs = {
      "floor": FLOOR,
      "grass": GRASS,
      "wall": WALL,
      "spawn_point": SPAWN_POINT,
      "inside_spawn_point": INSIDE_SPAWN_POINT,
  }
  # The default is now an immutable tuple (fixes the mutable-default-argument
  # pitfall); convert to a list so the apple prefab receives a list as before.
  prefabs["apple"] = create_apple_prefab(
      regrowth_radius=regrowth_radius,
      regrowth_probabilities=list(regrowth_probabilities))
  return prefabs
def create_avatar_object(player_idx: int,
                         target_sprite_self: Dict[str, Any],
                         spawn_group: str) -> Dict[str, Any]:
  """Create an avatar object that always sees itself as blue.

  Args:
    player_idx: zero-based index of the player slot this avatar occupies.
    target_sprite_self: sprite data (keys: "name", "shape", "palette",
      "noRotate") used to render this avatar in its own observations.
    spawn_group: name of the spawn-point group this avatar initially
      spawns into.

  Returns:
    A prefab dictionary defining the avatar game object.
  """
  # Lua is 1-indexed.
  lua_index = player_idx + 1
  # Setup the self vs other sprite mapping.
  source_sprite_self = "Avatar" + str(lua_index)
  custom_sprite_map = {source_sprite_self: target_sprite_self["name"]}
  live_state_name = "player{}".format(lua_index)
  avatar_object = {
      "name": "avatar",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": live_state_name,
                  "stateConfigs": [
                      # Normal (live) avatar state.
                      {"state": live_state_name,
                       "layer": "upperPhysical",
                       "sprite": source_sprite_self,
                       "contact": "avatar",
                       "groups": ["players"]},
                      # Inactive state while waiting to respawn.
                      {"state": "playerWait",
                       "groups": ["playerWaits"]},
                  ]
              }
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": [source_sprite_self],
                  # Each player slot gets its own color from the shared list.
                  "spriteShapes": [shapes.CUTE_AVATAR],
                  "palettes": [shapes.get_palette(
                      colors.human_readable[player_idx])],
                  "noRotates": [True]
              }
          },
          {
              # Sprite override so this avatar renders as `target_sprite_self`
              # (blue) in its own view, regardless of its slot color.
              "component": "AdditionalSprites",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "customSpriteNames": [target_sprite_self["name"]],
                  "customSpriteShapes": [target_sprite_self["shape"]],
                  "customPalettes": [target_sprite_self["palette"]],
                  "customNoRotates": [target_sprite_self["noRotate"]],
              }
          },
          {
              "component": "Avatar",
              "kwargs": {
                  "index": lua_index,
                  "aliveState": live_state_name,
                  "waitState": "playerWait",
                  "speed": 1.0,
                  # First spawn uses `spawn_group`; respawns after being
                  # zapped use the general "spawnPoints" group.
                  "spawnGroup": spawn_group,
                  "postInitialSpawnGroup": "spawnPoints",
                  "actionOrder": ["move", "turn", "fireZap"],
                  "actionSpec": {
                      "move": {"default": 0, "min": 0, "max": len(_COMPASS)},
                      "turn": {"default": 0, "min": -1, "max": 1},
                      "fireZap": {"default": 0, "min": 0, "max": 1},
                  },
                  # Egocentric view window (in grid cells), facing-forward
                  # biased rather than centered.
                  "view": {
                      "left": 5,
                      "right": 5,
                      "forward": 9,
                      "backward": 1,
                      "centered": False
                  },
                  "spriteMap": custom_sprite_map,
              }
          },
          {
              # Zapping beam: long respawn time in this (closed) variant, so
              # being zapped is costly.
              "component": "Zapper",
              "kwargs": {
                  "cooldownTime": 1,
                  "beamLength": 4,
                  "beamRadius": 1,
                  "framesTillRespawn": 100,
                  "penaltyForBeingZapped": 0,
                  "rewardForZapping": 0,
              }
          },
          {
              "component": "ReadyToShootObservation",
          },
      ]
  }
  if _ENABLE_DEBUG_OBSERVATIONS:
    avatar_object["components"].append({
        "component": "LocationObserver",
        "kwargs": {"objectIsAvatar": True, "alsoReportOrientation": True},
    })
  return avatar_object
def create_avatar_objects(num_players):
  """Returns list of avatar objects of length 'num_players'."""
  avatar_objects = []
  for player_idx in range(num_players):
    # The first two player slots always spawn inside the rooms.
    spawn_group = "insideSpawnPoints" if player_idx < 2 else "spawnPoints"
    avatar_objects.append(
        create_avatar_object(player_idx,
                             TARGET_SPRITE_SELF,
                             spawn_group=spawn_group))
  return avatar_objects
def get_config():
  """Default configuration for training on the commons_harvest level.

  Returns:
    A ConfigDict holding the action set, observation names, per-agent
    environment specs, and player-role defaults for this substrate.
  """
  config = config_dict.ConfigDict()
  # Action set configuration.
  config.action_set = ACTION_SET
  # Observation format configuration.
  config.individual_observation_names = [
      "RGB",
      "READY_TO_SHOOT",
  ]
  config.global_observation_names = [
      "WORLD.RGB",
  ]
  # The specs of the environment (from a single-agent perspective).
  config.action_spec = specs.action(len(ACTION_SET))
  config.timestep_spec = specs.timestep({
      "RGB": specs.OBSERVATION["RGB"],
      "READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(144, 192),
  })
  # The roles assigned to each player. Seven players by default, all with
  # the same role.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 7
  return config
def build(
    roles: Sequence[str],
    config: config_dict.ConfigDict,
) -> Mapping[str, Any]:
  """Build substrate definition given player roles."""
  # The config is unused here; player count is derived from the roles.
  del config
  num_players = len(roles)
  # Assemble the substrate definition as a plain dict literal.
  return {
      "levelName": "commons_harvest",
      "levelDirectory": "meltingpot/lua/levels",
      "numPlayers": num_players,
      # Define upper bound of episode length since episodes end stochastically.
      "maxEpisodeLengthFrames": 5000,
      "spriteSize": 8,
      "topology": "BOUNDED",  # Choose from ["BOUNDED", "TORUS"],
      "simulation": {
          "map": ASCII_MAP,
          "gameObjects": create_avatar_objects(num_players),
          "prefabs": create_prefabs(APPLE_RESPAWN_RADIUS,
                                    REGROWTH_PROBABILITIES),
          "charPrefabMap": CHAR_PREFAB_MAP,
          "scene": create_scene(),
      },
  }
|
meltingpot-main
|
meltingpot/configs/substrates/commons_harvest__closed.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Collaborative Cooking: Ring.
Example video: https://youtu.be/j5v7B9pfG9I
The recipe they must follow is for tomato soup:
1. Add three tomatoes to the cooking pot.
2. Wait for the soup to cook (status bar completion).
3. Bring a bowl to the pot and pour the soup from the pot into the bowl.
4. Deliver the bowl of soup at the goal location.
This substrate is a pure common interest game. All players share all rewards.
Players have a `5 x 5` observation window.
Map:
Coordination Ring: A layout with two equally successful movement strategies –
(1) both players moving clockwise, and (2) both players moving anti-clockwise.
If players do not coordinate, they will block each other’s movement.
"""
from meltingpot.configs.substrates import collaborative_cooking as base_config
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
build = base_config.build
# Coordination Ring: Another tight layout requiring significant movement
# coordination between the players, this time in terms of moving clockwise vs
# counterclockwise.
ASCII_MAP = """
xx###C#xx
xx# Cxx
xxDP# #xx
xxO P #xx
xx#OT##xx
"""
def get_config():
  """Default configuration.

  Starts from the shared collaborative-cooking base config and overrides
  the map layout and observation specs for the Coordination Ring layout.

  Returns:
    A ConfigDict for the two-player Coordination Ring substrate.
  """
  config = base_config.get_config()
  # Override the map layout settings.
  config.layout = config_dict.ConfigDict()
  config.layout.ascii_map = ASCII_MAP
  # The specs of the environment (from a single-agent perspective).
  config.timestep_spec = specs.timestep({
      "RGB": specs.rgb(40, 40),
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(40, 72),
  })
  # The roles assigned to each player. Two players, both with the same role.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 2
  return config
|
meltingpot-main
|
meltingpot/configs/substrates/collaborative_cooking__ring.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Factory of the Commons."""
from typing import Any, Dict, Generator, Mapping, Sequence
from meltingpot.utils.substrates import colors
from meltingpot.utils.substrates import shapes
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
# Warning: setting `_ENABLE_DEBUG_OBSERVATIONS = True` may cause slowdown.
_ENABLE_DEBUG_OBSERVATIONS = False
_COMPASS = ["N", "E", "S", "W"]
INVISIBLE = (0, 0, 0, 0)
GRASP_SHAPE = """
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xxxxxxxx
xoxxxxox
xxooooxx
"""
FLOOR_MARKING = {
"name":
"floor_marking",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState":
"floor_marking",
"stateConfigs": [{
"state": "floor_marking",
"layer": "lowestPhysical",
"sprite": "floor_marking",
}],
}
},
{
"component": "Transform"
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["floor_marking"],
"spriteShapes": [shapes.FLOOR_MARKING],
"palettes": [shapes.DISPENSER_BELT_PALETTE],
"noRotates": [False]
}
},
]
}
PINK_CUBE_DISPENSING_ANIMATION = {
"name":
"pink_cube_dispensing_animation",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState":
"waitState",
"stateConfigs": [
{
"state": "waitState",
"layer": "overlay",
},
{
"state": "pink_cube_dispensing_1",
"layer": "overlay",
"sprite": "pink_cube_dispensing_1",
},
{
"state": "pink_cube_dispensing_2",
"layer": "overlay",
"sprite": "pink_cube_dispensing_2",
},
{
"state": "pink_cube_dispensing_3",
"layer": "overlay",
"sprite": "pink_cube_dispensing_3",
},
]
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["pink_cube_dispensing_1",
"pink_cube_dispensing_2",
"pink_cube_dispensing_3"],
"spriteShapes": [shapes.CUBE_DISPENSING_ANIMATION_1,
shapes.CUBE_DISPENSING_ANIMATION_2,
shapes.CUBE_DISPENSING_ANIMATION_3],
"palettes": [{
"a": (255, 174, 182, 255),
"A": (240, 161, 169, 255),
"&": (237, 140, 151, 255),
"x": (0, 0, 0, 0),
}] * 3,
"noRotates": [True] * 3,
}
},
{
"component": "ObjectDispensingAnimation",
"kwargs": {
"frameOne": "pink_cube_dispensing_1",
"frameTwo": "pink_cube_dispensing_2",
"frameThree": "pink_cube_dispensing_3",
"waitState": "waitState",
}
},
]
}
DISPENSER_INDICATOR_PINK_CUBE = {
"name":
"dispenser_indicator_pink_cube",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState":
"dispenser_pink_cube",
"stateConfigs": [
{
"state": "dispenser_pink_cube",
"layer": "midPhysical",
"sprite": "dispenser_pink_cube",
},
]
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["dispenser_pink_cube"],
"spriteShapes": [shapes.HOPPER_INDICATOR_SINGLE_BLOCK],
"palettes": [{
"x": (0, 0, 0, 0),
"a": (255, 174, 182, 255),
}],
"noRotates": [False]
}
},
{
"component": "DispenserIndicator",
"kwargs": {
"objectOne": "PinkCube",
"objectTwo": "NoneNeeded",
}
}
]
}
SPAWN_POINT = {
"name":
"spawnPoint",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState":
"spawnPoint",
"stateConfigs": [{
"state": "spawnPoint",
"layer": "alternateLogic",
"groups": ["spawnPoints"]
}],
}
},
{
"component": "Transform"
},
]
}
def get_blue_cube(initial_state: str, name: str = "blue_cube_live"):
  """Returns a blue cube prefab starting in `initial_state`.

  Args:
    initial_state: state the cube starts in. One of the states configured
      below: "blue_cube" (live, graspable), "blue_jump",
      "blue_cube_drop_one", "blue_cube_drop_two" (animation frames), or
      "waitState" (inactive, no layer/sprite).
    name: name to give the prefab. Defaults to "blue_cube_live" so existing
      callers are unchanged; callers building wait-state cubes can pass a
      more accurate name instead of all instances being labelled "live".

  Returns:
    A prefab dict with StateManager, Transform, Appearance, Receivable,
    ReceiverDropAnimation, Token, ObjectJumpAnimation and Graspable
    components.
  """
  prefab = {
      "name": name,
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": initial_state,
                  "stateConfigs": [
                      {
                          "state": "blue_cube",
                          "layer": "lowerPhysical",
                          "sprite": "blue_cube",
                      },
                      {
                          "state": "blue_jump",
                          "layer": "lowerPhysical",
                          "sprite": "blue_jump",
                      },
                      {
                          "state": "blue_cube_drop_one",
                          "layer": "lowerPhysical",
                          "sprite": "blue_cube_drop_one",
                      },
                      {
                          "state": "blue_cube_drop_two",
                          "layer": "lowerPhysical",
                          "sprite": "blue_cube_drop_two",
                      },
                      # Inactive state: no layer or sprite, so invisible.
                      {
                          "state": "waitState",
                      }
                  ],
              }
          },
          {
              "component": "Transform"
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": ["blue_cube", "blue_cube_drop_one",
                                  "blue_cube_drop_two", "blue_jump"],
                  # Order matches spriteNames above; the jump sprite reuses
                  # the first cube-dispensing animation frame.
                  "spriteShapes": [shapes.BLOCK,
                                   shapes.BLOCK_DROP_1,
                                   shapes.BLOCK_DROP_2,
                                   shapes.CUBE_DISPENSING_ANIMATION_1],
                  "palettes": [shapes.FACTORY_OBJECTS_PALETTE,] * 4,
                  "noRotates": [True] * 4
              }
          },
          {
              "component": "Receivable",
              "kwargs": {
                  "waitState": "waitState",
                  "liveState": "blue_cube",
              }
          },
          {
              "component": "ReceiverDropAnimation",
              "kwargs": {
                  "dropOne": "blue_cube_drop_one",
                  "dropTwo": "blue_cube_drop_two",
              }
          },
          {
              "component": "Token",
              "kwargs": {
                  "type": "BlueCube"
              }
          },
          {
              "component": "ObjectJumpAnimation",
              "kwargs": {
                  "jump": "blue_jump",
                  "drop": "blue_cube",
                  "waitState": "waitState",
              }
          },
          {
              "component": "Graspable",
              "kwargs": {
                  "graspableStates": ("blue_cube",),
                  # Grasps are broken whenever the cube leaves its live state.
                  "disconnectStates": (
                      "blue_jump", "blue_cube_drop_one", "blue_cube_drop_two",
                      "waitState",),
              }
          }
      ]
  }
  return prefab
# Graspable banana token. Starts inactive ("waitState") until dispensed.
BANANA = {
    "name": "banana",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "waitState",
                "stateConfigs": [
                    {
                        "state": "banana",
                        "layer": "lowerPhysical",
                        "sprite": "banana",
                    },
                    {
                        "state": "banana_jump",
                        "layer": "lowerPhysical",
                        "sprite": "banana_jump",
                    },
                    {
                        "state": "banana_drop_one",
                        "layer": "lowerPhysical",
                        "sprite": "banana_drop_one",
                    },
                    {
                        "state": "banana_drop_two",
                        "layer": "lowerPhysical",
                        "sprite": "banana_drop_two",
                    },
                    # Inactive state: no layer or sprite, so invisible.
                    {
                        "state": "waitState"
                    }
                ],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["banana", "banana_drop_one", "banana_drop_two",
                                "banana_jump"],
                # NOTE(review): "banana_jump" reuses the static shapes.BANANA
                # art (there is no separate jump shape here) — confirm
                # intentional.
                "spriteShapes": [shapes.BANANA,
                                 shapes.BANANA_DROP_1,
                                 shapes.BANANA_DROP_2,
                                 shapes.BANANA,],
                "palettes": [shapes.FACTORY_OBJECTS_PALETTE,] * 4,
                "noRotates": [True] * 4
            }
        },
        {
            "component": "Receivable",
            "kwargs": {
                "waitState": "waitState",
                "liveState": "banana",
            }
        },
        {
            "component": "ReceiverDropAnimation",
            "kwargs": {
                "dropOne": "banana_drop_one",
                "dropTwo": "banana_drop_two",
            }
        },
        {
            "component": "Token",
            "kwargs": {
                "type": "Banana"
            }
        },
        {
            "component": "SecondObjectJumpAnimation",
            "kwargs": {
                # NOTE(review): jump and drop both map to the live "banana"
                # state (unlike blue cube / pink cube, which use a distinct
                # jump state) — confirm intentional.
                "jump": "banana",
                "drop": "banana",
                "waitState": "waitState",
            }
        },
        {
            "component": "Graspable",
            "kwargs": {
                "graspableStates": ("banana",),
                "disconnectStates": (
                    "banana_jump", "banana_drop_one", "banana_drop_two",
                    "waitState",),
            }
        }
    ]
}
# Graspable, edible apple token (rewardForEating: 1). Starts inactive.
APPLE = {
    "name": "apples",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "waitState",
                "stateConfigs": [
                    # Inactive state: no layer or sprite, so invisible.
                    {
                        "state": "waitState",
                    },
                    {
                        "state": "apple",
                        "layer": "appleLayer",
                        "sprite": "apple",
                    },
                    {
                        "state": "apple_jump_state",
                        "layer": "appleLayer",
                        "sprite": "apple_jump_sprite",
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["apple", "apple_jump_sprite"],
                "spriteShapes": [shapes.APPLE, shapes.APPLE_JUMP],
                "palettes": [shapes.APPLE_RED_PALETTE] * 2,
                "noRotates": [True] * 2,
            }
        },
        {
            "component": "Graspable",
            "kwargs": {
                "graspableStates": ("apple",),
                "disconnectStates": ("apple_jump_state", "waitState",),
            }
        },
        {
            "component": "AppleComponent",
            "kwargs": {
                "liveState": "apple",
                "waitState": "waitState",
                "rewardForEating": 1,
            }
        },
        {
            "component": "Token",
            "kwargs": {
                "type": "Apple"
            }
        },
        # Two jump-animation components: one driving the dedicated jump state,
        # one keeping the live "apple" state for both jump and drop.
        {
            "component": "ObjectJumpAnimation",
            "kwargs": {
                "jump": "apple_jump_state",
                "drop": "apple",
                "waitState": "waitState",
            }
        },
        {
            "component": "SecondObjectJumpAnimation",
            "kwargs": {
                "jump": "apple",
                "drop": "apple",
                "waitState": "waitState",
            }
        },
    ]
}
# Graspable pink cube token. Same component set as the blue cube, but starts
# inactive and uses an explicit pink palette.
PINK_CUBE = {
    "name": "pink_cube",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "waitState",
                "stateConfigs": [
                    {
                        "state": "pink_cube",
                        "layer": "lowerPhysical",
                        "sprite": "pink_cube",
                    },
                    {
                        "state": "pink_cube_drop_one",
                        "layer": "lowerPhysical",
                        "sprite": "pink_cube_drop_one",
                    },
                    {
                        "state": "pink_cube_drop_two",
                        "layer": "lowerPhysical",
                        "sprite": "pink_cube_drop_two",
                    },
                    {
                        "state": "pink_jump",
                        "layer": "lowerPhysical",
                        "sprite": "pink_jump",
                    },
                    # Inactive state: no layer or sprite, so invisible.
                    {
                        "state": "waitState",
                    }
                ],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["pink_cube", "pink_cube_drop_one",
                                "pink_cube_drop_two", "pink_jump"],
                "spriteShapes": [shapes.BLOCK,
                                 shapes.BLOCK_DROP_1,
                                 shapes.BLOCK_DROP_2,
                                 shapes.CUBE_DISPENSING_ANIMATION_1],
                # Pink palette; "x" is fully transparent.
                "palettes": [{
                    "a": (255, 174, 182, 255),
                    "A": (240, 161, 169, 255),
                    "&": (237, 140, 151, 255),
                    "x": (0, 0, 0, 0),
                }] * 4,
                "noRotates": [True] * 4
            }
        },
        {
            "component": "Receivable",
            "kwargs": {
                "waitState": "waitState",
                "liveState": "pink_cube",
            }
        },
        {
            "component": "Token",
            "kwargs": {
                "type": "PinkCube"
            }
        },
        {
            "component": "ReceiverDropAnimation",
            "kwargs": {
                "dropOne": "pink_cube_drop_one",
                "dropTwo": "pink_cube_drop_two",
            }
        },
        {
            "component": "ObjectJumpAnimation",
            "kwargs": {
                "jump": "pink_jump",
                "drop": "pink_cube",
                "waitState": "waitState",
            }
        },
        {
            "component": "Graspable",
            "kwargs": {
                "graspableStates": ("pink_cube",),
                "disconnectStates": (
                    "pink_cube_drop_one", "pink_cube_drop_two", "pink_jump",
                    "waitState",),
            }
        }
    ]
}
# Three-frame overlay animation played while an apple is being dispensed;
# driven by the ObjectDispensingAnimation component, idle in "waitState".
APPLE_DISPENSING = {
    "name": "apple_dispensing",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "waitState",
                "stateConfigs": [
                    {
                        "state": "waitState",
                        "layer": "overlay",
                    },
                    {
                        "state": "apple_dispensing_1",
                        "layer": "overlay",
                        "sprite": "apple_dispensing_1",
                    },
                    {
                        "state": "apple_dispensing_2",
                        "layer": "overlay",
                        "sprite": "apple_dispensing_2",
                    },
                    {
                        "state": "apple_dispensing_3",
                        "layer": "overlay",
                        "sprite": "apple_dispensing_3",
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["apple_dispensing_1", "apple_dispensing_2",
                                "apple_dispensing_3"],
                "spriteShapes": [shapes.APPLE_DISPENSING_ANIMATION_1,
                                 shapes.APPLE_DISPENSING_ANIMATION_2,
                                 shapes.APPLE_DISPENSING_ANIMATION_3],
                "palettes": [shapes.FACTORY_OBJECTS_PALETTE] * 3,
                "noRotates": [True] * 3,
            }
        },
        {
            "component": "ObjectDispensingAnimation",
            "kwargs": {
                "frameOne": "apple_dispensing_1",
                "frameTwo": "apple_dispensing_2",
                "frameThree": "apple_dispensing_3",
                "waitState": "waitState",
            }
        },
    ]
}
# Six-frame overlay animation for dispensing a blue cube followed by an apple
# (DoubleObjectDispensingAnimation: frames one-three = cube, four-six = apple).
CUBE_APPLE_DISPENSING_ANIMATION = {
    "name": "cube_apple_dispensing_animation",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "waitState",
                "stateConfigs": [
                    {
                        "state": "waitState",
                        "layer": "overlay",
                    },
                    {
                        "state": "apple_dispensing_1",
                        "layer": "overlay",
                        "sprite": "apple_dispensing_1",
                    },
                    {
                        "state": "apple_dispensing_2",
                        "layer": "overlay",
                        "sprite": "apple_dispensing_2",
                    },
                    {
                        "state": "apple_dispensing_3",
                        "layer": "overlay",
                        "sprite": "apple_dispensing_3",
                    },
                    {
                        "state": "blue_cube_dispensing_1",
                        "layer": "overlay",
                        "sprite": "blue_cube_dispensing_1",
                    },
                    {
                        "state": "blue_cube_dispensing_2",
                        "layer": "overlay",
                        "sprite": "blue_cube_dispensing_2",
                    },
                    {
                        "state": "blue_cube_dispensing_3",
                        "layer": "overlay",
                        "sprite": "blue_cube_dispensing_3",
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["apple_dispensing_1", "apple_dispensing_2",
                                "apple_dispensing_3", "blue_cube_dispensing_1",
                                "blue_cube_dispensing_2",
                                "blue_cube_dispensing_3"],
                "spriteShapes": [shapes.APPLE_DISPENSING_ANIMATION_1,
                                 shapes.APPLE_DISPENSING_ANIMATION_2,
                                 shapes.APPLE_DISPENSING_ANIMATION_3,
                                 shapes.CUBE_DISPENSING_ANIMATION_1,
                                 shapes.CUBE_DISPENSING_ANIMATION_2,
                                 shapes.CUBE_DISPENSING_ANIMATION_3],
                "palettes": [shapes.FACTORY_OBJECTS_PALETTE] * 6,
                "noRotates": [True] * 6,
            }
        },
        {
            "component": "DoubleObjectDispensingAnimation",
            "kwargs": {
                # Cube frames play first, then the apple frames.
                "frameOne": "blue_cube_dispensing_1",
                "frameTwo": "blue_cube_dispensing_2",
                "frameThree": "blue_cube_dispensing_3",
                "frameFour": "apple_dispensing_1",
                "frameFive": "apple_dispensing_2",
                "frameSix": "apple_dispensing_3",
                "waitState": "waitState",
            }
        },
    ]
}
# Six-frame overlay animation for dispensing a blue cube followed by a banana
# (frames one-three = cube, four-six = banana).
BANANA_CUBE_DISPENSING_ANIMATION = {
    "name": "banana_cube_dispensing_animation",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "waitState",
                "stateConfigs": [
                    {
                        "state": "waitState",
                        "layer": "overlay",
                    },
                    {
                        "state": "banana_dispensing_1",
                        "layer": "overlay",
                        "sprite": "banana_dispensing_1",
                    },
                    {
                        "state": "banana_dispensing_2",
                        "layer": "overlay",
                        "sprite": "banana_dispensing_2",
                    },
                    {
                        "state": "banana_dispensing_3",
                        "layer": "overlay",
                        "sprite": "banana_dispensing_3",
                    },
                    {
                        "state": "blue_cube_dispensing_1",
                        "layer": "overlay",
                        "sprite": "blue_cube_dispensing_1",
                    },
                    {
                        "state": "blue_cube_dispensing_2",
                        "layer": "overlay",
                        "sprite": "blue_cube_dispensing_2",
                    },
                    {
                        "state": "blue_cube_dispensing_3",
                        "layer": "overlay",
                        "sprite": "blue_cube_dispensing_3",
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["banana_dispensing_1", "banana_dispensing_2",
                                "banana_dispensing_3", "blue_cube_dispensing_1",
                                "blue_cube_dispensing_2",
                                "blue_cube_dispensing_3"],
                # NOTE(review): the middle banana frame uses the static
                # shapes.BANANA art while frames 1 and 3 use
                # BANANA_DISPENSING_ANIMATION_{1,3} — confirm this is
                # intentional and not a stand-in for a missing _2 shape.
                "spriteShapes": [shapes.BANANA_DISPENSING_ANIMATION_1,
                                 shapes.BANANA,
                                 shapes.BANANA_DISPENSING_ANIMATION_3,
                                 shapes.CUBE_DISPENSING_ANIMATION_1,
                                 shapes.CUBE_DISPENSING_ANIMATION_2,
                                 shapes.CUBE_DISPENSING_ANIMATION_3],
                "palettes": [shapes.FACTORY_OBJECTS_PALETTE] * 6,
                "noRotates": [True] * 6,
            }
        },
        {
            "component": "DoubleObjectDispensingAnimation",
            "kwargs": {
                # Cube frames play first, then the banana frames.
                "frameOne": "blue_cube_dispensing_1",
                "frameTwo": "blue_cube_dispensing_2",
                "frameThree": "blue_cube_dispensing_3",
                "frameFour": "banana_dispensing_1",
                "frameFive": "banana_dispensing_2",
                "frameSix": "banana_dispensing_3",
                "waitState": "waitState",
            }
        },
    ]
}
# Three-frame pink-cube dispensing overlay, driven through the double-object
# animation component with the second object's frames set to "waitState".
PINK_CUBE_DISPENSING = {
    "name": "pink_cube_dispensing",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "waitState",
                "stateConfigs": [
                    {
                        "state": "waitState",
                        "layer": "overlay",
                    },
                    {
                        "state": "pink_cube_dispensing_1",
                        "layer": "overlay",
                        "sprite": "pink_cube_dispensing_1",
                    },
                    {
                        "state": "pink_cube_dispensing_2",
                        "layer": "overlay",
                        "sprite": "pink_cube_dispensing_2",
                    },
                    {
                        "state": "pink_cube_dispensing_3",
                        "layer": "overlay",
                        "sprite": "pink_cube_dispensing_3",
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["pink_cube_dispensing_1",
                                "pink_cube_dispensing_2",
                                "pink_cube_dispensing_3"],
                "spriteShapes": [shapes.CUBE_DISPENSING_ANIMATION_1,
                                 shapes.CUBE_DISPENSING_ANIMATION_2,
                                 shapes.CUBE_DISPENSING_ANIMATION_3],
                # Pink palette; "x" is fully transparent.
                "palettes": [{
                    "a": (255, 174, 182, 255),
                    "A": (240, 161, 169, 255),
                    "&": (237, 140, 151, 255),
                    "x": (0, 0, 0, 0),
                }] * 3,
                "noRotates": [True] * 3,
            }
        },
        {
            "component": "DoubleObjectDispensingAnimation",
            "kwargs": {
                "frameOne": "pink_cube_dispensing_1",
                "frameTwo": "pink_cube_dispensing_2",
                "frameThree": "pink_cube_dispensing_3",
                # NOTE(review): frames four-six are "waitState", i.e. the
                # second half of the animation shows nothing — presumably a
                # single-object animation reusing the double component;
                # confirm.
                "frameFour": "waitState",
                "frameFive": "waitState",
                "frameSix": "waitState",
                "waitState": "waitState",
            }
        },
    ]
}
# Hopper intake: a Receiver whose open/closing/closed visuals are driven by
# the HopperMouth component. Starts open.
HOPPER_MOUTH = {
    "name": "hopper_mouth",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "hopper_mouth_open",
                "stateConfigs": [
                    {
                        "state": "hopper_mouth_closed",
                        "layer": "lowestPhysical",
                        "sprite": "hopper_mouth_closed",
                    },
                    {
                        "state": "hopper_mouth_closing",
                        "layer": "lowestPhysical",
                        "sprite": "hopper_mouth_closing",
                    },
                    {
                        "state": "hopper_mouth_open",
                        "layer": "lowestPhysical",
                        "sprite": "hopper_mouth_open",
                    },
                    # Inactive state: no layer or sprite, so invisible.
                    {
                        "state": "waitState"
                    }
                ],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["hopper_mouth_closed", "hopper_mouth_closing",
                                "hopper_mouth_open"],
                "spriteShapes": [shapes.HOPPER_CLOSED,
                                 shapes.HOPPER_CLOSING,
                                 shapes.HOPPER_OPEN],
                "palettes": [shapes.FACTORY_MACHINE_BODY_PALETTE] * 3,
                "noRotates": [False] * 3
            }
        },
        {
            "component": "Receiver"
        },
        {
            "component": "HopperMouth",
            "kwargs": {
                # NOTE(review): the intermediate "hopper_mouth_closing" state
                # is wired to the "opening" kwarg — confirm naming.
                "closed": "hopper_mouth_closed",
                "opening": "hopper_mouth_closing",
                "open": "hopper_mouth_open",
            }
        },
    ]
}
# Hopper housing with idle and activated appearances (no driving component
# here; state changes are triggered externally).
HOPPER_BODY = {
    "name": "hopper_body",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "hopper_body",
                "stateConfigs": [
                    {
                        "state": "hopper_body",
                        "layer": "midPhysical",
                        "sprite": "hopper_body",
                    },
                    {
                        "state": "hopper_body_activated",
                        "layer": "midPhysical",
                        "sprite": "hopper_body_activated",
                    }
                ],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["hopper_body", "hopper_body_activated"],
                "spriteShapes": [shapes.HOPPER_BODY,
                                 shapes.HOPPER_BODY_ACTIVATED],
                # Greys for the casing ("a"/"b") and a single blue-grey tone
                # shared by "f"/"g"/"c"; "x" is fully transparent.
                "palettes": [{
                    "a": (140, 129, 129, 255),
                    "b": (84, 77, 77, 255),
                    "f": (92, 98, 120, 255),
                    "g": (92, 98, 120, 255),
                    "c": (92, 98, 120, 255),
                    "x": (0, 0, 0, 0),
                }] * 2,
                "noRotates": [False] * 2
            }
        },
    ]
}
# Indicator above a hopper that wants two blocks: shows two blocks remaining,
# then one, then disappears ("waitState"). Count "Double", type "TwoBlocks".
HOPPER_INDICATOR = {
    "name": "hopper_indicator",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "hopper_indicator_two",
                "stateConfigs": [
                    # Inactive state: no layer or sprite, so invisible.
                    {
                        "state": "waitState",
                    },
                    {
                        "state": "hopper_indicator_one",
                        "layer": "upperPhysical",
                        "sprite": "hopper_indicator_one",
                        "groups": ["indicator"]
                    },
                    {
                        "state": "hopper_indicator_two",
                        "layer": "upperPhysical",
                        "sprite": "hopper_indicator_two",
                        "groups": ["indicator"]
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": [
                    "hopper_indicator_two", "hopper_indicator_one",
                ],
                "spriteShapes": [
                    shapes.HOPPER_INDICATOR_TWO_BLOCKS,
                    shapes.HOPPER_INDICATOR_ONE_BLOCK,],
                "palettes": [shapes.FACTORY_OBJECTS_PALETTE] * 2,
                "noRotates": [False] * 2
            }
        },
        {
            "component": "ReceiverIndicator",
            "kwargs": {
                "waitState": "waitState",
                "liveState": "hopper_indicator_two",
                "secondLiveState": "hopper_indicator_one",
                "count": "Double",
                "type": "TwoBlocks",
            }
        }
    ]
}
# Single-item hopper indicator for a blue cube. Starts hidden; the unused
# secondLiveState is pointed at "waitState".
HOPPER_INDICATOR_BLUE_CUBE = {
    "name": "hopper_indicator_blue_cube",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "waitState",
                "stateConfigs": [
                    # Inactive state: no layer or sprite, so invisible.
                    {
                        "state": "waitState",
                    },
                    {
                        "state": "blue_cube_indicator",
                        "layer": "upperPhysical",
                        "sprite": "blue_cube_indicator",
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["blue_cube_indicator"],
                "spriteShapes": [shapes.HOPPER_INDICATOR_SINGLE_BLOCK],
                "palettes": [shapes.FACTORY_OBJECTS_PALETTE],
                "noRotates": [False]
            }
        },
        {
            "component": "ReceiverIndicator",
            "kwargs": {
                "waitState": "waitState",
                "liveState": "blue_cube_indicator",
                "secondLiveState": "waitState",
                "count": "Single",
                "type": "BlueCube"
            }
        }
    ]
}
# Single-item hopper indicator for a banana. Unlike the other single-item
# indicators, it starts visible ("hopper_banana").
HOPPER_INDICATOR_BANANA = {
    "name": "hopper_indicator_banana",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "hopper_banana",
                "stateConfigs": [
                    {
                        "state": "hopper_banana",
                        "layer": "upperPhysical",
                        "sprite": "hopper_banana",
                    },
                    # Inactive state: no layer or sprite, so invisible.
                    {
                        "state": "waitState"
                    }
                ]
            }
        },
        {
            "component": "ReceiverIndicator",
            "kwargs": {
                "waitState": "waitState",
                "liveState": "hopper_banana",
                "secondLiveState": "waitState",
                "count": "Single",
                "type": "Banana",
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["hopper_banana"],
                "spriteShapes": [shapes.HOPPER_INDICATOR_SINGLE_BANANA],
                "palettes": [shapes.FACTORY_OBJECTS_PALETTE],
                "noRotates": [False]
            }
        },
    ]
}
# Single-item hopper indicator for a pink cube. Starts hidden.
HOPPER_INDICATOR_PINK_CUBE = {
    "name": "hopper_indicator_pink_cube",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "waitState",
                "stateConfigs": [
                    # Inactive state: no layer or sprite, so invisible.
                    {
                        "state": "waitState",
                    },
                    {
                        "state": "hopper_pink_cube",
                        "layer": "upperPhysical",
                        "sprite": "hopper_pink_cube",
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["hopper_pink_cube"],
                "spriteShapes": [shapes.HOPPER_INDICATOR_SINGLE_BLOCK],
                # Pink block; "x" is fully transparent.
                "palettes": [{
                    "x": (0, 0, 0, 0),
                    "a": (255, 174, 182, 255),
                }],
                "noRotates": [False]
            }
        },
        {
            "component": "ReceiverIndicator",
            "kwargs": {
                "waitState": "waitState",
                "liveState": "hopper_pink_cube",
                "secondLiveState": "waitState",
                "count": "Single",
                "type": "PinkCube",
            }
        }
    ]
}
# Indicator for a dispenser producing a blue cube and a banana.
DISPENSER_INDICATOR_BANANA_CUBE = {
    "name": "dispenser_indicator_banana_cube",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "banana_cube",
                "stateConfigs": [
                    {
                        "state": "banana_cube",
                        "layer": "midPhysical",
                        "sprite": "banana_cube",
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["banana_cube"],
                "spriteShapes": [shapes.HOPPER_INDICATOR_ON],
                "palettes": [shapes.FACTORY_OBJECTS_PALETTE],
                "noRotates": [False]
            }
        },
        {
            "component": "DispenserIndicator",
            "kwargs": {
                "objectOne": "BlueCube",
                "objectTwo": "Banana",
            }
        }
    ]
}
# Indicator for a dispenser producing an apple and a blue cube.
DISPENSER_INDICATOR_CUBE_APPLE = {
    "name": "dispenser_indicator_cube_apple",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "cube_apple",
                "stateConfigs": [
                    {
                        "state": "cube_apple",
                        "layer": "midPhysical",
                        "sprite": "cube_apple",
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["cube_apple"],
                "spriteShapes": [shapes.APPLE_CUBE_INDICATOR],
                "palettes": [shapes.FACTORY_OBJECTS_PALETTE],
                "noRotates": [False]
            }
        },
        {
            "component": "DispenserIndicator",
            "kwargs": {
                "objectOne": "Apple",
                "objectTwo": "BlueCube",
            }
        }
    ]
}
# Indicator for a dispenser producing a single apple (no second object).
DISPENSER_INDICATOR_APPLE = {
    "name": "dispenser_indicator_apple",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "dispenser_indicator_apple",
                "stateConfigs": [
                    {
                        "state": "dispenser_indicator_apple",
                        "layer": "midPhysical",
                        "sprite": "dispenser_indicator_apple",
                        "groups": ["indicator"]
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": [
                    "dispenser_indicator_apple",
                ],
                "spriteShapes": [
                    shapes.APPLE_INDICATOR],
                "palettes": [shapes.FACTORY_OBJECTS_PALETTE],
                "noRotates": [False]
            }
        },
        {
            "component": "DispenserIndicator",
            "kwargs": {
                "objectOne": "Apple",
                "objectTwo": "NoneNeeded",
            }
        }
    ]
}
# Indicator for a dispenser producing two apples.
DISPENSER_INDICATOR_TWO_APPLES = {
    "name": "dispenser_indicator_two_apples",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "two_apples",
                "stateConfigs": [
                    {
                        "state": "two_apples",
                        "layer": "midPhysical",
                        "sprite": "two_apples",
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["two_apples"],
                "spriteShapes": [shapes.DOUBLE_APPLE_INDICATOR],
                "palettes": [shapes.FACTORY_OBJECTS_PALETTE],
                "noRotates": [False]
            }
        },
        {
            "component": "DispenserIndicator",
            "kwargs": {
                "objectOne": "Apple",
                "objectTwo": "Apple",
            }
        }
    ]
}
# Dispenser housing with idle and activated appearances; both states are in
# the "dispenser" group.
DISPENSER_BODY = {
    "name": "dispenser_body",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "dispenser_body",
                "stateConfigs": [
                    {
                        "state": "dispenser_body",
                        "layer": "lowerPhysical",
                        "sprite": "dispenser_body",
                        "groups": ["dispenser"]
                    },
                    {
                        "state": "dispenser_body_activated",
                        "layer": "lowerPhysical",
                        "sprite": "dispenser_body_activated",
                        "groups": ["dispenser"]
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": [
                    "dispenser_body",
                    "dispenser_body_activated",
                ],
                "spriteShapes": [
                    shapes.DISPENSER_BODY,
                    shapes.DISPENSER_BODY_ACTIVATED,
                ],
                "palettes": [shapes.FACTORY_MACHINE_BODY_PALETTE] * 2,
                "noRotates": [False] * 2
            }
        },
    ]
}
# Conveyor belt under the dispenser: one deactivated state plus a three-frame
# running animation driven by ConveyerBeltOnAnimation.
DISPENSER_BELT = {
    "name": "dispenser_belt",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "dispenser_belt_deactivated",
                "stateConfigs": [
                    {
                        "state": "dispenser_belt_deactivated",
                        "layer": "lowestPhysical",
                        "sprite": "dispenser_belt_deactivated",
                        "groups": ["dispenser"]
                    },
                    {
                        "state": "dispenser_belt_on_position_1",
                        "layer": "lowestPhysical",
                        "sprite": "dispenser_belt_on_position_1",
                        "groups": ["dispenser"]
                    },
                    {
                        "state": "dispenser_belt_on_position_2",
                        "layer": "lowestPhysical",
                        "sprite": "dispenser_belt_on_position_2",
                        "groups": ["dispenser"]
                    },
                    {
                        "state": "dispenser_belt_on_position_3",
                        "layer": "lowestPhysical",
                        "sprite": "dispenser_belt_on_position_3",
                        "groups": ["dispenser"]
                    },
                ]
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": [
                    "dispenser_belt_deactivated",
                    "dispenser_belt_on_position_1",
                    "dispenser_belt_on_position_2",
                    "dispenser_belt_on_position_3",
                ],
                "spriteShapes": [
                    shapes.DISPENSER_BELT_OFF,
                    shapes.DISPENSER_BELT_ON_POSITION_1,
                    shapes.DISPENSER_BELT_ON_POSITION_2,
                    shapes.DISPENSER_BELT_ON_POSITION_3,
                ],
                "palettes": [shapes.DISPENSER_BELT_PALETTE] * 4,
                "noRotates": [False] * 4
            }
        },
        {
            "component": "ConveyerBeltOnAnimation",
            "kwargs": {
                "waitState": "dispenser_belt_deactivated",
                "stateOne": "dispenser_belt_on_position_1",
                "stateTwo": "dispenser_belt_on_position_2",
                "stateThree": "dispenser_belt_on_position_3",
            }
        }
    ]
}
# North-west perspective-wall corner. Blocks both "hold" and "shove" beams.
# NOTE(review): NW sits on "lowerPhysical" while NE sits on "upperPhysical" —
# confirm the layer asymmetry is intentional.
NW_WALL_CORNER = {
    "name": "nw_wall_corner",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "nw_wall_corner",
                "stateConfigs": [{
                    "state": "nw_wall_corner",
                    "layer": "lowerPhysical",
                    "sprite": "NwWallCorner",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["NwWallCorner"],
                "spriteShapes": [shapes.NW_PERSPECTIVE_WALL],
                "palettes": [shapes.PERSPECTIVE_WALL_PALETTE],
                "noRotates": [False]
            }
        },
        {"component": "BeamBlocker", "kwargs": {"beamType": "hold"}},
        {"component": "BeamBlocker", "kwargs": {"beamType": "shove"}},
    ]
}
# North-east perspective-wall corner. Blocks both "hold" and "shove" beams.
NE_WALL_CORNER = {
    "name": "ne_wall_corner",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "ne_wall_corner",
                "stateConfigs": [{
                    "state": "ne_wall_corner",
                    "layer": "upperPhysical",
                    "sprite": "NeWallCorner",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["NeWallCorner"],
                "spriteShapes": [shapes.NE_PERSPECTIVE_WALL],
                "palettes": [shapes.PERSPECTIVE_WALL_PALETTE],
                "noRotates": [False]
            }
        },
        {"component": "BeamBlocker", "kwargs": {"beamType": "hold"}},
        {"component": "BeamBlocker", "kwargs": {"beamType": "shove"}},
    ]
}
# Horizontal perspective-wall segment. Blocks "hold" and "shove" beams.
WALL_HORIZONTAL = {
    "name": "wall_horizontal",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "wall_horizontal",
                "stateConfigs": [{
                    "state": "wall_horizontal",
                    "layer": "lowerPhysical",
                    "sprite": "WallHorizontal",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["WallHorizontal"],
                "spriteShapes": [shapes.PERSPECTIVE_WALL],
                "palettes": [shapes.PERSPECTIVE_WALL_PALETTE],
                "noRotates": [False]
            }
        },
        {"component": "BeamBlocker", "kwargs": {"beamType": "hold"}},
        {"component": "BeamBlocker", "kwargs": {"beamType": "shove"}},
    ]
}
# T-shaped perspective-wall junction. Blocks "hold" and "shove" beams.
WALL_T_COUPLING = {
    "name": "wall_t_coupling",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "wall_t_coupling",
                "stateConfigs": [{
                    "state": "wall_t_coupling",
                    "layer": "upperPhysical",
                    "sprite": "WallTCoupling",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["WallTCoupling"],
                "spriteShapes": [shapes.PERSPECTIVE_WALL_T_COUPLING],
                "palettes": [shapes.PERSPECTIVE_WALL_PALETTE],
                "noRotates": [False]
            }
        },
        {"component": "BeamBlocker", "kwargs": {"beamType": "hold"}},
        {"component": "BeamBlocker", "kwargs": {"beamType": "shove"}},
    ]
}
# East-facing perspective-wall segment. Blocks "hold" and "shove" beams.
WALL_EAST = {
    "name": "wall_east",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "wall_east",
                "stateConfigs": [{
                    "state": "wall_east",
                    "layer": "lowerPhysical",
                    "sprite": "WallEast",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["WallEast"],
                "spriteShapes": [shapes.E_PERSPECTIVE_WALL],
                "palettes": [shapes.PERSPECTIVE_WALL_PALETTE],
                "noRotates": [False]
            }
        },
        {"component": "BeamBlocker", "kwargs": {"beamType": "hold"}},
        {"component": "BeamBlocker", "kwargs": {"beamType": "shove"}},
    ]
}
# West-facing perspective-wall segment. Blocks "hold" and "shove" beams.
WALL_WEST = {
    "name": "wall_west",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "wall_west",
                "stateConfigs": [{
                    "state": "wall_west",
                    "layer": "lowerPhysical",
                    "sprite": "WallWest",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["WallWest"],
                "spriteShapes": [shapes.W_PERSPECTIVE_WALL],
                "palettes": [shapes.PERSPECTIVE_WALL_PALETTE],
                "noRotates": [False]
            }
        },
        {"component": "BeamBlocker", "kwargs": {"beamType": "hold"}},
        {"component": "BeamBlocker", "kwargs": {"beamType": "shove"}},
    ]
}
# Middle perspective-wall segment. Blocks "hold" and "shove" beams.
WALL_MIDDLE = {
    "name": "wall_middle",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "wall_middle",
                "stateConfigs": [{
                    "state": "wall_middle",
                    "layer": "lowerPhysical",
                    "sprite": "WallMiddle",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["WallMiddle"],
                "spriteShapes": [shapes.MID_PERSPECTIVE_WALL],
                "palettes": [shapes.PERSPECTIVE_WALL_PALETTE],
                "noRotates": [False]
            }
        },
        {"component": "BeamBlocker", "kwargs": {"beamType": "hold"}},
        {"component": "BeamBlocker", "kwargs": {"beamType": "shove"}},
    ]
}
# Doorway threshold decal on the lowest layer; unlike the walls it has no
# BeamBlocker components, so beams pass over it.
THRESHOLD = {
    "name": "threshold",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "threshold",
                "stateConfigs": [{
                    "state": "threshold",
                    "layer": "lowestPhysical",
                    "sprite": "Threshold",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["Threshold"],
                "spriteShapes": [shapes.PERSPECTIVE_THRESHOLD],
                "palettes": [shapes.PERSPECTIVE_WALL_PALETTE],
                "noRotates": [False]
            }
        },
    ]
}
# Background floor tile.
TILED_FLOOR = {
    "name": "tiled_floor",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "tiled_floor",
                "stateConfigs": [{
                    "state": "tiled_floor",
                    "layer": "background",
                    "sprite": "tiled_floor",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["tiled_floor"],
                "spriteShapes": [shapes.METAL_FLOOR_DOUBLE_SPACED],
                "palettes": [shapes.FACTORY_FLOOR_PALETTE],
                "noRotates": [False]
            }
        },
    ]
}
# Decorative floor marking decal (belt-colored), on the lowest physical layer.
FLOOR_MARKING = {
    "name": "floor_marking",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "floor_marking",
                "stateConfigs": [{
                    "state": "floor_marking",
                    "layer": "lowestPhysical",
                    "sprite": "floor_marking",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["floor_marking"],
                "spriteShapes": [shapes.FLOOR_MARKING],
                "palettes": [shapes.DISPENSER_BELT_PALETTE],
                "noRotates": [False]
            }
        },
    ]
}
# Long floor marking decal, top half.
FLOOR_MARKING_TOP = {
    "name": "floor_marking_top",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "floor_marking_top",
                "stateConfigs": [{
                    "state": "floor_marking_top",
                    "layer": "lowestPhysical",
                    "sprite": "floor_marking_top",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["floor_marking_top"],
                "spriteShapes": [shapes.FLOOR_MARKING_LONG_TOP],
                "palettes": [shapes.DISPENSER_BELT_PALETTE],
                "noRotates": [False]
            }
        },
    ]
}
# Long floor marking decal, bottom half.
FLOOR_MARKING_BOTTOM = {
    "name": "floor_marking_bottom",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "floor_marking_bottom",
                "stateConfigs": [{
                    "state": "floor_marking_bottom",
                    "layer": "lowestPhysical",
                    "sprite": "floor_marking_bottom",
                }],
            }
        },
        {
            "component": "Transform"
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["floor_marking_bottom"],
                "spriteShapes": [shapes.FLOOR_MARKING_LONG_BOTTOM],
                "palettes": [shapes.DISPENSER_BELT_PALETTE],
                "noRotates": [False]
            }
        },
    ]
}
# Reserve the first human-readable color for the focal player's own sprite;
# remaining colors stay in `human_readable_colors` for other uses.
human_readable_colors = list(colors.human_readable)
target_sprite_color = human_readable_colors.pop(0)
grappling_target_color_palette = shapes.get_palette(target_sprite_color)
# Add character mappings to avatar palette for the Magic Beam overlay
# (semi-transparent purples).
grappling_target_color_palette["P"] = (196, 77, 190, 130)
grappling_target_color_palette["p"] = (184, 72, 178, 80)
# Sprites used to render the "self" avatar in each grappling interaction
# state.
TARGET_SPRITE_SELF = {
    "default": {
        "name": "Self",
        "shape": shapes.CUTE_AVATAR,
        "palette": shapes.get_palette(target_sprite_color),
        "noRotate": True,
    },
    "grappling": {
        "name": "SelfGrappling",
        "shape": shapes.CUTE_AVATAR_ARMS_UP,
        "palette": grappling_target_color_palette,
        "noRotate": True,
    },
    "grappled": {
        "name": "SelfGrappled",
        "shape": shapes.MAGIC_GRAPPLED_AVATAR,
        "palette": grappling_target_color_palette,
        "noRotate": True,
    },
}
# PREFABS is a dictionary mapping names to template game objects that can
# be cloned and placed in multiple locations according to an ASCII map.
PREFABS = {
    "spawn_point": SPAWN_POINT,
    # Graspable objects.
    "apple": APPLE,
    "blue_cube_live": get_blue_cube(initial_state="blue_cube"),
    "pink_cube": PINK_CUBE,
    "blue_cube_wait": get_blue_cube(initial_state="waitState"),
    "banana": BANANA,
    # Dynamic components.
    "hopper_body": HOPPER_BODY,
    "hopper_mouth": HOPPER_MOUTH,
    # Hopper indicators.
    "hopper_indicator": HOPPER_INDICATOR,
    "hopper_indicator_pink_cube": HOPPER_INDICATOR_PINK_CUBE,
    "hopper_indicator_blue_cube": HOPPER_INDICATOR_BLUE_CUBE,
    "hopper_indicator_banana": HOPPER_INDICATOR_BANANA,
    # Dispenser indicators.
    "dispenser_indicator_apple": DISPENSER_INDICATOR_APPLE,
    "dispenser_indicator_two_apples": DISPENSER_INDICATOR_TWO_APPLES,
    "dispenser_indicator_pink_cube": DISPENSER_INDICATOR_PINK_CUBE,
    "dispenser_indicator_banana_cube": DISPENSER_INDICATOR_BANANA_CUBE,
    "dispenser_indicator_cube_apple": DISPENSER_INDICATOR_CUBE_APPLE,
    "dispenser_body": DISPENSER_BODY,
    "dispenser_belt": DISPENSER_BELT,
    "apple_dispensing_animation": APPLE_DISPENSING,
    "pink_cube_dispensing_animation": PINK_CUBE_DISPENSING_ANIMATION,
    "banana_cube_dispensing_animation": BANANA_CUBE_DISPENSING_ANIMATION,
    "cube_apple_dispensing_animation": CUBE_APPLE_DISPENSING_ANIMATION,
    # Static components.
    "nw_wall_corner": NW_WALL_CORNER,
    "ne_wall_corner": NE_WALL_CORNER,
    "wall_horizontal": WALL_HORIZONTAL,
    "wall_t_coupling": WALL_T_COUPLING,
    "wall_east": WALL_EAST,
    "wall_west": WALL_WEST,
    "wall_middle": WALL_MIDDLE,
    "threshold": THRESHOLD,
    "tiled_floor": TILED_FLOOR,
    "floor_marking": FLOOR_MARKING,
    "floor_marking_top": FLOOR_MARKING_TOP,
    "floor_marking_bottom": FLOOR_MARKING_BOTTOM,
}
# Primitive action components. Each action assigns a value to every action
# dimension: move (0-4), turn (-1/0/1), pickup, grasp, hold, shove.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "pickup": 0, "grasp": 0, "hold": 0, "shove": 0}
FORWARD = {"move": 1, "turn": 0, "pickup": 0, "grasp": 0, "hold": 0, "shove": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "pickup": 0, "grasp": 0, "hold": 0, "shove": 0}
BACKWARD = {"move": 3, "turn": 0, "pickup": 0, "grasp": 0, "hold": 0, "shove": 0}
STEP_LEFT = {"move": 4, "turn": 0, "pickup": 0, "grasp": 0, "hold": 0, "shove": 0}
TURN_LEFT = {"move": 0, "turn": -1, "pickup": 0, "grasp": 0, "hold": 0, "shove": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "pickup": 0, "grasp": 0, "hold": 0, "shove": 0}
PICKUP = {"move": 0, "turn": 0, "pickup": 1, "grasp": 0, "hold": 0, "shove": 0}
GRASP = {"move": 0, "turn": 0, "pickup": 0, "grasp": 1, "hold": 0, "shove": 0}
HOLD = {"move": 0, "turn": 0, "pickup": 0, "grasp": 0, "hold": 1, "shove": 0}
# Notice that SHOVE includes both `hold` and `shove` parts.
SHOVE = {"move": 0, "turn": 0, "pickup": 0, "grasp": 0, "hold": 1, "shove": 1}
PULL = {"move": 0, "turn": 0, "pickup": 0, "grasp": 0, "hold": 1, "shove": -1}
# pyformat: enable
# pylint: enable=bad-whitespace
# The ordered discrete action space: index in this tuple is the action id.
ACTION_SET = (
    NOOP,
    FORWARD,
    BACKWARD,
    STEP_LEFT,
    STEP_RIGHT,
    TURN_LEFT,
    TURN_RIGHT,
    PICKUP,
    GRASP,
    HOLD,
    SHOVE,
    PULL,
)
def create_scene():
  """Builds the global scene object controlling episode termination."""
  state_manager = {
      "component": "StateManager",
      "kwargs": {
          "initialState": "scene",
          "stateConfigs": [{"state": "scene"}],
      },
  }
  # Episodes run at least 1000 frames, after which termination is rolled
  # every 100 frames with probability 0.1.
  episode_ending = {
      "component": "StochasticIntervalEpisodeEnding",
      "kwargs": {
          "minimumFramesPerEpisode": 1000,
          "intervalLength": 100,  # Set equal to unroll length.
          "probabilityTerminationPerInterval": 0.1,
      },
  }
  return {
      "name": "scene",
      "components": [
          state_manager,
          {"component": "Transform"},
          episode_ending,
      ],
  }
def _create_stamina_overlay(player_idx: int,
                            max_stamina_bar_states: int,
                            ) -> Generator[Dict[str, Any], None, None]:
  """Create stamina marker overlay objects.

  Args:
    player_idx: 0-based index of the player this overlay tracks.
    max_stamina_bar_states: number of discrete bar levels (one sprite state
      is declared per level).

  Yields:
    Four game-object config dicts, one per compass direction ("N", "E", "S",
    "W"); only the one matching the player's facing direction is visible.
  """
  # Lua is 1-indexed.
  lua_idx = player_idx + 1
  stamina_bar_state_configs = [
      # Invisible inactive (dead) overlay type.
      {"state": "staminaBarWait"},
  ]
  stamina_bar_sprite_names = []
  stamina_bar_sprite_shapes = []
  # Each player's stamina bars must be in their own layer so they do not
  # interact/collide with other players' stamina bars.
  stamina_bar_layer = f"superOverlay_{player_idx}"
  # Declare one state per level of the stamina bar.
  for i in range(max_stamina_bar_states):
    sprite_name = f"sprite_for_level_{i}"
    stamina_bar_state_configs.append(
        {"state": f"level_{i}",
         "layer": stamina_bar_layer,
         "sprite": sprite_name})
    stamina_bar_sprite_names.append(sprite_name)
    # Sprites are 8x8 ascii grids; "x" is transparent filler. The bar itself
    # occupies the bottom row, drawn as a run of R (red), Y (yellow) and
    # G (green) cells whose counts vary with level `i`.
    xs = "\nxxxxxxxx"
    blank_space = xs * 7
    # NOTE(review): this arithmetic presumes max_stamina_bar_states <= 19
    # (see the default in create_avatar_objects); for i >= 13 the bar is
    # drawn fully blank — confirm intent if the default ever changes.
    number_of_rs = max(6 - i, 0)
    number_of_ys = i if i < 7 else 12 - i
    number_of_gs = max(i - 6, 0)
    if i >= 13:
      level = blank_space + xs
    else:
      level = (
          blank_space
          + "\nx"
          + "G" * number_of_gs
          + "Y" * number_of_ys
          + "R" * number_of_rs
          + "x"
      )
    empty = "\n".join(["x" * 8] * 8)
    # Replace the east/south/west sprites with invisible sprites so the only
    # stamina bar rendered is the one in the direction that the current player
    # is facing.
    stamina_bar_sprite_shapes.append((level, empty, empty, empty))
  # Create a stamina bar for each compass direction. Only the direction the
  # current player is facing is visible.
  for direction in ("N", "E", "S", "W"):
    yield {
        "name": "avatar_stamina_bar",
        "components": [
            {
                "component": "StateManager",
                "kwargs": {
                    "initialState": "staminaBarWait",
                    "stateConfigs": stamina_bar_state_configs
                }
            },
            {
                "component": "Transform",
            },
            {
                "component": "Appearance",
                "kwargs": {
                    "renderMode": "ascii_shape",
                    "spriteNames": stamina_bar_sprite_names,
                    "spriteShapes": stamina_bar_sprite_shapes,
                    "palettes": [{"G": (62, 137, 72, 255),
                                  "Y": (255, 216, 97, 255),
                                  "R": (162, 38, 51, 255),
                                  "x": INVISIBLE,}] * max_stamina_bar_states,
                    "noRotates": [True] * max_stamina_bar_states
                }
            },
            {
                "component": "StaminaBar",
                "kwargs": {
                    "playerIndex": lua_idx,
                    "waitState": "staminaBarWait",
                    "layer": stamina_bar_layer,
                    "direction": direction
                }
            },
        ]
    }
def create_avatar_object(player_idx: int,
                         target_sprite_self: Dict[str, Any],
                         max_stamina_bar_states: int) -> Dict[str, Any]:
  """Create an avatar object.

  Args:
    player_idx: 0-based index of the player controlling this avatar.
    target_sprite_self: mapping with "default"/"grappling"/"grappled" entries,
      each carrying sprite data used when this avatar observes itself.
    max_stamina_bar_states: forwarded to the Stamina component's `maxStamina`.

  Returns:
    A game-object config dict for the avatar.
  """
  # Lua is 1-indexed.
  lua_index = player_idx + 1
  # Setup the self vs other sprite mapping.
  avatar_sprite_name = "avatarSprite{}".format(lua_index)
  grappling_sprite = "AvatarGrappling" + str(lua_index)
  grappled_sprite = "AvatarGrappled" + str(lua_index)
  custom_sprite_map = {
      avatar_sprite_name: target_sprite_self["default"]["name"],
      grappling_sprite: target_sprite_self["grappling"]["name"],
      grappled_sprite: target_sprite_self["grappled"]["name"],
  }
  live_state_name = "player{}".format(lua_index)
  grappling_state_name = f"player{lua_index}_grappling"
  grappled_state_name = f"player{lua_index}_grappled"
  color_palette = shapes.get_palette(colors.palette[player_idx])
  # Add character mappings to avatar palette for Magic Beam overlay.
  color_palette["P"] = (196, 77, 190, 130)
  color_palette["p"] = (184, 72, 178, 80)
  spawn_group = "spawnPoints"
  avatar_object = {
      "name":
          "avatar",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState":
                      live_state_name,
                  "stateConfigs": [
                      # Initial player state.
                      {
                          "state": live_state_name,
                          "layer": "midPhysical",
                          "sprite": avatar_sprite_name,
                          "contact": "avatar",
                          "groups": ["players"]
                      },
                      # State while this avatar is grappling another.
                      {
                          "state": grappling_state_name,
                          "layer": "upperPhysical",
                          "sprite": grappling_sprite,
                          "contact": "avatar",
                          "groups": ["players"]
                      },
                      # State while this avatar is being grappled.
                      {
                          "state": grappled_state_name,
                          "layer": "upperPhysical",
                          "sprite": grappled_sprite,
                          "contact": "avatar",
                          "groups": ["players"]},
                      # Player wait type for times when they are zapped out.
                      {
                          "state": "playerWait",
                          "groups": ["playerWaits"]
                      },
                  ]
              }
          },
          {
              "component": "Transform"
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": [avatar_sprite_name, grappling_sprite,
                                  grappled_sprite],
                  "spriteShapes": [shapes.CUTE_AVATAR,
                                   shapes.CUTE_AVATAR_ARMS_UP,
                                   shapes.MAGIC_GRAPPLED_AVATAR],
                  "palettes": [color_palette] * 3,
                  "noRotates": [True] * 3
              }
          },
          {
              # Sprites used when this avatar views itself (self vs other).
              "component": "AdditionalSprites",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "customSpriteNames": [
                      target_sprite_self["default"]["name"],
                      target_sprite_self["grappling"]["name"],
                      target_sprite_self["grappled"]["name"],
                  ],
                  "customSpriteShapes": [
                      target_sprite_self["default"]["shape"],
                      target_sprite_self["grappling"]["shape"],
                      target_sprite_self["grappled"]["shape"],
                  ],
                  "customPalettes": [
                      target_sprite_self["default"]["palette"],
                      target_sprite_self["grappling"]["palette"],
                      target_sprite_self["grappled"]["palette"],
                  ],
                  "customNoRotates": [
                      target_sprite_self["default"]["noRotate"],
                      target_sprite_self["grappling"]["noRotate"],
                      target_sprite_self["grappled"]["noRotate"],
                  ],
              }
          },
          {
              "component": "Avatar",
              "kwargs": {
                  "index": lua_index,
                  "aliveState": live_state_name,
                  "additionalLiveStates": [grappled_state_name,
                                           grappling_state_name],
                  "waitState": "playerWait",
                  "spawnGroup": spawn_group,
                  "actionOrder": [
                      "move",
                      "turn",
                      "pickup",
                      "grasp",
                      # Grappling actions
                      "hold",
                      "shove",
                  ],
                  "actionSpec": {
                      "move": {"default": 0, "min": 0, "max": len(_COMPASS)},
                      "turn": {"default": 0, "min": -1, "max": 1},
                      "pickup": {"default": 0, "min": 0, "max": 1},
                      "grasp": {"default": 0, "min": 0, "max": 1},
                      # Grappling actions
                      "hold": {"default": 0, "min": 0, "max": 1},
                      "shove": {"default": 0, "min": -1, "max": 1},
                  },
                  "view": {
                      "left": 5,
                      "right": 5,
                      "forward": 9,
                      "backward": 1,
                      "centered": False
                  },
                  "spriteMap": custom_sprite_map,
              }
          },
          {
              "component": "AvatarGrasp",
              "kwargs": {
                  "shape": GRASP_SHAPE,
                  "palette": color_palette,
                  "graspAction": "grasp",
                  # If multiple objects are at the same position then grasp them
                  # according to their layer in order `precedenceOrder`.
                  "precedenceOrder": ("appleLayer", "lowerPhysical",),
              }
          },
          {
              "component": "Grappling",
              "kwargs": {
                  "shape": shapes.MAGIC_BEAM,
                  "palette": shapes.MAGIC_BEAM_PALETTE,
                  "liveState": live_state_name,
                  "grappledState": grappled_state_name,
                  "grapplingState": grappling_state_name,
              }
          },
          {
              "component": "ReadyToShootObservation",
              "kwargs": {
                  # In this case READY_TO_SHOOT will be 1 if hold is allowed and
                  # will be 0 if not.
                  "zapperComponent": "Grappling",
              }
          },
          {
              "component": "Stamina",
              "kwargs": {
                  "maxStamina": max_stamina_bar_states,
                  "classConfig": {
                      "name": "player",
                      "greenFreezeTime": 0,
                      "yellowFreezeTime": 2,
                      "redFreezeTime": 6,
                      # `decrementRate` = 0.5 means decrease stamina on every
                      # other costly step. `decrementRate` = 1 means decrease
                      # stamina on every costly step.
                      "decrementRate": 1.0,
                  },
                  "amountInvisible": 6,
                  "amountGreen": 6,
                  "amountYellow": 6,
                  "amountRed": 1,
                  # Only movement consumes stamina.
                  "costlyActions": ["move",],
              }
          },
          {
              "component": "StaminaObservation",
              "kwargs": {
                  "staminaComponent": "Stamina",
              }
          },
      ]
  }
  if _ENABLE_DEBUG_OBSERVATIONS:
    avatar_object["components"].append({
        "component": "LocationObserver",
        "kwargs": {"objectIsAvatar": True, "alsoReportOrientation": True},
    })
  return avatar_object
def create_avatar_objects(num_players: int,
                          max_stamina_bar_states: int = 19):
  """Returns list of avatar objects of length 'num_players'.

  For each player this emits, in order: the avatar object itself, its enter
  obstacle connector, and the stamina-bar overlay objects.
  """
  game_objects = []
  for idx in range(num_players):
    game_objects.append(
        create_avatar_object(idx, TARGET_SPRITE_SELF,
                             max_stamina_bar_states - 1))
    game_objects.append(_create_enter_obstacle(idx))
    game_objects.extend(_create_stamina_overlay(idx, max_stamina_bar_states))
  return game_objects
def _create_enter_obstacle(player_idx: int) -> Dict[str, Any]:
# Lua is 1-indexed.
lua_idx = player_idx + 1
return {
"name":
"enter_obstacle",
"components": [
{
"component": "StateManager",
"kwargs":
{
"initialState": "obstacleWait",
"stateConfigs": [
{
"state": "obstacleWait"
},
{
"state": "obstacleLive",
"layer": "lowerPhysical",
}
]
}
},
{
"component": "Transform",
},
{
"component": "AvatarConnector",
"kwargs": {
"playerIndex": lua_idx,
"aliveState": "obstacleLive",
"waitState": "obstacleWait"
}
},
]
}
def get_config():
  """Default configuration for training on the factory2d level."""
  individual_observations = [
      "RGB",
      "READY_TO_SHOOT",
      "STAMINA",
  ]
  global_observations = ["WORLD.RGB"]

  config = config_dict.ConfigDict()
  # Recommended number of players per episode (optional).
  config.recommended_num_players = 12
  # Action set configuration.
  config.action_set = ACTION_SET
  # Observation format configuration.
  config.individual_observation_names = individual_observations
  config.global_observation_names = global_observations
  config.action_spec = specs.action(len(ACTION_SET))
  config.valid_roles = frozenset({"default"})
  return config
def build(
    roles: Sequence[str],
    config: config_dict.ConfigDict,
) -> Mapping[str, Any]:
  """Build substrate definition given player roles."""
  num_players = len(roles)
  simulation = {
      "map": config.layout.ascii_map,
      "gameObjects": create_avatar_objects(num_players),
      "scene": create_scene(),
      "prefabs": PREFABS,
      "charPrefabMap": config.layout.char_prefab_map,
  }
  return dict(
      levelName="factory_of_the_commons",
      levelDirectory="meltingpot/lua/levels",
      numPlayers=num_players,
      maxEpisodeLengthFrames=5000,  # The maximum possible number of frames.
      spriteSize=8,
      topology="BOUNDED",  # Choose from ["BOUNDED", "TORUS"].
      simulation=simulation,
  )
|
meltingpot-main
|
meltingpot/configs/substrates/factory_commons.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Chicken in the Matrix (two player, repeated version).
Example video: https://youtu.be/bFwV-udmRb4
See _Running with Scissors in the Matrix_ for a general description of the
game dynamics. Here the payoff matrix represents the Chicken game. `K = 2`
resources represent "hawk" and "dove" pure strategies.
Players have a `5 x 5` observation window.
The episode has a chance of ending stochastically on every 100 step interval
after step 1000. This usually allows time for 8 or more interactions.
"""
from typing import Any, Dict, Mapping, Sequence, Tuple
from meltingpot.configs.substrates import the_matrix
from meltingpot.utils.substrates import colors
from meltingpot.utils.substrates import shapes
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
# Warning: setting `_ENABLE_DEBUG_OBSERVATIONS = True` may cause slowdown.
_ENABLE_DEBUG_OBSERVATIONS = False

# The number of resources must match the (square) size of the matrix.
NUM_RESOURCES = 2

# This color is green.
RESOURCE1_COLOR = (30, 225, 185, 255)
RESOURCE1_HIGHLIGHT_COLOR = (98, 234, 206, 255)
RESOURCE1_COLOR_DATA = (RESOURCE1_COLOR, RESOURCE1_HIGHLIGHT_COLOR)
# This color is red.
RESOURCE2_COLOR = (225, 30, 70, 255)
RESOURCE2_HIGHLIGHT_COLOR = (234, 98, 126, 255)
RESOURCE2_COLOR_DATA = (RESOURCE2_COLOR, RESOURCE2_HIGHLIGHT_COLOR)

# NOTE(review): the interior whitespace of this map appears to have been
# collapsed by a copy/extraction step — each row should be as wide as the
# wall rows. Verify against the upstream source before relying on it.
ASCII_MAP = """
WWWWWWWWWWWWWWWWWWWWWWW
Wn n nW
W WWW W W W WW W
W W 11a W a22 W W
Wn WW 11a W a22 WW nW
W 11a a22 W
W W
Wn WW WW n WW WWW nW
W W
W 22a W a11 W
Wn W 22a W a11 W nW
W W 22a W a11 WW W
W WWWW W W W WWW W
Wn n nW
WWWWWWWWWWWWWWWWWWWWWWW
"""

_resource_names = [
    "resource_class1",  # Dove
    "resource_class2",  # Hawk
]

# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
    # "a" cells are randomly assigned one of the two resource classes.
    "a": {"type": "choice", "list": _resource_names},
    "1": _resource_names[0],
    "2": _resource_names[1],
    "n": "spawn_point",
    "W": "wall",
}

_COMPASS = ["N", "E", "S", "W"]

# Prefab for impassable wall cells; also blocks the interaction beam.
WALL = {
    "name": "wall",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "wall",
                "stateConfigs": [{
                    "state": "wall",
                    "layer": "upperPhysical",
                    "sprite": "Wall",
                }],
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["Wall"],
                "spriteShapes": [shapes.WALL],
                "palettes": [{"*": (95, 95, 95, 255),
                              "&": (100, 100, 100, 255),
                              "@": (109, 109, 109, 255),
                              "#": (152, 152, 152, 255)}],
                "noRotates": [False]
            }
        },
        {
            "component": "BeamBlocker",
            "kwargs": {
                "beamType": "gameInteraction"
            }
        },
    ]
}

# Prefab for (invisible) avatar spawn locations.
SPAWN_POINT = {
    "name": "spawnPoint",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "spawnPoint",
                "stateConfigs": [{
                    "state": "spawnPoint",
                    "layer": "alternateLogic",
                    "groups": ["spawnPoints"]
                }],
            }
        },
        {
            "component": "Transform",
        },
    ]
}
# PLAYER_COLOR_PALETTES is a list with each entry specifying the color to use
# for the player at the corresponding index.
NUM_PLAYERS_UPPER_BOUND = 8
# A comprehension (rather than a for/append loop) avoids leaking a loop
# variable into module scope.
PLAYER_COLOR_PALETTES = [
    shapes.get_palette(colors.palette[idx])
    for idx in range(NUM_PLAYERS_UPPER_BOUND)
]
# Primitive action components.
# Each primitive assigns a value to every action channel; `move` 1-4 selects a
# relative direction (0 = stay), `turn` is -1/0/+1, `interact` is boolean.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "interact": 0}
FORWARD = {"move": 1, "turn": 0, "interact": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "interact": 0}
BACKWARD = {"move": 3, "turn": 0, "interact": 0}
STEP_LEFT = {"move": 4, "turn": 0, "interact": 0}
TURN_LEFT = {"move": 0, "turn": -1, "interact": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "interact": 0}
INTERACT = {"move": 0, "turn": 0, "interact": 1}
# pyformat: enable
# pylint: enable=bad-whitespace

# Tuple order defines the integer encoding of the discrete action space.
ACTION_SET = (
    NOOP,
    FORWARD,
    BACKWARD,
    STEP_LEFT,
    STEP_RIGHT,
    TURN_LEFT,
    TURN_RIGHT,
    INTERACT,
)

# Sprite each avatar uses to render itself (self vs other observation mode).
TARGET_SPRITE_SELF = {
    "name": "Self",
    "shape": shapes.CUTE_AVATAR,
    "palette": shapes.get_palette((50, 100, 200)),
    "noRotate": True,
}
# Sprite each avatar uses to render every other player.
TARGET_SPRITE_OTHER = {
    "name": "Other",
    "shape": shapes.CUTE_AVATAR,
    "palette": shapes.get_palette((200, 100, 50)),
    "noRotate": True,
}
def create_scene():
  """Creates the global scene (Chicken payoff matrix + episode ending)."""
  # Chicken payoffs: the row player chooses a row, the column player a
  # column. Conventionally D = dove and H = hawk.
  row_player_matrix = [
      #  D  H
      [3, 2],  # D
      [5, 0],  # H
  ]
  column_player_matrix = [
      #  D  H
      [3, 5],  # D
      [2, 0],  # H
  ]
  the_matrix = {
      "component": "TheMatrix",
      "kwargs": {
          # Prevent interaction before both interactors have collected
          # at least one resource.
          "disallowUnreadyInteractions": True,
          "matrix": row_player_matrix,
          "columnPlayerMatrix": column_player_matrix,
          "resultIndicatorColorIntervals": [
              # red # yellow # green # blue
              (0.0, 1.5), (1.5, 2.5), (2.5, 3.5), (3.5, 5.0)
          ],
      },
  }
  episode_ending = {
      "component": "StochasticIntervalEpisodeEnding",
      "kwargs": {
          "minimumFramesPerEpisode": 1000,
          "intervalLength": 100,  # Set equal to unroll length.
          "probabilityTerminationPerInterval": 0.1,
      },
  }
  return {
      "name": "scene",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": "scene",
                  "stateConfigs": [{"state": "scene"}],
              },
          },
          {"component": "Transform"},
          the_matrix,
          episode_ending,
      ],
  }
def create_resource_prefab(
    resource_id: int,
    resource_shape: str,
    resource_palette: Dict[str, Tuple[int, int, int, int]]):
  """Creates resource prefab with provided resource_id, shape, and palette."""
  resource_name = f"resource_class{resource_id}"
  wait_state = resource_name + "_wait"
  sprite_name = resource_name + "_sprite"
  return {
      "name": resource_name,
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": resource_name,
                  "stateConfigs": [
                      {"state": wait_state,
                       "groups": ["resourceWaits"]},
                      {"state": resource_name,
                       "layer": "lowerPhysical",
                       "sprite": sprite_name},
                  ],
              },
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": [sprite_name],
                  "spriteShapes": [resource_shape],
                  "palettes": [resource_palette],
                  "noRotates": [True],
              },
          },
          {
              "component": "Resource",
              "kwargs": {
                  "resourceClass": resource_id,
                  "visibleType": resource_name,
                  "waitState": wait_state,
                  "regenerationRate": 0.02,
                  "regenerationDelay": 10,
              },
          },
          {
              # It is possible to destroy resources but takes concerted
              # effort to do so by zapping them `initialHealth` times.
              "component": "Destroyable",
              "kwargs": {
                  "waitState": wait_state,
                  "initialHealth": 3,
              },
          },
      ],
  }
def create_avatar_object(
    player_idx: int,
    all_source_sprite_names: Sequence[str],
    target_sprite_self: Dict[str, Any],
    target_sprite_other: Dict[str, Any],
    turn_off_default_reward: bool = False) -> Dict[str, Any]:
  """Create an avatar object given self vs other sprite data.

  Args:
    player_idx: 0-based index of the player controlling this avatar.
    all_source_sprite_names: source sprite names of all players; everyone but
      this player is remapped to the "other" sprite.
    target_sprite_self: sprite data used when this avatar views itself.
    target_sprite_other: sprite data used when viewing any other player.
    turn_off_default_reward: forwarded to InteractionTaste's
      `zeroDefaultInteractionReward`.

  Returns:
    A game-object config dict for the avatar.
  """
  # Lua is 1-indexed.
  lua_index = player_idx + 1
  # Setup the self vs other sprite mapping.
  source_sprite_self = "Avatar" + str(lua_index)
  custom_sprite_map = {source_sprite_self: target_sprite_self["name"]}
  for name in all_source_sprite_names:
    if name != source_sprite_self:
      custom_sprite_map[name] = target_sprite_other["name"]
  live_state_name = "player{}".format(lua_index)
  avatar_object = {
      "name": "avatar",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": live_state_name,
                  "stateConfigs": [
                      {"state": live_state_name,
                       "layer": "upperPhysical",
                       "sprite": source_sprite_self,
                       "contact": "avatar",
                       "groups": ["players"]},
                      # Wait state used while the player is zapped out.
                      {"state": "playerWait",
                       "groups": ["playerWaits"]},
                  ]
              }
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "colored_square",
                  "spriteNames": [source_sprite_self],
                  # A white square should never be displayed. It will always be
                  # remapped since this is self vs other observation mode.
                  "spriteRGBColors": [(255, 255, 255, 255)],
              }
          },
          {
              "component": "AdditionalSprites",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "customSpriteNames": [target_sprite_self["name"],
                                        target_sprite_other["name"]],
                  "customSpriteShapes": [target_sprite_self["shape"],
                                         target_sprite_other["shape"]],
                  "customPalettes": [target_sprite_self["palette"],
                                     target_sprite_other["palette"]],
                  "customNoRotates": [target_sprite_self["noRotate"],
                                      target_sprite_other["noRotate"]],
              }
          },
          {
              "component": "Avatar",
              "kwargs": {
                  "index": lua_index,
                  "aliveState": live_state_name,
                  "waitState": "playerWait",
                  "speed": 1.0,
                  "spawnGroup": "spawnPoints",
                  "actionOrder": ["move", "turn", "interact"],
                  "actionSpec": {
                      "move": {"default": 0, "min": 0, "max": len(_COMPASS)},
                      "turn": {"default": 0, "min": -1, "max": 1},
                      "interact": {"default": 0, "min": 0, "max": 1},
                  },
                  "view": {
                      "left": 2,
                      "right": 2,
                      "forward": 3,
                      "backward": 1,
                      "centered": False
                  },
                  "spriteMap": custom_sprite_map,
                  # The following kwarg makes it possible to get rewarded even
                  # on frames when an avatar is "dead". It is needed for in the
                  # matrix games in order to correctly handle the case of two
                  # players getting hit simultaneously by the same beam.
                  "skipWaitStateRewards": False,
              }
          },
          {
              "component": "GameInteractionZapper",
              "kwargs": {
                  "cooldownTime": 2,
                  "beamLength": 3,
                  "beamRadius": 1,
                  "framesTillRespawn": 5,
                  "numResources": NUM_RESOURCES,
                  "endEpisodeOnFirstInteraction": False,
                  # Reset both players' inventories after each interaction.
                  "reset_winner_inventory": True,
                  "reset_loser_inventory": True,
                  # Both players get removed after each interaction.
                  "losingPlayerDies": True,
                  "winningPlayerDies": True,
                  # `freezeOnInteraction` is the number of frames to display the
                  # interaction result indicator, freeze, and delay delivering
                  # all results of interacting.
                  "freezeOnInteraction": 16,
              }
          },
          {
              "component": "ReadyToShootObservation",
              "kwargs": {
                  "zapperComponent": "GameInteractionZapper",
              }
          },
          {
              "component": "InventoryObserver",
              "kwargs": {
              }
          },
          {
              "component": "SpawnResourcesWhenAllPlayersZapped",
          },
          {
              "component": "Taste",
              "kwargs": {
                  "mostTastyResourceClass": -1,  # -1 indicates no preference.
                  # No resource is most tasty when mostTastyResourceClass == -1.
                  "mostTastyReward": 0.1,
              }
          },
          {
              "component": "InteractionTaste",
              "kwargs": {
                  "mostTastyResourceClass": -1,  # -1 indicates no preference.
                  "zeroDefaultInteractionReward": turn_off_default_reward,
                  "extraReward": 1.0,
              }
          },
          {
              "component": "AvatarMetricReporter",
              "kwargs": {
                  "metrics": [
                      {
                          # Report the inventories of both players involved in
                          # an interaction on this frame formatted as
                          # (self inventory, partner inventory).
                          "name": "INTERACTION_INVENTORIES",
                          "type": "tensor.DoubleTensor",
                          "shape": (2, NUM_RESOURCES),
                          "component": "GameInteractionZapper",
                          "variable": "latest_interaction_inventories",
                      },
                      *the_matrix.get_cumulant_metric_configs(NUM_RESOURCES),
                  ]
              }
          },
      ]
  }
  if _ENABLE_DEBUG_OBSERVATIONS:
    avatar_object["components"].append(
        {
            "component": "LocationObserver",
            "kwargs": {
                "objectIsAvatar": True,
                "alsoReportOrientation": True
            }
        },
    )
  return avatar_object
def create_prefabs():
  """Returns a dictionary mapping names to template game objects."""
  def button_palette(color_data):
    # Shared palette layout for both resource buttons; "x" is transparent.
    return {"*": color_data[0], "#": color_data[1], "x": (0, 0, 0, 0)}

  return {
      "wall": WALL,
      "spawn_point": SPAWN_POINT,
      "resource_class1": create_resource_prefab(
          1, shapes.BUTTON, button_palette(RESOURCE1_COLOR_DATA)),
      "resource_class2": create_resource_prefab(
          2, shapes.BUTTON, button_palette(RESOURCE2_COLOR_DATA)),
  }
def get_all_source_sprite_names(num_players):
  """Returns the source sprite name ("Avatar<i>") for every player.

  Args:
    num_players: number of players in the episode.

  Returns:
    A list of sprite names, one per player. Lua is 1-indexed, so sprite
    numbering starts at "Avatar1".
  """
  return ["Avatar" + str(player_idx + 1) for player_idx in range(num_players)]
def create_avatar_objects(num_players,
                          turn_off_default_reward: bool = False):
  """Returns list of avatar objects of length 'num_players'.

  Each avatar is followed in the list by its readiness marker object.
  """
  source_sprite_names = get_all_source_sprite_names(num_players)
  objects = []
  for player_idx in range(num_players):
    objects.append(
        create_avatar_object(
            player_idx,
            source_sprite_names,
            TARGET_SPRITE_SELF,
            TARGET_SPRITE_OTHER,
            turn_off_default_reward=turn_off_default_reward))
    objects.append(the_matrix.create_ready_to_interact_marker(player_idx))
  return objects
def create_world_sprite_map(
    num_players: int, target_sprite_other: Dict[str, Any]) -> Dict[str, str]:
  """Maps every avatar's source sprite to the "other" sprite name.

  Used for WORLD.RGB so the global (video) view stays informative when
  individual avatar views remap sprites (self vs other observation mode).

  Args:
    num_players: number of players in the episode.
    target_sprite_other: sprite data dict; only its "name" entry is used.

  Returns:
    Mapping from each "Avatar<i>" source sprite name to the other-sprite name.
  """
  return {
      name: target_sprite_other["name"]
      for name in get_all_source_sprite_names(num_players)
  }
def get_config():
  """Default configuration.

  Returns:
    A ConfigDict with action/observation formats, per-agent specs, and the
    valid/default player roles for this two-player substrate.
  """
  config = config_dict.ConfigDict()

  # Other parameters that are useful to override in training config files.
  config.turn_off_default_reward = False

  # Action set configuration.
  config.action_set = ACTION_SET

  # Observation format configuration.
  config.individual_observation_names = [
      "RGB",
      "INVENTORY",
      "READY_TO_SHOOT",
      # Debug only (do not use the following observations in policies).
      "INTERACTION_INVENTORIES",
  ]
  config.global_observation_names = [
      "WORLD.RGB",
  ]

  # The specs of the environment (from a single-agent perspective).
  config.action_spec = specs.action(len(ACTION_SET))
  config.timestep_spec = specs.timestep({
      "RGB": specs.rgb(40, 40),
      # Two inventory slots: one per resource class (K = 2).
      "INVENTORY": specs.inventory(2),
      "READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
      # Debug only (do not use the following observations in policies).
      "INTERACTION_INVENTORIES": specs.interaction_inventories(2),
      "WORLD.RGB": specs.rgb(120, 184),
  })

  # The roles assigned to each player.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 2
  return config
def build(
    roles: Sequence[str],
    config: config_dict.ConfigDict,
) -> Mapping[str, Any]:
  """Build substrate definition given roles."""
  del config  # Unused: this substrate takes no build-time options.
  num_players = len(roles)
  simulation = {
      "map": ASCII_MAP,
      "gameObjects": create_avatar_objects(num_players=num_players),
      "scene": create_scene(),
      "prefabs": create_prefabs(),
      "charPrefabMap": CHAR_PREFAB_MAP,
      # worldSpriteMap is needed to make the global view used in videos
      # be informative in cases where individual avatar views have had
      # sprites remapped to one another (example: self vs other mode).
      "worldSpriteMap": create_world_sprite_map(num_players,
                                                TARGET_SPRITE_OTHER),
  }
  return dict(
      levelName="the_matrix",
      levelDirectory="meltingpot/lua/levels",
      numPlayers=num_players,
      # Define upper bound of episode length since episodes end
      # stochastically.
      maxEpisodeLengthFrames=5000,
      spriteSize=8,
      topology="BOUNDED",  # Choose from ["BOUNDED", "TORUS"].
      simulation=simulation,
  )
|
meltingpot-main
|
meltingpot/configs/substrates/chicken_in_the_matrix__repeated.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Running with Scissors in the Matrix (arena version).
Example video: https://youtu.be/6BL6JIbS2cE
This substrate is the same as _Running with Scissors in the Matrix_ except in
this case there are eight players and the map layout is different. Even though
there are eight players, they still interact in dyadic pairs via the usual
rock-paper-scissors payoff matrix.
Players have the default `11 x 11` (off center) observation window.
"""
from typing import Any, Dict, Mapping, Sequence
from meltingpot.configs.substrates import the_matrix
from meltingpot.utils.substrates import colors
from meltingpot.utils.substrates import game_object_utils
from meltingpot.utils.substrates import shapes
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
PrefabConfig = game_object_utils.PrefabConfig
# Warning: setting `_ENABLE_DEBUG_OBSERVATIONS = True` may cause slowdown.
_ENABLE_DEBUG_OBSERVATIONS = False
# The number of resources must match the (square) size of the matrix.
NUM_RESOURCES = 3

# This color is yellow.
RESOURCE1_COLOR = (255, 227, 11, 255)
RESOURCE1_HIGHLIGHT_COLOR = (255, 214, 91, 255)
RESOURCE1_COLOR_DATA = (RESOURCE1_COLOR, RESOURCE1_HIGHLIGHT_COLOR)
# This color is violet.
RESOURCE2_COLOR = (109, 42, 255, 255)
RESOURCE2_HIGHLIGHT_COLOR = (132, 91, 255, 255)
RESOURCE2_COLOR_DATA = (RESOURCE2_COLOR, RESOURCE2_HIGHLIGHT_COLOR)
# This color is cyan.
RESOURCE3_COLOR = (42, 188, 255, 255)
RESOURCE3_HIGHLIGHT_COLOR = (91, 214, 255, 255)
RESOURCE3_COLOR_DATA = (RESOURCE3_COLOR, RESOURCE3_HIGHLIGHT_COLOR)

# The map parser replaces all 'a' chars in the default map with chars
# representing specific resources, i.e. with either '1' or '2'.
# NOTE(review): the interior whitespace of this map appears to have been
# collapsed by a copy/extraction step — each row should be 25 chars wide to
# match the wall rows. Verify against the upstream source.
ASCII_MAP = """
WWWWWWWWWWWWWWWWWWWWWWWWW
WPPPP W W PPPPW
WPPPP PPPPW
WPPPP PPPPW
WPPPP PPPPW
W aa W
W 11 aa W
W 11 W
W 11 W
W WW W 222 W
WW 33 W 222 W
WWW 33 WWWWWWWWW W
W 33 111 WWW
W 111 W
W 22 W W
W 22 W WW W
W 22 W333 W
W 333 W
W aa W
WPPPP aa PPPPW
WPPPP PPPPW
WPPPP PPPPW
WPPPP W PPPPW
WWWWWWWWWWWWWWWWWWWWWWWWW
"""

_resource_names = [
    "resource_class1",
    "resource_class2",
    "resource_class3",
]

# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
    # "a" cells are randomly assigned one of the three resource classes.
    "a": {"type": "choice", "list": _resource_names},
    "1": _resource_names[0],
    "2": _resource_names[1],
    "3": _resource_names[2],
    "P": "spawn_point",
    "W": "wall",
}

_COMPASS = ["N", "E", "S", "W"]

# Prefab for impassable wall cells; also blocks the interaction beam.
WALL = {
    "name": "wall",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "wall",
                "stateConfigs": [{
                    "state": "wall",
                    "layer": "upperPhysical",
                    "sprite": "Wall",
                }],
            }
        },
        {
            "component": "Transform",
        },
        {
            "component": "Appearance",
            "kwargs": {
                "renderMode": "ascii_shape",
                "spriteNames": ["Wall"],
                "spriteShapes": [shapes.WALL],
                "palettes": [{"*": (95, 95, 95, 255),
                              "&": (100, 100, 100, 255),
                              "@": (109, 109, 109, 255),
                              "#": (152, 152, 152, 255)}],
                "noRotates": [False]
            }
        },
        {
            "component": "BeamBlocker",
            "kwargs": {
                "beamType": "gameInteraction"
            }
        },
    ]
}

# Prefab for (invisible) avatar spawn locations.
SPAWN_POINT = {
    "name": "spawnPoint",
    "components": [
        {
            "component": "StateManager",
            "kwargs": {
                "initialState": "spawnPoint",
                "stateConfigs": [{
                    "state": "spawnPoint",
                    "layer": "alternateLogic",
                    "groups": ["spawnPoints"]
                }],
            }
        },
        {
            "component": "Transform",
        },
    ]
}
# Remove the first entry from human_readable_colors after using it for the self
# color to prevent it from being used again as another avatar color.
human_readable_colors = list(colors.human_readable)
TARGET_SPRITE_SELF = {
    "name": "Self",
    "shape": shapes.CUTE_AVATAR,
    # pop(0) both selects the self color and removes it from the pool.
    "palette": shapes.get_palette(human_readable_colors.pop(0)),
    "noRotate": True,
}

# PLAYER_COLOR_PALETTES is a list with each entry specifying the color to use
# for the player at the corresponding index. A comprehension (rather than a
# for/append loop) avoids leaking a loop variable into module scope.
PLAYER_COLOR_PALETTES = [
    shapes.get_palette(human_readable_color)
    for human_readable_color in human_readable_colors
]
# Primitive action components.
# Each primitive assigns a value to every action channel; `move` 1-4 selects a
# relative direction (0 = stay), `turn` is -1/0/+1, `interact` is boolean.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "interact": 0}
FORWARD = {"move": 1, "turn": 0, "interact": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "interact": 0}
BACKWARD = {"move": 3, "turn": 0, "interact": 0}
STEP_LEFT = {"move": 4, "turn": 0, "interact": 0}
TURN_LEFT = {"move": 0, "turn": -1, "interact": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "interact": 0}
INTERACT = {"move": 0, "turn": 0, "interact": 1}
# pyformat: enable
# pylint: enable=bad-whitespace

# Tuple order defines the integer encoding of the discrete action space.
ACTION_SET = (
    NOOP,
    FORWARD,
    BACKWARD,
    STEP_LEFT,
    STEP_RIGHT,
    TURN_LEFT,
    TURN_RIGHT,
    INTERACT,
)
def create_scene():
  """Creates the global scene (rock-paper-scissors payoffs + ending)."""
  # Anti-symmetric payoff matrix: each pure strategy beats one other and
  # loses to the third.
  rps_matrix = [
      [0, -10, 10],
      [10, 0, -10],
      [-10, 10, 0]
  ]
  the_matrix = {
      "component": "TheMatrix",
      "kwargs": {
          # Prevent interaction before both interactors have collected
          # at least one resource.
          "disallowUnreadyInteractions": True,
          "matrix": rps_matrix,
          "resultIndicatorColorIntervals": [
              (-10.0, -5.0),  # red
              (-5.0, -2.5),   # yellow
              (-2.5, 2.5),    # green
              (2.5, 5.0),     # blue
              (5.0, 10.0)     # violet
          ],
      },
  }
  episode_ending = {
      "component": "StochasticIntervalEpisodeEnding",
      "kwargs": {
          "minimumFramesPerEpisode": 1000,
          "intervalLength": 100,  # Set equal to unroll length.
          "probabilityTerminationPerInterval": 0.2,
      },
  }
  return {
      "name": "scene",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": "scene",
                  "stateConfigs": [{"state": "scene"}],
              },
          },
          {"component": "Transform"},
          the_matrix,
          episode_ending,
      ],
  }
def create_resource_prefab(resource_id, color_data):
  """Creates resource prefab with provided `resource_id` (num) and color."""
  resource_name = f"resource_class{resource_id}"
  wait_state = resource_name + "_wait"
  sprite_name = resource_name + "_sprite"
  return {
      "name": resource_name,
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": resource_name,
                  "stateConfigs": [
                      {"state": wait_state,
                       "groups": ["resourceWaits"]},
                      {"state": resource_name,
                       "layer": "lowerPhysical",
                       "sprite": sprite_name},
                  ],
              },
          },
          {
              "component": "Transform",
          },
          {
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": [sprite_name],
                  "spriteShapes": [shapes.BUTTON],
                  "palettes": [{"*": color_data[0],
                                "#": color_data[1],
                                "x": (0, 0, 0, 0)}],
                  "noRotates": [False],
              },
          },
          {
              "component": "Resource",
              "kwargs": {
                  "resourceClass": resource_id,
                  "visibleType": resource_name,
                  "waitState": wait_state,
                  "regenerationRate": 0.04,
                  "regenerationDelay": 10,
              },
          },
          {
              # It is possible to destroy resources but takes concerted
              # effort to do so by zapping them `initialHealth` times.
              "component": "Destroyable",
              "kwargs": {
                  "waitState": wait_state,
                  "initialHealth": 3,
              },
          },
      ],
  }
def create_prefabs() -> PrefabConfig:
  """Returns the prefabs.

  Prefabs are a dictionary mapping names to template game objects that can
  be cloned and placed in multiple locations accoring to an ascii map.
  """
  # Resource classes and the color data used to render each of them.
  resource_colors = {
      1: RESOURCE1_COLOR_DATA,
      2: RESOURCE2_COLOR_DATA,
      3: RESOURCE3_COLOR_DATA,
  }
  prefabs = {
      "wall": WALL,
      "spawn_point": SPAWN_POINT,
  }
  for resource_id, color_data in resource_colors.items():
    prefabs[f"resource_class{resource_id}"] = create_resource_prefab(
        resource_id, color_data)
  return prefabs
def create_avatar_object(
    player_idx: int,
    target_sprite_self: Dict[str, Any],
    turn_off_default_reward: bool = False) -> Dict[str, Any]:
  """Create an avatar object that always sees itself as blue.

  Args:
    player_idx: 0-based index of the player controlling this avatar.
    target_sprite_self: sprite descriptor (keys "name", "shape", "palette",
      "noRotate") used when this avatar renders itself.
    turn_off_default_reward: if True, zero the default interaction reward
      (forwarded to the InteractionTaste component; used when training
      specialist background populations).

  Returns:
    A prefab-style dict defining the avatar game object and its components.
  """
  # Lua is 1-indexed.
  lua_index = player_idx + 1
  # Setup the self vs other sprite mapping.
  source_sprite_self = "Avatar" + str(lua_index)
  custom_sprite_map = {source_sprite_self: target_sprite_self["name"]}
  # State occupied while alive; "playerWait" is used while removed from play.
  live_state_name = "player{}".format(lua_index)
  avatar_object = {
      "name": "avatar",
      "components": [
          {
              "component": "StateManager",
              "kwargs": {
                  "initialState": live_state_name,
                  "stateConfigs": [
                      # Alive: drawn on the upper physical layer, contactable
                      # as an avatar, and grouped with the other players.
                      {"state": live_state_name,
                       "layer": "upperPhysical",
                       "sprite": source_sprite_self,
                       "contact": "avatar",
                       "groups": ["players"]},
                      # Removed from play (no layer/sprite => invisible).
                      {"state": "playerWait",
                       "groups": ["playerWaits"]},
                  ]
              }
          },
          {
              "component": "Transform",
          },
          {
              # Appearance as seen by *other* players.
              "component": "Appearance",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "spriteNames": [source_sprite_self],
                  "spriteShapes": [shapes.CUTE_AVATAR],
                  "palettes": [shapes.get_palette(
                      human_readable_colors[player_idx])],
                  "noRotates": [True]
              }
          },
          {
              # Registers the sprite used when this avatar views itself; the
              # spriteMap below swaps it in, so every player self-renders with
              # the same `target_sprite_self` appearance.
              "component": "AdditionalSprites",
              "kwargs": {
                  "renderMode": "ascii_shape",
                  "customSpriteNames": [target_sprite_self["name"]],
                  "customSpriteShapes": [target_sprite_self["shape"]],
                  "customPalettes": [target_sprite_self["palette"]],
                  "customNoRotates": [target_sprite_self["noRotate"]],
              }
          },
          {
              "component": "Avatar",
              "kwargs": {
                  "index": lua_index,
                  "aliveState": live_state_name,
                  "waitState": "playerWait",
                  "speed": 1.0,
                  "spawnGroup": "spawnPoints",
                  "actionOrder": ["move", "turn", "interact"],
                  "actionSpec": {
                      "move": {"default": 0, "min": 0, "max": len(_COMPASS)},
                      "turn": {"default": 0, "min": -1, "max": 1},
                      "interact": {"default": 0, "min": 0, "max": 1},
                  },
                  # Partial-observability window, in grid cells.
                  "view": {
                      "left": 5,
                      "right": 5,
                      "forward": 9,
                      "backward": 1,
                      "centered": False
                  },
                  "spriteMap": custom_sprite_map,
                  # The following kwarg makes it possible to get rewarded even
                  # on frames when an avatar is "dead". It is needed in the
                  # *in the matrix* games in order to correctly handle the case
                  # of two players getting hit simultaneously by the same beam.
                  "skipWaitStateRewards": False,
              }
          },
          {
              "component": "GameInteractionZapper",
              "kwargs": {
                  "cooldownTime": 2,
                  "beamLength": 3,
                  "beamRadius": 1,
                  "framesTillRespawn": 50,
                  "numResources": NUM_RESOURCES,
                  "endEpisodeOnFirstInteraction": False,
                  # Reset both players' inventories after each interaction.
                  # NOTE(review): these two keys are snake_case unlike the
                  # surrounding camelCase kwargs — confirm the Lua component
                  # expects these exact names.
                  "reset_winner_inventory": True,
                  "reset_loser_inventory": True,
                  # Both players get removed after each interaction.
                  "losingPlayerDies": True,
                  "winningPlayerDies": True,
                  # `freezeOnInteraction` is the number of frames to display the
                  # interaction result indicator, freeze, and delay delivering
                  # all results of interacting.
                  "freezeOnInteraction": 16,
              }
          },
          {
              # Exposes the READY_TO_SHOOT observation from the zapper state.
              "component": "ReadyToShootObservation",
              "kwargs": {
                  "zapperComponent": "GameInteractionZapper",
              }
          },
          {
              # Exposes the INVENTORY observation.
              "component": "InventoryObserver",
              "kwargs": {}
          },
          {
              "component": "Taste",
              "kwargs": {
                  "mostTastyResourceClass": -1,  # -1 indicates no preference.
                  "mostTastyReward": 1.0,  # reward for most tasty.
                  "defaultTastinessReward": 0.0,  # reward for others.
              }
          },
          {
              "component": "InteractionTaste",
              "kwargs": {
                  "mostTastyResourceClass": -1,  # -1 indicates no preference.
                  "zeroDefaultInteractionReward": turn_off_default_reward,
                  "extraReward": 1.0,
              }
          },
          {
              "component": "AvatarMetricReporter",
              "kwargs": {
                  "metrics": [
                      {
                          # Report the inventories of both players involved in
                          # an interaction on this frame formatted as
                          # (self inventory, partner inventory).
                          "name": "INTERACTION_INVENTORIES",
                          "type": "tensor.DoubleTensor",
                          "shape": (2, NUM_RESOURCES),
                          "component": "GameInteractionZapper",
                          "variable": "latest_interaction_inventories",
                      },
                      *the_matrix.get_cumulant_metric_configs(NUM_RESOURCES),
                  ]
              }
          },
      ]
  }
  # Debug-only observation of position/orientation.
  if _ENABLE_DEBUG_OBSERVATIONS:
    avatar_object["components"].append({
        "component": "LocationObserver",
        "kwargs": {"objectIsAvatar": True, "alsoReportOrientation": True},
    })
  return avatar_object
def create_avatar_objects(
    num_players: int,
    turn_off_default_reward: bool = False) -> Sequence[PrefabConfig]:
  """Returns all game objects for the map.

  Args:
    num_players: number of players to create avatars for.
    turn_off_default_reward: if true then zero the main game reward. This is
      used for training specialist background populations.
  """
  game_objects = []
  for idx in range(num_players):
    # Each player contributes an avatar plus its readiness indicator.
    game_objects.append(
        create_avatar_object(
            idx,
            TARGET_SPRITE_SELF,
            turn_off_default_reward=turn_off_default_reward))
    game_objects.append(the_matrix.create_ready_to_interact_marker(idx))
  return game_objects
def get_config():
  """Builds and returns the default configuration for this substrate."""
  config = config_dict.ConfigDict()

  # Useful to override in training config files: when True, the main game
  # reward is zeroed (for training specialist background populations).
  config.turn_off_default_reward = False

  # Action set configuration.
  config.action_set = ACTION_SET

  # Observation format configuration.
  config.individual_observation_names = [
      "RGB",
      "INVENTORY",
      "READY_TO_SHOOT",
      # Debug only (do not use the following observations in policies).
      "INTERACTION_INVENTORIES",
  ]
  config.global_observation_names = [
      "WORLD.RGB",
  ]

  # The specs of the environment (from a single-agent perspective).
  config.action_spec = specs.action(len(ACTION_SET))
  timestep_fields = {
      "RGB": specs.OBSERVATION["RGB"],
      "INVENTORY": specs.inventory(3),
      "READY_TO_SHOOT": specs.OBSERVATION["READY_TO_SHOOT"],
      # Debug only (do not use the following observations in policies).
      "INTERACTION_INVENTORIES": specs.interaction_inventories(3),
      "WORLD.RGB": specs.rgb(192, 200),
  }
  config.timestep_spec = specs.timestep(timestep_fields)

  # The roles assigned to each player.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 8
  return config
def build(
    roles: Sequence[str],
    config: config_dict.ConfigDict,
) -> Mapping[str, Any]:
  """Build substrate definition given roles."""
  del config  # Unused: this substrate takes no per-build configuration here.
  num_players = len(roles)
  simulation = {
      "map": ASCII_MAP,
      "gameObjects": create_avatar_objects(num_players=num_players),
      "scene": create_scene(),
      "prefabs": create_prefabs(),
      "charPrefabMap": CHAR_PREFAB_MAP,
  }
  return {
      "levelName": "the_matrix",
      "levelDirectory": "meltingpot/lua/levels",
      "numPlayers": num_players,
      # Define upper bound of episode length since episodes end stochastically.
      "maxEpisodeLengthFrames": 5000,
      "spriteSize": 8,
      "topology": "BOUNDED",  # Choose from ["BOUNDED", "TORUS"],
      "simulation": simulation,
  }
|
meltingpot-main
|
meltingpot/configs/substrates/running_with_scissors_in_the_matrix__arena.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Collaborative Cooking: Figure Eight.
Example video: https://youtu.be/hUCbOL5l-Gw
The recipe they must follow is for tomato soup:
1. Add three tomatoes to the cooking pot.
2. Wait for the soup to cook (status bar completion).
3. Bring a bowl to the pot and pour the soup from the pot into the bowl.
4. Deliver the bowl of soup at the goal location.
This substrate is a pure common interest game. All players share all rewards.
Players have a `5 x 5` observation window.
Map:
Figure Eight: The map is a figure eight shaped maze that generates numerous
places where players might get in one another's way, blocking critical paths.
While it is technically possible for a single player to complete the task on
their own it is very unlikely that poor performing partners would get out of its
way, so in practice, collaboration is essential.
"""
from meltingpot.configs.substrates import collaborative_cooking as base_config
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
build = base_config.build
# Figure Eight: Strong performance on this map requires two stages of teamwork.
# Presumed legend (defined by the collaborative_cooking base config prefabs —
# confirm there): # = counter, C = cooking pot, P = player spawn,
# O = dish dispenser, T = tomato dispenser, D = delivery location.
ASCII_MAP = """
################
####C#C##C#C####
# P P #
## ########## ##
# P P #
## ########## ##
# P P #
### #ODTTOD# ###
################
"""
def get_config():
  """Returns the default configuration for the figure-eight layout."""
  config = base_config.get_config()

  # Swap in this substrate's map layout.
  layout = config_dict.ConfigDict()
  layout.ascii_map = ASCII_MAP
  config.layout = layout

  # The specs of the environment (from a single-agent perspective).
  observation_spec = {
      "RGB": specs.rgb(40, 40),
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(72, 128),
  }
  config.timestep_spec = specs.timestep(observation_spec)

  # The roles assigned to each player.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 6
  return config
|
meltingpot-main
|
meltingpot/configs/substrates/collaborative_cooking__figure_eight.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Collaborative Cooking: Cramped.
Example video: https://youtu.be/8qQFbxO8UNY
The recipe they must follow is for tomato soup:
1. Add three tomatoes to the cooking pot.
2. Wait for the soup to cook (status bar completion).
3. Bring a bowl to the pot and pour the soup from the pot into the bowl.
4. Deliver the bowl of soup at the goal location.
This substrate is a pure common interest game. All players share all rewards.
Players have a `5 x 5` observation window.
Map:
Cramped Room: A tight layout requiring significant movement coordination between
the players in order to avoid being blocked by each other.
"""
from meltingpot.configs.substrates import collaborative_cooking as base_config
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
build = base_config.build
# Cramped Room: A tight layout requiring significant movement coordination
# between the players in order to avoid being blocked by each other.
# Presumed legend (defined by the collaborative_cooking base config prefabs —
# confirm there): # = counter, C = cooking pot, P = player spawn,
# O = dish dispenser, T = tomato dispenser, D = delivery location, x = void.
ASCII_MAP = """
xx##C##xx
xxOP POxx
xx# #xx
xx#D#T#xx
xxxxxxxxx
"""
def get_config():
  """Returns the default configuration for the cramped-room layout."""
  config = base_config.get_config()

  # Swap in this substrate's map layout.
  layout = config_dict.ConfigDict()
  layout.ascii_map = ASCII_MAP
  config.layout = layout

  # The specs of the environment (from a single-agent perspective).
  observation_spec = {
      "RGB": specs.rgb(40, 40),
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(40, 72),
  }
  config.timestep_spec = specs.timestep(observation_spec)

  # The roles assigned to each player.
  config.valid_roles = frozenset({"default"})
  config.default_player_roles = ("default",) * 2
  return config
|
meltingpot-main
|
meltingpot/configs/substrates/collaborative_cooking__cramped.py
|
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for predator_prey__random_forest.
Example video: https://youtu.be/ZYkXwvn5_Sc
See predator_prey.py for a detailed description applicable to all predator_prey
substrates.
In this variant there are only acorns, no apples. And, there is no fully safe
tall grass. The tall grass that there is on this map is never large enough for
prey to be fully safe from predation. The grass merely provides an obstacle that
predators must navigate around while chasing prey.
"""
from meltingpot.configs.substrates import predator_prey as base_config
from meltingpot.utils.substrates import map_helpers
from meltingpot.utils.substrates import specs
from ml_collections import config_dict
build = base_config.build
# Map of the substrate. See CHAR_PREFAB_MAP below for what each character
# places; notably `A` is an acorn site, `Q` and `M` are randomized
# acorn-or-floor / grass-or-floor tiles, `X` is predator-spawn floor and
# `@` is prey-spawn floor.
ASCII_MAP = """
/;___________________,/
;]XAXXXXXXXAXXXXXXXAX[,
!XXXXXXXXXXXXXXXXXXXXX|
!''''M'M''MMM''M'M''''|
!'M''M'MM''Q''MM'M''M'|
!'MQ'M''MMMMMMM''M'QM'|
!''''''QM'''''MQ''''''|
!M'MMMMMM@@@@@MMMMMM'M|
!M''''''@@@@@@@''''''M|
!Q'MMQ''@@@A@@@''QMM'Q|
!M''''''@@@@@@@''''''M|
!M'MMMMMM@@@@@MMMMMM'M|
!''''''QM'''''MQ''''''|
!'MQ'M''MMMMMMM''M'QM'|
!'M''M'MM''Q''MM'M''M'|
!''''M'M''MMM''M'M''''|
!XXXXXXXXXXXXXXXXXXXXX|
L+XAXXXXXXXAXXXXXXXAX=J
/L~~~~~~~~~~~~~~~~~~~J/
"""
# Composite tiles: a prefab of "type": "all" places every prefab in `list`
# on the same map cell (here: floor plus a spawn point or an acorn).
prey_spawn_point = {"type": "all", "list": ["tiled_floor", "spawn_point_prey"]}
predator_spawn_point = {"type": "all", "list": ["tiled_floor",
                                                "spawn_point_predator"]}
acorn = {"type": "all", "list": ["tiled_floor", "floor_acorn"]}
# `prefab` determines which prefab game object to use for each `char` in the
# ascii map.
CHAR_PREFAB_MAP = {
    "@": prey_spawn_point,
    "*": {"type": "all", "list": ["safe_grass", "spawn_point_prey"]},
    "&": {"type": "all", "list": ["tiled_floor", "apple", "spawn_point_prey"]},
    "X": predator_spawn_point,
    "a": {"type": "all", "list": ["tiled_floor", "apple"]},
    "A": acorn,
    ";": "nw_wall_corner",
    ",": "ne_wall_corner",
    "J": "se_wall_corner",
    "L": "sw_wall_corner",
    "_": "wall_north",
    "|": "wall_east",
    "~": "wall_south",
    "!": "wall_west",
    "=": "nw_inner_wall_corner",
    "+": "ne_inner_wall_corner",
    "]": "se_inner_wall_corner",
    "[": "sw_inner_wall_corner",
    "'": "tiled_floor",
    "#": "safe_grass",
    "<": "safe_grass_w_edge",
    "^": "safe_grass_n_edge",
    ">": "safe_grass_e_edge",
    "v": "safe_grass_s_edge",
    "l": "safe_grass_ne_corner",
    "j": "safe_grass_se_corner",
    "z": "safe_grass_sw_corner",
    "r": "safe_grass_nw_corner",
    "/": "fill",
    # Randomized tiles at odds a:b = 1:2 (see map_helpers.a_or_b_with_odds).
    "Q": map_helpers.a_or_b_with_odds(acorn, "tiled_floor", odds=(1, 2)),
    "M": map_helpers.a_or_b_with_odds("safe_grass", "tiled_floor", odds=(1, 2)),
}
def get_config():
  """Returns the default configuration for the random-forest layout."""
  config = base_config.get_config()

  # Swap in this substrate's map layout and character-to-prefab mapping.
  layout = config_dict.ConfigDict()
  layout.ascii_map = ASCII_MAP
  layout.char_prefab_map = CHAR_PREFAB_MAP
  config.layout = layout

  # The specs of the environment (from a single-agent perspective).
  observation_spec = {
      "RGB": specs.OBSERVATION["RGB"],
      "STAMINA": specs.float64(),
      # Debug only (do not use the following observations in policies).
      "WORLD.RGB": specs.rgb(152, 184),
  }
  config.timestep_spec = specs.timestep(observation_spec)

  # The roles assigned to each player.
  config.default_player_roles = ("predator",) * 5 + ("prey",) * 8
  return config
|
meltingpot-main
|
meltingpot/configs/substrates/predator_prey__random_forest.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.