from typing import List, Union, NamedTuple

import random

import numpy as np

from environments.factory.base_factory import BaseFactory
from environments import helpers as h
from environments.factory.renderer import Renderer, Entity
from environments.utility_classes import AgentState, MovementProperties

DIRT_INDEX = -1
CLEAN_UP_ACTION = 'clean_up'


class DirtProperties(NamedTuple):
    clean_amount: int = 2         # How much dirt one clean-up action removes.
    max_spawn_ratio: float = 0.2  # Upper bound on the fraction of free tiles that can receive dirt per spawn.
    gain_amount: float = 0.5      # How much dirt is added to a tile per spawn.
    spawn_frequency: int = 5      # Spawn frequency in steps.
    max_local_amount: int = 1     # Maximum dirt amount per tile.
    max_global_amount: int = 20   # Maximum dirt amount in the whole environment.
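
# Being a NamedTuple, DirtProperties can be configured per field while keeping
# the remaining defaults; the values below are illustrative, not from this repo:
#
#   dirt_props = DirtProperties(clean_amount=3, spawn_frequency=10)
#   dirt_props.max_local_amount  # -> 1 (default unchanged)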


# noinspection PyAttributeOutsideInit
class SimpleFactory(BaseFactory):

    @property
    def additional_actions(self) -> List[str]:
        return [CLEAN_UP_ACTION]

    def _is_clean_up_action(self, action: Union[str, int]):
        if isinstance(action, str):
            action = self._actions.by_name(action)
        return self._actions[action] == CLEAN_UP_ACTION
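
    # Both call forms are equivalent here (illustrative):
    #   self._is_clean_up_action(CLEAN_UP_ACTION)                          # by name
    #   self._is_clean_up_action(self._actions.by_name(CLEAN_UP_ACTION))   # by integer index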

    def __init__(self, *args, dirt_properties: DirtProperties = DirtProperties(), verbose=False, **kwargs):
        self.dirt_properties = dirt_properties
        self.verbose = verbose
        self.max_dirt = 20
        self._renderer = None  # expensive - only created lazily in render() when actually needed!
        super(SimpleFactory, self).__init__(*args, additional_slices=['dirt'], **kwargs)
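
    # Note: 'dirt' is passed to the base class as an additional state slice;
    # reset() below concatenates it as the *last* slice of self._state, which
    # is why DIRT_INDEX = -1 addresses it.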

    def render(self, mode='human'):

        if not self._renderer:  # lazy init
            height, width = self._state.shape[1:]
            self._renderer = Renderer(width, height, view_radius=self.pomdp_radius, fps=5)

        # Scale each dirt sprite with the amount of dirt on its tile.
        dirt = [Entity('dirt', [x, y], min(0.15 + self._state[DIRT_INDEX, x, y], 1.5), 'scale')
                for x, y in np.argwhere(self._state[DIRT_INDEX] > h.IS_FREE_CELL)]
        walls = [Entity('wall', pos)
                 for pos in np.argwhere(self._state[self._state_slices.by_name(h.LEVEL)] > h.IS_FREE_CELL)]

        def asset_str(agent):
            # Pick the asset and state tint for this agent based on its last action and collisions.
            if any([x is None for x in [self._state_slices[j] for j in agent.collisions]]):
                print('error')  # debug: a collision refers to an unknown state slice
            cols = ' '.join([self._state_slices[j] for j in agent.collisions])
            if h.AGENT in cols:
                return 'agent_collision', 'blank'
            elif not agent.action_valid or 'level' in cols:
                return h.AGENT, 'invalid'
            elif self._is_clean_up_action(agent.action):
                return h.AGENT, 'valid'
            else:
                return h.AGENT, 'idle'

        agents = []
        for i, agent in enumerate(self._agent_states):
            name, state = asset_str(agent)
            agents.append(Entity(name, agent.pos, 1, 'none', state, i + 1))

        doors = []
        if self.has_doors:
            for i, door in enumerate(self._door_states):
                name, state = 'door_open' if door.is_open else 'door_closed', 'blank'
                doors.append(Entity(name, door.pos, 1, 'none', state, i + 1))
        self._renderer.render(dirt + walls + agents + doors)

    def spawn_dirt(self) -> None:
        if np.argwhere(self._state[DIRT_INDEX] != h.IS_FREE_CELL).shape[0] <= self.dirt_properties.max_global_amount:
            free_for_dirt = self.free_cells(excluded_slices=DIRT_INDEX)

            # Randomly distribute dirt across the grid.
            n_dirt_tiles = int(random.uniform(0, self.dirt_properties.max_spawn_ratio) * len(free_for_dirt))
            for x, y in free_for_dirt[:n_dirt_tiles]:
                new_value = self._state[DIRT_INDEX, x, y] + self.dirt_properties.gain_amount
                # Clip the per-tile amount at max_local_amount.
                self._state[DIRT_INDEX, x, y] = min(new_value, self.dirt_properties.max_local_amount)
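
    # Worked example with the defaults above: given 50 free tiles and
    # max_spawn_ratio=0.2, n_dirt_tiles is uniform in [0, 10), so at most
    # roughly 20% of the free tiles gain gain_amount dirt per spawn cycle.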

    def clean_up(self, pos: (int, int)) -> ((int, int), bool):
        new_dirt_amount = self._state[DIRT_INDEX][pos] - self.dirt_properties.clean_amount
        cleanup_was_successful: bool
        if self._state[DIRT_INDEX][pos] == h.IS_FREE_CELL:
            cleanup_was_successful = False
            return pos, cleanup_was_successful
        else:
            cleanup_was_successful = True
            self._state[DIRT_INDEX][pos] = max(new_dirt_amount, h.IS_FREE_CELL)
            return pos, cleanup_was_successful
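
    # Return contract: (pos, success). success is False only if the tile was
    # already clean; otherwise the dirt level drops by clean_amount, floored
    # at h.IS_FREE_CELL, so a tile never goes below "clean".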

    def step(self, actions):
        _, reward, done, info = super(SimpleFactory, self).step(actions)
        if not self._next_dirt_spawn:
            self.spawn_dirt()
            self._next_dirt_spawn = self.dirt_properties.spawn_frequency
        else:
            self._next_dirt_spawn -= 1

        obs = self._get_observations()
        return obs, reward, done, info

    def do_additional_actions(self, agent_i: int, action: int) -> ((int, int), bool):
        if not self._actions.is_moving_action(action):
            if self._is_clean_up_action(action):
                agent_i_pos = self.agent_i_position(agent_i)
                _, valid = self.clean_up(agent_i_pos)
                return agent_i_pos, valid
            else:
                raise RuntimeError('This should not happen: unknown additional action.')
        else:
            raise RuntimeError('This should not happen: got a movement action.')
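
    # Presumably the base class routes every non-movement action to
    # do_additional_actions(); CLEAN_UP_ACTION (registered via
    # additional_actions above) is the only such action in this factory.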

    def reset(self) -> (np.ndarray, int, bool, dict):
        _ = super().reset()  # state, reward, done, info ... =
        dirt_slice = np.zeros((1, *self._state.shape[1:]))
        self._state = np.concatenate((self._state, dirt_slice))  # dirt is now the last slice
        self.spawn_dirt()
        self._next_dirt_spawn = self.dirt_properties.spawn_frequency
        obs = self._get_observations()
        return obs
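
    # Shape bookkeeping: if the base state is (n_slices, height, width), the
    # dirt slice is (1, height, width), so the concatenated state becomes
    # (n_slices + 1, height, width) and DIRT_INDEX = -1 selects the dirt slice.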

    def calculate_reward(self, agent_states: List[AgentState]) -> (int, dict):
        info_dict = dict()
        current_dirt_amount = self._state[DIRT_INDEX].sum()
        dirty_tiles = np.argwhere(self._state[DIRT_INDEX] != h.IS_FREE_CELL).shape[0]
        info_dict.update(dirt_amount=current_dirt_amount)
        info_dict.update(dirty_tile_count=dirty_tiles)

        # penalty = current_dirt_amount
        reward = 0

        for agent_state in agent_states:
            agent_name = f'{h.AGENT.capitalize()} {agent_state.i}'
            cols = agent_state.collisions

            list_of_collisions = [self._state_slices[entity] for entity in cols
                                  if entity != self._state_slices.by_name('dirt')]

            if list_of_collisions:
                self.print(f't = {self._steps}\t{agent_name} has collisions with {list_of_collisions}')

            if self._is_clean_up_action(agent_state.action):
                if agent_state.action_valid:
                    reward += 1
                    self.print(f'{agent_name} just cleaned up some dirt at {agent_state.pos}.')
                    info_dict.update(dirt_cleaned=1)
                else:
                    reward -= 0.01
                    self.print(f'{agent_name} just tried to clean up some dirt at {agent_state.pos}, but failed.')
                    info_dict.update({f'{h.AGENT}_{agent_state.i}_failed_action': 1})
                    info_dict.update({f'{h.AGENT}_{agent_state.i}_failed_dirt_cleanup': 1})

            elif self._actions.is_moving_action(agent_state.action):
                if agent_state.action_valid:
                    # info_dict.update(movement=1)
                    reward -= 0.00
                else:
                    # self.print('collision')
                    reward -= 0.01

            elif self._actions.is_door_usage(agent_state.action):
                if agent_state.action_valid:
                    reward += 0.1
                    self.print(f'{agent_name} just used the door at {agent_state.pos}.')
                    info_dict.update(door_used=1)
                else:
                    self.print(f'{agent_name} just tried to use a door at {agent_state.pos}, but failed.')
                    info_dict.update({f'{h.AGENT}_{agent_state.i}_failed_action': 1})
                    info_dict.update({f'{h.AGENT}_{agent_state.i}_failed_door_open': 1})

            else:
                info_dict.update(no_op=1)
                reward -= 0.00

            for entity in list_of_collisions:
                entity = h.AGENT if h.AGENT in entity else entity
                info_dict.update({f'{h.AGENT}_{agent_state.i}_vs_{entity}': 1})

        self.print(f'reward is {reward}')
        # Potential-based rewards: track the last reward; the difference to the
        # current reward serves as the potential.
        return reward, info_dict
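
    # A minimal sketch of the shaping note above (illustrative, not part of
    # this environment): with a potential Phi(s) = -total_dirt(s), the usual
    # potential-based shaping term (Ng et al., 1999) F = gamma * Phi(s') - Phi(s)
    # would be added on top of the raw reward, e.g.:
    #
    #   def _shaping_term(self, gamma: float = 0.99) -> float:
    #       current_potential = -self._state[DIRT_INDEX].sum()
    #       shaping = gamma * current_potential - self._last_potential
    #       self._last_potential = current_potential
    #       return shaping
    #
    # where self._last_potential would need to be initialized in reset().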

    def print(self, string):
        if self.verbose:
            print(string)


if __name__ == '__main__':
    render = False

    move_props = MovementProperties(allow_diagonal_movement=True, allow_square_movement=True)
    dirt_props = DirtProperties()
    factory = SimpleFactory(movement_properties=move_props, dirt_properties=dirt_props, n_agents=10,
                            combin_agent_slices_in_obs=True, level_name='rooms',
                            pomdp_radius=3)

    n_actions = factory.action_space.n - 1
    _ = factory.observation_space

    for epoch in range(10000):
        # random.randint is inclusive on both ends, so this samples the full action space.
        random_actions = [[random.randint(0, n_actions) for _ in range(factory.n_agents)] for _ in range(200)]
        env_state = factory.reset()
        r = 0
        for agent_i_action in random_actions:
            env_state, step_r, done_bool, info_obj = factory.step(agent_i_action)
            r += step_r
            if render:
                factory.render()
            if done_bool:
                break
        print(f'Factory run {epoch} done, reward is:\n {r}')