diff --git a/environments/factory/base_factory.py b/environments/factory/base_factory.py
index 25e089c..c7d9863 100644
--- a/environments/factory/base_factory.py
+++ b/environments/factory/base_factory.py
@@ -8,6 +8,10 @@ from environments import helpers as h
 
 class BaseFactory:
 
+    @property
+    def movement_actions(self):
+        return (int(self.allow_vertical_movement) + int(self.allow_horizontal_movement)) * 4
+
     def __init__(self, level='simple', n_agents=1, max_steps=1e3):
         self.n_agents = n_agents
         self.max_steps = max_steps
@@ -64,8 +68,7 @@ class BaseFactory:
         return self.state, r, self.done, info
 
     def _is_moving_action(self, action):
-        movement_actions = (int(self.allow_vertical_movement) + int(self.allow_horizontal_movement)) * 4
-        if action < movement_actions:
+        if action < self.movement_actions:
             return True
         else:
             return False
@@ -106,6 +109,11 @@ class BaseFactory:
             # Agent seems to be trying to collide in this step
             return old_pos, valid
 
+    def agent_i_position(self, agent_i):
+        # each agent occupies exactly one cell, so argwhere yields a (1, 2) array
+        pos_x, pos_y = np.argwhere(self.state[h.AGENT_START_IDX + agent_i] == h.IS_OCCUPIED_CELL)[0]
+        return pos_x, pos_y
+
     @property
     def free_cells(self) -> np.ndarray:
         free_cells = self.state.sum(0)
diff --git a/environments/factory/simple_factory_getting_dirty.py b/environments/factory/simple_factory_getting_dirty.py
index c3a8b01..739a480 100644
--- a/environments/factory/simple_factory_getting_dirty.py
+++ b/environments/factory/simple_factory_getting_dirty.py
@@ -1,6 +1,8 @@
 import numpy as np
 from environments.factory.base_factory import BaseFactory
 from collections import namedtuple
+from typing import Iterable
+from environments import helpers as h
 
 DIRT_INDEX = -1
 DirtProperties = namedtuple('DirtProperties', ['clean_amount', 'max_spawn_ratio', 'gain_amount'],
@@ -9,29 +11,46 @@ DirtProperties = namedtuple('DirtProperties', ['clean_amount', 'max_spawn_ratio'
 
 class GettingDirty(BaseFactory):
 
+    @property
+    def _clean_up_action(self):
+        return self.movement_actions + 1
+
     def __init__(self, *args, dirt_properties: DirtProperties, **kwargs):
         super(GettingDirty, self).__init__(*args, **kwargs)
         self._dirt_properties = dirt_properties
         self.slice_strings.update({self.state.shape[0]-1: 'dirt'})
 
-    def spawn_dirt(self):
+    def spawn_dirt(self) -> None:
         free_for_dirt = self.free_cells
         # randomly distribute dirt across the grid
-        n_dirt_tiles = self._dirt_properties.max_spawn_ratio * len(free_for_dirt)
+        n_dirt_tiles = int(self._dirt_properties.max_spawn_ratio * len(free_for_dirt))
         for x, y in free_for_dirt[:n_dirt_tiles]:
             self.state[DIRT_INDEX, x, y] += self._dirt_properties.gain_amount
 
-    def additional_actions(self, agent_i, action) -> ((int, int), bool):
-        if action ==
+    def clean_up(self, pos: (int, int)) -> ((int, int), bool):
+        if self.state[DIRT_INDEX][pos] == h.IS_FREE_CELL:
+            # nothing to clean at this position
+            return pos, False
+        else:
+            new_dirt_amount = self.state[DIRT_INDEX][pos] - self._dirt_properties.clean_amount
+            self.state[DIRT_INDEX][pos] = max(new_dirt_amount, h.IS_FREE_CELL)
+            return pos, True
 
-    def reset(self):
+    def additional_actions(self, agent_i, action) -> ((int, int), bool):
+        if not self._is_moving_action(action):
+            if action == self._clean_up_action:
+                return self.clean_up(self.agent_i_position(agent_i))
+            else:
+                raise RuntimeError('This should not happen!!!')
+
+    def reset(self) -> None:
         # ToDo: When self.reset returns the new states and stuff, use it here!
         super().reset()  # state, agents, ...
         dirt_slice = np.zeros((1, *self.state.shape[1:]))
         self.state = np.concatenate((self.state, dirt_slice))  # dirt is now the last slice
         self.spawn_dirt()
 
-    def calculate_reward(self, collisions_vecs, actions, r):
+    def calculate_reward(self, collisions_vecs: np.ndarray, actions: Iterable[int], r: int) -> (int, dict):
         for agent_i, cols in enumerate(collisions_vecs):
             cols = np.argwhere(cols != 0).flatten()
             print(f't = {self.steps}\tAgent {agent_i} has collisions with '
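
A minimal usage sketch of how the new pieces fit together (not part of the diff). It assumes BaseFactory exposes a Gym-style step() — the method returning `self.state, r, self.done, info` in the second hunk — and the DirtProperties values below are placeholders chosen for illustration, not defaults from the repo.

    from environments.factory.simple_factory_getting_dirty import GettingDirty, DirtProperties

    # hypothetical property values, for illustration only
    dirt_props = DirtProperties(clean_amount=1, max_spawn_ratio=0.2, gain_amount=0.3)
    factory = GettingDirty(level='simple', n_agents=1, dirt_properties=dirt_props)
    factory.reset()  # appends the dirt slice to the state and spawns the initial dirt

    # actions 0..movement_actions-1 move the agent; _clean_up_action is dispatched
    # through additional_actions() and cleans the agent's current cell
    state, reward, done, info = factory.step(factory._clean_up_action)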