diff --git a/environments/factory/base_factory.py b/environments/factory/base_factory.py
index 3bd1286..25e089c 100644
--- a/environments/factory/base_factory.py
+++ b/environments/factory/base_factory.py
@@ -1,4 +1,5 @@
 import random
+from typing import Tuple, List, Union, Iterable
 
 import numpy as np
 from pathlib import Path
@@ -10,6 +11,8 @@ class BaseFactory:
     def __init__(self, level='simple', n_agents=1, max_steps=1e3):
         self.n_agents = n_agents
         self.max_steps = max_steps
+        self.allow_vertical_movement = True
+        self.allow_horizontal_movement = True
         self.level = h.one_hot_level(
             h.parse_level(Path(__file__).parent / h.LEVELS_DIR / f'{level}.txt')
         )
@@ -38,49 +41,66 @@ class BaseFactory:
         actions = [actions] if isinstance(actions, int) else actions
         assert isinstance(actions, list), f'"actions has to be in [{int, list}]'
         self.steps += 1
+
+        # FixMe: Why do we need this?
         r = 0
+        # Move this in a seperate function?
         actions = list(enumerate(actions))
         random.shuffle(actions)
         for agent_i, action in actions:
-            if action <= 8:
-                pos, did_collide = self.move_or_colide(agent_i, action)
+            if self._is_moving_action(action):
+                pos, valid = self.move_or_colide(agent_i, action)
             else:
-                pos, did_collide = self.additional_actions(agent_i, action)
-            actions[agent_i] = (pos, did_collide)
+                pos, valid = self.additional_actions(agent_i, action)
+            actions[agent_i] = (agent_i, action, pos, valid)
 
-        collision_vecs = np.zeros((self.n_agents, self.state.shape[0]))  # n_agents x n_slices
-        for agent_i, action in enumerate(actions):
-            collision_vecs[agent_i] = self.check_collisions(agent_i, *action)
+        collision_vecs = self.check_all_collisions(actions, self.state.shape[0])
 
-        reward, info = self.step_core(collision_vecs, actions, r)
+        reward, info = self.calculate_reward(collision_vecs, [a[1] for a in actions], r)
         r += reward
         if self.steps >= self.max_steps:
             self.done = True
         return self.state, r, self.done, info
 
-    def check_collisions(self, agent_i, pos, valid):
+    def _is_moving_action(self, action):
+        movement_actions = (int(self.allow_vertical_movement) + int(self.allow_horizontal_movement)) * 4
+        if action < movement_actions:
+            return True
+        else:
+            return False
+
+    def check_all_collisions(self, agent_action_pos_valid_tuples: (int, int, (int, int), bool), collisions: int) -> np.ndarray:
+        collision_vecs = np.zeros((len(agent_action_pos_valid_tuples), collisions))  # n_agents x n_slices
+        for agent_i, action, pos, valid in agent_action_pos_valid_tuples:
+            if self._is_moving_action(action):
+                collision_vecs[agent_i] = self.check_collisions(agent_i, pos, valid)
+        return collision_vecs
+
+    def check_collisions(self, agent_i: int, pos: (int, int), valid: bool) -> np.ndarray:
         pos_x, pos_y = pos
-        collisions_vec = self.state[:, pos_x, pos_y].copy()  # "vertical fiber" at position of agent i
+        # FixMe: We need to find a way to spare out some dimensions, eg. an info dimension etc... a[?,]
+        collisions_vec = self.state[:, pos_x, pos_y].copy()  # "vertical fiber" at position of agent i
         collisions_vec[h.AGENT_START_IDX + agent_i] = h.IS_FREE_CELL  # no self-collisions
         if valid:
+            # ToDo: Place a function hook here
             pass
         else:
             collisions_vec[h.LEVEL_IDX] = h.IS_OCCUPIED_CELL
         return collisions_vec
 
-    def move(self, agent_i, old_pos, new_pos):
+    def do_move(self, agent_i: int, old_pos: (int, int), new_pos: (int, int)) -> None:
         (x, y), (x_new, y_new) = old_pos, new_pos
         self.state[agent_i + h.AGENT_START_IDX, x, y] = h.IS_FREE_CELL
         self.state[agent_i + h.AGENT_START_IDX, x_new, y_new] = h.IS_OCCUPIED_CELL
 
-    def move_or_colide(self, agent_i, action) -> ((int, int), bool):
+    def move_or_colide(self, agent_i: int, action: int) -> ((int, int), bool):
         old_pos, new_pos, valid = h.check_agent_move(state=self.state,
-                                                     dim=agent_i + h.AGENT_START_IDX,
-                                                     action=action)
+                                                      dim=agent_i + h.AGENT_START_IDX,
+                                                      action=action)
         if valid:  # Does not collide width level boundrys
-            self.move(agent_i, old_pos, new_pos)
+            self.do_move(agent_i, old_pos, new_pos)
             return new_pos, valid
         else:  # Agent seems to be trying to collide in this step
@@ -93,7 +113,7 @@ class BaseFactory:
             np.random.shuffle(free_cells)
         return free_cells
 
-    def step_core(self, collisions_vec, actions, r):
+    def calculate_reward(self, collisions_vec: np.ndarray, actions: Iterable[int], r: int) -> (int, dict):
         # Returns: Reward, Info
         # Set to "raise NotImplementedError"
-        return 0, {}  # What is returned here?
+        return 0, {}
diff --git a/environments/factory/simple_factory.py b/environments/factory/simple_factory.py
index 0588f82..fe38709 100644
--- a/environments/factory/simple_factory.py
+++ b/environments/factory/simple_factory.py
@@ -19,7 +19,7 @@ class SimpleFactory(BaseFactory):
         self.state = np.concatenate((self.state, dirt_slice))  # dirt is now the last slice
         self.spawn_dirt()
 
-    def step_core(self, collisions_vecs, actions, r):
+    def calculate_reward(self, collisions_vecs, actions, r):
         for agent_i, cols in enumerate(collisions_vecs):
             cols = np.argwhere(cols != 0).flatten()
             print(f't = {self.steps}\tAgent {agent_i} has collisions with '
diff --git a/environments/factory/simple_factory_getting_dirty.py b/environments/factory/simple_factory_getting_dirty.py
index 8b766f3..c018130 100644
--- a/environments/factory/simple_factory_getting_dirty.py
+++ b/environments/factory/simple_factory_getting_dirty.py
@@ -30,7 +30,7 @@ class GettingDirty(BaseFactory):
         self.state = np.concatenate((self.state, dirt_slice))  # dirt is now the last slice
         self.spawn_dirt()
 
-    def step_core(self, collisions_vecs, actions, r):
+    def calculate_reward(self, collisions_vecs, actions, r):
         for agent_i, cols in enumerate(collisions_vecs):
             cols = np.argwhere(cols != 0).flatten()
             print(f't = {self.steps}\tAgent {agent_i} has collisions with '
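
Below is a standalone sketch, not part of the patch above, illustrating the action-indexing convention the new _is_moving_action helper relies on (four action indices per enabled movement axis) and the subclass pattern after the step_core -> calculate_reward rename. The names is_moving_action and DummyRewarder, as well as the reward values, are illustrative assumptions rather than code from the repository.

import numpy as np


def is_moving_action(action: int,
                     allow_vertical_movement: bool = True,
                     allow_horizontal_movement: bool = True) -> bool:
    # Mirrors BaseFactory._is_moving_action: four action indices per enabled
    # movement axis; any index at or above that count is an "additional" action.
    movement_actions = (int(allow_vertical_movement) + int(allow_horizontal_movement)) * 4
    return action < movement_actions


class DummyRewarder:
    # Illustrative override target: subclasses now implement calculate_reward
    # (formerly step_core) to turn per-agent collision vectors into a reward.
    def calculate_reward(self, collision_vecs: np.ndarray, actions, r: int):
        n_collisions = int(np.count_nonzero(collision_vecs))
        return r - n_collisions, {'collisions': n_collisions}


if __name__ == '__main__':
    print([a for a in range(10) if is_moving_action(a)])  # both axes enabled -> [0, ..., 7]
    vecs = np.zeros((2, 5))
    vecs[0, 0] = 1  # agent 0 collided with slice 0
    print(DummyRewarder().calculate_reward(vecs, actions=[1, 9], r=0))  # -> (-1, {'collisions': 1})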