From c8883a9c0d221866ba1c54ce09fdf3a4c051f0f3 Mon Sep 17 00:00:00 2001 From: steffen-illium Date: Thu, 17 Jun 2021 16:21:13 +0200 Subject: [PATCH] Restructuring --- environments/factory/base_factory.py | 93 +++++++++++++++----------- environments/factory/levels/rooms.txt | 4 +- environments/factory/simple_factory.py | 48 ++++++------- environments/helpers.py | 10 ++- environments/utility_classes.py | 6 +- main_test.py | 6 +- 6 files changed, 94 insertions(+), 73 deletions(-) diff --git a/environments/factory/base_factory.py b/environments/factory/base_factory.py index 7bb131e..c10ea68 100644 --- a/environments/factory/base_factory.py +++ b/environments/factory/base_factory.py @@ -36,6 +36,10 @@ class BaseFactory(gym.Env): def movement_actions(self): return self._actions.movement_actions + @property + def has_doors(self): + return hasattr(self, '_doors') + def __enter__(self): return self if self.frames_to_stack == 0 else FrameStack(self, self.frames_to_stack) @@ -43,7 +47,7 @@ class BaseFactory(gym.Env): self.close() def __init__(self, level_name='simple', n_agents=1, max_steps=int(5e2), pomdp_radius: Union[None, int] = 0, - movement_properties: MovementProperties = MovementProperties(), + movement_properties: MovementProperties = MovementProperties(), parse_doors=False, combin_agent_slices_in_obs: bool = False, frames_to_stack=0, omit_agent_slice_in_obs=False, **kwargs): assert (combin_agent_slices_in_obs != omit_agent_slice_in_obs) or \ @@ -64,25 +68,31 @@ class BaseFactory(gym.Env): self.done_at_collision = False self._state_slices = StateSlices() + + # Level level_filepath = Path(__file__).parent / h.LEVELS_DIR / f'{self.level_name}.txt' parsed_level = h.parse_level(level_filepath) self._level = h.one_hot_level(parsed_level) - parsed_doors = h.one_hot_level(parsed_level, h.DOOR) - if parsed_doors.any(): - self._doors = parsed_doors - level_slices = ['level', 'doors'] - can_use_doors = True - else: - level_slices = ['level'] - can_use_doors = False + 
level_slices = [h.LEVEL] + + # Doors + if parse_doors: + parsed_doors = h.one_hot_level(parsed_level, h.DOOR) + if parsed_doors.any(): + self._doors = parsed_doors + level_slices.append(h.DOORS) + + # Agents offset = len(level_slices) self._state_slices.register_additional_items([*level_slices, *[f'agent#{i}' for i in range(offset, n_agents + offset)]]) + + # Additional Slices from SubDomains if 'additional_slices' in kwargs: self._state_slices.register_additional_items(kwargs.get('additional_slices')) self._zones = Zones(parsed_level) - self._actions = Actions(self.movement_properties, can_use_doors=can_use_doors) + self._actions = Actions(self.movement_properties, can_use_doors=self.has_doors) self._actions.register_additional_items(self.additional_actions) self.reset() @@ -99,30 +109,29 @@ class BaseFactory(gym.Env): raise NotImplementedError('Please register additional actions ') def reset(self) -> (np.ndarray, int, bool, dict): + slices = [np.expand_dims(self._level, 0)] self._steps = 0 self._agent_states = list() + # Door Init + if self.has_doors: + self._door_states = [DoorState(i, tuple(pos)) for i, pos + in enumerate(np.argwhere(self._doors == h.IS_OCCUPIED_CELL))] + slices.append(np.expand_dims(self._doors, 0)) + # Agent placement ... - agents = np.zeros((self.n_agents, *self._level.shape), dtype=np.int8) floor_tiles = np.argwhere(self._level == h.IS_FREE_CELL) # ... 
on random positions np.random.shuffle(floor_tiles) + agents = np.zeros((self.n_agents, *self._level.shape), dtype=np.int8) for i, (x, y) in enumerate(floor_tiles[:self.n_agents]): agents[i, x, y] = h.IS_OCCUPIED_CELL - agent_state = AgentState(i, -1) - agent_state.update(pos=(x, y)) + agent_state = AgentState(i, -1, pos=(x, y)) self._agent_states.append(agent_state) - # state.shape = level, agent 1,..., agent n, - if 'doors' in self._state_slices.values(): - self._door_states = [DoorState(i, tuple(pos)) for i, pos - in enumerate(np.argwhere(self._doors == h.IS_OCCUPIED_CELL))] - self._state = np.concatenate((np.expand_dims(self._level, axis=0), - np.expand_dims(self._doors, axis=0), - agents), axis=0) + slices.append(agents) - else: - self._state = np.concatenate((np.expand_dims(self._level, axis=0), agents), axis=0) - # Returns State + # GLOBAL STATE + self._state = np.concatenate(slices, axis=0) return None def _get_observations(self) -> np.ndarray: @@ -138,21 +147,22 @@ class BaseFactory(gym.Env): first_agent_slice = self._state_slices.AGENTSTARTIDX # Todo: make this more efficient! 
if self.pomdp_radius: - global_pos = self._agent_states[agent_i].pos - x0, x1 = max(0, global_pos[0] - self.pomdp_radius), global_pos[0] + self.pomdp_radius + 1 - y0, y1 = max(0, global_pos[1] - self.pomdp_radius), global_pos[1] + self.pomdp_radius + 1 + pomdp_diameter = self.pomdp_radius * 2 + 1 + global_x, global_y = self._agent_states[agent_i].pos + x0, x1 = max(0, global_x - self.pomdp_radius), global_x + self.pomdp_radius + 1 + y0, y1 = max(0, global_y - self.pomdp_radius), global_y + self.pomdp_radius + 1 obs = self._state[:, x0:x1, y0:y1] - if obs.shape[1] != self.pomdp_radius * 2 + 1 or obs.shape[2] != self.pomdp_radius * 2 + 1: - obs_padded = np.full((obs.shape[0], self.pomdp_radius * 2 + 1, self.pomdp_radius * 2 + 1), 1) - a_pos = np.argwhere(obs[first_agent_slice + agent_i] == h.IS_OCCUPIED_CELL)[0] + if obs.shape[1] != pomdp_diameter or obs.shape[2] != pomdp_diameter: + obs_padded = np.full((obs.shape[0], pomdp_diameter, pomdp_diameter), h.IS_OCCUPIED_CELL) + local_x, local_y = np.argwhere(obs[first_agent_slice + agent_i] == h.IS_OCCUPIED_CELL)[0] obs_padded[:, - abs(a_pos[0]-self.pomdp_radius):abs(a_pos[0]-self.pomdp_radius)+obs.shape[1], - abs(a_pos[1]-self.pomdp_radius):abs(a_pos[1]-self.pomdp_radius)+obs.shape[2]] = obs + abs(local_x-self.pomdp_radius):abs(local_x-self.pomdp_radius)+obs.shape[1], + abs(local_y-self.pomdp_radius):abs(local_y-self.pomdp_radius)+obs.shape[2]] = obs obs = obs_padded else: obs = self._state if self.omit_agent_slice_in_obs: - obs_new = obs[[key for key, val in self._state_slices.items() if 'agent' not in val]] + obs_new = obs[[key for key, val in self._state_slices.items() if h.AGENT not in val]] return obs_new else: if self.combin_agent_slices_in_obs: @@ -174,16 +184,19 @@ class BaseFactory(gym.Env): # Move this in a seperate function? 
        for agent_i, action in enumerate(actions):
+            agent = self._agent_states[agent_i]
             if self._actions.is_moving_action(action):
                 pos, valid = self.move_or_colide(agent_i, action)
             elif self._actions.is_no_op(action):
-                pos, valid = self._agent_states[agent_i].pos, h.VALID
+                pos, valid = agent.pos, h.VALID
             elif self._actions.is_door_usage(action):
-                try:
+                # Check if the agent really stands on a door:
+                if self._state[self._state_slices.by_name(h.DOORS)][agent.pos] in [h.IS_OCCUPIED_CELL, ]:
                     door = [door for door in self._door_states if door.pos == self._agent_states[agent_i].pos][0]
                     door.use()
                     pos, valid = self._agent_states[agent_i].pos, h.VALID
-                except IndexError:
+                # When the agent is not standing on a door...
+                else:
                     pos, valid = self._agent_states[agent_i].pos, h.NOT_VALID
             else:
                 pos, valid = self.do_additional_actions(agent_i, action)
@@ -202,6 +215,7 @@ class BaseFactory(gym.Env):
                     door.time_to_close -= 1
                 elif door.is_open and not door.time_to_close and door.pos not in agents_pos:
                     door.use()
+            self._state[self._state_slices.by_name(h.DOORS)] = 1 if door.is_closed else -1
 
         reward, info = self.calculate_reward(self._agent_states)
 
@@ -230,11 +244,12 @@ class BaseFactory(gym.Env):
                 collisions_vec[self._state_slices.by_name('doors')] = h.IS_FREE_CELL  # no door-collisions
 
             if agent_state.action_valid:
-                # ToDo: Place a function hook here
+                # All well, no collision.
+                # Place a function hook here if needed.
pass else: # Place a marker to indicate a collision with the level boundrys - collisions_vec[h.LEVEL_IDX] = h.IS_OCCUPIED_CELL + collisions_vec[self._state_slices.by_name(h.LEVEL)] = h.IS_OCCUPIED_CELL return collisions_vec def do_move(self, agent_i: int, old_pos: (int, int), new_pos: (int, int)) -> None: @@ -265,7 +280,7 @@ class BaseFactory(gym.Env): x_new = x + x_diff y_new = y + y_diff - if h.DOORS in self._state_slices.values() and self._agent_states[agent_i]._last_pos != (-1, -1): + if self.has_doors and self._agent_states[agent_i]._last_pos != (-1, -1): door = [door for door in self._door_states if door.pos == (x, y)] if door: door = door[0] @@ -298,7 +313,7 @@ class BaseFactory(gym.Env): else: pass - valid = h.check_position(self._state[h.LEVEL_IDX], (x_new, y_new)) + valid = h.check_position(self._state[self._state_slices.by_name(h.LEVEL)], (x_new, y_new)) return (x, y), (x_new, y_new), valid diff --git a/environments/factory/levels/rooms.txt b/environments/factory/levels/rooms.txt index 43e8193..66cf1ff 100644 --- a/environments/factory/levels/rooms.txt +++ b/environments/factory/levels/rooms.txt @@ -4,10 +4,10 @@ #333333xx#4444# #333333#444444# #333333#444444# -###x#######x### +###x#######D### #1111##2222222# #11111#2222#22# -#11111x2222222# +#11111D2222222# #11111#2222222# #11111#2222222# ############### \ No newline at end of file diff --git a/environments/factory/simple_factory.py b/environments/factory/simple_factory.py index 206febb..f92af26 100644 --- a/environments/factory/simple_factory.py +++ b/environments/factory/simple_factory.py @@ -49,28 +49,30 @@ class SimpleFactory(BaseFactory): dirt = [Entity('dirt', [x, y], min(0.15 + self._state[DIRT_INDEX, x, y], 1.5), 'scale') for x, y in np.argwhere(self._state[DIRT_INDEX] > h.IS_FREE_CELL)] - walls = [Entity('wall', pos) for pos in np.argwhere(self._state[h.LEVEL_IDX] > h.IS_FREE_CELL)] + walls = [Entity('wall', pos) + for pos in np.argwhere(self._state[self._state_slices.by_name(h.LEVEL)] > 
h.IS_FREE_CELL)] def asset_str(agent): if any([x is None for x in [self._state_slices[j] for j in agent.collisions]]): print('error') cols = ' '.join([self._state_slices[j] for j in agent.collisions]) - if 'agent' in cols: + if h.AGENT in cols: return 'agent_collision', 'blank' - elif not agent.action_valid or 'level' in cols or 'agent' in cols: - return 'agent', 'invalid' + elif not agent.action_valid or 'level' in cols or h.AGENT in cols: + return h.AGENT, 'invalid' elif self._is_clean_up_action(agent.action): - return 'agent', 'valid' + return h.AGENT, 'valid' else: - return 'agent', 'idle' + return h.AGENT, 'idle' agents = [] for i, agent in enumerate(self._agent_states): name, state = asset_str(agent) agents.append(Entity(name, agent.pos, 1, 'none', state, i+1)) doors = [] - for i, door in enumerate(self._door_states): - name, state = 'door_open' if door.is_open else 'door_closed', 'blank' - agents.append(Entity(name, door.pos, 1, 'none', state, i+1)) + if self.has_doors: + for i, door in enumerate(self._door_states): + name, state = 'door_open' if door.is_open else 'door_closed', 'blank' + agents.append(Entity(name, door.pos, 1, 'none', state, i+1)) self._renderer.render(dirt+walls+agents+doors) def spawn_dirt(self) -> None: @@ -141,26 +143,25 @@ class SimpleFactory(BaseFactory): reward = 0 for agent_state in agent_states: + agent_name = f'{h.AGENT.capitalize()} {agent_state.i}' cols = agent_state.collisions list_of_collisions = [self._state_slices[entity] for entity in cols - if entity != self._state_slices.by_name("dirt")] + if entity != self._state_slices.by_name('dirt')] if list_of_collisions: - self.print(f't = {self._steps}\tAgent {agent_state.i} has collisions with ' - f'{list_of_collisions}') + self.print(f't = {self._steps}\t{agent_name} has collisions with {list_of_collisions}') if self._is_clean_up_action(agent_state.action): if agent_state.action_valid: reward += 1 - self.print(f'Agent {agent_state.i} did just clean up some dirt at 
{agent_state.pos}.') + self.print(f'{agent_name} did just clean up some dirt at {agent_state.pos}.') info_dict.update(dirt_cleaned=1) else: reward -= 0.01 - self.print(f'Agent {agent_state.i} just tried to clean up some dirt ' - f'at {agent_state.pos}, but was unsucsessfull.') - info_dict.update({f'agent_{agent_state.i}_failed_action': 1}) - info_dict.update({f'agent_{agent_state.i}_failed_dirt_cleanup': 1}) + self.print(f'{agent_name} just tried to clean up some dirt at {agent_state.pos}, but failed.') + info_dict.update({f'{h.AGENT}_{agent_state.i}_failed_action': 1}) + info_dict.update({f'{h.AGENT}_{agent_state.i}_failed_dirt_cleanup': 1}) elif self._actions.is_moving_action(agent_state.action): if agent_state.action_valid: @@ -173,21 +174,20 @@ class SimpleFactory(BaseFactory): elif self._actions.is_door_usage(agent_state.action): if agent_state.action_valid: reward += 0.1 - self.print(f'Agent {agent_state.i} did just use the door at {agent_state.pos}.') + self.print(f'{agent_name} did just use the door at {agent_state.pos}.') info_dict.update(door_used=1) else: - self.print(f'Agent {agent_state.i} just tried to use a door ' - f'at {agent_state.pos}, but was unsucsessfull.') - info_dict.update({f'agent_{agent_state.i}_failed_action': 1}) - info_dict.update({f'agent_{agent_state.i}_failed_door_open': 1}) + self.print(f'{agent_name} just tried to use a door at {agent_state.pos}, but failed.') + info_dict.update({f'{h.AGENT}_{agent_state.i}_failed_action': 1}) + info_dict.update({f'{h.AGENT}_{agent_state.i}_failed_door_open': 1}) else: info_dict.update(no_op=1) reward -= 0.00 for entity in list_of_collisions: - entity = 'agent' if 'agent' in entity else entity - info_dict.update({f'agent_{agent_state.i}_vs_{entity}': 1}) + entity = h.AGENT if h.AGENT in entity else entity + info_dict.update({f'{h.AGENT}_{agent_state.i}_vs_{entity}': 1}) self.print(f"reward is {reward}") # Potential based rewards -> diff --git a/environments/helpers.py b/environments/helpers.py 
index 6357739..fca3a92 100644 --- a/environments/helpers.py +++ b/environments/helpers.py @@ -10,10 +10,16 @@ DOOR = 'D' DANGER_ZONE = 'x' LEVELS_DIR = 'levels' LEVEL = 'level' -DOORS = 'doors' -LEVEL_IDX = 0 +AGENT = 'agent' IS_FREE_CELL = 0 IS_OCCUPIED_CELL = 1 + +DOORS = 'doors' +IS_CLOSED_DOOR = IS_OCCUPIED_CELL +IS_OPEN_DOOR = -1 + +LEVEL_IDX = 0 + TO_BE_AVERAGED = ['dirt_amount', 'dirty_tiles'] IGNORED_DF_COLUMNS = ['Episode', 'Run', 'train_step', 'step', 'index', 'dirt_amount', 'dirty_tile_count'] diff --git a/environments/utility_classes.py b/environments/utility_classes.py index 2ee27a5..98eb061 100644 --- a/environments/utility_classes.py +++ b/environments/utility_classes.py @@ -83,13 +83,13 @@ class AgentState: curr_x, curr_y = self.pos return last_x-curr_x, last_y-curr_y - def __init__(self, i: int, action: int): + def __init__(self, i: int, action: int, pos=None): self.i = i self.action = action self.collision_vector = None self.action_valid = None - self.pos = None + self.pos = pos self._last_pos = (-1, -1) def update(self, **kwargs): # is this hacky?? 
o.0 @@ -248,7 +248,7 @@ class StateSlices(Register): if self._agent_start_idx: return self._agent_start_idx else: - self._agent_start_idx = min([idx for idx, x in self.items() if 'agent' in x]) + self._agent_start_idx = min([idx for idx, x in self.items() if h.AGENT in x]) return self._agent_start_idx def __init__(self): diff --git a/main_test.py b/main_test.py index 0925487..7648316 100644 --- a/main_test.py +++ b/main_test.py @@ -29,8 +29,8 @@ if __name__ == '__main__': # rewards += [total reward] # boxplot total rewards - run_id = '1623241962' - model_name = 'PPO' + run_id = '1623923982' + model_name = 'A2C' # ----------------------- out_path = Path(__file__).parent / 'debug_out' @@ -48,7 +48,7 @@ if __name__ == '__main__': env_kwargs = yaml.load(f, Loader=yaml.FullLoader) dirt_props = DirtProperties(clean_amount=3, gain_amount=0.2, max_global_amount=30, max_local_amount=3, spawn_frequency=1, max_spawn_ratio=0.05) - env_kwargs.update(n_agents=1, dirt_properties=dirt_props) + # env_kwargs.update(n_agents=1, dirt_properties=dirt_props) env = SimpleFactory(**env_kwargs) env = FrameStack(env, 4)