Monitor and Recorder are Wrappers.
@@ -65,7 +65,7 @@ class BaseFactory(gym.Env):
     def __init__(self, level_name='simple', n_agents=1, max_steps=int(5e2),
                  mv_prop: MovementProperties = MovementProperties(),
                  obs_prop: ObservationProperties = ObservationProperties(),
-                 parse_doors=False, record_episodes=False, done_at_collision=False,
+                 parse_doors=False, done_at_collision=False,
                  verbose=False, doors_have_area=True, env_seed=time.time_ns(), individual_rewards=False,
                  **kwargs):

@@ -97,7 +97,7 @@ class BaseFactory(gym.Env):
         self._pomdp_r = self.obs_prop.pomdp_r

         self.done_at_collision = done_at_collision
-        self.record_episodes = record_episodes
+        self._record_episodes = False
         self.parse_doors = parse_doors
         self.doors_have_area = doors_have_area
         self.individual_rewards = individual_rewards
@@ -249,7 +249,7 @@ class BaseFactory(gym.Env):
         if self._steps >= self.max_steps:
            done = True
         info.update(step_reward=reward, step=self._steps)
-        if self.record_episodes:
+        if self._record_episodes:
             info.update(self._summarize_state())

         # Post step Hook for later use
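`record_episodes` is no longer a constructor argument; the env keeps a private `_record_episodes` flag that defaults to False and is switched on from outside, by the recorder wrapper further down. A minimal sketch of that gating, with an illustrative toy env that is not repository code:

    # Sketch only: the env builds the extra per-step summary solely when a wrapper
    # has switched the internal flag on. ToyFactory is an illustrative stand-in.
    class ToyFactory:
        def __init__(self):
            self._record_episodes = False     # replaces the old constructor argument
            self._steps = 0

        def _summarize_state(self):
            return {'rec_step_state': {'step': self._steps}}   # recorder-only payload

        def step(self, action):
            self._steps += 1
            info = {'step_reward': 0.0, 'step': self._steps}
            if self._record_episodes:         # gated exactly like the hunk above
                info.update(self._summarize_state())
            return None, 0.0, False, info

    env = ToyFactory()
    print('rec_step_state' in env.step(0)[3])   # False until a recorder flips the flag
    env._record_episodes = True
    print('rec_step_state' in env.step(0)[3])   # True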
@@ -280,7 +280,7 @@ class BaseFactory(gym.Env):
         if self.n_agents == 1:
             obs = self._build_per_agent_obs(self[c.AGENT][0], state_array_dict)
         elif self.n_agents >= 2:
-            obs = np.stack([self._build_per_agent_obs(agent, state_array_dict) for agent in self[c.AGENT]])
+            obs = np.stack(self._build_per_agent_obs(agent, state_array_dict) for agent in self[c.AGENT])
         else:
             raise ValueError('n_agents cannot be smaller than 1!!')
         return obs
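For the multi-agent case the per-agent observation planes are stacked into one array of shape (n_agents, ...). Note that np.stack expects a real sequence; newer NumPy releases warn about or reject a bare generator, so materialising the comprehension in a list is the robust variant. A small sketch with dummy observations (shapes are made up for illustration):

    # Sketch (not from the commit): stack per-agent observations into (n_agents, *obs_shape).
    import numpy as np

    def build_obs(agent_id: int) -> np.ndarray:
        return np.full((3, 5, 5), agent_id, dtype=np.float32)  # dummy per-agent obs

    per_agent = [build_obs(i) for i in range(2)]   # a list, which np.stack accepts reliably
    obs = np.stack(per_agent)
    assert obs.shape == (2, 3, 5, 5)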
@@ -290,9 +290,6 @@ class BaseFactory(gym.Env):
         agent_omit_idx = None

         if self.obs_prop.omit_agent_self and self.n_agents == 1:
-            # There is only a single agent and we want to omit the agent obs, so just remove the array.
-            # del state_array_dict[c.AGENT]
-            # Not Needed any more,
             pass
         elif self.obs_prop.omit_agent_self and self.obs_prop.render_agents in [a_obs.COMBINED, ] and self.n_agents > 1:
             state_array_dict[c.AGENT][0, agent.x, agent.y] -= agent.encoding
@@ -439,7 +436,7 @@ class BaseFactory(gym.Env):
         tiles_with_collisions = list()
         for tile in self[c.FLOOR]:
             if tile.is_occupied():
-                guests = [guest for guest in tile.guests if guest.can_collide]
+                guests = tile.guests_that_can_collide
                 if len(guests) >= 2:
                     tiles_with_collisions.append(tile)
         return tiles_with_collisions
@@ -521,7 +518,7 @@ class BaseFactory(gym.Env):
                 per_agent_info_dict[agent.name].update(no_op=1)
                 # per_agent_reward -= 0.00

-            # Monitor Notes
+            # EnvMonitor Notes
            if agent.temp_valid:
                per_agent_info_dict[agent.name].update(valid_action=1)
                per_agent_info_dict[agent.name].update({f'{agent.name}_valid_action': 1})

@@ -209,7 +209,7 @@ class Tile(Object):
         return not len(self._guests)

     def is_occupied(self):
-        return len(self._guests)
+        return bool(len(self._guests))

     def enter(self, guest):
         if guest.name not in self._guests:
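Together with the `guests_that_can_collide` property used in the collision check further up, Tile now answers `is_occupied()` with a real bool instead of a raw length. An illustrative sketch of such a tile (not repository code):

    # Illustrative Tile sketch: bool return type plus the collision filter as a property.
    class Guest:
        def __init__(self, name, can_collide=True):
            self.name = name
            self.can_collide = can_collide

    class Tile:
        def __init__(self):
            self._guests = {}

        @property
        def guests(self):
            return list(self._guests.values())

        @property
        def guests_that_can_collide(self):
            return [guest for guest in self.guests if guest.can_collide]

        def is_occupied(self) -> bool:
            return bool(len(self._guests))   # a raw len() would leak ints to callers

        def enter(self, guest):
            if guest.name not in self._guests:
                self._guests[guest.name] = guest

    t = Tile()
    t.enter(Guest('agent_0'))
    t.enter(Guest('door_0', can_collide=False))
    assert t.is_occupied() is True
    assert len(t.guests_that_can_collide) == 1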
@@ -28,7 +28,7 @@ class DirtProperties(NamedTuple):
     max_global_amount: int = 20  # Max dirt amount in the whole environment.
     dirt_smear_amount: float = 0.2  # Agents smear dirt, when not cleaning up in place.
     agent_can_interact: bool = True  # Whether the agents can interact with the dirt in this environment.
-    done_when_clean = True
+    done_when_clean: bool = True


 class Dirt(Entity):
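The added annotation is not cosmetic: in a typing.NamedTuple only annotated names become tuple fields, while a bare `name = value` assignment is just a class attribute. A short demonstration:

    # In typing.NamedTuple, un-annotated assignments are class attributes, not fields:
    # they are missing from _fields and cannot be replaced per instance.
    from typing import NamedTuple

    class WithoutAnnotation(NamedTuple):
        max_global_amount: int = 20
        done_when_clean = True               # NOT a tuple field

    class WithAnnotation(NamedTuple):
        max_global_amount: int = 20
        done_when_clean: bool = True         # proper field with a default

    assert 'done_when_clean' not in WithoutAnnotation._fields
    assert 'done_when_clean' in WithAnnotation._fields
    assert WithAnnotation()._replace(done_when_clean=False).done_when_clean is False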
@@ -228,14 +228,14 @@ class DirtFactory(BaseFactory):
         dirt = [dirt.amount for dirt in self[c.DIRT]]
         current_dirt_amount = sum(dirt)
         dirty_tile_count = len(dirt)
-        if dirty_tile_count:
-            dirt_distribution_score = entropy(softmax(np.asarray(dirt)) / dirty_tile_count)
-        else:
-            dirt_distribution_score = 0
+        # if dirty_tile_count:
+        #     dirt_distribution_score = entropy(softmax(np.asarray(dirt)) / dirty_tile_count)
+        # else:
+        #     dirt_distribution_score = 0

         info_dict.update(dirt_amount=current_dirt_amount)
         info_dict.update(dirty_tile_count=dirty_tile_count)
-        info_dict.update(dirt_distribution_score=dirt_distribution_score)
+        # info_dict.update(dirt_distribution_score=dirt_distribution_score)

         if agent.temp_action == CLEAN_UP_ACTION:
             if agent.temp_valid:
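The commented-out score combines softmax and entropy over the per-tile dirt amounts. Read in isolation, and assuming `entropy` and `softmax` come from scipy as the names suggest, it computes the following (the amounts below are made up):

    # Standalone sketch of the distribution score from the hunk above.
    import numpy as np
    from scipy.special import softmax
    from scipy.stats import entropy

    dirt = [0.5, 1.2, 0.3]                      # per-tile dirt amounts (example data)
    dirty_tile_count = len(dirt)
    if dirty_tile_count:
        dirt_distribution_score = entropy(softmax(np.asarray(dirt)) / dirty_tile_count)
    else:
        dirt_distribution_score = 0
    print(dirt_distribution_score)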
@@ -1,7 +1,7 @@
 import pickle
 from collections import defaultdict
 from pathlib import Path
-from typing import List, Dict
+from typing import List, Dict, Union

 from stable_baselines3.common.callbacks import BaseCallback

@@ -10,57 +10,50 @@ from environments.helpers import IGNORED_DF_COLUMNS
 import pandas as pd


-class MonitorCallback(BaseCallback):
+class EnvMonitor(BaseCallback):

     ext = 'png'

-    def __init__(self, filepath=Path('debug_out/monitor.pick')):
-        super(MonitorCallback, self).__init__()
-        self.filepath = Path(filepath)
+    def __init__(self, env):
+        super(EnvMonitor, self).__init__()
+        self.unwrapped = env
         self._monitor_df = pd.DataFrame()
         self._monitor_dicts = defaultdict(dict)
-        self.started = False
-        self.closed = False

-    def __enter__(self):
-        self.start()
-        return self
+    def __getattr__(self, item):
+        return getattr(self.unwrapped, item)

-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.stop()
+    def step(self, action):
+        obs, reward, done, info = self.unwrapped.step(action)
+        self._read_info(0, info)
+        self._read_done(0, done)
+        return obs, reward, done, info

+    def reset(self):
+        return self.unwrapped.reset()

     def _on_training_start(self) -> None:
-        if self.started:
-            pass
-        else:
-            self.start()
-        pass
+        pass

     def _on_training_end(self) -> None:
-        if self.closed:
-            pass
-        else:
-            self.stop()
-        pass
+        pass

     def _on_step(self, alt_infos: List[Dict] = None, alt_dones: List[bool] = None) -> bool:
-        if self.started:
-            for env_idx, info in enumerate(self.locals.get('infos', [])):
-                self.read_info(env_idx, info)
+        for env_idx, info in enumerate(self.locals.get('infos', [])):
+            self._read_info(env_idx, info)

-            for env_idx, done in list(
-                    enumerate(self.locals.get('dones', []))) + list(enumerate(self.locals.get('done', []))):
-                self.read_done(env_idx, done)
-        else:
-            pass
+        for env_idx, done in list(
+                enumerate(self.locals.get('dones', []))) + list(enumerate(self.locals.get('done', []))):
+            self._read_done(env_idx, done)
         return True

-    def read_info(self, env_idx, info: dict):
+    def _read_info(self, env_idx, info: dict):
         self._monitor_dicts[env_idx][len(self._monitor_dicts[env_idx])] = {
             key: val for key, val in info.items() if
             key not in ['terminal_observation', 'episode'] and not key.startswith('rec_')}
         return

-    def read_done(self, env_idx, done):
+    def _read_done(self, env_idx, done):
         if done:
             env_monitor_df = pd.DataFrame.from_dict(self._monitor_dicts[env_idx], orient='index')
             self._monitor_dicts[env_idx] = dict()
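The monitor now behaves as a thin wrapper around the environment: attribute lookups it does not handle fall through to the wrapped env via `__getattr__`, and `step()` is intercepted to collect the per-step info dict. A minimal, self-contained sketch of that pattern, with a toy env standing in for the factory (none of these names are repository code):

    # Minimal wrapper sketch: delegate unknown attributes, intercept step().
    from collections import defaultdict

    class ToyEnv:
        n_agents = 1
        def reset(self):
            return 0
        def step(self, action):
            return 0, 1.0, True, {'step_reward': 1.0}

    class Monitor:
        def __init__(self, env):
            self.unwrapped = env
            self._monitor_dicts = defaultdict(dict)

        def __getattr__(self, item):
            # only called for attributes *not* found on Monitor itself
            return getattr(self.unwrapped, item)

        def step(self, action):
            obs, reward, done, info = self.unwrapped.step(action)
            idx = len(self._monitor_dicts[0])
            self._monitor_dicts[0][idx] = dict(info)   # record per-step info
            return obs, reward, done, info

    env = Monitor(ToyEnv())
    env.reset()
    env.step(0)
    print(env.n_agents)   # 1, resolved on the wrapped env via __getattr__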
@@ -74,16 +67,8 @@ class MonitorCallback(BaseCallback):
             pass
         return

-    def stop(self):
-        # self.out_file.unlink(missing_ok=True)
-        with self.filepath.open('wb') as f:
+    def save_run(self, filepath: Union[Path, str]):
+        filepath = Path(filepath)
+        filepath.parent.mkdir(exist_ok=True, parents=True)
+        with filepath.open('wb') as f:
             pickle.dump(self._monitor_df.reset_index(), f, protocol=pickle.HIGHEST_PROTOCOL)
-        self.closed = True
-
-    def start(self):
-        if self.started:
-            pass
-        else:
-            self.filepath.parent.mkdir(exist_ok=True, parents=True)
-            self.started = True
-        pass
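`save_run()` replaces the old start/stop bookkeeping and simply pickles the accumulated DataFrame. A small standalone sketch of that persistence step, using a throwaway DataFrame and an illustrative path:

    # Sketch of the persistence step in isolation; file name and data are made up.
    import pickle
    from pathlib import Path
    import pandas as pd

    monitor_df = pd.DataFrame([{'step': 1, 'step_reward': 0.1},
                               {'step': 2, 'step_reward': 0.3}])

    filepath = Path('debug_out/monitor.pick')
    filepath.parent.mkdir(exist_ok=True, parents=True)
    with filepath.open('wb') as f:
        pickle.dump(monitor_df.reset_index(), f, protocol=pickle.HIGHEST_PROTOCOL)

    with filepath.open('rb') as f:
        restored = pickle.load(f)
    print(restored.shape)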
@@ -1,4 +1,3 @@
-import json
 from collections import defaultdict
 from pathlib import Path
 from typing import Union
@@ -11,22 +10,13 @@ from stable_baselines3.common.callbacks import BaseCallback
 from environments.factory.base.base_factory import REC_TAC
+from environments.helpers import Constants as c


-# noinspection PyAttributeOutsideInit
-class RecorderCallback(BaseCallback):
+class EnvRecorder(BaseCallback):

-    def __init__(self, filepath: Union[str, Path], occupation_map: bool = False, trajectory_map: bool = False,
-                 entities='all'):
-        super(RecorderCallback, self).__init__()
-        self.trajectory_map = trajectory_map
-        self.occupation_map = occupation_map
-        self.filepath = Path(filepath)
+    def __init__(self, env, entities='all'):
+        super(EnvRecorder, self).__init__()
+        self.unwrapped = env
         self._recorder_dict = defaultdict(list)
         self._recorder_out_list = list()
-        self._env_params = None
-        self.do_record: bool
         if isinstance(entities, str):
             if entities.lower() == 'all':
                 self._entities = None
@@ -37,10 +27,18 @@ class RecorderCallback(BaseCallback):
         self.started = False
         self.closed = False

-    def read_params(self, params):
-        self._env_params = params
+    def __getattr__(self, item):
+        return getattr(self.unwrapped, item)

-    def read_info(self, env_idx, info: dict):
+    def reset(self):
+        self.unwrapped._record_episodes = True
+        return self.unwrapped.reset()
+
+    def _on_training_start(self) -> None:
+        self.unwrapped._record_episodes = True
+        pass
+
+    def _read_info(self, env_idx, info: dict):
         if info_dict := {key.replace(REC_TAC, ''): val for key, val in info.items() if key.startswith(f'{REC_TAC}')}:
             if self._entities:
                 info_dict = {k: v for k, v in info_dict.items() if k in self._entities}
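`_read_info()` keeps only the info keys carrying the recorder prefix and strips that prefix, using an assignment expression so the episode buffer is only extended when something was recorded. A standalone sketch, assuming 'rec_' as the tag value (the real constant is REC_TAC from base_factory):

    # Sketch of the prefix filtering in isolation; info payload is example data.
    REC_TAC = 'rec_'   # assumed value for illustration

    info = {'step_reward': 0.5, 'rec_Agents': [{'x': 1, 'y': 2}], 'rec_Dirt': []}

    if info_dict := {key.replace(REC_TAC, ''): val
                     for key, val in info.items() if key.startswith(f'{REC_TAC}')}:
        print(info_dict)   # {'Agents': [{'x': 1, 'y': 2}], 'Dirt': []}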
@@ -51,7 +49,7 @@ class RecorderCallback(BaseCallback):
             pass
         return

-    def read_done(self, env_idx, done):
+    def _read_done(self, env_idx, done):
         if done:
             self._recorder_out_list.append({'steps': self._recorder_dict[env_idx],
                                             'episode': len(self._recorder_out_list)})
@@ -59,77 +57,46 @@ class RecorderCallback(BaseCallback):
         else:
             pass

-    def start(self, force=False):
-        if (hasattr(self.training_env, 'record_episodes') and self.training_env.record_episodes) or force:
-            self.do_record = True
-            self.filepath.parent.mkdir(exist_ok=True, parents=True)
-            self.started = True
-        else:
-            self.do_record = False
+    def save_records(self, filepath: Union[Path, str], save_occupation_map=False, save_trajectory_map=False):
+        filepath = Path(filepath)
+        filepath.parent.mkdir(exist_ok=True, parents=True)
+        # self.out_file.unlink(missing_ok=True)
+        with filepath.open('w') as f:
+            out_dict = {'episodes': self._recorder_out_list, 'header': self.unwrapped.params}
+            try:
+                simplejson.dump(out_dict, f, indent=4)
+            except TypeError:
+                print('Shit')

-    def stop(self):
-        if self.do_record and self.started:
-            # self.out_file.unlink(missing_ok=True)
-            with self.filepath.open('w') as f:
-                out_dict = {'episodes': self._recorder_out_list, 'header': self._env_params}
-                try:
-                    simplejson.dump(out_dict, f, indent=4)
-                except TypeError:
-                    print('Shit')
+        if save_occupation_map:
+            a = np.zeros((15, 15))
+            for episode in out_dict['episodes']:
+                df = pd.DataFrame([y for x in episode['steps'] for y in x['Agents']])

-            if self.occupation_map:
-                a = np.zeros((15, 15))
-                for episode in out_dict['episodes']:
-                    df = pd.DataFrame([y for x in episode['steps'] for y in x['Agents']])
-                    b = list(df[['x', 'y']].to_records(index=False))
+                b = list(df[['x', 'y']].to_records(index=False))

-                    np.add.at(a, tuple(zip(*b)), 1)
+                np.add.at(a, tuple(zip(*b)), 1)

-                # a = np.rot90(a)
-                import seaborn as sns
-                from matplotlib import pyplot as plt
-                hm = sns.heatmap(data=a)
-                hm.set_title('Very Nice Heatmap')
-                plt.show()
+            # a = np.rot90(a)
+            import seaborn as sns
+            from matplotlib import pyplot as plt
+            hm = sns.heatmap(data=a)
+            hm.set_title('Very Nice Heatmap')
+            plt.show()

-            if self.trajectory_map:
-                print('Recorder files were dumped to disk, now plotting the occupation map...')
-
-            self.closed = True
-            self.started = False
-        else:
-            pass
+        if save_trajectory_map:
+            raise NotImplementedError('This has not yet been implemented.')

     def _on_step(self) -> bool:
-        if self.do_record and self.started:
-            for env_idx, info in enumerate(self.locals.get('infos', [])):
-                self.read_info(env_idx, info)
+        for env_idx, info in enumerate(self.locals.get('infos', [])):
+            self._read_info(env_idx, info)

-            for env_idx, done in list(
-                    enumerate(self.locals.get('dones', []))) + list(
-                    enumerate(self.locals.get('done', []))):
-                self.read_done(env_idx, done)
-        else:
-            pass
+        dones = list(enumerate(self.locals.get('dones', [])))
+        dones.extend(list(enumerate(self.locals.get('done', []))))
+        for env_idx, done in dones:
+            self._read_done(env_idx, done)
         return True
-
-    def __enter__(self):
-        self.start(force=True)
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.stop()
-
-    def _on_training_start(self) -> None:
-        if self.started:
-            pass
-        else:
-            self.start()
-        pass
-
-    def _on_training_end(self) -> None:
-        if self.closed:
-            pass
-        else:
-            self.stop()
-        pass
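`save_records()` builds the occupation map by counting agent (x, y) records into a grid with np.add.at and handing the grid to seaborn. The counting step in isolation, with made-up positions:

    # Standalone sketch of the occupation-map counting; grid size and positions are example data.
    import numpy as np

    positions = [(1, 1), (1, 2), (1, 1), (3, 4)]      # visited (x, y) cells
    a = np.zeros((15, 15))
    np.add.at(a, tuple(zip(*positions)), 1)           # unbuffered add: repeats are counted
    assert a[1, 1] == 2 and a[1, 2] == 1 and a[3, 4] == 1

    # Plotting is optional and mirrors the hunk above:
    # import seaborn as sns
    # from matplotlib import pyplot as plt
    # sns.heatmap(data=a).set_title('Occupation map')
    # plt.show()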
@@ -1,54 +0,0 @@
-from collections import defaultdict
-from pathlib import Path
-
-import numpy as np
-import pandas as pd
-from stable_baselines3.common.callbacks import BaseCallback
-
-from environments.logging.plotting import prepare_plot
-
-
-class TraningMonitor(BaseCallback):
-
-    def __init__(self, filepath, flush_interval=None):
-        super(TraningMonitor, self).__init__()
-        self.values = defaultdict(dict)
-        self.rewards = defaultdict(lambda: 0)
-
-        self.filepath = Path(filepath)
-        self.flush_interval = flush_interval
-        self.next_flush: int
-        pass
-
-    def _on_training_start(self) -> None:
-        self.flush_interval = self.flush_interval or (self.locals['total_timesteps'] * 0.1)
-        self.next_flush = self.flush_interval
-
-    def _flush(self):
-        df = pd.DataFrame.from_dict(self.values, orient='index')
-        if not self.filepath.exists():
-            df.to_csv(self.filepath, mode='wb', header=True)
-        else:
-            df.to_csv(self.filepath, mode='a', header=False)
-
-    def _on_step(self) -> bool:
-        for idx, done in np.ndenumerate(self.locals.get('dones', [])):
-            idx = idx[0]
-            # self.values[self.num_timesteps].update(**{f'reward_env_{idx}': self.locals['rewards'][idx]})
-            self.rewards[idx] += self.locals['rewards'][idx]
-            if done:
-                self.values[self.num_timesteps].update(**{f'acc_epispde_r_env_{idx}': self.rewards[idx]})
-                self.rewards[idx] = 0
-
-        if self.num_timesteps >= self.next_flush and self.values:
-            self._flush()
-            self.values = defaultdict(dict)
-
-            self.next_flush += self.flush_interval
-        return True
-
-    def on_training_end(self) -> None:
-        self._flush()
-        self.values = defaultdict(dict)
-        # prepare_plot()
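The deleted TraningMonitor accumulated per-env rewards and logged the episode return whenever an env reported done. The bookkeeping in isolation, on made-up data rather than SB3 callback state:

    # Toy illustration of the deleted callback's accumulation logic.
    from collections import defaultdict

    rewards = defaultdict(lambda: 0.0)
    values = {}
    stream = [  # (timestep, per-env rewards, per-env dones), example data
        (1, [0.1, 0.2], [False, False]),
        (2, [0.3, 0.1], [True, False]),
        (3, [0.2, 0.4], [False, True]),
    ]
    for t, step_rewards, dones in stream:
        for idx, done in enumerate(dones):
            rewards[idx] += step_rewards[idx]
            if done:
                values.setdefault(t, {})[f'acc_episode_r_env_{idx}'] = rewards[idx]
                rewards[idx] = 0.0

    print(values)   # {2: {'acc_episode_r_env_0': 0.4}, 3: {'acc_episode_r_env_1': 0.7}}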
@@ -1,4 +1,3 @@
 from enum import Enum
 from typing import NamedTuple, Union

-