Adjustments and Documentation, recording and new environments, refactoring

This commit is contained in:
Steffen Illium
2022-08-04 14:57:48 +02:00
parent e7461d7dcf
commit 6a24e7b518
41 changed files with 1660 additions and 760 deletions

View File

@@ -0,0 +1,187 @@
import sys
from pathlib import Path
##############################################
# keep this for stand-alone script execution #
##############################################
try:
# noinspection PyUnboundLocalVariable
if __package__ is None:
DIR = Path(__file__).resolve().parent
sys.path.insert(0, str(DIR.parent))
__package__ = DIR.name
else:
DIR = None
except NameError:
DIR = None
pass
##############################################
##############################################
##############################################
import simplejson
from environments import helpers as h
from environments.factory.base.base_factory import BaseFactory
from environments.factory.additional.combined_factories import DestBatteryFactory
from environments.factory.additional.dest.factory_dest import DestFactory
from environments.factory.additional.dirt.factory_dirt import DirtFactory
from environments.factory.additional.item.factory_item import ItemFactory
from environments.helpers import ObservationTranslator, ActionTranslator
from environments.logging.envmonitor import EnvMonitor
from environments.logging.recorder import EnvRecorder
from environments.utility_classes import ObservationProperties, AgentRenderOptions, MovementProperties
def policy_model_kwargs():
return dict(ent_coef=0.01)
def dqn_model_kwargs():
return dict(buffer_size=50000,
learning_starts=64,
batch_size=64,
target_update_interval=5000,
exploration_fraction=0.25,
exploration_final_eps=0.025
)
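# Note: these helpers just return example hyperparameter dicts; they can be unpacked into the
# corresponding SB3 constructors (e.g. PPO(..., **policy_model_kwargs()) or DQN(..., **dqn_model_kwargs())),
# but they are not used directly in the evaluation code below.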
def encapsule_env_factory(env_fctry, env_kwrgs):
def _init():
with env_fctry(**env_kwrgs) as init_env:
return init_env
return _init
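# The closure returns a zero-argument initializer; this is the pattern expected by vectorized-env
# helpers such as SB3's SubprocVecEnv, which take a list of such callables.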
if __name__ == '__main__':
# Define Global Env Parameters
# Define properties object parameters
factory_kwargs = dict(
max_steps=400, parse_doors=True,
level_name='rooms',
doors_have_area=True, verbose=False,
mv_prop=MovementProperties(allow_diagonal_movement=True,
allow_square_movement=True,
allow_no_op=False),
obs_prop=ObservationProperties(
frames_to_stack=3,
cast_shadows=True,
omit_agent_self=True,
render_agents=AgentRenderOptions.LEVEL,
additional_agent_placeholder=None,
)
)
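# Note: these global kwargs are merged with each run's stored env kwargs in the combination loop
# below; conflicting values are caught by an assertion there.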
# Map the env names (as they appear in the run folder names) to their factory classes
# TODO: find a better solution, like auto module loading
env_map = {'DirtFactory': DirtFactory,
'ItemFactory': ItemFactory,
'DestFactory': DestFactory,
'DestBatteryFactory': DestBatteryFactory
}
env_names = list(env_map.keys())
# Put all your multi-seed agents in a single folder; we do not need specific names etc.
available_models = dict()
available_envs = dict()
available_runs_kwargs = dict()
available_runs_agents = dict()
max_seed = 0
# Define the folder that holds all trained combinations
combinations_path = Path('combinations')
# These are all differently trained combinations of models, envs and parameters
for combination in (x for x in combinations_path.iterdir() if x.is_dir()):
# These are all the models for this specific combination
for model_run in (x for x in combination.iterdir() if x.is_dir()):
model_name, env_name = model_run.name.split('_')[:2]
if model_name not in available_models:
available_models[model_name] = h.MODEL_MAP[model_name]
if env_name not in available_envs:
available_envs[env_name] = env_map[env_name]
# Those are all available seeds
for seed_run in (x for x in model_run.iterdir() if x.is_dir()):
max_seed = max(int(seed_run.name.split('_')[0]), max_seed)
# Read the env configuration from disk
with next(seed_run.glob('env_params.json')).open('r') as f:
env_kwargs = simplejson.load(f)
available_runs_kwargs[seed_run.name] = env_kwargs
# Read the trained model path from disk
model_path = next(seed_run.glob('model.zip'))
available_runs_agents[seed_run.name] = model_path
# We start by combining all SAME MODEL CLASSES per available Seed, across ALL available ENVIRONMENTS.
for model_name, model_cls in available_models.items():
for seed in range(max_seed + 1):  # + 1 so the highest seed index found is included
combined_env_kwargs = dict()
model_paths = list()
comparable_runs = {key: val for key, val in available_runs_kwargs.items() if (
key.startswith(str(seed)) and model_name in key and key != 'key')
}
for name, run_kwargs in comparable_runs.items():
# Select trained agent as a candidate:
model_paths.append(available_runs_agents[name])
# Sort Env Kwargs:
for key, val in run_kwargs.items():
if key not in combined_env_kwargs:
combined_env_kwargs[key] = val
else:
assert combined_env_kwargs[key] == val, "Check the combinations you try to make!"
# Update and combine all kwargs to account for multiple agents etc.
# We cannot capture all configuration cases!
for key, val in factory_kwargs.items():
if key not in combined_env_kwargs:
combined_env_kwargs[key] = val
else:
assert combined_env_kwargs[key] == val
combined_env_kwargs.update(n_agents=len(comparable_runs))
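# Build a combined env class on the fly: type("CombinedEnv", bases, {}) creates a new class that
# inherits from every collected factory class, so a single env instance exposes all of their
# mechanics (e.g. dirt, items, destinations, batteries) at once.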
with(type("CombinedEnv", tuple(available_envs.values()), {})(**combined_env_kwargs)) as combEnv:
# EnvMonitor Init
comb = f'comb_{model_name}_{seed}'
comb_monitor_path = combinations_path / comb / f'{comb}_monitor.pick'
comb_recorder_path = combinations_path / comb / f'{comb}_recorder.pick'
comb_monitor_path.parent.mkdir(parents=True, exist_ok=True)
monitoredCombEnv = EnvMonitor(combEnv, filepath=comb_monitor_path)
# monitoredCombEnv = EnvRecorder(monitoredCombEnv, filepath=comb_recorder_path)
# Evaluation starts here #####################################################
# Load all models
loaded_models = [available_models[model_name].load(model_path) for model_path in model_paths]
obs_translators = ObservationTranslator(
monitoredCombEnv.named_observation_space,
*[agent.named_observation_space for agent in loaded_models],
placeholder_fill_value='n')
act_translators = ActionTranslator(
monitoredCombEnv.named_action_space,
*(agent.named_action_space for agent in loaded_models)
)
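# The translators map between the combined env's named observation/action spaces and the smaller
# spaces each agent was trained on, so policies trained in different single-task envs can act
# together in the combined env.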
for episode in range(50):
obs, _ = monitoredCombEnv.reset(), monitoredCombEnv.render()
rew, done_bool = 0, False
while not done_bool:
actions = []
for i, model in enumerate(loaded_models):
pred = model.predict(obs_translators.translate_observation(i, obs[i]))[0]
actions.append(act_translators.translate_action(i, pred))
obs, step_r, done_bool, info_obj = monitoredCombEnv.step(actions)
rew += step_r
monitoredCombEnv.render()
if done_bool:
break
print(f'Factory run {episode} done, reward is:\n {rew}')
# Eval monitor outputs are automatically stored by the monitor object
# TODO: Plotting
monitoredCombEnv.save_records(comb_monitor_path)
monitoredCombEnv.save_run()
pass

View File

@@ -0,0 +1,203 @@
import sys
import time
from pathlib import Path
import simplejson
import stable_baselines3 as sb3
# This is needed when you put this file in a subfolder.
try:
# noinspection PyUnboundLocalVariable
if __package__ is None:
DIR = Path(__file__).resolve().parent
sys.path.insert(0, str(DIR.parent))
__package__ = DIR.name
else:
DIR = None
except NameError:
DIR = None
pass
from environments import helpers as h
from environments.factory.additional.dest.dest_util import DestModeOptions, DestProperties
from environments.factory.additional.btry.btry_util import BatteryProperties
from environments.logging.envmonitor import EnvMonitor
from environments.logging.recorder import EnvRecorder
from environments.factory.additional.combined_factories import DestBatteryFactory
from environments.utility_classes import MovementProperties, ObservationProperties, AgentRenderOptions
from plotting.compare_runs import compare_seed_runs
"""
Welcome to this quick start file. Here we will see how to:
0. Setup I/O Paths
1. Setup parameters for the environments (dest-battery-factory).
2. Setup parameters for the agent training (SB3: PPO) and save metrics.
Run the training.
3. Save env and agent for later analysis.
4. Load the agent from drive
5. Rendering the env with a run of the trained agent.
6. Plot metrics
"""
if __name__ == '__main__':
#########################################################
# 0. Setup I/O Paths
# Define some general parameters
train_steps = 1e6
n_seeds = 3
model_class = sb3.PPO
env_class = DestBatteryFactory
env_params_json = 'env_params.json'
# Define a global study save path
start_time = int(time.time())
study_root_path = Path(__file__).parent.parent / 'study_out' / f'{Path(__file__).stem}_{start_time}'
# Create an identifier that is unique for every combination and easy to read in the filesystem
identifier = f'{model_class.__name__}_{env_class.__name__}_{start_time}'
exp_path = study_root_path / identifier
#########################################################
# 1. Setup parameters for the environments (dest-battery-factory).
# Define property object parameters.
# 'ObservationProperties' are for specifying how the agent sees the env.
obs_props = ObservationProperties(render_agents=AgentRenderOptions.NOT, # Agents won't be shown in the obs at all
omit_agent_self=True, # This is the default
additional_agent_placeholder=None, # We will not reserve a placeholder for future agents
frames_to_stack=3, # To give the agent a notion of time
pomdp_r=2 # the agent's view radius
)
# 'MovementProperties' are for specifying how the agent is allowed to move in the env.
move_props = MovementProperties(allow_diagonal_movement=True, # Euclidean style (vertices)
allow_square_movement=True, # Manhattan (edges)
allow_no_op=False) # Pause movement (do nothing)
# 'DestProperties' control if and how destinations are spawned
# TODO: Comments
dest_props = DestProperties(
n_dests = 2, # How many destinations are there
dwell_time = 0, # How long does the agent need to "wait" on a destination
spawn_frequency = 0,
spawn_in_other_zone = True, #
spawn_mode = DestModeOptions.DONE,
)
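# 'BatteryProperties' add a battery/charging mechanic (description inferred from the parameter names):
# agents start with `initial_charge`, pay `per_action_costs` per step, recharge at one of
# `charge_locations`, and the episode ends on an empty battery if `done_when_discharged` is set.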
btry_props = BatteryProperties(
initial_charge = 0.9, #
charge_rate = 0.4, #
charge_locations = 3, #
per_action_costs = 0.01,
done_when_discharged = True,
multi_charge = False,
)
# These are the env kwargs for initializing the env class, holding all the parameter classes defined above
# TODO: Comments
factory_kwargs = dict(n_agents=1,
max_steps=400,
parse_doors=True,
level_name='rooms',
doors_have_area=True, #
verbose=False,
mv_prop=move_props, # See Above
obs_prop=obs_props, # See Above
done_at_collision=True,
dest_prop=dest_props,
btry_prop=btry_props
)
#########################################################
# 2. Setup parameters for the agent training (SB3: PPO) and save metrics.
agent_kwargs = dict()
#########################################################
# Run the Training
for seed in range(n_seeds):
# Make a copy if you want to alter things in the training loop; like the seed.
env_kwargs = factory_kwargs.copy()
env_kwargs.update(env_seed=seed)
# Output folder
seed_path = exp_path / f'{str(seed)}_{identifier}'
seed_path.mkdir(parents=True, exist_ok=True)
# Parameter Storage
param_path = seed_path / env_params_json
# Observation (measures) Storage
monitor_path = seed_path / 'monitor.pick'
recorder_path = seed_path / 'recorder.json'
# Model save Path for the trained model
model_save_path = seed_path / f'model.zip'
# Env Init & Model kwargs definition
with env_class(**env_kwargs) as env_factory:
# EnvMonitor Init
env_monitor_callback = EnvMonitor(env_factory)
# EnvRecorder Init
env_recorder_callback = EnvRecorder(env_factory, freq=int(train_steps / 400 / 10))
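# Assumption: `freq` throttles how often the recorder stores a full episode; the value
# train_steps / 400 / 10 (= 250 for these settings) keeps the number of recorded episodes small.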
# Model Init
model = model_class("MlpPolicy", env_factory, verbose=1, seed=seed, device='cpu')
# Model train
model.learn(total_timesteps=int(train_steps), callback=[env_monitor_callback, env_recorder_callback])
#########################################################
# 3. Save env and agent for later analysis.
# Save the trained Model, the monitor (env measures) and the env parameters
model.named_observation_space = env_factory.named_observation_space
model.named_action_space = env_factory.named_action_space
model.save(model_save_path)
env_factory.save_params(param_path)
env_monitor_callback.save_run(monitor_path)
env_recorder_callback.save_records(recorder_path, save_occupation_map=False)
# Compare performance runs, for each seed within a model
try:
compare_seed_runs(exp_path, use_tex=False)
except ValueError:
pass
# Train ends here ############################################################
# Evaluation starts here #####################################################
# First Iterate over every model and monitor "as trained"
print('Start Measurement Tracking')
# For trained policy in study_root_path / identifier
for policy_path in [x for x in exp_path.iterdir() if x.is_dir()]:
# retrieve model class
model_cls = next(val for key, val in h.MODEL_MAP.items() if key in policy_path.parent.name)
# Load the trained agent
model = model_cls.load(policy_path / 'model.zip', device='cpu')
# Load old env kwargs
with next(policy_path.glob(env_params_json)).open('r') as f:
env_kwargs = simplejson.load(f)
# Make the env stop at collisions
# (you only want a single collision per episode for the statistics)
env_kwargs.update(done_at_collision=True)
# Init Env
with env_class(**env_kwargs) as env_factory:
monitored_env_factory = EnvMonitor(env_factory)
# Evaluation loop: run n episodes
for episode in range(100):
# noinspection PyRedeclaration
env_state = monitored_env_factory.reset()
rew, done_bool = 0, False
while not done_bool:
action = model.predict(env_state, deterministic=True)[0]
env_state, step_r, done_bool, info_obj = monitored_env_factory.step(action)
rew += step_r
if done_bool:
break
print(f'Factory run {episode} done, reward is:\n {rew}')
monitored_env_factory.save_run(filepath=policy_path / 'eval_run_monitor.pick')
print('Measurements Done')

View File

@@ -0,0 +1,193 @@
import sys
import time
from pathlib import Path
import simplejson
import stable_baselines3 as sb3
# This is needed when you put this file in a subfolder.
try:
# noinspection PyUnboundLocalVariable
if __package__ is None:
DIR = Path(__file__).resolve().parent
sys.path.insert(0, str(DIR.parent))
__package__ = DIR.name
else:
DIR = None
except NameError:
DIR = None
pass
from environments import helpers as h
from environments.factory.additional.dest.dest_util import DestModeOptions, DestProperties
from environments.logging.envmonitor import EnvMonitor
from environments.logging.recorder import EnvRecorder
from environments.factory.additional.dest.factory_dest import DestFactory
from environments.utility_classes import MovementProperties, ObservationProperties, AgentRenderOptions
from plotting.compare_runs import compare_seed_runs
"""
Welcome to this quick start file. Here we will see how to:
0. Setup I/O Paths
1. Setup parameters for the environments (dest-factory).
2. Setup parameters for the agent training (SB3: PPO) and save metrics.
Run the training.
3. Save env and agent for later analysis.
4. Load the agent from drive
5. Rendering the env with a run of the trained agent.
6. Plot metrics
"""
if __name__ == '__main__':
#########################################################
# 0. Setup I/O Paths
# Define some general parameters
train_steps = 1e6
n_seeds = 3
model_class = sb3.PPO
env_class = DestFactory
env_params_json = 'env_params.json'
# Define a global study save path
start_time = int(time.time())
study_root_path = Path(__file__).parent.parent / 'study_out' / f'{Path(__file__).stem}_{start_time}'
# Create an identifier that is unique for every combination and easy to read in the filesystem
identifier = f'{model_class.__name__}_{env_class.__name__}_{start_time}'
exp_path = study_root_path / identifier
#########################################################
# 1. Setup parameters for the environments (dest-factory).
# Define property object parameters.
# 'ObservationProperties' are for specifying how the agent sees the env.
obs_props = ObservationProperties(render_agents=AgentRenderOptions.NOT, # Agents won't be shown in the obs at all
omit_agent_self=True, # This is the default
additional_agent_placeholder=None, # We will not reserve a placeholder for future agents
frames_to_stack=3, # To give the agent a notion of time
pomdp_r=2 # the agent's view radius
)
# 'MovementProperties' are for specifying how the agent is allowed to move in the env.
move_props = MovementProperties(allow_diagonal_movement=True, # Euclidean style (vertices)
allow_square_movement=True, # Manhattan (edges)
allow_no_op=False) # Pause movement (do nothing)
# 'DestProperties' control if and how destinations are spawned
# TODO: Comments
dest_props = DestProperties(
n_dests = 2, # How many destinations are there
dwell_time = 0, # How long does the agent need to "wait" on a destination
spawn_frequency = 0,
spawn_in_other_zone = True, #
spawn_mode = DestModeOptions.DONE,
)
# These are the env kwargs for initializing the env class, holding all the parameter classes defined above
# TODO: Comments
factory_kwargs = dict(n_agents=1,
max_steps=400,
parse_doors=True,
level_name='rooms',
doors_have_area=True, #
verbose=False,
mv_prop=move_props, # See Above
obs_prop=obs_props, # See Above
done_at_collision=True,
dest_prop=dest_props
)
#########################################################
# 2. Setup parameters for the agent training (SB3: PPO) and save metrics.
agent_kwargs = dict()
#########################################################
# Run the Training
for seed in range(n_seeds):
# Make a copy if you want to alter things in the training loop; like the seed.
env_kwargs = factory_kwargs.copy()
env_kwargs.update(env_seed=seed)
# Output folder
seed_path = exp_path / f'{str(seed)}_{identifier}'
seed_path.mkdir(parents=True, exist_ok=True)
# Parameter Storage
param_path = seed_path / env_params_json
# Observation (measures) Storage
monitor_path = seed_path / 'monitor.pick'
recorder_path = seed_path / 'recorder.json'
# Model save Path for the trained model
model_save_path = seed_path / f'model.zip'
# Env Init & Model kwargs definition
with env_class(**env_kwargs) as env_factory:
# EnvMonitor Init
env_monitor_callback = EnvMonitor(env_factory)
# EnvRecorder Init
env_recorder_callback = EnvRecorder(env_factory, freq=int(train_steps / 400 / 10))
# Model Init
model = model_class("MlpPolicy", env_factory,verbose=1, seed=seed, device='cpu')
# Model train
model.learn(total_timesteps=int(train_steps), callback=[env_monitor_callback, env_recorder_callback])
#########################################################
# 3. Save env and agent for later analysis.
# Save the trained Model, the monitor (env measures) and the env parameters
model.named_observation_space = env_factory.named_observation_space
model.named_action_space = env_factory.named_action_space
model.save(model_save_path)
env_factory.save_params(param_path)
env_monitor_callback.save_run(monitor_path)
env_recorder_callback.save_records(recorder_path, save_occupation_map=False)
# Compare performance runs, for each seed within a model
try:
compare_seed_runs(exp_path, use_tex=False)
except ValueError:
pass
# Train ends here ############################################################
# Evaluation starts here #####################################################
# First Iterate over every model and monitor "as trained"
print('Start Measurement Tracking')
# For trained policy in study_root_path / identifier
for policy_path in [x for x in exp_path.iterdir() if x.is_dir()]:
# retrieve model class
model_cls = next(val for key, val in h.MODEL_MAP.items() if key in policy_path.parent.name)
# Load the trained agent
model = model_cls.load(policy_path / 'model.zip', device='cpu')
# Load old env kwargs
with next(policy_path.glob(env_params_json)).open('r') as f:
env_kwargs = simplejson.load(f)
# Make the env stop at collisions
# (you only want a single collision per episode for the statistics)
env_kwargs.update(done_at_collision=True)
# Init Env
with env_class(**env_kwargs) as env_factory:
monitored_env_factory = EnvMonitor(env_factory)
# Evaluation loop: run n episodes
for episode in range(100):
# noinspection PyRedeclaration
env_state = monitored_env_factory.reset()
rew, done_bool = 0, False
while not done_bool:
action = model.predict(env_state, deterministic=True)[0]
env_state, step_r, done_bool, info_obj = monitored_env_factory.step(action)
rew += step_r
if done_bool:
break
print(f'Factory run {episode} done, reward is:\n {rew}')
monitored_env_factory.save_run(filepath=policy_path / 'eval_run_monitor.pick')
print('Measurements Done')

View File

@@ -1,11 +1,12 @@
import sys
import time
from pathlib import Path
from matplotlib import pyplot as plt
import itertools as it
import simplejson
import stable_baselines3 as sb3
# This is needed, when you put this file in a subfolder.
try:
# noinspection PyUnboundLocalVariable
if __package__ is None:
@@ -18,19 +19,14 @@ except NameError:
DIR = None
pass
import simplejson
from stable_baselines3.common.vec_env import SubprocVecEnv
from environments import helpers as h
from environments.factory.factory_dirt import DirtProperties, DirtFactory
from environments.logging.envmonitor import EnvMonitor
from environments.logging.recorder import EnvRecorder
from environments.factory.additional.dirt.dirt_util import DirtProperties
from environments.factory.additional.dirt.factory_dirt import DirtFactory
from environments.utility_classes import MovementProperties, ObservationProperties, AgentRenderOptions
import pickle
from plotting.compare_runs import compare_seed_runs, compare_model_runs
import pandas as pd
import seaborn as sns
import multiprocessing as mp
from plotting.compare_runs import compare_seed_runs
"""
Welcome to this quick start file. Here we will see how to:
@@ -53,6 +49,8 @@ if __name__ == '__main__':
model_class = sb3.PPO
env_class = DirtFactory
env_params_json = 'env_params.json'
# Define a global studi save path
start_time = int(time.time())
study_root_path = Path(__file__).parent.parent / 'study_out' / f'{Path(__file__).stem}_{start_time}'
@@ -100,7 +98,7 @@ if __name__ == '__main__':
mv_prop=move_props, # See Above
obs_prop=obs_props, # See Above
done_at_collision=True,
dirt_props=dirt_props
dirt_prop=dirt_props
)
#########################################################
@@ -120,30 +118,37 @@ if __name__ == '__main__':
seed_path.mkdir(parents=True, exist_ok=True)
# Parameter Storage
param_path = seed_path / f'env_params.json'
param_path = seed_path / env_params_json
# Observation (measures) Storage
monitor_path = seed_path / 'monitor.pick'
recorder_path = seed_path / 'recorder.json'
# Model save Path for the trained model
model_save_path = seed_path / f'model.zip'
# Env Init & Model kwargs definition
with DirtFactory(env_kwargs) as env_factory:
with env_class(**env_kwargs) as env_factory:
# EnvMonitor Init
env_monitor_callback = EnvMonitor(env_factory)
# EnvRecorder Init
env_recorder_callback = EnvRecorder(env_factory, freq=int(train_steps / 400 / 10))
# Model Init
model = model_class("MlpPolicy", env_factory,verbose=1, seed=seed, device='cpu')
# Model train
model.learn(total_timesteps=int(train_steps), callback=[env_monitor_callback])
model.learn(total_timesteps=int(train_steps), callback=[env_monitor_callback, env_recorder_callback])
#########################################################
# 3. Save env and agent for later analysis.
# Save the trained Model, the monitor (env measures) and the env parameters
model.named_observation_space = env_factory.named_observation_space
model.named_action_space = env_factory.named_action_space
model.save(model_save_path)
env_factory.save_params(param_path)
env_monitor_callback.save_run(monitor_path)
env_recorder_callback.save_records(recorder_path, save_occupation_map=False)
# Compare performance runs, for each seed within a model
try:
@@ -164,18 +169,19 @@ if __name__ == '__main__':
# Load the agent agent
model = model_cls.load(policy_path / 'model.zip', device='cpu')
# Load old env kwargs
with next(policy_path.glob('*.json')).open('r') as f:
with next(policy_path.glob(env_params_json)).open('r') as f:
env_kwargs = simplejson.load(f)
# Make the env stop ar collisions
# (you only want to have a single collision per episode hence the statistics)
env_kwargs.update(done_at_collision=True)
# Init Env
with env_to_run(**env_kwargs) as env_factory:
with env_class(**env_kwargs) as env_factory:
monitored_env_factory = EnvMonitor(env_factory)
# Evaluation Loop for i in range(n Episodes)
for episode in range(100):
# noinspection PyRedeclaration
env_state = monitored_env_factory.reset()
rew, done_bool = 0, False
while not done_bool:
@@ -185,8 +191,5 @@ if __name__ == '__main__':
if done_bool:
break
print(f'Factory run {episode} done, reward is:\n {rew}')
monitored_env_factory.save_run(filepath=policy_path / f'{baseline_monitor_file}.pick')
# for policy_path in (y for y in policy_path.iterdir() if y.is_dir()):
# load_model_run_baseline(policy_path)
monitored_env_factory.save_run(filepath=policy_path / 'eval_run_monitor.pick')
print('Measurements Done')

View File

@@ -0,0 +1,191 @@
import sys
import time
from pathlib import Path
import simplejson
import stable_baselines3 as sb3
# This is needed when you put this file in a subfolder.
try:
# noinspection PyUnboundLocalVariable
if __package__ is None:
DIR = Path(__file__).resolve().parent
sys.path.insert(0, str(DIR.parent))
__package__ = DIR.name
else:
DIR = None
except NameError:
DIR = None
pass
from environments import helpers as h
from environments.factory.additional.item.factory_item import ItemFactory
from environments.factory.additional.item.item_util import ItemProperties
from environments.logging.envmonitor import EnvMonitor
from environments.logging.recorder import EnvRecorder
from environments.utility_classes import MovementProperties, ObservationProperties, AgentRenderOptions
from plotting.compare_runs import compare_seed_runs
"""
Welcome to this quick start file. Here we will see how to:
0. Setup I/O Paths
1. Setup parameters for the environments (item-factory).
2. Setup parameters for the agent training (SB3: PPO) and save metrics.
Run the training.
3. Save env and agent for later analysis.
4. Load the agent from drive
5. Rendering the env with a run of the trained agent.
6. Plot metrics
"""
if __name__ == '__main__':
#########################################################
# 0. Setup I/O Paths
# Define some general parameters
train_steps = 1e6
n_seeds = 3
model_class = sb3.PPO
env_class = ItemFactory
env_params_json = 'env_params.json'
# Define a global study save path
start_time = int(time.time())
study_root_path = Path(__file__).parent.parent / 'study_out' / f'{Path(__file__).stem}_{start_time}'
# Create an identifier that is unique for every combination and easy to read in the filesystem
identifier = f'{model_class.__name__}_{env_class.__name__}_{start_time}'
exp_path = study_root_path / identifier
#########################################################
# 1. Setup parameters for the environments (item-factory).
#
# Define property object parameters.
# 'ObservationProperties' are for specifying how the agent sees the env.
obs_props = ObservationProperties(render_agents=AgentRenderOptions.NOT, # Agents won't be shown in the obs at all
omit_agent_self=True, # This is the default
additional_agent_placeholder=None, # We will not reserve a placeholder for future agents
frames_to_stack=3, # To give the agent a notion of time
pomdp_r=2 # the agent's view radius
)
# 'MovementProperties' are for specifying how the agent is allowed to move in the env.
move_props = MovementProperties(allow_diagonal_movement=True, # Euclidean style (vertices)
allow_square_movement=True, # Manhattan (edges)
allow_no_op=False) # Pause movement (do nothing)
# 'ItemProperties' control if and how items are spawned
# TODO: Comments
item_props = ItemProperties(
n_items = 7, # How many items are there at the same time
spawn_frequency = 50, # Spawn Frequency in Steps
n_drop_off_locations = 10, # How many DropOff locations are there at the same time
max_dropoff_storage_size = 0, # How many items fit into a drop-off location until it is full
max_agent_inventory_capacity = 5, # How many items the agent inventory can hold
)
# These are the env kwargs for initializing the env class, holding all the parameter classes defined above
# TODO: Comments
factory_kwargs = dict(n_agents=1,
max_steps=400,
parse_doors=True,
level_name='rooms',
doors_have_area=True, #
verbose=False,
mv_prop=move_props, # See Above
obs_prop=obs_props, # See Above
done_at_collision=True,
item_prop=item_props
)
#########################################################
# 2. Setup parameters for the agent training (SB3: PPO) and save metrics.
agent_kwargs = dict()
#########################################################
# Run the Training
for seed in range(n_seeds):
# Make a copy if you want to alter things in the training loop; like the seed.
env_kwargs = factory_kwargs.copy()
env_kwargs.update(env_seed=seed)
# Output folder
seed_path = exp_path / f'{str(seed)}_{identifier}'
seed_path.mkdir(parents=True, exist_ok=True)
# Parameter Storage
param_path = seed_path / env_params_json
# Observation (measures) Storage
monitor_path = seed_path / 'monitor.pick'
recorder_path = seed_path / 'recorder.json'
# Model save Path for the trained model
model_save_path = seed_path / f'model.zip'
# Env Init & Model kwargs definition
with ItemFactory(**env_kwargs) as env_factory:
# EnvMonitor Init
env_monitor_callback = EnvMonitor(env_factory)
# EnvRecorder Init
env_recorder_callback = EnvRecorder(env_factory, freq=int(train_steps / 400 / 10))
# Model Init
model = model_class("MlpPolicy", env_factory,verbose=1, seed=seed, device='cpu')
# Model train
model.learn(total_timesteps=int(train_steps), callback=[env_monitor_callback, env_recorder_callback])
#########################################################
# 3. Save env and agent for later analysis.
# Save the trained Model, the monitor (env measures) and the env parameters
model.named_observation_space = env_factory.named_observation_space
model.named_action_space = env_factory.named_action_space
model.save(model_save_path)
env_factory.save_params(param_path)
env_monitor_callback.save_run(monitor_path)
env_recorder_callback.save_records(recorder_path, save_occupation_map=False)
# Compare performance runs, for each seed within a model
try:
compare_seed_runs(exp_path, use_tex=False)
except ValueError:
pass
# Train ends here ############################################################
# Evaluation starts here #####################################################
# First Iterate over every model and monitor "as trained"
print('Start Measurement Tracking')
# For trained policy in study_root_path / identifier
for policy_path in [x for x in exp_path.iterdir() if x.is_dir()]:
# retrieve model class
model_cls = next(val for key, val in h.MODEL_MAP.items() if key in policy_path.parent.name)
# Load the trained agent
model = model_cls.load(policy_path / 'model.zip', device='cpu')
# Load old env kwargs
with next(policy_path.glob(env_params_json)).open('r') as f:
env_kwargs = simplejson.load(f)
# Make the env stop at collisions
# (you only want a single collision per episode for the statistics)
env_kwargs.update(done_at_collision=True)
# Init Env
with ItemFactory(**env_kwargs) as env_factory:
monitored_env_factory = EnvMonitor(env_factory)
# Evaluation loop: run n episodes
for episode in range(100):
# noinspection PyRedeclaration
env_state = monitored_env_factory.reset()
rew, done_bool = 0, False
while not done_bool:
action = model.predict(env_state, deterministic=True)[0]
env_state, step_r, done_bool, info_obj = monitored_env_factory.step(action)
rew += step_r
if done_bool:
break
print(f'Factory run {episode} done, reward is:\n {rew}')
monitored_env_factory.save_run(filepath=policy_path / 'eval_run_monitor.pick')
print('Measurements Done')