Apply suggestions from code review.

This commit is contained in:
Marek Wolan
2024-02-26 16:58:43 +00:00
parent f9cc5af7aa
commit 33d2ecc26a
2 changed files with 8 additions and 2 deletions

View File

@@ -7,4 +7,4 @@ RL environments are the objects that directly interface with RL libraries such a
* Ray Single agent API - For training a single Ray RLLib agent
* Ray MARL API - For training multi-agent systems with Ray RLLib. ``PrimaiteRayMARLEnv`` adheres to the `Official Ray documentation <https://docs.ray.io/en/latest/rllib/package_ref/env/multi_agent_env.html>`_.
There is a Jupyter notebook which demonstrates integration with each of these three environments. They are located in ``~/primaite/<VERSION>/notebooks/example_notebooks``.
There are Jupyter notebooks which demonstrate integration with each of these three environments. They are located in ``~/primaite/<VERSION>/notebooks/example_notebooks``.

View File

@@ -1,11 +1,17 @@
# PrimAITE training configuration.
# NOTE(review): indentation was lost in the source capture; nesting below is
# reconstructed from the diff context (@@ -1,11 +1,17 @@) — confirm against the
# original file in the repository.
training_config:
  rl_framework: RLLIB_multi_agent
  # rl_framework: SB3
  rl_algorithm: PPO
  seed: 333
  n_learn_episodes: 1
  n_eval_episodes: 5
  max_steps_per_episode: 256
  deterministic_eval: false
  # Multi-agent setup: two defender agents trained via RLlib's MARL API.
  n_agents: 2
  agent_references:
    - defender_1
    - defender_2

# Checkpointing: save model state every 5 episodes.
# NOTE(review): presumed to be a top-level stanza (sibling of training_config),
# consistent with the +17-line hunk size — verify against the consumer's schema.
io_settings:
  save_checkpoints: true
  checkpoint_interval: 5