Update to make things work with new layout

This commit is contained in:
Marek Wolan
2023-11-22 12:59:33 +00:00
parent afd64e4674
commit 1138644a4b
4 changed files with 514 additions and 564 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,12 +1,10 @@
from typing import Any, Dict, List, Optional, SupportsFloat, Tuple, TYPE_CHECKING
from typing import Any, Dict, List, Optional, SupportsFloat, Tuple
import gymnasium
from gymnasium.core import ActType, ObsType
from primaite.game.agent.interface import ProxyAgent
if TYPE_CHECKING:
from primaite.game.game import PrimaiteGame
from primaite.game.game import PrimaiteGame
class PrimaiteGymEnv(gymnasium.Env):
@@ -17,10 +15,10 @@ class PrimaiteGymEnv(gymnasium.Env):
assumptions about the agent list always having a list of length 1.
"""
def __init__(self, session: "PrimaiteGame", agents: List[ProxyAgent]):
def __init__(self, game: PrimaiteGame, agents: List[ProxyAgent]):
"""Initialise the environment."""
super().__init__()
self.session: "PrimaiteGame" = session
self.game: "PrimaiteGame" = game
self.agent: ProxyAgent = agents[0]
def step(self, action: ActType) -> Tuple[ObsType, SupportsFloat, bool, bool, Dict[str, Any]]:
@@ -28,24 +26,24 @@ class PrimaiteGymEnv(gymnasium.Env):
# make ProxyAgent store the action chosen by the RL policy
self.agent.store_action(action)
# apply_agent_actions accesses the action we just stored
self.session.apply_agent_actions()
self.session.advance_timestep()
state = self.session.get_sim_state()
self.session.update_agents(state)
self.game.apply_agent_actions()
self.game.advance_timestep()
state = self.game.get_sim_state()
self.game.update_agents(state)
next_obs = self._get_obs()
reward = self.agent.reward_function.current_reward
terminated = False
truncated = self.session.calculate_truncated()
truncated = self.game.calculate_truncated()
info = {}
return next_obs, reward, terminated, truncated, info
def reset(self, seed: Optional[int] = None) -> Tuple[ObsType, Dict[str, Any]]:
"""Reset the environment."""
self.session.reset()
state = self.session.get_sim_state()
self.session.update_agents(state)
self.game.reset()
state = self.game.get_sim_state()
self.game.update_agents(state)
next_obs = self._get_obs()
info = {}
return next_obs, info

View File

@@ -177,7 +177,7 @@ class PrimaiteGame:
:rtype: PrimaiteGame
"""
game = cls()
game.options = PrimaiteGameOptions(cfg["game"])
game.options = PrimaiteGameOptions(**cfg["game"])
# 1. create simulation
sim = game.simulation
@@ -305,8 +305,7 @@ class PrimaiteGame:
game.ref_map_links[link_cfg["ref"]] = new_link.uuid
# 3. create agents
game_cfg = cfg["game_config"]
agents_cfg = game_cfg["agents"]
agents_cfg = cfg["agents"]
for agent_cfg in agents_cfg:
agent_ref = agent_cfg["ref"] # noqa: F841

View File

@@ -4,19 +4,7 @@
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/cade/repos/PrimAITE/venv/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n",
"2023-11-18 09:06:45,876\tINFO util.py:159 -- Missing packages: ['ipywidgets']. Run `pip install -U ipywidgets`, then restart the notebook server for rich notebook output.\n",
"2023-11-18 09:06:48,446\tINFO util.py:159 -- Missing packages: ['ipywidgets']. Run `pip install -U ipywidgets`, then restart the notebook server for rich notebook output.\n",
"2023-11-18 09:06:48,692\tWARNING __init__.py:10 -- PG has/have been moved to `rllib_contrib` and will no longer be maintained by the RLlib team. You can still use it/them normally inside RLlib util Ray 2.8, but from Ray 2.9 on, all `rllib_contrib` algorithms will no longer be part of the core repo, and will therefore have to be installed separately with pinned dependencies for e.g. ray[rllib] and other packages! See https://github.com/ray-project/ray/tree/master/rllib_contrib#rllib-contrib for more information on the RLlib contrib effort.\n"
]
}
],
"outputs": [],
"source": [
"from primaite.game.game import PrimaiteGame\n",
"from primaite.game.environment import PrimaiteGymEnv\n",
@@ -56,7 +44,7 @@
"with open(example_config_path(), 'r') as f:\n",
" cfg = yaml.safe_load(f)\n",
"\n",
"sess = PrimaiteGame.from_config(cfg)"
"game = PrimaiteGame.from_config(cfg)"
]
},
{
@@ -65,44 +53,8 @@
"metadata": {},
"outputs": [],
"source": [
"sess.env = PrimaiteGymEnv(session=sess, agents=sess.rl_agents)"
"gym = PrimaiteGymEnv(game=game, agents=game.rl_agents)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"env = sess.env"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<primaite.game.environment.PrimaiteGymEnv at 0x7fad7190d7b0>"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"env"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {