Put in common gnn features extractor between ray.rllib and sb3 wrappers #463

Merged · 1 commit · Jan 31, 2025
2 changes: 1 addition & 1 deletion skdecide/hub/solver/ray_rllib/gnn/models/torch/gnn.py
@@ -8,11 +8,11 @@
 from ray.rllib.utils.typing import ModelConfigDict
 from torch import nn

-from skdecide.hub.solver.ray_rllib.gnn.torch_layers import GraphFeaturesExtractor
 from skdecide.hub.solver.ray_rllib.gnn.utils.spaces.space_utils import (
     convert_dict_space_to_graph_space,
     is_graph_dict_space,
 )
+from skdecide.hub.solver.utils.gnn.torch_layers import GraphFeaturesExtractor


 class GnnBasedModel(TorchModelV2, nn.Module):
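For orientation, a minimal sketch of how a custom TorchModelV2 such as GnnBasedModel is typically registered with RLlib's model catalog. The registry key "gnn_model" and the empty custom_model_config are illustrative placeholders, not part of this PR:

from ray.rllib.models import ModelCatalog

from skdecide.hub.solver.ray_rllib.gnn.models.torch.gnn import GnnBasedModel

# Register the model under an arbitrary key, then reference it in the config.
ModelCatalog.register_custom_model("gnn_model", GnnBasedModel)

config = {
    "model": {
        "custom_model": "gnn_model",
        # Keys accepted here depend on GnnBasedModel's constructor,
        # which is not shown in this diff.
        "custom_model_config": {},
    },
}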
116 changes: 0 additions & 116 deletions skdecide/hub/solver/ray_rllib/gnn/policy/sample_batch.py

This file was deleted.

26 changes: 0 additions & 26 deletions skdecide/hub/solver/ray_rllib/gnn/policy/torch_mixins.py

This file was deleted.

25 changes: 1 addition & 24 deletions skdecide/hub/solver/ray_rllib/gnn/utils/torch_utils.py
@@ -15,30 +15,7 @@
     is_graph_dict_multiinput,
     is_masked_obs,
 )
-
-
-def graph_obs_to_thg_data(
-    obs: gym.spaces.GraphInstance,
-    device: Optional[th.device] = None,
-    pin_memory: bool = False,
-) -> thg.data.Data:
-    # Node features
-    flatten_node_features = obs.nodes.reshape((len(obs.nodes), -1))
-    x = th.tensor(flatten_node_features).float()
-    # Edge features
-    if obs.edges is None:
-        edge_attr = None
-    else:
-        flatten_edge_features = obs.edges.reshape((len(obs.edges), -1))
-        edge_attr = th.tensor(flatten_edge_features).float()
-    edge_index = th.tensor(obs.edge_links, dtype=th.long).t().contiguous().view(2, -1)
-    # thg.Data
-    data = thg.data.Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
-    # Pin the tensor's memory (for faster transfer to GPU later).
-    if pin_memory and th.cuda.is_available():
-        data.pin_memory()
-
-    return data if device is None else data.to(device)
+from skdecide.hub.solver.utils.gnn.torch_utils import graph_obs_to_thg_data


 def convert_to_torch_tensor(
86 changes: 17 additions & 69 deletions skdecide/hub/solver/stable_baselines/gnn/common/torch_layers.py
@@ -1,13 +1,13 @@
 from typing import Any, Optional, Union

 import gymnasium as gym
 import numpy as np
 import torch as th
 import torch_geometric as thg
 from stable_baselines3.common.preprocessing import get_flattened_obs_dim, is_image_space
 from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, NatureCNN
 from torch import nn
-from torch_geometric.nn import global_max_pool

+from skdecide.hub.solver.utils.gnn import torch_layers

 class GraphFeaturesExtractor(BaseFeaturesExtractor):
@@ -20,6 +20,10 @@ class GraphFeaturesExtractor(BaseFeaturesExtractor):
     - gnn: a 2-layer GCN
     - reduction layer: global_max_pool + linear layer + relu

+    This merely wraps `skdecide.hub.solver.utils.gnn.torch_layers.GraphFeaturesExtractor` to
+    make it a `stable_baselines3.common.torch_layers.BaseFeaturesExtractor`. See that class's
+    documentation for more details about its arguments.
+
     Args:
         observation_space:
         features_dim: Number of extracted features
@@ -45,75 +49,19 @@ def __init__(
         reduction_layer_class: Optional[type[nn.Module]] = None,
         reduction_layer_kwargs: Optional[dict[str, Any]] = None,
     ):
-
-        super().__init__(observation_space, features_dim=features_dim)
-
-        if gnn_out_dim is None:
-            if gnn_class is None:
-                gnn_out_dim = 2 * features_dim
-            else:
-                raise ValueError(
-                    "`gnn_out_dim` cannot be None if `gnn` is not None, "
-                    "and should match `gnn` output."
-                )
-
-        if gnn_class is None:
-            node_features_dim = int(np.prod(observation_space.node_space.shape))
-            self.gnn = thg.nn.models.GCN(
-                in_channels=node_features_dim,
-                hidden_channels=gnn_out_dim,
-                num_layers=2,
-                dropout=0.2,
-            )
-        else:
-            if gnn_kwargs is None:
-                gnn_kwargs = {}
-            self.gnn = gnn_class(**gnn_kwargs)
-
-        if reduction_layer_class is None:
-            self.reduction_layer = _DefaultReductionLayer(
-                gnn_out_dim=gnn_out_dim, features_dim=features_dim
-            )
-        else:
-            if reduction_layer_kwargs is None:
-                reduction_layer_kwargs = {}
-            self.reduction_layer = reduction_layer_class(**reduction_layer_kwargs)
+        super().__init__(observation_space=observation_space, features_dim=features_dim)
+        self._extractor = torch_layers.GraphFeaturesExtractor(
+            observation_space=observation_space,
+            features_dim=features_dim,
+            gnn_out_dim=gnn_out_dim,
+            gnn_class=gnn_class,
+            gnn_kwargs=gnn_kwargs,
+            reduction_layer_class=reduction_layer_class,
+            reduction_layer_kwargs=reduction_layer_kwargs,
+        )

     def forward(self, observations: thg.data.Data) -> th.Tensor:
-        x, edge_index, edge_attr, batch = (
-            observations.x,
-            observations.edge_index,
-            observations.edge_attr,
-            observations.batch,
-        )
-        # construct edge weights, for GNNs needing it, as the first edge feature
-        edge_weight = edge_attr[:, 0]
-        h = self.gnn(
-            x=x, edge_index=edge_index, edge_weight=edge_weight, edge_attr=edge_attr
-        )
-        embedded_observations = thg.data.Data(
-            x=h, edge_index=edge_index, edge_attr=edge_attr, batch=batch
-        )
-        h = self.reduction_layer(embedded_observations=embedded_observations)
-        return h
-
-
-class _DefaultReductionLayer(nn.Module):
-    def __init__(self, gnn_out_dim: int, features_dim: int):
-        super().__init__()
-        self.gnn_out_dim = gnn_out_dim
-        self.features_dim = features_dim
-        self.linear_layer = nn.Linear(gnn_out_dim, features_dim)
-
-    def forward(self, embedded_observations: thg.data.Data) -> th.Tensor:
-        x, edge_index, batch = (
-            embedded_observations.x,
-            embedded_observations.edge_index,
-            embedded_observations.batch,
-        )
-        h = global_max_pool(x, batch)
-        h = self.linear_layer(h).relu()
-        return h
+        return self._extractor.forward(observations=observations)


class CombinedFeaturesExtractor(BaseFeaturesExtractor):
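For orientation, a sketch of how this extractor is usually plugged into a stable-baselines3 policy via policy_kwargs; the features_dim value is an arbitrary example, and in practice a graph-capable algorithm from skdecide's sb3 wrappers is needed to feed it thg.data.Data observations:

from skdecide.hub.solver.stable_baselines.gnn.common.torch_layers import (
    GraphFeaturesExtractor,
)

# Standard sb3 mechanism: the extractor class and its kwargs are passed
# through policy_kwargs and instantiated with the observation space.
policy_kwargs = dict(
    features_extractor_class=GraphFeaturesExtractor,
    features_extractor_kwargs=dict(features_dim=64),
)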
18 changes: 2 additions & 16 deletions skdecide/hub/solver/stable_baselines/gnn/common/utils.py
@@ -6,6 +6,8 @@
 import torch as th
 import torch_geometric as thg

+from skdecide.hub.solver.utils.gnn.torch_utils import graph_obs_to_thg_data
+
 SubObsType = Union[np.ndarray, gym.spaces.GraphInstance, list[gym.spaces.GraphInstance]]
 ObsType = Union[SubObsType, dict[str, SubObsType]]
 TorchSubObsType = Union[th.Tensor, thg.data.Data]
@@ -27,22 +29,6 @@ def copy_np_array_or_list_of_graph_instances(
     return np.copy(obs)


-def graph_obs_to_thg_data(
-    obs: gym.spaces.GraphInstance, device: th.device
-) -> thg.data.Data:
-    # Node features
-    flatten_node_features = obs.nodes.reshape((len(obs.nodes), -1))
-    x = th.tensor(flatten_node_features).float()
-    # Edge features
-    if obs.edges is None:
-        edge_attr = None
-    else:
-        flatten_edge_features = obs.edges.reshape((len(obs.edges), -1))
-        edge_attr = th.tensor(flatten_edge_features).float()
-    edge_index = th.tensor(obs.edge_links, dtype=th.long).t().contiguous().view(2, -1)
-    return thg.data.Data(x=x, edge_index=edge_index, edge_attr=edge_attr).to(device)
-
-
 def obs_as_tensor(
     obs: ObsType,
     device: th.device,
Empty file.
Empty file.
@@ -77,7 +77,7 @@ def __init__(
             reduction_layer_kwargs = {}
         self.reduction_layer = reduction_layer_class(**reduction_layer_kwargs)

-    def forward(self, observations) -> th.Tensor:
+    def forward(self, observations: thg.data.Data) -> th.Tensor:
         x, edge_index, edge_attr, batch = (
             observations.x,
             observations.edge_index,
29 changes: 29 additions & 0 deletions skdecide/hub/solver/utils/gnn/torch_utils.py
@@ -0,0 +1,29 @@
+from typing import Optional
+
+import gymnasium as gym
+import torch as th
+import torch_geometric as thg
+
+
+def graph_obs_to_thg_data(
+    obs: gym.spaces.GraphInstance,
+    device: Optional[th.device] = None,
+    pin_memory: bool = False,
+) -> thg.data.Data:
+    # Node features
+    flatten_node_features = obs.nodes.reshape((len(obs.nodes), -1))
+    x = th.tensor(flatten_node_features).float()
+    # Edge features
+    if obs.edges is None:
+        edge_attr = None
+    else:
+        flatten_edge_features = obs.edges.reshape((len(obs.edges), -1))
+        edge_attr = th.tensor(flatten_edge_features).float()
+    edge_index = th.tensor(obs.edge_links, dtype=th.long).t().contiguous().view(2, -1)
+    # thg.Data
+    data = thg.data.Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
+    # Pin the tensor's memory (for faster transfer to GPU later).
+    if pin_memory and th.cuda.is_available():
+        data.pin_memory()
+
+    return data if device is None else data.to(device)
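A small usage sketch of the shared helper (the arrays below are made-up example data): a gymnasium GraphInstance with 3 nodes and 2 edges is converted to a torch_geometric Data object:

import gymnasium as gym
import numpy as np

from skdecide.hub.solver.utils.gnn.torch_utils import graph_obs_to_thg_data

obs = gym.spaces.GraphInstance(
    nodes=np.zeros((3, 4), dtype=np.float32),  # 3 nodes with 4 features each
    edges=np.ones((2, 1), dtype=np.float32),   # 2 edges with 1 feature each
    edge_links=np.array([[0, 1], [1, 2]]),     # edges 0 -> 1 and 1 -> 2
)

data = graph_obs_to_thg_data(obs)
# data.x has shape (3, 4), data.edge_index (2, 2), data.edge_attr (2, 1)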