Skip to content

Commit

Permalink
[RLlib] Upgrade tf eager code to no longer use `experimental_relax_shapes` (but `reduce_retracing` instead). (ray-project#29214)
Browse files Browse the repository at this point in the history

Signed-off-by: Artur Niederfahrenhorst <artur@anyscale.com>
  • Loading branch information
ArturNiederfahrenhorst authored and clarence-wu committed Jan 31, 2023
1 parent d370bbf commit 91111bd
Showing 1 changed file with 4 additions and 4 deletions.
8 changes: 4 additions & 4 deletions rllib/policy/eager_tf_policy.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ def compute_actions_from_input_dict(
tf.function(
super(TracedEagerPolicy, self)._compute_actions_helper,
autograph=False,
experimental_relax_shapes=True,
reduce_retracing=True,
)
)
self._traced_compute_actions_helper = True
Expand All @@ -206,7 +206,7 @@ def learn_on_batch(self, samples):
tf.function(
super(TracedEagerPolicy, self)._learn_on_batch_helper,
autograph=False,
experimental_relax_shapes=True,
reduce_retracing=True,
)
)
self._traced_learn_on_batch_helper = True
Expand All @@ -226,7 +226,7 @@ def compute_gradients(self, samples: SampleBatch) -> ModelGradients:
tf.function(
super(TracedEagerPolicy, self)._compute_gradients_helper,
autograph=False,
experimental_relax_shapes=True,
reduce_retracing=True,
)
)
self._traced_compute_gradients_helper = True
Expand All @@ -246,7 +246,7 @@ def apply_gradients(self, grads: ModelGradients) -> None:
tf.function(
super(TracedEagerPolicy, self)._apply_gradients_helper,
autograph=False,
experimental_relax_shapes=True,
reduce_retracing=True,
)
)
self._traced_apply_gradients_helper = True
Expand Down

0 comments on commit 91111bd

Please sign in to comment.