Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[AIR] Rename DLPredictor.call_model tensor parameter to inputs #30574

Merged
merged 3 commits on Nov 28, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions python/ray/train/_internal/dl_predictor.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,13 +52,14 @@ def _tensor_to_array(self, tensor: TensorType) -> np.ndarray:
raise NotImplementedError

@abc.abstractmethod
@DeveloperAPI
def call_model(
self, tensor: Union[TensorType, Dict[str, TensorType]]
self, inputs: Union[TensorType, Dict[str, TensorType]]
) -> Union[TensorType, Dict[str, TensorType]]:
"""Inputs the tensor to the model for this Predictor and returns the result.

Args:
tensor: The tensor to input to the model.
inputs: The tensor to input to the model.

Returns:
A tensor or dictionary of tensors containing the model output.
Expand Down
17 changes: 9 additions & 8 deletions python/ray/train/tensorflow/tensorflow_predictor.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from ray.air._internal.tensorflow_utils import convert_ndarray_batch_to_tf_tensor_batch
from ray.train._internal.dl_predictor import DLPredictor
from ray.train.tensorflow.tensorflow_checkpoint import TensorflowCheckpoint
from ray.util.annotations import PublicAPI
from ray.util.annotations import DeveloperAPI, PublicAPI

if TYPE_CHECKING:
from ray.data.preprocessor import Preprocessor
Expand Down Expand Up @@ -107,8 +107,9 @@ def from_checkpoint(
use_gpu=use_gpu,
)

@DeveloperAPI
def call_model(
self, tensor: Union[tf.Tensor, Dict[str, tf.Tensor]]
self, inputs: Union[tf.Tensor, Dict[str, tf.Tensor]]
) -> Union[tf.Tensor, Dict[str, tf.Tensor]]:
"""Runs inference on a single batch of tensor data.

Expand All @@ -130,8 +131,8 @@ def build_model() -> tf.keras.Model:

# Use a custom predictor to format model output as a dict.
class CustomPredictor(TensorflowPredictor):
def call_model(self, tensor):
model_output = super().call_model(tensor)
def call_model(self, inputs):
model_output = super().call_model(inputs)
return {
str(i): model_output[i] for i in range(len(model_output))
}
Expand All @@ -140,18 +141,18 @@ def call_model(self, tensor):
predictions = predictor.predict(data_batch)

Args:
tensor: A batch of data to predict on, represented as either a single
PyTorch tensor or for multi-input models, a dictionary of tensors.
inputs: A batch of data to predict on, represented as either a single
TensorFlow tensor or for multi-input models, a dictionary of tensors.

Returns:
The model outputs, either as a single tensor or a dictionary of tensors.

"""
if self.use_gpu:
with tf.device("GPU:0"):
return self._model(tensor)
return self._model(inputs)
else:
return self._model(tensor)
return self._model(inputs)

def predict(
self,
Expand Down
13 changes: 7 additions & 6 deletions python/ray/train/torch/torch_predictor.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from ray.air._internal.torch_utils import convert_ndarray_batch_to_torch_tensor_batch
from ray.train.torch.torch_checkpoint import TorchCheckpoint
from ray.train._internal.dl_predictor import DLPredictor
from ray.util.annotations import PublicAPI
from ray.util.annotations import DeveloperAPI, PublicAPI

if TYPE_CHECKING:
from ray.data.preprocessor import Preprocessor
Expand Down Expand Up @@ -94,8 +94,9 @@ def from_checkpoint(
preprocessor = checkpoint.get_preprocessor()
return cls(model=model, preprocessor=preprocessor, use_gpu=use_gpu)

@DeveloperAPI
def call_model(
self, tensor: Union[torch.Tensor, Dict[str, torch.Tensor]]
self, inputs: Union[torch.Tensor, Dict[str, torch.Tensor]]
) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
"""Runs inference on a single batch of tensor data.

Expand All @@ -106,7 +107,7 @@ def call_model(
output.

Args:
tensor: A batch of data to predict on, represented as either a single
inputs: A batch of data to predict on, represented as either a single
PyTorch tensor or for multi-input models, a dictionary of tensors.

Returns:
Expand All @@ -124,8 +125,8 @@ def forward(self, input_tensor):

# Use a custom predictor to format model output as a dict.
class CustomPredictor(TorchPredictor):
def call_model(self, tensor):
model_output = super().call_model(tensor)
def call_model(self, inputs):
model_output = super().call_model(inputs)
return {
str(i): model_output[i] for i in range(len(model_output))
}
Expand All @@ -142,7 +143,7 @@ def call_model(self, tensor):
Predictions: [1 2], [1 2]
"""
with torch.no_grad():
output = self.model(tensor)
output = self.model(inputs)
return output

def predict(
Expand Down