diff --git a/CHANGELOG.md b/CHANGELOG.md
index 80efd6ed42..52af8ae11a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -99,6 +99,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
 ### Changed
 
+- 🔨 Move all export functionality to `AnomalyModule` as base methods in https://github.com/openvinotoolkit/anomalib/pull/1803
 - 🔨 Version bump by @ashwinvaidya17 in https://github.com/openvinotoolkit/anomalib/pull/1305
 - 🔨 Modify README custom dataset by @Kiminjo in https://github.com/openvinotoolkit/anomalib/pull/1314
 - 🔨 Change the documentation URL in `README.md` and add commands to run each inferencer by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/1326
diff --git a/notebooks/000_getting_started/001_getting_started.ipynb b/notebooks/000_getting_started/001_getting_started.ipynb
index cfc4620eb8..a0fcd2d0c9 100644
--- a/notebooks/000_getting_started/001_getting_started.ipynb
+++ b/notebooks/000_getting_started/001_getting_started.ipynb
@@ -168,7 +168,7 @@
     "from anomalib import TaskType\n",
     "from anomalib.data import MVTec\n",
     "from anomalib.data.utils import read_image\n",
-    "from anomalib.deploy import ExportType, OpenVINOInferencer\n",
+    "from anomalib.deploy import OpenVINOInferencer, ExportType\n",
     "from anomalib.engine import Engine\n",
     "from anomalib.models import Padim"
    ]
diff --git a/src/anomalib/deploy/__init__.py b/src/anomalib/deploy/__init__.py
index 1e98d69e10..45581bd8dd 100644
--- a/src/anomalib/deploy/__init__.py
+++ b/src/anomalib/deploy/__init__.py
@@ -3,15 +3,7 @@
 # Copyright (C) 2022-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-from .export import ExportType, export_to_onnx, export_to_openvino, export_to_torch
+from .export import ExportType
 from .inferencers import Inferencer, OpenVINOInferencer, TorchInferencer
 
-__all__ = [
-    "ExportType",
-    "Inferencer",
-    "OpenVINOInferencer",
-    "TorchInferencer",
-    "export_to_onnx",
-    "export_to_openvino",
-    "export_to_torch",
-]
+__all__ = ["ExportType", "Inferencer", "OpenVINOInferencer", "TorchInferencer"]
diff --git a/src/anomalib/deploy/export.py b/src/anomalib/deploy/export.py
index ecaa72a42a..c800a77e89 100644
--- a/src/anomalib/deploy/export.py
+++ b/src/anomalib/deploy/export.py
@@ -6,6 +6,7 @@
 
 import json
 import logging
+from collections.abc import Callable
 from enum import Enum
 from pathlib import Path
 from typing import TYPE_CHECKING, Any
@@ -17,7 +18,6 @@
 
 from anomalib import TaskType
 from anomalib.data.transforms import ExportableCenterCrop
-from anomalib.models.components import AnomalyModule
 from anomalib.utils.exceptions import try_import
 
 if TYPE_CHECKING:
@@ -97,271 +97,254 @@ def convert_center_crop(self) -> None:
         transforms[index] = ExportableCenterCrop(size=transforms[index].size)
 
 
-def export_to_torch(
-    model: AnomalyModule,
-    export_root: Path | str,
-    transform: Transform | None = None,
-    task: TaskType | None = None,
-) -> Path:
-    """Export AnomalibModel to torch.
-
-    Args:
-        model (AnomalyModule): Model to export.
-        export_root (Path): Path to the output folder.
-        transform (Transform, optional): Input transforms used for the model. If not provided, the transform is taken
-            from the model.
-            Defaults to ``None``.
-        task (TaskType | None): Task type.
-            Defaults to ``None``.
-
-    Returns:
-        Path: Path to the exported pytorch model.
-
-    Examples:
-        Assume that we have a model to train and we want to export it to torch format.
- - >>> from anomalib.data import Visa - >>> from anomalib.models import Patchcore - >>> from anomalib.engine import Engine - ... - >>> datamodule = Visa() - >>> model = Patchcore() - >>> engine = Engine() - ... - >>> engine.fit(model, datamodule) - - Now that we have a model trained, we can export it to torch format. - - >>> from anomalib.deploy import export_to_torch - ... - >>> export_to_torch( - ... model=model, - ... export_root="path/to/export", - ... transform=datamodule.test_data.transform, - ... task=datamodule.test_data.task, - ... ) - """ - transform = transform or model.transform or model.configure_transforms() - inference_model = InferenceModel(model=model.model, transform=transform) - export_root = _create_export_root(export_root, ExportType.TORCH) - metadata = get_metadata(task=task, model=model) - pt_model_path = export_root / "model.pt" - torch.save( - obj={"model": inference_model, "metadata": metadata}, - f=pt_model_path, - ) - return pt_model_path - - -def export_to_onnx( - model: AnomalyModule, - export_root: Path | str, - input_size: tuple[int, int] | None = None, - transform: Transform | None = None, - task: TaskType | None = None, - export_type: ExportType = ExportType.ONNX, -) -> Path: - """Export model to onnx. - - Args: - model (AnomalyModule): Model to export. - export_root (Path): Path to the root folder of the exported model. - input_size (tuple[int, int] | None, optional): Image size used as the input for onnx converter. - Defaults to None. - transform (Transform, optional): Input transforms used for the model. If not provided, the transform is taken - from the model. - Defaults to ``None``. - task (TaskType | None): Task type. - Defaults to ``None``. - export_type (ExportType): Mode to export the model. Since this method is used by OpenVINO export as well, we - need to pass the export type so that the right export path is created. - Defaults to ``ExportType.ONNX``. - - Returns: - Path: Path to the exported onnx model. - - Examples: - Export the Lightning Model to ONNX: - - >>> from anomalib.models import Patchcore - >>> from anomalib.data import Visa - >>> from anomalib.deploy import export_to_onnx - ... - >>> datamodule = Visa() - >>> model = Patchcore() - ... - >>> export_to_onnx( - ... model=model, - ... export_root="path/to/export", - ... transform=datamodule.test_data.transform, - ... task=datamodule.test_data.task - ... ) - - Using Custom Transforms: - This example shows how to use a custom ``Compose`` object for the ``transform`` argument. - - >>> export_to_onnx( - ... model=model, - ... export_root="path/to/export", - ... task="segmentation", - ... 
) - """ - # TODO(djdameln): Move export functionality to anomaly module - # https://github.com/openvinotoolkit/anomalib/issues/1752 - transform = transform or model.transform or model.configure_transforms() - inference_model = InferenceModel(model=model.model, transform=transform, disable_antialias=True) - export_root = _create_export_root(export_root, export_type) - input_shape = torch.zeros((1, 3, *input_size)) if input_size else torch.zeros((1, 3, 1, 1)) - dynamic_axes = ( - None if input_size else {"input": {0: "batch_size", 2: "height", 3: "weight"}, "output": {0: "batch_size"}} - ) - _write_metadata_to_json(export_root, model, task) - onnx_path = export_root / "model.onnx" - torch.onnx.export( - inference_model, - input_shape.to(model.device), - str(onnx_path), - opset_version=14, - dynamic_axes=dynamic_axes, - input_names=["input"], - output_names=["output"], - ) - - return onnx_path - - -def export_to_openvino( - model: AnomalyModule, - export_root: Path | str, - input_size: tuple[int, int] | None = None, - transform: Transform | None = None, - ov_args: dict[str, Any] | None = None, - task: TaskType | None = None, -) -> Path: - """Convert onnx model to OpenVINO IR. - - Args: - model (AnomalyModule): AnomalyModule to export. - export_root (Path): Path to the export folder. - input_size (tuple[int, int] | None, optional): Input size of the model. Used for adding metadata to the IR. - Defaults to None. - transform (Transform, optional): Input transforms used for the model. If not provided, the transform is taken - from the model. - Defaults to ``None``. - ov_args: Model optimizer arguments for OpenVINO model conversion. - Defaults to ``None``. - task (TaskType | None): Task type. - Defaults to ``None``. - - Returns: - Path: Path to the exported onnx model. - - Raises: - ModuleNotFoundError: If OpenVINO is not installed. - - Returns: - Path: Path to the exported OpenVINO IR. - - Examples: - Export the Lightning Model to OpenVINO IR: - This example demonstrates how to export the Lightning Model to OpenVINO IR. - - >>> from anomalib.models import Patchcore - >>> from anomalib.data import Visa - >>> from anomalib.deploy import export_to_openvino - ... - >>> datamodule = Visa() - >>> model = Patchcore() - ... - >>> export_to_openvino( - ... export_root="path/to/export", - ... model=model, - ... input_size=(224, 224), - ... transform=datamodule.test_data.transform, - ... task=datamodule.test_data.task - ... ) - - Using Custom Transforms: - This example shows how to use a custom ``Transform`` object for the ``transform`` argument. - - >>> from torchvision.transforms.v2 import Resize - >>> transform = Resize(224, 224) - ... - >>> export_to_openvino( - ... export_root="path/to/export", - ... model=model, - ... transform=transform, - ... task="segmentation", - ... ) - - """ - if not try_import("openvino"): - logger.exception("Could not find OpenVINO. Please check OpenVINO installation.") - raise ModuleNotFoundError - - import openvino as ov +class ExportMixin: + """This mixin allows exporting models to torch and ONNX/OpenVINO.""" + + model: nn.Module + transform: Transform + configure_transforms: Callable + device: torch.device + + def to_torch( + self, + export_root: Path | str, + transform: Transform | None = None, + task: TaskType | None = None, + ) -> Path: + """Export AnomalibModel to torch. + + Args: + export_root (Path): Path to the output folder. + transform (Transform, optional): Input transforms used for the model. If not provided, the transform is + taken from the model. 
+                Defaults to ``None``.
+            task (TaskType | None): Task type.
+                Defaults to ``None``.
+
+        Returns:
+            Path: Path to the exported PyTorch model.
+
+        Examples:
+            Assume that we have a model to train and we want to export it to torch format.
+
+            >>> from anomalib.data import Visa
+            >>> from anomalib.models import Patchcore
+            >>> from anomalib.engine import Engine
+            ...
+            >>> datamodule = Visa()
+            >>> model = Patchcore()
+            >>> engine = Engine()
+            ...
+            >>> engine.fit(model, datamodule)
+
+            Now that the model is trained, we can export it to torch format.
+
+            >>> model.to_torch(
+            ...     export_root="path/to/export",
+            ...     transform=datamodule.test_data.transform,
+            ...     task=datamodule.test_data.task,
+            ... )
+        """
+        transform = transform or self.transform or self.configure_transforms()
+        inference_model = InferenceModel(model=self.model, transform=transform)
+        export_root = _create_export_root(export_root, ExportType.TORCH)
+        metadata = self.get_metadata(task=task)
+        pt_model_path = export_root / "model.pt"
+        torch.save(
+            obj={"model": inference_model, "metadata": metadata},
+            f=pt_model_path,
+        )
+        return pt_model_path
+
+    def to_onnx(
+        self,
+        export_root: Path | str,
+        input_size: tuple[int, int] | None = None,
+        transform: Transform | None = None,
+        task: TaskType | None = None,
+    ) -> Path:
+        """Export model to ONNX.
+
+        Args:
+            export_root (Path): Path to the root folder of the exported model.
+            input_size (tuple[int, int] | None, optional): Image size used as the input for the ONNX converter.
+                Defaults to None.
+            transform (Transform, optional): Input transforms used for the model. If not provided, the transform is
+                taken from the model.
+                Defaults to ``None``.
+            task (TaskType | None): Task type.
+                Defaults to ``None``.
+
+        Returns:
+            Path: Path to the exported ONNX model.
+
+        Examples:
+            Export the Lightning Model to ONNX:
+
+            >>> from anomalib.models import Patchcore
+            >>> from anomalib.data import Visa
+            ...
+            >>> datamodule = Visa()
+            >>> model = Patchcore()
+            ...
+            >>> model.to_onnx(
+            ...     export_root="path/to/export",
+            ...     transform=datamodule.test_data.transform,
+            ...     task=datamodule.test_data.task
+            ... )
+
+            Using Custom Transforms:
+            This example shows how to use a custom ``Compose`` object for the ``transform`` argument.
+
+            >>> from torchvision.transforms.v2 import Compose, Resize
+            >>> transform = Compose([Resize((224, 224))])
+            ...
+            >>> model.to_onnx(
+            ...     export_root="path/to/export",
+            ...     transform=transform,
+            ...     task="segmentation",
+            ... )
+        """
+        transform = transform or self.transform or self.configure_transforms()
+        inference_model = InferenceModel(model=self.model, transform=transform, disable_antialias=True)
+        export_root = _create_export_root(export_root, ExportType.ONNX)
+        input_shape = torch.zeros((1, 3, *input_size)) if input_size else torch.zeros((1, 3, 1, 1))
+        dynamic_axes = (
+            None if input_size else {"input": {0: "batch_size", 2: "height", 3: "width"}, "output": {0: "batch_size"}}
+        )
+        _write_metadata_to_json(self.get_metadata(task), export_root)
+        onnx_path = export_root / "model.onnx"
+        torch.onnx.export(
+            inference_model,
+            input_shape.to(self.device),
+            str(onnx_path),
+            opset_version=14,
+            dynamic_axes=dynamic_axes,
+            input_names=["input"],
+            output_names=["output"],
+        )
+
+        return onnx_path
+
+    def to_openvino(
+        self,
+        export_root: Path | str,
+        input_size: tuple[int, int] | None = None,
+        transform: Transform | None = None,
+        ov_args: dict[str, Any] | None = None,
+        task: TaskType | None = None,
+    ) -> Path:
+        """Convert the model to OpenVINO IR via an intermediate ONNX export.
+
+        Args:
+            export_root (Path): Path to the export folder.
+            input_size (tuple[int, int] | None, optional): Input size of the model. Used for adding metadata to the IR.
+                Defaults to None.
+            transform (Transform, optional): Input transforms used for the model. If not provided, the transform is
+                taken from the model.
+                Defaults to ``None``.
+            ov_args: Model optimizer arguments for OpenVINO model conversion.
+                Defaults to ``None``.
+            task (TaskType | None): Task type.
+                Defaults to ``None``.
+
+        Raises:
+            ModuleNotFoundError: If OpenVINO is not installed.
+
+        Returns:
+            Path: Path to the exported OpenVINO IR.
+
+        Examples:
+            Export the Lightning Model to OpenVINO IR:
+            This example demonstrates how to export the Lightning Model to OpenVINO IR.
+
+            >>> from anomalib.models import Patchcore
+            >>> from anomalib.data import Visa
+            ...
+            >>> datamodule = Visa()
+            >>> model = Patchcore()
+            ...
+            >>> model.to_openvino(
+            ...     export_root="path/to/export",
+            ...     transform=datamodule.test_data.transform,
+            ...     task=datamodule.test_data.task
+            ... )
+
+            Using Custom Transforms:
+            This example shows how to use a custom ``Transform`` object for the ``transform`` argument.
+
+            >>> from torchvision.transforms.v2 import Resize
+            >>> transform = Resize((224, 224))
+            ...
+            >>> model.to_openvino(
+            ...     export_root="path/to/export",
+            ...     transform=transform,
+            ...     task="segmentation",
+            ... )
+        """
+        if not try_import("openvino"):
+            logger.exception("Could not find OpenVINO. Please check OpenVINO installation.")
+            raise ModuleNotFoundError
 
-    model_path = export_to_onnx(model, export_root, input_size, transform, task, ExportType.OPENVINO)
-    ov_model_path = model_path.with_suffix(".xml")
-    ov_args = {} if ov_args is None else ov_args
-    # fp16 compression is enabled by default
-    compress_to_fp16 = ov_args.get("compress_to_fp16", True)
+        import openvino as ov
 
-    model = ov.convert_model(model_path, **ov_args)
-    ov.save_model(model, ov_model_path, compress_to_fp16=compress_to_fp16)
+        model_path = self.to_onnx(export_root, input_size, transform, task)
+        export_root = _create_export_root(export_root, ExportType.OPENVINO)
+        ov_model_path = export_root / "model.xml"
+        ov_args = {} if ov_args is None else ov_args
+        # fp16 compression is enabled by default
+        compress_to_fp16 = ov_args.get("compress_to_fp16", True)
 
-    return ov_model_path
+        model = ov.convert_model(model_path, **ov_args)
+        ov.save_model(model, ov_model_path, compress_to_fp16=compress_to_fp16)
 
+        return ov_model_path
 
-def get_metadata(
-    model: AnomalyModule,
-    task: TaskType | None = None,
-) -> dict[str, Any]:
-    """Get metadata for the exported model.
+    def get_metadata(
+        self,
+        task: TaskType | None = None,
+    ) -> dict[str, Any]:
+        """Get metadata for the exported model.
 
-    Args:
-        model (AnomalyModule): Anomaly model which contains metadata related to normalization.
-        task (TaskType | None): Task type.
-            Defaults to None.
-
-    Returns:
-        dict[str, Any]: Metadata for the exported model.
-    """
-    data_metadata = {"task": task}
-    model_metadata = _get_model_metadata(model)
-    metadata = {**data_metadata, **model_metadata}
+        Args:
+            task (TaskType | None): Task type.
+                Defaults to None.
 
-    # Convert torch tensors to python lists or values for json serialization.
-    for key, value in metadata.items():
-        if isinstance(value, torch.Tensor):
-            metadata[key] = value.numpy().tolist()
-
-    return metadata
-
-
-def _get_model_metadata(model: AnomalyModule) -> dict[str, torch.Tensor]:
-    """Get meta data related to normalization from model.
+ Returns: + dict[str, Any]: Metadata for the exported model. + """ + data_metadata = {"task": task} + model_metadata = {} + cached_metadata: dict[str, Number | torch.Tensor] = {} + for threshold_name in ("image_threshold", "pixel_threshold"): + if hasattr(self, threshold_name): + cached_metadata[threshold_name] = getattr(self, threshold_name).cpu().value.item() + if hasattr(self, "normalization_metrics") and self.normalization_metrics.state_dict() is not None: + for key, value in self.normalization_metrics.state_dict().items(): + cached_metadata[key] = value.cpu() + # Remove undefined values by copying in a new dict + for key, val in cached_metadata.items(): + if not np.isinf(val).all(): + model_metadata[key] = val + del cached_metadata + metadata = {**data_metadata, **model_metadata} + + # Convert torch tensors to python lists or values for json serialization. + for key, value in metadata.items(): + if isinstance(value, torch.Tensor): + metadata[key] = value.numpy().tolist() + + return metadata + + +def _write_metadata_to_json(metadata: dict[str, Any], export_root: Path) -> None: + """Write metadata to json file. Args: - model (AnomalyModule): Anomaly model which contains metadata related to normalization. - - Returns: - dict[str, torch.Tensor]: Model metadata + metadata (dict[str, Any]): Metadata to export. + export_root (Path): Path to the exported model. """ - metadata = {} - cached_metadata: dict[str, Number | torch.Tensor] = {} - for threshold_name in ("image_threshold", "pixel_threshold"): - if hasattr(model, threshold_name): - cached_metadata[threshold_name] = getattr(model, threshold_name).cpu().value.item() - if hasattr(model, "normalization_metrics") and model.normalization_metrics.state_dict() is not None: - for key, value in model.normalization_metrics.state_dict().items(): - cached_metadata[key] = value.cpu() - # Remove undefined values by copying in a new dict - for key, val in cached_metadata.items(): - if not np.isinf(val).all(): - metadata[key] = val - del cached_metadata - return metadata + with (export_root / "metadata.json").open("w", encoding="utf-8") as metadata_file: + json.dump(metadata, metadata_file, ensure_ascii=False, indent=4) def _create_export_root(export_root: str | Path, export_type: ExportType) -> Path: @@ -377,23 +360,3 @@ def _create_export_root(export_root: str | Path, export_type: ExportType) -> Pat export_root = Path(export_root) / "weights" / export_type.value export_root.mkdir(parents=True, exist_ok=True) return export_root - - -def _write_metadata_to_json( - export_root: Path, - model: AnomalyModule, - task: TaskType | None = None, -) -> None: - """Write metadata to json file. - - Args: - export_root (Path): Path to the exported model. - transform (dict[str, Any] | AnomalibDataset | AnomalibDataModule | A.Compose): Data transforms (augmentations) - used for the model. - model (AnomalyModule): AnomalyModule to export. - task (TaskType | None): Task type. - Defaults to None. 
- """ - metadata = get_metadata(task=task, model=model) - with (export_root / "metadata.json").open("w", encoding="utf-8") as metadata_file: - json.dump(metadata, metadata_file, ensure_ascii=False, indent=4) diff --git a/src/anomalib/engine/engine.py b/src/anomalib/engine/engine.py index 16d9f4f80d..8e7e679650 100644 --- a/src/anomalib/engine/engine.py +++ b/src/anomalib/engine/engine.py @@ -26,7 +26,7 @@ from anomalib.callbacks.timer import TimerCallback from anomalib.callbacks.visualizer import _VisualizationCallback from anomalib.data import AnomalibDataModule, AnomalibDataset, PredictDataset -from anomalib.deploy.export import ExportType, export_to_onnx, export_to_openvino, export_to_torch +from anomalib.deploy import ExportType from anomalib.models import AnomalyModule from anomalib.utils.normalization import NormalizationMethod from anomalib.utils.path import create_versioned_dir @@ -923,23 +923,20 @@ def export( exported_model_path: Path | None = None if export_type == ExportType.TORCH: - exported_model_path = export_to_torch( - model=model, + exported_model_path = model.to_torch( export_root=export_root, transform=transform, task=self.task, ) elif export_type == ExportType.ONNX: - exported_model_path = export_to_onnx( - model=model, + exported_model_path = model.to_onnx( export_root=export_root, input_size=input_size, transform=transform, task=self.task, ) elif export_type == ExportType.OPENVINO: - exported_model_path = export_to_openvino( - model=model, + exported_model_path = model.to_openvino( export_root=export_root, input_size=input_size, transform=transform, diff --git a/src/anomalib/models/__init__.py b/src/anomalib/models/__init__.py index 46f2fd34ea..722cd1dfe5 100644 --- a/src/anomalib/models/__init__.py +++ b/src/anomalib/models/__init__.py @@ -36,7 +36,7 @@ class UnknownModelError(ModuleNotFoundError): - ... + pass __all__ = [ diff --git a/src/anomalib/models/components/base/anomaly_module.py b/src/anomalib/models/components/base/anomaly_module.py index 4ae12fb397..fe17eb6a55 100644 --- a/src/anomalib/models/components/base/anomaly_module.py +++ b/src/anomalib/models/components/base/anomaly_module.py @@ -17,6 +17,7 @@ from torchvision.transforms.v2 import Compose, Normalize, Resize, Transform from anomalib import LearningType +from anomalib.deploy.export import ExportMixin from anomalib.metrics import AnomalibMetricCollection from anomalib.metrics.threshold import BaseThreshold @@ -28,7 +29,7 @@ logger = logging.getLogger(__name__) -class AnomalyModule(pl.LightningModule, ABC): +class AnomalyModule(ExportMixin, pl.LightningModule, ABC): """AnomalyModule to train, validate, predict and test images. Acts as a base class for all the Anomaly Modules in the library. 
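With `ExportMixin` in `AnomalyModule`'s bases, every Anomalib model now exposes `to_torch`, `to_onnx`, and `to_openvino` directly, as exercised by the test updates below. A minimal usage sketch under assumed paths (the checkpoint and export locations are illustrative, not part of this diff):

# Sketch: exporting a trained model through the new mixin methods.
# "path/to/model.ckpt" and "path/to/export" are placeholder paths.
from anomalib import TaskType
from anomalib.models import Padim

model = Padim.load_from_checkpoint("path/to/model.ckpt")

# Each call writes into <export_root>/weights/<export_type>/ (see _create_export_root).
# to_torch embeds the metadata dict inside model.pt; to_onnx and to_openvino also
# write a metadata.json next to the exported model.
model.to_torch(export_root="path/to/export", task=TaskType.SEGMENTATION)
model.to_onnx(export_root="path/to/export", task=TaskType.SEGMENTATION)
model.to_openvino(export_root="path/to/export", ov_args={}, task=TaskType.SEGMENTATION)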
diff --git a/tests/integration/cli/test_cli.py b/tests/integration/cli/test_cli.py index a163ef73ad..689676203c 100644 --- a/tests/integration/cli/test_cli.py +++ b/tests/integration/cli/test_cli.py @@ -13,7 +13,7 @@ import torch from anomalib.cli import AnomalibCLI -from anomalib.deploy.export import ExportType +from anomalib.deploy import ExportType class TestCLI: diff --git a/tests/integration/model/test_models.py b/tests/integration/model/test_models.py index e49cc11e6b..627e793a5f 100644 --- a/tests/integration/model/test_models.py +++ b/tests/integration/model/test_models.py @@ -12,7 +12,7 @@ from anomalib import TaskType from anomalib.data import AnomalibDataModule, MVTec -from anomalib.deploy.export import ExportType +from anomalib.deploy import ExportType from anomalib.engine import Engine from anomalib.models import AnomalyModule, get_available_models, get_model diff --git a/tests/integration/tools/test_gradio_entrypoint.py b/tests/integration/tools/test_gradio_entrypoint.py index bbdcdd1444..48cde00fc1 100644 --- a/tests/integration/tools/test_gradio_entrypoint.py +++ b/tests/integration/tools/test_gradio_entrypoint.py @@ -12,7 +12,7 @@ import pytest from anomalib import TaskType -from anomalib.deploy import OpenVINOInferencer, TorchInferencer, export_to_openvino, export_to_torch +from anomalib.deploy import OpenVINOInferencer, TorchInferencer from anomalib.models import Padim sys.path.append("tools/inference") @@ -45,8 +45,7 @@ def test_torch_inference( model = Padim.load_from_checkpoint(_ckpt_path) # export torch model - export_to_torch( - model=model, + model.to_torch( export_root=_ckpt_path.parent.parent.parent, task=TaskType.SEGMENTATION, ) @@ -70,9 +69,8 @@ def test_openvino_inference( model = Padim.load_from_checkpoint(_ckpt_path) # export OpenVINO model - export_to_openvino( + model.to_openvino( export_root=_ckpt_path.parent.parent.parent, - model=model, ov_args={}, task=TaskType.SEGMENTATION, ) diff --git a/tests/integration/tools/test_openvino_entrypoint.py b/tests/integration/tools/test_openvino_entrypoint.py index 31da21a138..48bc31a00a 100644 --- a/tests/integration/tools/test_openvino_entrypoint.py +++ b/tests/integration/tools/test_openvino_entrypoint.py @@ -12,7 +12,6 @@ import pytest from anomalib import TaskType -from anomalib.deploy import export_to_openvino from anomalib.models import Padim sys.path.append("tools/inference") @@ -43,9 +42,8 @@ def test_openvino_inference( model = Padim.load_from_checkpoint(_ckpt_path) # export OpenVINO model - export_to_openvino( + model.to_openvino( export_root=_ckpt_path.parent.parent.parent, - model=model, ov_args={}, task=TaskType.SEGMENTATION, ) diff --git a/tests/integration/tools/test_torch_entrypoint.py b/tests/integration/tools/test_torch_entrypoint.py index 5a8f5848a8..e146b78e54 100644 --- a/tests/integration/tools/test_torch_entrypoint.py +++ b/tests/integration/tools/test_torch_entrypoint.py @@ -12,7 +12,6 @@ import pytest from anomalib import TaskType -from anomalib.deploy import export_to_torch from anomalib.models import Padim sys.path.append("tools/inference") @@ -42,8 +41,7 @@ def test_torch_inference( _ckpt_path = ckpt_path("Padim") get_parser, infer = get_functions model = Padim.load_from_checkpoint(_ckpt_path) - export_to_torch( - model=model, + model.to_torch( export_root=_ckpt_path.parent.parent.parent, task=TaskType.SEGMENTATION, )
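For downstream code that imported the removed helpers from `anomalib.deploy`, the migration mirrors the test updates above; a comment-only sketch of the mapping introduced by this PR:

# Old module-level function (removed)                 New instance method
# --------------------------------------------------------------------------
# export_to_torch(model=m, export_root=r, ...)    ->  m.to_torch(export_root=r, ...)
# export_to_onnx(model=m, export_root=r, ...)     ->  m.to_onnx(export_root=r, ...)
# export_to_openvino(model=m, export_root=r, ...) ->  m.to_openvino(export_root=r, ...)
# get_metadata(model=m, task=t)                   ->  m.get_metadata(task=t)
#
# Engine.export() keeps its public interface; only its body now dispatches to
# these instance methods (see the engine.py hunk above).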