From 21c765ca420fc27e705ebdfcd55f4d2b64a1e3fc Mon Sep 17 00:00:00 2001 From: Adrian Boguszewski Date: Fri, 12 Apr 2024 19:12:53 +0200 Subject: [PATCH 1/4] Rename mo with ov Signed-off-by: Adrian Boguszewski --- src/anomalib/cli/cli.py | 2 +- src/anomalib/cli/utils/openvino.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/anomalib/cli/cli.py b/src/anomalib/cli/cli.py index 9de4a1d9c1..524e20a90e 100644 --- a/src/anomalib/cli/cli.py +++ b/src/anomalib/cli/cli.py @@ -234,7 +234,7 @@ def add_export_arguments(self, parser: ArgumentParser) -> None: added = parser.add_method_arguments( Engine, "export", - skip={"mo_args", "model"}, + skip={"ov_args", "model"}, ) self.subcommand_method_arguments["export"] = added add_openvino_export_arguments(parser) diff --git a/src/anomalib/cli/utils/openvino.py b/src/anomalib/cli/utils/openvino.py index 70e329f6b4..65ac7b80db 100644 --- a/src/anomalib/cli/utils/openvino.py +++ b/src/anomalib/cli/utils/openvino.py @@ -22,11 +22,11 @@ def add_openvino_export_arguments(parser: ArgumentParser) -> None: """Add OpenVINO arguments to parser under --mo key.""" if get_common_cli_parser is not None: group = parser.add_argument_group("OpenVINO Model Optimizer arguments (optional)") - mo_parser = get_common_cli_parser() + ov_parser = get_common_cli_parser() # remove redundant keys from mo keys - for arg in mo_parser._actions: # noqa: SLF001 + for arg in ov_parser._actions: # noqa: SLF001 if arg.dest in ("help", "input_model", "output_dir"): continue - group.add_argument(f"--mo_args.{arg.dest}", type=arg.type, default=arg.default, help=arg.help) + group.add_argument(f"--ov_args.{arg.dest}", type=arg.type, default=arg.default, help=arg.help) else: logger.info("OpenVINO is possibly not installed in the environment. 
Skipping adding it to parser.") From 69d0c5b0ca4e5a8de102c801f7b926ff52d46418 Mon Sep 17 00:00:00 2001 From: Adrian Boguszewski Date: Wed, 17 Apr 2024 16:32:02 +0200 Subject: [PATCH 2/4] Support static shapes when exporting to ONNX or OpenVINO Signed-off-by: Adrian Boguszewski --- CHANGELOG.md | 2 ++ src/anomalib/deploy/export.py | 20 ++++++++++++++----- .../deploy/inferencers/openvino_inferencer.py | 4 ++++ src/anomalib/engine/engine.py | 9 +++++++-- 4 files changed, 28 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c2ed8c1a68..ad25f78187 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ### Added +- 🚀 Update OpenVINO and ONNX export to support fixed input shape by @adrianboguszewski in https://github.com/openvinotoolkit/anomalib/pull/2006 + ### Changed - 🔨Rename OptimalF1 to F1Max for consistency with the literature, by @samet-akcay in https://github.com/openvinotoolkit/anomalib/pull/1980 diff --git a/src/anomalib/deploy/export.py b/src/anomalib/deploy/export.py index 83d63bf0dd..0b33ee4fad 100644 --- a/src/anomalib/deploy/export.py +++ b/src/anomalib/deploy/export.py @@ -160,6 +160,7 @@ def export_to_torch( def export_to_onnx( model: AnomalyModule, export_root: Path | str, + input_size: tuple[int, int] | None = None, transform: Transform | None = None, task: TaskType | None = None, export_type: ExportType = ExportType.ONNX, @@ -169,6 +170,8 @@ def export_to_onnx( Args: model (AnomalyModule): Model to export. export_root (Path): Path to the root folder of the exported model. + input_size (tuple[int, int] | None, optional): Image size used as the input for onnx converter. + Defaults to None. transform (Transform, optional): Input transforms used for the model. If not provided, the transform is taken from the model. Defaults to ``None``. 
@@ -212,14 +215,18 @@ def export_to_onnx( transform = transform or model.transform or model.configure_transforms() inference_model = InferenceModel(model=model.model, transform=transform, disable_antialias=True) export_root = _create_export_root(export_root, export_type) + input_shape = torch.zeros((1, 3, *input_size)) if input_size else torch.zeros((1, 3, 1, 1)) + dynamic_axes = ( + None if input_size else {"input": {0: "batch_size", 2: "height", 3: "weight"}, "output": {0: "batch_size"}} + ) _write_metadata_to_json(export_root, model, task) onnx_path = export_root / "model.onnx" torch.onnx.export( inference_model, - torch.zeros((1, 3, 1, 1)).to(model.device), + input_shape.to(model.device), str(onnx_path), opset_version=14, - dynamic_axes={"input": {0: "batch_size", 2: "height", 3: "weight"}, "output": {0: "batch_size"}}, + dynamic_axes=dynamic_axes, input_names=["input"], output_names=["output"], ) @@ -228,8 +235,9 @@ def export_to_onnx( def export_to_openvino( - export_root: Path | str, model: AnomalyModule, + export_root: Path | str, + input_size: tuple[int, int] | None = None, transform: Transform | None = None, ov_args: dict[str, Any] | None = None, task: TaskType | None = None, @@ -237,8 +245,10 @@ def export_to_openvino( """Convert onnx model to OpenVINO IR. Args: - export_root (Path): Path to the export folder. model (AnomalyModule): AnomalyModule to export. + export_root (Path): Path to the export folder. + input_size (tuple[int, int] | None, optional): Input size of the model. Used for adding metadata to the IR. + Defaults to None. transform (Transform, optional): Input transforms used for the model. If not provided, the transform is taken from the model. Defaults to ``None``. @@ -289,7 +299,7 @@ def export_to_openvino( ... 
) """ - model_path = export_to_onnx(model, export_root, transform, task, ExportType.OPENVINO) + model_path = export_to_onnx(model, export_root, input_size, transform, task, ExportType.OPENVINO) ov_model_path = model_path.with_suffix(".xml") ov_args = {} if ov_args is None else ov_args if convert_model is not None and serialize is not None: diff --git a/src/anomalib/deploy/inferencers/openvino_inferencer.py b/src/anomalib/deploy/inferencers/openvino_inferencer.py index db0d966fad..d0e2742d34 100644 --- a/src/anomalib/deploy/inferencers/openvino_inferencer.py +++ b/src/anomalib/deploy/inferencers/openvino_inferencer.py @@ -199,6 +199,10 @@ def predict( msg = f"Input image must be a numpy array or a path to an image. Got {type(image)}" raise TypeError(msg) + # Resize image to model input size if not dynamic + if self.input_blob.partial_shape[2:].is_static: + image = cv2.resize(image, tuple(self.input_blob.shape[2:][::-1])) + # Normalize numpy array to range [0, 1] if image.dtype != np.float32: image = image.astype(np.float32) diff --git a/src/anomalib/engine/engine.py b/src/anomalib/engine/engine.py index a08b78e529..75fd5c684f 100644 --- a/src/anomalib/engine/engine.py +++ b/src/anomalib/engine/engine.py @@ -840,6 +840,7 @@ def export( model: AnomalyModule, export_type: ExportType, export_root: str | Path | None = None, + input_size: tuple[int, int] | None = None, transform: Transform | None = None, ov_args: dict[str, Any] | None = None, ckpt_path: str | Path | None = None, @@ -851,6 +852,8 @@ def export( export_type (ExportType): Export type. export_root (str | Path | None, optional): Path to the output directory. If it is not set, the model is exported to trainer.default_root_dir. Defaults to None. + input_size (tuple[int, int] | None, optional): A static input shape for the model, which is exported to ONNX + and OpenVINO format. Defaults to None. transform (Transform | None, optional): Input transform to include in the exported model. 
If not provided, the engine will try to use the transform from the datamodule or dataset. Defaults to None. ov_args (dict[str, Any] | None, optional): This is optional and used only for OpenVINO's model optimizer. @@ -877,10 +880,10 @@ def export( ```python anomalib export --model Padim --export_mode OPENVINO --data Visa --input_size "[256,256]" ``` - 4. You can also overrride OpenVINO model optimizer by adding the ``--mo_args.`` arguments. + 4. You can also override OpenVINO model optimizer by adding the ``--ov_args.`` arguments. ```python anomalib export --model Padim --export_mode OPENVINO --data Visa --input_size "[256,256]" \ - --mo_args.compress_to_fp16 False + --ov_args.compress_to_fp16 False ``` """ self._setup_trainer(model) @@ -903,6 +906,7 @@ def export( exported_model_path = export_to_onnx( model=model, export_root=export_root, + input_size=input_size, transform=transform, task=self.task, ) @@ -910,6 +914,7 @@ def export( exported_model_path = export_to_openvino( model=model, export_root=export_root, + input_size=input_size, transform=transform, task=self.task, ov_args=ov_args, From 30a6ad27df09a1cdf8a2adc5cb468b37ee537e08 Mon Sep 17 00:00:00 2001 From: Adrian Boguszewski Date: Fri, 19 Apr 2024 16:12:21 +0200 Subject: [PATCH 3/4] Updated OpenVINO Signed-off-by: Adrian Boguszewski --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 99a15349bd..66dddd5045 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ core = [ "torchmetrics>=1.3.2", "open-clip-torch>=2.23.0", ] -openvino = ["openvino-dev>=2023.0", "nncf>=2.5.0", "onnx>=1.16.0"] +openvino = ["openvino-dev>=2023.1", "nncf>=2.6.0", "onnx>=1.16.0"] loggers = [ "comet-ml>=3.31.7", "gradio>=4", From be72598e859ec70d03710d0140262b8f75acdf1f Mon Sep 17 00:00:00 2001 From: Adrian Boguszewski Date: Fri, 19 Apr 2024 16:25:34 +0200 Subject: [PATCH 4/4] Use newer OV API Signed-off-by: Adrian Boguszewski --- 
src/anomalib/deploy/export.py | 22 ++++++++++--------- .../deploy/inferencers/openvino_inferencer.py | 2 +- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/src/anomalib/deploy/export.py b/src/anomalib/deploy/export.py index 0b33ee4fad..ecaa72a42a 100644 --- a/src/anomalib/deploy/export.py +++ b/src/anomalib/deploy/export.py @@ -25,10 +25,6 @@ logger = logging.getLogger("anomalib") -if try_import("openvino"): - from openvino.runtime import serialize - from openvino.tools.ovc import convert_model - class ExportType(str, Enum): """Model export type. @@ -299,15 +295,21 @@ def export_to_openvino( ... ) """ + if not try_import("openvino"): + logger.exception("Could not find OpenVINO. Please check OpenVINO installation.") + raise ModuleNotFoundError + + import openvino as ov + model_path = export_to_onnx(model, export_root, input_size, transform, task, ExportType.OPENVINO) ov_model_path = model_path.with_suffix(".xml") ov_args = {} if ov_args is None else ov_args - if convert_model is not None and serialize is not None: - model = convert_model(model_path, **ov_args) - serialize(model, ov_model_path) - else: - logger.exception("Could not find OpenVINO methods. 
Please check OpenVINO installation.") - raise ModuleNotFoundError + # fp16 compression is enabled by default + compress_to_fp16 = ov_args.get("compress_to_fp16", True) + + model = ov.convert_model(model_path, **ov_args) + ov.save_model(model, ov_model_path, compress_to_fp16=compress_to_fp16) + return ov_model_path diff --git a/src/anomalib/deploy/inferencers/openvino_inferencer.py b/src/anomalib/deploy/inferencers/openvino_inferencer.py index d0e2742d34..3e8f18540e 100644 --- a/src/anomalib/deploy/inferencers/openvino_inferencer.py +++ b/src/anomalib/deploy/inferencers/openvino_inferencer.py @@ -200,7 +200,7 @@ def predict( raise TypeError(msg) # Resize image to model input size if not dynamic - if self.input_blob.partial_shape[2:].is_static: + if self.input_blob.partial_shape[2].is_static and self.input_blob.partial_shape[3].is_static: image = cv2.resize(image, tuple(self.input_blob.shape[2:][::-1])) # Normalize numpy array to range [0, 1]