From 9514d9194d6a8a45d3ceb42567e45d020d5226c0 Mon Sep 17 00:00:00 2001
From: Seunghoon Lee
Date: Sun, 5 May 2024 23:29:44 +0900
Subject: [PATCH] Fix ONNX/Olive.

---
 modules/onnx_impl/pipelines/__init__.py | 8 ++++----
 modules/onnx_impl/ui.py                 | 2 +-
 modules/onnx_impl/utils.py              | 3 +--
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/modules/onnx_impl/pipelines/__init__.py b/modules/onnx_impl/pipelines/__init__.py
index 5870ffa031d..df8260168c1 100644
--- a/modules/onnx_impl/pipelines/__init__.py
+++ b/modules/onnx_impl/pipelines/__init__.py
@@ -379,17 +379,17 @@ def preprocess(self, p: StableDiffusionProcessing):
             }
             in_dir = out_dir
 
-        if shared.opts.cuda_compile_backend == "olive-ai":
+        if shared.opts.olive_enable:
             submodels_for_olive = []
 
-            if "Text Encoder" in shared.opts.cuda_compile:
+            if "Text Encoder" in shared.opts.olive_submodels:
                 if not self.is_refiner:
                     submodels_for_olive.append("text_encoder")
                 if self._is_sdxl:
                     submodels_for_olive.append("text_encoder_2")
-            if "Model" in shared.opts.cuda_compile:
+            if "Model" in shared.opts.olive_submodels:
                 submodels_for_olive.append("unet")
-            if "VAE" in shared.opts.cuda_compile:
+            if "VAE" in shared.opts.olive_submodels:
                 submodels_for_olive.append("vae_encoder")
                 submodels_for_olive.append("vae_decoder")
 
diff --git a/modules/onnx_impl/ui.py b/modules/onnx_impl/ui.py
index ed3f0080e53..331a05da8d5 100644
--- a/modules/onnx_impl/ui.py
+++ b/modules/onnx_impl/ui.py
@@ -38,7 +38,7 @@ def create_ui():
         ep_log = gr.HTML("")
 
         ep_install.click(fn=install_execution_provider, inputs=[ep_checkbox], outputs=[ep_log])
-    if opts.cuda_compile_backend == "olive-ai":
+    if opts.olive_enable:
         import olive.passes as olive_passes
         from olive.hardware.accelerator import AcceleratorSpec, Device
         accelerator = AcceleratorSpec(accelerator_type=Device.GPU, execution_provider=opts.onnx_execution_provider)
diff --git a/modules/onnx_impl/utils.py b/modules/onnx_impl/utils.py
index 54fd020ee37..4cd631fcf39 100644
--- a/modules/onnx_impl/utils.py
+++ b/modules/onnx_impl/utils.py
@@ -38,8 +38,7 @@ def move_inference_session(session: ort.InferenceSession, device: torch.device):
 
 
 def check_diffusers_cache(path: os.PathLike):
-    from modules.shared import opts
-    return opts.diffusers_dir in os.path.abspath(path)
+    return False
 
 
 def check_pipeline_sdxl(cls: Type[diffusers.DiffusionPipeline]) -> bool:
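
Reviewer note (not part of the commit): below is a minimal, self-contained sketch of the submodel-selection logic that this patch switches from cuda_compile_backend/cuda_compile to the new olive_enable/olive_submodels options. The Options dataclass and the select_olive_submodels helper are illustrative stand-ins for shared.opts and the inline logic in preprocess(); they are not names that exist in the codebase.

# Illustrative sketch only: Options stands in for shared.opts, and
# select_olive_submodels extracts the inline logic from preprocess().
from dataclasses import dataclass, field
from typing import List

@dataclass
class Options:
    olive_enable: bool = False  # replaces: cuda_compile_backend == "olive-ai"
    olive_submodels: List[str] = field(default_factory=list)  # replaces: cuda_compile

def select_olive_submodels(opts: Options, is_refiner: bool, is_sdxl: bool) -> List[str]:
    # Mirrors the selection logic in pipelines/__init__.py after this patch.
    if not opts.olive_enable:
        return []
    submodels = []
    if "Text Encoder" in opts.olive_submodels:
        if not is_refiner:
            submodels.append("text_encoder")
        if is_sdxl:
            submodels.append("text_encoder_2")
    if "Model" in opts.olive_submodels:
        submodels.append("unet")
    if "VAE" in opts.olive_submodels:
        submodels.append("vae_encoder")
        submodels.append("vae_decoder")
    return submodels

# Example: SDXL base pipeline with every submodel selected.
opts = Options(olive_enable=True, olive_submodels=["Text Encoder", "Model", "VAE"])
print(select_olive_submodels(opts, is_refiner=False, is_sdxl=True))
# ['text_encoder', 'text_encoder_2', 'unet', 'vae_encoder', 'vae_decoder']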