diff --git a/python/TorchMLIRModule.cpp b/python/TorchMLIRModule.cpp index e0b045143366..73abf5cd5577 100644 --- a/python/TorchMLIRModule.cpp +++ b/python/TorchMLIRModule.cpp @@ -7,7 +7,6 @@ // //===----------------------------------------------------------------------===// -#include "mlir-c/Bindings/Python/Interop.h" #include "mlir/Bindings/Python/PybindAdaptors.h" #include "torch-mlir-c/Dialects.h" #include "torch-mlir-c/Registration.h" diff --git a/python/test/lit.cfg.py b/python/test/lit.cfg.py index 0bb6760a7a9d..f0423c46fcaa 100644 --- a/python/test/lit.cfg.py +++ b/python/test/lit.cfg.py @@ -37,6 +37,12 @@ # test_exec_root: The root path where tests should be run. config.test_exec_root = os.path.join(config.torch_mlir_obj_root, 'test') +# On Windows the path to python could contain spaces in which case it needs to +# be provided in quotes. This is the equivalent of how %python is set up in +# llvm/utils/lit/lit/llvm/config.py. +if "Windows" in config.host_os: + config.python_executable = '"%s"' % (config.python_executable) + config.substitutions.append(('%PATH%', config.environment['PATH'])) config.substitutions.append(('%shlibext', config.llvm_shlib_ext)) config.substitutions.append(('%PYTHON', config.python_executable)) diff --git a/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/class_annotator.cpp b/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/class_annotator.cpp index c02f014c774b..b144e946ba5e 100644 --- a/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/class_annotator.cpp +++ b/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/class_annotator.cpp @@ -135,11 +135,12 @@ const ClassAnnotationMap &ClassAnnotator::getAnnotationMap() { ClassAnnotation & ClassAnnotator::getOrCreateClassAnnotation(c10::ClassType *classType) { - auto it = classAnnotations.find(classType); + auto className = classType->name()->qualifiedName(); + auto it = classAnnotations.find(className); if (it == classAnnotations.end()) { auto newAnnotation = 
std::make_unique( classType->shared_from_this()->cast()); - it = classAnnotations.insert({classType, std::move(newAnnotation)}).first; + it = classAnnotations.insert({className, std::move(newAnnotation)}).first; for (int i = 0, e = classType->methods().size(); i != e; i++) { functionToMethodMap[classType->methods()[i]] = &it->second->getMethodAnnotations()[i]; diff --git a/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/class_annotator.h b/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/class_annotator.h index 03f39c1b10db..0a0815eabbfd 100644 --- a/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/class_annotator.h +++ b/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/class_annotator.h @@ -124,9 +124,9 @@ class ClassAnnotation { std::vector methodAnnotations; }; -// A map of annotations on `c10::ClassType`s +// A map of annotations on `c10::ClassType` names using ClassAnnotationMap = - std::unordered_map>; + std::map>; // A collection of class annotations + methods to create the annotations. // diff --git a/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/function_importer.h b/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/function_importer.h index 9b8ae065f4af..626068f76268 100644 --- a/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/function_importer.h +++ b/python/torch_mlir/dialects/torch/importer/jit_ir/csrc/function_importer.h @@ -39,7 +39,7 @@ namespace torch_mlir { /// will be attached as an argument attribute to the func op's argument. If a /// null MlirAttribute is returned, no attribute will be attached to that /// argument. 
-TORCH_API MlirOperation importJitFunctionAsFuncOp( +MlirOperation importJitFunctionAsFuncOp( MlirContext context, torch::jit::Function *function, std::function getArgAttribute = [](int) -> MlirAttribute { return {nullptr}; }, diff --git a/python/torch_mlir_e2e_test/framework.py b/python/torch_mlir_e2e_test/framework.py index 38ba064c9548..c432004fb312 100644 --- a/python/torch_mlir_e2e_test/framework.py +++ b/python/torch_mlir_e2e_test/framework.py @@ -22,12 +22,13 @@ import abc from typing import Any, Callable, List, NamedTuple, Optional, TypeVar, Union, Dict +from itertools import repeat import sys import traceback import torch -import torch.multiprocessing as mp +import multiprocess as mp TorchScriptValue = Union[int, float, List['TorchScriptValue'], Dict['TorchScriptValue', @@ -314,21 +315,6 @@ def compile_and_run_test(test: Test, config: TestConfig, verbose=False) -> Any: golden_trace=golden_trace) -queue_sentinel = "QUEUE_SENTINEL" - - -def run_workers_in_parallel(task_queue: mp.Queue, worker, num_processes: int): - processes = [] - for i in range(num_processes): - p = mp.get_context("fork").Process(target=worker, args=(task_queue, )) - p.start() - processes.append(p) - for i in range(num_processes): - task_queue.put(queue_sentinel) - for p in processes: - p.join() - - def run_tests(tests: List[Test], config: TestConfig, sequential=False, verbose=False) -> List[TestResult]: """Invoke the given `Test`'s with the provided `TestConfig`.""" num_processes = min(int(mp.cpu_count() * 1.1), len(tests)) @@ -351,30 +337,16 @@ def run_tests(tests: List[Test], config: TestConfig, sequential=False, verbose=F if num_processes == 1 or sequential: return [compile_and_run_test(test, config, verbose) for test in tests] - # To run e2e tests in parallel: - # The tests are put into a synchronized queue. Multiple worker processes are - # created. Each worker takes one test at a time from the queue to compile - # and execute it. 
If the test finishes, whether failed or passed, the result - # of the test is put into a synchronized list which collects the tests - # results from all worker processes. - manager = mp.Manager() - tests_queue = manager.Queue() - sync_results = manager.list() # This is needed because autograd does not support crossing process # boundaries. torch.autograd.set_grad_enabled(False) - for test in tests: - tests_queue.put(test.unique_name) - - tests_dict = {test.unique_name: test for test in tests} - def worker(tests_queue: mp.Queue): - for test_name in iter(tests_queue.get, queue_sentinel): - sync_results.append( - compile_and_run_test(tests_dict[test_name], config)) + pool = mp.Pool(num_processes) + arg_list = zip(tests, repeat(config)) + handles = pool.starmap_async(compile_and_run_test, arg_list) + results = handles.get() - run_workers_in_parallel(tests_queue, worker, num_processes) - tests_with_results = {result.unique_name for result in sync_results} + tests_with_results = {result.unique_name for result in results} all_tests = {test.unique_name for test in tests} # For processes that are crashed due to compile time or runtime error, # the error outputs are printed out all together but no TestResult is @@ -391,7 +363,6 @@ def worker(tests_queue: mp.Queue): trace=None, golden_trace=None) for aborted_test_name in aborted_tests ] - results = [result for result in sync_results] results.extend(aborted_tests_results) results.sort(key=lambda result: result.unique_name) return results diff --git a/requirements.txt b/requirements.txt index 060ef671f627..8fc41158a6a8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,3 +16,5 @@ pyyaml # Test Requirements pillow +dill +multiprocess diff --git a/test/lit.cfg.py b/test/lit.cfg.py index 5a7ba132f3bd..9945f005deef 100644 --- a/test/lit.cfg.py +++ b/test/lit.cfg.py @@ -57,6 +57,12 @@ # Tweak the PATH to include the tools dir. 
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True) +# On Windows the path to python could contain spaces in which case it needs to +# be provided in quotes. This is the equivalent of how %python is set up in +# llvm/utils/lit/lit/llvm/config.py. +if "Windows" in config.host_os: + config.python_executable = '"%s"' % (config.python_executable) + tool_dirs = [config.standalone_tools_dir, config.llvm_tools_dir] tools = [ 'torch-mlir-opt', diff --git a/test/lit.site.cfg.py.in b/test/lit.site.cfg.py.in index 94c93a05c898..5dd0b2e5221a 100644 --- a/test/lit.site.cfg.py.in +++ b/test/lit.site.cfg.py.in @@ -5,6 +5,7 @@ import sys config.enable_bindings_python = @MLIR_ENABLE_BINDINGS_PYTHON@ config.torch_mlir_obj_root = "@TORCH_MLIR_BINARY_DIR@" config.torch_mlir_python_packages_dir = "@TORCH_MLIR_PYTHON_PACKAGES_DIR@" +config.host_os = "@HOST_OS@" config.llvm_src_root = "@LLVM_SOURCE_DIR@" config.llvm_obj_root = "@LLVM_BINARY_DIR@" config.llvm_tools_dir = "@LLVM_TOOLS_DIR@" @@ -13,7 +14,7 @@ config.llvm_shlib_dir = "@SHLIBDIR@" config.llvm_shlib_ext = "@SHLIBEXT@" config.llvm_exe_ext = "@EXEEXT@" config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@" -config.python_executable = sys.executable +config.python_executable = "@Python3_EXECUTABLE@" config.enable_jit_ir_importer = @TORCH_MLIR_ENABLE_JIT_IR_IMPORTER@ config.enable_mhlo = @TORCH_MLIR_ENABLE_MHLO@ diff --git a/test/python/importer/jit_ir/get_registered_ops.py b/test/python/importer/jit_ir/get_registered_ops.py index 376ac9423df1..b4cdb4b276f2 100644 --- a/test/python/importer/jit_ir/get_registered_ops.py +++ b/test/python/importer/jit_ir/get_registered_ops.py @@ -2,11 +2,7 @@ # This file is licensed under a pytorch-style license # See LICENSE.pytorch for license information. -# TODO: Fix ODR violation on non-static cl::opt in LLVM -# `cl::opt`. -# This causes double free on global dtors on exiting the program. -# The FileCheck still passes though. 
-# RUN: (%PYTHON %s || true) | FileCheck %s +# RUN: %PYTHON %s | FileCheck %s from torch_mlir._mlir_libs._jit_ir_importer import get_registered_ops