Miscellaneous fixes for Windows builds #1376

Merged · 7 commits · Sep 29, 2022
1 change: 0 additions & 1 deletion python/TorchMLIRModule.cpp
@@ -7,7 +7,6 @@
 //
 //===----------------------------------------------------------------------===//

-#include "mlir-c/Bindings/Python/Interop.h"
 #include "mlir/Bindings/Python/PybindAdaptors.h"
 #include "torch-mlir-c/Dialects.h"
 #include "torch-mlir-c/Registration.h"
6 changes: 6 additions & 0 deletions python/test/lit.cfg.py
@@ -37,6 +37,12 @@
 # test_exec_root: The root path where tests should be run.
 config.test_exec_root = os.path.join(config.torch_mlir_obj_root, 'test')

+# On Windows the path to python could contain spaces, in which case it needs
+# to be provided in quotes. This is the equivalent of how %python is set up
+# in llvm/utils/lit/lit/llvm/config.py.
+if "Windows" in config.host_os:
+    config.python_executable = '"%s"' % (config.python_executable)
+
 config.substitutions.append(('%PATH%', config.environment['PATH']))
 config.substitutions.append(('%shlibext', config.llvm_shlib_ext))
 config.substitutions.append(('%PYTHON', config.python_executable))
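
A short illustration of why the quoting is needed (the path below is hypothetical, not part of the PR): lit expands %PYTHON textually into a shell command line, and an unquoted path containing a space splits into two tokens.

    # Hypothetical interpreter path containing a space (illustrative only).
    python_exe = r"C:\Program Files\Python310\python.exe"

    # A RUN line such as `%PYTHON %s` becomes a shell command line. Unquoted,
    # cmd.exe splits at the space and tries to execute `C:\Program`. Wrapping
    # the path in quotes keeps it a single token, as the config change does:
    quoted = '"%s"' % python_exe
    print(quoted)  # -> "C:\Program Files\Python310\python.exe"
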
@@ -135,11 +135,12 @@ const ClassAnnotationMap &ClassAnnotator::getAnnotationMap() {

 ClassAnnotation &
 ClassAnnotator::getOrCreateClassAnnotation(c10::ClassType *classType) {
-  auto it = classAnnotations.find(classType);
+  auto className = classType->name()->qualifiedName();
+  auto it = classAnnotations.find(className);
   if (it == classAnnotations.end()) {
     auto newAnnotation = std::make_unique<ClassAnnotation>(
         classType->shared_from_this()->cast<c10::ClassType>());
-    it = classAnnotations.insert({classType, std::move(newAnnotation)}).first;
+    it = classAnnotations.insert({className, std::move(newAnnotation)}).first;
     for (int i = 0, e = classType->methods().size(); i != e; i++) {
       functionToMethodMap[classType->methods()[i]] =
           &it->second->getMethodAnnotations()[i];
@@ -124,9 +124,9 @@ class ClassAnnotation {
   std::vector<MethodAnnotation> methodAnnotations;
 };

-// A map of annotations on `c10::ClassType`s
+// A map of annotations on `c10::ClassType` names
 using ClassAnnotationMap =
-    std::unordered_map<c10::ClassType *, std::unique_ptr<ClassAnnotation>>;
+    std::map<std::string, std::unique_ptr<ClassAnnotation>>;

 // A collection of class annotations + methods to create the annotations.
 //
@@ -39,7 +39,7 @@ namespace torch_mlir {
 /// will be attached as an argument attribute to the func op's argument. If a
 /// null MlirAttribute is returned, no attribute will be attached to that
 /// argument.
-TORCH_API MlirOperation importJitFunctionAsFuncOp(
+MlirOperation importJitFunctionAsFuncOp(
     MlirContext context, torch::jit::Function *function,
     std::function<MlirAttribute(int)> getArgAttribute =
         [](int) -> MlirAttribute { return {nullptr}; },
43 changes: 7 additions & 36 deletions python/torch_mlir_e2e_test/framework.py
@@ -22,12 +22,13 @@

 import abc
 from typing import Any, Callable, List, NamedTuple, Optional, TypeVar, Union, Dict
+from itertools import repeat

 import sys
 import traceback

 import torch
-import torch.multiprocessing as mp
+import multiprocess as mp

 TorchScriptValue = Union[int, float, List['TorchScriptValue'],
                          Dict['TorchScriptValue',
@@ -314,21 +315,6 @@ def compile_and_run_test(test: Test, config: TestConfig, verbose=False) -> Any:
         golden_trace=golden_trace)


-queue_sentinel = "QUEUE_SENTINEL"
-
-
-def run_workers_in_parallel(task_queue: mp.Queue, worker, num_processes: int):
-    processes = []
-    for i in range(num_processes):
-        p = mp.get_context("fork").Process(target=worker, args=(task_queue, ))
-        p.start()
-        processes.append(p)
-    for i in range(num_processes):
-        task_queue.put(queue_sentinel)
-    for p in processes:
-        p.join()
-
-
 def run_tests(tests: List[Test], config: TestConfig, sequential=False, verbose=False) -> List[TestResult]:
     """Invoke the given `Test`'s with the provided `TestConfig`."""
     num_processes = min(int(mp.cpu_count() * 1.1), len(tests))
@@ -351,30 +337,16 @@ def run_tests(tests: List[Test], config: TestConfig, sequential=False, verbose=False) -> List[TestResult]:
     if num_processes == 1 or sequential:
         return [compile_and_run_test(test, config, verbose) for test in tests]

-    # To run e2e tests in parallel:
-    # The tests are put into a synchronized queue. Multiple worker processes are
-    # created. Each worker takes one test at a time from the queue to compile
-    # and execute it. If the test finishes, whether failed or passed, the result
-    # of the test is put into a synchronized list which collects the tests
-    # results from all worker processes.
-    manager = mp.Manager()
-    tests_queue = manager.Queue()
-    sync_results = manager.list()
     # This is needed because autograd does not support crossing process
     # boundaries.
     torch.autograd.set_grad_enabled(False)
-    for test in tests:
-        tests_queue.put(test.unique_name)

-    tests_dict = {test.unique_name: test for test in tests}
-
-    def worker(tests_queue: mp.Queue):
-        for test_name in iter(tests_queue.get, queue_sentinel):
-            sync_results.append(
-                compile_and_run_test(tests_dict[test_name], config))
+    pool = mp.Pool(num_processes)
+    arg_list = zip(tests, repeat(config))
+    handles = pool.starmap_async(compile_and_run_test, arg_list)
+    results = handles.get()

-    run_workers_in_parallel(tests_queue, worker, num_processes)
-    tests_with_results = {result.unique_name for result in sync_results}
+    tests_with_results = {result.unique_name for result in results}
     all_tests = {test.unique_name for test in tests}
     # For processes that are crashed due to compile time or runtime error,
     # the error outputs are printed out all together but no TestResult is
@@ -391,7 +363,6 @@ def worker(tests_queue: mp.Queue):
             trace=None,
             golden_trace=None) for aborted_test_name in aborted_tests
     ]
-    results = [result for result in sync_results]
     results.extend(aborted_tests_results)
     results.sort(key=lambda result: result.unique_name)
     return results
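
The rewrite drops the hand-rolled queue-plus-fork worker scheme, which cannot work on Windows (only the spawn start method is available there, so mp.get_context("fork") fails), in favor of a process pool. A runnable sketch of the same pattern, with stand-in test and config values:

    from itertools import repeat
    import multiprocess as mp  # dill-backed drop-in for stdlib multiprocessing

    def compile_and_run_test(test, config):
        # Stand-in for the real compile-and-execute step in framework.py.
        return f"{test}: ran with {config}"

    if __name__ == "__main__":
        tests = ["test_a", "test_b", "test_c"]
        pool = mp.Pool(2)
        # starmap_async unpacks each (test, config) pair into a worker call;
        # .get() blocks until every worker has finished.
        handles = pool.starmap_async(compile_and_run_test,
                                     zip(tests, repeat("cfg")))
        print(handles.get())  # -> ['test_a: ran with cfg', ...]
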
2 changes: 2 additions & 0 deletions requirements.txt
@@ -16,3 +16,5 @@ pyyaml

 # Test Requirements
 pillow
+dill
+multiprocess
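
These two new dependencies back the framework.py change above: multiprocess is the dill-backed variant of the stdlib multiprocessing, and dill can serialize objects (lambdas, closures, bound methods) that the stdlib pickler rejects — a likely requirement once workers are spawned rather than forked, since spawn must serialize everything sent to them. A minimal sketch, assuming only that both packages are installed:

    import pickle
    import dill
    import multiprocess as mp

    double = lambda x: x * 2

    # The stdlib pickler refuses lambdas, so sending them to stdlib
    # multiprocessing workers fails; dill serializes them fine.
    try:
        pickle.dumps(double)
    except pickle.PicklingError as e:
        print("pickle failed:", e)
    print(dill.loads(dill.dumps(double))(21))  # -> 42

    if __name__ == "__main__":
        with mp.Pool(2) as pool:
            # Works because multiprocess routes its IPC through dill.
            print(pool.map(double, [1, 2, 3]))  # -> [2, 4, 6]
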
6 changes: 6 additions & 0 deletions test/lit.cfg.py
@@ -57,6 +57,12 @@
 # Tweak the PATH to include the tools dir.
 llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)

+# On Windows the path to python could contain spaces, in which case it needs
+# to be provided in quotes. This is the equivalent of how %python is set up
+# in llvm/utils/lit/lit/llvm/config.py.
+if "Windows" in config.host_os:
+    config.python_executable = '"%s"' % (config.python_executable)
+
 tool_dirs = [config.standalone_tools_dir, config.llvm_tools_dir]
 tools = [
     'torch-mlir-opt',
3 changes: 2 additions & 1 deletion test/lit.site.cfg.py.in
@@ -5,6 +5,7 @@ import sys
 config.enable_bindings_python = @MLIR_ENABLE_BINDINGS_PYTHON@
 config.torch_mlir_obj_root = "@TORCH_MLIR_BINARY_DIR@"
 config.torch_mlir_python_packages_dir = "@TORCH_MLIR_PYTHON_PACKAGES_DIR@"
+config.host_os = "@HOST_OS@"
 config.llvm_src_root = "@LLVM_SOURCE_DIR@"
 config.llvm_obj_root = "@LLVM_BINARY_DIR@"
 config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
@@ -13,7 +14,7 @@ config.llvm_shlib_dir = "@SHLIBDIR@"
 config.llvm_shlib_ext = "@SHLIBEXT@"
 config.llvm_exe_ext = "@EXEEXT@"
 config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@"
-config.python_executable = sys.executable
+config.python_executable = "@Python3_EXECUTABLE@"
 config.enable_jit_ir_importer = @TORCH_MLIR_ENABLE_JIT_IR_IMPORTER@
 config.enable_mhlo = @TORCH_MLIR_ENABLE_MHLO@

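For illustration, a hypothetical configured output of this template on Windows (values invented): the CMake-resolved interpreter replaces sys.executable, which may differ from the Python that happens to drive lit, and host_os feeds the quoting logic added in the lit.cfg.py files above.

    # Hypothetical lit.site.cfg.py after CMake configure_file() (values invented):
    config.host_os = "Windows"
    config.python_executable = "C:/Program Files/Python310/python.exe"
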
6 changes: 1 addition & 5 deletions test/python/importer/jit_ir/get_registered_ops.py
@@ -2,11 +2,7 @@
 # This file is licensed under a pytorch-style license
 # See LICENSE.pytorch for license information.

-# TODO: Fix ODR violation on non-static cl::opt in LLVM
-# `cl::opt<FunctionSummary::ForceSummaryHotnessType, true>`.
-# This causes double free on global dtors on exiting the program.
-# The FileCheck still passes though.
-# RUN: (%PYTHON %s || true) | FileCheck %s
+# RUN: %PYTHON %s | FileCheck %s

 from torch_mlir._mlir_libs._jit_ir_importer import get_registered_ops