diff --git a/thinc/tests/backends/test_ops.py b/thinc/tests/backends/test_ops.py
index e095142b1..0d06d4833 100644
--- a/thinc/tests/backends/test_ops.py
+++ b/thinc/tests/backends/test_ops.py
@@ -8,8 +8,8 @@
 from packaging.version import Version
 from thinc.api import NumpyOps, CupyOps, Ops, get_ops
 from thinc.api import get_current_ops, use_ops
-from thinc.util import torch2xp, xp2torch, gpu_is_available
-from thinc.compat import has_torch, torch_version
+from thinc.util import torch2xp, xp2torch
+from thinc.compat import has_cupy_gpu, has_torch, torch_version
 from thinc.api import fix_random_seed
 from thinc.api import LSTM
 from thinc.types import Floats2d
@@ -26,7 +26,7 @@
 BLIS_OPS = NumpyOps(use_blis=True)
 CPU_OPS = [NUMPY_OPS, VANILLA_OPS]
 XP_OPS = [NUMPY_OPS]
-if CupyOps.xp is not None and gpu_is_available():
+if has_cupy_gpu:
     XP_OPS.append(CupyOps())
 ALL_OPS = XP_OPS + [VANILLA_OPS]
 
@@ -591,9 +591,7 @@ def test_backprop_seq2col_window_two(ops, dtype):
     ops.xp.testing.assert_allclose(seq, expected, atol=0.001, rtol=0.001)
 
 
-@pytest.mark.skipif(
-    CupyOps.xp is None or not gpu_is_available(), reason="needs GPU/CuPy"
-)
+@pytest.mark.skipif(not has_cupy_gpu, reason="needs GPU/CuPy")
 @pytest.mark.parametrize("nW", [1, 2])
 def test_large_seq2col_gpu_against_cpu(nW):
     cupy_ops = CupyOps()
@@ -615,9 +613,7 @@ def test_large_seq2col_gpu_against_cpu(nW):
     assert_allclose(cols, cols_gpu.get())
 
 
-@pytest.mark.skipif(
-    CupyOps.xp is None or not gpu_is_available(), reason="needs GPU/CuPy"
-)
+@pytest.mark.skipif(not has_cupy_gpu, reason="needs GPU/CuPy")
 @pytest.mark.parametrize("nW", [1, 2])
 def test_large_backprop_seq2col_gpu_against_cpu(nW):
     cupy_ops = CupyOps()
diff --git a/thinc/tests/layers/test_tensorflow_wrapper.py b/thinc/tests/layers/test_tensorflow_wrapper.py
index ff254ffc0..c1b85da3b 100644
--- a/thinc/tests/layers/test_tensorflow_wrapper.py
+++ b/thinc/tests/layers/test_tensorflow_wrapper.py
@@ -2,8 +2,8 @@
 import pytest
 
 from thinc.api import Adam, ArgsKwargs, Linear, Model, TensorFlowWrapper
 from thinc.api import get_current_ops, keras_subclass, tensorflow2xp, xp2tensorflow
-from thinc.util import gpu_is_available, to_categorical
-from thinc.compat import has_tensorflow
+from thinc.util import to_categorical
+from thinc.compat import has_cupy_gpu, has_tensorflow
 
 from ..util import check_input_converters, make_tempdir
@@ -359,7 +359,7 @@ def test_tensorflow_wrapper_to_cpu(tf_model):
 
 
 @pytest.mark.skipif(not has_tensorflow, reason="needs TensorFlow")
-@pytest.mark.skipif(not gpu_is_available(), reason="needs GPU/cupy")
+@pytest.mark.skipif(not has_cupy_gpu, reason="needs GPU/cupy")
 def test_tensorflow_wrapper_to_gpu(model, X):
     model.to_gpu(0)
 
diff --git a/thinc/tests/model/test_model.py b/thinc/tests/model/test_model.py
index 9ec71fc93..087e481c5 100644
--- a/thinc/tests/model/test_model.py
+++ b/thinc/tests/model/test_model.py
@@ -6,8 +6,7 @@
 from thinc.api import Shim, Softmax, chain, change_attr_values
 from thinc.api import concatenate, set_dropout_rate
 from thinc.api import use_ops, with_debug, wrap_model_recursive
-from thinc.util import gpu_is_available
-from thinc.compat import has_cupy
+from thinc.compat import has_cupy_gpu
 import numpy
 
 from ..util import make_tempdir
@@ -410,7 +409,7 @@ def test_model_gpu():
     import ml_datasets
 
     ops = "cpu"
-    if has_cupy and gpu_is_available():
+    if has_cupy_gpu:
         ops = "cupy"
 
     with use_ops(ops):
diff --git a/thinc/util.py b/thinc/util.py
index 16f0f0196..cdfdf4c1e 100644
--- a/thinc/util.py
+++ b/thinc/util.py
@@ -44,7 +44,7 @@ def fix_random_seed(seed: int = 0) -> None:  # pragma: no cover
     numpy.random.seed(seed)
     if has_torch:
         torch.manual_seed(seed)
-    if has_cupy and gpu_is_available():
+    if has_cupy_gpu:
         cupy.random.seed(seed)
     if has_torch and has_torch_gpu:
         torch.cuda.manual_seed_all(seed)
@@ -125,8 +125,8 @@ def to_numpy(data):  # pragma: no cover
 
 def set_active_gpu(gpu_id: int) -> "cupy.cuda.Device":  # pragma: no cover
     """Set the current GPU device for cupy and torch (if available)."""
-    if not gpu_is_available():
-        raise ValueError("No GPU devices detected")
+    if not has_cupy_gpu:
+        raise ValueError("No CUDA GPU devices detected")
 
     device = cupy.cuda.device.Device(gpu_id)
     device.use()
@@ -151,9 +151,7 @@ def require_cpu() -> bool:  # pragma: no cover
 
 def prefer_gpu(gpu_id: int = 0) -> bool:  # pragma: no cover
     """Use GPU if it's available. Returns True if so, False otherwise."""
-    from .backends.cupy_ops import CupyOps
-
-    if not gpu_is_available():
+    if not has_cupy_gpu:
         return False
     else:
         require_gpu(gpu_id=gpu_id)
@@ -163,8 +161,8 @@ def prefer_gpu(gpu_id: int = 0) -> bool:  # pragma: no cover
 def require_gpu(gpu_id: int = 0) -> bool:  # pragma: no cover
     from .backends import set_current_ops, CupyOps
 
-    if not gpu_is_available():
-        raise ValueError("No GPU devices detected")
+    if not has_cupy_gpu:
+        raise ValueError("No CUDA GPU devices detected")
 
     set_current_ops(CupyOps())
     set_active_gpu(gpu_id)
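
The diff replaces call-time `gpu_is_available()` checks with `has_cupy_gpu`, a boolean imported from `thinc.compat`. The compat module itself is not part of this patch, so the following is only a minimal sketch of what such a module-level probe could look like, not the actual `thinc.compat` source; it assumes CuPy's public `cupy.cuda.runtime.getDeviceCount()` API:

```python
# Hypothetical sketch of a module-level CUDA probe in the spirit of
# thinc.compat.has_cupy_gpu (the real module defines further flags such
# as has_torch and has_tensorflow, which are omitted here).
try:
    import cupy

    has_cupy = True
    try:
        # getDeviceCount() raises CUDARuntimeError when no CUDA driver or
        # device is present; treat both zero devices and an error as "no GPU".
        has_cupy_gpu = cupy.cuda.runtime.getDeviceCount() > 0
    except cupy.cuda.runtime.CUDARuntimeError:
        has_cupy_gpu = False
except ImportError:
    cupy = None
    has_cupy = False
    has_cupy_gpu = False
```

Computing the flag once at import time lets a single name replace the compound conditions used before (`has_cupy and gpu_is_available()`, `CupyOps.xp is not None and gpu_is_available()`), as seen in the simplified `pytest.mark.skipif` decorators and `if has_cupy_gpu:` guards throughout the diff.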