From 8ce87c356947d85cd19fbd7a33b9339de14f3fad Mon Sep 17 00:00:00 2001 From: Matteo Cencini Date: Thu, 19 Sep 2024 12:44:09 +0200 Subject: [PATCH] refactor: array interface conversion in separate module. --- pyproject.toml | 2 +- src/mrinufft/_array_compat.py | 379 ++++++++++++++++++ src/mrinufft/extras/smaps.py | 2 +- src/mrinufft/operators/base.py | 293 +------------- src/mrinufft/operators/interfaces/tfnufft.py | 9 +- .../operators/interfaces/torchkbnufft.py | 3 +- src/mrinufft/operators/off_resonance.py | 9 +- tests/operators/test_offres_exp_approx.py | 2 +- tests/test_array_compat.py | 140 +++++++ 9 files changed, 535 insertions(+), 304 deletions(-) create mode 100644 src/mrinufft/_array_compat.py create mode 100644 tests/test_array_compat.py diff --git a/pyproject.toml b/pyproject.toml index 604930c2..bfe9bcd7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,7 @@ dynamic = ["version"] [project.optional-dependencies] -gpunufft = ["gpuNUFFT>=0.10.0", "cupy-cuda12x"] +gpunufft = ["gpuNUFFT>=0.9.0", "cupy-cuda12x"] torchkbnufft = ["torchkbnufft", "cupy-cuda12x"] cufinufft = ["cufinufft<2.3", "cupy-cuda12x"] finufft = ["finufft"] diff --git a/src/mrinufft/_array_compat.py b/src/mrinufft/_array_compat.py new file mode 100644 index 00000000..351d3618 --- /dev/null +++ b/src/mrinufft/_array_compat.py @@ -0,0 +1,379 @@ +"""Array libraries compatibility utils.""" + +import warnings +import inspect + +from functools import wraps + +from mrinufft._utils import get_array_module +from mrinufft.operators.interfaces.utils import is_cuda_array, is_cuda_tensor + + +CUPY_AVAILABLE = True +try: + import cupy as cp +except ImportError: + CUPY_AVAILABLE = False + +AUTOGRAD_AVAILABLE = True +try: + import torch +except ImportError: + AUTOGRAD_AVAILABLE = False + + +TENSORFLOW_AVAILABLE = True +try: + import tensorflow as tf +except ImportError: + TENSORFLOW_AVAILABLE = False + + +def _tf_cuda_is_available(): + """Check whether Tensorflow has CUDA support or not.""" + if TENSORFLOW_AVAILABLE: + devices = tf.config.list_physical_devices() + device_type = [device.device_type for device in devices] + return "GPU" in device_type + else: + return False + + +tf_cuda_is_available = _tf_cuda_is_available() + + +def with_numpy(fun): + """Ensure the function works internally with numpy array.""" + data_arg_idx = _ismethod(fun) + + @wraps(fun) + def wrapper(*args, **kwargs): + args = _get_args(fun, args, kwargs) + + # get array module and device from non-self argument + xp = get_array_module(args[data_arg_idx]) + device = _get_device(args[data_arg_idx]) + + # convert all to numpy + args = _to_numpy(*args) + + # run function + ret_ = fun(*args) + + # convert output to original array module and device + return _to_interface(ret_, xp, device) + + return wrapper + + +def with_tensorflow(fun): + """Ensure the function works internally with tensorflow array.""" + data_arg_idx = _ismethod(fun) + + @wraps(fun) + def wrapper(*args, **kwargs): + args = _get_args(fun, args, kwargs) + + # get array module from non-self argument + xp = get_array_module(args[data_arg_idx]) + device = _get_device(args[data_arg_idx]) + + # convert all to tensorflow + args = _to_tensorflow(*args) + + # run function + ret_ = fun(*args) + + # convert output to original array module and device + print(xp.__name__) + print(device) + return _to_interface(ret_, xp, device) + + return wrapper + + +def with_numpy_cupy(fun): + """Ensure the function works internally with numpy or cupy array.""" + data_arg_idx = _ismethod(fun) + + @wraps(fun) + def 
wrapper(*args, **kwargs): + args = _get_args(fun, args, kwargs) + + # get array module from non-self argument + xp = get_array_module(args[data_arg_idx]) + + # convert all to cupy / numpy according to data arg device + args = _to_numpy_cupy(args, data_arg_idx) + + # run function + ret_ = fun(*args) + + # convert output to original array module and device + return _to_interface(ret_, xp) + + return wrapper + + +def with_torch(fun): + """Ensure the function works internally with Torch.""" + data_arg_idx = _ismethod(fun) + + @wraps(fun) + def wrapper(*args, **kwargs): + args = _get_args(fun, args, kwargs) + + # get array module from non-self argument + xp = get_array_module(args[data_arg_idx]) + device = _get_device(args[data_arg_idx]) + + # convert all to tensorflow + args = _to_torch(*args) + + # run function + ret_ = fun(*args) + + # convert output to original array module and device + return _to_interface(ret_, xp, device) + + return wrapper + + +def _ismethod(fun): + """Determine whether input fun is instance-/classmethod or not.""" + first_arg = list(inspect.signature(fun).parameters)[0] + return first_arg in ["self", "cls"] + + +def _get_device(input): + """Determine computational device from input array.""" + try: + return input.device + except Exception: + return "cpu" + + +def _get_args(func, args, kwargs): + """Convert input args/kwargs mix to a list of positional arguments. + + This automatically fills missing kwargs with default values. + """ + signature = inspect.signature(func) + + # Get number of arguments + n_args = len(args) + + # Create a dictionary of keyword arguments and their default values + _kwargs = {} + for k, v in signature.parameters.items(): + if v.default is not inspect.Parameter.empty: + _kwargs[k] = v.default + else: + _kwargs[k] = None + + # Merge the default keyword arguments with the provided kwargs + for k in kwargs.keys(): + _kwargs[k] = kwargs[k] + + # Replace args + _args = list(_kwargs.values()) + + return list(args) + _args[n_args:] + + +def _to_numpy(*args): + """Convert a sequence of arguments to numpy. + + Non-arrays are ignored. + + """ + # enforce mutable + args = list(args) + + # convert positional arguments + for n in range(len(args)): + _arg = args[n] + if hasattr(_arg, "__array__"): + if is_cuda_array(_arg): + warnings.warn("data is on gpu, it will be moved to CPU.") + xp = get_array_module(_arg) + if xp.__name__ == "torch": + _arg = _arg.numpy(force=True) + elif xp.__name__ == "cupy": + _arg = cp.asnumpy(_arg) + elif "tensorflow" in xp.__name__: + _arg = _arg.numpy() + args[n] = _arg + + return args + + +def _to_cupy(*args, device=None): + """Convert a sequence of arguments to cupy. + + Non-arrays are ignored. + + This avoid transfers between different devices (e.g., different GPUs). 
+ """ + # enforce mutable + args = list(args) + + # convert positional arguments + for n in range(len(args)): + _arg = args[n] + if hasattr(_arg, "__array__"): + xp = get_array_module(_arg) + if xp.__name__ == "numpy": + with cp.cuda.Device(device): + _arg = cp.asarray(_arg) + elif xp.__name__ == "torch": + if _arg.requires_grad: + _arg = _arg.detach() + if _arg.is_cpu: + with cp.cuda.Device(device): + _arg = cp.asarray(_arg.numpy()) + else: + _arg = cp.from_dlpack(_arg) + elif "tensorflow" in xp.__name__: + if "CPU" in _arg.device: + with cp.cuda.Device(device): + _arg = cp.asarray(_arg.numpy()) + else: + _arg = cp.from_dlpack(tf.experimental.dlpack.to_dlpack(_arg)) + + args[n] = _arg + + return args + + +def _to_numpy_cupy(args, data_arg_idx): + """Convert a sequence of arguments to numpy or cupy. + + Non-arrays are ignored. + + This avoid transfers between different devices + (e.g., CPU->GPU, GPU->CPU or different GPUs). + """ + if is_cuda_array(args[data_arg_idx]) and CUPY_AVAILABLE: + return _to_cupy(*args) + elif is_cuda_tensor(args[data_arg_idx]) and CUPY_AVAILABLE: + return _to_cupy(*args) + else: + return _to_numpy(*args) + + +def _to_torch(*args, device=None): + """Convert a sequence of arguments to Pytorch Tensors. + + Non-arrays are ignored. + + This avoid transfers between different devices + (e.g., CPU->GPU, GPU->CPU or different GPUs). + """ + # enforce mutable + args = list(args) + + # convert positional arguments + for n in range(len(args)): + _arg = args[n] + if hasattr(_arg, "__array__"): + xp = get_array_module(_arg) + if xp.__name__ == "numpy": + _arg = torch.as_tensor(_arg, device=device) + elif xp.__name__ == "cupy": + if torch.cuda.is_available(): + _arg = torch.from_dlpack(_arg) + else: + warnings.warn("data is on gpu, it will be moved to CPU.") + _arg = torch.as_tensor(cp.asnumpy(_arg)) + elif "tensorflow" in xp.__name__: + if "CPU" in _arg.device: + _arg = torch.as_tensor(_arg.numpy(), device=device) + else: + _arg = torch.from_dlpack(tf.experimental.dlpack.to_dlpack(_arg)) + + args[n] = _arg + + return args + + +def _to_tensorflow(*args): + """Convert a sequence of arguments to Tensorflow tensors. + + Non-arrays are ignored. + + This avoid transfers between different devices + (e.g., CPU->GPU, GPU->CPU or different GPUs). + """ + # enforce mutable + args = list(args) + + # convert positional arguments + for n in range(len(args)): + _arg = args[n] + if hasattr(_arg, "__array__"): + xp = get_array_module(_arg) + if xp.__name__ == "numpy": + with tf.device("CPU"): + _arg = tf.convert_to_tensor(_arg) + elif xp.__name__ == "cupy": + if tf_cuda_is_available: + _arg = tf.experimental.dlpack.from_dlpack(_arg.toDlpack()) + else: + warnings.warn("data is on gpu, it will be moved to CPU.") + _arg = tf.convert_to_tensor(cp.asnumpy(_arg)) + elif xp.__name__ == "torch": + if _arg.requires_grad: + _arg = _arg.detach() + if _arg.is_cpu: + _arg = tf.convert_to_tensor(_arg) + elif tf_cuda_is_available: + _arg = tf.experimental.dlpack.from_dlpack( + torch.utils.dlpack.to_dlpack(_arg) + ) + else: + _arg = tf.convert_to_tensor(_arg.numpy(force=True)) + args[n] = _arg + + return args + + +def _to_interface(args, array_interface, device=None): + """ + Convert a list of arguments to a given array interface. + + User may provide the desired computational device. + Non-arrays are ignored. + + Parameters + ---------- + args : list[object] + List of objects to be converted. + array_interface : ModuleType + Desired array backend (e.g., numpy). 
+ device : Device, optional + Desired computational device, (e.g., "cpu" or Device('cuda')). + The default is None (i.e., maintain same device as input argument). + + Returns + ------- + list[object] + List of converted objects. + + """ + # enforce iterable + if isinstance(args, (list, tuple)) is False: + args = [args] + + # convert to target interface + if array_interface.__name__ == "numpy": + args = _to_numpy(*args) + elif array_interface.__name__ == "cupy": + args = _to_cupy(*args, device=device) + elif array_interface.__name__ == "torch": + args = _to_torch(*args, device=device) + + if len(args) == 1: + return args[0] + + return tuple(args) diff --git a/src/mrinufft/extras/smaps.py b/src/mrinufft/extras/smaps.py index eb2aaa4d..c3f7018f 100644 --- a/src/mrinufft/extras/smaps.py +++ b/src/mrinufft/extras/smaps.py @@ -3,7 +3,7 @@ from __future__ import annotations from mrinufft.density.utils import flat_traj -from mrinufft.operators.base import get_array_module +from mrinufft._utils import get_array_module from .utils import register_smaps import numpy as np diff --git a/src/mrinufft/operators/base.py b/src/mrinufft/operators/base.py index 53ba5afb..29e3e2e5 100644 --- a/src/mrinufft/operators/base.py +++ b/src/mrinufft/operators/base.py @@ -8,32 +8,18 @@ from __future__ import annotations -import warnings -import inspect - from abc import ABC, abstractmethod -from functools import partial, wraps +from functools import partial import numpy as np -from mrinufft._utils import auto_cast, get_array_module, power_method +from mrinufft._array_compat import with_numpy, with_numpy_cupy, AUTOGRAD_AVAILABLE +from mrinufft._utils import auto_cast, power_method from mrinufft.density import get_density from mrinufft.extras import get_smaps -from mrinufft.operators.interfaces.utils import is_cuda_array, is_cuda_tensor - -CUPY_AVAILABLE = True -try: - import cupy as cp -except ImportError: - CUPY_AVAILABLE = False - -AUTOGRAD_AVAILABLE = True -try: - import torch +if AUTOGRAD_AVAILABLE: from mrinufft.operators.autodiff import MRINufftAutoGrad -except ImportError: - AUTOGRAD_AVAILABLE = False # Mapping between numpy float and complex types. @@ -121,277 +107,6 @@ class or instance of class if args or kwargs are given. 
return operator -def with_numpy(fun): - """Ensure the function works internally with numpy array.""" - if _ismethod(fun): - data_arg_idx = 1 - else: - data_arg_idx = 0 - - @wraps(fun) - def wrapper(*args, **kwargs): - args = _get_args(fun, args, kwargs) - - # get array module and device from non-self argument - xp = _get_array_module(args[data_arg_idx]) - device = _get_device(args[data_arg_idx]) - - # convert all to numpy - args = _to_numpy(*args) - - # run function - ret_ = fun(*args) - - # convert output to original array module and device - return _to_interface(ret_, xp, device) - - return wrapper - - -def with_tensorflow(fun): - """Ensure the function works internally with tensorflow array.""" - if _ismethod(fun): - data_arg_idx = 1 - else: - data_arg_idx = 0 - - @wraps(fun) - def wrapper(*args, **kwargs): - args = _get_args(fun, args, kwargs) - - # get array module from non-self argument - xp = _get_array_module(args[data_arg_idx]) - - # convert all to tensorflow - args = _to_tensorflow(*args) - - # run function - ret_ = fun(*args) - - # convert output to original array module and device - return _to_interface(ret_, xp) - - return wrapper - - -def with_numpy_cupy(fun): - """Ensure the function works internally with numpy or cupy array.""" - if _ismethod(fun): - data_arg_idx = 1 - else: - data_arg_idx = 0 - - @wraps(fun) - def wrapper(*args, **kwargs): - args = _get_args(fun, args, kwargs) - - # get array module from non-self argument - xp = _get_array_module(args[data_arg_idx]) - - # convert all to cupy / numpy according to data arg device - args = _to_numpy_cupy(args, data_arg_idx) - - # run function - ret_ = fun(*args) - - # convert output to original array module and device - return _to_interface(ret_, xp) - - return wrapper - - -def with_torch(fun): - """Ensure the function works internally with Torch.""" - if _ismethod(fun): - data_arg_idx = 1 - else: - data_arg_idx = 0 - - @wraps(fun) - def wrapper(*args, **kwargs): - args = _get_args(fun, args, kwargs) - - # get array module from non-self argument - xp = _get_array_module(args[data_arg_idx]) - - # convert all to tensorflow - args = _to_torch(*args) - - # run function - ret_ = fun(*args) - - # convert output to original array module and device - return _to_interface(ret_, xp) - - return wrapper - - -def _ismethod(fun): # ismethod works on instance methods, not classes (always False) - first_arg = list(inspect.signature(fun).parameters)[0] - return first_arg in ["self", "cls"]: - - -def _get_array_module(input): # handle native Python case - try: - return get_array_module(input) - except Exception: - return np - - -def _get_device(input): - if is_cuda_array(input) or is_cuda_tensor(input): - return input.device - else: - return None - - -def _get_args(func, args, kwargs): - signature = inspect.signature(func) - - # Get number of arguments - n_args = len(args) - - # Create a dictionary of keyword arguments and their default values - _kwargs = {} - for k, v in signature.parameters.items(): - if v.default is not inspect.Parameter.empty: - _kwargs[k] = v.default - else: - _kwargs[k] = None - - # Merge the default keyword arguments with the provided kwargs - for k in kwargs.keys(): - _kwargs[k] = kwargs[k] - - # Replace args - _args = list(_kwargs.values()) - - return list(args) + _args[n_args:] - - -def _to_numpy(*args): - - # enforce mutable - args = list(args) - - # convert positional arguments - for n in range(len(args)): - _arg = args[n] - if hasattr(_arg, "__array__"): - if is_cuda_array(_arg): - warnings.warn("data is on gpu, 
it will be moved to CPU.") - xp = get_array_module(_arg) - if xp.__name__ == "torch": - _arg = _arg.numpy(force=True) - elif xp.__name__ == "cupy": - _arg = _arg.get() - args[n] = _arg - - return args - - -def _to_cupy(*args, device=None): - - # enforce mutable - args = list(args) - - # convert positional arguments - for n in range(len(args)): - _arg = args[n] - if hasattr(_arg, "__array__"): - xp = get_array_module(_arg) - if xp.__name__ == "numpy": - with cp.cuda.Device(device): - _arg = cp.asarray(_arg) - elif xp.__name__ == "torch": - if _arg.requires_grad: - _arg = _arg.detach() - if _arg.is_cpu: - _arg = cp.asarray(_arg) - else: - _arg = cp.from_dlpack(_arg) - args[n] = _arg - - return args - - -def _to_numpy_cupy(args, data_arg_idx): - if is_cuda_array(args[data_arg_idx]) and CUPY_AVAILABLE: - return _to_cupy(*args) - elif is_cuda_tensor(args[data_arg_idx]) and CUPY_AVAILABLE: - return _to_cupy(*args) - else: - return _to_numpy(*args) - - -def _to_torch(*args, device=None): - - # enforce mutable - args = list(args) - - # convert positional arguments - for n in range(len(args)): - _arg = args[n] - if hasattr(_arg, "__array__"): - xp = get_array_module(_arg) - if xp.__name__ == "numpy": - _arg = torch.as_tensor(_arg, device=device) - elif xp.__name__ == "cupy": - _arg = torch.from_dlpack(_arg) - args[n] = _arg - - return args - - -def _to_tensorflow(*args): - import tensorflow as tf - - # enforce mutable - args = list(args) - - # convert positional arguments - for n in range(len(args)): - _arg = args[n] - if hasattr(_arg, "__array__"): - xp = get_array_module(_arg) - if xp.__name__ == "numpy": - _arg = tf.convert_to_tensor(_arg) - elif xp.__name__ == "cupy": - _arg = tf.experimental.dlpack.from_dlpack(_arg.toDlpack()) - elif xp.__name__ == "torch": - if _arg.requires_grad: - _arg = _arg.detach() - if _arg.is_cpu: - _arg = tf.convert_to_tensor(_arg) - else: - _arg = tf.experimental.dlpack.from_dlpack( - torch.utils.dlpack.to_dlpack(_arg) - ) - args[n] = _arg - - return args - - -def _to_interface(args, array_interface, device=None): - - # enforce iterable - if isinstance(args, (list, tuple)) is False: - args = [args] - - # convert to target interface - if array_interface.__name__ == "numpy": - args = _to_numpy(*args) - elif array_interface.__name__ == "cupy": - args = _to_cupy(*args, device=device) - elif array_interface.__name__ == "torch": - args = _to_torch(*args, device=device) - - if len(args) == 1: - return args[0] - - return tuple(args) - - class FourierOperatorBase(ABC): """Base Fourier Operator class. diff --git a/src/mrinufft/operators/interfaces/tfnufft.py b/src/mrinufft/operators/interfaces/tfnufft.py index 09ef025c..0b9e5cfe 100644 --- a/src/mrinufft/operators/interfaces/tfnufft.py +++ b/src/mrinufft/operators/interfaces/tfnufft.py @@ -2,20 +2,19 @@ import numpy as np -from ..base import FourierOperatorBase, with_tensorflow +from ..base import FourierOperatorBase +from mrinufft._array_compat import with_tensorflow, TENSORFLOW_AVAILABLE from mrinufft._utils import proper_trajectory -TENSORFLOW_AVAILABLE = True +if TENSORFLOW_AVAILABLE: + import tensorflow as tf try: import tensorflow_nufft as tfnufft import tensorflow_mri as tfmri - import tensorflow as tf - except ImportError: TENSORFLOW_AVAILABLE = False - class MRITensorflowNUFFT(FourierOperatorBase): """MRI Transform Operator using Tensorflow NUFFT. 
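The decorators touched above (`with_numpy`, `with_numpy_cupy`, `with_torch`, `with_tensorflow`) now live in `mrinufft._array_compat` instead of `mrinufft.operators.base`. As a minimal usage sketch, not part of this patch: every array-like positional argument is converted to the target backend before the wrapped function runs, and the return value is converted back to the caller's array library and device afterwards.

    import numpy as np

    from mrinufft._array_compat import with_numpy


    @with_numpy
    def normalize(x):
        # The body always sees a plain numpy array, regardless of the
        # array library the caller used.
        return x / np.linalg.norm(x)


    # A cupy or torch array passed here is moved to CPU (with a warning
    # when it lives on GPU), processed with numpy, and returned as the
    # caller's original array type and device.
    y = normalize(np.asarray([3.0, 4.0]))
    print(type(y))  # <class 'numpy.ndarray'>
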
diff --git a/src/mrinufft/operators/interfaces/torchkbnufft.py b/src/mrinufft/operators/interfaces/torchkbnufft.py index 3fa06cca..22b6bdf4 100644 --- a/src/mrinufft/operators/interfaces/torchkbnufft.py +++ b/src/mrinufft/operators/interfaces/torchkbnufft.py @@ -1,6 +1,7 @@ """Pytorch MRI Nufft Operators.""" -from mrinufft.operators.base import FourierOperatorBase, with_torch +from mrinufft.operators.base import FourierOperatorBase +from mrinufft._array_compat import with_torch from mrinufft._utils import proper_trajectory from mrinufft.operators.interfaces.utils import ( is_cuda_tensor, diff --git a/src/mrinufft/operators/off_resonance.py b/src/mrinufft/operators/off_resonance.py index d45501c2..c0b5067a 100644 --- a/src/mrinufft/operators/off_resonance.py +++ b/src/mrinufft/operators/off_resonance.py @@ -7,14 +7,11 @@ import math import numpy as np +from .._array_compat import CUPY_AVAILABLE, AUTOGRAD_AVAILABLE, with_numpy_cupy from .._utils import get_array_module -from .base import ( - FourierOperatorBase, - CUPY_AVAILABLE, - AUTOGRAD_AVAILABLE, - with_numpy_cupy, -) +from .base import FourierOperatorBase + from .interfaces.utils import is_cuda_array if CUPY_AVAILABLE: diff --git a/tests/operators/test_offres_exp_approx.py b/tests/operators/test_offres_exp_approx.py index dfcc03f4..3dde8cbe 100644 --- a/tests/operators/test_offres_exp_approx.py +++ b/tests/operators/test_offres_exp_approx.py @@ -9,8 +9,8 @@ import mrinufft +from mrinufft._array_compat import CUPY_AVAILABLE from mrinufft._utils import get_array_module -from mrinufft.operators.base import CUPY_AVAILABLE from mrinufft.operators.off_resonance import MRIFourierCorrected diff --git a/tests/test_array_compat.py b/tests/test_array_compat.py new file mode 100644 index 00000000..317330a4 --- /dev/null +++ b/tests/test_array_compat.py @@ -0,0 +1,140 @@ +"""Test array libraries compatibility utils.""" + +import pytest + +from pytest_cases import parametrize, fixture +from unittest.mock import patch, MagicMock + +import numpy as np + +from mrinufft._array_compat import ( + with_numpy, + with_numpy_cupy, + with_torch, + with_tensorflow, + _get_device, + CUPY_AVAILABLE, + AUTOGRAD_AVAILABLE, + TENSORFLOW_AVAILABLE, + tf_cuda_is_available, +) + +from helpers import to_interface +from helpers.factories import _param_array_interface + +if CUPY_AVAILABLE: + import cupy as cp + +torch_cuda_is_available = False +if AUTOGRAD_AVAILABLE: + import torch + + torch_cuda_is_available = torch.cuda.is_available() + +if TENSORFLOW_AVAILABLE: + import tensorflow as tf + + +def dummy_func(*args): + return args + + +@fixture(scope="module") +@parametrize("decorator", [with_numpy, with_numpy_cupy, with_torch, with_tensorflow]) +def decorator_factory(request, decorator): + @decorator + def test_func(*args): + return dummy_func(*args) + + return test_func, decorator + + +@_param_array_interface +def test_decorators_outcome(decorator_factory, array_interface): + decorated_function, _ = decorator_factory + + # Create input array + array = to_interface(np.asarray([1.0, 2.0, 3.0]), array_interface) + + # Output device and type must be the same as leading argument + expected_type = type(array) + expected_device = _get_device(array) + + # Execute function + outputs = decorated_function(array, array, array) + + # Assert the output has correct type + for output in outputs: + assert isinstance( + output, expected_type + ), f"Expected {expected_type} but got {type(output)}" + assert ( + _get_device(output) == expected_device + ), f"Expected {expected_device} but 
got {_get_device(output)}" + + +@_param_array_interface +def test_internal_conversions(decorator_factory, array_interface): + decorated_function, decorator = decorator_factory + + if decorator.__name__ == "with_tensorflow" and TENSORFLOW_AVAILABLE is False: + pytest.skip("tensorflow not available") + + # Create input array + array = to_interface(np.asarray([1.0, 2.0, 3.0]), array_interface) + + # Execute function and monitor internal conversion + if decorator.__name__ == "with_numpy": + if array_interface == "cupy": + with patch("cupy.asnumpy", wraps=cp.asnumpy) as mock_fun: + _ = decorated_function(array, array, array, 1.0, "a string") + assert mock_fun.call_count == 3 + if array_interface in ["torch-cpu", "torch-gpu"]: + array.numpy = MagicMock(wraps=array.numpy) + _ = decorated_function(array, array, array, 1.0, "a string") + assert array.numpy.call_count == 3 + + elif decorator.__name__ == "with_numpy_cupy": + if array_interface == "torch-cpu": + array.numpy = MagicMock(wraps=array.numpy) + _ = decorated_function(array, array, array, 1.0, "a string") + assert array.numpy.call_count == 3 + if array_interface == "torch-gpu": + with patch("cupy.from_dlpack", wraps=cp.from_dlpack) as mock_fun: + _ = decorated_function(array, array, array, 1.0, "a string") + assert mock_fun.call_count == 3 + + elif decorator.__name__ == "with_torch": + if array_interface == "numpy": + with patch("torch.as_tensor", wraps=torch.as_tensor) as mock_fun: + _ = decorated_function(array, array, array, 1.0, "a string") + assert mock_fun.call_count == 3 + if array_interface == "cupy" and torch_cuda_is_available: + with patch("torch.from_dlpack", wraps=torch.from_dlpack) as mock_fun: + _ = decorated_function(array, array, array, 1.0, "a string") + assert mock_fun.call_count == 3 + if array_interface == "cupy" and not torch_cuda_is_available: + with patch("torch.as_tensor", wraps=torch.as_tensor) as mock_fun: + _ = decorated_function(array, array, array, 1.0, "a string") + assert mock_fun.call_count == 3 + + elif decorator.__name__ == "with_tensorflow": + if array_interface in ["numpy", "torch-cpu"]: + with patch( + "tensorflow.convert_to_tensor", wraps=tf.convert_to_tensor + ) as mock_fun: + _ = decorated_function(array, array, array, 1.0, "a string") + assert mock_fun.call_count == 3 + if array_interface in ["cupy", "torch-gpu"] and tf_cuda_is_available: + with patch( + "tensorflow.experimental.dlpack.from_dlpack", + wraps=tf.experimental.dlpack.from_dlpack, + ) as mock_fun: + _ = decorated_function(array, array, array, 1.0, "a string") + assert mock_fun.call_count == 3 + if array_interface in ["cupy", "torch-gpu"] and not tf_cuda_is_available: + with patch( + "tensorflow.convert_to_tensor", wraps=tf.convert_to_tensor + ) as mock_fun: + _ = decorated_function(array, array, array, 1.0, "a string") + assert mock_fun.call_count == 3
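
The GPU branches in the test above assert that `torch.from_dlpack`, `cupy.from_dlpack`, or `tf.experimental.dlpack.from_dlpack` is hit once per array argument, because the `_to_*` helpers exchange on-device data through DLPack rather than a host round trip. A standalone sketch of that hand-off, assuming CUDA-enabled builds of cupy and torch (not part of this patch):

    import cupy as cp
    import torch

    x_cp = cp.arange(5, dtype=cp.float32)   # array allocated on the current GPU
    x_th = torch.from_dlpack(x_cp)          # torch view of the same device memory
    x_th += 1                               # in-place update through torch...
    print(cp.asnumpy(x_cp))                 # ...is visible from cupy: [1. 2. 3. 4. 5.]

Because both libraries address the same buffer, no copy is made and the data stays on the GPU, which is the behaviour the mock call counts above are meant to confirm.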