From 0b641a6a64726715a34666485e7c8d9548e76980 Mon Sep 17 00:00:00 2001 From: Christoph Kolbitsch Date: Mon, 9 Dec 2024 13:59:54 +0100 Subject: [PATCH 1/5] Just a few formatting things (#225) --- docs/nufft.rst | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/docs/nufft.rst b/docs/nufft.rst index 7051bf756..1a85f154a 100644 --- a/docs/nufft.rst +++ b/docs/nufft.rst @@ -174,14 +174,14 @@ To achieve a clinically feasible scan time, each frame or contrast is acquired w .. math:: \tilde{\boldsymbol{y}} = \begin{bmatrix} - \mathcal{F}_\Omega_1 & 0 & \cdots & 0 \\ - 0 & \mathcal{F}_\Omega_2 & \cdots & 0 \\ + \mathcal{F}_{\Omega_1} & 0 & \cdots & 0 \\ + 0 & \mathcal{F}_{\Omega_2} & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ - 0 & 0 & \cdots & \mathcal{F}_\Omega_T + 0 & 0 & \cdots & \mathcal{F}_{\Omega_T} \end{bmatrix} \boldsymbol{x} + \boldsymbol{n} -where :math:`\mathcal{F}_\Omega_1, \dots, \mathcal{F}_\Omega_T` are the Fourier operators corresponding to each individual frame. Some applications (e.g., MR Fingerprinting [3]_) may consists of +where :math:`\mathcal{F}_{\Omega_1}, \dots, \mathcal{F}_{\Omega_T}` are the Fourier operators corresponding to each individual frame. Some applications (e.g., MR Fingerprinting [3]_) may consists of thousands of total frames :math:`T`, leading to repeated Fourier Transform operations and high computational burden. However, the 1D signal series arising from similar voxels, e.g., with similar relaxation properties, are typically highly correlated. For this reason, the image series can be represented as: @@ -195,17 +195,17 @@ training dataset or an ensemble of simulated Bloch responses. The signal model c .. math:: \tilde{\boldsymbol{y}} = \begin{bmatrix} - \mathcal{F}_\Omega_1 & 0 & \cdots & 0 \\ - 0 & \mathcal{F}_\Omega_2 & \cdots & 0 \\ + \mathcal{F}_{\Omega_1} & 0 & \cdots & 0 \\ + 0 & \mathcal{F}_{\Omega_2} & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ - 0 & 0 & \cdots & \mathcal{F}_\Omega_T + 0 & 0 & \cdots & \mathcal{F}_{\Omega_T} \end{bmatrix} \Phi \Phi^H \boldsymbol{x} + \boldsymbol{n} = \begin{bmatrix} - \mathcal{F}_\Omega_1 & 0 & \cdots & 0 \\ - 0 & \mathcal{F}_\Omega_2 & \cdots & 0 \\ + \mathcal{F}_{\Omega_1} & 0 & \cdots & 0 \\ + 0 & \mathcal{F}_{\Omega_2} & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ - 0 & 0 & \cdots & \mathcal{F}_\Omega_T + 0 & 0 & \cdots & \mathcal{F}_{\Omega_T} \end{bmatrix} \Phi \boldsymbol{\alpha} + \boldsymbol{n} @@ -214,7 +214,7 @@ the projection operator :math:`\boldsymbol{\Phi}` commutes with the Fourier tran .. math:: - \tilde{\boldsymbol{y}} = \Phi \mathcal{F}_Omega(\boldsymbol{\alpha}) + \boldsymbol{n} + \tilde{\boldsymbol{y}} = \Phi \mathcal{F}_\Omega(\boldsymbol{\alpha}) + \boldsymbol{n} that is, computation now involves :math:`K \ll T` Fourier Transform operations, each with the same sampling trajectory, which can be computed by levaraging efficient NUFFT implementations for conventional static MRI. @@ -222,7 +222,7 @@ The Non Uniform Fast Fourier Transform ====================================== -In order to lower the computational cost of the Non-Uniform Fourier Transform, the main idea is to move back to a regular grid where an FFT would be performed (going from a typical :math:`O(MN)` complexity to `O(M\log(N))`). 
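To make the :math:`O(MN)` cost of the direct evaluation concrete, the snippet below is a minimal NumPy sketch of the adjoint NDFT sum, in which each of the :math:`M` non-uniform samples contributes to each of the :math:`N` grid points. It is purely illustrative and independent of the mri-nufft API; the function name, the :math:`[-0.5, 0.5)` normalization of the sample locations, and the sign convention are assumptions made only for this sketch.

.. code-block:: python

    import numpy as np

    def direct_adjoint_ndft(kspace_data, locations, shape):
        """Directly evaluate the adjoint NDFT in O(MN) operations (sketch).

        ``kspace_data`` holds the M non-uniform samples, ``locations`` their
        k-space coordinates normalized to [-0.5, 0.5), and ``shape`` the
        target image grid size.
        """
        # Build the (N, d) list of grid positions centered around zero
        grid = np.stack(
            np.meshgrid(*[np.arange(s) - s // 2 for s in shape], indexing="ij"),
            axis=-1,
        ).reshape(-1, len(shape))
        # (M, N) phase matrix: every sample interacts with every grid point
        phase = np.exp(2j * np.pi * locations @ grid.T)
        # O(MN) accumulation, the cost the NUFFT is designed to avoid
        return (kspace_data @ phase).reshape(shape)
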
Thus, the main steps of the *Non-Uniform Fast Fourier Transform* are for the type 1: +In order to lower the computational cost of the Non-Uniform Fourier Transform, the main idea is to move back to a regular grid where an FFT would be performed (going from a typical :math:`O(MN)` complexity to :math:`O(M\log(N))`). Thus, the main steps of the *Non-Uniform Fast Fourier Transform* are for the type 1: 1. Spreading/Interpolation of the non-uniform points to an oversampled Cartesian grid (typically with twice the resolution of the final image) 2. Perform the (I)FFT on this image @@ -259,4 +259,4 @@ References .. [1] https://en.m.wikipedia.org/wiki/Non-uniform_discrete_Fourier_transform .. [2] Noll, D. C., Meyer, C. H., Pauly, J. M., Nishimura, D. G., Macovski, A., "A homogeneity correction method for magnetic resonance imaging with time-varying gradients", IEEE Transaction on Medical Imaging (1991), pp. 629-637. .. [3] Fessler, J. A., Lee, S., Olafsson, V. T., Shi, H. R., Noll, D. C., "Toeplitz-based iterative image reconstruction for MRI with correction for magnetic field inhomogeneity", IEEE Transactions on Signal Processing 53.9 (2005), pp. 3393–3402. -.. [4] D. F. McGivney et al., "SVD Compression for Magnetic Resonance Fingerprinting in the Time Domain," IEEE Transactions on Medical Imaging (2014), pp. 2311-2322. \ No newline at end of file +.. [4] D. F. McGivney et al., "SVD Compression for Magnetic Resonance Fingerprinting in the Time Domain," IEEE Transactions on Medical Imaging (2014), pp. 2311-2322. From 9c41feb81e1c55e7e29a915d5fe43339dae1ebd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillaume=20Daval-Fr=C3=A9rot?= Date: Mon, 16 Dec 2024 21:19:27 +0100 Subject: [PATCH 2/5] Densities, random walks & travelling salesman (#206) * Split trajectory.maths.py into a submodule * Add densities, random walk and travelling salesman trajectories * fixup! Add densities, random walk and travelling salesman trajectories * Add docstrings for random walk and TSP solver * fixup! 
Add densities, random walk and travelling salesman trajectories * Add docstrings to TSP & sampling densities * Add samplings to random walk initializations and fix boundaries * Add utils to examples to show densities * Add sampling density example skeleton * Add descriptions to sampling density examples * Add dependency to pywavelet * Improve cutoff/decay density, fix dependencies * Gather extra dependencies, homogeneize extra imports * Fix imports to pywavelets * Fix black format in smaps * Remove except duplicate in io/siemens * Update CI with new [extra] dependencies * [docs] Trigger documentation build --------- Co-authored-by: Chaithya G R --- .github/workflows/test-ci.yml | 2 +- examples/example_2D_trajectories.py | 44 +- examples/example_3D_trajectories.py | 72 +-- examples/example_sampling_densities.py | 448 ++++++++++++++++++ examples/example_trajectory_tools.py | 76 +-- examples/utils.py | 112 ++++- pyproject.toml | 3 +- src/mrinufft/__init__.py | 39 +- src/mrinufft/extras/smaps.py | 12 +- src/mrinufft/io/siemens.py | 5 +- src/mrinufft/operators/stacked.py | 1 - src/mrinufft/trajectories/__init__.py | 95 ++-- src/mrinufft/trajectories/inits/__init__.py | 14 + .../trajectories/inits/random_walk.py | 205 ++++++++ .../trajectories/inits/travelling_salesman.py | 289 +++++++++++ src/mrinufft/trajectories/maths.py | 343 -------------- src/mrinufft/trajectories/maths/__init__.py | 18 + src/mrinufft/trajectories/maths/fibonacci.py | 135 ++++++ src/mrinufft/trajectories/maths/primes.py | 32 ++ src/mrinufft/trajectories/maths/rotations.py | 164 +++++++ src/mrinufft/trajectories/maths/tsp_solver.py | 55 +++ src/mrinufft/trajectories/sampling.py | 370 +++++++++++++++ src/mrinufft/trajectories/tools.py | 42 +- src/mrinufft/trajectories/trajectory2D.py | 5 + src/mrinufft/trajectories/trajectory3D.py | 2 +- 25 files changed, 2076 insertions(+), 507 deletions(-) create mode 100644 examples/example_sampling_densities.py create mode 100644 src/mrinufft/trajectories/inits/__init__.py create mode 100644 src/mrinufft/trajectories/inits/random_walk.py create mode 100644 src/mrinufft/trajectories/inits/travelling_salesman.py delete mode 100644 src/mrinufft/trajectories/maths.py create mode 100644 src/mrinufft/trajectories/maths/__init__.py create mode 100644 src/mrinufft/trajectories/maths/fibonacci.py create mode 100644 src/mrinufft/trajectories/maths/primes.py create mode 100644 src/mrinufft/trajectories/maths/rotations.py create mode 100644 src/mrinufft/trajectories/maths/tsp_solver.py create mode 100644 src/mrinufft/trajectories/sampling.py diff --git a/.github/workflows/test-ci.yml b/.github/workflows/test-ci.yml index 5ae8cd2dd..4720af6ae 100644 --- a/.github/workflows/test-ci.yml +++ b/.github/workflows/test-ci.yml @@ -206,7 +206,7 @@ jobs: shell: bash run: | python -m pip install --upgrade pip - python -m pip install -e .[test,dev] + python -m pip install -e .[extra,test,dev] python -m pip install finufft pooch brainweb-dl torch fastmri - name: Install GPU related interfaces diff --git a/examples/example_2D_trajectories.py b/examples/example_2D_trajectories.py index ed50a9f73..eae7f9e15 100644 --- a/examples/example_2D_trajectories.py +++ b/examples/example_2D_trajectories.py @@ -17,7 +17,7 @@ # External import matplotlib.pyplot as plt import numpy as np -from utils import show_argument, show_trajectory +from utils import show_trajectories, show_trajectory # Internal import mrinufft as mn @@ -75,7 +75,7 @@ arguments = [8, 16, 32, 64] function = lambda x: mn.initialize_2D_radial(x, Ns, 
tilt=tilt, in_out=in_out) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -89,7 +89,7 @@ arguments = [8, 16, 32, 64] function = lambda x: mn.initialize_2D_radial(Nc, x, tilt=tilt, in_out=in_out) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -105,7 +105,7 @@ arguments = ["uniform", "golden", "mri-golden", np.pi / 17] function = lambda x: mn.initialize_2D_radial(Nc, Ns, tilt=x, in_out=in_out) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -128,7 +128,7 @@ arguments = [True, False] function = lambda x: mn.initialize_2D_radial(Nc, Ns, tilt=tilt, in_out=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -170,7 +170,7 @@ function = lambda x: mn.initialize_2D_spiral( Nc, Ns, tilt=tilt, nb_revolutions=x, in_out=in_out ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -194,7 +194,7 @@ arguments = ["galilean", "archimedes", "fermat", 1 / 4] function = lambda x: mn.initialize_2D_spiral(Nc, Ns, tilt=tilt, spiral=x, in_out=in_out) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -221,7 +221,7 @@ Ns, patch_center=x, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -267,7 +267,7 @@ Ns, spiral_reduction=x, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -293,7 +293,7 @@ Ns, patch_center=x, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -333,7 +333,7 @@ function = lambda x: mn.initialize_2D_cones( Nc, Ns, tilt=tilt, in_out=in_out, nb_zigzags=x ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -348,7 +348,7 @@ arguments = [0.2, 1, 2, 3] function = lambda x: mn.initialize_2D_cones(Nc, Ns, tilt=tilt, in_out=in_out, width=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -413,7 +413,7 @@ arguments = [2, 3, 4, 6] function = lambda x: mn.initialize_2D_propeller(Nc, Ns, nb_strips=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -450,7 +450,7 @@ arguments = [Nc, int(2 * Nc / 3), int(Nc / 3)] function = lambda x: mn.initialize_2D_rings(Nc=x, Ns=Ns, nb_rings=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, 
arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% # @@ -461,7 +461,7 @@ arguments = [Nc, int(4 * Nc / 3), 2 * Nc] function = lambda x: mn.initialize_2D_rings(Nc=x, Ns=Ns, nb_rings=Nc) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -501,7 +501,7 @@ arguments = [0, 1, 5, 10] function = lambda x: mn.initialize_2D_rosette(Nc, Ns, in_out=in_out, coprime_index=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -541,7 +541,7 @@ function = lambda x: mn.initialize_2D_polar_lissajous( Nc, Ns, in_out=in_out, coprime_index=x ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -564,7 +564,7 @@ function = lambda x: mn.initialize_2D_polar_lissajous( Nc, Ns, in_out=in_out, nb_segments=x ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -588,7 +588,7 @@ function = lambda x: mn.initialize_2D_polar_lissajous( Nc, Ns, in_out=io, coprime_index=cpi, nb_segments=x ) - show_argument( + show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size ) @@ -625,7 +625,7 @@ arguments = [1, 2.5, 5, 10] function = lambda x: mn.initialize_2D_waves(Nc, Ns, nb_zigzags=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -644,7 +644,7 @@ arguments = [0, 1, 1.5, 3] function = lambda x: mn.initialize_2D_waves(Nc, Ns, width=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -679,7 +679,7 @@ arguments = [1, 1.5, 2, 3] function = lambda x: mn.initialize_2D_lissajous(Nc, Ns, density=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% diff --git a/examples/example_3D_trajectories.py b/examples/example_3D_trajectories.py index 21bab0470..5224f3437 100644 --- a/examples/example_3D_trajectories.py +++ b/examples/example_3D_trajectories.py @@ -26,7 +26,7 @@ # External import matplotlib.pyplot as plt import numpy as np -from utils import show_argument, show_trajectory +from utils import show_trajectories, show_trajectory # Internal import mrinufft as mn @@ -93,7 +93,7 @@ arguments = [Nc // 4, Nc // 2, Nc, Nc * 2] function = lambda x: mn.initialize_3D_phyllotaxis_radial(x, Ns, in_out=in_out) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -106,7 +106,7 @@ arguments = [10, 25, 40, 100] function = lambda x: mn.initialize_3D_phyllotaxis_radial(Nc, x, in_out=in_out) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -129,7 +129,7 @@ arguments = [True, False] function = lambda x: mn.initialize_3D_phyllotaxis_radial(Nc, Ns, in_out=x) -show_argument(function, arguments, 
one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -265,7 +265,7 @@ arguments = ["uniform", "golden", "mri-golden", np.pi / 17] function = lambda x: mn.initialize_3D_cones(Nc, Ns, tilt=x, in_out=in_out) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -278,7 +278,7 @@ arguments = [0.5, 2, 5, 10] function = lambda x: mn.initialize_3D_cones(Nc, Ns, in_out=in_out, nb_zigzags=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -293,7 +293,7 @@ arguments = ["archimedes", "fermat", 0.5, 1.5] function = lambda x: mn.initialize_3D_cones(Nc, Ns, in_out=in_out, spiral=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -308,7 +308,7 @@ arguments = [0.2, 1, 2, 3] function = lambda x: mn.initialize_3D_cones(Nc, Ns, in_out=in_out, width=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -372,11 +372,11 @@ max_angle=np.pi / 4, axes=x, )[::-1] -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size, dim="2D" ) @@ -418,7 +418,7 @@ arguments = [0.5, 2.5, 5, 10] function = lambda x: mn.initialize_3D_wave_caipi(Nc, Ns, nb_revolutions=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% # ``width (float)`` @@ -434,7 +434,7 @@ arguments = [0.2, 1, 2, 3] function = lambda x: mn.initialize_3D_wave_caipi(Nc, Ns, width=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% # ``packing (str)`` @@ -456,11 +456,11 @@ arguments = ["triangular", "square", "circular", "fibonacci", "random"] function = lambda x: mn.initialize_3D_wave_caipi(Nc, Ns, packing=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size, dim="2D" ) @@ -482,11 +482,11 @@ arguments = ["circle", "square", "diamond", 0.5] function = lambda x: mn.initialize_3D_wave_caipi(Nc, Ns, shape=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size, dim="2D" ) @@ -503,11 +503,11 @@ arguments = [(1, 1), (2, 1), (1, 2), (2.3, 1.8)] function = lambda x: mn.initialize_3D_wave_caipi(Nc, Ns, packing="square", spacing=x) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, 
subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size, dim="2D" ) @@ -561,7 +561,7 @@ function = lambda x: mn.initialize_3D_seiffert_spiral( Nc, Ns, in_out=in_out, curve_index=x ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -581,7 +581,7 @@ in_out=in_out, nb_revolutions=x, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -605,7 +605,7 @@ axis_tilt=x, spiral_tilt=0, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -631,7 +631,7 @@ axis_tilt="golden", spiral_tilt=x, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -680,7 +680,7 @@ function = lambda x: mn.initialize_3D_helical_shells( Nc=x, Ns=Ns, nb_shells=x, spiral_reduction=2 ) -show_argument(function, arguments, one_shot=False, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=False, subfig_size=subfigure_size) # %% @@ -699,7 +699,7 @@ function = lambda x: mn.initialize_3D_helical_shells( Nc=Nc, Ns=Ns, nb_shells=nb_repetitions, spiral_reduction=x ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -713,7 +713,7 @@ function = lambda x: mn.initialize_3D_helical_shells( Nc=Nc, Ns=Ns, nb_shells=nb_repetitions, spiral_reduction=2, shell_tilt=x ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -730,7 +730,7 @@ function = lambda x: mn.initialize_3D_helical_shells( Nc=Nc, Ns=Ns, nb_shells=nb_repetitions, spiral_reduction=2, shot_tilt=x ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -782,7 +782,7 @@ function = lambda x: mn.initialize_3D_annular_shells( Nc, Ns, nb_shells=nb_repetitions, ring_tilt=x ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -878,7 +878,7 @@ Ns_transitions=x, nb_blades=nb_blades, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -897,7 +897,7 @@ Ns_transitions=Ns // 10, nb_blades=x, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -915,11 +915,11 @@ nb_blades=nb_blades, blade_tilt=x, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size, dim="2D" ) @@ -941,7 +941,7 @@ nb_blades=nb_blades, nb_trains=x, ) 
-show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -965,11 +965,11 @@ nb_blades=nb_blades, skip_factor=x, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -1054,7 +1054,7 @@ nb_blade_revolutions=x, nb_spiral_revolutions=0, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -1071,7 +1071,7 @@ nb_blade_revolutions=x, nb_spiral_revolutions=nb_revolutions, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% diff --git a/examples/example_sampling_densities.py b/examples/example_sampling_densities.py new file mode 100644 index 000000000..cc505a6f9 --- /dev/null +++ b/examples/example_sampling_densities.py @@ -0,0 +1,448 @@ +""" +================== +Sampling densities +================== + +A collection of sampling densities and density-based non-Cartesian trajectories. + +""" + +# %% +# In this example we illustrate the use of different sampling densities, +# and show how to generate trajectories based on them, such as random +# walks and travelling-salesman trajectories. +# + +# External +import brainweb_dl as bwdl +import matplotlib.pyplot as plt +import numpy as np +from utils import ( + show_density, + show_densities, + show_locations, + show_trajectory, + show_trajectories, +) + +# Internal +import mrinufft as mn +from mrinufft import display_2D_trajectory, display_3D_trajectory + + +# %% +# Script options +# ============== +# +# These options are used in the examples below as default values for +# all densities and trajectories. + +# Density parameters +shape_2d = (100, 100) +shape_3d = (100, 100, 100) + +# Trajectory parameters +Nc = 10 # Number of shots +Ns = 50 # Number of samples per shot + +# Display parameters +figure_size = 5.5 # Figure size for trajectory plots +subfigure_size = 3 # Figure size for subplots +one_shot = 0 # Highlight one shot in particular + +# %% +# Densities +# ========= +# +# In this section we present different sampling densities +# with various properties. +# +# Cutoff/decay density +# -------------------- +# +# Create a density composed of a central constant-value ellipsis +# defined by a cutoff ratio, followed by a polynomial decay over +# outer regions as defined in [Cha+22]_. + +cutoff_density = mn.create_cutoff_decay_density(shape=shape_2d, cutoff=0.2, decay=2) +show_density(cutoff_density, figure_size=figure_size) + +# %% +# ``cutoff (float)`` +# ~~~~~~~~~~~~~~~~~~ +# +# The k-space radius cutoff ratio between 0 and 1 within +# which density remains uniform and beyond which it decays. +# It is modulated by ``resolution`` to create ellipsoids. +# +# The ``mrinufft.create_polynomial_density`` +# simply calls this function with ``cutoff=0``. + +arguments = [0, 0.1, 0.2, 0.3] +function = lambda x: mn.create_cutoff_decay_density( + shape=shape_2d, + cutoff=x, + decay=2, +) +show_densities( + function, + arguments, + subfig_size=subfigure_size, +) + +# %% +# ``decay (float)`` +# ~~~~~~~~~~~~~~~~~ +# +# The polynomial decay in density beyond the cutoff ratio. 
+# It can be zero or negative as shown below, but most applications +# are expected have decays in the positive range. + +arguments = [-1, 0, 0.5, 2] +function = lambda x: mn.create_cutoff_decay_density( + shape=shape_2d, + cutoff=0.2, + decay=x, +) +show_densities( + function, + arguments, + subfig_size=subfigure_size, +) + +# %% +# ``resolution (tuple)`` +# ~~~~~~~~~~~~~~~~~~~~~~ +# +# Resolution scaling factors for each dimension of the density grid, +# by default ``None``. Note on the example below that the unit doesn't +# matter because ``cutoff`` is a ratio and ``decay`` is an exponent, +# so only the relative factor between the dimensions is important. +# +# This argument can be used to handle anisotropy but also to produce +# ellipsoidal densities. + + +arguments = [None, (1, 1), (1, 2), (1e-3, 0.5e-3)] +function = lambda x: mn.create_cutoff_decay_density( + shape=shape_2d, + cutoff=0.2, + decay=2, + resolution=x, +) +show_densities( + function, + arguments, + subfig_size=subfigure_size, +) + + +# %% +# Energy-based density +# -------------------- +# +# A common intuition is to consider that the sampling density +# should be proportional to the k-space amplitude. It can be +# learned from existing datasets and used for new acquisitions. + +dataset = bwdl.get_mri(4, "T1")[:, ::2, ::2] +energy_density = mn.create_energy_density(dataset=dataset) +show_density(energy_density, figure_size=figure_size, log_scale=True) + +# %% +# ``dataset (np.ndarray)`` +# ~~~~~~~~~~~~~~~~~~~~~~~~ +# +# The dataset from which to calculate the density +# based on its Fourier transform, with an expected +# shape (nb_volumes, dim_1, ..., dim_N). +# An N-dimensional Fourier transform is then performed. +# +# In the example below, we show the resulting densities +# from different slices of a single volume for convenience. +# More relevant use cases would be to learn densities for +# different organs and/or contrasts. + +arguments = [50, 100, 150] +function = lambda x: mn.create_energy_density(dataset=bwdl.get_mri(4, "T1")[x : x + 20]) +show_densities( + function, + arguments, + subfig_size=subfigure_size, + log_scale=True, +) + + +# %% +# Chauffert's density +# ------------------- +# +# This is a reproduction of the proposition from [CCW13]_. +# A sampling density is derived from compressed sensing +# equations to maximize guarantees of exact image recovery +# for a specified sparse wavelet domain decomposition. +# +# This principle is valid for any linear transform but +# for convenience it was limited to wavelets as in the +# original implementation. + +chauffert_density = mn.create_chauffert_density( + shape=shape_2d, + wavelet_basis="haar", + nb_wavelet_scales=3, +) +show_density(chauffert_density, figure_size=figure_size) + + +# %% +# ``wavelet_basis (str)`` +# ~~~~~~~~~~~~~~~~~~~~~~~ +# +# The wavelet basis to use for wavelet decomposition, either +# as a built-in wavelet name from the PyWavelets package +# or as a custom ``pywt.Wavelet`` object. + +arguments = ["haar", "rbio2.2", "coif4", "sym8"] +function = lambda x: mn.create_chauffert_density( + shape=shape_2d, + wavelet_basis=x, + nb_wavelet_scales=3, +) +show_densities( + function, + arguments, + subfig_size=subfigure_size, +) + +# %% +# ``nb_wavelet_scales (int)`` +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# The number of wavelet scales to use in decomposition. 
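# %%
# The wavelet options above are resolved by the PyWavelets package
# (the ``pywavelets`` dependency added to the ``[extra]`` group in this PR).
# As a small illustrative aside, assuming ``pywt`` is importable in the
# current environment, the built-in discrete wavelet names can be listed with:

import pywt

print(pywt.wavelist(kind="discrete"))
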
+ +arguments = [1, 2, 3, 4] +function = lambda x: mn.create_chauffert_density( + shape=shape_2d, + wavelet_basis="haar", + nb_wavelet_scales=x, +) +show_densities( + function, + arguments, + subfig_size=subfigure_size, +) + + +# %% +# Custom density +# -------------- +# +# Any density can be defined and later used for sampling and +# trajectories. + +# Linear gradient +density = np.tile(np.linspace(0, 1, shape_2d[1])[:, None], (1, shape_2d[0])) +# Square center +density[ + 3 * shape_2d[0] // 8 : 5 * shape_2d[0] // 8, + 3 * shape_2d[1] // 8 : 5 * shape_2d[1] // 8, +] = 2 +# Outer circle +density = np.where( + np.linalg.norm(np.indices(shape_2d) - shape_2d[0] / 2, axis=0) < shape_2d[0] / 2, + density, + 0, +) +# Normalization +custom_density = density / np.sum(density) + +show_density(custom_density, figure_size=figure_size) + + +# %% +# Sampling +# ======== +# +# In this section we present random, pseudo-random and +# algorithm-based sampling methods. The examples are based +# on a few densities picked from the ones presented above. +# + +densities = { + "Cutoff/Decay": cutoff_density, + "Energy": energy_density, + "Chauffert": chauffert_density, + "Custom": custom_density, +} + +arguments = densities.keys() +function = lambda x: densities[x] +show_densities(function, arguments, subfig_size=subfigure_size) + + +# %% +# Random sampling +# --------------- +# +# This sampling simply consists of weighted-random selection from the +# density grid locations. + +arguments = densities.keys() +function = lambda x: mn.sample_from_density(Nc * Ns, densities[x], method="random") +show_locations(function, arguments, subfig_size=subfigure_size) + + +# %% +# Lloyd's sampling +# ---------------- +# +# This sampling is based on a Voronoi/Dirichlet tesselation using Lloyd's +# weighted KMeans algorithm. The implementation is based on +# ``sklearn.cluster.KMeans`` in 2D and ``sklearn.cluster.BisectingKMeans`` +# in 3D, mostly to reduce computation times in the most demanding cases. + +arguments = densities.keys() +function = lambda x: mn.sample_from_density(Nc * Ns, densities[x], method="lloyd") +show_locations(function, arguments, subfig_size=subfigure_size) + + +# %% +# Density-based trajectories +# ========================== +# +# In this section we present 2D and 3D trajectories based +# on arbitrary densities, and also sampling for some of them. +# +# Random walks +# ------------ +# +# This is an adaptation of the proposition from [Cha+14]_. +# It creates a trajectory by walking randomly to neighboring points +# following a provided sampling density. +# +# This implementation is different from the original proposition: +# trajectories are continuous with a fixed length instead of +# making random jumps to other locations, and an option +# is provided to have pseudo-random walks to improve coverage. + +arguments = densities.keys() +function = lambda x: mn.initialize_2D_random_walk( + Nc, Ns, density=densities[x][::4, ::4] +) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) + +# %% +# +# The starting shot positions can be modified to follow Lloyd's sampling +# method rather than the default random approach, resulting in more evenly +# spaced shots that still respect the prescribed density. +# Additional ``kwargs`` can provided to set the arguments in +# ``mrinufft.sample_from_density``. 
+ +arguments = densities.keys() +function = lambda x: mn.initialize_2D_random_walk( + Nc, Ns, density=densities[x][::4, ::4], method="lloyd" +) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) + +# %% +# +# The random paths can be made into a smooth and continuous +# trajectory by oversampling the shots with cubic splines. + +arguments = densities.keys() +function = lambda x: mn.oversample( + mn.initialize_2D_random_walk( + Nc, Ns, density=densities[x][::4, ::4], method="lloyd" + ), + 4 * Ns, +) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) + + +# %% +# Travelling Salesman +# ------------------- +# +# This is a reproduction of the work from [Cha+14]_. The Travelling +# Salesman Problem (TSP) solution is obtained using the 2-opt method +# with a complexity in O(n²) in time and memory. + +arguments = densities.keys() +function = lambda x: mn.initialize_2D_travelling_salesman( + Nc, + Ns, + density=densities[x], +) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) + +# %% +# +# It is possible to customize the sampling method using ``kwargs`` +# to provide arguments to ``mrinufft.sample_from_density``. +# For example, one can use Lloyd's sampling method to create evenly +# spaced point distributions and obtain a more deterministic coverage. + +arguments = densities.keys() +function = lambda x: mn.initialize_2D_travelling_salesman( + Nc, + Ns, + density=densities[x], + method="lloyd", +) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) + +# %% +# +# Similarly to random walks, the travelling paths can be smoothed +# by oversampling the shots with cubic splines. Another use case +# is to reduce the number of TSP points to reduce the computation load +# and then oversample up to the desired shot length. + +arguments = densities.keys() +function = lambda x: mn.oversample( + mn.initialize_2D_travelling_salesman(Nc, Ns, density=densities[x], method="lloyd"), + 4 * Ns, +) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) + +# %% +# +# An option is provided to cluster the points before calling the TSP solver, +# reducing drastically the computation time. +# Clusters are chosen by Cartesian (``"x"``, ``"y"``, ``"z"``) or spherical +# (``"r"``, ``"phi"``, ``"theta"``) coordinate with up to two coordinates. +# Then the points can be sorted within each cluster in order to define a general +# shot direction as shown below. + +arguments = ((None, None, None), ("y", None, "x"), ("phi", None, "r"), ("y", "x", "r")) +function = lambda x: mn.initialize_2D_travelling_salesman( + Nc, + Ns, + density=densities["Custom"], + first_cluster_by=x[0], + second_cluster_by=x[1], + sort_by=x[2], + method="lloyd", +) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) + + +# %% +# References +# ========== +# +# .. [CCW13] Chauffert, Nicolas, Philippe Ciuciu, and Pierre Weiss. +# "Variable density compressed sensing in MRI. +# Theoretical vs heuristic sampling strategies." +# In 2013 IEEE 10th International Symposium on Biomedical Imaging, +# pp. 298-301. IEEE, 2013. +# .. [Cha+14] Chauffert, Nicolas, Philippe Ciuciu, +# Jonas Kahn, and Pierre Weiss. +# "Variable density sampling with continuous trajectories." +# SIAM Journal on Imaging Sciences 7, no. 4 (2014): 1962-1992. +# .. [Cha+22] Chaithya, G. R., Pierre Weiss, Guillaume Daval-Frérot, +# Aurélien Massire, Alexandre Vignaud, and Philippe Ciuciu. 
+# "Optimizing full 3D SPARKLING trajectories for high-resolution +# magnetic resonance imaging." +# IEEE Transactions on Medical Imaging 41, no. 8 (2022): 2105-2117. diff --git a/examples/example_trajectory_tools.py b/examples/example_trajectory_tools.py index e0a2efffb..83afeb5df 100644 --- a/examples/example_trajectory_tools.py +++ b/examples/example_trajectory_tools.py @@ -24,7 +24,7 @@ # External import matplotlib.pyplot as plt import numpy as np -from utils import show_argument, show_trajectory +from utils import show_trajectories, show_trajectory # Internal import mrinufft as mn @@ -87,7 +87,9 @@ arguments = ["Radial", "Spiral", "2D Cones", "3D Cones"] function = lambda x: single_trajectories[x] -show_argument(function, arguments, one_shot=bool(one_shot), subfig_size=subfigure_size) +show_trajectories( + function, arguments, one_shot=bool(one_shot), subfig_size=subfigure_size +) # %% @@ -119,7 +121,9 @@ arguments = ["Radial", "Spiral", "2D Cones", "3D Cones"] function = lambda x: planar_trajectories[x] -show_argument(function, arguments, one_shot=bool(one_shot), subfig_size=subfigure_size) +show_trajectories( + function, arguments, one_shot=bool(one_shot), subfig_size=subfigure_size +) # %% @@ -160,9 +164,9 @@ arguments = ["Radial", "Spiral", "2D Cones", "3D Cones"] function = lambda x: tools.stack(planar_trajectories[x], nb_stacks=nb_repetitions) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -188,7 +192,7 @@ ), nb_stacks=nb_repetitions, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -206,7 +210,7 @@ trajectory = np.copy(planar_trajectories["3D Cones"]) trajectory[..., 2] *= 2 function = lambda x: tools.stack(trajectory, nb_stacks=nb_repetitions, hard_bounded=x) -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -258,9 +262,9 @@ nb_rotations=nb_repetitions, x_tilt="uniform", ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -326,7 +330,7 @@ half_sphere=in_out, axis=2, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% # @@ -345,7 +349,7 @@ half_sphere=in_out, axis=0, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -369,7 +373,7 @@ half_sphere=x, axis=0, ) -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -396,7 +400,7 @@ partition=x, axis=0, ) -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -422,7 +426,7 @@ partition=x, axis=0, ) -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -445,7 +449,7 @@ partition=x, axis=0, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% # ``axis (int, array)`` @@ -478,7 +482,7 @@ half_sphere=in_out, axis=x, ) -show_argument( 
+show_trajectories( function, arguments, one_shot=one_shot, @@ -501,7 +505,7 @@ half_sphere=in_out, axis=x, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% # @@ -556,9 +560,9 @@ function = lambda x: tools.conify( planar_trajectories[x], nb_cones=nb_repetitions, in_out=in_out ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -578,9 +582,9 @@ function = lambda x: tools.conify( single_trajectories[x], nb_cones=Nc, z_tilt="golden", in_out=in_out ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -607,7 +611,7 @@ in_out=in_out, max_angle=x, ) -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -634,7 +638,7 @@ max_angle=np.pi / 2, borderless=x, ) -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -691,11 +695,11 @@ nb_trains=Nc_planes // 2, reverse_odd_shots=True, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size, dim="2D" ) @@ -716,7 +720,7 @@ nb_trains=Nc_planes // 2, reverse_odd_shots=True, ) -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size, dim="2D" ) @@ -735,7 +739,7 @@ nb_trains=x, reverse_odd_shots=True, ) -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size, dim="2D" ) @@ -755,7 +759,7 @@ nb_trains=Nc_planes // 2, reverse_odd_shots=x, ) -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size, dim="2D" ) @@ -799,11 +803,11 @@ tools.rewind(planar_trajectories[x], Ns_transitions=Ns // 10), Ns_transitions=Ns // 10, ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size, dim="2D" ) @@ -821,7 +825,7 @@ tools.rewind(planar_trajectories["2D Cones"], Ns_transitions=x), Ns_transitions=x, ) -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, subfig_size=subfigure_size, dim="2D" ) @@ -898,9 +902,9 @@ function = lambda x: tools.stack_spherically( init_trajectories[x], Nc=Nc, nb_stacks=nb_repetitions ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, @@ -935,7 +939,7 @@ trajectories = {"Classic": traj_classic, "Normalized": traj_normal} arguments = ["Classic", "Normalized"] function = lambda x: trajectories[x] -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% @@ -988,7 +992,7 @@ 
function = lambda x: tools.shellify( init_trajectories[x], Nc=Nc, nb_shells=nb_repetitions ) -show_argument(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) +show_trajectories(function, arguments, one_shot=one_shot, subfig_size=subfigure_size) # %% # ``hemisphere_mode (str)`` @@ -1005,7 +1009,7 @@ function = lambda x: tools.shellify( init_trajectories["Spiral"], Nc=Nc, nb_shells=nb_repetitions, hemisphere_mode=x ) -show_argument( +show_trajectories( function, arguments, one_shot=one_shot, diff --git a/examples/utils.py b/examples/utils.py index 7c70595bd..5edaedc51 100644 --- a/examples/utils.py +++ b/examples/utils.py @@ -3,16 +3,39 @@ the examples. """ -import matplotlib.pyplot as plt - # External imports import numpy as np +from matplotlib import colors +import matplotlib.pyplot as plt # Internal imports from mrinufft import display_2D_trajectory, display_3D_trajectory, displayConfig +from mrinufft.trajectories.utils import KMAX + + +def show_trajectory(trajectory, one_shot, figure_size): + if trajectory.shape[-1] == 2: + ax = display_2D_trajectory( + trajectory, size=figure_size, one_shot=one_shot % trajectory.shape[0] + ) + ax.set_aspect("equal") + plt.tight_layout() + plt.show() + else: + ax = display_3D_trajectory( + trajectory, + size=figure_size, + one_shot=one_shot % trajectory.shape[0], + per_plane=False, + ) + plt.tight_layout() + plt.subplots_adjust(bottom=0.1) + plt.show() -def show_argument(function, arguments, one_shot, subfig_size, dim="3D", axes=(0, 1)): +def show_trajectories( + function, arguments, one_shot, subfig_size, dim="3D", axes=(0, 1) +): # Initialize trajectories with varying option trajectories = [function(arg) for arg in arguments] @@ -42,25 +65,72 @@ def show_argument(function, arguments, one_shot, subfig_size, dim="3D", axes=(0, ax.set_xlabel(labels[axes[0]], fontsize=displayConfig.fontsize) ax.set_ylabel(labels[axes[1]], fontsize=displayConfig.fontsize) ax.set_aspect("equal") - ax.set_title(str(arg), fontsize=4 * subfig_size) + ax.set_title(str(arg), fontsize=displayConfig.fontsize) plt.show() -def show_trajectory(trajectory, one_shot, figure_size): - if trajectory.shape[-1] == 2: - ax = display_2D_trajectory( - trajectory, size=figure_size, one_shot=one_shot % trajectory.shape[0] - ) - ax.set_aspect("equal") - plt.tight_layout() - plt.show() +def show_density(density, figure_size, *, log_scale=False): + density = density.T[::-1] + + plt.figure(figsize=(figure_size, figure_size)) + if log_scale: + plt.imshow(density, cmap="jet", norm=colors.LogNorm()) else: - ax = display_3D_trajectory( - trajectory, - size=figure_size, - one_shot=one_shot % trajectory.shape[0], - per_plane=False, - ) - plt.tight_layout() - plt.subplots_adjust(bottom=0.1) - plt.show() + plt.imshow(density, cmap="jet") + + ax = plt.gca() + ax.set_xlabel("kx", fontsize=displayConfig.fontsize) + ax.set_ylabel("ky", fontsize=displayConfig.fontsize) + ax.set_aspect("equal") + + plt.axis(False) + plt.colorbar() + plt.show() + + +def show_densities(function, arguments, subfig_size, *, log_scale=False): + # Initialize k-space densities with varying option + densities = [function(arg).T[::-1] for arg in arguments] + + # Plot the trajectories side by side + fig, axes = plt.subplots( + 1, + len(densities), + figsize=(len(densities) * subfig_size, subfig_size), + constrained_layout=True, + ) + + for ax, arg, density in zip(axes, arguments, densities): + ax.set_title(str(arg), fontsize=displayConfig.fontsize) + ax.set_xlabel("kx", fontsize=displayConfig.fontsize) + 
ax.set_ylabel("ky", fontsize=displayConfig.fontsize) + ax.set_aspect("equal") + if log_scale: + ax.imshow(density, cmap="jet", norm=colors.LogNorm()) + else: + ax.imshow(density, cmap="jet") + ax.axis(False) + plt.show() + + +def show_locations(function, arguments, subfig_size, *, log_scale=False): + # Initialize k-space locations with varying option + locations = [function(arg) for arg in arguments] + + # Plot the trajectories side by side + fig, axes = plt.subplots( + 1, + len(locations), + figsize=(len(locations) * subfig_size, subfig_size), + constrained_layout=True, + ) + + for ax, arg, location in zip(axes, arguments, locations): + ax.set_title(str(arg), fontsize=displayConfig.fontsize) + ax.set_xlim(-1.05 * KMAX, 1.05 * KMAX) + ax.set_ylim(-1.05 * KMAX, 1.05 * KMAX) + ax.set_xlabel("kx", fontsize=displayConfig.fontsize) + ax.set_ylabel("ky", fontsize=displayConfig.fontsize) + ax.set_aspect("equal") + ax.scatter(location[..., 0], location[..., 1], s=3) + plt.show() diff --git a/pyproject.toml b/pyproject.toml index 361ee1f65..1d594e0c0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,8 +17,7 @@ cufinufft = ["cufinufft<2.3", "cupy-cuda12x"] finufft = ["finufft"] pynfft = ["pynfft2>=1.4.3; python_version < '3.12'", "numpy>=2.0.0; python_version < '3.12'"] pynufft = ["pynufft"] -io = ["pymapvbvd"] -smaps = ["scikit-image"] +extra = ["pymapvbvd", "scikit-image", "scikit-learn", "pywavelets"] autodiff = ["torch"] diff --git a/src/mrinufft/__init__.py b/src/mrinufft/__init__.py index 69bae1a26..041c454f3 100644 --- a/src/mrinufft/__init__.py +++ b/src/mrinufft/__init__.py @@ -14,6 +14,7 @@ ) from .trajectories import ( + # trajectories initialize_2D_radial, initialize_2D_spiral, initialize_2D_fibonacci_spiral, @@ -25,6 +26,8 @@ initialize_2D_polar_lissajous, initialize_2D_lissajous, initialize_2D_waves, + initialize_2D_random_walk, + initialize_2D_travelling_salesman, initialize_3D_phyllotaxis_radial, initialize_3D_golden_means_radial, initialize_3D_wong_radial, @@ -38,6 +41,9 @@ initialize_3D_seiffert_shells, initialize_3D_turbine, initialize_3D_repi, + initialize_3D_random_walk, + initialize_3D_travelling_salesman, + # tools stack, rotate, precess, @@ -46,6 +52,15 @@ shellify, duplicate_along_axes, radialize_center, + oversample, + # densities + sample_from_density, + create_cutoff_decay_density, + create_polynomial_density, + create_energy_density, + create_chauffert_density, + create_fast_chauffert_density, + # display displayConfig, display_2D_trajectory, display_3D_trajectory, @@ -54,10 +69,16 @@ from .density import voronoi, cell_count, pipe, get_density __all__ = [ + # nufft "get_operator", "check_backend", "list_backends", "get_interpolators_from_fieldmap", + "voronoi", + "cell_count", + "pipe", + "get_density", + # trajectories "initialize_2D_radial", "initialize_2D_spiral", "initialize_2D_fibonacci_spiral", @@ -69,6 +90,8 @@ "initialize_2D_polar_lissajous", "initialize_2D_lissajous", "initialize_2D_waves", + "initialize_2D_random_walk", + "initialize_2D_travelling_salesman", "initialize_3D_phyllotaxis_radial", "initialize_3D_golden_means_radial", "initialize_3D_wong_radial", @@ -83,6 +106,9 @@ "initialize_3D_seiffert_shells", "initialize_3D_turbine", "initialize_3D_repi", + "initialize_3D_random_walk", + "initialize_3D_travelling_salesman", + # tools "stack", "rotate", "precess", @@ -92,13 +118,18 @@ "duplicate_along_axes", "radialize_center", "patch_center_anomaly", + "oversample", + # densities + "sample_from_density", + "create_cutoff_decay_density", + 
"create_polynomial_density", + "create_energy_density", + "create_chauffert_density", + "create_fast_chauffert_density", + # display "displayConfig", "display_2D_trajectory", "display_3D_trajectory", - "voronoi", - "cell_count", - "pipe", - "get_density", ] from importlib.metadata import version, PackageNotFoundError diff --git a/src/mrinufft/extras/smaps.py b/src/mrinufft/extras/smaps.py index c3f7018fe..6bb0ba3b9 100644 --- a/src/mrinufft/extras/smaps.py +++ b/src/mrinufft/extras/smaps.py @@ -149,8 +149,16 @@ def low_frequency( """ # defer import to later to prevent circular import from mrinufft import get_operator - from skimage.filters import threshold_otsu, gaussian - from skimage.morphology import convex_hull_image + + try: + from skimage.filters import threshold_otsu, gaussian + from skimage.morphology import convex_hull_image + except ImportError as err: + raise ImportError( + "The scikit-image module is not available. Please install " + "it along with the [extra] dependencies " + "or using `pip install scikit-image`." + ) from err k_space, samples, dc = _extract_kspace_center( kspace_data=kspace_data, diff --git a/src/mrinufft/io/siemens.py b/src/mrinufft/io/siemens.py index dd8c96d72..e0ea009d2 100644 --- a/src/mrinufft/io/siemens.py +++ b/src/mrinufft/io/siemens.py @@ -57,8 +57,9 @@ def read_siemens_rawdat( from mapvbvd import mapVBVD except ImportError as err: raise ImportError( - "The mapVBVD module is not available. Please install it using " - "the following command: pip install pymapVBVD" + "The mapVBVD module is not available. Please install " + "it along with the [extra] dependencies " + "or using `pip install pymapVBVD`." ) from err twixObj = mapVBVD(filename) if isinstance(twixObj, list): diff --git a/src/mrinufft/operators/stacked.py b/src/mrinufft/operators/stacked.py index c3136bd17..cdb2bb627 100644 --- a/src/mrinufft/operators/stacked.py +++ b/src/mrinufft/operators/stacked.py @@ -23,7 +23,6 @@ try: import cupy as cp from cupyx.scipy import fft as cpfft - except ImportError: CUPY_AVAILABLE = False diff --git a/src/mrinufft/trajectories/__init__.py b/src/mrinufft/trajectories/__init__.py index bed80d3bb..77c588cd8 100644 --- a/src/mrinufft/trajectories/__init__.py +++ b/src/mrinufft/trajectories/__init__.py @@ -1,55 +1,63 @@ """Collection of trajectories and tools used for non-Cartesian MRI.""" +from .display import display_2D_trajectory, display_3D_trajectory, displayConfig +from .gradients import patch_center_anomaly +from .inits import ( + initialize_2D_random_walk, + initialize_2D_travelling_salesman, + initialize_3D_random_walk, + initialize_3D_travelling_salesman, +) +from .sampling import ( + create_chauffert_density, + create_cutoff_decay_density, + create_energy_density, + create_fast_chauffert_density, + create_polynomial_density, + sample_from_density, +) +from .tools import ( + conify, + duplicate_along_axes, + oversample, + precess, + radialize_center, + rotate, + shellify, + stack, + stack_spherically, +) from .trajectory2D import ( - initialize_2D_radial, - initialize_2D_spiral, - initialize_2D_fibonacci_spiral, initialize_2D_cones, - initialize_2D_sinusoide, + initialize_2D_fibonacci_spiral, + initialize_2D_lissajous, + initialize_2D_polar_lissajous, initialize_2D_propeller, - initialize_2D_rosette, + initialize_2D_radial, initialize_2D_rings, - initialize_2D_polar_lissajous, - initialize_2D_lissajous, + initialize_2D_rosette, + initialize_2D_sinusoide, + initialize_2D_spiral, initialize_2D_waves, ) - from .trajectory3D import ( - 
initialize_3D_phyllotaxis_radial, - initialize_3D_golden_means_radial, - initialize_3D_wong_radial, - initialize_3D_park_radial, + initialize_3D_annular_shells, initialize_3D_cones, initialize_3D_floret, - initialize_3D_wave_caipi, - initialize_3D_seiffert_spiral, + initialize_3D_golden_means_radial, initialize_3D_helical_shells, - initialize_3D_annular_shells, + initialize_3D_park_radial, + initialize_3D_phyllotaxis_radial, + initialize_3D_repi, initialize_3D_seiffert_shells, + initialize_3D_seiffert_spiral, initialize_3D_turbine, - initialize_3D_repi, -) - -from .tools import ( - stack, - rotate, - precess, - conify, - stack_spherically, - shellify, - duplicate_along_axes, - radialize_center, -) - -from .display import ( - displayConfig, - display_2D_trajectory, - display_3D_trajectory, + initialize_3D_wave_caipi, + initialize_3D_wong_radial, ) -from .gradients import patch_center_anomaly - __all__ = [ + # trajectories "initialize_2D_radial", "initialize_2D_spiral", "initialize_2D_fibonacci_spiral", @@ -61,6 +69,8 @@ "initialize_2D_polar_lissajous", "initialize_2D_lissajous", "initialize_2D_waves", + "initialize_2D_random_walk", + "initialize_2D_travelling_salesman", "initialize_3D_phyllotaxis_radial", "initialize_3D_golden_means_radial", "initialize_3D_wong_radial", @@ -75,6 +85,9 @@ "initialize_3D_seiffert_shells", "initialize_3D_turbine", "initialize_3D_repi", + "initialize_3D_random_walk", + "initialize_3D_travelling_salesman", + # tools "stack", "rotate", "precess", @@ -87,4 +100,16 @@ "display_2D_trajectory", "display_3D_trajectory", "patch_center_anomaly", + "oversample", + # densities + "sample_from_density", + "create_cutoff_decay_density", + "create_polynomial_density", + "create_energy_density", + "create_chauffert_density", + "create_fast_chauffert_density", + # display + "displayConfig", + "display_2D_trajectory", + "display_3D_trajectory", ] diff --git a/src/mrinufft/trajectories/inits/__init__.py b/src/mrinufft/trajectories/inits/__init__.py new file mode 100644 index 000000000..31c9da932 --- /dev/null +++ b/src/mrinufft/trajectories/inits/__init__.py @@ -0,0 +1,14 @@ +"""Module containing all trajectories.""" + +from .random_walk import initialize_2D_random_walk, initialize_3D_random_walk +from .travelling_salesman import ( + initialize_2D_travelling_salesman, + initialize_3D_travelling_salesman, +) + +__all__ = [ + "initialize_2D_random_walk", + "initialize_3D_random_walk", + "initialize_2D_travelling_salesman", + "initialize_3D_travelling_salesman", +] diff --git a/src/mrinufft/trajectories/inits/random_walk.py b/src/mrinufft/trajectories/inits/random_walk.py new file mode 100644 index 000000000..da68feb1f --- /dev/null +++ b/src/mrinufft/trajectories/inits/random_walk.py @@ -0,0 +1,205 @@ +"""Trajectories based on random walks.""" + +import numpy as np + +from ..sampling import sample_from_density +from ..utils import KMAX + + +def _get_adjacent_neighbors_offsets(shape): + return np.concatenate([np.eye(len(shape)), -np.eye(len(shape))], axis=0).astype(int) + + +def _get_neighbors_offsets(shape): + nb_dims = len(shape) + neighbors = (np.indices([3] * nb_dims) - 1).reshape((nb_dims, -1)).T + nb_half = neighbors.shape[0] // 2 + # Remove full zero entry + neighbors = np.concatenate([neighbors[:nb_half], neighbors[-nb_half:]], axis=0) + return neighbors + + +def _initialize_ND_random_walk( + Nc, Ns, density, *, diagonals=True, pseudo_random=True, **sampling_kwargs +): + density = density / np.sum(density) + flat_density = np.copy(density.flatten()) + shape = 
np.array(density.shape)
+    mask = np.ones_like(flat_density)
+
+    # Prepare neighbor offsets once
+    offsets = (
+        _get_neighbors_offsets(shape)
+        if diagonals
+        else _get_adjacent_neighbors_offsets(shape)
+    )
+
+    # Make all random draws at once for performance
+    draws = np.random.random((Ns, Nc))  # inverted shape for convenience
+
+    # Initialize shot starting points
+    locations = sample_from_density(Nc, density, **sampling_kwargs)
+    choices = np.around((locations + KMAX) * (np.array(density.shape) - 1)).astype(int)
+    choices = np.ravel_multi_index(choices.T, density.shape)
+    # choices = np.random.choice(np.arange(len(flat_density)), size=Nc, p=flat_density)
+    routes = [choices]
+
+    # Walk
+    for i in range(1, Ns):
+        # Express indices back to multi-dim coordinates to check bounds
+        neighbors = np.array(np.unravel_index(choices, shape))
+        neighbors = neighbors[:, None] + offsets.T[..., None]
+
+        # Find out-of-bound neighbors and ignore them
+        invalids = (neighbors < 0).any(axis=0) | (
+            neighbors >= shape[:, None, None]
+        ).any(axis=0)
+        neighbors[:, invalids] = 0
+        invalids = invalids.T
+
+        # Flatten indices to use np.random.choice
+        neighbors = np.ravel_multi_index(neighbors, shape).T
+
+        # Set walk probabilities
+        walk_probs = flat_density[neighbors]
+        walk_probs[invalids] = 0
+        walk_probs = walk_probs / np.sum(walk_probs, axis=-1, keepdims=True)
+        cum_walk_probs = np.cumsum(walk_probs, axis=-1)
+
+        # Select next walk steps
+        indices = np.argmax(draws[i][:, None] < cum_walk_probs, axis=-1)
+        choices = neighbors[np.arange(Nc), indices]
+        routes.append(choices)
+
+        # Update density to account for already drawn positions
+        if pseudo_random:
+            flat_density[choices] = (
+                mask[choices] * flat_density[choices] / (mask[choices] + 1)
+            )
+            mask[choices] += 1
+    routes = np.array(routes).T
+
+    # Create trajectory from routes
+    locations = np.indices(shape)
+    locations = locations.reshape((len(shape), -1))
+    trajectory = np.array([locations[:, r].T for r in routes])
+    trajectory = 2 * KMAX * trajectory / (shape - 1) - KMAX
+    return trajectory
+
+
+def initialize_2D_random_walk(
+    Nc, Ns, density, *, diagonals=True, pseudo_random=True, **sampling_kwargs
+):
+    """Initialize a 2D random walk trajectory.
+
+    This is an adaptation of the proposition from [Cha+14]_.
+    It creates a trajectory by walking randomly to neighboring points
+    following a provided sampling density.
+
+    This implementation is different from the original proposition:
+    trajectories are continuous with a fixed length instead of
+    making random jumps to other locations, and an option
+    is provided to have pseudo-random walks to improve coverage.
+
+    Parameters
+    ----------
+    Nc : int
+        Number of shots
+    Ns : int
+        Number of samples per shot
+    density : array_like
+        Sampling density used to determine the walk probabilities,
+        normalized automatically by its sum during the call for convenience.
+    diagonals : bool, optional
+        Whether to draw the next walk step from the diagonal neighbors
+        on top of the adjacent ones. Default to ``True``.
+    pseudo_random : bool, optional
+        Whether to adapt the density dynamically to reduce areas
+        already covered. The density is still statistically followed
+        for undersampled acquisitions. Default to ``True``.
+    **sampling_kwargs
+        Sampling parameters in
+        ``mrinufft.trajectories.sampling.sample_from_density`` used for the
+        shot starting positions.
+
+    Returns
+    -------
+    array_like
+        2D random walk trajectory
+
+    References
+    ----------
+    .. 
[Cha+14] Chauffert, Nicolas, Philippe Ciuciu,
+       Jonas Kahn, and Pierre Weiss.
+       "Variable density sampling with continuous trajectories."
+       SIAM Journal on Imaging Sciences 7, no. 4 (2014): 1962-1992.
+    """
+    if len(density.shape) != 2:
+        raise ValueError("`density` is expected to be 2-dimensional.")
+    return _initialize_ND_random_walk(
+        Nc,
+        Ns,
+        density,
+        diagonals=diagonals,
+        pseudo_random=pseudo_random,
+        **sampling_kwargs,
+    )
+
+
+def initialize_3D_random_walk(
+    Nc, Ns, density, *, diagonals=True, pseudo_random=True, **sampling_kwargs
+):
+    """Initialize a 3D random walk trajectory.
+
+    This is an adaptation of the proposition from [Cha+14]_.
+    It creates a trajectory by walking randomly to neighboring points
+    following a provided sampling density.
+
+    This implementation is different from the original proposition:
+    trajectories are continuous with a fixed length instead of
+    making random jumps to other locations, and an option
+    is provided to have pseudo-random walks to improve coverage.
+
+    Parameters
+    ----------
+    Nc : int
+        Number of shots
+    Ns : int
+        Number of samples per shot
+    density : array_like
+        Sampling density used to determine the walk probabilities,
+        normalized automatically by its sum during the call for convenience.
+    diagonals : bool, optional
+        Whether to draw the next walk step from the diagonal neighbors
+        on top of the adjacent ones. Default to ``True``.
+    pseudo_random : bool, optional
+        Whether to adapt the density dynamically to reduce areas
+        already covered. The density is still statistically followed
+        for undersampled acquisitions. Default to ``True``.
+    **sampling_kwargs
+        Sampling parameters in
+        ``mrinufft.trajectories.sampling.sample_from_density`` used for the
+        shot starting positions.
+
+    Returns
+    -------
+    array_like
+        3D random walk trajectory
+
+    References
+    ----------
+    .. [Cha+14] Chauffert, Nicolas, Philippe Ciuciu,
+       Jonas Kahn, and Pierre Weiss.
+       "Variable density sampling with continuous trajectories."
+       SIAM Journal on Imaging Sciences 7, no. 4 (2014): 1962-1992.
+    """
+    if len(density.shape) != 3:
+        raise ValueError("`density` is expected to be 3-dimensional.")
+    return _initialize_ND_random_walk(
+        Nc,
+        Ns,
+        density,
+        diagonals=diagonals,
+        pseudo_random=pseudo_random,
+        **sampling_kwargs,
+    )
diff --git a/src/mrinufft/trajectories/inits/travelling_salesman.py b/src/mrinufft/trajectories/inits/travelling_salesman.py
new file mode 100644
index 000000000..6b4f7764b
--- /dev/null
+++ b/src/mrinufft/trajectories/inits/travelling_salesman.py
@@ -0,0 +1,289 @@
+"""Trajectories based on the Travelling Salesman Problem."""
+
+import numpy as np
+import numpy.linalg as nl
+from scipy.interpolate import CubicSpline
+from tqdm.auto import tqdm
+
+from ..maths import solve_tsp_with_2opt
+from ..sampling import sample_from_density
+from ..tools import oversample
+
+
+def _get_approx_cluster_sizes(nb_total, nb_clusters):
+    # Give a list of cluster sizes close to sqrt(`nb_total`)
+    cluster_sizes = round(nb_total / nb_clusters) * np.ones(nb_clusters).astype(int)
+    delta_sum = nb_total - np.sum(cluster_sizes)
+    cluster_sizes[: int(np.abs(delta_sum))] += np.sign(delta_sum)
+    return cluster_sizes
+
+
+def _sort_by_coordinate(array, coord):
+    # Sort a list of N-D locations by a Cartesian/spherical coordinate
+    if array.shape[-1] < 3 and coord.lower() in ["z", "theta"]:
+        raise ValueError(
+            f"Invalid `coord`='{coord}' for arrays with less than 3 dimensions."
+        )
+
+    match coord.lower():
+        case "x":
+            coord = array[..., 0]
+        case "y":
+            coord = array[..., 1]
+        case "z":
+            coord = array[..., 2]
+        case "r":
+            coord = np.linalg.norm(array, axis=-1)
+        case "phi":
+            coord = np.sign(array[..., 1]) * np.arccos(
+                array[..., 0] / nl.norm(array[..., :2], axis=-1)
+            )
+        case "theta":
+            coord = np.arccos(array[..., 2] / nl.norm(array, axis=-1))
+        case _:
+            raise ValueError(f"Unknown coordinate `{coord}`")
+    order = np.argsort(coord)
+    return array[order]
+
+
+def _cluster_by_coordinate(
+    locations, nb_clusters, cluster_by, second_cluster_by=None, sort_by=None
+):
+    # Cluster approximately a list of N-D locations by Cartesian/spherical coordinates
+    # Gather dimension variables
+    nb_dims = locations.shape[-1]
+    locations = locations.reshape((-1, nb_dims))
+    nb_locations = locations.shape[0]
+
+    # Check arguments validity
+    if nb_locations % nb_clusters:
+        raise ValueError("`nb_clusters` should divide the number of locations")
+    cluster_size = nb_locations // nb_clusters
+
+    # Create chunks of clusters by a first coordinate
+    locations = _sort_by_coordinate(locations, cluster_by)
+
+    if second_cluster_by:
+        # Cluster each location within the chunks of clusters by a second coordinate
+        chunk_sizes = _get_approx_cluster_sizes(
+            nb_clusters, round(np.sqrt(nb_clusters))
+        )
+        chunk_ranges = np.cumsum([0] + list(chunk_sizes))
+        for i in range(len(chunk_sizes)):
+            i_s, i_e = (
+                chunk_ranges[i] * cluster_size,
+                chunk_ranges[i + 1] * cluster_size,
+            )
+            locations[i_s:i_e] = _sort_by_coordinate(
+                locations[i_s:i_e], second_cluster_by
+            )
+    locations = locations.reshape((nb_clusters, cluster_size, nb_dims))
+
+    # Order locations within each cluster by another coordinate
+    if sort_by:
+        for i in range(nb_clusters):
+            locations[i] = _sort_by_coordinate(locations[i], sort_by)
+    return locations
+
+
+def _initialize_ND_travelling_salesman(
+    Nc,
+    Ns,
+    density,
+    first_cluster_by=None,
+    second_cluster_by=None,
+    sort_by=None,
+    tsp_tol=1e-8,
+    *,
+    verbose=False,
+    **sampling_kwargs,
+):
+    # Check arguments validity
+    if Nc * Ns > np.prod(density.shape):
+        raise ValueError("`density` array not large enough to pick `Nc` * `Ns` points.")
+    Nd = len(density.shape)
+
+    # Select k-space locations
+    trajectory = sample_from_density(Nc * Ns, density, **sampling_kwargs)
+
+    # Re-organise locations into Nc clusters
+    if first_cluster_by:
+        trajectory = _cluster_by_coordinate(
+            trajectory,
+            Nc,
+            cluster_by=first_cluster_by,
+            second_cluster_by=second_cluster_by,
+            sort_by=sort_by,
+        )
+
+        # Compute TSP solution within each cluster/shot
+        for i in tqdm(range(Nc), disable=not verbose):
+            order = solve_tsp_with_2opt(trajectory[i], improvement_threshold=tsp_tol)
+            trajectory[i] = trajectory[i][order]
+    else:
+        trajectory = (
+            _sort_by_coordinate(trajectory, coord=sort_by) if sort_by else trajectory
+        )
+
+        # Compute TSP solution over the whole cloud
+        order = solve_tsp_with_2opt(trajectory, improvement_threshold=tsp_tol)
+        trajectory = trajectory[order]
+        trajectory = trajectory.reshape((Nc, Ns, Nd))
+
+    return trajectory
+
+
+def initialize_2D_travelling_salesman(
+    Nc,
+    Ns,
+    density,
+    first_cluster_by=None,
+    second_cluster_by=None,
+    sort_by=None,
+    tsp_tol=1e-8,
+    *,
+    verbose=False,
+    **sampling_kwargs,
+):
+    """
+    Initialize a 2D trajectory using a Travelling Salesman Problem (TSP)-based path.
+
+    This is a reproduction of the work from [Cha+14]_. The TSP solution
+    is obtained using the 2-opt method in O(n²). 
An additional option + is provided to cluster shots before solving the TSP and thus + reduce drastically the computation time. The initial sampling method + can also be customized. + + Parameters + ---------- + Nc : int + The number of clusters (or shots) to divide the trajectory into. + Ns : int + The number of points per cluster. + density : np.ndarray + A 2-dimensional density array from which points are sampled. + first_cluster_by : str, optional + The coordinate used to cluster points initially, by default ``None``. + second_cluster_by : str, optional + A secondary coordinate used for clustering within primary clusters, + by default ``None``. + sort_by : str, optional + The coordinate by which to order points within each cluster, + by default ``None``. + tsp_tol : float, optional + Convergence tolerance for the TSP solution, by default ``1e-8``. + verbose : bool, optional + If ``True``, displays a progress bar, by default ``False``. + **sampling_kwargs : dict, optional + Additional arguments to pass to + ``mrinufft.trajectories.sampling.sample_from_density``. + + Returns + ------- + np.ndarray + A 2D array representing the TSP-ordered trajectory. + + Raises + ------ + ValueError + If ``density`` is not a 2-dimensional array. + + References + ---------- + .. [Cha+14] Chauffert, Nicolas, Philippe Ciuciu, + Jonas Kahn, and Pierre Weiss. + "Variable density sampling with continuous trajectories." + SIAM Journal on Imaging Sciences 7, no. 4 (2014): 1962-1992. + """ + if len(density.shape) != 2: + raise ValueError("`density` is expected to be 2-dimensional.") + return _initialize_ND_travelling_salesman( + Nc, + Ns, + density, + first_cluster_by=first_cluster_by, + second_cluster_by=second_cluster_by, + sort_by=sort_by, + tsp_tol=tsp_tol, + verbose=verbose, + **sampling_kwargs, + ) + + +def initialize_3D_travelling_salesman( + Nc, + Ns, + density, + first_cluster_by=None, + second_cluster_by=None, + sort_by=None, + tsp_tol=1e-8, + *, + verbose=False, + **sampling_kwargs, +): + """ + Initialize a 3D trajectory using a Travelling Salesman Problem (TSP)-based path. + + This is a reproduction of the work from [Cha+14]_. The TSP solution + is obtained using the 2-opt method with a complexity in O(n²) in time + and memory. + + An additional option is provided to cluster shots before solving the + TSP and thus reduce drastically the computation time. The initial + sampling method can also be customized. + + Parameters + ---------- + Nc : int + The number of clusters (or shots) to divide the trajectory into. + Ns : int + The number of points per cluster. + density : np.ndarray + A 3-dimensional density array from which points are sampled. + first_cluster_by : str, optional + The coordinate used to cluster points initially, by default ``None``. + second_cluster_by : str, optional + A secondary coordinate used for clustering within primary clusters, + by default ``None``. + sort_by : str, optional + The coordinate by which to order points within each cluster, + by default ``None``. + tsp_tol : float, optional + Convergence tolerance for the TSP solution, by default ``1e-8``. + verbose : bool, optional + If ``True``, displays a progress bar, by default ``False``. + **sampling_kwargs : dict, optional + Additional arguments to pass to + ``mrinufft.trajectories.sampling.sample_from_density``. + + Returns + ------- + np.ndarray + A 3D array representing the TSP-ordered trajectory. + + Raises + ------ + ValueError + If ``density`` is not a 3-dimensional array. + + References + ---------- + .. 
[Cha+14] Chauffert, Nicolas, Philippe Ciuciu, + Jonas Kahn, and Pierre Weiss. + "Variable density sampling with continuous trajectories." + SIAM Journal on Imaging Sciences 7, no. 4 (2014): 1962-1992. + """ + if len(density.shape) != 3: + raise ValueError("`density` is expected to be 3-dimensional.") + return _initialize_ND_travelling_salesman( + Nc, + Ns, + density, + first_cluster_by=first_cluster_by, + second_cluster_by=second_cluster_by, + sort_by=sort_by, + tsp_tol=tsp_tol, + verbose=verbose, + **sampling_kwargs, + ) diff --git a/src/mrinufft/trajectories/maths.py b/src/mrinufft/trajectories/maths.py deleted file mode 100644 index 906b1e383..000000000 --- a/src/mrinufft/trajectories/maths.py +++ /dev/null @@ -1,343 +0,0 @@ -"""Utility functions for mathematical operations.""" - -import numpy as np -import numpy.linalg as nl - -CIRCLE_PACKING_DENSITY = np.pi / (2 * np.sqrt(3)) -EIGENVECTOR_2D_FIBONACCI = (0.4656, 0.6823, 1) - - -########## -# PRIMES # -########## - - -def compute_coprime_factors(Nc, length, start=1, update=1): - """Compute a list of coprime factors of Nc. - - Parameters - ---------- - Nc : int - Number to factorize. - length : int - Number of coprime factors to compute. - start : int, optional - First number to check. The default is 1. - update : int, optional - Increment between two numbers to check. The default is 1. - - Returns - ------- - list - List of coprime factors of Nc. - """ - count = start - coprimes = [] - while len(coprimes) < length: - # Check greatest common divider (gcd) - if np.gcd(Nc, count) == 1: - coprimes.append(count) - count += update - return coprimes - - -############# -# ROTATIONS # -############# - - -def R2D(theta): - """Initialize 2D rotation matrix. - - Parameters - ---------- - theta : float - Rotation angle in rad. - - Returns - ------- - np.ndarray - 2D rotation matrix. - """ - return np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) - - -def Rx(theta): - """Initialize 3D rotation matrix around x axis. - - Parameters - ---------- - theta : float - Rotation angle in rad. - - Returns - ------- - np.ndarray - 3D rotation matrix. - """ - return np.array( - [ - [1, 0, 0], - [0, np.cos(theta), -np.sin(theta)], - [0, np.sin(theta), np.cos(theta)], - ] - ) - - -def Ry(theta): - """Initialize 3D rotation matrix around y axis. - - Parameters - ---------- - theta : float - Rotation angle in rad. - - Returns - ------- - np.ndarray - 3D rotation matrix. - """ - return np.array( - [ - [np.cos(theta), 0, np.sin(theta)], - [0, 1, 0], - [-np.sin(theta), 0, np.cos(theta)], - ] - ) - - -def Rz(theta): - """Initialize 3D rotation matrix around z axis. - - Parameters - ---------- - theta : float - Rotation angle in rad. - - Returns - ------- - np.ndarray - 3D rotation matrix. - """ - return np.array( - [ - [np.cos(theta), -np.sin(theta), 0], - [np.sin(theta), np.cos(theta), 0], - [0, 0, 1], - ] - ) - - -def Rv(v1, v2, normalize=True, eps=1e-8): - """Initialize 3D rotation matrix from two vectors. - - Initialize a 3D rotation matrix from two vectors using Rodrigues's rotation - formula. Note that the rotation is carried around the axis orthogonal to both - vectors from the origin, and therefore is undetermined when both vectors - are colinear. While this case is handled manually, close cases might result - in approximative behaviors. - - Parameters - ---------- - v1 : np.ndarray - Source vector. - v2 : np.ndarray - Target vector. - normalize : bool, optional - Normalize the vectors. The default is True. 
- - Returns - ------- - np.ndarray - 3D rotation matrix. - """ - # Check for colinearity, not handled by Rodrigues' coefficients - if nl.norm(np.cross(v1, v2)) < eps: - sign = np.sign(np.dot(v1, v2)) - return sign * np.identity(3) - - if normalize: - v1, v2 = v1 / np.linalg.norm(v1), v2 / np.linalg.norm(v2) - cos_theta = np.dot(v1, v2) - v3 = np.cross(v1, v2) - cross_matrix = np.cross(v3, np.identity(v3.shape[0]) * -1) - return np.identity(3) + cross_matrix + cross_matrix @ cross_matrix / (1 + cos_theta) - - -def Ra(vector, theta): - """Initialize 3D rotation matrix around an arbitrary vector. - - Initialize a 3D rotation matrix to rotate around `vector` by an angle `theta`. - It corresponds to a generalized formula with `Rx`, `Ry` and `Rz` as subcases. - - Parameters - ---------- - vector : np.ndarray - Vector defining the rotation axis, automatically normalized. - theta : float - Angle in radians defining the rotation around `vector`. - - Returns - ------- - np.ndarray - 3D rotation matrix. - """ - cos_t = np.cos(theta) - sin_t = np.sin(theta) - v_x, v_y, v_z = vector / np.linalg.norm(vector) - return np.array( - [ - [ - cos_t + v_x**2 * (1 - cos_t), - v_x * v_y * (1 - cos_t) + v_z * sin_t, - v_x * v_z * (1 - cos_t) - v_y * sin_t, - ], - [ - v_y * v_x * (1 - cos_t) - v_z * sin_t, - cos_t + v_y**2 * (1 - cos_t), - v_y * v_z * (1 - cos_t) + v_x * sin_t, - ], - [ - v_z * v_x * (1 - cos_t) + v_y * sin_t, - v_z * v_y * (1 - cos_t) - v_x * sin_t, - cos_t + v_z**2 * (1 - cos_t), - ], - ] - ) - - -############# -# FIBONACCI # -############# - - -def is_from_fibonacci_sequence(n): - """Check if an integer belongs to the Fibonacci sequence. - - An integer belongs to the Fibonacci sequence if either - :math:`5*n²+4` or :math:`5*n²-4` is a perfect square - (`Wikipedia `_). - - Parameters - ---------- - n : int - Integer to check. - - Returns - ------- - bool - Whether or not ``n`` belongs to the Fibonacci sequence. - """ - - def _is_perfect_square(n): - r = int(np.sqrt(n)) - return r * r == n - - return _is_perfect_square(5 * n**2 + 4) or _is_perfect_square(5 * n**2 - 4) - - -def get_closest_fibonacci_number(x): - """Provide the closest Fibonacci number. - - Parameters - ---------- - x : float - Value to match. - - Returns - ------- - int - Closest number from the Fibonacci sequence. - """ - # Find the power such that x ~= phi ** power - phi = (1 + np.sqrt(5)) / 2 - power = np.ceil(np.log(x) / np.log(phi)) + 1 - - # Check closest between the ones below and above n - lower_xf = int(np.around(phi ** (power) / np.sqrt(5))) - upper_xf = int(np.around(phi ** (power + 1) / np.sqrt(5))) - xf = lower_xf if (x - lower_xf) < (upper_xf - x) else upper_xf - return xf - - -def generate_fibonacci_lattice(nb_points, epsilon=0.25): - """Generate 2D Cartesian coordinates using the Fibonacci lattice. - - Place 2D points over a 1x1 square following the Fibonacci lattice. - - Parameters - ---------- - nb_points : int - Number of 2D points to generate. - epsilon : float - Continuous offset used to reduce initially wrong lattice behavior. - - Returns - ------- - np.ndarray - Array of 2D Cartesian coordinates covering a 1x1 square. - """ - angle = (1 + np.sqrt(5)) / 2 - fibonacci_square = np.zeros((nb_points, 2)) - fibonacci_square[:, 0] = (np.arange(nb_points) / angle) % 1 - fibonacci_square[:, 1] = (np.arange(nb_points) + epsilon) / ( - nb_points - 1 + 2 * epsilon - ) - return fibonacci_square - - -def generate_fibonacci_circle(nb_points, epsilon=0.25): - """Generate 2D Cartesian coordinates shaped as Fibonacci spirals. 
- - Place 2D points structured as Fibonacci spirals by distorting - a square Fibonacci lattice into a circle of radius 1. - - Parameters - ---------- - nb_points : int - Number of 2D points to generate. - epsilon : float - Continuous offset used to reduce initially wrong lattice behavior. - - Returns - ------- - np.ndarray - Array of 2D Cartesian coordinates covering a circle of radius 1. - """ - fibonacci_square = generate_fibonacci_lattice(nb_points, epsilon) - radius = np.sqrt(fibonacci_square[:, 1]) - angles = 2 * np.pi * fibonacci_square[:, 0] - - fibonacci_circle = np.zeros((nb_points, 2)) - fibonacci_circle[:, 0] = radius * np.cos(angles) - fibonacci_circle[:, 1] = radius * np.sin(angles) - return fibonacci_circle - - -def generate_fibonacci_sphere(nb_points, epsilon=0.25): - """Generate 3D Cartesian coordinates as a Fibonacci sphere. - - Place 3D points almost evenly over a sphere surface of radius - 1 by distorting a square Fibonacci lattice into a sphere. - - Parameters - ---------- - nb_points : int - Number of 3D points to generate. - epsilon : float - Continuous offset used to reduce initially wrong lattice behavior. - - Returns - ------- - np.ndarray - Array of 3D Cartesian coordinates covering a sphere of radius 1. - """ - fibonacci_square = generate_fibonacci_lattice(nb_points, epsilon) - theta = 2 * np.pi * fibonacci_square[:, 0] - phi = np.arccos(1 - 2 * fibonacci_square[:, 1]) - - fibonacci_sphere = np.zeros((nb_points, 3)) - fibonacci_sphere[:, 0] = np.cos(theta) * np.sin(phi) - fibonacci_sphere[:, 1] = np.sin(theta) * np.sin(phi) - fibonacci_sphere[:, 2] = np.cos(phi) - return fibonacci_sphere diff --git a/src/mrinufft/trajectories/maths/__init__.py b/src/mrinufft/trajectories/maths/__init__.py new file mode 100644 index 000000000..2d49c099f --- /dev/null +++ b/src/mrinufft/trajectories/maths/__init__.py @@ -0,0 +1,18 @@ +"""Utility module for mathematical operations.""" + +# Constants +import numpy as np + +from .fibonacci import ( + generate_fibonacci_circle, + generate_fibonacci_lattice, + generate_fibonacci_sphere, + get_closest_fibonacci_number, + is_from_fibonacci_sequence, +) +from .primes import compute_coprime_factors +from .rotations import R2D, Ra, Rv, Rx, Ry, Rz +from .tsp_solver import solve_tsp_with_2opt + +CIRCLE_PACKING_DENSITY = np.pi / (2 * np.sqrt(3)) +EIGENVECTOR_2D_FIBONACCI = (0.4656, 0.6823, 1) diff --git a/src/mrinufft/trajectories/maths/fibonacci.py b/src/mrinufft/trajectories/maths/fibonacci.py new file mode 100644 index 000000000..d7cc20a88 --- /dev/null +++ b/src/mrinufft/trajectories/maths/fibonacci.py @@ -0,0 +1,135 @@ +"""Fibonacci-related functions.""" + +import numpy as np + + +def is_from_fibonacci_sequence(n): + """Check if an integer belongs to the Fibonacci sequence. + + An integer belongs to the Fibonacci sequence if either + :math:`5*n²+4` or :math:`5*n²-4` is a perfect square + (`Wikipedia `_). + + Parameters + ---------- + n : int + Integer to check. + + Returns + ------- + bool + Whether or not ``n`` belongs to the Fibonacci sequence. + """ + + def _is_perfect_square(n): + r = int(np.sqrt(n)) + return r * r == n + + return _is_perfect_square(5 * n**2 + 4) or _is_perfect_square(5 * n**2 - 4) + + +def get_closest_fibonacci_number(x): + """Provide the closest Fibonacci number. + + Parameters + ---------- + x : float + Value to match. + + Returns + ------- + int + Closest number from the Fibonacci sequence. 
+ """ + # Find the power such that x ~= phi ** power + phi = (1 + np.sqrt(5)) / 2 + power = np.ceil(np.log(x) / np.log(phi)) + 1 + + # Check closest between the ones below and above n + lower_xf = int(np.around(phi ** (power) / np.sqrt(5))) + upper_xf = int(np.around(phi ** (power + 1) / np.sqrt(5))) + xf = lower_xf if (x - lower_xf) < (upper_xf - x) else upper_xf + return xf + + +def generate_fibonacci_lattice(nb_points, epsilon=0.25): + """Generate 2D Cartesian coordinates using the Fibonacci lattice. + + Place 2D points over a 1x1 square following the Fibonacci lattice. + + Parameters + ---------- + nb_points : int + Number of 2D points to generate. + epsilon : float + Continuous offset used to reduce initially wrong lattice behavior. + + Returns + ------- + np.ndarray + Array of 2D Cartesian coordinates covering a 1x1 square. + """ + angle = (1 + np.sqrt(5)) / 2 + fibonacci_square = np.zeros((nb_points, 2)) + fibonacci_square[:, 0] = (np.arange(nb_points) / angle) % 1 + fibonacci_square[:, 1] = (np.arange(nb_points) + epsilon) / ( + nb_points - 1 + 2 * epsilon + ) + return fibonacci_square + + +def generate_fibonacci_circle(nb_points, epsilon=0.25): + """Generate 2D Cartesian coordinates shaped as Fibonacci spirals. + + Place 2D points structured as Fibonacci spirals by distorting + a square Fibonacci lattice into a circle of radius 1. + + Parameters + ---------- + nb_points : int + Number of 2D points to generate. + epsilon : float + Continuous offset used to reduce initially wrong lattice behavior. + + Returns + ------- + np.ndarray + Array of 2D Cartesian coordinates covering a circle of radius 1. + """ + fibonacci_square = generate_fibonacci_lattice(nb_points, epsilon) + radius = np.sqrt(fibonacci_square[:, 1]) + angles = 2 * np.pi * fibonacci_square[:, 0] + + fibonacci_circle = np.zeros((nb_points, 2)) + fibonacci_circle[:, 0] = radius * np.cos(angles) + fibonacci_circle[:, 1] = radius * np.sin(angles) + return fibonacci_circle + + +def generate_fibonacci_sphere(nb_points, epsilon=0.25): + """Generate 3D Cartesian coordinates as a Fibonacci sphere. + + Place 3D points almost evenly over a sphere surface of radius + 1 by distorting a square Fibonacci lattice into a sphere. + + Parameters + ---------- + nb_points : int + Number of 3D points to generate. + epsilon : float + Continuous offset used to reduce initially wrong lattice behavior. + + Returns + ------- + np.ndarray + Array of 3D Cartesian coordinates covering a sphere of radius 1. + """ + fibonacci_square = generate_fibonacci_lattice(nb_points, epsilon) + theta = 2 * np.pi * fibonacci_square[:, 0] + phi = np.arccos(1 - 2 * fibonacci_square[:, 1]) + + fibonacci_sphere = np.zeros((nb_points, 3)) + fibonacci_sphere[:, 0] = np.cos(theta) * np.sin(phi) + fibonacci_sphere[:, 1] = np.sin(theta) * np.sin(phi) + fibonacci_sphere[:, 2] = np.cos(phi) + return fibonacci_sphere diff --git a/src/mrinufft/trajectories/maths/primes.py b/src/mrinufft/trajectories/maths/primes.py new file mode 100644 index 000000000..3a9df50cb --- /dev/null +++ b/src/mrinufft/trajectories/maths/primes.py @@ -0,0 +1,32 @@ +"""Prime-related functions.""" + +import numpy as np + + +def compute_coprime_factors(Nc, length, start=1, update=1): + """Compute a list of coprime factors of Nc. + + Parameters + ---------- + Nc : int + Number to factorize. + length : int + Number of coprime factors to compute. + start : int, optional + First number to check. The default is 1. + update : int, optional + Increment between two numbers to check. The default is 1. 
+ + Returns + ------- + list + List of coprime factors of Nc. + """ + count = start + coprimes = [] + while len(coprimes) < length: + # Check greatest common divider (gcd) + if np.gcd(Nc, count) == 1: + coprimes.append(count) + count += update + return coprimes diff --git a/src/mrinufft/trajectories/maths/rotations.py b/src/mrinufft/trajectories/maths/rotations.py new file mode 100644 index 000000000..568780786 --- /dev/null +++ b/src/mrinufft/trajectories/maths/rotations.py @@ -0,0 +1,164 @@ +"""Rotation functions in 2D & 3D spaces.""" + +import numpy as np +import numpy.linalg as nl + + +def R2D(theta): + """Initialize 2D rotation matrix. + + Parameters + ---------- + theta : float + Rotation angle in rad. + + Returns + ------- + np.ndarray + 2D rotation matrix. + """ + return np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) + + +def Rx(theta): + """Initialize 3D rotation matrix around x axis. + + Parameters + ---------- + theta : float + Rotation angle in rad. + + Returns + ------- + np.ndarray + 3D rotation matrix. + """ + return np.array( + [ + [1, 0, 0], + [0, np.cos(theta), -np.sin(theta)], + [0, np.sin(theta), np.cos(theta)], + ] + ) + + +def Ry(theta): + """Initialize 3D rotation matrix around y axis. + + Parameters + ---------- + theta : float + Rotation angle in rad. + + Returns + ------- + np.ndarray + 3D rotation matrix. + """ + return np.array( + [ + [np.cos(theta), 0, np.sin(theta)], + [0, 1, 0], + [-np.sin(theta), 0, np.cos(theta)], + ] + ) + + +def Rz(theta): + """Initialize 3D rotation matrix around z axis. + + Parameters + ---------- + theta : float + Rotation angle in rad. + + Returns + ------- + np.ndarray + 3D rotation matrix. + """ + return np.array( + [ + [np.cos(theta), -np.sin(theta), 0], + [np.sin(theta), np.cos(theta), 0], + [0, 0, 1], + ] + ) + + +def Rv(v1, v2, normalize=True, eps=1e-8): + """Initialize 3D rotation matrix from two vectors. + + Initialize a 3D rotation matrix from two vectors using Rodrigues's rotation + formula. Note that the rotation is carried around the axis orthogonal to both + vectors from the origin, and therefore is undetermined when both vectors + are colinear. While this case is handled manually, close cases might result + in approximative behaviors. + + Parameters + ---------- + v1 : np.ndarray + Source vector. + v2 : np.ndarray + Target vector. + normalize : bool, optional + Normalize the vectors. The default is True. + + Returns + ------- + np.ndarray + 3D rotation matrix. + """ + # Check for colinearity, not handled by Rodrigues' coefficients + if nl.norm(np.cross(v1, v2)) < eps: + sign = np.sign(np.dot(v1, v2)) + return sign * np.identity(3) + + if normalize: + v1, v2 = v1 / np.linalg.norm(v1), v2 / np.linalg.norm(v2) + cos_theta = np.dot(v1, v2) + v3 = np.cross(v1, v2) + cross_matrix = np.cross(v3, np.identity(v3.shape[0]) * -1) + return np.identity(3) + cross_matrix + cross_matrix @ cross_matrix / (1 + cos_theta) + + +def Ra(vector, theta): + """Initialize 3D rotation matrix around an arbitrary vector. + + Initialize a 3D rotation matrix to rotate around `vector` by an angle `theta`. + It corresponds to a generalized formula with `Rx`, `Ry` and `Rz` as subcases. + + Parameters + ---------- + vector : np.ndarray + Vector defining the rotation axis, automatically normalized. + theta : float + Angle in radians defining the rotation around `vector`. + + Returns + ------- + np.ndarray + 3D rotation matrix. 
+ """ + cos_t = np.cos(theta) + sin_t = np.sin(theta) + v_x, v_y, v_z = vector / np.linalg.norm(vector) + return np.array( + [ + [ + cos_t + v_x**2 * (1 - cos_t), + v_x * v_y * (1 - cos_t) + v_z * sin_t, + v_x * v_z * (1 - cos_t) - v_y * sin_t, + ], + [ + v_y * v_x * (1 - cos_t) - v_z * sin_t, + cos_t + v_y**2 * (1 - cos_t), + v_y * v_z * (1 - cos_t) + v_x * sin_t, + ], + [ + v_z * v_x * (1 - cos_t) + v_y * sin_t, + v_z * v_y * (1 - cos_t) - v_x * sin_t, + cos_t + v_z**2 * (1 - cos_t), + ], + ] + ) diff --git a/src/mrinufft/trajectories/maths/tsp_solver.py b/src/mrinufft/trajectories/maths/tsp_solver.py new file mode 100644 index 000000000..b4bcf859d --- /dev/null +++ b/src/mrinufft/trajectories/maths/tsp_solver.py @@ -0,0 +1,55 @@ +"""Solver for the Travelling Salesman Problem.""" + +import numpy as np + + +def solve_tsp_with_2opt(locations, improvement_threshold=1e-8): + """Solve the TSP problem using a 2-opt approach. + + A sub-optimal solution to the Travelling Salesman Problem (TSP) + is provided using the 2-opt approach in O(n²) where chunks of + an initially random route are reversed, and selected if the + total distance is reduced. As a result the route solution + does not cross its own path in 2D. + + Parameters + ---------- + locations : array_like + An array of N points with shape (N, D) with D + the space dimension. + improvement_threshold: float + Threshold used as progress criterion to stop the optimization + process. + + Returns + ------- + array_like + The new positions order of shape (N,). + """ + route = np.arange(locations.shape[0]) + distances = np.linalg.norm(locations[None] - locations[:, None], axis=-1) + route_length = np.sum(distances[0]) + + improvement_factor = 1 + while improvement_factor >= improvement_threshold: + old_route_length = route_length + for i in range(1, len(route) - 2): + # Check new distance by reversing chunks between i and j + for j in range(i + 1, len(route) - 1): + # Compute new route distance variation + delta_length = ( + distances[route[i - 1], route[j]] + + distances[route[i], route[j + 1]] + - distances[route[i - 1], route[i]] + - distances[route[j], route[j + 1]] + ) + + if delta_length < 0: + # Reverse route chunk + route = np.concatenate( + [route[:i], route[i : j + 1][::-1], route[j + 1 :]] + ) + route_length += delta_length + + improvement_factor = abs(1 - route_length / old_route_length) + return route diff --git a/src/mrinufft/trajectories/sampling.py b/src/mrinufft/trajectories/sampling.py new file mode 100644 index 000000000..14ed18fc0 --- /dev/null +++ b/src/mrinufft/trajectories/sampling.py @@ -0,0 +1,370 @@ +"""Sampling densities and methods.""" + +import numpy as np +import numpy.fft as nf +import numpy.linalg as nl +import numpy.random as nr +from tqdm.auto import tqdm + +from .utils import KMAX + + +def sample_from_density( + nb_samples, density, method="random", *, dim_compensation="auto" +): + """ + Sample points based on a given density distribution. + + Parameters + ---------- + nb_samples : int + The number of samples to draw. + density : np.ndarray + An array representing the density distribution from which samples are drawn, + normalized automatically by its sum during the call for convenience. + method : str, optional + The sampling method to use, either 'random' for random sampling over + the discrete grid defined by the density or 'lloyd' for Lloyd's + algorithm over a continuous space, by default "random". 
+ dim_compensation : str, bool, optional + Whether to apply a specific dimensionality compensation introduced + in [Cha+14]_. An exponent ``N/(N-1)`` with ``N`` the number of + dimensions in ``density`` is applied to fix the observed + density expectation when set to ``"auto"`` and ``method="lloyd"``. + It is also relevant to set it to ``True`` when ``method="random"`` + and one wants to create binary masks with continuous paths between + drawn samples. + + Returns + ------- + np.ndarray + An array of range-normalized sampled locations. + + Raises + ------ + ValueError + If ``nb_samples`` exceeds the total size of the density array or if the + specified ``method`` is unknown. + + References + ---------- + .. [Cha+14] Chauffert, Nicolas, Philippe Ciuciu, + Jonas Kahn, and Pierre Weiss. + "Variable density sampling with continuous trajectories." + SIAM Journal on Imaging Sciences 7, no. 4 (2014): 1962-1992. + """ + try: + from sklearn.cluster import BisectingKMeans, KMeans + except ImportError as err: + raise ImportError( + "The scikit-learn module is not available. Please install " + "it along with the [extra] dependencies " + "or using `pip install scikit-learn`." + ) from err + + # Define dimension variables + shape = np.array(density.shape) + nb_dims = len(shape) + max_nb_samples = np.prod(shape) + density = density / np.sum(density) + + if nb_samples > max_nb_samples: + raise ValueError("`nb_samples` must be lower than the size of `density`.") + + # Check for dimensionality compensation + if isinstance(dim_compensation, str) and dim_compensation != "auto": + raise ValueError(f"Unknown string {dim_compensation} for `dim_compensation`.") + if (dim_compensation == "auto" and method == "lloyd") or ( + isinstance(dim_compensation, bool) and dim_compensation + ): + density = density ** (nb_dims / (nb_dims - 1)) + density = density / np.sum(density) + + # Sample using specified method + rng = nr.default_rng() + if method == "random": + choices = rng.choice( + np.arange(max_nb_samples), + size=nb_samples, + p=density.flatten(), + replace=False, + ) + locations = np.indices(shape).reshape((nb_dims, -1))[:, choices] + locations = locations.T + 0.5 + locations = locations / shape[None, :] + locations = 2 * KMAX * locations - KMAX + elif method == "lloyd": + kmeans = ( + KMeans(n_clusters=nb_samples) + if nb_dims <= 2 + else BisectingKMeans(n_clusters=nb_samples) + ) + kmeans.fit( + np.indices(density.shape).reshape((nb_dims, -1)).T, + sample_weight=density.flatten(), + ) + locations = kmeans.cluster_centers_ - np.array(density.shape) / 2 + locations = KMAX * locations / np.max(np.abs(locations)) + else: + raise ValueError(f"Unknown sampling method {method}.") + return locations + + +def create_cutoff_decay_density(shape, cutoff, decay, resolution=None): + """ + Create a density with central plateau and polynomial decay. + + Create a density composed of a central constant-value ellipsis + defined by a cutoff ratio, followed by a polynomial decay over + outer regions as defined in [Cha+22]_. + + Parameters + ---------- + shape : tuple of int + The shape of the density grid, analog to the field-of-view + as opposed to ``resolution`` below. + cutoff : float + The ratio of the largest k-space dimension between 0 + and 1 within which density remains uniform and beyond which it decays. + decay : float + The polynomial decay in density beyond the cutoff ratio. + resolution : np.ndarray, optional + Resolution scaling factors for each dimension of the density grid, + by default ``None``. 
+ + Returns + ------- + np.ndarray + A density array with values decaying based on the specified + cutoff ratio and decay rate. + + References + ---------- + .. [Cha+22] Chaithya, G. R., Pierre Weiss, Guillaume Daval-Frérot, + Aurélien Massire, Alexandre Vignaud, and Philippe Ciuciu. + "Optimizing full 3D SPARKLING trajectories for high-resolution + magnetic resonance imaging." + IEEE Transactions on Medical Imaging 41, no. 8 (2022): 2105-2117. + """ + shape = np.array(shape) + nb_dims = len(shape) + + if not resolution: + resolution = np.ones(nb_dims) + + differences = np.indices(shape).astype(float) + for i in range(nb_dims): + differences[i] = differences[i] + 0.5 - shape[i] / 2 + differences[i] = differences[i] / shape[i] / resolution[i] + distances = nl.norm(differences, axis=0) + + cutoff = cutoff * np.max(differences) if cutoff else np.min(differences) + density = np.ones(shape) + decay_mask = np.where(distances > cutoff, True, False) + + density[decay_mask] = (cutoff / distances[decay_mask]) ** decay + density = density / np.sum(density) + return density + + +def create_polynomial_density(shape, decay, resolution=None): + """ + Create a density with polynomial decay from the center. + + Parameters + ---------- + shape : tuple of int + The shape of the density grid. + decay : float + The exponent that controls the rate of decay for density. + resolution : np.ndarray, optional + Resolution scaling factors for each dimension of the density grid, + by default None. + + Returns + ------- + np.ndarray + A density array with polynomial decay. + """ + return create_cutoff_decay_density( + shape, cutoff=0, decay=decay, resolution=resolution + ) + + +def create_energy_density(dataset): + """ + Create a density based on energy in the Fourier spectrum. + + A density is made based on the average energy in the Fourier domain + of volumes from a target image dataset. + + Parameters + ---------- + dataset : np.ndarray + The dataset from which to calculate the density + based on its Fourier transform, with an expected + shape (nb_volumes, dim_1, ..., dim_N). + An N-dimensional Fourier transform is performed. + + Returns + ------- + np.ndarray + A density array derived from the mean energy in the Fourier + domain of the input dataset. + """ + nb_dims = len(dataset.shape) - 1 + axes = range(1, nb_dims + 1) + + kspace = nf.fftshift(nf.fftn(nf.fftshift(dataset, axes=axes), axes=axes), axes=axes) + energy = np.mean(np.abs(kspace), axis=0) + density = energy / np.sum(energy) + return density + + +def create_chauffert_density(shape, wavelet_basis, nb_wavelet_scales, verbose=False): + """Create a density based on Chauffert's method. + + This is a reproduction of the proposition from [CCW13]_. + A sampling density is derived from compressed sensing + equations to maximize guarantees of exact image recovery + for a specified sparse wavelet domain decomposition. + + Parameters + ---------- + shape : tuple of int + The shape of the density grid. + wavelet_basis : str, pywt.Wavelet + The wavelet basis to use for wavelet decomposition, either + as a built-in wavelet name from the PyWavelets package + or as a custom ``pywt.Wavelet`` object. + nb_wavelet_scales : int + The number of wavelet scales to use in decomposition. + verbose : bool, optional + If ``True``, displays a progress bar. Default to ``False``. + + Returns + ------- + np.ndarray + A density array created based on wavelet transform coefficients. 
+ + See Also + -------- + pywt.wavelist : A list of wavelet decompositions available in the + PyWavelets package used inside the function. + pywt.Wavelet : A wavelet object accepted to generate Chauffert densities. + + References + ---------- + .. [CCW13] Chauffert, Nicolas, Philippe Ciuciu, and Pierre Weiss. + "Variable density compressed sensing in MRI. + Theoretical vs heuristic sampling strategies." + In 2013 IEEE 10th International Symposium on Biomedical Imaging, + pp. 298-301. IEEE, 2013. + """ + try: + import pywt as pw + except ImportError as err: + raise ImportError( + "The PyWavelets module is not available. Please install " + "it along with the [extra] dependencies " + "or using `pip install pywavelets`." + ) from err + + nb_dims = len(shape) + indices = np.indices(shape).reshape((nb_dims, -1)).T + + density = np.zeros(shape) + unit_vector = np.zeros(shape) + for ids in tqdm(indices, disable=not verbose): + ids = tuple(ids) + unit_vector[ids] = 1 + fourier_vector = nf.ifftn(unit_vector) + coeffs = pw.wavedecn( + fourier_vector, wavelet=wavelet_basis, level=nb_wavelet_scales + ) + coeffs, _ = pw.coeffs_to_array(coeffs) + density[ids] = np.max(np.abs(coeffs)) ** 2 + unit_vector[ids] = 0 + + density = density / np.sum(density) + return nf.ifftshift(density) + + +def create_fast_chauffert_density(shape, wavelet_basis, nb_wavelet_scales): + """Create a density based on an approximated Chauffert method. + + This implementation is based on this + tutorial: https://github.com/philouc/mri_acq_recon_tutorial. + It is a fast approximation of the proposition from [CCW13]_, + where a sampling density is derived from compressed sensing + equations to maximize guarantees of exact image recovery + for a specified sparse wavelet domain decomposition. + + In this approximation, the decomposition dimensions are + considered independent and computed separately to accelerate + the density generation. + + Parameters + ---------- + shape : tuple of int + The shape of the density grid. + wavelet_basis : str, pywt.Wavelet + The wavelet basis to use for wavelet decomposition, either + as a built-in wavelet name from the PyWavelets package + or as a custom ``pywt.Wavelet`` object. + nb_wavelet_scales : int + The number of wavelet scales to use in decomposition. + + Returns + ------- + np.ndarray + A density array created using a faster approximation + based on 1D projections of the wavelet transform. + + See Also + -------- + pywt.wavelist : A list of wavelet decompositions available in the + PyWavelets package used inside the function. + pywt.Wavelet : A wavelet object accepted to generate Chauffert densities. + + References + ---------- + .. [CCW13] Chauffert, Nicolas, Philippe Ciuciu, and Pierre Weiss. + "Variable density compressed sensing in MRI. + Theoretical vs heuristic sampling strategies." + In 2013 IEEE 10th International Symposium on Biomedical Imaging, + pp. 298-301. IEEE, 2013. + """ + try: + import pywt as pw + except ImportError as err: + raise ImportError( + "The PyWavelets module is not available. Please install " + "it along with the [extra] dependencies " + "or using `pip install pywavelets`." 
+ ) from err + + nb_dims = len(shape) + + density = np.ones(shape) + for k, s in enumerate(shape): + unit_vector = np.zeros(s) + density_1d = np.zeros(s) + + for i in range(s): + unit_vector[i] = 1 + fourier_vector = nf.ifft(unit_vector) + coeffs = pw.wavedec( + fourier_vector, wavelet=wavelet_basis, level=nb_wavelet_scales + ) + coeffs, _ = pw.coeffs_to_array(coeffs) + density_1d[i] = np.max(np.abs(coeffs)) ** 2 + unit_vector[i] = 0 + + reshape = np.ones(nb_dims).astype(int) + reshape[k] = s + density_1d = density_1d.reshape(reshape) + density = density * density_1d + + density = density / np.sum(density) + return nf.ifftshift(density) diff --git a/src/mrinufft/trajectories/tools.py b/src/mrinufft/trajectories/tools.py index 5897b7f9e..cc099c07a 100644 --- a/src/mrinufft/trajectories/tools.py +++ b/src/mrinufft/trajectories/tools.py @@ -1,7 +1,7 @@ """Functions to manipulate/modify trajectories.""" import numpy as np -from scipy.interpolate import CubicSpline +from scipy.interpolate import CubicSpline, interp1d from .maths import Rv, Rx, Ry, Rz from .utils import KMAX, initialize_tilt @@ -429,6 +429,46 @@ def rewind(trajectory, Ns_transitions): return assembled_trajectory +def oversample(trajectory, new_Ns, kind="cubic"): + """ + Resample a trajectory to increase the number of samples using interpolation. + + Parameters + ---------- + trajectory : np.ndarray + The original trajectory array, where interpolation + is applied along the second axis. + new_Ns : int + The desired number of samples in the resampled trajectory. + kind : str, optional + The type of interpolation to use, such as 'linear', + 'quadratic', or 'cubic', by default "cubic". + + Returns + ------- + np.ndarray + The resampled trajectory array with ``new_Ns`` points along the second axis. + + Notes + ----- + This function uses ``scipy.interpolate.interp1d`` to perform + the interpolation along the second axis of the input `trajectory` array. + + Warnings + -------- + Using 'quadratic' or 'cubic' interpolations is likely to generate + samples located slightly beyond the original k-space limits by + making smooth transitions. + + See Also + -------- + scipy.interpolate.interp1d : The underlying interpolation function + used for resampling. 
+ """ + f = interp1d(np.linspace(0, 1, trajectory.shape[1]), trajectory, axis=1, kind=kind) + return f(np.linspace(0, 1, new_Ns)) + + #################### # FUNCTIONAL TOOLS # #################### diff --git a/src/mrinufft/trajectories/trajectory2D.py b/src/mrinufft/trajectories/trajectory2D.py index f87c63629..e930e90b3 100644 --- a/src/mrinufft/trajectories/trajectory2D.py +++ b/src/mrinufft/trajectories/trajectory2D.py @@ -27,6 +27,11 @@ def initialize_2D_radial(Nc, Ns, tilt="uniform", in_out=False): Tilt of the shots, by default "uniform" in_out : bool, optional Whether to start from the center or not, by default False + + Returns + ------- + array_like + 2D radial trajectory """ # Initialize a first shot segment = np.linspace(-1 if (in_out) else 0, 1, Ns) diff --git a/src/mrinufft/trajectories/trajectory3D.py b/src/mrinufft/trajectories/trajectory3D.py index 12dd9241c..cdcf4a0ad 100644 --- a/src/mrinufft/trajectories/trajectory3D.py +++ b/src/mrinufft/trajectories/trajectory3D.py @@ -8,12 +8,12 @@ from .maths import ( CIRCLE_PACKING_DENSITY, + EIGENVECTOR_2D_FIBONACCI, R2D, Ra, Ry, Rz, generate_fibonacci_circle, - EIGENVECTOR_2D_FIBONACCI, ) from .tools import conify, duplicate_along_axes, epify, precess, stack from .trajectory2D import initialize_2D_radial, initialize_2D_spiral From 702eb4e749d52791e1a8de39317596ee6355f1b1 Mon Sep 17 00:00:00 2001 From: Pierre-Antoine Comby Date: Thu, 9 Jan 2025 14:37:09 +0100 Subject: [PATCH 3/5] proj: update numpy and minimal python version Python 3.8 is unsupported since 2024-10. it's time to move on. --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1d594e0c0..187910627 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,8 +4,8 @@ description = "MRI Non-Cartesian Fourier Operators with multiple computation bac authors = [{name="Pierre-antoine Comby", email="pierre-antoine.comby@crans.org"}] readme = "README.md" -dependencies = ["numpy<=2.0.0", "scipy", "matplotlib", "tqdm"] -requires-python = ">=3.8" +dependencies = ["numpy", "scipy", "matplotlib", "tqdm"] +requires-python = ">=3.9" dynamic = ["version"] From 1790c0640e8e3778a5dd73e4c26564f9754d7d4e Mon Sep 17 00:00:00 2001 From: Pierre-Antoine Comby Date: Fri, 10 Jan 2025 09:09:41 +0100 Subject: [PATCH 4/5] fix: ruff UP035 --- src/mrinufft/extras/sim.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mrinufft/extras/sim.py b/src/mrinufft/extras/sim.py index 7307838ae..d72e57840 100644 --- a/src/mrinufft/extras/sim.py +++ b/src/mrinufft/extras/sim.py @@ -2,7 +2,7 @@ import numpy as np -from typing import Sequence +from collections.abc import Sequence def fse_simulation( From 3abd8d9ffafd97395920218d3544c0c4c02ccefc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillaume=20Daval-Fr=C3=A9rot?= Date: Fri, 10 Jan 2025 10:26:24 +0100 Subject: [PATCH 5/5] Clean documentation and add type hints (#228) * Clean documentation and add type hints * Apply ruff ANN rules to trajectories * Replace np.ndarray type hints with np.typing.NDArray * Fix missing import * Fix Literal use, change types to NDArray and reduce mypy errors * Add type alias for clustering coordinates --- src/mrinufft/trajectories/display.py | 130 ++++---- src/mrinufft/trajectories/gradients.py | 31 +- .../trajectories/inits/random_walk.py | 83 +++-- .../trajectories/inits/travelling_salesman.py | 99 +++--- src/mrinufft/trajectories/maths/fibonacci.py | 12 +- src/mrinufft/trajectories/maths/primes.py | 8 +- 
src/mrinufft/trajectories/maths/rotations.py | 39 ++- src/mrinufft/trajectories/maths/tsp_solver.py | 20 +- src/mrinufft/trajectories/sampling.py | 90 ++++-- src/mrinufft/trajectories/tools.py | 290 ++++++++++-------- src/mrinufft/trajectories/trajectory2D.py | 106 ++++--- src/mrinufft/trajectories/trajectory3D.py | 225 ++++++++------ src/mrinufft/trajectories/utils.py | 146 ++++----- 13 files changed, 732 insertions(+), 547 deletions(-) diff --git a/src/mrinufft/trajectories/display.py b/src/mrinufft/trajectories/display.py index 50f20344b..42e07cbe1 100644 --- a/src/mrinufft/trajectories/display.py +++ b/src/mrinufft/trajectories/display.py @@ -1,11 +1,15 @@ """Display functions for trajectories.""" +from __future__ import annotations + import itertools +from typing import Any import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.ticker as mticker import numpy as np +from numpy.typing import NDArray from .utils import ( DEFAULT_GMAX, @@ -46,7 +50,7 @@ class displayConfig: """Font size for most labels and texts, by default ``18``.""" small_fontsize: int = 14 """Font size for smaller texts, by default ``14``.""" - nb_colors = 10 + nb_colors: int = 10 """Number of colors to use in the color cycle, by default ``10``.""" palette: str = "tab10" """Name of the color palette to use, by default ``"tab10"``. @@ -58,33 +62,33 @@ class displayConfig: slewrate_point_color: str = "b" """Matplotlib color for slew rate constraint points, by default ``"b"`` (blue).""" - def __init__(self, **kwargs): + def __init__(self, **kwargs: Any) -> None: # noqa ANN401 """Update the display configuration.""" self.update(**kwargs) - def update(self, **kwargs): + def update(self, **kwargs: Any) -> None: # noqa ANN401 """Update the display configuration.""" self._old_values = {} for key, value in kwargs.items(): self._old_values[key] = getattr(displayConfig, key) setattr(displayConfig, key, value) - def reset(self): + def reset(self) -> None: """Restore the display configuration.""" for key, value in self._old_values.items(): setattr(displayConfig, key, value) delattr(self, "_old_values") - def __enter__(self): + def __enter__(self) -> displayConfig: """Enter the context manager.""" return self - def __exit__(self, *args): + def __exit__(self, *args: Any) -> None: # noqa ANN401 """Exit the context manager.""" self.reset() @classmethod - def get_colorlist(cls): + def get_colorlist(cls) -> list[str | NDArray]: """Extract a list of colors from a matplotlib palette. If the palette is continuous, the colors will be sampled from it. 
@@ -124,7 +128,7 @@ def get_colorlist(cls): ############## -def _setup_2D_ticks(figsize, fig=None): +def _setup_2D_ticks(figsize: float, fig: plt.Figure | None = None) -> plt.Axes: """Add ticks to 2D plot.""" if fig is None: fig = plt.figure(figsize=(figsize, figsize)) @@ -139,7 +143,7 @@ def _setup_2D_ticks(figsize, fig=None): return ax -def _setup_3D_ticks(figsize, fig=None): +def _setup_3D_ticks(figsize: float, fig: plt.Figure | None = None) -> plt.Axes: """Add ticks to 3D plot.""" if fig is None: fig = plt.figure(figsize=(figsize, figsize)) @@ -163,21 +167,21 @@ def _setup_3D_ticks(figsize, fig=None): def display_2D_trajectory( - trajectory, - figsize=5, - one_shot=False, - subfigure=None, - show_constraints=False, - gmax=DEFAULT_GMAX, - smax=DEFAULT_SMAX, - constraints_order=None, - **constraints_kwargs, -): + trajectory: NDArray, + figsize: float = 5, + one_shot: bool | int = False, + subfigure: plt.Figure | plt.Axes | None = None, + show_constraints: bool = False, + gmax: float = DEFAULT_GMAX, + smax: float = DEFAULT_SMAX, + constraints_order: int | str | None = None, + **constraints_kwargs: Any, # noqa ANN401 +) -> plt.Axes: """Display 2D trajectories. Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory to display. figsize : float, optional Size of the figure. @@ -204,7 +208,7 @@ def display_2D_trajectory( typically 2 or `np.inf`, following the `numpy.linalg.norm` conventions on parameter `ord`. The default is None. - **kwargs + **constraints_kwargs Acquisition parameters used to check on hardware constraints, following the parameter convention from `mrinufft.trajectories.utils.compute_gradients_and_slew_rates`. @@ -278,23 +282,23 @@ def display_2D_trajectory( def display_3D_trajectory( - trajectory, - nb_repetitions=None, - figsize=5, - per_plane=True, - one_shot=False, - subfigure=None, - show_constraints=False, - gmax=DEFAULT_GMAX, - smax=DEFAULT_SMAX, - constraints_order=None, - **constraints_kwargs, -): + trajectory: NDArray, + nb_repetitions: int | None = None, + figsize: float = 5, + per_plane: bool = True, + one_shot: bool | int = False, + subfigure: plt.Figure | plt.Axes | None = None, + show_constraints: bool = False, + gmax: float = DEFAULT_GMAX, + smax: float = DEFAULT_SMAX, + constraints_order: int | str | None = None, + **constraints_kwargs: Any, # noqa ANN401 +) -> plt.Axes: """Display 3D trajectories. Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory to display. nb_repetitions : int Number of repetitions (planes, cones, shells, etc). @@ -417,22 +421,22 @@ def display_3D_trajectory( def display_gradients_simply( - trajectory, - shot_ids=(0,), - figsize=5, - fill_area=True, - show_signal=True, - uni_signal="gray", - uni_gradient=None, - subfigure=None, -): + trajectory: NDArray, + shot_ids: tuple[int, ...] = (0,), + figsize: float = 5, + fill_area: bool = True, + show_signal: bool = True, + uni_signal: str | None = "gray", + uni_gradient: str | None = None, + subfigure: plt.Figure | None = None, +) -> tuple[plt.Axes]: """Display gradients based on trajectory of any dimension. Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory to display. - shot_ids : list of int + shot_ids : tuple[int, ...], optional Indices of the shots to display. The default is `[0]`. figsize : float, optional @@ -455,7 +459,7 @@ def display_gradients_simply( unique color given as argument or just by the default color cycle when `None`. The default is `None`. 
- subfigure: plt.Figure or plt.SubFigure, optional + subfigure: plt.Figure, optional The figure where the trajectory should be displayed. The default is `None`. @@ -531,26 +535,26 @@ def display_gradients_simply( def display_gradients( - trajectory, - shot_ids=(0,), - figsize=5, - fill_area=True, - show_signal=True, - uni_signal="gray", - uni_gradient=None, - subfigure=None, - show_constraints=False, - gmax=DEFAULT_GMAX, - smax=DEFAULT_SMAX, - constraints_order=None, - raster_time=DEFAULT_RASTER_TIME, - **constraints_kwargs, -): + trajectory: NDArray, + shot_ids: tuple[int, ...] = (0,), + figsize: float = 5, + fill_area: bool = True, + show_signal: bool = True, + uni_signal: str | None = "gray", + uni_gradient: str | None = None, + subfigure: plt.Figure | plt.Axes | None = None, + show_constraints: bool = False, + gmax: float = DEFAULT_GMAX, + smax: float = DEFAULT_SMAX, + constraints_order: int | str | None = None, + raster_time: float = DEFAULT_RASTER_TIME, + **constraints_kwargs: Any, # noqa ANN401 +) -> tuple[plt.Axes]: """Display gradients based on trajectory of any dimension. Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory to display. shot_ids : list of int Indices of the shots to display. @@ -597,7 +601,7 @@ def display_gradients( Amount of time between the acquisition of two consecutive samples in ms. The default is `DEFAULT_RASTER_TIME`. - **kwargs + **constraints_kwargs Acquisition parameters used to check on hardware constraints, following the parameter convention from `mrinufft.trajectories.utils.compute_gradients_and_slew_rates`. diff --git a/src/mrinufft/trajectories/gradients.py b/src/mrinufft/trajectories/gradients.py index 39ac934c0..ac205cab1 100644 --- a/src/mrinufft/trajectories/gradients.py +++ b/src/mrinufft/trajectories/gradients.py @@ -1,24 +1,27 @@ """Functions to improve/modify gradients.""" +from typing import Callable + import numpy as np import numpy.linalg as nl +from numpy.typing import NDArray from scipy.interpolate import CubicSpline def patch_center_anomaly( - shot_or_params, - update_shot=None, - update_parameters=None, - in_out=False, - learning_rate=1e-1, -): + shot_or_params: NDArray | tuple, + update_shot: Callable[..., NDArray] | None = None, + update_parameters: Callable[..., tuple] | None = None, + in_out: bool = False, + learning_rate: float = 1e-1, +) -> tuple[NDArray, tuple]: """Re-position samples to avoid center anomalies. Some trajectories behave slightly differently from expected when approaching definition bounds, most often the k-space center as for spirals in some cases. - This function enforces non-strictly increasing monoticity of + This function enforces non-strictly increasing monotonicity of sample distances from the center, effectively reducing slew rates and smoothing gradient transitions locally. @@ -31,17 +34,17 @@ def patch_center_anomaly( shot_or_params : np.array, list Either a single shot of shape (Ns, Nd), or a list of arbitrary arguments used by ``update_shot`` to initialize a single shot. - update_shot : function, optional + update_shot : Callable[..., NDArray], optional Function used to initialize a single shot based on parameters provided by ``update_parameters``. If None, cubic splines are used as an approximation instead, by default None - update_parameters : function, optional + update_parameters : Callable[..., tuple], optional Function used to update shot parameters when provided in ``shot_or_params`` from an updated shot and parameters. 
If None, cubic spline parameterization is used instead, by default None in_out : bool, optional - Whether the shot is going in-and-out or start from the center, + Whether the shot is going in-and-out or starts from the center, by default False learning_rate : float, optional Learning rate used in the iterative optimization process, @@ -49,9 +52,9 @@ def patch_center_anomaly( Returns ------- - array_like + NDArray N-D trajectory based on ``shot_or_params`` if a shot or - update_shot otherwise. + ``update_shot`` otherwise. list Updated parameters either in the ``shot_or_params`` format if params, or cubic spline parameterization as an array of @@ -70,7 +73,7 @@ def patch_center_anomaly( if update_shot is None or update_parameters is None: - def _default_update_parameters(shot, *parameters): + def _default_update_parameters(shot: NDArray, *parameters: list) -> list: return parameters update_parameters = _default_update_parameters @@ -114,5 +117,5 @@ def _default_update_parameters(shot, *parameters): single_shot = cbs(x_axis).T parameters = update_parameters(single_shot, *parameters) - single_shot = single_shot = update_shot(*parameters) + single_shot = update_shot(*parameters) return single_shot, parameters diff --git a/src/mrinufft/trajectories/inits/random_walk.py b/src/mrinufft/trajectories/inits/random_walk.py index da68feb1f..889fd20dd 100644 --- a/src/mrinufft/trajectories/inits/random_walk.py +++ b/src/mrinufft/trajectories/inits/random_walk.py @@ -1,17 +1,19 @@ """Trajectories based on random walks.""" +from typing import Any, Literal + import numpy as np +from numpy.typing import NDArray from ..sampling import sample_from_density from ..utils import KMAX -def _get_adjacent_neighbors_offsets(shape): - return np.concatenate([np.eye(len(shape)), -np.eye(len(shape))], axis=0).astype(int) +def _get_adjacent_neighbors_offsets(nb_dims: int) -> NDArray: + return np.concatenate([np.eye(nb_dims), -np.eye(nb_dims)], axis=0).astype(int) -def _get_neighbors_offsets(shape): - nb_dims = len(shape) +def _get_neighbors_offsets(nb_dims: int) -> NDArray: neighbors = (np.indices([3] * nb_dims) - 1).reshape((nb_dims, -1)).T nb_half = neighbors.shape[0] // 2 # Remove full zero entry @@ -20,18 +22,25 @@ def _get_neighbors_offsets(shape): def _initialize_ND_random_walk( - Nc, Ns, density, *, diagonals=True, pseudo_random=True, **sampling_kwargs -): + Nc: int, + Ns: int, + density: NDArray, + *, + diagonals: bool = True, + pseudo_random: bool = True, + **sampling_kwargs: Any, # noqa ANN401 +) -> NDArray: density = density / np.sum(density) flat_density = np.copy(density.flatten()) - shape = np.array(density.shape) + shape = density.shape + nb_dims = len(shape) mask = np.ones_like(flat_density) # Prepare neighbor offsets once offsets = ( - _get_neighbors_offsets(shape) + _get_neighbors_offsets(nb_dims) if diagonals - else _get_adjacent_neighbors_offsets(shape) + else _get_adjacent_neighbors_offsets(nb_dims) ) # Make all random draws at once for performance @@ -39,9 +48,8 @@ def _initialize_ND_random_walk( # Initialize shot starting points locations = sample_from_density(Nc, density, **sampling_kwargs) - choices = np.around((locations + KMAX) * (np.array(density.shape) - 1)).astype(int) - choices = np.ravel_multi_index(choices.T, density.shape) - # choices = np.random.choice(np.arange(len(flat_density)), size=Nc, p=flat_density) + choices = np.around((locations + KMAX) * (np.array(shape) - 1)).astype(int) + choices = np.ravel_multi_index(choices.T, shape) routes = [choices] # Walk @@ -52,7 +60,7 @@ def 
_initialize_ND_random_walk( # Find out-of-bound neighbors and ignore them invalids = (neighbors < 0).any(axis=0) | ( - neighbors >= shape[:, None, None] + neighbors >= np.array(shape)[:, None, None] ).any(axis=0) neighbors[:, invalids] = 0 invalids = invalids.T @@ -71,25 +79,30 @@ def _initialize_ND_random_walk( choices = neighbors[np.arange(Nc), indices] routes.append(choices) - # Update density to account for already drawed positions + # Update density to account for already drawn positions if pseudo_random: flat_density[choices] = ( mask[choices] * flat_density[choices] / (mask[choices] + 1) ) mask[choices] += 1 - routes = np.array(routes).T # Create trajectory from routes locations = np.indices(shape) - locations = locations.reshape((len(shape), -1)) - trajectory = np.array([locations[:, r].T for r in routes]) - trajectory = 2 * KMAX * trajectory / (shape - 1) - KMAX + locations = locations.reshape((nb_dims, -1)) + trajectory = np.array([locations[:, r].T for r in np.array(routes).T]) + trajectory = 2 * KMAX * trajectory / (np.array(shape) - 1) - KMAX return trajectory def initialize_2D_random_walk( - Nc, Ns, density, *, diagonals=True, pseudo_random=True, **sampling_kwargs -): + Nc: int, + Ns: int, + density: NDArray, + *, + diagonals: bool = True, + pseudo_random: bool = True, + **sampling_kwargs: Any, # noqa ANN401 +) -> NDArray: """Initialize a 2D random walk trajectory. This is an adaptation of the proposition from [Cha+14]_. @@ -107,31 +120,31 @@ def initialize_2D_random_walk( Number of shots Ns : int Number of samples per shot - density : array_like + density : NDArray Sampling density used to determine the walk probabilities, normalized automatically by its sum during the call for convenience. diagonals : bool, optional - Whether to draw the next walk step from the diagional neighbors + Whether to draw the next walk step from the diagonal neighbors on top of the adjacent ones. Default to ``True``. pseudo_random : bool, optional Whether to adapt the density dynamically to reduce areas already covered. The density is still statistically followed for undersampled acquisitions. Default to ``True``. - **sampling_kwargs + **sampling_kwargs : Any Sampling parameters in ``mrinufft.trajectories.sampling.sample_from_density`` used for the shot starting positions. Returns ------- - array_like + NDArray 2D random walk trajectory References ---------- .. [Cha+14] Chauffert, Nicolas, Philippe Ciuciu, Jonas Kahn, and Pierre Weiss. - "Variable density sampling with continuous trajectories." + "Variable density sampling with continuous trajectories" SIAM Journal on Imaging Sciences 7, no. 4 (2014): 1962-1992. """ if len(density.shape) != 2: @@ -147,8 +160,14 @@ def initialize_2D_random_walk( def initialize_3D_random_walk( - Nc, Ns, density, *, diagonals=True, pseudo_random=True, **sampling_kwargs -): + Nc: int, + Ns: int, + density: NDArray, + *, + diagonals: bool = True, + pseudo_random: bool = True, + **sampling_kwargs: Any, # noqa ANN401 +) -> NDArray: """Initialize a 3D random walk trajectory. This is an adaptation of the proposition from [Cha+14]_. @@ -166,31 +185,31 @@ def initialize_3D_random_walk( Number of shots Ns : int Number of samples per shot - density : array_like + density : NDArray Sampling density used to determine the walk probabilities, normalized automatically by its sum during the call for convenience. 
diagonals : bool, optional - Whether to draw the next walk step from the diagional neighbors + Whether to draw the next walk step from the diagonal neighbors on top of the adjacent ones. Default to ``True``. pseudo_random : bool, optional Whether to adapt the density dynamically to reduce areas already covered. The density is still statistically followed for undersampled acquisitions. Default to ``True``. - **sampling_kwargs + **sampling_kwargs : Any Sampling parameters in ``mrinufft.trajectories.sampling.sample_from_density`` used for the shot starting positions. Returns ------- - array_like + NDArray 3D random walk trajectory References ---------- .. [Cha+14] Chauffert, Nicolas, Philippe Ciuciu, Jonas Kahn, and Pierre Weiss. - "Variable density sampling with continuous trajectories." + "Variable density sampling with continuous trajectories" SIAM Journal on Imaging Sciences 7, no. 4 (2014): 1962-1992. """ if len(density.shape) != 3: diff --git a/src/mrinufft/trajectories/inits/travelling_salesman.py b/src/mrinufft/trajectories/inits/travelling_salesman.py index 6b4f7764b..9e3c35762 100644 --- a/src/mrinufft/trajectories/inits/travelling_salesman.py +++ b/src/mrinufft/trajectories/inits/travelling_salesman.py @@ -1,7 +1,10 @@ """Trajectories based on the Travelling Salesman Problem.""" +from typing import Any, Literal, TypeAlias + import numpy as np import numpy.linalg as nl +from numpy.typing import NDArray from scipy.interpolate import CubicSpline from tqdm.auto import tqdm @@ -9,8 +12,10 @@ from ..sampling import sample_from_density from ..tools import oversample +Coordinate: TypeAlias = Literal["x", "y", "z", "r", "phi", "theta"] + -def _get_approx_cluster_sizes(nb_total, nb_clusters): +def _get_approx_cluster_sizes(nb_total: int, nb_clusters: int) -> NDArray: # Give a list of cluster sizes close to sqrt(`nb_total`) cluster_sizes = round(nb_total / nb_clusters) * np.ones(nb_clusters).astype(int) delta_sum = nb_total - np.sum(cluster_sizes) @@ -18,7 +23,7 @@ def _get_approx_cluster_sizes(nb_total, nb_clusters): return cluster_sizes -def _sort_by_coordinate(array, coord): +def _sort_by_coordinate(array: NDArray, coord: Coordinate) -> NDArray: # Sort a list of N-D locations by a Cartesian/spherical coordinate if array.shape[-1] < 3 and coord.lower() in ["z", "theta"]: raise ValueError( @@ -47,8 +52,12 @@ def _sort_by_coordinate(array, coord): def _cluster_by_coordinate( - locations, nb_clusters, cluster_by, second_cluster_by=None, sort_by=None -): + locations: NDArray, + nb_clusters: int, + cluster_by: Coordinate, + second_cluster_by: Coordinate | None = None, + sort_by: Coordinate | None = None, +) -> NDArray: # Cluster approximately a list of N-D locations by Cartesian/spherical coordinates # Gather dimension variables nb_dims = locations.shape[-1] @@ -87,17 +96,17 @@ def _cluster_by_coordinate( def _initialize_ND_travelling_salesman( - Nc, - Ns, - density, - first_cluster_by=None, - second_cluster_by=None, - sort_by=None, - tsp_tol=1e-8, + Nc: int, + Ns: int, + density: NDArray, + first_cluster_by: Coordinate | None = None, + second_cluster_by: Coordinate | None = None, + sort_by: Coordinate | None = None, + tsp_tol: float = 1e-8, *, - verbose=False, - **sampling_kwargs, -): + verbose: bool = False, + **sampling_kwargs: Any, # noqa ANN401 +) -> NDArray: # Check arguments validity if Nc * Ns > np.prod(density.shape): raise ValueError("`density` array not large enough to pick `Nc` * `Ns` points.") @@ -134,17 +143,17 @@ def _initialize_ND_travelling_salesman( def 
initialize_2D_travelling_salesman( - Nc, - Ns, - density, - first_cluster_by=None, - second_cluster_by=None, - sort_by=None, - tsp_tol=1e-8, + Nc: int, + Ns: int, + density: NDArray, + first_cluster_by: Coordinate | None = None, + second_cluster_by: Coordinate | None = None, + sort_by: Coordinate | None = None, + tsp_tol: float = 1e-8, *, - verbose=False, - **sampling_kwargs, -): + verbose: bool = False, + **sampling_kwargs: Any, # noqa ANN401 +) -> NDArray: """ Initialize a 2D trajectory using a Travelling Salesman Problem (TSP)-based path. @@ -160,14 +169,14 @@ def initialize_2D_travelling_salesman( The number of clusters (or shots) to divide the trajectory into. Ns : int The number of points per cluster. - density : np.ndarray + density : NDArray A 2-dimensional density array from which points are sampled. - first_cluster_by : str, optional + first_cluster_by : {"x", "y", "z", "r", "phi", "theta"}, optional The coordinate used to cluster points initially, by default ``None``. - second_cluster_by : str, optional + second_cluster_by : {"x", "y", "z", "r", "phi", "theta"}, optional A secondary coordinate used for clustering within primary clusters, by default ``None``. - sort_by : str, optional + sort_by : {"x", "y", "z", "r", "phi", "theta"}, optional The coordinate by which to order points within each cluster, by default ``None``. tsp_tol : float, optional @@ -180,7 +189,7 @@ def initialize_2D_travelling_salesman( Returns ------- - np.ndarray + NDArray A 2D array representing the TSP-ordered trajectory. Raises @@ -192,7 +201,7 @@ def initialize_2D_travelling_salesman( ---------- .. [Cha+14] Chauffert, Nicolas, Philippe Ciuciu, Jonas Kahn, and Pierre Weiss. - "Variable density sampling with continuous trajectories." + "Variable density sampling with continuous trajectories" SIAM Journal on Imaging Sciences 7, no. 4 (2014): 1962-1992. """ if len(density.shape) != 2: @@ -211,17 +220,17 @@ def initialize_2D_travelling_salesman( def initialize_3D_travelling_salesman( - Nc, - Ns, - density, - first_cluster_by=None, - second_cluster_by=None, - sort_by=None, - tsp_tol=1e-8, + Nc: int, + Ns: int, + density: NDArray, + first_cluster_by: Coordinate | None = None, + second_cluster_by: Coordinate | None = None, + sort_by: Coordinate | None = None, + tsp_tol: float = 1e-8, *, - verbose=False, - **sampling_kwargs, -): + verbose: bool = False, + **sampling_kwargs: Any, # noqa ANN401 +) -> NDArray: """ Initialize a 3D trajectory using a Travelling Salesman Problem (TSP)-based path. @@ -239,14 +248,14 @@ def initialize_3D_travelling_salesman( The number of clusters (or shots) to divide the trajectory into. Ns : int The number of points per cluster. - density : np.ndarray + density : NDArray A 3-dimensional density array from which points are sampled. - first_cluster_by : str, optional + first_cluster_by : {"x", "y", "z", "r", "phi", "theta"}, optional The coordinate used to cluster points initially, by default ``None``. - second_cluster_by : str, optional + second_cluster_by : {"x", "y", "z", "r", "phi", "theta"}, optional A secondary coordinate used for clustering within primary clusters, by default ``None``. - sort_by : str, optional + sort_by : {"x", "y", "z", "r", "phi", "theta"}, optional The coordinate by which to order points within each cluster, by default ``None``. tsp_tol : float, optional @@ -259,7 +268,7 @@ def initialize_3D_travelling_salesman( Returns ------- - np.ndarray + NDArray A 3D array representing the TSP-ordered trajectory. 
Raises diff --git a/src/mrinufft/trajectories/maths/fibonacci.py b/src/mrinufft/trajectories/maths/fibonacci.py index d7cc20a88..69e621210 100644 --- a/src/mrinufft/trajectories/maths/fibonacci.py +++ b/src/mrinufft/trajectories/maths/fibonacci.py @@ -3,7 +3,7 @@ import numpy as np -def is_from_fibonacci_sequence(n): +def is_from_fibonacci_sequence(n: int) -> bool: """Check if an integer belongs to the Fibonacci sequence. An integer belongs to the Fibonacci sequence if either @@ -21,14 +21,14 @@ def is_from_fibonacci_sequence(n): Whether or not ``n`` belongs to the Fibonacci sequence. """ - def _is_perfect_square(n): + def _is_perfect_square(n: int) -> bool: r = int(np.sqrt(n)) return r * r == n return _is_perfect_square(5 * n**2 + 4) or _is_perfect_square(5 * n**2 - 4) -def get_closest_fibonacci_number(x): +def get_closest_fibonacci_number(x: float) -> int: """Provide the closest Fibonacci number. Parameters @@ -52,7 +52,7 @@ def get_closest_fibonacci_number(x): return xf -def generate_fibonacci_lattice(nb_points, epsilon=0.25): +def generate_fibonacci_lattice(nb_points: int, epsilon: float = 0.25) -> np.ndarray: """Generate 2D Cartesian coordinates using the Fibonacci lattice. Place 2D points over a 1x1 square following the Fibonacci lattice. @@ -78,7 +78,7 @@ def generate_fibonacci_lattice(nb_points, epsilon=0.25): return fibonacci_square -def generate_fibonacci_circle(nb_points, epsilon=0.25): +def generate_fibonacci_circle(nb_points: int, epsilon: float = 0.25) -> np.ndarray: """Generate 2D Cartesian coordinates shaped as Fibonacci spirals. Place 2D points structured as Fibonacci spirals by distorting @@ -106,7 +106,7 @@ def generate_fibonacci_circle(nb_points, epsilon=0.25): return fibonacci_circle -def generate_fibonacci_sphere(nb_points, epsilon=0.25): +def generate_fibonacci_sphere(nb_points: int, epsilon: float = 0.25) -> np.ndarray: """Generate 3D Cartesian coordinates as a Fibonacci sphere. Place 3D points almost evenly over a sphere surface of radius diff --git a/src/mrinufft/trajectories/maths/primes.py b/src/mrinufft/trajectories/maths/primes.py index 3a9df50cb..9db01c483 100644 --- a/src/mrinufft/trajectories/maths/primes.py +++ b/src/mrinufft/trajectories/maths/primes.py @@ -3,7 +3,9 @@ import numpy as np -def compute_coprime_factors(Nc, length, start=1, update=1): +def compute_coprime_factors( + Nc: int, length: int, start: int = 1, update: int = 1 +) -> list[int]: """Compute a list of coprime factors of Nc. Parameters @@ -19,11 +21,11 @@ def compute_coprime_factors(Nc, length, start=1, update=1): Returns ------- - list + list[int] List of coprime factors of Nc. """ count = start - coprimes = [] + coprimes: list[int] = [] while len(coprimes) < length: # Check greatest common divider (gcd) if np.gcd(Nc, count) == 1: diff --git a/src/mrinufft/trajectories/maths/rotations.py b/src/mrinufft/trajectories/maths/rotations.py index 568780786..f91cd37b8 100644 --- a/src/mrinufft/trajectories/maths/rotations.py +++ b/src/mrinufft/trajectories/maths/rotations.py @@ -2,9 +2,10 @@ import numpy as np import numpy.linalg as nl +from numpy.typing import NDArray -def R2D(theta): +def R2D(theta: float) -> NDArray: """Initialize 2D rotation matrix. Parameters @@ -14,13 +15,13 @@ def R2D(theta): Returns ------- - np.ndarray + NDArray 2D rotation matrix. """ return np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) -def Rx(theta): +def Rx(theta: float) -> NDArray: """Initialize 3D rotation matrix around x axis. 
Parameters @@ -30,7 +31,7 @@ def Rx(theta): Returns ------- - np.ndarray + NDArray 3D rotation matrix. """ return np.array( @@ -42,7 +43,7 @@ def Rx(theta): ) -def Ry(theta): +def Ry(theta: float) -> NDArray: """Initialize 3D rotation matrix around y axis. Parameters @@ -52,7 +53,7 @@ def Ry(theta): Returns ------- - np.ndarray + NDArray 3D rotation matrix. """ return np.array( @@ -64,7 +65,7 @@ def Ry(theta): ) -def Rz(theta): +def Rz(theta: float) -> NDArray: """Initialize 3D rotation matrix around z axis. Parameters @@ -74,7 +75,7 @@ def Rz(theta): Returns ------- - np.ndarray + NDArray 3D rotation matrix. """ return np.array( @@ -86,7 +87,13 @@ def Rz(theta): ) -def Rv(v1, v2, normalize=True, eps=1e-8): +def Rv( + v1: NDArray, + v2: NDArray, + eps: float = 1e-8, + *, + normalize: bool = True, +) -> NDArray: """Initialize 3D rotation matrix from two vectors. Initialize a 3D rotation matrix from two vectors using Rodrigues's rotation @@ -97,16 +104,18 @@ def Rv(v1, v2, normalize=True, eps=1e-8): Parameters ---------- - v1 : np.ndarray + v1 : NDArray Source vector. - v2 : np.ndarray + v2 : NDArray Target vector. + eps : float, optional + Tolerance to consider two vectors as colinear. The default is 1e-8. normalize : bool, optional Normalize the vectors. The default is True. Returns ------- - np.ndarray + NDArray 3D rotation matrix. """ # Check for colinearity, not handled by Rodrigues' coefficients @@ -122,7 +131,7 @@ def Rv(v1, v2, normalize=True, eps=1e-8): return np.identity(3) + cross_matrix + cross_matrix @ cross_matrix / (1 + cos_theta) -def Ra(vector, theta): +def Ra(vector: NDArray, theta: float) -> NDArray: """Initialize 3D rotation matrix around an arbitrary vector. Initialize a 3D rotation matrix to rotate around `vector` by an angle `theta`. @@ -130,14 +139,14 @@ def Ra(vector, theta): Parameters ---------- - vector : np.ndarray + vector : NDArray Vector defining the rotation axis, automatically normalized. theta : float Angle in radians defining the rotation around `vector`. Returns ------- - np.ndarray + NDArray 3D rotation matrix. """ cos_t = np.cos(theta) diff --git a/src/mrinufft/trajectories/maths/tsp_solver.py b/src/mrinufft/trajectories/maths/tsp_solver.py index b4bcf859d..0b40a53aa 100644 --- a/src/mrinufft/trajectories/maths/tsp_solver.py +++ b/src/mrinufft/trajectories/maths/tsp_solver.py @@ -1,29 +1,31 @@ """Solver for the Travelling Salesman Problem.""" import numpy as np +from numpy.typing import NDArray -def solve_tsp_with_2opt(locations, improvement_threshold=1e-8): +def solve_tsp_with_2opt( + locations: NDArray, improvement_threshold: float = 1e-8 +) -> NDArray: """Solve the TSP problem using a 2-opt approach. A sub-optimal solution to the Travelling Salesman Problem (TSP) is provided using the 2-opt approach in O(n²) where chunks of an initially random route are reversed, and selected if the - total distance is reduced. As a result the route solution + total distance is reduced. As a result, the route solution does not cross its own path in 2D. Parameters ---------- - locations : array_like - An array of N points with shape (N, D) with D - the space dimension. - improvement_threshold: float - Threshold used as progress criterion to stop the optimization - process. + locations : NDArray + An array of N points with shape (N, D) where D is the space dimension. + improvement_threshold : float, optional + Threshold used as progress criterion to stop the optimization process. + The default is 1e-8. 
Returns ------- - array_like + NDArray The new positions order of shape (N,). """ route = np.arange(locations.shape[0]) diff --git a/src/mrinufft/trajectories/sampling.py b/src/mrinufft/trajectories/sampling.py index 14ed18fc0..6b4c643ad 100644 --- a/src/mrinufft/trajectories/sampling.py +++ b/src/mrinufft/trajectories/sampling.py @@ -1,17 +1,27 @@ """Sampling densities and methods.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Literal + +if TYPE_CHECKING: + import pywt as pw + import numpy as np import numpy.fft as nf -import numpy.linalg as nl -import numpy.random as nr +from numpy.typing import NDArray from tqdm.auto import tqdm from .utils import KMAX def sample_from_density( - nb_samples, density, method="random", *, dim_compensation="auto" -): + nb_samples: int, + density: NDArray, + method: Literal["random", "lloyd"] = "random", + *, + dim_compensation: Literal["auto"] | bool = "auto", +) -> NDArray: """ Sample points based on a given density distribution. @@ -19,14 +29,14 @@ def sample_from_density( ---------- nb_samples : int The number of samples to draw. - density : np.ndarray + density : NDArray An array representing the density distribution from which samples are drawn, normalized automatically by its sum during the call for convenience. - method : str, optional + method : Literal["random", "lloyd"], optional The sampling method to use, either 'random' for random sampling over the discrete grid defined by the density or 'lloyd' for Lloyd's algorithm over a continuous space, by default "random". - dim_compensation : str, bool, optional + dim_compensation : Literal["auto"], bool, optional Whether to apply a specific dimensionality compensation introduced in [Cha+14]_. An exponent ``N/(N-1)`` with ``N`` the number of dimensions in ``density`` is applied to fix the observed @@ -37,7 +47,7 @@ def sample_from_density( Returns ------- - np.ndarray + NDArray An array of range-normalized sampled locations. Raises @@ -50,7 +60,7 @@ def sample_from_density( ---------- .. [Cha+14] Chauffert, Nicolas, Philippe Ciuciu, Jonas Kahn, and Pierre Weiss. - "Variable density sampling with continuous trajectories." + "Variable density sampling with continuous trajectories" SIAM Journal on Imaging Sciences 7, no. 4 (2014): 1962-1992. 
""" try: @@ -63,7 +73,7 @@ def sample_from_density( ) from err # Define dimension variables - shape = np.array(density.shape) + shape = density.shape nb_dims = len(shape) max_nb_samples = np.prod(shape) density = density / np.sum(density) @@ -81,7 +91,7 @@ def sample_from_density( density = density / np.sum(density) # Sample using specified method - rng = nr.default_rng() + rng = np.random.default_rng() if method == "random": choices = rng.choice( np.arange(max_nb_samples), @@ -91,7 +101,7 @@ def sample_from_density( ) locations = np.indices(shape).reshape((nb_dims, -1))[:, choices] locations = locations.T + 0.5 - locations = locations / shape[None, :] + locations = locations / np.array(shape)[None, :] locations = 2 * KMAX * locations - KMAX elif method == "lloyd": kmeans = ( @@ -100,17 +110,22 @@ def sample_from_density( else BisectingKMeans(n_clusters=nb_samples) ) kmeans.fit( - np.indices(density.shape).reshape((nb_dims, -1)).T, + np.indices(shape).reshape((nb_dims, -1)).T, sample_weight=density.flatten(), ) - locations = kmeans.cluster_centers_ - np.array(density.shape) / 2 + locations = kmeans.cluster_centers_ - np.array(shape) / 2 locations = KMAX * locations / np.max(np.abs(locations)) else: raise ValueError(f"Unknown sampling method {method}.") return locations -def create_cutoff_decay_density(shape, cutoff, decay, resolution=None): +def create_cutoff_decay_density( + shape: tuple[int, ...], + cutoff: float, + decay: float, + resolution: NDArray | None = None, +) -> NDArray: """ Create a density with central plateau and polynomial decay. @@ -120,7 +135,7 @@ def create_cutoff_decay_density(shape, cutoff, decay, resolution=None): Parameters ---------- - shape : tuple of int + shape : tuple[int, ...] The shape of the density grid, analog to the field-of-view as opposed to ``resolution`` below. cutoff : float @@ -128,13 +143,13 @@ def create_cutoff_decay_density(shape, cutoff, decay, resolution=None): and 1 within which density remains uniform and beyond which it decays. decay : float The polynomial decay in density beyond the cutoff ratio. - resolution : np.ndarray, optional + resolution : NDArray, optional Resolution scaling factors for each dimension of the density grid, by default ``None``. Returns ------- - np.ndarray + NDArray A density array with values decaying based on the specified cutoff ratio and decay rate. @@ -146,7 +161,6 @@ def create_cutoff_decay_density(shape, cutoff, decay, resolution=None): magnetic resonance imaging." IEEE Transactions on Medical Imaging 41, no. 8 (2022): 2105-2117. """ - shape = np.array(shape) nb_dims = len(shape) if not resolution: @@ -156,7 +170,7 @@ def create_cutoff_decay_density(shape, cutoff, decay, resolution=None): for i in range(nb_dims): differences[i] = differences[i] + 0.5 - shape[i] / 2 differences[i] = differences[i] / shape[i] / resolution[i] - distances = nl.norm(differences, axis=0) + distances = np.linalg.norm(differences, axis=0) cutoff = cutoff * np.max(differences) if cutoff else np.min(differences) density = np.ones(shape) @@ -167,7 +181,9 @@ def create_cutoff_decay_density(shape, cutoff, decay, resolution=None): return density -def create_polynomial_density(shape, decay, resolution=None): +def create_polynomial_density( + shape: tuple[int, ...], decay: float, resolution: NDArray | None = None +) -> NDArray: """ Create a density with polynomial decay from the center. @@ -177,13 +193,13 @@ def create_polynomial_density(shape, decay, resolution=None): The shape of the density grid. 
decay : float The exponent that controls the rate of decay for density. - resolution : np.ndarray, optional + resolution : NDArray, optional Resolution scaling factors for each dimension of the density grid, by default None. Returns ------- - np.ndarray + NDArray A density array with polynomial decay. """ return create_cutoff_decay_density( @@ -191,7 +207,7 @@ def create_polynomial_density(shape, decay, resolution=None): ) -def create_energy_density(dataset): +def create_energy_density(dataset: NDArray) -> NDArray: """ Create a density based on energy in the Fourier spectrum. @@ -200,7 +216,7 @@ def create_energy_density(dataset): Parameters ---------- - dataset : np.ndarray + dataset : NDArray The dataset from which to calculate the density based on its Fourier transform, with an expected shape (nb_volumes, dim_1, ..., dim_N). @@ -208,7 +224,7 @@ def create_energy_density(dataset): Returns ------- - np.ndarray + NDArray A density array derived from the mean energy in the Fourier domain of the input dataset. """ @@ -221,7 +237,13 @@ def create_energy_density(dataset): return density -def create_chauffert_density(shape, wavelet_basis, nb_wavelet_scales, verbose=False): +def create_chauffert_density( + shape: tuple[int, ...], + wavelet_basis: str | pw.Wavelet, + nb_wavelet_scales: int, + *, + verbose: bool = False, +) -> NDArray: """Create a density based on Chauffert's method. This is a reproduction of the proposition from [CCW13]_. @@ -231,7 +253,7 @@ def create_chauffert_density(shape, wavelet_basis, nb_wavelet_scales, verbose=Fa Parameters ---------- - shape : tuple of int + shape : tuple[int, ...] The shape of the density grid. wavelet_basis : str, pywt.Wavelet The wavelet basis to use for wavelet decomposition, either @@ -244,7 +266,7 @@ def create_chauffert_density(shape, wavelet_basis, nb_wavelet_scales, verbose=Fa Returns ------- - np.ndarray + NDArray A density array created based on wavelet transform coefficients. See Also @@ -290,7 +312,11 @@ def create_chauffert_density(shape, wavelet_basis, nb_wavelet_scales, verbose=Fa return nf.ifftshift(density) -def create_fast_chauffert_density(shape, wavelet_basis, nb_wavelet_scales): +def create_fast_chauffert_density( + shape: tuple[int, ...], + wavelet_basis: str | pw.Wavelet, + nb_wavelet_scales: int, +) -> NDArray: """Create a density based on an approximated Chauffert method. This implementation is based on this @@ -306,7 +332,7 @@ def create_fast_chauffert_density(shape, wavelet_basis, nb_wavelet_scales): Parameters ---------- - shape : tuple of int + shape : tuple[int, ...] The shape of the density grid. wavelet_basis : str, pywt.Wavelet The wavelet basis to use for wavelet decomposition, either @@ -317,7 +343,7 @@ def create_fast_chauffert_density(shape, wavelet_basis, nb_wavelet_scales): Returns ------- - np.ndarray + NDArray A density array created using a faster approximation based on 1D projections of the wavelet transform. 
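
As a quick check of the sampling API annotated above, here is a minimal usage sketch assuming only the signatures shown in this diff; the grid shape, cutoff, decay, and sample count below are illustrative values, not library defaults:

    from mrinufft.trajectories.sampling import (
        create_cutoff_decay_density,
        sample_from_density,
    )

    # Illustrative 2D density: uniform plateau up to 20% of the k-space
    # radius, then a quadratic decay towards the edges.
    density = create_cutoff_decay_density(shape=(128, 128), cutoff=0.2, decay=2.0)

    # Draw range-normalized k-space locations following that density;
    # method="lloyd" would instead spread points with a weighted k-means.
    locations = sample_from_density(4096, density, method="random")
    assert locations.shape == (4096, 2)
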
diff --git a/src/mrinufft/trajectories/tools.py b/src/mrinufft/trajectories/tools.py index cc099c07a..02ca23ae0 100644 --- a/src/mrinufft/trajectories/tools.py +++ b/src/mrinufft/trajectories/tools.py @@ -1,6 +1,9 @@ """Functions to manipulate/modify trajectories.""" +from typing import Any, Callable, Literal + import numpy as np +from numpy.typing import NDArray from scipy.interpolate import CubicSpline, interp1d from .maths import Rv, Rx, Ry, Rz @@ -11,23 +14,29 @@ ################ -def stack(trajectory, nb_stacks, z_tilt=None, hard_bounded=True): +def stack( + trajectory: NDArray, + nb_stacks: int, + z_tilt: str | float | None = None, + *, + hard_bounded: bool = True, +) -> NDArray: """Stack 2D or 3D trajectories over the :math:`k_z`-axis. Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory in 2D or 3D to stack. nb_stacks : int Number of stacks repeating the provided trajectory. - z_tilt : str, optional + z_tilt : str | float, optional Tilt of the stacks, by default `None`. hard_bounded : bool, optional Whether the stacks should be strictly within the limits of the k-space. Returns ------- - array_like + NDArray Stacked trajectory. """ # Check dimensionality and initialize output @@ -55,25 +64,31 @@ def stack(trajectory, nb_stacks, z_tilt=None, hard_bounded=True): return new_trajectory.reshape(nb_stacks * Nc, Ns, 3) -def rotate(trajectory, nb_rotations, x_tilt=None, y_tilt=None, z_tilt=None): +def rotate( + trajectory: NDArray, + nb_rotations: int, + x_tilt: str | float | None = None, + y_tilt: str | float | None = None, + z_tilt: str | float | None = None, +) -> NDArray: """Rotate 2D or 3D trajectories over the different axes. Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory in 2D or 3D to rotate. nb_rotations : int Number of rotations repeating the provided trajectory. - x_tilt : str, optional + x_tilt : str | float, optional Tilt of the trajectory over the :math:`k_x`-axis, by default `None`. - y_tilt : str, optional + y_tilt : str | float, optional Tilt of the trajectory over the :math:`k_y`-axis, by default `None`. - z_tilt : str, optional + z_tilt : str | float, optional Tilt of the trajectory over the :math:`k_z`-axis, by default `None`. Returns ------- - array_like + NDArray Rotated trajectory. """ # Check dimensionality and initialize output @@ -96,33 +111,33 @@ def rotate(trajectory, nb_rotations, x_tilt=None, y_tilt=None, z_tilt=None): def precess( - trajectory, - nb_rotations, - tilt="golden", - half_sphere=False, - partition="axial", - axis=None, -): + trajectory: NDArray, + nb_rotations: int, + tilt: str | float = "golden", + half_sphere: bool = False, + partition: Literal["axial", "polar"] = "axial", + axis: int | NDArray | None = None, +) -> NDArray: """Rotate trajectories as a precession around the :math:`k_z`-axis. Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory in 2D or 3D to rotate. nb_rotations : int Number of rotations repeating the provided trajectory while precessing. - tilt : str, optional + tilt : str | float, optional Angle tilt between consecutive rotations around the :math:`k_z`-axis, by default "golden". half_sphere : bool, optional Whether the precession should be limited to the upper half of the k-space sphere. It is typically used for in-out trajectories or planes. 
- partition : str, optional + partition : Literal["axial", "polar"], optional Partition type between an "axial" or "polar" split of the :math:`k_z`-axis, designating whether the axis should be fragmented by radius or angle respectively, by default "axial". - axis : int, array_like, optional + axis : int, NDArray, optional Axis selected for alignment reference when rotating the trajectory around the :math:`k_z`-axis, generally corresponding to the shot direction for single shot ``trajectory`` inputs. It can either @@ -132,7 +147,7 @@ def precess( Returns ------- - array_like + NDArray Precessed trajectory. """ # Check for partition option error @@ -175,22 +190,22 @@ def precess( def conify( - trajectory, - nb_cones, - z_tilt=None, - in_out=False, - max_angle=np.pi / 2, - borderless=True, -): + trajectory: NDArray, + nb_cones: int, + z_tilt: str | float | None = None, + in_out: bool = False, + max_angle: float = np.pi / 2, + borderless: bool = True, +) -> NDArray: """Distort 2D or 3D trajectories into cones along the :math:`k_z`-axis. Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory to conify. nb_cones : int Number of cones repeating the provided trajectory. - z_tilt : str, optional + z_tilt : str | float, optional Tilt of the trajectory over the :math:`k_z`-axis, by default `None`. in_out : bool, optional Whether to account for the in-out nature of some trajectories @@ -203,7 +218,7 @@ def conify( Returns ------- - array_like + NDArray Conified trajectory. """ # Check dimensionality and initialize output @@ -253,7 +268,13 @@ def conify( return new_trajectory -def epify(trajectory, Ns_transitions, nb_trains, reverse_odd_shots=False): +def epify( + trajectory: NDArray, + Ns_transitions: int, + nb_trains: int, + *, + reverse_odd_shots: bool = False, +) -> NDArray: """Create multi-readout shots from trajectory composed of single-readouts. Assemble multiple single-readout shots together by adding transition @@ -261,7 +282,7 @@ def epify(trajectory, Ns_transitions, nb_trains, reverse_odd_shots=False): Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory to change by prolonging and merging the shots. Ns_transitions : int Number of samples/steps between the merged readouts. @@ -274,7 +295,7 @@ def epify(trajectory, Ns_transitions, nb_trains, reverse_odd_shots=False): Returns ------- - array_like + NDArray Trajectory with fewer but longer multi-readout shots. """ Nc, Ns, Nd = trajectory.shape @@ -302,12 +323,10 @@ def epify(trajectory, Ns_transitions, nb_trains, reverse_odd_shots=False): for i_c in range(nb_trains): spline = CubicSpline(source_sample_ids, np.concatenate(trajectory[i_c], axis=0)) assembled_trajectory.append(spline(target_sample_ids)) - assembled_trajectory = np.array(assembled_trajectory) - - return assembled_trajectory + return np.array(assembled_trajectory) -def unepify(trajectory, Ns_readouts, Ns_transitions): +def unepify(trajectory: NDArray, Ns_readouts: int, Ns_transitions: int) -> NDArray: """Recover single-readout shots from multi-readout trajectory. Reformat an EPI-like trajectory with multiple readouts and transitions @@ -319,7 +338,7 @@ def unepify(trajectory, Ns_readouts, Ns_transitions): Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory to reduce by discarding transitions between readouts. Ns_readouts : int Number of samples within a single readout. 
@@ -328,7 +347,7 @@ def unepify(trajectory, Ns_readouts, Ns_transitions): Returns ------- - array_like + NDArray Trajectory with more but shorter single shots. """ Nc, Ns, Nd = trajectory.shape @@ -349,7 +368,7 @@ def unepify(trajectory, Ns_readouts, Ns_transitions): return trajectory -def prewind(trajectory, Ns_transitions): +def prewind(trajectory: NDArray, Ns_transitions: int) -> NDArray: """Add pre-winding/positioning to the trajectory. The trajectory is extended to start before the readout @@ -358,7 +377,7 @@ def prewind(trajectory, Ns_transitions): Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory to extend with rewind gradients. Ns_transitions : int Number of pre-winding/positioning steps used to leave the @@ -366,7 +385,7 @@ def prewind(trajectory, Ns_transitions): Returns ------- - array_like + NDArray Extended trajectory with pre-winding/positioning. """ Nc, Ns, Nd = trajectory.shape @@ -384,12 +403,10 @@ def prewind(trajectory, Ns_transitions): np.concatenate([np.zeros((2, Nd)), trajectory[i_c]], axis=0), ) assembled_trajectory.append(spline(target_sample_ids)) - assembled_trajectory = np.array(assembled_trajectory) - - return assembled_trajectory + return np.array(assembled_trajectory) -def rewind(trajectory, Ns_transitions): +def rewind(trajectory: NDArray, Ns_transitions: int) -> NDArray: """Add rewinding to the trajectory. The trajectory is extended to come back to the k-space center @@ -397,14 +414,14 @@ def rewind(trajectory, Ns_transitions): Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory to extend with rewind gradients. Ns_transitions : int Number of rewinding steps used to come back to the k-space center. Returns ------- - array_like + NDArray Extended trajectory with rewinding. """ Nc, Ns, Nd = trajectory.shape @@ -424,29 +441,31 @@ def rewind(trajectory, Ns_transitions): np.concatenate([trajectory[i_c], np.zeros((2, Nd))], axis=0), ) assembled_trajectory.append(spline(target_sample_ids)) - assembled_trajectory = np.array(assembled_trajectory) + return np.array(assembled_trajectory) - return assembled_trajectory - -def oversample(trajectory, new_Ns, kind="cubic"): +def oversample( + trajectory: NDArray, + new_Ns: int, + kind: Literal["linear", "quadratic", "cubic"] = "cubic", +) -> NDArray: """ Resample a trajectory to increase the number of samples using interpolation. Parameters ---------- - trajectory : np.ndarray + trajectory : NDArray The original trajectory array, where interpolation is applied along the second axis. new_Ns : int The desired number of samples in the resampled trajectory. - kind : str, optional + kind : Literal, optional The type of interpolation to use, such as 'linear', 'quadratic', or 'cubic', by default "cubic". Returns ------- - np.ndarray + NDArray The resampled trajectory array with ``new_Ns`` points along the second axis. Notes @@ -475,32 +494,36 @@ def oversample(trajectory, new_Ns, kind="cubic"): def stack_spherically( - trajectory_func, Nc, nb_stacks, z_tilt=None, hard_bounded=True, **traj_kwargs -): + trajectory_func: Callable[..., NDArray], + Nc: int, + nb_stacks: int, + z_tilt: str | float | None = None, + hard_bounded: bool = True, + **traj_kwargs: Any, # noqa ANN401 +) -> NDArray: """Stack 2D or 3D trajectories over the :math:`k_z`-axis to make a sphere. Parameters ---------- - trajectory_func : function + trajectory_func : Callable[..., NDArray] Trajectory function that should return an array-like with the usual (Nc, Ns, Nd) size. 
Nc : int - Number of shots to use for the whole spherically - stacked trajectory. + Number of shots to use for the whole spherically stacked trajectory. nb_stacks : int Number of stacks of trajectories. - z_tilt : str, optional + z_tilt : str | float, optional Tilt of the stacks, by default `None`. hard_bounded : bool, optional - Whether the stacks should be strictly within the limits of the k-space, - by default `True`. - **kwargs - Trajectory initialization parameters for the function provided - with `trajectory_func`. + Whether the stacks should be strictly within the limits + of the k-space, by default `True`. + **traj_kwargs : Any + Trajectory initialization parameters for the function + provided with `trajectory_func`. Returns ------- - array_like + NDArray Stacked trajectory. """ # Handle argument errors @@ -547,52 +570,45 @@ def stack_spherically( # Concatenate or handle varying Ns value Ns_values = np.array([stk.shape[1] for stk in new_trajectory]) if (Ns_values == Ns_values[0]).all(): - new_trajectory = np.concatenate(new_trajectory, axis=0) - new_trajectory = new_trajectory.reshape(Nc, Ns_values[0], 3) - else: - new_trajectory = np.concatenate( - [stk.reshape((-1, 3)) for stk in new_trajectory], axis=0 - ) - - return new_trajectory + output = np.concatenate(new_trajectory, axis=0) + return output.reshape(Nc, Ns_values[0], 3) + return np.concatenate([stk.reshape((-1, 3)) for stk in new_trajectory], axis=0) def shellify( - trajectory_func, - Nc, - nb_shells, - z_tilt="golden", - hemisphere_mode="symmetric", - **traj_kwargs, -): + trajectory_func: Callable[..., NDArray], + Nc: int, + nb_shells: int, + z_tilt: str | float = "golden", + hemisphere_mode: Literal["symmetric", "reversed"] = "symmetric", + **traj_kwargs: Any, # noqa ANN401 +) -> NDArray: """Stack 2D or 3D trajectories over the :math:`k_z`-axis to make a sphere. Parameters ---------- - trajectory_func : function - Trajectory function that should return an array-like - with the usual (Nc, Ns, Nd) size. + trajectory_func : Callable[..., NDArray] + Trajectory function that should return an array-like with the usual + (Nc, Ns, Nd) size. Nc : int - Number of shots to use for the whole spherically - stacked trajectory. + Number of shots to use for the whole spherically stacked trajectory. nb_shells : int - Number of shells of distorded trajectories. - z_tilt : str, float, optional + Number of shells of distorted trajectories. + z_tilt : str | float, optional Tilt of the shells, by default "golden". - hemisphere_mode : str, optional - Define how the lower hemisphere should be oriented - relatively to the upper one, with "symmetric" providing - a :math:`k_x-k_y` planar symmetry by changing the polar angle, - and with "reversed" promoting continuity (for example - in spirals) by reversing the azimuth angle. + hemisphere_mode : Literal["symmetric", "reversed"], optional + Define how the lower hemisphere should be oriented relatively to the + upper one, with "symmetric" providing a :math:`k_x-k_y` planar symmetry + by changing the polar angle, and with "reversed" promoting continuity + (for example in spirals) by reversing the azimuth angle. The default is "symmetric". - **kwargs - Trajectory initialization parameters for the function provided - with `trajectory_func`. + **traj_kwargs : Any + Trajectory initialization parameters for the function + provided with `trajectory_func`. Returns ------- - array_like + NDArray Concentric shell trajectory. 
""" # Handle argument errors @@ -643,14 +659,9 @@ def shellify( # Concatenate or handle varying Ns value Ns_values = np.array([hem.shape[1] for hem in new_trajectory]) if (Ns_values == Ns_values[0]).all(): - new_trajectory = np.concatenate(new_trajectory, axis=0) - new_trajectory = new_trajectory.reshape(Nc, Ns_values[0], 3) - else: - new_trajectory = np.concatenate( - [hem.reshape((-1, 3)) for hem in new_trajectory], axis=0 - ) - - return new_trajectory + output = np.concatenate(new_trajectory, axis=0) + return output.reshape(Nc, Ns_values[0], 3) + return np.concatenate([hem.reshape((-1, 3)) for hem in new_trajectory], axis=0) ######### @@ -658,7 +669,9 @@ def shellify( ######### -def duplicate_along_axes(trajectory, axes=(0, 1, 2)): +def duplicate_along_axes( + trajectory: NDArray, axes: tuple[int, ...] = (0, 1, 2) +) -> NDArray: """ Duplicate a trajectory along the specified axes. @@ -668,14 +681,14 @@ def duplicate_along_axes(trajectory, axes=(0, 1, 2)): Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory to duplicate. - axes : tuple, optional + axes : tuple[int, ...], optional Axes along which to duplicate the trajectory, by default (0, 1, 2) Returns ------- - array_like + NDArray Duplicated trajectory along the specified axes. """ # Copy input trajectory along other axes @@ -690,12 +703,24 @@ def duplicate_along_axes(trajectory, axes=(0, 1, 2)): dp_trajectory = np.copy(trajectory) dp_trajectory[..., [2, 0]] = dp_trajectory[..., [0, 2]] new_trajectory.append(dp_trajectory) - new_trajectory = np.concatenate(new_trajectory, axis=0) - return new_trajectory + return np.concatenate(new_trajectory, axis=0) -def _radialize_center_out(trajectory, nb_samples): - """Radialize a trajectory from the center to the outside.""" +def _radialize_center_out(trajectory: NDArray, nb_samples: int) -> NDArray: + """Radialize a trajectory from the center to the outside. + + Parameters + ---------- + trajectory : NDArray + Trajectory to radialize. + nb_samples : int + Number of samples to radialize from the center. + + Returns + ------- + NDArray + Radialized trajectory. + """ Nc, Ns = trajectory.shape[:2] new_trajectory = np.copy(trajectory) for i in range(Nc): @@ -706,8 +731,21 @@ def _radialize_center_out(trajectory, nb_samples): return new_trajectory -def _radialize_in_out(trajectory, nb_samples): - """Radialize a trajectory from the inside to the outside.""" +def _radialize_in_out(trajectory: NDArray, nb_samples: int) -> NDArray: + """Radialize a trajectory from the inside to the outside. + + Parameters + ---------- + trajectory : NDArray + Trajectory to radialize. + nb_samples : int + Number of samples to radialize from the inside out. + + Returns + ------- + NDArray + Radialized trajectory. + """ Nc, Ns = trajectory.shape[:2] new_trajectory = np.copy(trajectory) first, half, second = (Ns - nb_samples) // 2, Ns // 2, (Ns + nb_samples) // 2 @@ -723,20 +761,26 @@ def _radialize_in_out(trajectory, nb_samples): return new_trajectory -def radialize_center(trajectory, nb_samples, in_out=False): +def radialize_center( + trajectory: NDArray, nb_samples: int, in_out: bool = False +) -> NDArray: """Radialize a trajectory. Parameters ---------- - trajectory : array_like + trajectory : NDArray Trajectory to radialize. nb_samples : int Number of samples to keep. in_out : bool, optional Whether the radialization is from the inside to the outside, by default False + + Returns + ------- + NDArray + Radialized trajectory. 
""" # Make nb_samples into straight lines around the center if in_out: return _radialize_in_out(trajectory, nb_samples) - else: - return _radialize_center_out(trajectory, nb_samples) + return _radialize_center_out(trajectory, nb_samples) diff --git a/src/mrinufft/trajectories/trajectory2D.py b/src/mrinufft/trajectories/trajectory2D.py index e930e90b3..a9029e25c 100644 --- a/src/mrinufft/trajectories/trajectory2D.py +++ b/src/mrinufft/trajectories/trajectory2D.py @@ -1,7 +1,10 @@ """Functions to initialize 2D trajectories.""" +from typing import Any, Literal + import numpy as np import numpy.linalg as nl +from numpy.typing import NDArray from scipy.interpolate import CubicSpline from .gradients import patch_center_anomaly @@ -14,7 +17,9 @@ ##################### -def initialize_2D_radial(Nc, Ns, tilt="uniform", in_out=False): +def initialize_2D_radial( + Nc: int, Ns: int, tilt: str | float = "uniform", in_out: bool = False +) -> NDArray: """Initialize a 2D radial trajectory. Parameters @@ -23,14 +28,14 @@ def initialize_2D_radial(Nc, Ns, tilt="uniform", in_out=False): Number of shots Ns : int Number of samples per shot - tilt : str, float, optional + tilt : str | float, optional Tilt of the shots, by default "uniform" in_out : bool, optional Whether to start from the center or not, by default False Returns ------- - array_like + NDArray 2D radial trajectory """ # Initialize a first shot @@ -47,14 +52,14 @@ def initialize_2D_radial(Nc, Ns, tilt="uniform", in_out=False): def initialize_2D_spiral( - Nc, - Ns, - tilt="uniform", - in_out=False, - nb_revolutions=1, - spiral="archimedes", - patch_center=True, -): + Nc: int, + Ns: int, + tilt: str | float = "uniform", + in_out: bool = False, + nb_revolutions: float = 1.0, + spiral: str | float = "archimedes", + patch_center: bool = True, +) -> NDArray: """Initialize a 2D algebraic spiral trajectory. 
A generalized function that generates algebraic spirals defined @@ -68,13 +73,13 @@ def initialize_2D_spiral( Number of shots Ns : int Number of samples per shot - tilt : str, float, optional + tilt : Literal, float, optional Tilt of the shots, by default "uniform" in_out : bool, optional Whether to start from the center or not, by default False - nb_revolutions : int, optional + nb_revolutions : float, optional Number of revolutions, by default 1 - spiral : str, float, optional + spiral : Literal, float, optional Spiral type or algebraic power, by default "archimedes" patch_center : bool, optional Whether the spiral anomaly at the center should be patched @@ -82,7 +87,7 @@ def initialize_2D_spiral( Returns ------- - array_like + NDArray 2D spiral trajectory Raises @@ -109,11 +114,18 @@ def initialize_2D_spiral( # Algebraic spirals with power coefficients superior to 1 # have a non-monotonic gradient norm when varying the angle # over [0, +inf) - def _update_shot(angles, radius, *args): + def _update_shot( + angles: NDArray, radius: NDArray, *args: Any # noqa ANN401 + ) -> NDArray: shot = np.sign(angles) * np.abs(radius) * np.exp(1j * np.abs(angles)) return np.stack([shot.real, shot.imag], axis=-1) - def _update_parameters(single_shot, angles, radius, spiral_power): + def _update_parameters( + single_shot: NDArray, + angles: NDArray, + radius: NDArray, + spiral_power: float, + ) -> tuple[NDArray, NDArray, float]: radius = nl.norm(single_shot, axis=-1) angles = np.sign(angles) * np.abs(radius) ** (1 / spiral_power) return angles, radius, spiral_power @@ -144,7 +156,9 @@ def _update_parameters(single_shot, angles, radius, spiral_power): return trajectory -def initialize_2D_fibonacci_spiral(Nc, Ns, spiral_reduction=1, patch_center=True): +def initialize_2D_fibonacci_spiral( + Nc: int, Ns: int, spiral_reduction: float = 1, patch_center: bool = True +) -> NDArray: """Initialize a 2D Fibonacci spiral trajectory. A non-algebraic spiral trajectory based on the Fibonacci sequence, @@ -168,7 +182,7 @@ def initialize_2D_fibonacci_spiral(Nc, Ns, spiral_reduction=1, patch_center=True Returns ------- - array_like + NDArray 2D Fibonacci spiral trajectory References @@ -213,7 +227,14 @@ def initialize_2D_fibonacci_spiral(Nc, Ns, spiral_reduction=1, patch_center=True return trajectory -def initialize_2D_cones(Nc, Ns, tilt="uniform", in_out=False, nb_zigzags=5, width=1): +def initialize_2D_cones( + Nc: int, + Ns: int, + tilt: str | float = "uniform", + in_out: bool = False, + nb_zigzags: float = 5, + width: float = 1, +) -> NDArray: """Initialize a 2D cone trajectory. Parameters @@ -222,7 +243,7 @@ def initialize_2D_cones(Nc, Ns, tilt="uniform", in_out=False, nb_zigzags=5, widt Number of shots Ns : int Number of samples per shot - tilt : str, optional + tilt : str | float, optional Tilt of the shots, by default "uniform" in_out : bool, optional Whether to start from the center or not, by default False @@ -233,7 +254,7 @@ def initialize_2D_cones(Nc, Ns, tilt="uniform", in_out=False, nb_zigzags=5, widt Returns ------- - array_like + NDArray 2D cone trajectory """ @@ -253,8 +274,13 @@ def initialize_2D_cones(Nc, Ns, tilt="uniform", in_out=False, nb_zigzags=5, widt def initialize_2D_sinusoide( - Nc, Ns, tilt="uniform", in_out=False, nb_zigzags=5, width=1 -): + Nc: int, + Ns: int, + tilt: str | float = "uniform", + in_out: bool = False, + nb_zigzags: float = 5, + width: float = 1, +) -> NDArray: """Initialize a 2D sinusoide trajectory. 
Parameters @@ -263,7 +289,7 @@ def initialize_2D_sinusoide( Number of shots Ns : int Number of samples per shot - tilt : str, float, optional + tilt : str | float, optional Tilt of the shots, by default "uniform" in_out : bool, optional Whether to start from the center or not, by default False @@ -274,7 +300,7 @@ def initialize_2D_sinusoide( Returns ------- - array_like + NDArray 2D sinusoide trajectory """ @@ -293,7 +319,7 @@ def initialize_2D_sinusoide( return trajectory -def initialize_2D_propeller(Nc, Ns, nb_strips): +def initialize_2D_propeller(Nc: int, Ns: int, nb_strips: int) -> NDArray: """Initialize a 2D PROPELLER trajectory, as proposed in [Pip99]_. The PROPELLER trajectory is generally used along a specific @@ -341,7 +367,7 @@ def initialize_2D_propeller(Nc, Ns, nb_strips): return KMAX * trajectory -def initialize_2D_rings(Nc, Ns, nb_rings): +def initialize_2D_rings(Nc: int, Ns: int, nb_rings: int) -> NDArray: """Initialize a 2D ring trajectory, as proposed in [HHN08]_. Parameters @@ -355,7 +381,7 @@ def initialize_2D_rings(Nc, Ns, nb_rings): Returns ------- - array_like + NDArray 2D ring trajectory References @@ -387,7 +413,9 @@ def initialize_2D_rings(Nc, Ns, nb_rings): return KMAX * np.array(trajectory) -def initialize_2D_rosette(Nc, Ns, in_out=False, coprime_index=0): +def initialize_2D_rosette( + Nc: int, Ns: int, in_out: bool = False, coprime_index: int = 0 +) -> NDArray: """Initialize a 2D rosette trajectory. Parameters @@ -403,7 +431,7 @@ def initialize_2D_rosette(Nc, Ns, in_out=False, coprime_index=0): Returns ------- - array_like + NDArray 2D rosette trajectory """ @@ -428,7 +456,9 @@ def initialize_2D_rosette(Nc, Ns, in_out=False, coprime_index=0): return trajectory -def initialize_2D_polar_lissajous(Nc, Ns, in_out=False, nb_segments=1, coprime_index=0): +def initialize_2D_polar_lissajous( + Nc: int, Ns: int, in_out: bool = False, nb_segments: int = 1, coprime_index: int = 0 +) -> NDArray: """Initialize a 2D polar Lissajous trajectory. Parameters @@ -446,7 +476,7 @@ def initialize_2D_polar_lissajous(Nc, Ns, in_out=False, nb_segments=1, coprime_i Returns ------- - array_like + NDArray 2D polar Lissajous trajectory """ # Adapt the parameters to subcases @@ -482,7 +512,7 @@ def initialize_2D_polar_lissajous(Nc, Ns, in_out=False, nb_segments=1, coprime_i ######################### -def initialize_2D_lissajous(Nc, Ns, density=1): +def initialize_2D_lissajous(Nc: int, Ns: int, density: float = 1) -> NDArray: """Initialize a 2D Lissajous trajectory. Parameters @@ -496,7 +526,7 @@ def initialize_2D_lissajous(Nc, Ns, density=1): Returns ------- - array_like + NDArray 2D Lissajous trajectory """ # Define the whole curve in Cartesian coordinates @@ -512,7 +542,9 @@ def initialize_2D_lissajous(Nc, Ns, density=1): return trajectory -def initialize_2D_waves(Nc, Ns, nb_zigzags=5, width=1): +def initialize_2D_waves( + Nc: int, Ns: int, nb_zigzags: float = 5, width: float = 1 +) -> NDArray: """Initialize a 2D waves trajectory. 
Parameters @@ -528,7 +560,7 @@ def initialize_2D_waves(Nc, Ns, nb_zigzags=5, width=1): Returns ------- - array_like + NDArray 2D waves trajectory """ # Initialize a first shot diff --git a/src/mrinufft/trajectories/trajectory3D.py b/src/mrinufft/trajectories/trajectory3D.py index cdcf4a0ad..5ec9f2bca 100644 --- a/src/mrinufft/trajectories/trajectory3D.py +++ b/src/mrinufft/trajectories/trajectory3D.py @@ -1,9 +1,11 @@ """Functions to initialize 3D trajectories.""" from functools import partial +from typing import Literal import numpy as np import numpy.linalg as nl +from numpy.typing import NDArray from scipy.special import ellipj, ellipk from .maths import ( @@ -17,14 +19,16 @@ ) from .tools import conify, duplicate_along_axes, epify, precess, stack from .trajectory2D import initialize_2D_radial, initialize_2D_spiral -from .utils import KMAX, Packings, initialize_shape_norm, initialize_tilt +from .utils import KMAX, Packings, Spirals, initialize_shape_norm, initialize_tilt ############## # 3D RADIALS # ############## -def initialize_3D_phyllotaxis_radial(Nc, Ns, nb_interleaves=1, in_out=False): +def initialize_3D_phyllotaxis_radial( + Nc: int, Ns: int, nb_interleaves: int = 1, in_out: bool = False +) -> NDArray: """Initialize 3D radial trajectories with phyllotactic structure. The radial shots are oriented according to a Fibonacci sphere @@ -53,7 +57,7 @@ def initialize_3D_phyllotaxis_radial(Nc, Ns, nb_interleaves=1, in_out=False): Returns ------- - array_like + NDArray 3D phyllotaxis radial trajectory References @@ -71,7 +75,9 @@ def initialize_3D_phyllotaxis_radial(Nc, Ns, nb_interleaves=1, in_out=False): return trajectory -def initialize_3D_golden_means_radial(Nc, Ns, in_out=False): +def initialize_3D_golden_means_radial( + Nc: int, Ns: int, in_out: bool = False +) -> NDArray: """Initialize 3D radial trajectories with golden means-based structure. The radial shots are oriented using multidimensional golden means, @@ -95,7 +101,7 @@ def initialize_3D_golden_means_radial(Nc, Ns, in_out=False): Returns ------- - array_like + NDArray 3D golden means radial trajectory References @@ -123,7 +129,9 @@ def initialize_3D_golden_means_radial(Nc, Ns, in_out=False): return KMAX * trajectory -def initialize_3D_wong_radial(Nc, Ns, nb_interleaves=1, in_out=False): +def initialize_3D_wong_radial( + Nc: int, Ns: int, nb_interleaves: int = 1, in_out: bool = False +) -> NDArray: """Initialize 3D radial trajectories with a spiral structure. The radial shots are oriented according to an archimedean spiral @@ -149,7 +157,7 @@ def initialize_3D_wong_radial(Nc, Ns, nb_interleaves=1, in_out=False): Returns ------- - array_like + NDArray 3D Wong radial trajectory References @@ -181,7 +189,9 @@ def initialize_3D_wong_radial(Nc, Ns, nb_interleaves=1, in_out=False): return trajectory -def initialize_3D_park_radial(Nc, Ns, nb_interleaves=1, in_out=False): +def initialize_3D_park_radial( + Nc: int, Ns: int, nb_interleaves: int = 1, in_out: bool = False +) -> NDArray: """Initialize 3D radial trajectories with a spiral structure. 
The radial shots are oriented according to an archimedean spiral @@ -208,7 +218,7 @@ def initialize_3D_park_radial(Nc, Ns, nb_interleaves=1, in_out=False): Returns ------- - array_like + NDArray 3D Park radial trajectory References @@ -233,8 +243,14 @@ def initialize_3D_park_radial(Nc, Ns, nb_interleaves=1, in_out=False): def initialize_3D_cones( - Nc, Ns, tilt="golden", in_out=False, nb_zigzags=5, spiral="archimedes", width=1 -): + Nc: int, + Ns: int, + tilt: str | float = "golden", + in_out: bool = False, + nb_zigzags: float = 5, + spiral: str | float = "archimedes", + width: float = 1, +) -> NDArray: """Initialize 3D trajectories with cones. Initialize a trajectory consisting of 3D cones duplicated @@ -264,7 +280,7 @@ def initialize_3D_cones( Returns ------- - array_like + NDArray 3D cones trajectory References @@ -274,7 +290,7 @@ def initialize_3D_cones( Journal of mathematical chemistry 6, no. 1 (1991): 325-349. """ # Initialize first spiral - spiral = initialize_2D_spiral( + single_spiral = initialize_2D_spiral( Nc=1, Ns=Ns, spiral=spiral, @@ -290,8 +306,8 @@ def initialize_3D_cones( # Initialize first cone ## Create three cones for proper partitioning, but only one is needed - cone = conify( - spiral, + cones = conify( + single_spiral, nb_cones=3, z_tilt=None, in_out=in_out, @@ -301,22 +317,27 @@ def initialize_3D_cones( # Apply precession to the first cone trajectory = precess( - cone, tilt=tilt, nb_rotations=Nc, half_sphere=in_out, partition="axial", axis=2 + cones, + tilt=tilt, + nb_rotations=Nc, + half_sphere=in_out, + partition="axial", + axis=2, ) return trajectory def initialize_3D_floret( - Nc, - Ns, - in_out=False, - nb_revolutions=1, - spiral="fermat", - cone_tilt="golden", - max_angle=np.pi / 2, - axes=(2,), -): + Nc: int, + Ns: int, + in_out: bool = False, + nb_revolutions: float = 1, + spiral: str | float = "fermat", + cone_tilt: str | float = "golden", + max_angle: float = np.pi / 2, + axes: tuple[int, ...] = (2,), +) -> NDArray: """Initialize 3D trajectories with FLORET. This implementation is based on the work from [Pip+11]_. @@ -346,7 +367,7 @@ def initialize_3D_floret( Returns ------- - array_like + NDArray 3D FLORET trajectory References @@ -363,7 +384,7 @@ def initialize_3D_floret( raise ValueError("Nc should be divisible by len(axes).") # Initialize first spiral - spiral = initialize_2D_spiral( + single_spiral = initialize_2D_spiral( Nc=1, Ns=Ns, spiral=spiral, @@ -372,8 +393,8 @@ def initialize_3D_floret( ) # Initialize first cone - cone = conify( - spiral, + cones = conify( + single_spiral, nb_cones=Nc_per_axis, z_tilt=cone_tilt, in_out=in_out, @@ -381,20 +402,20 @@ def initialize_3D_floret( ) # Duplicate cone along axes - axes = [2 - ax for ax in axes] # Default axis is kz, not kx - trajectory = duplicate_along_axes(cone, axes=axes) + axes = tuple(2 - ax for ax in axes) # Default axis is kz, not kx + trajectory = duplicate_along_axes(cones, axes=axes) return trajectory def initialize_3D_wave_caipi( - Nc, - Ns, - nb_revolutions=5, - width=1, - packing="triangular", - shape="square", - spacing=(1, 1), -): + Nc: int, + Ns: int, + nb_revolutions: float = 5, + width: float = 1, + packing: str = "triangular", + shape: str | float = "square", + spacing: tuple[int, int] = (1, 1), +) -> NDArray: """Initialize 3D trajectories with Wave-CAIPI. This implementation is based on the work from [Bil+15]_. 
@@ -415,20 +436,20 @@ def initialize_3D_wave_caipi( Packing method used to position the helices: "triangular"/"hexagonal", "square", "circular" or "random"/"uniform", by default "triangular". - shape : str or float, optional + shape : str | float, optional Shape over the 2D :math:`k_x-k_y` plane to pack with shots, either defined as `str` ("circle", "square", "diamond") or as `float` through p-norms following the conventions of the `ord` parameter from `numpy.linalg.norm`, by default "circle". - spacing : tuple(int, int) + spacing : tuple[int, int] Spacing between helices over the 2D :math:`k_x-k_y` plane normalized similarly to `width` to correspond to helix diameters, by default (1, 1). Returns ------- - array_like + NDArray 3D wave-CAIPI trajectory References @@ -439,7 +460,6 @@ def initialize_3D_wave_caipi( Magnetic resonance in medicine 73, no. 6 (2015): 2152-2162. """ trajectory = np.zeros((Nc, Ns, 3)) - spacing = np.array(spacing) # Initialize first shot angles = nb_revolutions * 2 * np.pi * np.arange(0, Ns) / Ns @@ -448,11 +468,11 @@ def initialize_3D_wave_caipi( trajectory[0, :, 2] = np.linspace(-1, 1, Ns) # Choose the helix positions according to packing - packing = Packings[packing] + packing_enum = Packings[packing] side = 2 * int(np.ceil(np.sqrt(Nc))) * np.max(spacing) - if packing == Packings.RANDOM: + if packing_enum == Packings.RANDOM: positions = 2 * side * (np.random.random((side * side, 2)) - 0.5) - elif packing == Packings.CIRCLE: + elif packing_enum == Packings.CIRCLE: positions = [[0, 0]] counter = 0 while len(positions) < side**2: @@ -466,21 +486,21 @@ def initialize_3D_wave_caipi( positions = np.concatenate( [positions, np.array([circle.real, circle.imag]).T], axis=0 ) - elif packing in [Packings.SQUARE, Packings.TRIANGLE, Packings.HEXAGONAL]: + elif packing_enum in [Packings.SQUARE, Packings.TRIANGLE, Packings.HEXAGONAL]: # Square packing or initialize hexagonal/triangular packing px, py = np.meshgrid( np.arange(-side + 1, side, 2), np.arange(-side + 1, side, 2) ) positions = np.stack([px.flatten(), py.flatten()], axis=-1).astype(float) - if packing in [Packings.HEXAGON, Packings.TRIANGLE]: + if packing_enum in [Packings.HEXAGON, Packings.TRIANGLE]: # Hexagonal/triangular packing based on square packing positions[::2, 1] += 1 / 2 positions[1::2, 1] -= 1 / 2 ratio = nl.norm(np.diff(positions[:2], axis=-1)) positions[:, 0] /= ratio / 2 - if packing == Packings.FIBONACCI: + if packing_enum == Packings.FIBONACCI: # Estimate helix width based on the k-space 2D surface # and an optimal circle packing positions = np.sqrt( @@ -490,7 +510,7 @@ def initialize_3D_wave_caipi( # Remove points by distance to fit both shape and Nc main_order = initialize_shape_norm(shape) tie_order = 2 if (main_order != 2) else np.inf # breaking ties - positions = np.array(positions) * spacing + positions = np.array(positions) * np.array(spacing) positions = sorted(positions, key=partial(nl.norm, ord=tie_order)) positions = sorted(positions, key=partial(nl.norm, ord=main_order)) positions = positions[:Nc] @@ -506,14 +526,14 @@ def initialize_3D_wave_caipi( def initialize_3D_seiffert_spiral( - Nc, - Ns, - curve_index=0.2, - nb_revolutions=1, - axis_tilt="golden", - spiral_tilt="golden", - in_out=False, -): + Nc: int, + Ns: int, + curve_index: float = 0.2, + nb_revolutions: float = 1, + axis_tilt: str | float = "golden", + spiral_tilt: str | float = "golden", + in_out: bool = False, +) -> NDArray: """Initialize 3D trajectories with modulated Seiffert spirals. 
Initially introduced in [SMR18]_, but also proposed later as "Yarnball" @@ -543,7 +563,7 @@ def initialize_3D_seiffert_spiral( Returns ------- - array_like + NDArray 3D Seiffert spiral trajectory References @@ -611,8 +631,13 @@ def initialize_3D_seiffert_spiral( def initialize_3D_helical_shells( - Nc, Ns, nb_shells, spiral_reduction=1, shell_tilt="intergaps", shot_tilt="uniform" -): + Nc: int, + Ns: int, + nb_shells: int, + spiral_reduction: float = 1, + shell_tilt: str = "intergaps", + shot_tilt: str = "uniform", +) -> NDArray: """Initialize 3D trajectories with helical shells. The implementation follows the proposition from [YRB06]_ @@ -635,7 +660,7 @@ def initialize_3D_helical_shells( Returns ------- - array_like + NDArray 3D helical shell trajectory References @@ -694,8 +719,12 @@ def initialize_3D_helical_shells( def initialize_3D_annular_shells( - Nc, Ns, nb_shells, shell_tilt=np.pi, ring_tilt=np.pi / 2 -): + Nc: int, + Ns: int, + nb_shells: int, + shell_tilt: float = np.pi, + ring_tilt: float = np.pi / 2, +) -> NDArray: """Initialize 3D trajectories with annular shells. An exclusive trajectory inspired from the work proposed in [HM11]_. @@ -715,7 +744,7 @@ def initialize_3D_annular_shells( Returns ------- - array_like + NDArray 3D annular shell trajectory References @@ -807,14 +836,14 @@ def initialize_3D_annular_shells( def initialize_3D_seiffert_shells( - Nc, - Ns, - nb_shells, - curve_index=0.5, - nb_revolutions=1, - shell_tilt="uniform", - shot_tilt="uniform", -): + Nc: int, + Ns: int, + nb_shells: int, + curve_index: float = 0.5, + nb_revolutions: float = 1, + shell_tilt: str = "uniform", + shot_tilt: str = "uniform", +) -> NDArray: """Initialize 3D trajectories with Seiffert shells. The implementation is based on work from [Er00]_ and [Br09]_, @@ -841,7 +870,7 @@ def initialize_3D_seiffert_shells( Returns ------- - array_like + NDArray 3D Seiffert shell trajectory References @@ -905,15 +934,15 @@ def initialize_3D_seiffert_shells( def initialize_3D_turbine( - Nc, - Ns_readouts, - Ns_transitions, - nb_blades, - blade_tilt="uniform", - nb_trains="auto", - skip_factor=1, - in_out=True, -): + Nc: int, + Ns_readouts: int, + Ns_transitions: int, + nb_blades: int, + blade_tilt: str = "uniform", + nb_trains: int | Literal["auto"] = "auto", + skip_factor: int = 1, + in_out: bool = True, +) -> NDArray: """Initialize 3D TURBINE trajectory. This is an implementation of the TURBINE (Trajectory Using Radially @@ -938,7 +967,7 @@ def initialize_3D_turbine( Number of line stacks over the :math:`k_z`-axis axis blade_tilt : str, float, optional Tilt between individual blades, by default "uniform" - nb_trains : int, str, optional + nb_trains : int, Literal["auto"], optional Number of resulting shots, or readout trains, such that each of them will be composed of :math:`n` readouts with ``Nc = n * nb_trains``. 
If ``"auto"`` then ``nb_trains`` is set @@ -951,7 +980,7 @@ def initialize_3D_turbine( Returns ------- - array_like + NDArray 3D TURBINE trajectory References @@ -1013,17 +1042,17 @@ def initialize_3D_turbine( def initialize_3D_repi( - Nc, - Ns_readouts, - Ns_transitions, - nb_blades, - nb_blade_revolutions=0, - blade_tilt="uniform", - nb_spiral_revolutions=0, - spiral="archimedes", - nb_trains="auto", - in_out=True, -): + Nc: int, + Ns_readouts: int, + Ns_transitions: int, + nb_blades: int, + nb_blade_revolutions: float = 0, + blade_tilt: str = "uniform", + nb_spiral_revolutions: float = 0, + spiral: str = "archimedes", + nb_trains: int | Literal["auto"] = "auto", + in_out: bool = True, +) -> NDArray: """Initialize 3D REPI trajectory. This is an implementation of the REPI (Radial Echo Planar Imaging) @@ -1058,7 +1087,7 @@ def initialize_3D_repi( Number of revolutions of the spirals over the readouts, by default 0 spiral : str, float, optional Spiral type, by default "archimedes" - nb_trains : int, str + nb_trains : int, Literal["auto"], optional Number of trains dividing the readouts, such that each shot will be composed of `n` readouts with `Nc = n * nb_trains`. If "auto" then `nb_trains` is set to `nb_blades`. @@ -1067,7 +1096,7 @@ def initialize_3D_repi( Returns ------- - array_like + NDArray 3D REPI trajectory References diff --git a/src/mrinufft/trajectories/utils.py b/src/mrinufft/trajectories/utils.py index e2c9ff4fb..c1124e116 100644 --- a/src/mrinufft/trajectories/utils.py +++ b/src/mrinufft/trajectories/utils.py @@ -2,8 +2,10 @@ from enum import Enum, EnumMeta from numbers import Real +from typing import Any, Literal import numpy as np +from numpy.typing import NDArray ############# # CONSTANTS # @@ -26,11 +28,11 @@ class CaseInsensitiveEnumMeta(EnumMeta): """A case-insensitive EnumMeta.""" - def __getitem__(self, name): + def __getitem__(self, name: str) -> Enum: """Allow ``MyEnum['Member'] == MyEnum['MEMBER']`` .""" return super().__getitem__(name.upper()) - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: # noqa ANN401 """Allow ``MyEnum.Member == MyEnum.MEMBER`` .""" return super().__getattr__(name.upper()) @@ -121,7 +123,7 @@ class Tilts(str, Enum): class Packings(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Enumerate available packing method for shots. - It is mostly use for wave-CAIPI trajectory + It is mostly used for wave-CAIPI trajectory See Also -------- @@ -150,15 +152,15 @@ class Packings(str, Enum, metaclass=CaseInsensitiveEnumMeta): def normalize_trajectory( - trajectory, - norm_factor=KMAX, - resolution=DEFAULT_RESOLUTION, -): + trajectory: NDArray, + norm_factor: float = KMAX, + resolution: float | NDArray = DEFAULT_RESOLUTION, +) -> NDArray: """Normalize an un-normalized/natural trajectory for NUFFT use. Parameters ---------- - trajectory : np.ndarray + trajectory : NDArray Un-normalized trajectory consisting of k-space coordinates in 2D or 3D. norm_factor : float, optional Trajectory normalization factor, by default KMAX. @@ -169,22 +171,22 @@ def normalize_trajectory( Returns ------- - trajectory : np.ndarray + trajectory : NDArray Normalized trajectory corresponding to `trajectory` input. """ return trajectory * norm_factor * (2 * resolution) def unnormalize_trajectory( - trajectory, - norm_factor=KMAX, - resolution=DEFAULT_RESOLUTION, -): + trajectory: NDArray, + norm_factor: float = KMAX, + resolution: float | NDArray = DEFAULT_RESOLUTION, +) -> NDArray: """Un-normalize a NUFFT-normalized trajectory. 
Parameters ---------- - trajectory : np.ndarray + trajectory : NDArray Normalized trajectory consisting of k-space coordinates in 2D or 3D. norm_factor : float, optional Trajectory normalization factor, by default KMAX. @@ -195,25 +197,25 @@ def unnormalize_trajectory( Returns ------- - trajectory : np.ndarray + trajectory : NDArray Un-normalized trajectory corresponding to `trajectory` input. """ return trajectory / norm_factor / (2 * resolution) def convert_trajectory_to_gradients( - trajectory, - norm_factor=KMAX, - resolution=DEFAULT_RESOLUTION, - raster_time=DEFAULT_RASTER_TIME, - gamma=Gammas.HYDROGEN, - get_final_positions=False, -): + trajectory: NDArray, + norm_factor: float = KMAX, + resolution: float | NDArray = DEFAULT_RESOLUTION, + raster_time: float = DEFAULT_RASTER_TIME, + gamma: float = Gammas.HYDROGEN, + get_final_positions: bool = False, +) -> tuple[NDArray, ...]: """Derive a normalized trajectory over time to provide gradients. Parameters ---------- - trajectory : np.ndarray + trajectory : NDArray Normalized trajectory consisting of k-space coordinates in 2D or 3D. norm_factor : float, optional Trajectory normalization factor, by default KMAX. @@ -234,7 +236,7 @@ def convert_trajectory_to_gradients( Returns ------- - gradients : np.ndarray + gradients : NDArray Gradients corresponding to `trajectory`. """ # Un-normalize the trajectory from NUFFT usage @@ -249,20 +251,20 @@ def convert_trajectory_to_gradients( def convert_gradients_to_trajectory( - gradients, - initial_positions=None, - norm_factor=KMAX, - resolution=DEFAULT_RESOLUTION, - raster_time=DEFAULT_RASTER_TIME, - gamma=Gammas.HYDROGEN, -): + gradients: NDArray, + initial_positions: NDArray | None = None, + norm_factor: float = KMAX, + resolution: float | NDArray = DEFAULT_RESOLUTION, + raster_time: float = DEFAULT_RASTER_TIME, + gamma: float = Gammas.HYDROGEN, +) -> NDArray: """Integrate gradients over time to provide a normalized trajectory. Parameters ---------- - gradients : np.ndarray + gradients : NDArray Gradients over 2 or 3 directions. - initial_positions: np.ndarray, optional + initial_positions: NDArray, optional Positions in k-space at the beginning of the readout window. The default is `None`. norm_factor : float, optional @@ -281,7 +283,7 @@ def convert_gradients_to_trajectory( Returns ------- - trajectory : np.ndarray + trajectory : NDArray Normalized trajectory corresponding to `gradients`. """ # Handle no initial positions @@ -299,14 +301,14 @@ def convert_gradients_to_trajectory( def convert_gradients_to_slew_rates( - gradients, - raster_time=DEFAULT_RASTER_TIME, -): + gradients: NDArray, + raster_time: float = DEFAULT_RASTER_TIME, +) -> tuple[NDArray, NDArray]: """Derive the gradients over time to provide slew rates. Parameters ---------- - gradients : np.ndarray + gradients : NDArray Gradients over 2 or 3 directions. raster_time : float, optional Amount of time between the acquisition of two @@ -315,9 +317,9 @@ def convert_gradients_to_slew_rates( Returns ------- - slewrates : np.ndarray + slewrates : NDArray Slew rates corresponding to `gradients`. - initial_gradients : np.ndarray + initial_gradients : NDArray Gradients at the beginning of the readout window. 
""" # Compute slew rates and starting gradients @@ -327,17 +329,17 @@ def convert_gradients_to_slew_rates( def convert_slew_rates_to_gradients( - slewrates, - initial_gradients=None, - raster_time=DEFAULT_RASTER_TIME, -): + slewrates: NDArray, + initial_gradients: NDArray | None = None, + raster_time: float = DEFAULT_RASTER_TIME, +) -> NDArray: """Integrate slew rates over time to provide gradients. Parameters ---------- - slewrates : np.ndarray + slewrates : NDArray Slew rates over 2 or 3 directions. - initial_gradients: np.ndarray, optional + initial_gradients: NDArray, optional Gradients at the beginning of the readout window. The default is `None`. raster_time : float, optional @@ -347,7 +349,7 @@ def convert_slew_rates_to_gradients( Returns ------- - gradients : np.ndarray + gradients : NDArray Gradients corresponding to `slewrates`. """ # Handle no initial gradients @@ -362,17 +364,17 @@ def convert_slew_rates_to_gradients( def compute_gradients_and_slew_rates( - trajectory, - norm_factor=KMAX, - resolution=DEFAULT_RESOLUTION, - raster_time=DEFAULT_RASTER_TIME, - gamma=Gammas.HYDROGEN, -): + trajectory: NDArray, + norm_factor: float = KMAX, + resolution: float | NDArray = DEFAULT_RESOLUTION, + raster_time: float = DEFAULT_RASTER_TIME, + gamma: float = Gammas.HYDROGEN, +) -> tuple[NDArray, NDArray]: """Compute the gradients and slew rates from a normalized trajectory. Parameters ---------- - trajectory : np.ndarray + trajectory : NDArray Normalized trajectory consisting of k-space coordinates in 2D or 3D. norm_factor : float, optional Trajectory normalization factor, by default KMAX. @@ -390,9 +392,9 @@ def compute_gradients_and_slew_rates( Returns ------- - gradients : np.ndarray + gradients : NDArray Gradients corresponding to `trajectory`. - slewrates : np.ndarray + slewrates : NDArray Slew rates corresponding to `trajectory` gradients. """ # Convert normalized trajectory to gradients @@ -411,15 +413,19 @@ def compute_gradients_and_slew_rates( def check_hardware_constraints( - gradients, slewrates, gmax=DEFAULT_GMAX, smax=DEFAULT_SMAX, order=None -): + gradients: NDArray, + slewrates: NDArray, + gmax: float = DEFAULT_GMAX, + smax: float = DEFAULT_SMAX, + order: int | str | None = None, +) -> tuple[bool, float, float]: """Check if a trajectory satisfies the gradient hardware constraints. Parameters ---------- - gradients : np.ndarray + gradients : NDArray Gradients to check - slewrates: np.ndarray + slewrates: NDArray Slewrates to check gmax : float, optional Maximum gradient amplitude in T/m. The default is DEFAULT_GMAX. @@ -450,12 +456,12 @@ def check_hardware_constraints( ########### -def initialize_tilt(tilt, nb_partitions=1): +def initialize_tilt(tilt: str | float | None, nb_partitions: int = 1) -> float: r"""Initialize the tilt angle. Parameters ---------- - tilt : str or float + tilt : str | float | None Tilt angle in rad or name of the tilt. nb_partitions : int, optional Number of partitions. The default is 1. @@ -493,12 +499,12 @@ def initialize_tilt(tilt, nb_partitions=1): raise NotImplementedError(f"Unknown tilt name: {tilt}") -def initialize_algebraic_spiral(spiral): +def initialize_algebraic_spiral(spiral: str | float) -> float: """Initialize the algebraic spiral type. Parameters ---------- - spiral : str or float + spiral : str | float Spiral type or spiral power value. Returns @@ -507,16 +513,16 @@ def initialize_algebraic_spiral(spiral): Spiral power value. 
""" if isinstance(spiral, Real): - return spiral - return Spirals[spiral] + return float(spiral) + return Spirals[str(spiral)] -def initialize_shape_norm(shape): +def initialize_shape_norm(shape: str | float) -> float: """Initialize the norm for a given shape. Parameters ---------- - shape : str or float + shape : str | float Shape name or p-norm value. Returns @@ -525,5 +531,5 @@ def initialize_shape_norm(shape): Shape p-norm value. """ if isinstance(shape, Real): - return shape - return NormShapes[shape] + return float(shape) + return NormShapes[str(shape)]