diff --git a/.buildinfo b/.buildinfo new file mode 100644 index 00000000..76d3ef77 --- /dev/null +++ b/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: b683366407baf893adbd59b1e1506c71 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/.doctrees/core/bloch.doctree b/.doctrees/core/bloch.doctree new file mode 100644 index 00000000..e32160d6 Binary files /dev/null and b/.doctrees/core/bloch.doctree differ diff --git a/.doctrees/core/fft.doctree b/.doctrees/core/fft.doctree new file mode 100644 index 00000000..466324ee Binary files /dev/null and b/.doctrees/core/fft.doctree differ diff --git a/.doctrees/core/generated/deepmr.b0field.doctree b/.doctrees/core/generated/deepmr.b0field.doctree new file mode 100644 index 00000000..cad8c3de Binary files /dev/null and b/.doctrees/core/generated/deepmr.b0field.doctree differ diff --git a/.doctrees/core/generated/deepmr.b1field.doctree b/.doctrees/core/generated/deepmr.b1field.doctree new file mode 100644 index 00000000..5b27d974 Binary files /dev/null and b/.doctrees/core/generated/deepmr.b1field.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.AdiabaticPulse.doctree b/.doctrees/core/generated/deepmr.bloch.AdiabaticPulse.doctree new file mode 100644 index 00000000..01590a6d Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.AdiabaticPulse.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.DiffusionDamping.doctree b/.doctrees/core/generated/deepmr.bloch.DiffusionDamping.doctree new file mode 100644 index 00000000..e0008ffa Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.DiffusionDamping.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.EPGstates.doctree b/.doctrees/core/generated/deepmr.bloch.EPGstates.doctree new file mode 100644 index 00000000..59d1da4d Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.EPGstates.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.ExcPulse.doctree b/.doctrees/core/generated/deepmr.bloch.ExcPulse.doctree new file mode 100644 index 00000000..762f44a2 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.ExcPulse.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.FSEStep.doctree b/.doctrees/core/generated/deepmr.bloch.FSEStep.doctree new file mode 100644 index 00000000..3d075cae Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.FSEStep.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.FlowDephasing.doctree b/.doctrees/core/generated/deepmr.bloch.FlowDephasing.doctree new file mode 100644 index 00000000..db98bbf2 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.FlowDephasing.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.FlowWash.doctree b/.doctrees/core/generated/deepmr.bloch.FlowWash.doctree new file mode 100644 index 00000000..2eff58b3 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.FlowWash.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.InversionPrep.doctree b/.doctrees/core/generated/deepmr.bloch.InversionPrep.doctree new file mode 100644 index 00000000..1e4e5d68 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.InversionPrep.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.RFPulse.doctree b/.doctrees/core/generated/deepmr.bloch.RFPulse.doctree new file mode 100644 index 00000000..dbae3bc0 Binary 
files /dev/null and b/.doctrees/core/generated/deepmr.bloch.RFPulse.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.Relaxation.doctree b/.doctrees/core/generated/deepmr.bloch.Relaxation.doctree new file mode 100644 index 00000000..c71574a8 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.Relaxation.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.SSFPEchoStep.doctree b/.doctrees/core/generated/deepmr.bloch.SSFPEchoStep.doctree new file mode 100644 index 00000000..802b866c Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.SSFPEchoStep.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.SSFPFidStep.doctree b/.doctrees/core/generated/deepmr.bloch.SSFPFidStep.doctree new file mode 100644 index 00000000..72c1c262 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.SSFPFidStep.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.Shift.doctree b/.doctrees/core/generated/deepmr.bloch.Shift.doctree new file mode 100644 index 00000000..1cadde55 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.Shift.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.Spoil.doctree b/.doctrees/core/generated/deepmr.bloch.Spoil.doctree new file mode 100644 index 00000000..aa2b0bb8 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.Spoil.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.T2Prep.doctree b/.doctrees/core/generated/deepmr.bloch.T2Prep.doctree new file mode 100644 index 00000000..93feeb38 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.T2Prep.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.bSSFPStep.doctree b/.doctrees/core/generated/deepmr.bloch.bSSFPStep.doctree new file mode 100644 index 00000000..af9e67c1 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.bSSFPStep.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.bssfpmrf.doctree b/.doctrees/core/generated/deepmr.bloch.bssfpmrf.doctree new file mode 100644 index 00000000..a0f11363 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.bssfpmrf.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.fse.doctree b/.doctrees/core/generated/deepmr.bloch.fse.doctree new file mode 100644 index 00000000..6852e1af Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.fse.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.memprage.doctree b/.doctrees/core/generated/deepmr.bloch.memprage.doctree new file mode 100644 index 00000000..1b027ca6 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.memprage.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.mprage.doctree b/.doctrees/core/generated/deepmr.bloch.mprage.doctree new file mode 100644 index 00000000..4274aa0a Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.mprage.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.observe.doctree b/.doctrees/core/generated/deepmr.bloch.observe.doctree new file mode 100644 index 00000000..56afa88f Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.observe.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.ssfpmrf.doctree b/.doctrees/core/generated/deepmr.bloch.ssfpmrf.doctree new file mode 100644 index 00000000..04753d70 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.ssfpmrf.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.susceptibility.doctree 
b/.doctrees/core/generated/deepmr.bloch.susceptibility.doctree new file mode 100644 index 00000000..64c75369 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.susceptibility.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.t1sat.doctree b/.doctrees/core/generated/deepmr.bloch.t1sat.doctree new file mode 100644 index 00000000..d768db9a Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.t1sat.doctree differ diff --git a/.doctrees/core/generated/deepmr.bloch.t1t2shuffling.doctree b/.doctrees/core/generated/deepmr.bloch.t1t2shuffling.doctree new file mode 100644 index 00000000..92084253 Binary files /dev/null and b/.doctrees/core/generated/deepmr.bloch.t1t2shuffling.doctree differ diff --git a/.doctrees/core/generated/deepmr.brainweb.doctree b/.doctrees/core/generated/deepmr.brainweb.doctree new file mode 100644 index 00000000..5a63a72a Binary files /dev/null and b/.doctrees/core/generated/deepmr.brainweb.doctree differ diff --git a/.doctrees/core/generated/deepmr.cartesian2D.doctree b/.doctrees/core/generated/deepmr.cartesian2D.doctree new file mode 100644 index 00000000..f5778334 Binary files /dev/null and b/.doctrees/core/generated/deepmr.cartesian2D.doctree differ diff --git a/.doctrees/core/generated/deepmr.cartesian3D.doctree b/.doctrees/core/generated/deepmr.cartesian3D.doctree new file mode 100644 index 00000000..5955013e Binary files /dev/null and b/.doctrees/core/generated/deepmr.cartesian3D.doctree differ diff --git a/.doctrees/core/generated/deepmr.custom_phantom.doctree b/.doctrees/core/generated/deepmr.custom_phantom.doctree new file mode 100644 index 00000000..a14ba33d Binary files /dev/null and b/.doctrees/core/generated/deepmr.custom_phantom.doctree differ diff --git a/.doctrees/core/generated/deepmr.fermi.doctree b/.doctrees/core/generated/deepmr.fermi.doctree new file mode 100644 index 00000000..56460138 Binary files /dev/null and b/.doctrees/core/generated/deepmr.fermi.doctree differ diff --git a/.doctrees/core/generated/deepmr.fft.apply_nufft_selfadj.doctree b/.doctrees/core/generated/deepmr.fft.apply_nufft_selfadj.doctree new file mode 100644 index 00000000..6e8db624 Binary files /dev/null and b/.doctrees/core/generated/deepmr.fft.apply_nufft_selfadj.doctree differ diff --git a/.doctrees/core/generated/deepmr.fft.apply_sparse_fft_selfadj.doctree b/.doctrees/core/generated/deepmr.fft.apply_sparse_fft_selfadj.doctree new file mode 100644 index 00000000..63a2d756 Binary files /dev/null and b/.doctrees/core/generated/deepmr.fft.apply_sparse_fft_selfadj.doctree differ diff --git a/.doctrees/core/generated/deepmr.fft.fft.doctree b/.doctrees/core/generated/deepmr.fft.fft.doctree new file mode 100644 index 00000000..543b2d5c Binary files /dev/null and b/.doctrees/core/generated/deepmr.fft.fft.doctree differ diff --git a/.doctrees/core/generated/deepmr.fft.ifft.doctree b/.doctrees/core/generated/deepmr.fft.ifft.doctree new file mode 100644 index 00000000..6aca69d9 Binary files /dev/null and b/.doctrees/core/generated/deepmr.fft.ifft.doctree differ diff --git a/.doctrees/core/generated/deepmr.fft.nufft.doctree b/.doctrees/core/generated/deepmr.fft.nufft.doctree new file mode 100644 index 00000000..cef4beb9 Binary files /dev/null and b/.doctrees/core/generated/deepmr.fft.nufft.doctree differ diff --git a/.doctrees/core/generated/deepmr.fft.nufft_adj.doctree b/.doctrees/core/generated/deepmr.fft.nufft_adj.doctree new file mode 100644 index 00000000..ce6ff0a4 Binary files /dev/null and 
b/.doctrees/core/generated/deepmr.fft.nufft_adj.doctree differ diff --git a/.doctrees/core/generated/deepmr.fft.plan_toeplitz_fft.doctree b/.doctrees/core/generated/deepmr.fft.plan_toeplitz_fft.doctree new file mode 100644 index 00000000..ce346124 Binary files /dev/null and b/.doctrees/core/generated/deepmr.fft.plan_toeplitz_fft.doctree differ diff --git a/.doctrees/core/generated/deepmr.fft.plan_toeplitz_nufft.doctree b/.doctrees/core/generated/deepmr.fft.plan_toeplitz_nufft.doctree new file mode 100644 index 00000000..2146874f Binary files /dev/null and b/.doctrees/core/generated/deepmr.fft.plan_toeplitz_nufft.doctree differ diff --git a/.doctrees/core/generated/deepmr.fft.sparse_fft.doctree b/.doctrees/core/generated/deepmr.fft.sparse_fft.doctree new file mode 100644 index 00000000..e17c5c97 Binary files /dev/null and b/.doctrees/core/generated/deepmr.fft.sparse_fft.doctree differ diff --git a/.doctrees/core/generated/deepmr.fft.sparse_ifft.doctree b/.doctrees/core/generated/deepmr.fft.sparse_ifft.doctree new file mode 100644 index 00000000..e1cf9b14 Binary files /dev/null and b/.doctrees/core/generated/deepmr.fft.sparse_ifft.doctree differ diff --git a/.doctrees/core/generated/deepmr.fwt.doctree b/.doctrees/core/generated/deepmr.fwt.doctree new file mode 100644 index 00000000..c9fdb51b Binary files /dev/null and b/.doctrees/core/generated/deepmr.fwt.doctree differ diff --git a/.doctrees/core/generated/deepmr.io.read_acqheader.doctree b/.doctrees/core/generated/deepmr.io.read_acqheader.doctree new file mode 100644 index 00000000..c98a4207 Binary files /dev/null and b/.doctrees/core/generated/deepmr.io.read_acqheader.doctree differ diff --git a/.doctrees/core/generated/deepmr.io.read_hdf5.doctree b/.doctrees/core/generated/deepmr.io.read_hdf5.doctree new file mode 100644 index 00000000..1f2f5971 Binary files /dev/null and b/.doctrees/core/generated/deepmr.io.read_hdf5.doctree differ diff --git a/.doctrees/core/generated/deepmr.io.read_image.doctree b/.doctrees/core/generated/deepmr.io.read_image.doctree new file mode 100644 index 00000000..5a6f58a7 Binary files /dev/null and b/.doctrees/core/generated/deepmr.io.read_image.doctree differ diff --git a/.doctrees/core/generated/deepmr.io.read_matfile.doctree b/.doctrees/core/generated/deepmr.io.read_matfile.doctree new file mode 100644 index 00000000..f469d26c Binary files /dev/null and b/.doctrees/core/generated/deepmr.io.read_matfile.doctree differ diff --git a/.doctrees/core/generated/deepmr.io.read_rawdata.doctree b/.doctrees/core/generated/deepmr.io.read_rawdata.doctree new file mode 100644 index 00000000..0eb41ce4 Binary files /dev/null and b/.doctrees/core/generated/deepmr.io.read_rawdata.doctree differ diff --git a/.doctrees/core/generated/deepmr.io.write_acqheader.doctree b/.doctrees/core/generated/deepmr.io.write_acqheader.doctree new file mode 100644 index 00000000..f5500e06 Binary files /dev/null and b/.doctrees/core/generated/deepmr.io.write_acqheader.doctree differ diff --git a/.doctrees/core/generated/deepmr.io.write_hdf5.doctree b/.doctrees/core/generated/deepmr.io.write_hdf5.doctree new file mode 100644 index 00000000..2c16f185 Binary files /dev/null and b/.doctrees/core/generated/deepmr.io.write_hdf5.doctree differ diff --git a/.doctrees/core/generated/deepmr.io.write_image.doctree b/.doctrees/core/generated/deepmr.io.write_image.doctree new file mode 100644 index 00000000..c38ecc0c Binary files /dev/null and b/.doctrees/core/generated/deepmr.io.write_image.doctree differ diff --git 
a/.doctrees/core/generated/deepmr.iwt.doctree b/.doctrees/core/generated/deepmr.iwt.doctree new file mode 100644 index 00000000..ef95281e Binary files /dev/null and b/.doctrees/core/generated/deepmr.iwt.doctree differ diff --git a/.doctrees/core/generated/deepmr.linops.FFTGramOp.doctree b/.doctrees/core/generated/deepmr.linops.FFTGramOp.doctree new file mode 100644 index 00000000..b389a02a Binary files /dev/null and b/.doctrees/core/generated/deepmr.linops.FFTGramOp.doctree differ diff --git a/.doctrees/core/generated/deepmr.linops.FFTOp.doctree b/.doctrees/core/generated/deepmr.linops.FFTOp.doctree new file mode 100644 index 00000000..975146ab Binary files /dev/null and b/.doctrees/core/generated/deepmr.linops.FFTOp.doctree differ diff --git a/.doctrees/core/generated/deepmr.linops.IFFTOp.doctree b/.doctrees/core/generated/deepmr.linops.IFFTOp.doctree new file mode 100644 index 00000000..a86f0057 Binary files /dev/null and b/.doctrees/core/generated/deepmr.linops.IFFTOp.doctree differ diff --git a/.doctrees/core/generated/deepmr.linops.NUFFTAdjointOp.doctree b/.doctrees/core/generated/deepmr.linops.NUFFTAdjointOp.doctree new file mode 100644 index 00000000..021131c9 Binary files /dev/null and b/.doctrees/core/generated/deepmr.linops.NUFFTAdjointOp.doctree differ diff --git a/.doctrees/core/generated/deepmr.linops.NUFFTGramOp.doctree b/.doctrees/core/generated/deepmr.linops.NUFFTGramOp.doctree new file mode 100644 index 00000000..0e77a925 Binary files /dev/null and b/.doctrees/core/generated/deepmr.linops.NUFFTGramOp.doctree differ diff --git a/.doctrees/core/generated/deepmr.linops.NUFFTOp.doctree b/.doctrees/core/generated/deepmr.linops.NUFFTOp.doctree new file mode 100644 index 00000000..914cbec6 Binary files /dev/null and b/.doctrees/core/generated/deepmr.linops.NUFFTOp.doctree differ diff --git a/.doctrees/core/generated/deepmr.linops.SenseAdjointOp.doctree b/.doctrees/core/generated/deepmr.linops.SenseAdjointOp.doctree new file mode 100644 index 00000000..2e48f73d Binary files /dev/null and b/.doctrees/core/generated/deepmr.linops.SenseAdjointOp.doctree differ diff --git a/.doctrees/core/generated/deepmr.linops.SenseOp.doctree b/.doctrees/core/generated/deepmr.linops.SenseOp.doctree new file mode 100644 index 00000000..c33e273e Binary files /dev/null and b/.doctrees/core/generated/deepmr.linops.SenseOp.doctree differ diff --git a/.doctrees/core/generated/deepmr.optim.ADMMStep.doctree b/.doctrees/core/generated/deepmr.optim.ADMMStep.doctree new file mode 100644 index 00000000..d3e971ec Binary files /dev/null and b/.doctrees/core/generated/deepmr.optim.ADMMStep.doctree differ diff --git a/.doctrees/core/generated/deepmr.optim.CGStep.doctree b/.doctrees/core/generated/deepmr.optim.CGStep.doctree new file mode 100644 index 00000000..3fce340f Binary files /dev/null and b/.doctrees/core/generated/deepmr.optim.CGStep.doctree differ diff --git a/.doctrees/core/generated/deepmr.optim.PGDStep.doctree b/.doctrees/core/generated/deepmr.optim.PGDStep.doctree new file mode 100644 index 00000000..6ee9d056 Binary files /dev/null and b/.doctrees/core/generated/deepmr.optim.PGDStep.doctree differ diff --git a/.doctrees/core/generated/deepmr.optim.admm_solve.doctree b/.doctrees/core/generated/deepmr.optim.admm_solve.doctree new file mode 100644 index 00000000..d13133b4 Binary files /dev/null and b/.doctrees/core/generated/deepmr.optim.admm_solve.doctree differ diff --git a/.doctrees/core/generated/deepmr.optim.cg_solve.doctree b/.doctrees/core/generated/deepmr.optim.cg_solve.doctree new file mode 
100644 index 00000000..cb31276b Binary files /dev/null and b/.doctrees/core/generated/deepmr.optim.cg_solve.doctree differ diff --git a/.doctrees/core/generated/deepmr.optim.pgd_solve.doctree b/.doctrees/core/generated/deepmr.optim.pgd_solve.doctree new file mode 100644 index 00000000..15c43a18 Binary files /dev/null and b/.doctrees/core/generated/deepmr.optim.pgd_solve.doctree differ diff --git a/.doctrees/core/generated/deepmr.optim.power_method.doctree b/.doctrees/core/generated/deepmr.optim.power_method.doctree new file mode 100644 index 00000000..498665dc Binary files /dev/null and b/.doctrees/core/generated/deepmr.optim.power_method.doctree differ diff --git a/.doctrees/core/generated/deepmr.patches2tensor.doctree b/.doctrees/core/generated/deepmr.patches2tensor.doctree new file mode 100644 index 00000000..946cb6fe Binary files /dev/null and b/.doctrees/core/generated/deepmr.patches2tensor.doctree differ diff --git a/.doctrees/core/generated/deepmr.phase_cycling.doctree b/.doctrees/core/generated/deepmr.phase_cycling.doctree new file mode 100644 index 00000000..0bfcbd47 Binary files /dev/null and b/.doctrees/core/generated/deepmr.phase_cycling.doctree differ diff --git a/.doctrees/core/generated/deepmr.piecewise_fa.doctree b/.doctrees/core/generated/deepmr.piecewise_fa.doctree new file mode 100644 index 00000000..43c23c6d Binary files /dev/null and b/.doctrees/core/generated/deepmr.piecewise_fa.doctree differ diff --git a/.doctrees/core/generated/deepmr.prox.LLRDenoiser.doctree b/.doctrees/core/generated/deepmr.prox.LLRDenoiser.doctree new file mode 100644 index 00000000..504cd7ae Binary files /dev/null and b/.doctrees/core/generated/deepmr.prox.LLRDenoiser.doctree differ diff --git a/.doctrees/core/generated/deepmr.prox.TGVDenoiser.doctree b/.doctrees/core/generated/deepmr.prox.TGVDenoiser.doctree new file mode 100644 index 00000000..4051e324 Binary files /dev/null and b/.doctrees/core/generated/deepmr.prox.TGVDenoiser.doctree differ diff --git a/.doctrees/core/generated/deepmr.prox.TVDenoiser.doctree b/.doctrees/core/generated/deepmr.prox.TVDenoiser.doctree new file mode 100644 index 00000000..64ffe76e Binary files /dev/null and b/.doctrees/core/generated/deepmr.prox.TVDenoiser.doctree differ diff --git a/.doctrees/core/generated/deepmr.prox.WaveletDenoiser.doctree b/.doctrees/core/generated/deepmr.prox.WaveletDenoiser.doctree new file mode 100644 index 00000000..1f94c1da Binary files /dev/null and b/.doctrees/core/generated/deepmr.prox.WaveletDenoiser.doctree differ diff --git a/.doctrees/core/generated/deepmr.prox.WaveletDictDenoiser.doctree b/.doctrees/core/generated/deepmr.prox.WaveletDictDenoiser.doctree new file mode 100644 index 00000000..686e41e5 Binary files /dev/null and b/.doctrees/core/generated/deepmr.prox.WaveletDictDenoiser.doctree differ diff --git a/.doctrees/core/generated/deepmr.prox.llr_denoise.doctree b/.doctrees/core/generated/deepmr.prox.llr_denoise.doctree new file mode 100644 index 00000000..8165542e Binary files /dev/null and b/.doctrees/core/generated/deepmr.prox.llr_denoise.doctree differ diff --git a/.doctrees/core/generated/deepmr.prox.tgv_denoise.doctree b/.doctrees/core/generated/deepmr.prox.tgv_denoise.doctree new file mode 100644 index 00000000..ae309060 Binary files /dev/null and b/.doctrees/core/generated/deepmr.prox.tgv_denoise.doctree differ diff --git a/.doctrees/core/generated/deepmr.prox.tv_denoise.doctree b/.doctrees/core/generated/deepmr.prox.tv_denoise.doctree new file mode 100644 index 00000000..d0789871 Binary files /dev/null and 
b/.doctrees/core/generated/deepmr.prox.tv_denoise.doctree differ diff --git a/.doctrees/core/generated/deepmr.prox.wavelet_denoise.doctree b/.doctrees/core/generated/deepmr.prox.wavelet_denoise.doctree new file mode 100644 index 00000000..9ac3d8c1 Binary files /dev/null and b/.doctrees/core/generated/deepmr.prox.wavelet_denoise.doctree differ diff --git a/.doctrees/core/generated/deepmr.prox.wavelet_dict_denoise.doctree b/.doctrees/core/generated/deepmr.prox.wavelet_dict_denoise.doctree new file mode 100644 index 00000000..ac4b3753 Binary files /dev/null and b/.doctrees/core/generated/deepmr.prox.wavelet_dict_denoise.doctree differ diff --git a/.doctrees/core/generated/deepmr.radial.doctree b/.doctrees/core/generated/deepmr.radial.doctree new file mode 100644 index 00000000..3cb045c2 Binary files /dev/null and b/.doctrees/core/generated/deepmr.radial.doctree differ diff --git a/.doctrees/core/generated/deepmr.radial_proj.doctree b/.doctrees/core/generated/deepmr.radial_proj.doctree new file mode 100644 index 00000000..9bebefb2 Binary files /dev/null and b/.doctrees/core/generated/deepmr.radial_proj.doctree differ diff --git a/.doctrees/core/generated/deepmr.radial_stack.doctree b/.doctrees/core/generated/deepmr.radial_stack.doctree new file mode 100644 index 00000000..3c752294 Binary files /dev/null and b/.doctrees/core/generated/deepmr.radial_stack.doctree differ diff --git a/.doctrees/core/generated/deepmr.resample.doctree b/.doctrees/core/generated/deepmr.resample.doctree new file mode 100644 index 00000000..df85f640 Binary files /dev/null and b/.doctrees/core/generated/deepmr.resample.doctree differ diff --git a/.doctrees/core/generated/deepmr.resize.doctree b/.doctrees/core/generated/deepmr.resize.doctree new file mode 100644 index 00000000..7350f5e7 Binary files /dev/null and b/.doctrees/core/generated/deepmr.resize.doctree differ diff --git a/.doctrees/core/generated/deepmr.rf_spoiling.doctree b/.doctrees/core/generated/deepmr.rf_spoiling.doctree new file mode 100644 index 00000000..6d53a92c Binary files /dev/null and b/.doctrees/core/generated/deepmr.rf_spoiling.doctree differ diff --git a/.doctrees/core/generated/deepmr.rigid_motion.doctree b/.doctrees/core/generated/deepmr.rigid_motion.doctree new file mode 100644 index 00000000..424b0cf9 Binary files /dev/null and b/.doctrees/core/generated/deepmr.rigid_motion.doctree differ diff --git a/.doctrees/core/generated/deepmr.rosette.doctree b/.doctrees/core/generated/deepmr.rosette.doctree new file mode 100644 index 00000000..c7957df8 Binary files /dev/null and b/.doctrees/core/generated/deepmr.rosette.doctree differ diff --git a/.doctrees/core/generated/deepmr.rosette_proj.doctree b/.doctrees/core/generated/deepmr.rosette_proj.doctree new file mode 100644 index 00000000..f9c3d892 Binary files /dev/null and b/.doctrees/core/generated/deepmr.rosette_proj.doctree differ diff --git a/.doctrees/core/generated/deepmr.rosette_stack.doctree b/.doctrees/core/generated/deepmr.rosette_stack.doctree new file mode 100644 index 00000000..3d2c2724 Binary files /dev/null and b/.doctrees/core/generated/deepmr.rosette_stack.doctree differ diff --git a/.doctrees/core/generated/deepmr.rss.doctree b/.doctrees/core/generated/deepmr.rss.doctree new file mode 100644 index 00000000..e6a6c98c Binary files /dev/null and b/.doctrees/core/generated/deepmr.rss.doctree differ diff --git a/.doctrees/core/generated/deepmr.sensmap.doctree b/.doctrees/core/generated/deepmr.sensmap.doctree new file mode 100644 index 00000000..da7d9ecb Binary files /dev/null and 
b/.doctrees/core/generated/deepmr.sensmap.doctree differ diff --git a/.doctrees/core/generated/deepmr.shepp_logan.doctree b/.doctrees/core/generated/deepmr.shepp_logan.doctree new file mode 100644 index 00000000..09a8842e Binary files /dev/null and b/.doctrees/core/generated/deepmr.shepp_logan.doctree differ diff --git a/.doctrees/core/generated/deepmr.sinusoidal_fa.doctree b/.doctrees/core/generated/deepmr.sinusoidal_fa.doctree new file mode 100644 index 00000000..5c6f4845 Binary files /dev/null and b/.doctrees/core/generated/deepmr.sinusoidal_fa.doctree differ diff --git a/.doctrees/core/generated/deepmr.spiral.doctree b/.doctrees/core/generated/deepmr.spiral.doctree new file mode 100644 index 00000000..fc03ecd1 Binary files /dev/null and b/.doctrees/core/generated/deepmr.spiral.doctree differ diff --git a/.doctrees/core/generated/deepmr.spiral_proj.doctree b/.doctrees/core/generated/deepmr.spiral_proj.doctree new file mode 100644 index 00000000..01bb9b39 Binary files /dev/null and b/.doctrees/core/generated/deepmr.spiral_proj.doctree differ diff --git a/.doctrees/core/generated/deepmr.spiral_stack.doctree b/.doctrees/core/generated/deepmr.spiral_stack.doctree new file mode 100644 index 00000000..472540c8 Binary files /dev/null and b/.doctrees/core/generated/deepmr.spiral_stack.doctree differ diff --git a/.doctrees/core/generated/deepmr.svd.doctree b/.doctrees/core/generated/deepmr.svd.doctree new file mode 100644 index 00000000..c157de84 Binary files /dev/null and b/.doctrees/core/generated/deepmr.svd.doctree differ diff --git a/.doctrees/core/generated/deepmr.tensor2patches.doctree b/.doctrees/core/generated/deepmr.tensor2patches.doctree new file mode 100644 index 00000000..f227eac5 Binary files /dev/null and b/.doctrees/core/generated/deepmr.tensor2patches.doctree differ diff --git a/.doctrees/core/io.doctree b/.doctrees/core/io.doctree new file mode 100644 index 00000000..cc5a0925 Binary files /dev/null and b/.doctrees/core/io.doctree differ diff --git a/.doctrees/core/linops.doctree b/.doctrees/core/linops.doctree new file mode 100644 index 00000000..d00bc41a Binary files /dev/null and b/.doctrees/core/linops.doctree differ diff --git a/.doctrees/core/optim.doctree b/.doctrees/core/optim.doctree new file mode 100644 index 00000000..5737ffb5 Binary files /dev/null and b/.doctrees/core/optim.doctree differ diff --git a/.doctrees/core/prox.doctree b/.doctrees/core/prox.doctree new file mode 100644 index 00000000..67df419f Binary files /dev/null and b/.doctrees/core/prox.doctree differ diff --git a/.doctrees/core/signal.doctree b/.doctrees/core/signal.doctree new file mode 100644 index 00000000..1a1616ba Binary files /dev/null and b/.doctrees/core/signal.doctree differ diff --git a/.doctrees/core/vobj.doctree b/.doctrees/core/vobj.doctree new file mode 100644 index 00000000..95fc8f3b Binary files /dev/null and b/.doctrees/core/vobj.doctree differ diff --git a/.doctrees/environment.pickle b/.doctrees/environment.pickle new file mode 100644 index 00000000..925cc523 Binary files /dev/null and b/.doctrees/environment.pickle differ diff --git a/.doctrees/index.doctree b/.doctrees/index.doctree new file mode 100644 index 00000000..e14afd38 Binary files /dev/null and b/.doctrees/index.doctree differ diff --git a/.doctrees/misc/changelog.doctree b/.doctrees/misc/changelog.doctree new file mode 100644 index 00000000..e55eed10 Binary files /dev/null and b/.doctrees/misc/changelog.doctree differ diff --git a/.doctrees/misc/conduct.doctree b/.doctrees/misc/conduct.doctree new file mode 100644 index 
00000000..84d2162d Binary files /dev/null and b/.doctrees/misc/conduct.doctree differ diff --git a/.doctrees/misc/contributing.doctree b/.doctrees/misc/contributing.doctree new file mode 100644 index 00000000..262c482a Binary files /dev/null and b/.doctrees/misc/contributing.doctree differ diff --git a/.doctrees/recon/alg.doctree b/.doctrees/recon/alg.doctree new file mode 100644 index 00000000..81f3df90 Binary files /dev/null and b/.doctrees/recon/alg.doctree differ diff --git a/.doctrees/recon/calib.doctree b/.doctrees/recon/calib.doctree new file mode 100644 index 00000000..2fdd22db Binary files /dev/null and b/.doctrees/recon/calib.doctree differ diff --git a/.doctrees/recon/generated/deepmr.recon.EncodingOp.doctree b/.doctrees/recon/generated/deepmr.recon.EncodingOp.doctree new file mode 100644 index 00000000..91c46808 Binary files /dev/null and b/.doctrees/recon/generated/deepmr.recon.EncodingOp.doctree differ diff --git a/.doctrees/recon/generated/deepmr.recon.espirit_cal.doctree b/.doctrees/recon/generated/deepmr.recon.espirit_cal.doctree new file mode 100644 index 00000000..420e93d3 Binary files /dev/null and b/.doctrees/recon/generated/deepmr.recon.espirit_cal.doctree differ diff --git a/.doctrees/recon/generated/deepmr.recon.fse_fit.doctree b/.doctrees/recon/generated/deepmr.recon.fse_fit.doctree new file mode 100644 index 00000000..692cb73d Binary files /dev/null and b/.doctrees/recon/generated/deepmr.recon.fse_fit.doctree differ diff --git a/.doctrees/recon/generated/deepmr.recon.intensity_scaling.doctree b/.doctrees/recon/generated/deepmr.recon.intensity_scaling.doctree new file mode 100644 index 00000000..02adbbc9 Binary files /dev/null and b/.doctrees/recon/generated/deepmr.recon.intensity_scaling.doctree differ diff --git a/.doctrees/recon/generated/deepmr.recon.mpnrage_fit.doctree b/.doctrees/recon/generated/deepmr.recon.mpnrage_fit.doctree new file mode 100644 index 00000000..f5afd13b Binary files /dev/null and b/.doctrees/recon/generated/deepmr.recon.mpnrage_fit.doctree differ diff --git a/.doctrees/recon/generated/deepmr.recon.recon_lstsq.doctree b/.doctrees/recon/generated/deepmr.recon.recon_lstsq.doctree new file mode 100644 index 00000000..e8f0a369 Binary files /dev/null and b/.doctrees/recon/generated/deepmr.recon.recon_lstsq.doctree differ diff --git a/.doctrees/recon/inference.doctree b/.doctrees/recon/inference.doctree new file mode 100644 index 00000000..2696dd49 Binary files /dev/null and b/.doctrees/recon/inference.doctree differ diff --git a/.doctrees/tutorials/index.doctree b/.doctrees/tutorials/index.doctree new file mode 100644 index 00000000..5c1022a6 Binary files /dev/null and b/.doctrees/tutorials/index.doctree differ diff --git a/.doctrees/user_guide/getting_started.doctree b/.doctrees/user_guide/getting_started.doctree new file mode 100644 index 00000000..f8445250 Binary files /dev/null and b/.doctrees/user_guide/getting_started.doctree differ diff --git a/.doctrees/user_guide/overview.doctree b/.doctrees/user_guide/overview.doctree new file mode 100644 index 00000000..dc493f56 Binary files /dev/null and b/.doctrees/user_guide/overview.doctree differ diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/_modules/deepmr/_signal/filter.html b/_modules/deepmr/_signal/filter.html new file mode 100644 index 00000000..3123c671 --- /dev/null +++ b/_modules/deepmr/_signal/filter.html @@ -0,0 +1,636 @@ + + + + + + + + + + + deepmr._signal.filter — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + 
Source code for deepmr._signal.filter

+"""Signal filtering routines."""
+
+__all__ = ["fermi"]
+
+import torch
+
+
+
+def fermi(ndim, size, width=None):
+    """
+    Build an n-dimensional Fermi filter.
+
+    This routine can handle isotropic ND matrices. The user
+    can specify the size of the window support and the FWHM.
+    The filter can be used in the context of image processing
+    to mitigate ringing artifacts [1].
+
+    Parameters
+    ----------
+    ndim : int
+        Number of dimensions (e.g., ``1=1D``, ``2=2D``, ``3=3D``).
+    size : int
+        Support of the window. Filter size will be ``ndim * [size]``.
+    width : int, optional
+        Full width half maximum of the filter.
+        If ``None``, it is automatically set to ``size``. The default is ``None``.
+
+    Returns
+    -------
+    filt : torch.Tensor
+        Fermi window of shape ``ndim * [size]`` and ``FWHM = width``.
+
+    Example
+    -------
+    >>> import deepmr
+
+    We can design e.g., 1D, 2D or 3D filters as:
+
+    >>> filt1d = deepmr.fermi(1, 128)
+    >>> filt1d.shape
+    torch.Size([128])
+    >>> filt2d = deepmr.fermi(2, 128)
+    >>> filt2d.shape
+    torch.Size([128, 128])
+    >>> filt3d = deepmr.fermi(3, 128)
+    >>> filt3d.shape
+    torch.Size([128, 128, 128])
+
+    By default, FWHM is equal to the support size:
+
+    >>> (filt1d >= 0.5).sum()
+    tensor(128)
+
+    The user can specify a smaller FWHM via the ``width`` parameter:
+
+    >>> filt1d = deepmr.fermi(1, 128, width=32)
+    >>> filt1d.shape
+    torch.Size([128])
+    >>> (filt1d >= 0.5).sum()
+    tensor(47)
+
+    The discrepancy between nominal and actual FWHM is due to signal
+    discretization.
+
+    References
+    ----------
+    [1] Bernstein, M.A., Fain, S.B. and Riederer, S.J. (2001),
+    Effect of windowing and zero-filled reconstruction of MRI data
+    on spatial resolution and acquisition strategy.
+    J. Magn. Reson. Imaging, 14: 270-280.
+    https://doi.org/10.1002/jmri.1183
+
+    """
+    # default width
+    if width is None:
+        width = size
+
+    # get radius
+    radius = int(width // 2)
+
+    # build grid, normalized so that u = 1 corresponds to window FWHM
+    R = [
+        torch.arange(int(-size // 2), int(size // 2), dtype=torch.float32)
+        for n in range(ndim)
+    ]
+
+    # get transition width
+    T = 20 * size  # width / 128
+
+    # build center-out grid
+    R = torch.meshgrid(*R, indexing="xy")
+    R = torch.stack(R, dim=0)
+    R = (R**2).sum(dim=0) ** 0.5
+
+    # build filter
+    filt = 1 / (1 + torch.exp((R - radius)) / T)
+    filt /= filt.max()
+
+    return filt
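A minimal usage sketch (the ``width`` value and the use of ``deepmr.fft`` for the centered FFT are illustrative choices, not prescribed by this module): applying the window to Cartesian k-space before the inverse FFT softens truncation ringing:

>>> import deepmr
>>> img = deepmr.shepp_logan(128)
>>> ksp = deepmr.fft.fft(img)
>>> filt = deepmr.fermi(2, 128, width=96)
>>> img_filt = deepmr.fft.ifft(filt * ksp).real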
diff --git a/_modules/deepmr/_signal/fold.html b/_modules/deepmr/_signal/fold.html new file mode 100644 index 00000000..c2fa252e --- /dev/null +++ b/_modules/deepmr/_signal/fold.html @@ -0,0 +1,784 @@
Source code for deepmr._signal.fold

+"""Patch extraction routines."""
+
+__all__ = ["tensor2patches", "patches2tensor"]
+
+import numpy as np
+import torch
+
+from .._external.unfoldNd.fold import foldNd
+
+
+
+def tensor2patches(image, patch_shape, patch_stride=None):
+    """
+    View tensor as overlapping hyperrectangular patches, with a given stride.
+
+    Adapted from [1, 2].
+
+    Parameters
+    ----------
+    image : torch.Tensor
+        N-dimensional image tensor, with the last ``ndim`` dimensions
+        being the image dimensions.
+    patch_shape : Iterable[int]
+        Shape of the patch of length ``ndim``.
+    patch_stride : Iterable[int], optional
+        Stride of the windows of length ``ndim``.
+        The default is the patch size (i.e., non-overlapping).
+
+    Returns
+    -------
+    patches : torch.Tensor
+        Tensor of (overlapping) patches of shape:
+
+        * ``1D: (..., npatches_x, patch_size_x)``
+        * ``2D: (..., npatches_y, npatches_x, patch_size_y, patch_size_x)``
+        * ``3D: (..., npatches_z, npatches_y, npatches_x, patch_size_z, patch_size_y, patch_size_x)``
+
+    References
+    ----------
+    [1] https://stackoverflow.com/questions/64462917/view-as-windows-from-skimage-but-in-pytorch \n
+    [2] https://discuss.pytorch.org/t/patch-making-does-pytorch-have-anything-to-offer/33850/10
+
+    """
+    # be sure it is a tensor
+    image = torch.as_tensor(image)
+
+    # default stride
+    if patch_stride is None:
+        patch_stride = patch_shape
+
+    # cast to array
+    patch_shape = np.asarray(patch_shape)
+    patch_stride = np.asarray(patch_stride)
+
+    # verify that strides and shapes are > 0
+    assert np.all(patch_shape > 0), f"Patch shape must be > 0; got {patch_shape}"
+    assert np.all(patch_stride > 0), f"Patch stride must be > 0; got {patch_stride}"
+    assert np.all(
+        patch_stride <= patch_shape
+    ), "We do not support non-contiguous patches (i.e., stride larger than patch shape)."
+
+    # get number of dimensions
+    ndim = len(patch_shape)
+    batch_shape = image.shape[:-ndim]
+
+    # count number of patches for each dimension
+    ishape = np.asarray(image.shape[-ndim:])
+    num_patches = np.ceil(ishape / patch_stride)
+    num_patches = num_patches.astype(int)
+
+    # pad if required
+    padsize = ((num_patches - 1) * patch_stride + patch_shape) - ishape
+    padsize = np.stack((0 * padsize, padsize), axis=-1)
+    padsize = padsize.ravel()
+    patches = torch.nn.functional.pad(image, tuple(padsize))
+
+    # get reshape to (b, nz, ny, nx), (b, ny, nx), (b, nx) for 3, 2, and 1D, respectively
+    patches = patches.view(int(np.prod(batch_shape)), *patches.shape[-ndim:])
+
+    if ndim == 3:
+        kc, kh, kw = patch_shape  # kernel size
+        dc, dh, dw = patch_stride  # stride
+        patches = patches.unfold(1, kc, dc).unfold(2, kh, dh).unfold(3, kw, dw)
+    elif ndim == 2:
+        kh, kw = patch_shape  # kernel size
+        dh, dw = patch_stride  # stride
+        patches = patches.unfold(1, kh, dh).unfold(2, kw, dw)
+    elif ndim == 1:
+        kw = patch_shape  # kernel size
+        dw = patch_stride  # stride
+        patches = patches.unfold(1, kw, dw)
+    else:
+        raise ValueError(f"Only support ndim=1, 2, or 3, got {ndim}")
+
+    # reformat
+    patches = patches.reshape(*batch_shape, *patches.shape[1:])
+
+    return patches
+ + +
+def patches2tensor(patches, shape, patch_shape, patch_stride=None):
+    """
+    Accumulate patches into a tensor.
+
+    Adapted from [1] using [2].
+
+    Parameters
+    ----------
+    patches : torch.Tensor
+        Tensor of (overlapping) patches of shape:
+
+        * ``1D: (..., npatches_x, patch_size_x)``
+        * ``2D: (..., npatches_y, npatches_x, patch_size_y, patch_size_x)``
+        * ``3D: (..., npatches_z, npatches_y, npatches_x, patch_size_z, patch_size_y, patch_size_x)``
+
+    shape : Iterable[int]
+        Output shape of length ``ndim``.
+        If scalar, assume isotropic matrix of shape ``ndim * [shape]``.
+    patch_shape : Iterable[int]
+        Shape of the patch of length ``ndim``.
+    patch_stride : Iterable[int], optional
+        Stride of the windows of length ``ndim``.
+        The default is the patch size (i.e., non-overlapping).
+
+    Returns
+    -------
+    image : torch.Tensor
+        N-dimensional image tensor, with the last ``ndim`` dimensions
+        being the image dimensions.
+
+    References
+    ----------
+    [1] https://discuss.pytorch.org/t/how-to-split-tensors-with-overlap-and-then-reconstruct-the-original-tensor/70261 \n
+    [2] https://github.com/f-dangel/unfoldNd
+
+    """
+    # be sure it is a tensor
+    patches = torch.as_tensor(patches)
+
+    # default stride
+    if patch_stride is None:
+        patch_stride = patch_shape
+
+    # cast to array
+    patch_shape = np.asarray(patch_shape)
+    patch_stride = np.asarray(patch_stride)
+
+    # verify that strides and shapes are > 0
+    assert np.all(patch_shape > 0), f"Patch shape must be > 0; got {patch_shape}"
+    assert np.all(patch_stride > 0), f"Patch stride must be > 0; got {patch_stride}"
+    assert np.all(
+        patch_stride <= patch_shape
+    ), "We do not support non-contiguous patches (i.e., stride larger than patch shape)."
+
+    # get number of dimensions
+    ndim = len(shape)
+    batch_shape = patches.shape[: -2 * ndim]
+
+    # count number of patches for each dimension
+    ishape = np.asarray(shape)
+    num_patches = np.ceil(ishape / patch_stride)
+    num_patches = num_patches.astype(int)
+
+    # pad if required
+    padsize = ((num_patches - 1) * patch_stride + patch_shape) - ishape
+    padded_shape = shape + padsize
+
+    # perform folding
+    if np.allclose(patch_shape, patch_stride):
+        image = _fold_nonoverlapping(patches, ndim, padded_shape, batch_shape)
+    else:
+        image = _fold_overlapping(
+            patches, padded_shape, batch_shape, patch_shape, patch_stride
+        )
+
+    # crop
+    if ndim == 1:
+        image = image[:, : shape[0]]
+    elif ndim == 2:
+        image = image[:, : shape[0], : shape[1]]
+    elif ndim == 3:
+        image = image[:, : shape[0], : shape[1], : shape[2]]
+    else:
+        raise ValueError(f"Only support ndim=1, 2, or 3, got {ndim}")
+
+    # final reshape
+    image = image.reshape(*batch_shape, *shape)
+
+    return image
+
+
+# %% local subroutines
+def _fold_overlapping(patches, padded_shape, batch_shape, patch_shape, patch_stride):
+    # get reshape to (b, nz, ny, nx), (b, ny, nx), (b, nx) for 3, 2, and 1D, respectively
+    patches = patches.reshape(int(np.prod(batch_shape)), -1, int(np.prod(patch_shape)))
+    patches = patches.permute(0, 2, 1)
+
+    # get image
+    weight = foldNd(
+        torch.ones_like(patches[[0]]),
+        tuple(padded_shape),
+        tuple(patch_shape),
+        stride=tuple(patch_stride),
+    )
+    image = foldNd(
+        patches, tuple(padded_shape), tuple(patch_shape), stride=tuple(patch_stride)
+    )
+
+    # get rid of channel dim
+    weight = weight[0, 0]
+    image = image[:, 0]
+
+    # final reshape
+    image = image.reshape(-1, *padded_shape)
+    weight = weight.reshape(*padded_shape)
+
+    return (image / weight).to(patches.dtype)
+
+
+def _fold_nonoverlapping(patches, ndim, padded_shape, batch_shape):
+    # get reshape to (b, nz, ny, nx), (b, ny, nx), (b, nx) for 3, 2, and 1D, respectively
+    unfold_shape = patches.shape[-2 * ndim :]
+    patches = patches.view(int(np.prod(batch_shape)), *unfold_shape)
+
+    if ndim == 3:
+        nz = unfold_shape[0] * unfold_shape[3]
+        ny = unfold_shape[1] * unfold_shape[4]
+        nx = unfold_shape[2] * unfold_shape[5]
+        patches = patches.permute(0, 1, 4, 2, 5, 3, 6)
+        image = patches.reshape(-1, nz, ny, nx)
+    elif ndim == 2:
+        ny = unfold_shape[0] * unfold_shape[2]
+        nx = unfold_shape[1] * unfold_shape[3]
+        patches = patches.permute(0, 1, 3, 2, 4)
+        image = patches.reshape(-1, ny, nx)
+    elif ndim == 1:
+        nx = unfold_shape[0] * unfold_shape[1]
+        image = patches.reshape(-1, nx)
+    else:
+        raise ValueError(f"Only support ndim=1, 2, or 3, got {ndim}")
+
+    # final reshape
+    image = image.reshape(-1, *padded_shape)
+
+    return image
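A minimal round-trip sketch (the 4x4 matrix and 2x2 patch size are arbitrary illustrative choices):

>>> import torch
>>> import deepmr
>>> img = torch.arange(16.0).reshape(4, 4)
>>> patches = deepmr.tensor2patches(img, (2, 2))
>>> patches.shape
torch.Size([2, 2, 2, 2])
>>> img2 = deepmr.patches2tensor(patches, (4, 4), (2, 2))
>>> torch.allclose(img, img2)
True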
diff --git a/_modules/deepmr/_signal/resize.html b/_modules/deepmr/_signal/resize.html new file mode 100644 index 00000000..f211bb1a --- /dev/null +++ b/_modules/deepmr/_signal/resize.html @@ -0,0 +1,746 @@
Source code for deepmr._signal.resize

+"""Array shape manipulation routines."""
+
+__all__ = ["resize", "resample"]
+
+import numpy as np
+import torch
+
+from .filter import fermi
+
+
+
+def resize(input, oshape):
+    """
+    Resize with zero-padding or cropping.
+
+    Adapted from SigPy [1].
+
+    Parameters
+    ----------
+    input : np.ndarray | torch.Tensor
+        Input tensor of shape ``(..., ishape)``.
+    oshape : Iterable
+        Output shape.
+
+    Returns
+    -------
+    output : np.ndarray | torch.Tensor
+        Zero-padded or cropped tensor of shape ``(..., oshape)``.
+
+    Examples
+    --------
+    >>> import torch
+    >>> import deepmr
+
+    We can pad tensors to desired shape:
+
+    >>> x = torch.tensor([0, 1, 0])
+    >>> y = deepmr.resize(x, [5])
+    >>> y
+    tensor([0, 0, 1, 0, 0])
+
+    Batch dimensions are automatically expanded (pad will be applied starting from rightmost dimension):
+
+    >>> x = torch.tensor([0, 1, 0])[None, ...]
+    >>> x.shape
+    torch.Size([1, 3])
+    >>> y = deepmr.resize(x, [5])  # len(oshape) == 1
+    >>> y.shape
+    torch.Size([1, 5])
+
+    Similarly, if oshape is smaller than ishape, the tensor will be cropped:
+
+    >>> x = torch.tensor([0, 0, 1, 0, 0])
+    >>> y = deepmr.resize(x, [3])
+    >>> y
+    tensor([0, 1, 0])
+
+    Again, batch dimensions are automatically expanded:
+
+    >>> x = torch.tensor([0, 0, 1, 0, 0])[None, ...]
+    >>> x.shape
+    torch.Size([1, 5])
+    >>> y = deepmr.resize(x, [3])  # len(oshape) == 1
+    >>> y.shape
+    torch.Size([1, 3])
+
+    References
+    ----------
+    [1] https://github.com/mikgroup/sigpy/blob/main/sigpy/util.py
+
+    """
+    if isinstance(input, np.ndarray):
+        isnumpy = True
+        input = torch.as_tensor(input)
+    else:
+        isnumpy = False
+
+    if isinstance(oshape, int):
+        oshape = [oshape]
+
+    ishape1, oshape1 = _expand_shapes(input.shape, oshape)
+
+    if ishape1 == oshape1:
+        return input
+
+    # shift not supported for now
+    ishift = [max(i // 2 - o // 2, 0) for i, o in zip(ishape1, oshape1)]
+    oshift = [max(o // 2 - i // 2, 0) for i, o in zip(ishape1, oshape1)]
+
+    copy_shape = [
+        min(i - si, o - so) for i, si, o, so in zip(ishape1, ishift, oshape1, oshift)
+    ]
+    islice = tuple([slice(si, si + c) for si, c in zip(ishift, copy_shape)])
+    oslice = tuple([slice(so, so + c) for so, c in zip(oshift, copy_shape)])
+
+    output = torch.zeros(oshape1, dtype=input.dtype, device=input.device)
+    input = input.reshape(ishape1)
+    output[oslice] = input[islice]
+
+    if isnumpy:
+        output = output.numpy(force=True)
+
+    return output
+ + +
+def resample(input, oshape, filt=True, polysmooth=False):
+    """
+    Resample an n-dimensional signal.
+
+    Parameters
+    ----------
+    input : np.ndarray | torch.Tensor
+        Input tensor of shape ``(..., ishape)``.
+    oshape : Iterable
+        Output shape.
+    filt : bool, optional
+        If True and signal is upsampled (i.e., ``any(oshape > ishape)``),
+        apply Fermi filter to limit ringing.
+        The default is True.
+    polysmooth : bool, optional
+        If True, perform polynomial smoothing.
+        The default is False. !!! NOT IMPLEMENTED YET !!!
+
+    Returns
+    -------
+    output : np.ndarray | torch.Tensor
+        Resampled tensor of shape ``(..., oshape)``.
+
+    """
+    if isinstance(input, np.ndarray):
+        isnumpy = True
+        input = torch.as_tensor(input)
+    else:
+        isnumpy = False
+
+    if isinstance(oshape, int):
+        oshape = [oshape]
+
+    # first, get number of dimensions
+    ndim = len(oshape)
+    axes = list(range(-ndim, 0))
+    isreal = torch.isreal(input).all()
+
+    # take fourier transform along last ndim axes
+    freq = _fftc(input, axes)
+
+    # get initial and final shapes
+    ishape1, oshape1 = _expand_shapes(input.shape, oshape)
+
+    # build filter
+    if filt and np.any(np.asarray(oshape1) > np.asarray(ishape1)):
+        size = np.max(oshape1)
+        width = np.min(oshape1)
+        filt = fermi(ndim, size, width)
+        filt = resize(filt, oshape1)  # crop to match dimension
+    else:
+        filt = None
+
+    # resize in frequency space
+    freq = resize(freq, oshape1)
+
+    # if required, apply filtering
+    if filt is not None:
+        freq *= filt.to(freq.device)
+
+    # transform back
+    output = _ifftc(freq, axes)
+
+    # smooth
+    if polysmooth:
+        print("Polynomial smoothing not implemented yet; skipping")
+
+    # take magnitude if original signal was real
+    if isreal:
+        output = abs(output)
+
+    if isnumpy:
+        output = output.numpy(force=True)
+
+    return output
+
+
+# %% subroutines
+def _expand_shapes(*shapes):
+    shapes = [list(shape) for shape in shapes]
+    max_ndim = max(len(shape) for shape in shapes)
+
+    shapes_exp = [np.asarray([1] * (max_ndim - len(shape)) + shape) for shape in shapes]
+    shapes_exp = np.stack(shapes_exp, axis=0)  # (nshapes, max_ndim)
+    shapes_exp = np.max(shapes_exp, axis=0)
+
+    # restore original shape in non-padded portions
+    shapes_exp = [list(shapes_exp[: -len(shape)]) + shape for shape in shapes]
+
+    return tuple(shapes_exp)
+
+
+def _fftc(x, ax):
+    return torch.fft.fftshift(
+        torch.fft.fftn(torch.fft.ifftshift(x, dim=ax), dim=ax, norm="ortho"), dim=ax
+    )
+
+
+def _ifftc(x, ax):
+    return torch.fft.fftshift(
+        torch.fft.ifftn(torch.fft.ifftshift(x, dim=ax), dim=ax, norm="ortho"), dim=ax
+    )
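A minimal resampling sketch (matrix sizes are arbitrary; ``deepmr.shepp_logan`` is used as in the other examples in these modules):

>>> import deepmr
>>> img = deepmr.shepp_logan(128)
>>> up = deepmr.resample(img, [256, 256])
>>> up.shape
torch.Size([256, 256])
>>> down = deepmr.resample(img, [64, 64])
>>> down.shape
torch.Size([64, 64])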
diff --git a/_modules/deepmr/_signal/subspace.html b/_modules/deepmr/_signal/subspace.html new file mode 100644 index 00000000..cc94cadc --- /dev/null +++ b/_modules/deepmr/_signal/subspace.html @@ -0,0 +1,684 @@
Source code for deepmr._signal.subspace

+"""Data compression routines."""
+
+__all__ = ["rss", "svd"]
+
+import numpy as np
+import torch
+
+
+
+def rss(input, axis=None, keepdim=False):
+    """
+    Perform root sum-of-squares combination of a signal.
+
+    Parameters
+    ----------
+    input : np.ndarray | torch.Tensor
+        Input signal (real- or complex-valued).
+    axis : int, optional
+        Combination axis. If ``None``, combine along all dimensions,
+        reducing to a scalar. The default is ``None``.
+    keepdim : bool, optional
+        If ``True``, maintain the combined axis as a singleton dimension.
+        The default is ``False`` (squeeze the combination axis).
+
+    Returns
+    -------
+    output : np.ndarray | torch.Tensor
+        Real-valued output combined signal.
+
+    Examples
+    --------
+    >>> import torch
+    >>> import deepmr
+
+    Generate an example signal:
+
+    >>> signal = torch.ones(10, 4, 4)
+
+    We can compute the rss of all signal elements as:
+
+    >>> output = deepmr.rss(signal)
+    >>> output
+    tensor(12.6491)
+
+    We can compute rss along the first axis only (i.e., coil combination) as:
+
+    >>> output = deepmr.rss(signal, axis=0)
+    >>> output.shape
+    torch.Size([4, 4])
+
+    The axis can be explicitly maintained instead of squeezed as:
+
+    >>> output = deepmr.rss(signal, axis=0, keepdim=True)
+    >>> output.shape
+    torch.Size([1, 4, 4])
+
+    """
+    if axis is None:
+        return (input * input.conj()).sum() ** 0.5
+
+    if isinstance(input, np.ndarray):
+        isnumpy = True
+    else:
+        isnumpy = False
+
+    output = torch.as_tensor(input)
+    output = (output * output.conj()).sum(axis=axis, keepdim=keepdim) ** 0.5
+
+    if isnumpy:
+        output = output.numpy()
+
+    return output
+ + +
+def svd(input, ncoeff, axis):
+    """
+    Perform SVD compression of a signal.
+
+    The routine returns the SVD subspace basis, the compressed signal
+    and the explained variance of the subspace.
+
+    Parameters
+    ----------
+    input : np.ndarray | torch.Tensor
+        Input signal (real- or complex-valued).
+    ncoeff : int
+        Number of subspace coefficients to be retained.
+    axis : int
+        Compression axis.
+
+    Returns
+    -------
+    basis : np.ndarray | torch.Tensor
+        Subspace basis of shape (input.shape[axis], ncoeff).
+    output : np.ndarray | torch.Tensor
+        Compressed signal of shape (..., ncoeff, ...).
+    explained_variance : float
+        Explained variance of the subspace. Values close to 100%
+        indicate that the information content of the signal is preserved
+        despite compression.
+
+    """
+    if isinstance(input, np.ndarray):
+        isnumpy = True
+    else:
+        isnumpy = False
+
+    # cast to tensor
+    output = torch.as_tensor(input)
+
+    # move specified axis to the rightmost position
+    if axis != -1:
+        output = output[..., None]
+        output = output.swapaxes(axis, -1)
+
+    # fold to (nbatches, nrows, ncols)
+    ishape = output.shape
+    nrows = int(np.prod(ishape[:-1]))
+    ncols = ishape[-1]
+    output = output.reshape(nrows, ncols)
+
+    # perform svd (economy-size decomposition)
+    u, s, vh = torch.linalg.svd(output, full_matrices=False)
+
+    # compress data
+    basis = vh[..., :ncoeff]
+    output = output @ basis
+
+    # calculate explained variance
+    explained_variance = s**2 / (nrows - 1)  # (neigenvalues,)
+    explained_variance = explained_variance / explained_variance.sum()
+    explained_variance = torch.cumsum(explained_variance, dim=0)[ncoeff - 1]
+
+    # reshape
+    output = output.reshape(*ishape[:-1], ncoeff)
+
+    # permute back
+    output = output.swapaxes(axis, -1)[..., 0]
+
+    if isnumpy:
+        output = output.numpy()
+        basis = basis.numpy()
+
+    return basis, output, 100 * explained_variance
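A minimal compression sketch (the contrasts-first layout and the sizes below are illustrative assumptions, not part of the documented API):

>>> import torch
>>> import deepmr
>>> signal = torch.randn(32, 1000)  # e.g., 32 contrasts x 1000 voxels
>>> basis, compressed, var = deepmr.svd(signal, ncoeff=4, axis=0)
>>> basis.shape
torch.Size([32, 4])
>>> compressed.shape
torch.Size([4, 1000])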
diff --git a/_modules/deepmr/_signal/wavelet.html b/_modules/deepmr/_signal/wavelet.html new file mode 100644 index 00000000..d94628b5 --- /dev/null +++ b/_modules/deepmr/_signal/wavelet.html @@ -0,0 +1,785 @@
Source code for deepmr._signal.wavelet

+"""
+Wavelet transform routines; adapted from Sigpy [1].
+
+
+References
+----------
+[1] https://github.com/mikgroup/sigpy/tree/main
+
+"""
+
+__all__ = ["fwt", "iwt"]
+
+import torch
+import numpy as np
+
+import ptwt
+import pywt
+
+from .resize import resize
+
+
+
+def fwt(input, ndim=None, device=None, wave_name="db4", level=None):
+    """
+    Forward wavelet transform.
+
+    Adapted from Sigpy [1].
+
+    Parameters
+    ----------
+    input : np.ndarray | torch.Tensor
+        Input signal of shape (..., nz, ny, nx).
+    ndim : int, optional
+        Number of spatial dimensions over which to compute the
+        wavelet transform (``1``, ``2``, ``3``).
+        Spatial axes are assumed to be the rightmost ones.
+        The default is ``None`` (``ndim = min(3, len(input.shape))``).
+    device : str, optional
+        Computational device for the wavelet transform.
+        If not specified, use ``input.device``.
+        The default is ``None``.
+    wave_name : str, optional
+        Wavelet name. The default is ``"db4"``.
+    level : int, optional
+        Number of wavelet levels. The default is ``None``.
+
+    Returns
+    -------
+    output : np.ndarray | torch.Tensor
+        Output wavelet decomposition.
+    shape : Iterable[int]
+        Input signal shape (``input.shape``) for synthesis.
+
+    Examples
+    --------
+    >>> import torch
+    >>> import deepmr
+
+    First, generate a 2D phantom and add some noise:
+
+    >>> img = deepmr.shepp_logan(128) + 0.05 * torch.randn(128, 128)
+
+    Now, run wavelet decomposition:
+
+    >>> coeff, shape = deepmr.fwt(img)
+
+    The function returns ``coeff``, containing the wavelet coefficients,
+    and ``shape``, containing the original image shape for image synthesis via
+    ``deepmr.iwt``:
+
+    >>> shape
+    torch.Size([128, 128])
+
+    References
+    ----------
+    [1] https://github.com/mikgroup/sigpy/tree/main
+
+    """
+    if isinstance(input, np.ndarray):
+        isnumpy = True
+    else:
+        isnumpy = False
+
+    # cast to tensor
+    input = torch.as_tensor(input)
+
+    # get device
+    idevice = input.device
+    if device is None:
+        device = idevice
+    input = input.to(device)
+
+    # get default ndim
+    if ndim is None:
+        ndim = min(3, len(input.shape))
+
+    # pad to nearest even value
+    ishape = input.shape
+    zshape = [((ishape[n] + 1) // 2) * 2 for n in range(-ndim, 0)]
+    zinput = resize(
+        input.reshape(-1, *ishape[-ndim:]), [int(np.prod(ishape[:-ndim]))] + zshape
+    )
+
+    # select wavelet
+    wavelet = pywt.Wavelet(wave_name)
+
+    # select transform
+    if ndim == 1:
+        _fwt = ptwt.wavedec
+    elif ndim == 2:
+        _fwt = ptwt.wavedec2
+    elif ndim == 3:
+        _fwt = ptwt.wavedec3
+    else:
+        raise ValueError(
+            f"Number of dimensions (={ndim}) not recognized; we support only 1, 2 and 3."
+        )
+
+    # compute
+    output = _fwt(zinput, wavelet, mode="zero", level=level)
+    output = list(output)
+    output[0] = output[0].to(idevice)
+    for n in range(1, len(output)):
+        output[n] = [o.to(idevice) for o in output[n]]
+
+    # cast to numpy if required
+    if isnumpy:
+        output[0] = output[0].numpy(force=True)
+        for n in range(1, len(output)):
+            output[n] = [o.numpy(force=True) for o in output[n]]
+
+    return output, ishape
+ + +
+def iwt(input, shape, device=None, wave_name="db4", level=None):
+    """
+    Inverse wavelet transform.
+
+    Adapted from Sigpy [1].
+
+    Parameters
+    ----------
+    input : np.ndarray | torch.Tensor
+        Input wavelet decomposition.
+    shape : Iterable[int]
+        Spatial matrix size of output signal ``(nx)`` (1D signals),
+        ``(ny, nx)`` (2D) or ``(nz, ny, nx)`` (3D).
+    device : str, optional
+        Computational device for the wavelet transform.
+        If not specified, use ``input.device``.
+        The default is ``None``.
+    wave_name : str, optional
+        Wavelet name. The default is ``"db4"``.
+    level : int, optional
+        Number of wavelet levels. The default is ``None``.
+
+    Returns
+    -------
+    output : np.ndarray | torch.Tensor
+        Output signal of shape (..., nz, ny, nx).
+
+    Examples
+    --------
+    >>> import torch
+    >>> import deepmr
+
+    First, generate a 2D phantom and add some noise:
+
+    >>> img0 = deepmr.shepp_logan(128) + 0.05 * torch.randn(128, 128)
+
+    Now, run wavelet decomposition:
+
+    >>> coeff, shape = deepmr.fwt(img0)
+
+    The image can be synthesized from ``coeff`` and ``shape`` as:
+
+    >>> img = deepmr.iwt(coeff, shape)
+
+    References
+    ----------
+    [1] https://github.com/mikgroup/sigpy/tree/main
+
+    """
+    if isinstance(input, np.ndarray):
+        isnumpy = True
+    else:
+        isnumpy = False
+
+    # cast to tensor
+    output = list(input)
+    output[0] = torch.as_tensor(output[0])
+    for n in range(1, len(output)):
+        output[n] = [torch.as_tensor(o) for o in output[n]]
+
+    # get device
+    idevice = output[0].device
+    if device is None:
+        device = idevice
+
+    # transfer to computational device
+    output[0] = output[0].to(device)
+    for n in range(1, len(output)):
+        output[n] = [o.to(device) for o in output[n]]
+
+    # convert to tuple
+    for n in range(1, len(output)):
+        output[n] = tuple(output[n])
+    output = tuple(output)
+
+    # select wavelet
+    wavelet = pywt.Wavelet(wave_name)
+
+    # select transform
+    ndim = len(shape)
+    if ndim == 1:
+        _iwt = ptwt.waverec
+    elif ndim == 2:
+        _iwt = ptwt.waverec2
+    elif ndim == 3:
+        _iwt = ptwt.waverec3
+    else:
+        raise ValueError(
+            f"Number of dimensions (={ndim}) not recognized; we support only 1, 2 and 3."
+        )
+
+    # compute
+    zoutput = _iwt(output, wavelet)
+    zoutput = zoutput.reshape(*shape[:-ndim], *zoutput.shape[-ndim:])
+    output = resize(zoutput, shape)
+    output = output.to(idevice)
+
+    # cast to numpy if required
+    if isnumpy:
+        output = output.numpy(force=True)
+
+    # erase singleton dimension
+    if len(output.shape) == ndim + 1 and output.shape[0] == 1:
+        output = output[0]
+
+    return output
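A minimal hard-thresholding denoising sketch built on the round trip shown in the docstrings (the 0.1 threshold is an arbitrary illustrative value):

>>> import torch
>>> import deepmr
>>> img = deepmr.shepp_logan(128) + 0.05 * torch.randn(128, 128)
>>> coeff, shape = deepmr.fwt(img)
>>> coeff[1:] = [[c * (c.abs() > 0.1) for c in band] for band in coeff[1:]]
>>> denoised = deepmr.iwt(coeff, shape)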
+
\ No newline at end of file
diff --git a/_modules/deepmr/_vobj/fields/b0.html b/_modules/deepmr/_vobj/fields/b0.html
new file mode 100644
index 00000000..154f8307
--- /dev/null
+++ b/_modules/deepmr/_vobj/fields/b0.html
@@ -0,0 +1,625 @@
+deepmr._vobj.fields.b0 — deepmr documentation
Source code for deepmr._vobj.fields.b0

+"""B0 field maps generation routines."""
+
+__all__ = ["b0field"]
+
+import numpy as np
+import torch
+
+from ... import fft
+
+
+
[docs]def b0field(chi, b0range=(-200, 200), mask=None): + """ + Simulate inhomogeneous B0 fields. + + Output field units is ``[Hz]``. The field + is created by convolving the dipole kernel with an input + magnetic susceptibility map. + + Parameters + ---------- + chi : np.ndarray | torch.Tensor + Object magnetic susceptibility map in ``[ppb]`` of + shape ``(ny, nx)`` (2D) or ``(nz, ny, nx)`` (3D). + b0range : Iterable[float] + Range of B0 field in ``[Hz]``. The default is ``(-200, 200)``. + mask : np.ndarray | torch.Tensor, optional + Region of support of the object of + shape ``(ny, nx)`` (2D) or ``(nz, ny, nx)`` (3D). + The default is ``None``. + + Returns + ------- + B0map : torch.Tensor + Spatially varying B0 maps of shape ``(ny, nx)`` (2D) + or ``(nz, ny, nx)`` (3D) in ``[Hz]``, arising from the object susceptibility. + + Example + ------- + >>> import deepmr + + We can generate a 2D B0 field map of shape ``(ny=128, nx=128)`` starting from a + magnetic susceptibility distribution: + + >>> chi = deepmr.shepp_logan(128, qmr=True)["chi"] + >>> b0map = deepmr.b0field(chi) + + B0 values range can be specified using ``b0range`` argument: + + >>> b0map = deepmr.b0field(chi, b0range=(-500, 500)) + + """ + # make sure this is a torch tensor + chi = torch.as_tensor(chi, dtype=torch.float32) + + # get input shape + ishape = chi.shape + + # get k space coordinates + kgrid = [ + np.arange(-ishape[n] // 2, ishape[n] // 2, dtype=np.float32) + for n in range(len(ishape)) + ] + kgrid = np.meshgrid(*kgrid, indexing="ij") + kgrid = np.stack(kgrid, axis=-1) + + knorm = (kgrid**2).sum(axis=-1) + np.finfo(np.float32).eps + dipole_kernel = 1 / 3 - (kgrid[..., 0] ** 2 / knorm) + dipole_kernel = torch.as_tensor(dipole_kernel, dtype=torch.float32) + + # apply convolution + B0map = fft.ifft(dipole_kernel * fft.fft(chi)).real + + # rescale + B0map = B0map - B0map.min() # (min, max) -> (0, max - min) + B0map = B0map / B0map.max() # (0, max - min) -> (0, 1) + B0map = ( + B0map * (b0range[1] - b0range[0]) + b0range[0] + ) # (0, 1) -> (b0range[0], b0range[1]) + + # mask + if mask is not None: + mask = torch.as_tensor(mask != 0) + B0map = mask * B0map + + return torch.as_tensor(B0map, dtype=torch.float32)
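As a usage sketch, both the susceptibility map and the object support can be taken from the quantitative Shepp-Logan phantom; the support mask below is a hypothetical estimate derived from the ``M0`` map.

>>> import deepmr
>>> phantom = deepmr.shepp_logan(128, qmr=True)
>>> mask = phantom["M0"] != 0  # hypothetical object support
>>> b0map = deepmr.b0field(phantom["chi"], b0range=(-300, 300), mask=mask)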
\ No newline at end of file
diff --git a/_modules/deepmr/_vobj/fields/coil.html b/_modules/deepmr/_vobj/fields/coil.html
new file mode 100644
index 00000000..40d6a04d
--- /dev/null
+++ b/_modules/deepmr/_vobj/fields/coil.html
@@ -0,0 +1,821 @@
+deepmr._vobj.fields.coil — deepmr documentation
Source code for deepmr._vobj.fields.coil

+"""B1+ and sensitivity maps generation routines."""
+
+__all__ = ["sensmap", "b1field"]
+
+import math
+import numpy as np
+import torch
+
+
+
[docs]def sensmap(shape, coil_width=2.0, shift=None, dphi=0.0, nrings=None, mask=None): + """ + Simulate birdcage coils. + + Adapted from SigPy [1]. + + Parameters + ---------- + shape : Iterable[int] + Size of the matrix ``(ncoils, ny, nx)`` (2D) or ``(ncoils, nz, ny, nx)`` (3D) for the sensitivity coils. + shift : Iterable[int], optional + Displacement of the coil center with respect to matrix center. + The default is ``(0, 0)`` / ``(0, 0, 0)``. + dphi : float + Bulk coil angle in ``[deg]``. + The default is ``0.0°``. + coil_width : float, optional + Width of the coil, with respect to image dimension. + The default is ``2.0``. + nrings : int, optional + Number of rings for a cylindrical hardware set-up. + The default is ``ncoils // 4``. + mask : np.ndarray | torch.Tensor, optional + Region of support of the object of + shape ``(ny, nx)`` (2D) or ``(nz, ny, nx)`` (3D). + The default is ``None``. + + Returns + ------- + smap : torch.Tensor + Complex spatially varying sensitivity maps of shape ``(nmodes, ny, nx)`` (2D) + or ``(nmodes, nz, ny, nx)`` (3D). If ``nmodes = 1``, the first dimension is squeezed. + + Example + ------- + >>> import deepmr + + We can generate a set of ``nchannels=8`` 2D sensitivity maps of shape ``(ny=128, nx=128)`` by: + + >>> smap = deepmr.sensmap((8, 128, 128)) + + Coil center and rotation can be modified by ``shift`` and ``dphi`` arguments: + + >>> smap = deepmr.sensmap((8, 128, 128), shift=(-3, 5), dphi=30.0) # center shifted by (dy, dx) = (-3, 5) pixels and rotated by 30.0 degrees. + + Similarly, ``nchannels=8`` 3D sensitivity maps can be generated as: + + >>> smap = deepmr.sensmap((8, 128, 128, 128)) + + Beware that this will require more memory. + + References + ---------- + [1] https://github.com/mikgroup/sigpy/tree/main + + """ + smap = _birdcage(shape, coil_width, nrings, shift, np.deg2rad(dphi)) + + # normalize + rss = sum(abs(smap) ** 2, 0) ** 0.5 + smap /= rss + + # mask + if mask is not None: + mask = torch.as_tensor(mask != 0) + smap = mask * smap + + return smap
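A minimal sketch of how the maps are typically consumed: broadcasting the sensitivities against a single-channel phantom to obtain multi-channel images. Since the maps are RSS-normalized (see the source above), the root-sum-of-squares combination approximately recovers the input image.

>>> import deepmr
>>> img = deepmr.shepp_logan(128)
>>> smap = deepmr.sensmap((8, 128, 128))
>>> coil_imgs = smap * img                      # (8, 128, 128) complex coil images
>>> rss = (coil_imgs.abs() ** 2).sum(0) ** 0.5  # approximately recovers img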
+ + +
[docs]def b1field( + shape, + nmodes=1, + b1range=(0.5, 2.0), + shift=None, + dphi=0.0, + coil_width=1.1, + ncoils=8, + nrings=None, + mask=None, +): + """ + Simulate inhomogeneous B1+ fields. + + Adapted from SigPy [1]. + + Parameters + ---------- + shape : Iterable[int] + Size of the matrix ``(ny, nx)`` (2D) or + ``(nz, ny, nx)`` (3D) for the B1+ field. + nmodes : int, optional + Number of B1+ modes. First mode is ``CP`` mode, second + is ``gradient`` mode, and so on. The default is ``1``. + b1range : Iterable[float] + Range of B1+ magnitude. The default is ``(0.5, 2.0)``. + shift : Iterable[int], optional + Displacement of the coil center with respect to matrix center. + The default is ``(0, 0)`` / ``(0, 0, 0)``. + dphi : float + Bulk coil angle in ``[deg]``. + The default is ``0.0°``. + coil_width : float, optional + Width of the coil, with respect to image dimension. + The default is ``1.1``. + ncoils : int, optional + Number of transmit coil elements. Standard coils have ``2`` channels + operating in quadrature. To support multiple modes (i.e., PTX), increase this + number. The default is ``8``. + nrings : int, optional + Number of rings for a cylindrical hardware set-up. + The default is ``ncoils // 4``. + mask : np.ndarray | torch.Tensor, optional + Region of support of the object of + shape ``(ny, nx)`` (2D) or ``(nz, ny, nx)`` (3D). + The default is ``None``. + + Returns + ------- + smap : torch.Tensor + Complex spatially varying b1+ maps of shape ``(nmodes, ny, nx)`` (2D) + or ``(nmodes, nz, ny, nx)`` (3D). Magnitude of the map represents + the relative flip angle scaling (wrt to the nominal). + + Example + ------- + >>> import deepmr + + We can generate a 2D B1+ field map of shape ``(ny=128, nx=128)`` by: + + >>> b1map = deepmr.b1field((128, 128)) + + Field center and rotation can be modified by ``shift`` and ``dphi`` arguments: + + >>> b1map = deepmr.b1field((8, 128, 128), shift=(-3, 5), dphi=30.0) # center shifted by (dy, dx) = (-3, 5) pixels and rotated by 30.0 degrees. + + B1+ values range and steepness of variation can be specified using ``b1range`` and ``coil_width`` arguments: + + >>> # transmit coil is 4 times bigger than FOV (e.g., body coil) and + >>> # B1+ scalings are between (0.8, 1.2) the nominal flip angle (e.g., 3T scanner) + >>> b1map3T = deepmr.b1field((128, 128), b1range=(0.8, 1.2), coil_width=4.0) + >>> + >>> # transmit coil is 1.1 times bigger than FOV (e.g., head coil) and + >>> # B1+ scalings are between (0.5, 2.0) the nominal flip angle (e.g., 7T scanner) + >>> b1map7T = deepmr.b1field((128, 128), b1range=(0.5, 2.0), coil_width=1.1) + + Multiple orthogonal modes can be simulated by ``nmodes`` argument. + For example, ``CP`` mode and ``gradient`` mode can be obtained as: + + >>> b1map = deepmr.b1field((128, 128), nmodes=2) # b1map[0] is CP, b1map[1] is gradient mode. + + Three dimensional B1+ maps of shape ``(nz, ny, nx)`` can be obtained as: + + >>> b1map = deepmr.b1field((128, 128, 128)) + + Beware that this will require more memory. + + References + ---------- + [1] https://github.com/mikgroup/sigpy/tree/main + + """ + # check we can do quadrature + assert ( + ncoils >= 2 + ), f"We support circular polarization only - found {ncoils} transmit elements." + assert ncoils >= nmodes, f"Need ncoils (={ncoils}) to be >= nmodes (={nmodes})." 
+ + # generate coils + smap = _birdcage( + [ncoils] + list(shape), coil_width, nrings, shift, np.deg2rad(dphi) + ).numpy() + + # normalize + rss = sum(abs(smap) ** 2, 0) ** 0.5 + smap /= rss + + # # combine + dalpha = 2 * math.pi / ncoils + alpha = np.arange(ncoils) * dalpha + mode = np.arange(nmodes) + phafu = np.exp(1j * mode[:, None] * alpha[None, :]) # (nmodes, nchannels) + + # # get modes + smap = smap.T # (nc, ...) -> (..., nc) + smap = [(abs(smap) * phafu[n]).sum(axis=-1) for n in range(nmodes)] + smap = np.stack(smap, axis=-1) # (..., nmodes) + smap = smap.T # (..., nmodes) -> (nmodes, ...) + + # # rescale + phase = smap / abs(smap) + smap = abs(smap) + smap = smap - smap.min() # (min, max) -> (0, max - min) + smap = smap / smap.max() # (0, max - min) -> (0, 1) + smap = ( + smap * (b1range[1] - b1range[0]) + b1range[0] + ) # (0, 1) -> (b1range[0], b1range[1]) + smap = smap * phase + + # convert to tensor + if nmodes == 1: + smap = torch.as_tensor(abs(smap[0]), dtype=torch.float32) + else: + smap = torch.as_tensor(smap, dtype=torch.complex64) + + # mask + if mask is not None: + mask = torch.as_tensor(mask != 0) + smap = mask * smap + + return smap
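A sketch of how a B1+ map is usually applied: scaling a nominal flip angle into the spatially varying flip angle actually experienced by each voxel (the 60° prescription below is an arbitrary example).

>>> import deepmr
>>> b1map = deepmr.b1field((128, 128), b1range=(0.8, 1.2))
>>> nominal_fa = 60.0               # [deg], hypothetical prescription
>>> actual_fa = nominal_fa * b1map  # [deg], spatially varying flip angle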
+ + +def _birdcage(shape, coil_width, nrings, shift, dphi): + # default + if shift is None: + shift = [0.0 for ax in range(len(shape) - 1)] + if nrings is None: + nrings = np.max((shape[0] // 4, 1)) + + # coil width and radius + c_width = coil_width * min(shape[-2:]) + c_rad = 0.5 * c_width + + if len(shape) == 3: + nc, ny, nx = shape + phi = np.arange(nc) * (2 * math.pi / nc) + dphi + y, x = np.mgrid[:ny, :nx] + + x0 = c_rad * np.cos(phi) + shape[-1] / 2.0 + shift[-1] + y0 = c_rad * np.sin(phi) + shape[-2] / 2.0 + shift[-2] + + x_co = x[None, ...] - x0[:, None, None] + y_co = y[None, ...] - y0[:, None, None] + + # coil magnitude + rr = np.sqrt(x_co**2 + y_co**2) / (2 * c_width) + + # coil phase + phi = np.arctan2(x_co, -y_co) - phi[:, None, None] + + elif len(shape) == 4: + nc, nz, ny, nx = shape + phi = np.arange(nc) * (2 * math.pi / (nc + nrings)) + dphi + z, y, x = np.mgrid[:nz, :ny, :nx] + + x0 = c_rad * np.cos(phi) + shape[-1] / 2.0 + shift[-1] + y0 = c_rad * np.sin(phi) + shape[-2] / 2.0 + shift[-2] + z0 = ( + np.floor(np.arange(nc) / nrings) + - 0.5 * (np.ceil(np.arange(nc) / nrings) - 1) + + shape[-3] / 2.0 + + shift[-3] + ) + + x_co = x[None, ...] - x0[:, None, None, None] + y_co = y[None, ...] - y0[:, None, None, None] + z_co = z[None, ...] - z0[:, None, None, None] + + # coil magnitude + rr = np.sqrt(x_co**2 + y_co**2 + z_co**2) / (2 * c_width) + + # coil phase + phi = np.arctan2(x_co, -y_co) - phi[:, None, None, None] + else: + raise ValueError("Can only generate shape with length 3 or 4") + + # build coils + rr[rr == 0.0] = 1.0 + smap = (1.0 / rr) * np.exp(1j * phi) + + return torch.as_tensor(smap, dtype=torch.complex64) +
\ No newline at end of file
diff --git a/_modules/deepmr/_vobj/motion/rigid.html b/_modules/deepmr/_vobj/motion/rigid.html
new file mode 100644
index 00000000..a9f239f3
--- /dev/null
+++ b/_modules/deepmr/_vobj/motion/rigid.html
@@ -0,0 +1,675 @@
+deepmr._vobj.motion.rigid — deepmr documentation
Source code for deepmr._vobj.motion.rigid

+"""Rigid motion parameter generation routines."""
+
+__all__ = ["rigid_motion"]
+
+import numpy as np
+import numba as nb
+
+
+
[docs]def rigid_motion(ndims, nframes, degree="moderate", seed=42): + """ + Generate rigid motion pattern as a Markov Chain process. + + Parameters + ---------- + ndims : int + Generate 2D (in-plane only) or 3D motion pattern. + nframes : int + Number of motion frames. + degree : str | Iterable[float], optional + Severity of motion. The default is ``"moderate"``. + seed : int, optional + Random number generator seed. + The default is ``42``. + + Notes + ----- + Severity of motion can be specified via the ``degree`` argument. + This can be a string - accepted values are ``"subtle"``, ``"moderate"`` + and ``"severe"``. These corresponds to the following motion ranges: + + * ``"subtle"``: maximum rotation ``5.0 [deg]``; maximum translation ``2.0 [mm]`` + * ``"moderate"``: maximum rotation ``10.0 [deg]``; maximum translation ``8.0 [mm]`` + * ``"severe"``: maximum rotation ``16.0 [deg]``; maximum translation ``16.0 [mm]`` + + As an alternative, user can specify a tuple of floats, where ``degree[0]`` + is the maximum rotation in ``[deg]`` and ``degree[1]`` is the maximum translation + in ``[mm]``. + + Returns + ------- + angleX : torch.Tensor + Rotation about ``x`` axis in ``[deg]`` of shape ``(nframes,)``. + angleY : torch.Tensor + Rotation about ``y`` axis in ``[deg]`` of shape ``(nframes,)``. + angleZ : torch.Tensor + Rotation about ``z`` axis in ``[deg]`` of shape ``(nframes,)``. + dx : torch.Tensor + Translation towards ``x`` axis in ``[mm]`` of shape ``(nframes,)``. + dy : torch.Tensor + Translation towards ``y`` axis in ``[mm]`` of shape ``(nframes,)``. + dz : torch.Tensor + Translation towards ``z`` axis in ``[mm]`` of shape ``(nframes,)``. + + """ + # Markov rate (I don't remember what this is :() + rate = [[0.9, 0.05, 0.05], [0.4, 0.3, 0.3], [0.4, 0.3, 0.3]] + transition_mtx = np.array(rate, np.float32) + + # generate probability array + np.random.seed(seed) + change = np.random.rand(6, nframes) + + # generate six random series + x = _generate_series(6, nframes, transition_mtx, change) + + # rescale series + x_max = np.abs(x).max(axis=1)[:, None] + x_max[x_max == 0] = 1 + x = x / x_max + + # get motion range + if isinstance(degree, str): + if degree == "subtle": + degree = [5.0, 2.0] + elif degree == "moderate": + degree = [10.0, 8.0] + elif degree == "severe": + degree = [16.0, 16.0] + else: + raise ValueError( + f"Severity of motion not recognized - must be either 'subtle', 'moderate', 'severe' or a (rotation, translation) tuple in (deg, mm). Found {degree}." + ) + + # set + roll = degree[0] * x[0] # deg, rotation around x + pitch = degree[0] * x[1] # deg, rotation around y + yaw = degree[0] * x[2] # deg, rotation around z + dx = degree[1] * x[3] # mm, translation around x + dy = degree[1] * x[4] # mm, translation around x + dz = degree[1] * x[5] # mm, translation around x + + if ndims == 2: + return yaw, dy, dx + elif ndims == 3: + return roll, pitch, yaw, dx, dy, dz + else: + raise ValueError(f"Invalid number of dims! must be 2 or 3 - found {ndims}")
+ + +# %% local utils +# adapted from +# https://ipython-books.github.io/131-simulating-a-discrete-time-markov-chain/ + +# The statespace +states = np.array([0, -1, 1], np.int64) + + +@nb.njit(fastmath=True, parallel=True) # pragma: no cover +def _generate_series(n_parameters, n_frames, transition_matrix, change): + # pre-allocate state history + state_history = np.zeros((n_parameters, n_frames), dtype=np.int64) + + # initialize states + current_state = np.zeros(n_parameters, dtype=np.int64) + + for p in nb.prange(n_parameters): + for t in range(1, n_frames): + current_state[p] = _generate_state( + transition_matrix[current_state[p]], change[p, t] + ) + state_history[p, t] = state_history[p, t - 1] + states[current_state[p]] + + return state_history + + +@nb.njit(fastmath=True, cache=True) # pragma: no cover +def _generate_state(probability, change): + if change <= probability[0]: + out_state = 0 + elif change <= probability[0] + probability[1]: + out_state = 1 + else: + out_state = 2 + + return out_state +
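A short sketch of the two usage patterns implied by the return values documented above (frame count and severities are arbitrary):

>>> import deepmr
>>> # 3D pattern: rotations about x, y, z and translations along x, y, z
>>> roll, pitch, yaw, dx, dy, dz = deepmr.rigid_motion(3, 500, degree="moderate")
>>> # in-plane (2D) pattern: a single rotation plus in-plane translations
>>> yaw2d, dy2d, dx2d = deepmr.rigid_motion(2, 500, degree=(8.0, 4.0))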
\ No newline at end of file
diff --git a/_modules/deepmr/_vobj/phantoms.html b/_modules/deepmr/_vobj/phantoms.html
new file mode 100644
index 00000000..b7330140
--- /dev/null
+++ b/_modules/deepmr/_vobj/phantoms.html
@@ -0,0 +1,1078 @@
+deepmr._vobj.phantoms — deepmr documentation
Source code for deepmr._vobj.phantoms

+""""Numerical phantoms generation routines"""
+
+__all__ = ["shepp_logan", "brainweb", "custom_phantom"]
+
+import numpy as np
+import torch
+
+from .brainweb import brainweb as _brainweb
+from .ct_shepp_logan import ct_shepp_logan
+from .mr_shepp_logan import mr_shepp_logan
+
+
+
[docs]def shepp_logan(npix, nslices=1, qmr=False, B0=3.0): + """ + Initialize numerical phantom for MR simulations. + + This function generates a numerical phantom for qMR or MR simulations based on the provided parameters. + + Parameters + ---------- + npix : Iterable[int] + In-plane matrix size. + nslices : int, optional + Number of slices. An isotropic ``[npix, npix, npix]`` phantom can be + generated, for convenience, by setting nslices to ``-1``. The default is ``1``. + qmr : bool, optional + Flag indicating whether the phantom is for qMRI (``True``) or MR (``False``) simulations. + The default is False. + B0 : float, optional + Static field strength in ``[T]``. Ignored if ``mr`` is False. + The default is ``3.0``. + + Returns + ------- + phantom : torch.Tensor, dict + Shepp-Logan phantom of shape ``(nslices, ny, nx)`` (``qmr == False``) or + a dictionary of maps (``M0``, ``T1``, ``T2``, ``T2star``, ``chi``) of + shape ``(nslices, ny, nx)`` (``qmr == True``). Units for ``T1``, ``T2`` and ``T2star`` + are ``[ms]``; for ``chi``, units are ``[ppm]``. + + Examples + -------- + >>> import deepmr + + We can generate a non-quantitative Shepp-Logan phantom as: + + >>> phantom = deepmr.shepp_logan(128) + >>> phantom.shape + torch.Size([128, 128]) + + We also support multiple slices: + + >>> phantom = deepmr.shepp_logan(128, 32) + >>> phantom.shape + torch.Size([32, 128, 128]) + + An isotropic ``[npix, npix, npix]`` phantom can be generated by setting nslices to ``-1``: + + >>> phantom = deepmr.shepp_logan(128, -1) + >>> phantom.shape + torch.Size([128, 128, 128]) + + We can also generate quantitative ``M0``, ``T1``, ``T2``, ``T2*`` and magnetic susceptibility maps: + + >>> phantom = deepmr.shepp_logan(128, qmr=True) + >>> phantom.keys() + dict_keys(['M0', 'T1', 'T2', 'T2star', 'chi']) + + Each map will have ``(nslices, npix, npix)`` shape: + + >>> phantom["M0"].shape + torch.Size([128, 128]) + + References + ---------- + [1] L. A. Shepp and B. F. Logan, + "The Fourier reconstruction of a head section," + in IEEE Transactions on Nuclear Science, + vol. 21, no. 3, pp. 21-43, June 1974, + doi: 10.1109/TNS.1974.6499235. + + """ + if nslices < 0: + nslices = npix + if qmr: + seg, mrtp, emtp = mr_shepp_logan(npix, nslices, B0) + # - seg (tensor): phantom segmentation (e.g., 1 := GM, 2 := WM, 3 := CSF...) + # - mrtp (list): list of dictionaries containing 1) free water T1/T2/T2*/ADC/v, 2) bm/mt T1/T2/fraction, 3) exchange matrix + # for each class (index along the list correspond to value in segmentation mask) + # - emtp (list): list of dictionaries containing electromagnetic tissue properties for each class. + + # only support single model for now: + prop = { + "M0": mrtp["M0"], + "T1": mrtp["T1"], + "T2": mrtp["T2"], + "T2star": mrtp["T2star"], + "chi": emtp["chi"], + } + return custom_phantom(seg, prop) + else: + return ct_shepp_logan(npix, nslices)
+ + +
[docs]def brainweb(idx, npix=None, nslices=1, B0=3.0, cache_dir=None): + """ + Initialize a brain-shaped phantom for MR simulations. + + This function generates a brain-shaped phantom [1-3] for qMR or MR simulations based on the provided parameters. + + Parameters + ---------- + idx : int + Brainweb ID (``0`` to ``19``). + npix : Iterable[int], optional + In-plane matrix size. The default is ``None``. + nslices : int, optional + Number of slices. An isotropic ``[npix, npix, npix]`` phantom can be + generated, for convenience, by setting nslices to ``-1``. The default is ``1``. + B0 : float, optional + Static field strength in ``[T]``. + The default is ``3.0``. + cache_dir : os.PathLike + Directory to download the data. + + Returns + ------- + phantom : torch.Tensor, dict + Dictionary of BrainWeb maps (``M0``, ``T1``, ``T2``, ``T2star``, ``chi``) of + shape ``(nslices, ny, nx)`` (``qmr == True``). Units for ``T1``, ``T2`` and ``T2star`` + are ``[ms]``; for ``chi``, units are ``[ppm]``. + + Examples + -------- + >>> import deepmr + + We can generate a single-slice BrainWeb phantom as: + + >>> phantom = deepmr.brainweb(128) + >>> phantom.keys() + dict_keys(['M0', 'T1', 'T2', 'T2star', 'chi']) + + Each map will have ``(nslices, npix, npix)`` shape: + + >>> phantom["M0"].shape + torch.Size([128, 128]) + + We also support multiple slices: + + >>> phantom = deepmr.brainweb(128, 32) + >>> phantom["M0"].shape + torch.Size([32, 128, 128]) + + Notes + ----- + The brainweb is set in the following order: + + * The ``cache_dir`` passed as argument. + * The environment variable ``BRAINWEB_DIR``. + * The default cache__dir ``~/brainweb``. + + References + ---------- + [1] D.L. Collins, A.P. Zijdenbos, V. Kollokian, J.G. Sled, N.J. Kabani, C.J. Holmes, A.C. Evans, + Design and Construction of a Realistic Digital Brain Phantom, + IEEE Transactions on Medical Imaging, vol.17, No.3, p.463--468, June 1998\n + [2] https://github.com/casperdcl/brainweb/ \n + [3] https://github.com/paquiteau/brainweb-dl?tab=readme-ov-file + + + """ + if nslices < 0: + nslices = npix + + seg, mrtp, emtp = _brainweb(npix, nslices, B0, idx, cache_dir) + # - seg (tensor): phantom segmentation (e.g., 1 := GM, 2 := WM, 3 := CSF...) + # - mrtp (list): list of dictionaries containing 1) free water T1/T2/T2*/ADC/v, 2) bm/mt T1/T2/fraction, 3) exchange matrix + # for each class (index along the list correspond to value in segmentation mask) + # - emtp (list): list of dictionaries containing electromagnetic tissue properties for each class. + + # only support single model for now: + prop = { + "M0": mrtp["M0"], + "T1": mrtp["T1"], + "T2": mrtp["T2"], + "T2star": mrtp["T2star"], + "chi": emtp["chi"], + } + return custom_phantom(seg, prop)
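Per the signature above, the first positional argument is the BrainWeb subject index ``idx`` (``0`` to ``19``), not the matrix size; a minimal sketch assuming subject ``0``, a 128-pixel matrix and the default cache directory (data are downloaded on first use):

>>> import deepmr
>>> phantom = deepmr.brainweb(0, npix=128, nslices=1)
>>> t1map, t2map = phantom["T1"], phantom["T2"]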
+ + +
[docs]def custom_phantom(segmentation, properties): + """ + Initialize numerical phantom for MR simulations from user-provided segmentation. + + This function generates a numerical phantom for qMR simulations based on the segmentation and parameters. + + Parameters + ---------- + segmentation : torch.Tensor + Hard (i.e. non probabilistic) segmentation of the object of shape ``(nslices, ny, nx)``. + properties : dict + Dictionary with the properties for each class (e.g., ``properties.keys() = dict_keys(["M0", "T1", "T2", "T2star", "chi"])``). + Each property is a list, whose entries ordering should match the label values in "segmentation". + For example, ``properties["T1"][2]`` is the T1 value of the region corresponding to (``segmentation == 2``). + + Returns + ------- + phantom : dict + Dictionary of maps (e.g., ``M0``, ``T1``, ``T2``, ``T2star``, ``chi``) of + shape ``(nslices, ny, nx)``. + + Examples + -------- + >>> import torch + >>> import deepmr + + We can initialize a simple tissue segmentation and its ``M0``, ``T1`` and ``T2`` properties: + + >>> segmentation = torch.tensor([0, 0, 0, 1, 1, 1], dtype=int) + >>> properties = {"M0": [0.7, 0.8], "T1": [500.0, 1000.0], "T2": [50.0, 100.0]} + + Now, we can use "create_phantom" to generate our ``M0``, ``T1`` and ``T2`` maps: + + >>> phantom = deepmr.custom_phantom(segmentation, properties) + >>> phantom["M0"] + tensor([ 0.7, 0.7, 0.7, 0.8, 0.8, 0.8]) + >>> phantom["T1"] + tensor([ 500., 500., 500., 1000., 1000., 1000.]) + >>> phantom["T2"] + tensor([ 50., 50., 50., 100., 100., 100.]) + + """ + assert ( + np.issubdtype(segmentation.detach().cpu().numpy().dtype, np.floating) is False + ), "We only support hard segmentation right now." + map_template = torch.zeros(segmentation.shape, dtype=torch.float32) + labels = np.unique(segmentation) + + phantom = {} + for key in properties.keys(): + value = map_template.clone() + for idx in labels: + value[segmentation == idx] = properties[key][idx] + phantom[key] = value + + return phantom
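The same mechanism extends to 2D (or 3D) segmentations; a minimal sketch using a hypothetical two-class disc segmentation:

>>> import torch
>>> import deepmr
>>> y, x = torch.meshgrid(torch.arange(64), torch.arange(64), indexing="ij")
>>> segmentation = ((y - 32) ** 2 + (x - 32) ** 2 < 20 ** 2).long()  # 0 = background, 1 = disc
>>> properties = {"M0": [0.0, 1.0], "T1": [0.0, 800.0], "T2": [0.0, 80.0]}
>>> phantom = deepmr.custom_phantom(segmentation, properties)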
+ + +# @_dataclass +# class ArbitraryPhantomBuilder: +# """Helper class to build qMRI phantoms from externally provided maps.""" + +# # relaxation properties +# T1: _Union[float, _npt.NDArray] # ms +# T2: _Union[float, _npt.NDArray] # ms +# segmentation: _npt.NDArray = None +# M0: float = 1.0 + +# # other properties +# T2star: _Union[float, _npt.NDArray] = 0.0 # ms +# chemshift: _Union[float, _npt.NDArray] = 0.0 # Hz / T + +# # motion properties +# D: _Union[float, _npt.NDArray] = 0.0 # um**2 / ms +# v: _Union[float, _npt.NDArray] = 0.0 # cm / s + +# # multi-component related properties +# exchange_rate: _Union[float, _npt.NDArray] = 0.0 # 1 / s + +# # smaller pools +# bm: dict = None +# mt: dict = None + +# # electromagnetic properties +# chi: float = 0.0 +# sigma: float = 0.0 # S / m +# epsilon: float = 0.0 + +# # size and shape +# n_atoms: int = 1 +# shape: tuple = None + +# def __post_init__(self): +# # convert scalar to array and gather sizes and shapes +# sizes = [] +# shapes = [] +# for field in _fields(self): +# value = getattr(self, field.name) +# if ( +# field.name != "bm" +# and field.name != "mt" +# and field.name != "segmentation" +# and field.name != "n_atoms" +# and field.name != "shape" +# and field.name != "exchange_rate" +# ): +# val = _np.asarray(value) +# sizes.append(val.size) +# shapes.append(val.shape) +# setattr(self, field.name, val) + +# # get number of atoms +# self.n_atoms = _np.max(sizes) +# self.shape = shapes[_np.argmax(sizes)] + +# # check shapes +# shapes = [shape for shape in shapes if shape != ()] +# assert ( +# len(set(shapes)) <= 1 +# ), "Error! All input valus must be either scalars or arrays of the same shape!" + +# # check segmentation consistence +# if self.segmentation is not None: +# seg = self.segmentation +# if issubclass(seg.dtype.type, _np.integer): # discrete segmentation case +# assert seg.max() == self.n_atoms - 1, ( +# f"Error! Number of atoms = {self.n_atoms} must match number of" +# f" classes = {seg.max()}" +# ) +# else: +# assert seg.shape[0] == self.n_atoms - 1, ( +# f"Error! Number of atoms = {self.n_atoms} must match number of" +# f" classes = {seg.shape[0]}" +# ) + +# # expand scalars +# for field in _fields(self): +# value = getattr(self, field.name) +# if ( +# field.name != "bm" +# and field.name != "mt" +# and field.name != "segmentation" +# and field.name != "n_atoms" +# and field.name != "shape" +# and field.name != "exchange_rate" +# ): +# if value.size == 1: +# value = value * _np.ones(self.shape, dtype=_np.float32) +# value = _np.atleast_1d(value) +# setattr(self, field.name, value) + +# # initialize exchange_rate +# self.exchange_rate = _np.zeros(list(self.shape) + [1], dtype=_np.float32) + +# # initialize BM and MT pools +# self.bm = {} +# self.mt = {} + +# def add_cest_pool( +# self, +# T1: _Union[float, _npt.NDArray], +# T2: _Union[float, _npt.NDArray], +# weight: _Union[float, _npt.NDArray], +# chemshift: _Union[float, _npt.NDArray] = 0.0, +# ): +# """ +# Add a new Chemical Exchanging pool to the model. + +# Args: +# T1 (Union[float, npt.NDArray]): New pool T1. +# T2 (Union[float, npt.NDArray]): New pool T2. +# weight (Union[float, npt.NDArray]): New pool relative fraction. +# chemshift (Union[float, npt.NDArray], optional): New pool chemical shift. Defaults to 0.0. + +# """ +# # check pool +# if _np.isscalar(T1): +# T1 *= _np.ones((self.n_atoms, 1), dtype=_np.float32) +# elif len(T1.shape) == 1: +# assert _np.array_equal( +# T1.shape, self.shape +# ), "Input T1 must be either a scalar or match the existing shape." 
+# T1 = T1[..., None] +# else: +# assert _np.array_equal( +# T1.squeeze().shape, self.shape +# ), "Input T1 must be either a scalar or match the existing shape." +# assert T1.shape[-1] == 1, "Pool dimension size must be 1!" +# if _np.isscalar(T2): +# T2 *= _np.ones((self.n_atoms, 1), dtype=_np.float32) +# elif len(T2.shape) == 1: +# assert _np.array_equal( +# T2.shape, self.shape +# ), "Input T2 must be either a scalar or match the existing shape." +# T2 = T2[..., None] +# else: +# assert _np.array_equal( +# T2.squeeze().shape, self.shape +# ), "Input T2 must be either a scalar or match the existing shape." +# assert T2.shape[-1] == 1, "Pool dimension size must be 1!" +# if _np.isscalar(weight): +# weight *= _np.ones((self.n_atoms, 1), dtype=_np.float32) +# elif len(weight.shape) == 1: +# assert _np.array_equal( +# weight.shape, self.shape +# ), "Input weight must be either a scalar or match the existing shape." +# weight = weight[..., None] +# else: +# assert _np.array_equal( +# weight.squeeze().shape, self.shape +# ), "Input weight must be either a scalar or match the existing shape." +# assert weight.shape[-1] == 1, "Pool dimension size must be 1!" +# if _np.isscalar(chemshift): +# chemshift *= _np.ones((self.n_atoms, 1), dtype=_np.float32) +# elif len(chemshift.shape) == 1: +# assert _np.array_equal(chemshift.shape, self.shape), ( +# "Input chemical_shift must be either a scalar or match the existing" +# " shape." +# ) +# chemshift = chemshift[..., None] +# else: +# assert _np.array_equal(chemshift.squeeze().shape, self.shape), ( +# "Input chemical_shift must be either a scalar or match the existing" +# " shape." +# ) +# assert chemshift.shape[-1] == 1, "Pool dimension size must be 1!" + +# # BM pool already existing; add a new component +# if self.bm: +# self.bm["T1"] = _np.concatenate((self.bm["T1"], T1), axis=-1) +# self.bm["T2"] = _np.concatenate((self.bm["T2"], T2), axis=-1) +# self.bm["weight"] = _np.concatenate((self.bm["weight"], weight), axis=-1) +# self.bm["chemshift"] = _np.concatenate( +# (self.bm["chemshift"], chemshift), axis=-1 +# ) +# else: +# self.bm["T1"] = T1 +# self.bm["T2"] = T2 +# self.bm["weight"] = weight +# self.bm["chemshift"] = chemshift + +# def add_mt_pool(self, weight: _Union[float, _npt.NDArray]): +# """ +# Set macromolecolar pool. + +# Args: +# weight (Union[float, npt.NDArray]): Semisolid pool relative fraction. +# """ +# # check pool +# if _np.isscalar(weight): +# weight *= _np.ones((self.n_atoms, 1), dtype=_np.float32) +# elif len(weight.shape) == 1: +# assert _np.array_equal( +# weight.shape, self.shape +# ), "Input weight must be either a scalar or match the existing shape." +# weight = weight[..., None] +# else: +# assert _np.array_equal( +# weight.squeeze().shape, self.shape +# ), "Input weight must be either a scalar or match the existing shape." +# assert weight.shape[-1] == 1, "Pool dimension size must be 1!" + +# self.mt["weight"] = weight + +# def set_exchange_rate(self, *exchange_rate_matrix_rows: _Union[list, tuple]): +# """ +# Build system exchange matrix. + +# Args: +# *exchange_rate_matrix_rows (list or tuple): list or tuple of exchange constant. +# Each argument represent a row of the exchange matrix in s**-1. 
+# Each element of each argument represent a single element of the row; these can +# be either scalar or array-like objects of shape (n_atoms,) + +# """ +# # check that every row has enough exchange rates for each pool +# npools = 1 +# if self.bm: +# npools += self.bm["T1"].shape[-1] +# if self.mt: +# npools += self.mt["T1"].shape[-1] + +# # count rows +# assert ( +# len(exchange_rate_matrix_rows) == npools +# ), "Error! Incorrect number of exchange constant" +# for row in exchange_rate_matrix_rows: +# row = _np.asarray(row).T +# assert ( +# row.shape[0] == npools +# ), "Error! Incorrect number of exchange constant per row" +# for el in row: +# if _np.isscalar(el): +# el *= _np.ones(self.n_atoms, dtype=_np.float32) +# else: +# assert _np.array_equal(el.shape, self.shape), ( +# "Input exchange constant must be either a scalar or match the" +# " existing shape." +# ) +# # stack element in row +# row = _np.stack(row, axis=-1) + +# # stack row +# self.exchange_rate = _np.stack(exchange_rate_matrix_rows, axis=-1) + +# # check it is symmetric +# assert _np.allclose( +# self.exchange_rate, self.exchange_rate.swapaxes(-1, -2) +# ), "Error! Non-directional exchange matrix must be symmetric." + +# def build(self): +# """ +# Return structures for MR simulation. +# """ +# # check that exchange matrix is big enough +# npools = 1 +# if self.bm: +# npools += self.bm["T1"].shape[-1] +# if self.mt: +# npools += self.mt["T1"].shape[-1] + +# # actual check +# assert ( +# self.exchange_rate.shape[-1] == npools +# ), "Error! Incorrect exchange matrix size." +# if npools > 1: +# assert ( +# self.exchange_rate.shape[-2] == npools +# ), "Error! Incorrect exchange matrix size." + +# # prepare output +# mrtp = _asdict(self) + +# # erase unused stuff +# mrtp.pop("n_atoms") +# mrtp.pop("shape") + +# # get segmentation +# seg = mrtp.pop("segmentation") + +# # electromagnetic tissue properties +# emtp = {} +# emtp["chi"] = mrtp.pop("chi") +# emtp["sigma"] = mrtp.pop("sigma") +# emtp["epsilon"] = mrtp.pop("epsilon") + +# return seg, mrtp, emtp +
\ No newline at end of file
diff --git a/_modules/deepmr/_vobj/sampling/cartesian.html b/_modules/deepmr/_vobj/sampling/cartesian.html
new file mode 100644
index 00000000..f85624c9
--- /dev/null
+++ b/_modules/deepmr/_vobj/sampling/cartesian.html
@@ -0,0 +1,854 @@
+deepmr._vobj.sampling.cartesian — deepmr documentation
Source code for deepmr._vobj.sampling.cartesian

+"""Two- and three-dimensional Cartesian sampling."""
+
+__all__ = ["cartesian2D", "cartesian3D"]
+
+import numpy as np
+import torch
+
+# guarded import so that Sphinx documentation builds do not fail if _design is unavailable
+try:
+    from ... import _design
+except Exception:
+    pass
+
+from ..._types import Header
+
+
+
[docs]def cartesian2D(shape, accel=(1, 1), acs_shape=None): + r""" + Design a 2D (+t) cartesian encoding scheme. + + This function only support simple regular undersampling along phase + encoding direction, with Parallel Imaging and Partial Fourier acceleration + and rectangular FOV. For multi-echo acquisitions, sampling pattern is assumed + constant along the echo train. + + Parameters + ---------- + shape : Iterable[int] + Matrix shape ``(x, y, echoes=1)``. + accel : Iterable[int], optional + Acceleration ``(Ry, Pf)``. + Ranges from ``(1, 1)`` (fully sampled) to ``(ny, 0.75)``. + The default is ``(1, 1)``. + acs_shape : int, optional + Matrix size for calibration regions ``ACSy``. + The default is ``None``. + + Returns + ------- + mask : torch.Tensor + Binary mask indicating the sampled k-space locations of shape ``(nz, ny)``. + head : Header + Acquisition header corresponding to the generated sampling pattern. + + Example + ------- + >>> import deepmr + + We can create a 2D Cartesian sampling mask of ``(128, 128)`` pixels with Parallel Imaging factor ``Ry = 2`` by: + + >>> mask, head = deepmr.cartesian2D(128, accel=2) + + Partial Fourier acceleration can be enabled by passing a ``tuple`` as the ``accel`` argument: + + >>> mask, head = deepmr.cartesian2D(128, accel=(1, 0.8)) + + A rectangular matrix can be specified by passing a ``tuple`` as the ``shape`` argument: + + >>> mask, head = deepmr.cartesian2D((128, 96), accel=2) + >>> mask.shape + torch.Size([96, 128]) + + Autocalibration region width can be specified via the ``acs_shape`` argument: + + >>> mask, head = deepmr.cartesian2D(128, accel=2, acs_shape=32) + + The generated mask will have an inner ``(32, 128)`` fully sampled k-space stripe + for coil sensitivity estimation. + + Multiple echoes with the same sampling (e.g., for QSM and T2* mapping) can be obtained by providing + a 3-element tuple of ints as the ``shape`` argument: + + >>> mask, head = deepmr.cartesian2D((128, 128, 8), accel=2) + >>> head.TE.shape + torch.Size([8]) + + corresponding to a 8-echoes undersampled k-spaces. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nx,)``. K-space raster time of ``1 us`` is assumed. + * TE (torch.Tensor): + This is the Echo Times array. Assumes a k-space raster time of ``1 us`` + and minimal echo spacing. + + """ + # expand shape if needed + if np.isscalar(shape): + shape = [shape, shape] + else: + shape = list(shape) + + while len(shape) < 3: + shape = shape + [1] + + shape = shape[:2] + [1] + [shape[-1]] + + # assume 1mm iso + fov = [float(shape[0]), float(shape[1])] + + # get nechoes + nechoes = shape[-1] + shape[-1] = 1 + + # design mask + tmp, _ = _design.cartesian2D(fov, shape[:2], accel, acs_shape=acs_shape) + + # get shape + shape = shape[::-1] + + # get time + t = tmp["t"] + + # calculate TE + min_te = float(tmp["te"][0]) + TE = np.arange(nechoes, dtype=np.float32) * t[-1] + min_te + + # get indexes + head = Header(shape, t=t, TE=TE) + head.torch() + + # build mask + mask = tmp["mask"] + mask = np.repeat(mask, shape[-1], axis=-1) + mask = torch.as_tensor(mask, dtype=int) + mask = mask[0, 0] + + return mask, head
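A sketch of retrospective undersampling with the generated mask. This assumes ``deepmr.fft.fft`` / ``deepmr.fft.ifft`` are the centered FFT helpers used elsewhere in the package and that the returned mask matches the k-space matrix orientation; both are assumptions for illustration only.

>>> import deepmr
>>> img = deepmr.shepp_logan(128)
>>> ksp = deepmr.fft.fft(img)
>>> mask, head = deepmr.cartesian2D(128, accel=2)
>>> ksp_us = mask * ksp                           # zero out non-sampled phase-encoding lines
>>> img_zerofill = deepmr.fft.ifft(ksp_us).abs()  # zero-filled reconstruction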
+ + +
[docs]def cartesian3D(shape, accel_type="PI", accel=(1, 1, 1), shift=0, acs_shape=None): + r""" + Design a 3D (+t) cartesian encoding scheme. + + This function regular undersampling along both phase encoding directions, + with Parallel Imaging (including CAIPIRINHA shift), Partial Fourier acceleration + and rectangular FOV. In addition, variable density Poisson disk sampling for Compressed Sensing + is supported. In the former case, sampling pattern is assumed constant for each contrast + in multi-contrast acquisitions; in the latter, sampling pattern is unique for each contrast. + + For multi-echo acquisitions, sampling pattern is assumed constant along the echo train for + both Parallel Imaging and Poisson Disk sampling. + + Parameters + ---------- + shape : Iterable[int] + Matrix shape ``(y, z, contrast=1, echoes=1)``. + accel_type : str, optional + Acceleration type. Can be either ``PI`` (Parallel Imaging) + or ``CS`` (Compressed Sensing). In the former case, undersampling + is regular and equal for each contrast. In the latter, build unique variable density Poisson-disk + sampling for each contrast. The default is ``PI``. + accel : Iterable[int], optional + Acceleration factor. For ``accel_type = PI``, it is defined as ``(Ry, Rz, Pf)``, + ranging from ``(1, 1, 1)`` (fully sampled) to ``(ny, nz, 0.75)``. For ``accel_type = CS``, + ranges from ``1`` (fully sampled) to ``ny * nz``. + The default is ``(1, 1, 1)``. + shift : int, optional + CAIPIRINHA shift between ``ky`` and ``kz``. + The default is ``0`` (standard Parallel Imaging). + acs_shape : int, optional + Matrix size for calibration regions ``ACSy``. + The default is ``None``. + + Returns + ------- + mask : torch.Tensor + Binary mask indicating the sampled k-space locations of shape ``(ncontrasts, nz, ny)``. + head : Header + Acquisition header corresponding to the generated sampling pattern. + + Example + ------- + >>> import deepmr + + We can create a 3D Cartesian sampling mask of ``(128, 128)`` pixels with Parallel Imaging factor ``(Ry, Rz) = (2, 2)`` by: + + >>> mask, head = deepmr.cartesian3D(128, accel=(2, 2)) + + The undersampling along ``ky`` and ``kz`` can be shifted as in a CAIPIRINHA sampling by specifying the ``shift`` argument: + + >>> mask, head = deepmr.cartesian3D(128, accel=(1, 3), shift=2) + + Partial Fourier acceleration can be enabled by passing a 3-element ``tuple`` as the ``accel`` argument: + + >>> mask, head = deepmr.cartesian3D(128, accel=(2, 2, 0.8)) + + Instead of regular undersampling, variable density Poisson disk sampling can be obtained by passing ``accel_type = CS``: + + >>> mask, head = deepmr.cartesian3D(128, accel_type="CS", accel=4) # 4 is the overall acceleration factor. + + A rectangular matrix can be specified by passing a ``tuple`` as the ``shape`` argument: + + >>> mask, head = deepmr.cartesian3D((128, 96), accel=(2, 2)) + >>> mask.shape + torch.Size([96, 128]) + + Autocalibration region width can be specified via the ``acs_shape`` argument: + + >>> mask, head = deepmr.cartesian2D(128, accel=2, acs_shape=32) + + The generated mask will have an inner ``(32, 32)`` fully sampled ``(kz, ky)`` k-space square + for coil sensitivity estimation. 
+ + Multiple contrasts with different sampling (e.g., for T1-T2 Shuffling) can be achieved by providing + a 3-element tuple of ints as the ``shape`` argument: + + >>> mask, head = deepmr.cartesian3D((128, 128, 96), accel_type="CS", accel=4) + >>> mask.shape + torch.Size([96, 128, 128]) + + corresponding to 96 different contrasts, each sampled with a different undersampled ``(kz, ky)`` pattern. + Similarly, multiple echoes (with fixed sampling) can be specified using a 4-element tuple as: + + >>> mask, head = deepmr.cartesian3D((128, 128, 1, 8), accel=(2, 2)) + >>> mask.shape + torch.Size([128, 128]) + >>> head.TE.shape + torch.Size([8]) + + corresponding to a 8-echoes undersampled k-spaces. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. Matrix is assumed squared (i.e., ``nx = ny``). + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nx,)``. K-space raster time of ``1 us`` is assumed. + * TE (torch.Tensor): + This is the Echo Times array. Assumes a k-space raster time of ``1 us`` + and minimal echo spacing. + + """ + # expand shape if needed + if np.isscalar(shape): + shape = [shape, shape] + else: + shape = list(shape) + + while len(shape) < 4: + shape = shape + [1] + + # assume 1mm iso + fov = [float(shape[0]), float(shape[0]), float(shape[1])] + + # add x + shape = [shape[0]] + shape + + # get ncontrasts + ncontrasts = shape[-2] + shape[-2] = 1 + + # get nechoes + nechoes = shape[-1] + shape[-1] = 1 + + # fix acs_shape + if acs_shape is not None: + acs_shape = list(acs_shape) + + # design mask + if accel_type == "PI": + tmp, _ = _design.cartesian3D( + fov, shape, accel, accel_type=accel_type, shift=shift, acs_shape=acs_shape + ) + elif accel_type == "CS": + if accel == 1: + tmp, _ = _design.cartesian3D(fov, shape, accel_type="PI") + else: + tmp, _ = _design.cartesian3D( + fov, shape, accel, accel_type=accel_type, acs_shape=acs_shape + ) + if ncontrasts > 1: + mask = np.zeros( + [ncontrasts - 1] + list(tmp["mask"].shape[1:]), dtype=tmp["mask"].dtype + ) + mask = np.concatenate((tmp["mask"], mask), axis=0) + idx = np.random.rand(*mask.shape).argsort(axis=0) + mask = np.take_along_axis(mask, idx, axis=0) + tmp["mask"] = mask + else: + raise ValueError( + f"accel_type = {accel_type} not recognized; must be either 'PI' or 'CS'." + ) + + # get shape + shape = shape[::-1] + + # get time + t = tmp["t"] + + # calculate TE + min_te = float(tmp["te"][0]) + TE = np.arange(nechoes, dtype=np.float32) * t[-1] + min_te + + # get indexes + head = Header(shape, t=t, TE=TE) + head.torch() + + # build mask + mask = tmp["mask"] + mask = mask[..., 0] + mask = torch.as_tensor(mask, dtype=int) + + return mask, head
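A quick sanity check for a Compressed Sensing mask is its sampling fraction, which should be roughly the inverse of the requested acceleration (plus the fully sampled calibration region, when one is requested):

>>> import deepmr
>>> mask, head = deepmr.cartesian3D(128, accel_type="CS", accel=4)
>>> sampling_fraction = mask.float().mean()  # roughly 1 / 4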
\ No newline at end of file
diff --git a/_modules/deepmr/_vobj/sampling/radial.html b/_modules/deepmr/_vobj/sampling/radial.html
new file mode 100644
index 00000000..b295c277
--- /dev/null
+++ b/_modules/deepmr/_vobj/sampling/radial.html
@@ -0,0 +1,700 @@
+deepmr._vobj.sampling.radial — deepmr documentation
Source code for deepmr._vobj.sampling.radial

+"""Two-dimensional radial sampling."""
+
+__all__ = ["radial"]
+
+import math
+import numpy as np
+
+# guarded import so that Sphinx documentation builds do not fail if _design is unavailable
+try:
+    from ... import _design
+except Exception:
+    pass
+
+from ..._types import Header
+
+
+
[docs]def radial(shape, nviews=None, **kwargs): + r""" + Design a radial trajectory. + + The radial spokes are rotated by a pseudo golden angle + with period 377 interelaves. Rotations are performed both along + ``view`` and ``contrast`` dimensions. Acquisition is assumed to + traverse the ``contrast`` dimension first and then the ``view``, + i.e., all the contrasts are acquired before moving to the second view. + If multiple echoes are specified, final contrast dimensions will have + length ``ncontrasts * nechoes``. Echoes are assumed to be acquired + sequentially with the same radial spoke. + + Parameters + ---------- + shape : Iterable[int] + Matrix shape ``(in-plane, contrasts=1, echoes=1)``. + nviews : int, optional + Number of spokes. + The default is ``$\pi$ * shape[0]`` if ``shape[1] == 1``, otherwise it is ``1``. + + Keyword Arguments + ----------------- + variant : str + Type of radial trajectory. Allowed values are: + + * ``fullspoke``: starts at the edge of k-space and ends on the opposite side (default). + * ``center-out``: starts at the center of k-space and ends at the edge. + + Returns + ------- + head : Header + Acquisition header corresponding to the generated sampling pattern. + + Example + ------- + >>> import deepmr + + We can create a Nyquist-sampled radial trajectory for an in-plane matrix of ``(128, 128)`` pixels by: + + >>> head = deepmr.radial(128) + + An undersampled trajectory can be generated by specifying the ``nviews`` argument: + + >>> head = deepmr.radial(128, nviews=64) + + Multiple contrasts with different sampling (e.g., for MR Fingerprinting) can be achieved by providing + a tuple of ints as the ``shape`` argument: + + >>> head = deepmr.radial((128, 420)) + >>> head.traj.shape + torch.Size([420, 1, 128, 2]) + + corresponding to 420 different contrasts, each sampled with a different single radial spoke of 128 points. + Similarly, multiple echoes (with fixed sampling) can be specified as: + + >>> head = deepmr.radial((128, 1, 8)) + >>> head.traj.shape + torch.Size([8, 402, 128, 2]) + + corresponding to a 8-echoes fully sampled k-spaces, e.g., for QSM and T2* mapping. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. + * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5 * shape, 0.5 * shape)`` + with shape ``(ncontrasts, nviews, nsamples, 2)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. + * TE (torch.Tensor): + This is the Echo Times array. Assumes a k-space raster time of ``1 us`` + and minimal echo spacing. 
+ + """ + # expand shape if needed + if np.isscalar(shape): + shape = [shape, 1] + else: + shape = list(shape) + + while len(shape) < 3: + shape = shape + [1] + + # default views + if nviews is None: + if shape[1] == 1: + nviews = int(math.pi * shape[0]) + else: + nviews = 1 + + # assume 1mm iso + fov = shape[0] + + # design single interleaf spiral + tmp, _ = _design.radial(fov, shape[0], 1, 1, **kwargs) + + # rotate + ncontrasts = shape[1] + + # generate angles + dphi = (1 - 233 / 377) * 360.0 + phi = np.arange(ncontrasts * nviews) * dphi # angles in degrees + phi = np.deg2rad(phi) # angles in radians + + # build rotation matrix + rot = _design.angleaxis2rotmat(phi, "z") + + # get trajectory + traj = tmp["kr"] * tmp["mtx"] + traj = _design.projection(traj[0].T, rot) + traj = traj.swapaxes(-2, -1).T + traj = traj.reshape(nviews, ncontrasts, *traj.shape[-2:]) + traj = traj.swapaxes(0, 1) + + # expand echoes + nechoes = shape[-1] + traj = np.repeat(traj, nechoes, axis=0) + + # get dcf + dcf = tmp["dcf"] + + # get shape + shape = tmp["mtx"] + + # get time + t = tmp["t"] + + # calculate TE + min_te = float(tmp["te"][0]) + TE = np.arange(nechoes, dtype=np.float32) * t[-1] + min_te + + # get indexes + head = Header(shape, t=t, traj=traj, dcf=dcf, TE=TE) + head.torch() + + return head
\ No newline at end of file
diff --git a/_modules/deepmr/_vobj/sampling/radial_proj.html b/_modules/deepmr/_vobj/sampling/radial_proj.html
new file mode 100644
index 00000000..05f492d6
--- /dev/null
+++ b/_modules/deepmr/_vobj/sampling/radial_proj.html
@@ -0,0 +1,760 @@
+deepmr._vobj.sampling.radial_proj — deepmr documentation
Source code for deepmr._vobj.sampling.radial_proj

+"""Three-dimensional radial projection sampling."""
+
+__all__ = ["radial_proj"]
+
+import math
+import numpy as np
+
+# guarded import so that Sphinx documentation builds do not fail if _design is unavailable
+try:
+    from ... import _design
+except Exception:
+    pass
+
+from ..._types import Header
+
+
+
[docs]def radial_proj(shape, nviews=None, order="ga", **kwargs): + r""" + Design a 3D radial projectiontrajectory. + + The trajectory consists of a 2D radial trajectory, whose plane + is rotated to cover the 3D k-space. In-plane rotations + are sequential. Plane rotation types are specified + via the ``order`` argument. + + Parameters + ---------- + shape : Iterable[int] + Matrix shape ``(in-plane, contrasts=1, echoes=1)``. + nviews : int, optional + Number of spokes (in-plane, radial). + The default is ``$\pi$ * (shape[0], shape[1])`` if ``shape[2] == 1``, + otherwise it is ``($\pi$ * shape[0], 1)``. + order : str, optional + Radial plane rotation type. + These can be: + + * ``ga``: Pseudo golden angle variation of periodicity ``377``. + * ``ga::multiaxis``: Pseudo golden angle, i.e., same as ``ga`` but views are repeated 3 times on orthogonal axes. + * ``ga-sh``: Shuffled pseudo golden angle. + * ``ga-sh::multiaxis``: Multiaxis shuffled pseudo golden angle, i.e., same as ``ga-sh`` but views are repeated 3 times on orthogonal axes. + + The default is ``ga``. + + Keyword Arguments + ----------------- + variant : str + Type of radial trajectory. Allowed values are: + + * ``fullspoke``: starts at the edge of k-space and ends on the opposite side (default). + * ``center-out``: starts at the center of k-space and ends at the edge. + + Returns + ------- + head : Header + Acquisition header corresponding to the generated sampling pattern. + + Example + ------- + >>> import deepmr + + We can create a Nyquist-sampled 3D radial trajectory for a matrix of ``(128, 128, 128)`` voxels by: + + >>> head = deepmr.radial_proj(128) + + An undersampled trajectory can be generated by specifying the ``nviews`` argument: + + >>> head = deepmr.radial_proj(128, nviews=64) + + Multiple contrasts with different sampling (e.g., for MR Fingerprinting) can be achieved by providing + a tuple of ints as the ``shape`` argument: + + >>> head = deepmr.radial_proj((128, 420)) + >>> head.traj.shape + torch.Size([420, 402, 128, 2]) + + corresponding to 420 different contrasts, each sampled with a different fully sampled plane. + Similarly, multiple echoes (with fixed sampling) can be specified as: + + >>> head = deepmr.radial_proj((128, 1, 8)) + >>> head.traj.shape + torch.Size([8, 161604, 128, 2]) + + corresponding to a 8-echoes fully sampled k-spaces, e.g., for QSM and T2* mapping. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. + * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5 * shape, 0.5 * shape)`` + with shape ``(ncontrasts, nviews, nsamples, 2)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. + * TE (torch.Tensor): + This is the Echo Times array. Assumes a k-space raster time of ``1 us`` + and minimal echo spacing. 
+ + """ + # expand shape if needed + if np.isscalar(shape): + shape = [shape, 1] + else: + shape = list(shape) + + while len(shape) < 3: + shape = shape + [1] + + # default views + if nviews is None: + if shape[1] == 1: + nviews = int(math.pi * shape[0]) + else: + nviews = 1 + + # expand nviews if needed + if np.isscalar(nviews): + nviews = [int(math.pi * shape[0]), nviews] + else: + nviews = list(nviews) + + # assume 1mm iso + fov = shape[0] + + # design single interleaf spiral + tmp, _ = _design.radial(fov, shape[0], 1, 1, **kwargs) + + # generate angles + ncontrasts = shape[1] + + dphi = 360.0 / nviews[0] + dtheta = (1 - 233 / 377) * 360.0 + + # build rotation angles + j = np.arange(ncontrasts * nviews[1]) + i = np.arange(nviews[0]) + + j = np.tile(j, nviews[0]) + i = np.repeat(i, ncontrasts * nviews[1]) + + # radial angle + if order[:5] == "ga-sh": + theta = (i + j) * dtheta + else: + theta = j * dtheta + + # in-plane angle + phi = i * dphi + + # convert to radians + theta = np.deg2rad(theta) # angles in radians + phi = np.deg2rad(phi) # angles in radians + + # perform rotation + axis = np.zeros_like(theta, dtype=int) # rotation axis + Rx = _design.angleaxis2rotmat(theta, [1, 0, 0]) # whole-plane rotation about x + Rz = _design.angleaxis2rotmat(phi, [0, 0, 1]) # in-plane rotation about z + + # put together full rotation matrix + rot = np.einsum("...ij,...jk->...ik", Rx, Rz) + + # get trajectory + traj = tmp["kr"] * tmp["mtx"] + traj = np.concatenate((traj, 0 * traj[..., [0]]), axis=-1) + traj = _design.projection(traj[0].T, rot) + traj = traj.swapaxes(-2, -1).T + traj = traj.reshape(nviews[0], nviews[1], ncontrasts, *traj.shape[-2:]) + traj = traj.transpose(2, 1, 0, *np.arange(3, len(traj.shape))) + traj = traj.reshape(ncontrasts, -1, *traj.shape[3:]) + + # get dcf + dcf = tmp["dcf"] + dcf = _design.angular_compensation(dcf, traj.reshape(-1, *traj.shape[-2:]), axis) + dcf = dcf.reshape(*traj.shape[:-1]) + + # apply multiaxis + if order[-9:] == "multiaxis": + # expand trajectory + traj1 = np.stack((traj[..., 2], traj[..., 0], traj[..., 1]), axis=-1) + traj2 = np.stack((traj[..., 1], traj[..., 2], traj[..., 0]), axis=-1) + traj = np.concatenate((traj, traj1, traj2), axis=-3) + + # expand dcf + dcf = np.concatenate((dcf, dcf, dcf), axis=-2) + + # renormalize dcf + tabs = (traj[0, 0] ** 2).sum(axis=-1) ** 0.5 + k0_idx = np.argmin(tabs) + nshots = nviews[0] * nviews[1] * ncontrasts + + # impose that center of k-space weight is 1 / nshots + scale = 1.0 / (dcf[[k0_idx]] + 0.000001) / nshots + dcf = scale * dcf + + # expand echoes + nechoes = shape[-1] + traj = np.repeat(traj, nechoes, axis=0) + dcf = np.repeat(dcf, nechoes, axis=0) + + # get shape + shape = [shape[0]] + list(tmp["mtx"]) + + # get time + t = tmp["t"] + + # calculate TE + min_te = float(tmp["te"][0]) + TE = np.arange(nechoes, dtype=np.float32) * t[-1] + min_te + + # get indexes + head = Header(shape, t=t, traj=traj, dcf=dcf, TE=TE) + head.torch() + + return head
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/_vobj/sampling/radial_stack.html b/_modules/deepmr/_vobj/sampling/radial_stack.html new file mode 100644 index 00000000..2bf1dc1a --- /dev/null +++ b/_modules/deepmr/_vobj/sampling/radial_stack.html @@ -0,0 +1,758 @@ + + + + + + + + + + + deepmr._vobj.sampling.radial_stack — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr._vobj.sampling.radial_stack

+"""Three-dimensional stack-of-stars sampling."""
+
+__all__ = ["radial_stack"]
+
+import math
+import numpy as np
+
+# import guard: lets Sphinx build the docs when the compiled _design module is unavailable
+try:
+    from ... import _design
+except Exception:
+    pass
+
+from ..._types import Header
+
+
+
[docs]def radial_stack(shape, nviews=None, accel=1, **kwargs): + r""" + Design a stack-of-stars trajectory. + + As in the 2D radial case, spokes are rotated by a pseudo golden angle + with period 377 interelaves. Rotations are performed both along + ``view`` and ``contrast`` dimensions. Acquisition is assumed to + traverse the ``contrast`` dimension first and then the ``view``, + i.e., all the contrasts are acquired before moving to the second view. + If multiple echoes are specified, final contrast dimensions will have + length ``ncontrasts * nechoes``. Echoes are assumed to be acquired + sequentially with the same spoke. + + Finally, slice dimension is assumed to be the outermost loop. + + Parameters + ---------- + shape : Iterable[int] + Matrix shape ``(in-plane, slices=1, contrasts=1, echoes=1)``. + nviews : int, optional + Number of spokes. + The default is ``$\pi$ * shape[0]`` if ``shape[1] == 1``, otherwise it is ``1``. + accel : int, optional + Slice acceleration factor. + Ranges from ``1`` (fully sampled) to ``nslices``. + The default is ``1``. + + Keyword Arguments + ----------------- + acs_shape : int + Matrix size for inner (coil sensitivity estimation) region along slice encoding direction. + The default is ``None``. + variant : str + Type of radial trajectory. Allowed values are: + + * ``fullspoke``: starts at the edge of k-space and ends on the opposite side (default). + * ``center-out``: starts at the center of k-space and ends at the edge. + + Returns + ------- + head : Header + Acquisition header corresponding to the generated sampling pattern. + + Example + ------- + >>> import deepmr + + We can create a Nyquist-sampled stack-of-stars trajectory for a ``(128, 128, 120)`` voxels matrix by: + + >>> head = deepmr.radial_stack((128, 120)) + + An undersampled trajectory can be generated by specifying the ``nviews`` argument: + + >>> head = deepmr.radial_stack((128, 120), nviews=64) + + Slice acceleration can be specified using the ``accel`` argument. For example, the following + + >>> head = deepmr.radial_stack((128, 120), accel=2) + + will generate the following trajectory: + + >>> head.traj.shape + torch.Size([1, 24120, 128, 3]) + + i.e., a Nyquist-sampled stack-of-stars trajectory with a slice acceleration of 2 (i.e., 60 encodings). + + Parallel imaging calibration region can be specified using ``acs_shape`` argument: + + >>> head = deepmr.radial_stack((128, 120), accel=2, acs_shape=32) + + The generated stack will have an inner ``32``-wide fully sampled k-space region. + + Multiple contrasts with different sampling (e.g., for MR Fingerprinting) can be achieved by providing + a tuple of ints as the ``shape`` argument: + + >>> head = deepmr.radial_stack((128, 120, 420)) + >>> head.traj.shape + torch.Size([420, 120, 128, 3]) + + corresponding to 420 different contrasts, each sampled with a single radial spoke of 128 points, + repeated for 120 slice encodings. Similarly, multiple echoes (with fixed sampling) can be specified as: + + >>> head = deepmr.radial_stack((128, 120, 1, 8)) + >>> head.traj.shape + torch.Size([8, 48240, 128, 3]) + + corresponding to a 8-echoes fully sampled k-spaces, e.g., for QSM and T2* mapping. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. 
+ * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5 * shape, 0.5 * shape)`` + with shape ``(ncontrasts, nviews, nsamples, 2)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. + * TE (torch.Tensor): + This is the Echo Times array. Assumes a k-space raster time of ``1 us`` + and minimal echo spacing. + + """ + assert len(shape) >= 2, "Please provide at least (in-plane, nslices) as shape." + + # expand shape if needed + shape = list(shape) + + while len(shape) < 4: + shape = shape + [1] + + # default views + if nviews is None: + if shape[2] == 1: + nviews = int(math.pi * shape[0]) + else: + nviews = 1 + + # expand acs if needed + if "acs_shape" in kwargs: + acs_shape = kwargs["acs_shape"] + else: + acs_shape = None + kwargs.pop("acs_shape", None) + + # assume 1mm iso + fov = shape[0] + + # design single interleaf spiral + tmp, _ = _design.radial(fov, shape[0], 1, 1, **kwargs) + + # rotate + ncontrasts = shape[2] + + # generate angles + dphi = (1 - 233 / 377) * 360.0 + phi = np.arange(ncontrasts * nviews) * dphi # angles in degrees + phi = np.deg2rad(phi) # angles in radians + + # build rotation matrix + rot = _design.angleaxis2rotmat(phi, "z") + + # get trajectory + traj = tmp["kr"] * tmp["mtx"] + traj = _design.projection(traj[0].T, rot) + traj = traj.swapaxes(-2, -1).T + traj = traj.reshape(nviews, ncontrasts, *traj.shape[-2:]) + traj = traj.swapaxes(0, 1) + + # expand slices + nz = shape[1] + az = np.arange(-nz // 2, nz // 2, dtype=np.float32) + + # accelerate + az = az[::accel] + + # add back ACS + if acs_shape is not None: + az = np.concatenate( + (az, np.arange(-acs_shape // 2, acs_shape // 2, dtype=np.float32)) + ) + az = np.unique(az) + + # expand + traj = np.apply_along_axis(np.tile, -3, traj, len(az)) + az = np.repeat(az, nviews) + az = az[None, :, None] * np.ones_like(traj[..., 0]) + + # append new axis + traj = np.concatenate((traj, az[..., None]), axis=-1) + + # expand echoes + nechoes = shape[-1] + traj = np.repeat(traj, nechoes, axis=0) + + # get dcf + dcf = tmp["dcf"] + + # get shape + shape = [shape[1]] + tmp["mtx"] + + # get time + t = tmp["t"] + + # calculate TE + min_te = float(tmp["te"][0]) + TE = np.arange(nechoes, dtype=np.float32) * t[-1] + min_te + + # extra args + user = {} + user["acs_shape"] = tmp["acs"]["mtx"] + + # get indexes + head = Header(shape, t=t, traj=traj, dcf=dcf, TE=TE, user=user) + head.torch() + + return head
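The slice-encoding logic above (regular ``accel`` undersampling along kz plus re-insertion of a fully sampled ``acs_shape`` calibration core) can be illustrated with a small NumPy-only sketch; the sizes below are illustrative assumptions::

    import numpy as np

    nz, accel, acs_shape = 16, 2, 4    # illustrative values

    az = np.arange(-nz // 2, nz // 2, dtype=np.float32)   # fully sampled kz encodes
    az = az[::accel]                                       # regular undersampling
    if acs_shape is not None:                              # re-insert the calibration core
        acs = np.arange(-acs_shape // 2, acs_shape // 2, dtype=np.float32)
        az = np.unique(np.concatenate((az, acs)))

    print(az)   # kz encodes actually acquired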
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/_vobj/sampling/rosette.html b/_modules/deepmr/_vobj/sampling/rosette.html new file mode 100644 index 00000000..1eb8f04f --- /dev/null +++ b/_modules/deepmr/_vobj/sampling/rosette.html @@ -0,0 +1,699 @@ + + + + + + + + + + + deepmr._vobj.sampling.rosette — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr._vobj.sampling.rosette

+"""Two-dimensional rosette sampling."""
+
+__all__ = ["rosette"]
+
+import math
+import numpy as np
+
+# import guard: lets Sphinx build the docs when the compiled _design module is unavailable
+try:
+    from ... import _design
+except Exception:
+    pass
+
+from ..._types import Header
+
+
+
[docs]def rosette(shape, nviews=None, bending_factor=1.0): + r""" + Design a rosette trajectory. + + The rosette petals are rotated by a pseudo golden angle + with period 377 interelaves. Rotations are performed both along + ``view`` and ``contrast`` dimensions. Acquisition is assumed to + traverse the ``contrast`` dimension first and then the ``view``, + i.e., all the contrasts are acquired before moving to the second view. + If multiple echoes are specified, final contrast dimensions will have + length ``ncontrasts * nechoes``. + + Parameters + ---------- + shape : Iterable[int] + Matrix shape ``(in-plane, contrasts=1, echoes=1)``. + nviews : int, optional + Number of spokes. + The default is ``$\pi$ * shape[0]`` if ``shape[1] == 1``, otherwise it is ``1``. + bending_factor : float, optional + This is ``0.0`` for radial-like trajectory; increase for maximum coverage per shot. + In real world, must account for hardware and safety limitations. + The default is ``1.0``. + + Returns + ------- + head : Header + Acquisition header corresponding to the generated sampling pattern. + + Example + ------- + >>> import deepmr + + We can create a Nyquist-sampled rosette trajectory for an in-plane matrix of ``(128, 128)`` pixels by: + + >>> head = deepmr.rosette(128) + + An undersampled trajectory can be generated by specifying the ``nviews`` argument: + + >>> head = deepmr.rosette(128, nviews=64) + + Petals bending can be modified via ``bending_factor``: + + >>> head = deepmr.rosette(128, bending_factor=1.0) # radial-like trajectory + + Multiple contrasts with different sampling (e.g., for MR Fingerprinting) can be achieved by providing + a tuple of ints as the ``shape`` argument: + + >>> head = deepmr.rosette((128, 420)) + >>> head.traj.shape + torch.Size([420, 1, 128, 2]) + + corresponding to 420 different contrasts, each sampled with a different single radial spoke of 128 points. + Similarly, multiple echoes (with fixed sampling) can be specified as: + + >>> head = deepmr.rosette((128, 1, 8)) # 8 echoes + >>> head.traj.shape + torch.Size([8, 402, 128, 2]) + + corresponding to a 8-echoes fully sampled k-spaces, e.g., for QSM and T2* mapping. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. + * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5 * shape, 0.5 * shape)`` + with shape ``(ncontrasts, nviews, nsamples, 2)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. + * TE (torch.Tensor): + This is the Echo Times array. 
+ + """ + # expand shape if needed + if np.isscalar(shape): + shape = [shape, 1] + else: + shape = list(shape) + + while len(shape) < 3: + shape = shape + [1] + + # default views + if nviews is None: + if shape[1] == 1: + nviews = int(math.pi * shape[0]) + else: + nviews = 1 + + # assume 1mm iso + fov = shape[0] + + # get number of contrasts + ncontrasts = shape[1] + shape[1] = 1 + shape = [shape[0], shape[2], shape[1]] + + # design single interleaf spiral + tmp, _ = _design.rosette(fov, shape, 1, 1, int(math.pi * shape[0]), bending_factor) + + # generate angles + dphi = (1 - 233 / 377) * 360.0 + phi = np.arange(ncontrasts * nviews) * dphi # angles in degrees + phi = np.deg2rad(phi) # angles in radians + + # build rotation matrix + rot = _design.angleaxis2rotmat(phi, "z") + + # get trajectory + traj = tmp["kr"] * tmp["mtx"] + traj = _design.projection(traj[0].T, rot) + traj = traj.swapaxes(-2, -1).T + traj = traj.reshape(nviews, ncontrasts, *traj.shape[-2:]) + traj = traj.swapaxes(0, 1) + + # expand echoes + nechoes = shape[1] + traj = np.repeat(traj, nechoes, axis=0) + + # get dcf + dcf = tmp["dcf"] + + # get shape + shape = tmp["mtx"] + + # get time + t = tmp["t"] + + # calculate TE + TE = tmp["te"] + + # get indexes + head = Header(shape, t=t, traj=traj, dcf=dcf, TE=TE) + head.torch() + + return head
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/_vobj/sampling/rosette_proj.html b/_modules/deepmr/_vobj/sampling/rosette_proj.html new file mode 100644 index 00000000..9d61f300 --- /dev/null +++ b/_modules/deepmr/_vobj/sampling/rosette_proj.html @@ -0,0 +1,760 @@ + + + + + + + + + + + deepmr._vobj.sampling.rosette_proj — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr._vobj.sampling.rosette_proj

+"""Three-dimensional rosette projection sampling."""
+
+__all__ = ["rosette_proj"]
+
+import math
+import numpy as np
+
+# import guard: lets Sphinx build the docs when the compiled _design module is unavailable
+try:
+    from ... import _design
+except Exception:
+    pass
+
+from ..._types import Header
+
+
+
[docs]def rosette_proj(shape, nviews=None, bending_factor=1.0, order="ga"): + r""" + Design a 3D rosette projection trajectory. + + The trajectory consists of a 2D rosette trajectory, whose plane + is rotated to cover the 3D k-space. In-plane rotations + are sequential. Plane rotation types are specified + via the ``order`` argument. + + Parameters + ---------- + shape : Iterable[int] + Matrix shape ``(in-plane, contrasts=1, echoes=1)``. + nviews : int, optional + Number of petals (in-plane, radial). + The default is ``$\pi$ * (shape[0], shape[1])`` if ``shape[2] == 1``, + otherwise it is ``($\pi$ * shape[0], 1)``. + bending_factor : float, optional + This is ``0.0`` for radial-like trajectory; increase for maximum coverage per shot. + In real world, must account for hardware and safety limitations. + The default is ``1.0``. + order : str, optional + Rosette plane rotation type. + These can be: + + * ``ga``: Pseudo golden angle variation of periodicity ``377``. + * ``ga::multiaxis``: Pseudo golden angle, i.e., same as ``ga`` but views are repeated 3 times on orthogonal axes. + * ``ga-sh``: Shuffled pseudo golden angle. + * ``ga-sh::multiaxis``: Multiaxis shuffled pseudo golden angle, i.e., same as ``ga-sh`` but views are repeated 3 times on orthogonal axes. + + The default is ``ga``. + + Returns + ------- + head : Header + Acquisition header corresponding to the generated sampling pattern. + + Example + ------- + >>> import deepmr + + We can create a Nyquist-sampled 3D rosette trajectory for a matrix of ``(128, 128, 128)`` voxels by: + + >>> head = deepmr.rosette_proj(128) + + An undersampled trajectory can be generated by specifying the ``nviews`` argument: + + >>> head = deepmr.rosette_proj(128, nviews=64) + + Petals bending can be modified via ``bending_factor``: + + >>> head = deepmr.rosette_proj(128, bending_factor=1.0) # radial-like trajectory + + Multiple contrasts with different sampling (e.g., for MR Fingerprinting) can be achieved by providing + a tuple of ints as the ``shape`` argument: + + >>> head = deepmr.rosette_proj((128, 420)) + >>> head.traj.shape + torch.Size([420, 402, 128, 2]) + + corresponding to 420 different contrasts, each sampled with a different fully sampled plane. + Similarly, multiple echoes (with fixed sampling) can be specified as: + + >>> head = deepmr.rosette_proj((128, 1, 8)) + >>> head.traj.shape + torch.Size([8, 161604, 128, 2]) + + corresponding to a 8-echoes fully sampled k-spaces, e.g., for QSM and T2* mapping. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. + * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5 * shape, 0.5 * shape)`` + with shape ``(ncontrasts, nviews, nsamples, 2)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. + * TE (torch.Tensor): + This is the Echo Times array. 
+ + """ + # expand shape if needed + if np.isscalar(shape): + shape = [shape, 1] + else: + shape = list(shape) + + while len(shape) < 3: + shape = shape + [1] + + # default views + if nviews is None: + if shape[1] == 1: + nviews = int(math.pi * shape[0]) + else: + nviews = 1 + + # expand nviews if needed + if np.isscalar(nviews): + nviews = [int(math.pi * shape[0]), nviews] + else: + nviews = list(nviews) + + # assume 1mm iso + fov = shape[0] + + # get number of contrasts + ncontrasts = shape[1] + shape[1] = 1 + shape = [shape[0], shape[2], shape[1]] + + # design single interleaf spiral + tmp, _ = _design.rosette(fov, shape, 1, 1, int(math.pi * shape[0]), bending_factor) + + dphi = 360.0 / nviews[0] + dtheta = (1 - 233 / 377) * 360.0 + + # build rotation angles + j = np.arange(ncontrasts * nviews[1]) + i = np.arange(nviews[0]) + + j = np.tile(j, nviews[0]) + i = np.repeat(i, ncontrasts * nviews[1]) + + # radial angle + if order[:5] == "ga-sh": + theta = (i + j) * dtheta + else: + theta = j * dtheta + + # in-plane angle + phi = i * dphi + + # convert to radians + theta = np.deg2rad(theta) # angles in radians + phi = np.deg2rad(phi) # angles in radians + + # perform rotation + axis = np.zeros_like(theta, dtype=int) # rotation axis + Rx = _design.angleaxis2rotmat(theta, [1, 0, 0]) # whole-plane rotation about x + Rz = _design.angleaxis2rotmat(phi, [0, 0, 1]) # in-plane rotation about z + + # put together full rotation matrix + rot = np.einsum("...ij,...jk->...ik", Rx, Rz) + + # get trajectory + traj = tmp["kr"] * tmp["mtx"] + traj = np.concatenate((traj, 0 * traj[..., [0]]), axis=-1) + traj = _design.projection(traj[0].T, rot) + traj = traj.swapaxes(-2, -1).T + traj = traj.reshape(nviews[0], nviews[1], ncontrasts, *traj.shape[-2:]) + traj = traj.transpose(2, 1, 0, *np.arange(3, len(traj.shape))) + traj = traj.reshape(ncontrasts, -1, *traj.shape[3:]) + + # get dcf + dcf = tmp["dcf"] + dcf = _design.angular_compensation(dcf, traj.reshape(-1, *traj.shape[-2:]), axis) + dcf = dcf.reshape(*traj.shape[:-1]) + + # apply multiaxis + if order[-9:] == "multiaxis": + # expand trajectory + traj1 = np.stack((traj[..., 2], traj[..., 0], traj[..., 1]), axis=-1) + traj2 = np.stack((traj[..., 1], traj[..., 2], traj[..., 0]), axis=-1) + traj = np.concatenate((traj, traj1, traj2), axis=-3) + + # expand dcf + dcf = np.concatenate((dcf, dcf, dcf), axis=-2) + + # renormalize dcf + tabs = (traj[0, 0] ** 2).sum(axis=-1) ** 0.5 + k0_idx = np.argmin(tabs) + nshots = nviews[0] * nviews[1] * ncontrasts + + # impose that center of k-space weight is 1 / nshots + scale = 1.0 / (dcf[[k0_idx]] + 0.000001) / nshots + dcf = scale * dcf + + # expand echoes + nechoes = shape[1] + traj = np.repeat(traj, nechoes, axis=0) + dcf = np.repeat(dcf, nechoes, axis=0) + + # get shape + shape = [shape[0]] + list(tmp["mtx"]) + + # get time + t = tmp["t"] + + # calculate TE + TE = tmp["te"] + + # get indexes + head = Header(shape, t=t, traj=traj, dcf=dcf, TE=TE) + head.torch() + + return head
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/_vobj/sampling/rosette_stack.html b/_modules/deepmr/_vobj/sampling/rosette_stack.html new file mode 100644 index 00000000..3fa9a227 --- /dev/null +++ b/_modules/deepmr/_vobj/sampling/rosette_stack.html @@ -0,0 +1,758 @@ + + + + + + + + + + + deepmr._vobj.sampling.rosette_stack — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr._vobj.sampling.rosette_stack

+"""Three-dimensional stack-of-rosettes sampling."""
+
+__all__ = ["rosette_stack"]
+
+import math
+import numpy as np
+
+# import guard: lets Sphinx build the docs when the compiled _design module is unavailable
+try:
+    from ... import _design
+except Exception:
+    pass
+
+from ..._types import Header
+
+
+
[docs]def rosette_stack(shape, nviews=None, accel=1, bending_factor=1.0, **kwargs): + r""" + Design a stack-of-rosettes trajectory. + + As in the 2D rosette case, petals are rotated by a pseudo golden angle + with period 377 interelaves. Rotations are performed both along + ``view`` and ``contrast`` dimensions. Acquisition is assumed to + traverse the ``contrast`` dimension first and then the ``view``, + i.e., all the contrasts are acquired before moving to the second view. + If multiple echoes are specified, final contrast dimensions will have + length ``ncontrasts * nechoes``. + + Parameters + ---------- + shape : Iterable[int] + Matrix shape ``(in-plane, slices=1, contrasts=1, echoes=1)``. + nviews : int, optional + Number of spokes. + The default is ``$\pi$ * shape[0]`` if ``shape[1] == 1``, otherwise it is ``1``. + accel : int, optional + Slice acceleration factor. + Ranges from ``1`` (fully sampled) to ``nslices``. + The default is ``1``. + bending_factor : float, optional + This is ``0.0`` for radial-like trajectory; increase for maximum coverage per shot. + In real world, must account for hardware and safety limitations. + The default is ``1.0``. + + Keyword Arguments + ----------------- + acs_shape : int + Matrix size for inner (coil sensitivity estimation) region along slice encoding direction. + The default is ``None``. + + Returns + ------- + head : Header + Acquisition header corresponding to the generated sampling pattern. + + Example + ------- + >>> import deepmr + + We can create a Nyquist-sampled stack-of-rosettes trajectory for a ``(128, 128, 120)`` voxels matrix by: + + >>> head = deepmr.rosette_stack((128, 120)) + + An undersampled trajectory can be generated by specifying the ``nviews`` argument: + + >>> head = deepmr.rosette_stack((128, 120), nviews=64) + + Slice acceleration can be specified using the ``accel`` argument. For example, the following + + >>> head = deepmr.rosette_stack((128, 120), accel=2) + + will generate the following trajectory: + + >>> head.traj.shape + torch.Size([1, 24120, 128, 3]) + + i.e., a Nyquist-sampled stack-of-rosettes trajectory with a slice acceleration of 2 (i.e., 60 encodings). + + Parallel imaging calibration region can be specified using ``acs_shape`` argument: + + >>> head = deepmr.rosette_stack((128, 120), accel=2, acs_shape=32) + + The generated stack will have an inner ``32``-wide fully sampled k-space region. + + Petals bending can be modified via ``bending_factor``: + + >>> head = deepmr.rosette_stack(128, bending_factor=1.0) # radial-like trajectory + + Multiple contrasts with different sampling (e.g., for MR Fingerprinting) can be achieved by providing + a tuple of ints as the ``shape`` argument: + + >>> head = deepmr.rosette_stack((128, 120, 420)) + >>> head.traj.shape + torch.Size([420, 120, 128, 3]) + + corresponding to 420 different contrasts, each sampled with a single petal of 128 points, + repeated for 120 slice encodings. Similarly, multiple echoes (with fixed sampling) can be specified as: + + >>> head = deepmr.rosette_stack((128, 120, 1, 8)) + >>> head.traj.shape + torch.Size([8, 48240, 128, 3]) + + corresponding to a 8-echoes fully sampled k-spaces, e.g., for QSM and T2* mapping. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. 
+ * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5 * shape, 0.5 * shape)`` + with shape ``(ncontrasts, nviews, nsamples, 2)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. + * TE (torch.Tensor): + This is the Echo Times array. + + """ + assert len(shape) >= 2, "Please provide at least (in-plane, nslices) as shape." + + # expand shape if needed + shape = list(shape) + + while len(shape) < 4: + shape = shape + [1] + + # default views + if nviews is None: + if shape[2] == 1: + nviews = int(math.pi * shape[0]) + else: + nviews = 1 + + # expand acs if needed + if "acs_shape" in kwargs: + acs_shape = kwargs["acs_shape"] + else: + acs_shape = None + kwargs.pop("acs_shape", None) + + # assume 1mm iso + fov = shape[0] + + # get number of slices and contrasts + nz = shape[1] + ncontrasts = shape[3] + shape[3] = 1 + shape = [shape[0], shape[3], shape[2]] + + # design single interleaf spiral + tmp, _ = _design.rosette(fov, shape, 1, 1, int(math.pi * shape[0]), bending_factor) + + # generate angles + dphi = (1 - 233 / 377) * 360.0 + phi = np.arange(ncontrasts * nviews) * dphi # angles in degrees + phi = np.deg2rad(phi) # angles in radians + + # build rotation matrix + rot = _design.angleaxis2rotmat(phi, "z") + + # get trajectory + traj = tmp["kr"] * tmp["mtx"] + traj = _design.projection(traj[0].T, rot) + traj = traj.swapaxes(-2, -1).T + traj = traj.reshape(nviews, ncontrasts, *traj.shape[-2:]) + traj = traj.swapaxes(0, 1) + + # expand slices + az = np.arange(-nz // 2, nz // 2, dtype=np.float32) + + # accelerate + az = az[::accel] + + # add back ACS + if acs_shape is not None: + az = np.concatenate( + (az, np.arange(-acs_shape // 2, acs_shape // 2, dtype=np.float32)) + ) + az = np.unique(az) + + # expand + traj = np.apply_along_axis(np.tile, -3, traj, len(az)) + az = np.repeat(az, nviews) + az = az[None, :, None] * np.ones_like(traj[..., 0]) + + # append new axis + traj = np.concatenate((traj, az[..., None]), axis=-1) + + # expand echoes + nechoes = shape[-1] + traj = np.repeat(traj, nechoes, axis=0) + + # get dcf + dcf = tmp["dcf"] + + # get shape + shape = [shape[1]] + tmp["mtx"] + + # get time + t = tmp["t"] + + # calculate TE + TE = tmp["te"] + + # extra args + user = {} + user["acs_shape"] = tmp["acs"]["mtx"] + + # get indexes + head = Header(shape, t=t, traj=traj, dcf=dcf, TE=TE, user=user) + head.torch() + + return head
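The shot ordering assumed above (all contrasts acquired before moving to the next view) means the flat shot index decomposes as ``shot = view * ncontrasts + contrast``; a short NumPy-only check with illustrative sizes::

    import numpy as np

    ncontrasts, nviews = 3, 4                  # illustrative values
    dphi = (1 - 233 / 377) * 360.0

    shot = np.arange(ncontrasts * nviews)      # acquisition order: contrast fastest
    phi = shot * dphi
    phi = phi.reshape(nviews, ncontrasts).swapaxes(0, 1)   # -> (ncontrasts, nviews)

    contrast, view = 2, 1
    print(phi[contrast, view], (view * ncontrasts + contrast) * dphi)   # identical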
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/_vobj/sampling/spiral.html b/_modules/deepmr/_vobj/sampling/spiral.html new file mode 100644 index 00000000..d380fd37 --- /dev/null +++ b/_modules/deepmr/_vobj/sampling/spiral.html @@ -0,0 +1,743 @@ + + + + + + + + + + + deepmr._vobj.sampling.spiral — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr._vobj.sampling.spiral

+"""Two-dimensional spiral sampling."""
+
+__all__ = ["spiral"]
+
+import numpy as np
+
+# import guard: lets Sphinx build the docs when the compiled _design module is unavailable
+try:
+    from ... import _design
+except Exception:
+    pass
+
+from ..._types import Header
+
+
+
[docs]def spiral(shape, accel=None, nintl=1, **kwargs): + r""" + Design a constant- or multi-density spiral. + + The spiral interleaves are rotated by a pseudo golden angle + with period 377 interelaves. Rotations are performed both along + ``view`` and ``contrast`` dimensions. Acquisition is assumed to + traverse the ``contrast`` dimension first and then the ``view``, + i.e., all the contrasts are acquired before moving to the second view. + If multiple echoes are specified, final contrast dimensions will have + length ``ncontrasts * nechoes``. Echoes are assumed to be acquired + sequentially with the same spiral interleaf. + + Parameters + ---------- + shape : Iterable[int] + Matrix shape ``(in-plane, contrasts=1, echoes=1)``. + accel : int, optional + In-plane acceleration. Ranges from ``1`` (fully sampled) to ``nintl``. + The default is ``1``. + nintl : int, optional + Number of interleaves to fully sample a plane. + The default is ``1``. + + + Keyword Arguments + ----------------- + moco_shape : int + Matrix size for inner-most (motion navigation) spiral. + The default is ``None``. + acs_shape : int + Matrix size for intermediate inner (coil sensitivity estimation) spiral. + The default is ``None``. + acs_nintl : int + Number of interleaves to fully sample intermediate inner spiral. + The default is ``1``. + variant : str + Type of spiral. Allowed values are: + + * ``center-out``: starts at the center of k-space and ends at the edge (default). + * ``reverse``: starts at the edge of k-space and ends at the center. + * ``in-out``: starts at the edge of k-space and ends on the opposite side (two 180° rotated arms back-to-back). + + Returns + ------- + head : Header + Acquisition header corresponding to the generated spiral. + + Example + ------- + >>> import deepmr + + We can create a single-shot spiral for an in-plane matrix of ``(128, 128)`` pixels by: + + >>> head = deepmr.spiral(128) + + A multi-shot trajectory can be generated by specifying the ``nintl`` argument: + + >>> head = deepmr.spiral(128, nintl=48) + + Both spirals have constant density. If we want a dual density we can use ``acs_shape`` and ``acs_nintl`` arguments. + For example, if we want an inner ``(32, 32)`` k-space region sampled with a 4 interleaves spiral, this can be obtained as: + + >>> head = deepmr.spiral(128, nintl=48, acs_shape=32, acs_nintl=4) + + This inner region can be used e.g., for Parallel Imaging calibration. Similarly, a triple density spiral can + be obtained by using the ``moco_shape`` argument: + + >>> head = deepmr.spiral(128, nintl=48, acs_shape=32, acs_nintl=4, moco_shape=8) + + The generated spiral will have an innermost ``(8, 8)`` single-shot k-space region (e.g., for PROPELLER-like motion correction), + an intermediate ``(32, 32)`` k-space region fully covered by 4 spiral shots and an outer ``(128, 128)`` region fully covered by 48 interleaves. + + In-plane acceleration can be specified using the ``accel`` argument. For example, the following + + >>> head = deepmr.spiral(128, nintl=48, accel=4) + + will generate the following trajectory: + + >>> head.traj.shape + torch.Size([1, 12, 538, 2]) + + i.e., a 48-interleaves trajectory with an in-plane acceleration factor of 4 (i.e., 12 interleaves). 
+ + Multiple contrasts with different sampling (e.g., for MR Fingerprinting) can be achieved by providing + a tuple of ints as the ``shape`` argument: + + >>> head = deepmr.spiral((128, 420), nintl=48) + >>> head.traj.shape + torch.Size([420, 1, 538, 2]) + + corresponding to 420 different contrasts, each sampled with a different single spiral interleaf of 538 points. + Similarly, multiple echoes (with fixed sampling) can be specified as: + + >>> head = deepmr.spiral((128, 1, 8), nintl=48) + >>> head.traj.shape + torch.Size([8, 48, 538, 2]) + + corresponding to a 8-echoes fully sampled k-spaces, e.g., for QSM and T2* mapping. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. + * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5 * shape, 0.5 * shape)`` + with shape ``(ncontrasts, nviews, nsamples, 2)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. + * TE (torch.Tensor): + This is the Echo Times array. Assumes a k-space raster time of ``1 us`` + and minimal echo spacing. + + """ + # expand shape if needed + if np.isscalar(shape): + shape = [shape, 1] + else: + shape = list(shape) + + while len(shape) < 3: + shape = shape + [1] + + # default accel + if accel is None: + if shape[1] == 1: + accel = 1 + else: + accel = nintl + + # assume 1mm iso + fov = shape[0] + + # design single interleaf spiral + tmp, _ = _design.spiral(fov, shape[0], 1, nintl, **kwargs) + + # rotate + ncontrasts = shape[1] + nviews = max(int(nintl // accel), 1) + + # generate angles + dphi = (1 - 233 / 377) * 360.0 + phi = np.arange(ncontrasts * nviews) * dphi # angles in degrees + phi = np.deg2rad(phi) # angles in radians + + # build rotation matrix + rot = _design.angleaxis2rotmat(phi, "z") + + # get trajectory + traj = tmp["kr"] * tmp["mtx"] + traj = _design.projection(traj[0].T, rot) + traj = traj.swapaxes(-2, -1).T + traj = traj.reshape(nviews, ncontrasts, *traj.shape[-2:]) + traj = traj.swapaxes(0, 1) + + # expand echoes + nechoes = shape[-1] + traj = np.repeat(traj, nechoes, axis=0) + + # get dcf + dcf = tmp["dcf"] + + # get shape + shape = tmp["mtx"] + + # get time + t = tmp["t"] + + # calculate TE + min_te = float(tmp["te"][0]) + TE = np.arange(nechoes, dtype=np.float32) * t[-1] + min_te + + # extra args + user = {} + user["moco_shape"] = tmp["moco"]["mtx"] + user["acs_shape"] = tmp["acs"]["mtx"] + + # get indexes + head = Header(shape, t=t, traj=traj, dcf=dcf, TE=TE, user=user) + head.torch() + + return head
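The echo-time calculation above assumes back-to-back echoes on a 1 us k-space raster, so ``TE[n] = min_te + n * t_read`` with ``t_read = t[-1]``. A NumPy-only sketch with an assumed readout length and minimum TE::

    import numpy as np

    nsamples, nechoes = 538, 4                          # illustrative values
    t = np.arange(nsamples, dtype=np.float32) * 1e-3    # readout time axis in ms (1 us raster)
    min_te = 0.05                                       # assumed minimum TE in ms

    TE = np.arange(nechoes, dtype=np.float32) * t[-1] + min_te
    print(TE)   # [0.05, 0.587, 1.124, 1.661] ms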
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/_vobj/sampling/spiral_proj.html b/_modules/deepmr/_vobj/sampling/spiral_proj.html new file mode 100644 index 00000000..0ed74eeb --- /dev/null +++ b/_modules/deepmr/_vobj/sampling/spiral_proj.html @@ -0,0 +1,806 @@ + + + + + + + + + + + deepmr._vobj.sampling.spiral_proj — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr._vobj.sampling.spiral_proj

+"""Three-dimensional spiral projection sampling."""
+
+__all__ = ["spiral_proj"]
+
+import math
+import numpy as np
+
+# import guard: lets Sphinx build the docs when the compiled _design module is unavailable
+try:
+    from ... import _design
+except Exception:
+    pass
+
+from ..._types import Header
+
+
+
[docs]def spiral_proj(shape, accel=None, nintl=1, order="ga", **kwargs): + r""" + Design a constant- or multi-density spiral projection. + + The trajectory consists of a 2D spiral, whose plane + is rotated to cover the 3D k-space. In-plane rotations + are sequential. Plane rotation types are specified + via the ``order`` argument. + + Parameters + ---------- + shape : Iterable[int] + Matrix shape ``(in-plane, contrasts=1, echoes=1)``. + accel : Iterable[int], optional + Acceleration factors ``(in-plane, radial)``. + Range from ``1`` (fully sampled) to ``nintl`` / ``$\pi$ * shape[0]``. + The default is ``(1, 1)`` if ``ncontrast == 1`` and ``(1, ``$\pi$ * shape[0])`` + if ``ncontrast > 1``. + nintl : int, optional + Number of interleaves to fully sample a plane. + The default is ``1``. + order : str, optional + Spiral plane rotation type. + These can be: + + * ``ga``: Pseudo golden angle variation of periodicity ``377``. + * ``ga::multiaxis``: Pseudo golden angle, i.e., same as ``ga`` but views are repeated 3 times on orthogonal axes. + * ``ga-sh``: Shuffled pseudo golden angle. + * ``ga-sh::multiaxis``: Multiaxis shuffled pseudo golden angle, i.e., same as ``ga-sh`` but views are repeated 3 times on orthogonal axes. + + The default is ``ga``. + + Keyword Arguments + ----------------- + moco_shape : int + Matrix size for inner-most (motion navigation) spiral. + The default is ``None``. + acs_shape : int + Matrix size for intermediate inner (coil sensitivity estimation) spiral. + The default is ``None``. + acs_nintl : int + Number of interleaves to fully sample intermediate inner spiral. + The default is ``1``. + variant : str + Type of spiral. Allowed values are: + + * ``center-out``: starts at the center of k-space and ends at the edge (default). + * ``reverse``: starts at the edge of k-space and ends at the center. + * ``in-out``: starts at the edge of k-space and ends on the opposite side (two 180° rotated arms back-to-back). + + Returns + ------- + head : Header + Acquisition header corresponding to the generated spiral. + + Example + ------- + >>> import deepmr + + We can create a single-shot spiral projection for a matrix of ``(128, 128, 128)`` voxels by: + + >>> head = deepmr.spiral_proj(128) + + An in-plane multi-shot trajectory can be generated by specifying the ``nintl`` argument: + + >>> head = deepmr.spiral_proj(128, nintl=48) + + Both spirals have constant density. If we want a dual density we can use ``acs_shape`` and ``acs_nintl`` arguments. + For example, if we want an inner ``(32, 32)`` k-space region sampled with a 4 interleaves spiral, this can be obtained as: + + >>> head = deepmr.spiral_proj(128, nintl=48, acs_shape=32, acs_nintl=4) + + This inner region can be used e.g., for Parallel Imaging calibration. Similarly, a triple density spiral can + be obtained by using the ``moco_shape`` argument: + + >>> head = deepmr.spiral_proj(128, nintl=48, acs_shape=32, acs_nintl=4, moco_shape=8) + + The generated spiral will have an innermost ``(8, 8)`` single-shot k-space region (e.g., for PROPELLER-like motion correction), + an intermediate ``(32, 32)`` k-space region fully covered by 4 spiral shots and an outer ``(128, 128)`` region fully covered by 48 interleaves. + + In-plane acceleration can be specified using the ``accel`` argument. 
For example, the following + + >>> head = deepmr.spiral_proj(128, nintl=48, accel=4) + + will generate the following trajectory: + + >>> head.traj.shape + torch.Size([1, 4824, 538, 2]) + + i.e., a 48-interleaves trajectory with an in-plane acceleration factor of 4 (i.e., 12 interleaves), + repeated for 402 planes covering the 3D k-space sphere. + + Multiple contrasts with different sampling (e.g., for MR Fingerprinting) can be achieved by providing + a tuple of ints as the ``shape`` argument: + + >>> head = deepmr.spiral_proj((128, 420), nintl=48) + >>> head.traj.shape + torch.Size([420, 48, 538, 2]) + + corresponding to 420 different contrasts, each sampled with a different fully sampled plane. + Similarly, multiple echoes (with fixed sampling) can be specified as: + + >>> head = deepmr.spiral_proj((128, 1, 8), nintl=48) + >>> head.traj.shape + torch.Size([8, 19296, 538, 2]) + + corresponding to a 8-echoes fully sampled k-spaces, e.g., for QSM and T2* mapping. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. + * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5 * shape, 0.5 * shape)`` + with shape ``(ncontrasts, nviews, nsamples, 3)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. + * TE (torch.Tensor): + This is the Echo Times array. Assumes a k-space raster time of ``1 us`` + and minimal echo spacing. + + """ + # expand shape if needed + if np.isscalar(shape): + shape = [shape, 1] + else: + shape = list(shape) + + while len(shape) < 3: + shape = shape + [1] + + # default accel + if accel is None: + if shape[1] == 1: + accel = 1 + else: + accel = int(math.pi * shape[0]) + + # expand accel if needed + if np.isscalar(accel): + accel = [1, accel] + else: + accel = list(accel) + + # assume 1mm iso + fov = shape[0] + + # design single interleaf spiral + tmp, _ = _design.spiral(fov, shape[0], 1, nintl, **kwargs) + + # generate angles + ncontrasts = shape[1] + nplanes = max(int((math.pi * shape[0]) // accel[1]), 1) + nviews = max(int(nintl // accel[0]), 1) + + dphi = 360.0 / nintl + dtheta = (1 - 233 / 377) * 360.0 + + # build rotation angles + j = np.arange(ncontrasts * nplanes) + i = np.arange(nviews) + + j = np.tile(j, nviews) + i = np.repeat(i, ncontrasts * nplanes) + + # radial angle + if order[:5] == "ga-sh": + theta = (i + j) * dtheta + else: + theta = j * dtheta + + # in-plane angle + phi = i * dphi + + # convert to radians + theta = np.deg2rad(theta) # angles in radians + phi = np.deg2rad(phi) # angles in radians + + # perform rotation + axis = np.zeros_like(theta, dtype=int) # rotation axis + Rx = _design.angleaxis2rotmat(theta, [1, 0, 0]) # radial rotation about x + Rz = _design.angleaxis2rotmat(phi, [0, 0, 1]) # in-plane rotation about z + + # put together full rotation matrix + rot = np.einsum("...ij,...jk->...ik", Rx, Rz) + + # get trajectory + traj = tmp["kr"] * tmp["mtx"] + traj = np.concatenate((traj, 0 * traj[..., [0]]), axis=-1) + traj = _design.projection(traj[0].T, rot) + traj = traj.swapaxes(-2, -1).T + traj = traj.reshape(nviews, nplanes, ncontrasts, *traj.shape[-2:]) + traj = traj.transpose(2, 1, 0, *np.arange(3, len(traj.shape))) + traj = traj.reshape(ncontrasts, -1, *traj.shape[3:]) + + # get dcf + 
dcf = tmp["dcf"] + dcf = _design.angular_compensation(dcf, traj.reshape(-1, *traj.shape[-2:]), axis) + dcf = dcf.reshape(*traj.shape[:-1]) + + # apply multiaxis + if order[-9:] == "multiaxis": + # expand trajectory + traj1 = np.stack((traj[..., 2], traj[..., 0], traj[..., 1]), axis=-1) + traj2 = np.stack((traj[..., 1], traj[..., 2], traj[..., 0]), axis=-1) + traj = np.concatenate((traj, traj1, traj2), axis=-3) + + # expand dcf + dcf = np.concatenate((dcf, dcf, dcf), axis=-2) + + # renormalize dcf + tabs = (traj[0, 0] ** 2).sum(axis=-1) ** 0.5 + k0_idx = np.argmin(tabs) + nshots = nviews * ncontrasts * nplanes + + # impose that center of k-space weight is 1 / nshots + scale = 1.0 / (dcf[[k0_idx]] + 0.000001) / nshots + dcf = scale * dcf + + # expand echoes + nechoes = shape[-1] + traj = np.repeat(traj, nechoes, axis=0) + dcf = np.repeat(dcf, nechoes, axis=0) + + # get shape + shape = [shape[0]] + list(tmp["mtx"]) + + # get time + t = tmp["t"] + + # calculate TE + min_te = float(tmp["te"][0]) + TE = np.arange(nechoes, dtype=np.float32) * t[-1] + min_te + + # extra args + user = {} + user["moco_shape"] = tmp["moco"]["mtx"] + user["acs_shape"] = tmp["acs"]["mtx"] + + # get indexes + head = Header(shape, t=t, traj=traj, dcf=dcf, TE=TE, user=user) + head.torch() + + return head
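The ``::multiaxis`` option above repeats every view three times with cyclically permuted ``(kx, ky, kz)`` coordinates, so the same readout is played along orthogonal axes; a NumPy-only sketch with an arbitrary trajectory array::

    import numpy as np

    traj = np.random.rand(1, 4, 10, 3)   # (ncontrasts, nviews, nsamples, 3), arbitrary content

    traj1 = np.stack((traj[..., 2], traj[..., 0], traj[..., 1]), axis=-1)   # (kz, kx, ky)
    traj2 = np.stack((traj[..., 1], traj[..., 2], traj[..., 0]), axis=-1)   # (ky, kz, kx)
    traj_multi = np.concatenate((traj, traj1, traj2), axis=-3)              # views repeated 3x

    print(traj.shape, traj_multi.shape)   # (1, 4, 10, 3) (1, 12, 10, 3)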
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/_vobj/sampling/spiral_stack.html b/_modules/deepmr/_vobj/sampling/spiral_stack.html new file mode 100644 index 00000000..6861e8b3 --- /dev/null +++ b/_modules/deepmr/_vobj/sampling/spiral_stack.html @@ -0,0 +1,783 @@ + + + + + + + + + + + deepmr._vobj.sampling.spiral_stack — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr._vobj.sampling.spiral_stack

+"""Three-dimensional stack-of-spirals sampling."""
+
+__all__ = ["spiral_stack"]
+
+import numpy as np
+
+# import guard: lets Sphinx build the docs when the compiled _design module is unavailable
+try:
+    from ... import _design
+except Exception:
+    pass
+
+from ..._types import Header
+
+
+
[docs]def spiral_stack(shape, accel=None, nintl=1, **kwargs): + r""" + Design a constant- or multi-density stack of spirals. + + As in the 2D spiral case, interleaves are rotated by a pseudo golden angle + with period 377 interelaves. Rotations are performed both along + ``view`` and ``contrast`` dimensions. Acquisition is assumed to + traverse the ``contrast`` dimension first and then the ``view``, + i.e., all the contrasts are acquired before moving to the second view. + If multiple echoes are specified, final contrast dimensions will have + length ``ncontrasts * nechoes``. Echoes are assumed to be acquired + sequentially with the same spiral interleaf. + + Finally, slice dimension is assumed to be the outermost loop. + + Parameters + ---------- + shape : Iterable[int] + Matrix shape ``(in-plane, slices=1, contrasts=1, echoes=1)``. + accel : Iterable[int], optional + Acceleration factors ``(in-plane, slices=1)``. + Range from ``1`` (fully sampled) to ``nintl`` / ``nslices``. + The default is ``(1, 1)``. + nintl : int, optional + Number of interleaves to fully sample a plane. + The default is ``1``. + + Keyword Arguments + ----------------- + moco_shape : int + Matrix size for inner-most (motion navigation) spiral. + The default is ``None``. + acs_shape : Iterable[int] + Matrix size for intermediate inner (coil sensitivity estimation) spiral. + The default is (``None``, ``None``). + acs_nintl : int + Number of interleaves to fully sample intermediate inner spiral. + The default is ``1``. + variant : str + Type of spiral. Allowed values are: + + * ``center-out``: starts at the center of k-space and ends at the edge (default). + * ``reverse``: starts at the edge of k-space and ends at the center. + * ``in-out``: starts at the edge of k-space and ends on the opposite side (two 180° rotated arms back-to-back). + + Returns + ------- + head : Header + Acquisition header corresponding to the generated spiral. + + Example + ------- + >>> import deepmr + + We can create a single-shot stack-of-spirals for a ``(128, 128, 120)`` voxels matrix by: + + >>> head = deepmr.spiral_stack((128, 120)) + + A multi-shot trajectory can be generated by specifying the ``nintl`` argument: + + >>> head = deepmr.spiral_stack((128, 120), nintl=48) + + Both spirals have constant density. If we want a dual density we can use ``acs_shape`` and ``acs_nintl`` arguments. + For example, if we want an inner ``(32, 32, 16)`` k-space region sampled with a 4 interleaves spiral, this can be obtained as: + + >>> head = deepmr.spiral_stack((128, 120), nintl=48, acs_shape=(32, 16), acs_nintl=4) + + This inner region can be used e.g., for Parallel Imaging calibration. Similarly, a triple density spiral can + be obtained by using the ``moco_shape`` argument: + + >>> head = deepmr.spiral_stack((128, 120), nintl=48, acs_shape=(32, 16), acs_nintl=4, moco_shape=8) + + The generated spiral will have an innermost ``(8, 8)`` single-shot k-space region (e.g., for PROPELLER-like motion correction), + an intermediate ``(32, 32, 16)`` k-space region fully covered by 4 spiral shots and an outer ``(128, 128, 120)`` region fully covered by 48 interleaves. + + In-plane and slice accelerations can be specified using the ``accel`` argument. 
For example, the following + + >>> head = deepmr.spiral_stack((128, 120), nintl=48, accel=(4, 2)) + + will generate the following trajectory: + + >>> head.traj.shape + torch.Size([1, 720, 538, 3]) + + i.e., a 48-interleaves trajectory with an in-plane acceleration factor of 4 (i.e., 12 interleaves) + and slice acceleration of 2 (i.e., 60 encodings). + + Multiple contrasts with different sampling (e.g., for MR Fingerprinting) can be achieved by providing + a tuple of ints as the ``shape`` argument: + + >>> head = deepmr.spiral_stack((128, 120, 420), nintl=48) + >>> head.traj.shape + torch.Size([420, 120, 538, 3]) + + corresponding to 420 different contrasts, each sampled with a single spiral interleaf of 538 points, + repeated for 120 slice encodings. Similarly, multiple echoes (with fixed sampling) can be specified as: + + >>> head = deepmr.spiral_stack((128, 120, 1, 8), nintl=48) + >>> head.traj.shape + torch.Size([8, 5760, 538, 3]) + + corresponding to a 8-echoes fully sampled k-spaces, e.g., for QSM and T2* mapping. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. + * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5 * shape, 0.5 * shape)`` + with shape ``(ncontrasts, nviews, nsamples, 3)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. + * TE (torch.Tensor): + This is the Echo Times array. Assumes a k-space raster time of ``1 us`` + and minimal echo spacing. + + """ + assert len(shape) >= 2, "Please provide at least (in-plane, nslices) as shape." 
+ + # expand shape if needed + shape = list(shape) + + while len(shape) < 4: + shape = shape + [1] + + # default accel + if accel is None: + if shape[2] == 1: + accel = 1 + else: + accel = nintl + + # expand accel if needed + if np.isscalar(accel): + accel = [accel, 1] + else: + accel = list(accel) + + # expand acs if needed + if "acs_shape" in kwargs: + acs_shape = kwargs["acs_shape"] + else: + acs_shape = [None] + kwargs.pop("acs_shape", None) + + while len(acs_shape) < 2: + acs_shape = acs_shape + [None] + + # assume 1mm iso + fov = shape[0] + + # design single interleaf spiral + tmp, _ = _design.spiral(fov, shape[0], 1, nintl, acs_shape=acs_shape[0], **kwargs) + + # rotate + ncontrasts = shape[2] + nviews = max(int(nintl // accel[0]), 1) + + # generate angles + dphi = (1 - 233 / 377) * 360.0 + phi = np.arange(ncontrasts * nviews) * dphi # angles in degrees + phi = np.deg2rad(phi) # angles in radians + + # build rotation matrix + rot = _design.angleaxis2rotmat(phi, "z") + + # get trajectory + traj = tmp["kr"] * tmp["mtx"] + traj = _design.projection(traj[0].T, rot) + traj = traj.swapaxes(-2, -1).T + traj = traj.reshape(nviews, ncontrasts, *traj.shape[-2:]) + traj = traj.swapaxes(0, 1) + + # expand slices + nz = shape[1] + az = np.arange(-nz // 2, nz // 2, dtype=np.float32) + + # accelerate + az = az[:: accel[1]] + + # add back ACS + if acs_shape[1] is not None: + az = np.concatenate( + (az, np.arange(-acs_shape[1] // 2, acs_shape[1] // 2, dtype=np.float32)) + ) + az = np.unique(az) + + # expand + traj = np.apply_along_axis(np.tile, -3, traj, len(az)) + az = np.repeat(az, nviews) + az = az[None, :, None] * np.ones_like(traj[..., 0]) + + # append new axis + traj = np.concatenate((traj, az[..., None]), axis=-1) + + # get dcf + dcf = tmp["dcf"] + + # expand echoes + nechoes = shape[-1] + traj = np.repeat(traj, nechoes, axis=0) + + # get shape + shape = [shape[1]] + tmp["mtx"] + + # get time + t = tmp["t"] + + # calculate TE + min_te = float(tmp["te"][0]) + TE = np.arange(nechoes, dtype=np.float32) * t[-1] + min_te + + # extra args + user = {} + user["moco_shape"] = tmp["moco"]["mtx"] + user["acs_shape"] = tmp["acs"]["mtx"] + + # get indexes + head = Header(shape, t=t, traj=traj, dcf=dcf, TE=TE, user=user) + head.torch() + + return head
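The view-count bookkeeping behind the acceleration example above (48 interleaves with in-plane acceleration 4 and 120 slices with slice acceleration 2, giving 720 acquired views per contrast) is plain arithmetic::

    import math

    nintl, nz = 48, 120
    accel_inplane, accel_slice = 4, 2

    nviews_inplane = max(nintl // accel_inplane, 1)   # 12 interleaves per plane
    nslices_acq = math.ceil(nz / accel_slice)         # 60 slice encodings

    print(nviews_inplane * nslices_acq)               # 720 views -> traj shape (1, 720, 538, 3)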
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/_vobj/trains/flip.html b/_modules/deepmr/_vobj/trains/flip.html new file mode 100644 index 00000000..956bba28 --- /dev/null +++ b/_modules/deepmr/_vobj/trains/flip.html @@ -0,0 +1,723 @@ + + + + + + + + + + + deepmr._vobj.trains.flip — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr._vobj.trains.flip

+"""Variable flip angle train generation routines."""
+
+__all__ = ["piecewise_fa", "sinusoidal_fa"]
+
+import math
+import numpy as np
+import torch
+
+
+
[docs]def piecewise_fa( + fa_ref=[1.5556, 70.0, 0.7778], length=[350, 230], recovery=300, head=None +): + """ + Design a multi-segment linear flip angle train. + + The resulting train, similar to the one introduced by Gomez et al. [1], + consists of ``len(fa_max)`` linear sections. The ``n``-th sections starts at + ``fa_ref[n] [deg]``, ends at ``fa_ref[n+1] [deg]`` and consists of ``length[n]`` pulses. + The train is followed by a constant flip angle section of length ``recovery``. + + Parameters + ---------- + fa_ref : Iterable[float], optional + Starting flip angle in ``[deg]`` per linear segment. + The default is ``(1.5556, 70., 0.7778) [deg]``. + length : Iterable[int], optional + Linear segment length. + The default is ``(350, 200)`` pulses. + recovery : int, optional + Constant flip angle recovery train length. + The default is ``300``. + head : Header, optional + Pre-existing acquisition header. + If provided, interpolate FA train to ``ncontrast = head.traj.shape[0]`` + and include it in the ``head.FA`` field. + The default is ``None``. + + Returns + ------- + torch.Tensor | deepmr.Header + If ``head`` is not provided, returns piecewise linear flip angle train in ``[deg]`` + as a ``torch.Tensor``. If ``head`` is provided, insert flip angle train + (linearly interpolated to ``ncontrast = head.traj.shape[0]``) in ``head.FA`` field. + + References + ---------- + [1] Gómez, P.A., Cencini, M., Golbabaee, M. et al. + Rapid three-dimensional multiparametric MRI with quantitative transient-state imaging. + Sci Rep 10, 13769 (2020). https://doi.org/10.1038/s41598-020-70789-2 + + """ + # generate segments + rf_schedule = [ + np.linspace(fa_ref[n], fa_ref[n + 1], length[n], dtype=np.float32) + for n in range(len(fa_ref - 1)) + ] + rf_schedule = np.concatenate(rf_schedule) + + # add recovery + if recovery > 0: + rf_schedule = np.concatenate((rf_schedule, np.ones(recovery, dtype=np.float32))) + + if head is not None: + if head.traj is not None: + ncontrasts = head.traj.shape[0] + rf_schedule = np.interp( + np.linspace(0, 1, ncontrasts), + np.linspace(0, 1, len(rf_schedule)), + rf_schedule, + ) + head.FA = torch.as_tensor(rf_schedule) + + return head + + return torch.as_tensor(rf_schedule)
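A NumPy-only sketch of the piecewise-linear schedule constructed above. Note that the segment loop in the source reads ``range(len(fa_ref - 1))``, which fails for the list default; the sketch uses the presumably intended ``range(len(fa_ref) - 1)`` (one linear segment per pair of reference angles)::

    import numpy as np

    fa_ref = [1.5556, 70.0, 0.7778]   # segment start/end flip angles in deg
    length = [350, 230]               # pulses per linear segment
    recovery = 300                    # constant flip angle tail

    segments = [np.linspace(fa_ref[n], fa_ref[n + 1], length[n], dtype=np.float32)
                for n in range(len(fa_ref) - 1)]
    rf_schedule = np.concatenate(segments)
    if recovery > 0:
        rf_schedule = np.concatenate((rf_schedule, np.ones(recovery, dtype=np.float32)))

    print(rf_schedule.shape)   # (880,) = 350 + 230 + 300 pulses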
+ + +
[docs]def sinusoidal_fa( + fa_max=(35.0, 43.0, 70.0, 45.0, 27.0), + length=200, + spacing=10, + recovery=0, + offset=5.0, + head=None, +): + """ + Design a multi-segment sinusoidal flip angle train. + + The resulting train, similar to the one introduced by Jiang et al. [1], + consists of ``len(fa_max)`` sinusoidal section (positive wave), + each of length ``length` separed by constant flip angle sections + of length ``spacing``. The ``n``-th sinusoidal section peaks at + ``fa_max[n] [deg]``. The train is followed by a constant flip angle + section of length ``recovery``. The overall schedule minimum flip angle + is determined by the ``offset`` parameter (in ``[deg]``). + + Parameters + ---------- + fa_max : Iterable[float], optional + Maximum flip angle in ``[deg]`` per sinusoidal segment. + The default is ``(35., 43., 70., 45., 27.) [deg]``. + length : int, optional + Sinusoidal segment length. + The default is ``200`` pulses. + spacing : int, optional + Zero degrees flip angle pulses in between segments. + The default is ``10``. + recovery : int, optional + Constant flip angle recovery train length. + The default is ``0``. + offset : float, optional + Minimum flip angle in ``[deg]``. The default is ``5. [deg]``. + head : Header, optional + Pre-existing acquisition header. + If provided, interpolate FA train to ``ncontrast = head.traj.shape[0]`` + and include it in the ``head.FA`` field. + The default is ``None``. + + Returns + ------- + torch.Tensor | deepmr.Header + If ``head`` is not provided, returns sinusoidal flip angle train in ``[deg]`` + as a ``torch.Tensor``. If ``head`` is provided, insert flip angle train + (linearly interpolated to ``ncontrast = head.traj.shape[0]``) in ``head.FA`` field. + + Examples + -------- + >>> import deepmr + + A ``5`` sections flip angle train with ``10``-pulses long separation + and no recovery (e.g., for 2D MR Fingerprinting) can be generated as: + + >>> fa_train = deepmr.sinusoidal_fa((35., 43., 70., 45., 27.), 200, 10) + + A final ``100`` constant flip angle segment (e.g., for 3D MR Fingerprinting) + can be added via the ``recovery`` argument: + + >>> fa_train = deepmr.sinusoidal_fa((35., 43., 70., 45., 27.), 200, 10, recovery=100) + + References + ---------- + [1] Jiang, Y., Ma, D., Seiberlich, N., Gulani, V. and Griswold, M.A. (2015), + MR fingerprinting using fast imaging with steady state precession (FISP) with spiral readout. + Magn. Reson. Med., 74: 1621-1631. https://doi.org/10.1002/mrm.25559 + + + """ + + # get maximum flip angle + max_fa = np.array(fa_max, dtype=np.float32) - offset + n_segments = len(max_fa) + + # build schedule + n = np.arange(length, dtype=np.float32) + 1 + rest = np.zeros(spacing, dtype=np.float32) + rf_schedule = np.concatenate((np.sin(n * np.pi / length) * max_fa[0], rest)) + + for i in range(1, n_segments): + segment = np.concatenate((np.sin(n * math.pi / length) * max_fa[i], rest)) + rf_schedule = np.concatenate((rf_schedule, segment)) + + # add recovery + if recovery > 0: + rf_schedule = np.concatenate( + (rf_schedule, np.zeros(recovery, dtype=np.float32)) + ) + + # add back offset + rf_schedule += offset + + if head is not None: + if head.traj is not None: + ncontrasts = head.traj.shape[0] + rf_schedule = np.interp( + np.linspace(0, 1, ncontrasts), + np.linspace(0, 1, len(rf_schedule)), + rf_schedule, + ) + head.FA = torch.as_tensor(rf_schedule) + + return head + + return torch.as_tensor(rf_schedule)
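The sinusoidal schedule above is a sequence of positive half-sine lobes peaking at ``fa_max[i]``, separated by ``spacing`` rest pulses and floored at ``offset``; a NumPy-only sketch with a reduced (assumed) set of peaks::

    import numpy as np

    fa_max = (35.0, 43.0, 70.0)            # illustrative subset of the default peaks
    length, spacing, offset = 200, 10, 5.0

    peaks = np.asarray(fa_max, dtype=np.float32) - offset
    n = np.arange(length, dtype=np.float32) + 1
    rest = np.zeros(spacing, dtype=np.float32)

    segments = [np.concatenate((np.sin(n * np.pi / length) * p, rest)) for p in peaks]
    rf_schedule = np.concatenate(segments) + offset

    print(rf_schedule.max(), rf_schedule.min())   # 70 deg peak, 5 deg floor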
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/_vobj/trains/phase.html b/_modules/deepmr/_vobj/trains/phase.html new file mode 100644 index 00000000..d0da46cd --- /dev/null +++ b/_modules/deepmr/_vobj/trains/phase.html @@ -0,0 +1,598 @@ + + + + + + + + + + + deepmr._vobj.trains.phase — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr._vobj.trains.phase

+"""RF phase generation routines."""
+
+__all__ = ["phase_cycling", "rf_spoiling"]
+
+import numpy as np
+import torch
+
+
+
[docs]def phase_cycling(length, dphi=180.0): + """ + Generate a linear phase cycling scheme. + + Parameters + ---------- + length : int + Flip angle train length. + dphi : float, optional + Linear phase increment in ``[deg]``. + The default is ``180.0 [deg]``. + + Returns + ------- + phase : torch.Tensor + RF pulse phase for each pulse in a train. + + """ + # generate phase + phase = np.arange(length, dtype=np.float32) * dphi + phase = phase % 360.0 + + return torch.as_tensor(phase)
+ + +
[docs]def rf_spoiling(length, dphi=117.0): + """ + Generate a quadratic phase cycling scheme + for rf spoiling or partial spoiling. + + Parameters + ---------- + length : int + Flip angle train length. + dphi : float, optional + Quadratic phase increment in ``[deg]``. + The default is ``117.0 [deg]``. + + Returns + ------- + phase : torch.Tensor + RF pulse phase for each pulse in a train. + + """ + # generate phase + phase = np.zeros(length, dtype=np.float32) + for n in range(length): + phase[n] = (n * (n + 1) / 2.0 * dphi) % 360 + + return torch.as_tensor(phase)
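A short usage sketch for both phase schedules (assuming they are re-exported at the package top level like the flip angle train helpers; otherwise they can be imported directly from ``deepmr._vobj.trains.phase``):

>>> import deepmr
>>> # alternating 0/180 deg phase for a 420-pulse bSSFP train
>>> phase = deepmr.phase_cycling(420)
>>> # quadratic 117 deg increments for an RF-spoiled train of the same length
>>> phase = deepmr.rf_spoiling(420)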
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/bloch/blocks/prep.html b/_modules/deepmr/bloch/blocks/prep.html new file mode 100644 index 00000000..1ce0aaad --- /dev/null +++ b/_modules/deepmr/bloch/blocks/prep.html @@ -0,0 +1,645 @@ + + + + + + + + + + + deepmr.bloch.blocks.prep — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.bloch.blocks.prep

+"""Common preparation blocks."""
+
+__all__ = ["InversionPrep", "T2Prep"]
+
+from .. import ops
+
+
+
[docs]def InversionPrep(TI, T1, T2, weight, k, inv_props):
    """
    Adiabatic inversion operator.

    Consists of a 180° pulse followed by a crusher gradient.

    Parameters
    ----------
    TI : torch.Tensor
        Inversion time in ``[ms]``.
    T1 : torch.Tensor
        T1 relaxation time of shape ``(..., npools)`` in ``[ms]``.
    T2 : torch.Tensor
        T2 relaxation time of shape ``(..., npools)`` in ``[ms]``.
    weight : torch.Tensor
        Pool relative fraction.
    k : torch.Tensor
        Chemical exchange matrix of shape ``(..., npools, npools)`` in ``[Hz]``.
    inv_props : dict
        Extra pulse parameters.

    Returns
    -------
    PrepPulse : deepmr.bloch.Operator
        Adiabatic inversion pulse operator, including crusher.

    """
    if TI is not None and TI != 0.0:
        # parse inversion properties
        if inv_props is None:
            inv_props = {}

        # prep operator
        Tinv = ops.AdiabaticPulse(
            T1.device, alpha=180.0, name="Inversion Pulse", **inv_props
        )
        Einv = ops.Relaxation(
            T1.device, TI, T1, T2, weight, k, name="Preparation Interval"
        )
        Sinv = ops.Spoil(name="Inversion Crusher")

        return ops.CompositeOperator(Sinv, Einv, Tinv, name="Inversion Propagator")
    else:
        return ops.Identity(name="Inversion Propagator")
+ + +
[docs]def T2Prep(Tprep, T1, T2, weight, k, prep_props):
    """
    T2 prep operator.

    Consists of a 90°-180°-90° composite pulse followed by a crusher gradient.

    Parameters
    ----------
    Tprep : torch.Tensor
        T2 preparation time in ``[ms]``.
    T1 : torch.Tensor
        T1 relaxation time of shape ``(..., npools)`` in ``[ms]``.
    T2 : torch.Tensor
        T2 relaxation time of shape ``(..., npools)`` in ``[ms]``.
    weight : torch.Tensor
        Pool relative fraction.
    k : torch.Tensor
        Chemical exchange matrix of shape ``(..., npools, npools)`` in ``[Hz]``.
    prep_props : dict
        Extra pulse parameters.

    Returns
    -------
    PrepPulse : deepmr.bloch.Operator
        Adiabatic T2prep pulse operator, including crusher.

    """
    if Tprep is not None and Tprep != 0.0:
        # parse preparation properties
        if prep_props is None:
            prep_props = {}

        # prep operator
        T90p = ops.AdiabaticPulse(
            T1.device, alpha=90.0, phi=0.0, name="Flip Pulse", **prep_props
        )
        Eprep = ops.Relaxation(
            T1.device, 0.5 * Tprep, T1, T2, weight, k, name="Preparation Interval"
        )
        T180 = ops.AdiabaticPulse(
            T1.device, alpha=180.0, name="Inversion Pulse", **prep_props
        )
        T90m = ops.AdiabaticPulse(
            T1.device, alpha=90.0, phi=-180.0, name="Flip-back Pulse", **prep_props
        )
        Sprep = ops.Spoil(name="Prep Crusher")

        return ops.CompositeOperator(
            Sprep, T90m, Eprep, T180, Eprep, T90p, name="T2prep Propagator"
        )
    else:
        return ops.Identity(name="T2prep Propagator")
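To show where these preparation blocks sit, here is a condensed fragment of an inversion-prepared sequence body, mirroring the simulator code further down in this diff. It is not a standalone script: ``states``, ``T1``, ``T2``, ``weight``, ``k``, ``B1``, ``flip`` and the property dictionaries are prepared by the EPG simulator machinery.

>>> from deepmr.bloch import blocks
>>> Prep = blocks.InversionPrep(TI, T1, T2, weight, k, inv_props)  # 180 deg pulse + crusher
>>> RF = blocks.ExcPulse(states, B1, rf_props)                     # excitation pulse
>>> states = Prep(states)                                          # apply magnetization preparation
>>> states = RF(states, flip[0])                                   # first readout pulse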
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/bloch/blocks/readout.html b/_modules/deepmr/bloch/blocks/readout.html new file mode 100644 index 00000000..7c5049a0 --- /dev/null +++ b/_modules/deepmr/bloch/blocks/readout.html @@ -0,0 +1,875 @@ + + + + + + + + + + + deepmr.bloch.blocks.readout — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.bloch.blocks.readout

+"""Common readout blocks."""
+
+__all__ = ["ExcPulse", "FSEStep", "bSSFPStep", "SSFPFidStep", "SSFPEchoStep"]
+
+from .. import ops
+
+
+
[docs]def ExcPulse(states, B1, rf_props): + """ + RF operator. + + Parameters + ---------- + states : dict + EPG states matrix. + B1 : float | complex + Flip angle scaling of shape ``(...,nmodes)``. + rf_props : dict + Extra RF parameters. + + Returns + ------- + RFPulse : deepmr.bloch.Operator + RF pulse rotation operator. + + """ + # parse rf properties + if rf_props is None: + rf_props = {} + + # parse + device = states["F"].device + nlocs = states["F"].shape[-3] + + return ops.RFPulse(device, nlocs=nlocs, B1=B1, name="Excitation Pulse", **rf_props)
+ + +
[docs]def FSEStep(
    states,
    ESP,
    T1,
    T2,
    weight=None,
    k=None,
    chemshift=None,
    D=None,
    v=None,
    grad_props=None,
):
    """
    Fast Spin Echo (FSE) propagator.

    Consists of the free precession and spoiling gradient played over half an
    echo spacing; the refocusing pulse itself is omitted.

    Parameters
    ----------
    states : dict
        EPG states matrix.
    ESP : float
        Echo spacing in ``[ms]``.
    T1 : torch.Tensor
        T1 relaxation time of shape ``(..., npools)`` in ``[ms]``.
    T2 : torch.Tensor
        T2 relaxation time of shape ``(..., npools)`` in ``[ms]``.
    weight : torch.Tensor
        Pool relative fraction.
    k : torch.Tensor
        Chemical exchange matrix of shape ``(..., npools, npools)`` in ``[Hz]``.
    chemshift : torch.Tensor
        Chemical shift of each pool of shape ``(npools,)``.
    D : torch.Tensor
        Apparent diffusion coefficient of shape ``(...,)`` in ``[um**2/ms]``. Assume same coefficient for each pool.
    v : torch.Tensor
        Spin velocity of shape ``(...,)`` in ``[cm/s]``. Assume same coefficient for each pool.
    grad_props : dict
        Extra gradient parameters.

    Returns
    -------
    Xpre : deepmr.bloch.Operator
        Propagator for the half echo spacing preceding each refocusing pulse.
    Xpost : deepmr.bloch.Operator
        Propagator for the half echo spacing following each refocusing pulse.

    """
    X, S = _free_precess(
        states, 0.5 * ESP, T1, T2, weight, k, chemshift, D, v, grad_props
    )

    # build Xpre and Xpost
    Xpre = ops.CompositeOperator(S, X, name="FSEpre")
    Xpost = ops.CompositeOperator(X, S, name="FSEpost")

    return Xpre, Xpost
+ + +
[docs]def bSSFPStep(states, TE, TR, T1, T2, weight=None, k=None, chemshift=None):
    """
    (Balanced) SSFP propagator.

    Consists of rfpulse (omitted) - free precession.

    Parameters
    ----------
    states : dict
        EPG states matrix.
    TE : float
        Echo Time in ``[ms]``.
    TR : float
        Repetition Time in ``[ms]``.
    T1 : torch.Tensor
        T1 relaxation time of shape ``(..., npools)`` in ``[ms]``.
    T2 : torch.Tensor
        T2 relaxation time of shape ``(..., npools)`` in ``[ms]``.
    weight : torch.Tensor
        Pool relative fraction.
    k : torch.Tensor
        Chemical exchange matrix of shape ``(..., npools, npools)`` in ``[Hz]``.
    chemshift : torch.Tensor
        Chemical shift of each pool of shape ``(npools,)``.

    Returns
    -------
    TEop : deepmr.bloch.Operator
        Propagator until TE. If TE=0, this is the Identity.
    TETRop : deepmr.bloch.Operator
        Propagator until next TR.

    """
    XTE, _ = _free_precess(states, TE, T1, T2, weight, k, chemshift, None, None, None)
    XTETR, _ = _free_precess(
        states, TR - TE, T1, T2, weight, k, chemshift, None, None, None
    )
    return XTE, XTETR
+ + +
[docs]def SSFPFidStep( + states, + TE, + TR, + T1, + T2, + weight=None, + k=None, + chemshift=None, + D=None, + v=None, + grad_props=None, +): + """ + (Unbalanced) SSFP propagator. + + Consists of rfpulse (omitted) - free precession - spoiling gradient. + + Parameters + ---------- + states : dict + EPG states matrix. + TE : float + Echo Time in ``[ms]``. + TR : float + Repetition Time in ``[ms]``. + T1 : torch.Tensor + T1 relaxation time of shape ``(..., npools) in ``[ms]``. + T2 : torch.Tensor + T2 relaxation time of shape ``(..., npools) in ``[ms]``. + weight : torch.Tensor + Pool relative fraction. + k : torch.Tensor + Chemical exchange matrix ``(...., npools, npools)`` in ``[Hz]``. + chemshift : torch.Tensor + Chemical shift of each pool of shape ``(npools,). + D : torch.Tensor + Apparent diffusion coefficient of shape ``(...,)`` in ``[um**2/ms]``. Assume same coefficient for each pool. + v : torch.Tensor + Spin velocity of shape ``(...,)`` in ``[cm/s]``. Assume same coefficient for each pool. + grad_props : dict + Extra parameters. + + Returns + ------- + TEop : deepmr.bloch.Operator + Propagator until TE. If TE=0, this is the Identity. + TETRop : deepmr.bloch.Operator + Propagator until next TR. + + """ + XTE, _ = _free_precess(states, TE, T1, T2, weight, k, chemshift, D, v, grad_props) + XTETR, S = _free_precess( + states, TR - TE, T1, T2, weight, k, chemshift, D, v, grad_props + ) + return XTE, ops.CompositeOperator(S, XTETR, name="SSFPFid TE-TR Propagator")
+ + +
[docs]def SSFPEchoStep( + states, + TE, + TR, + T1, + T2, + weight=None, + k=None, + chemshift=None, + D=None, + v=None, + grad_props=None, +): + """ + (Reverse) SSFP propagator. + + Consists of rfpulse (omitted) - spoiling gradient - free precession. + + Parameters + ---------- + states : dict + EPG states matrix. + TE : float + Echo Time in ``[ms]``. + TR : float + Repetition Time in ``[ms]``. + T1 : torch.Tensor + T1 relaxation time of shape ``(..., npools) in ``[ms]``. + T2 : torch.Tensor + T2 relaxation time of shape ``(..., npools) in ``[ms]``. + weight : torch.Tensor + Pool relative fraction. + k : torch.Tensor + Chemical exchange matrix ``(...., npools, npools)`` in ``[Hz]``. + chemshift : torch.Tensor + Chemical shift of each pool of shape ``(npools,). + D : torch.Tensor + Apparent diffusion coefficient of shape ``(...,)`` in ``[um**2/ms]``. Assume same coefficient for each pool. + v : torch.Tensor + Spin velocity of shape ``(...,)`` in ``[cm/s]``. Assume same coefficient for each pool. + grad_props : dict + Extra parameters. + + Returns + ------- + TEop : deepmr.bloch.Operator + Propagator until TE. If TE=0, this is the Identity. + TETRop : deepmr.bloch.Operator + Propagator until next TR. + + """ + XTE, _ = _free_precess(states, TE, T1, T2, weight, k, chemshift, D, v, grad_props) + XTETR, S = _free_precess( + states, TR - TE, T1, T2, weight, k, chemshift, D, v, grad_props + ) + return ops.CompositeOperator(S, XTE, name="SSFPFid TE-TR Propagator"), XTETR
+ + +# %% local subroutine +def _free_precess(states, t, T1, T2, weight, k, chemshift, D, v, grad_props): + # parse gradient properties + if grad_props is None: + grad_props = {} + tau = None + elif grad_props: + tau = grad_props["duration"] + grad_props.pop("duration") + else: + tau = 0.0 + + # parse + device = states["F"].device + nstates = states["F"].shape[-4] + + # check if has exchange, diffusion and flow + hasK = k is not None + hasD = D is not None + hasV = v is not None + + # prep until TR + if t != 0: + if hasK: + E = ops.Relaxation( + device, + t, + T1, + T2, + weight, + k, + chemshift, + name="Relaxation-MT", + ) + else: + E = ops.Relaxation(device, t, T1, T2, name="Relaxation") + + # setup washout + if hasV and "moving" in states: + W = ops.FlowWash( + device, + t, + v, + name="Magnetization inflow / washout until next TR", + **grad_props, + ) + else: + W = ops.Identity(name="Inflow+Washout") + + # set up diffusion + if hasD: + D = ops.DiffusionDamping( + device, tau, D, nstates, name="Diffusion Damping", **grad_props + ) + else: + D = ops.Identity(name="Diffusion until TE") + + # set up flow + if hasV: + J = ops.FlowDephasing( + device, tau, v, nstates, name="Flow Dephasing", **grad_props + ) + else: + J = ops.Identity(name="Flow until next TR") + else: + E = ops.Identity(name="Relaxation") + D = ops.Identity(name="Diffusion Damping") + J = ops.Identity(name="Flow Dephasing") + W = ops.Identity(name="Inflow-Washout") + + # set up gradient spoiling + S = ops.Shift(name="Gradient Spoiling") + + # put everything together + X = ops.CompositeOperator(W, J, D, E, name="Free Precession") + + return X, S +
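The readout blocks are composed inside a sequence body in the same way; the following fragment condenses the Fast Spin Echo loop from the ``fse`` simulator shown later in this diff. Again, ``states``, ``signal`` and the tissue, pulse and gradient parameters are supplied by the EPG simulator, ``RF`` is the refocusing pulse operator built with ``blocks.ExcPulse``, and ``ops.observe`` records the echo.

>>> Xpre, Xpost = blocks.FSEStep(states, ESP, T1, T2, weight, k, chemshift, D, v, grad_props)
>>> for n in range(npulses):
...     states = Xpre(states)                    # half echo spacing before refocusing
...     states = RF(states, flip[n], phases[n])  # refocusing pulse
...     states = Xpost(states)                   # half echo spacing after refocusing
...     signal[n] = ops.observe(states, RF.phi)  # record the echo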
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/bloch/model/bssfpmrf.html b/_modules/deepmr/bloch/model/bssfpmrf.html new file mode 100644 index 00000000..a3136e37 --- /dev/null +++ b/_modules/deepmr/bloch/model/bssfpmrf.html @@ -0,0 +1,853 @@ + + + + + + + + + + + deepmr.bloch.model.bssfpmrf — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.bloch.model.bssfpmrf

+"""bSSFP MR Fingerprinting simulator"""
+
+__all__ = ["bssfpmrf"]
+
+import warnings
+import numpy as np
+
+import dacite
+from dacite import Config
+
+from .. import blocks
+from .. import ops
+from . import epg
+
+
+
[docs]def bssfpmrf( + flip, + TR, + T1, + T2, + sliceprof=False, + DE=False, + diff=None, + device="cpu", + TI=None, + **kwargs +): + """ + Simulate an inversion-prepared bSSFP sequence with variable flip angles. + + Parameters + ---------- + flip : float | np.ndarray | torch.Tensor + Flip angle in ``[deg]`` of shape ``(npulses,)`` or ``(npulses, nmodes)``. + TR : float + Repetition time in [ms]. + T1 : float | np.ndarray | torch.Tensor + Longitudinal relaxation time for main pool in ``[ms]``. + T2 : float | np.ndarray | torch.Tensor + Transverse relaxation time for main pool in ``[ms]``. + sliceprof : float | np.ndarray | torch.Tensor + Excitation slice profile (i.e., flip angle scaling across slice). + If ``False``, pulse are non selective. If ``True``, pulses are selective but ideal profile is assumed. + If array, flip angle scaling along slice is simulated. Defaults to ``False``. + DE : bool, optional + If True, simulation is repeated two times to mimick Driven Equilibrium acquisition. + Defaults to ``False``. + diff : str | tuple[str], optional + String or tuple of strings, saying which arguments + to get the signal derivative with respect to. + Defaults to ``None`` (no differentation). + device : str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + Defaults to ``cpu``. + TI : float + Inversion time in ``[ms]``. + Defaults to ``None`` (no preparation). + + Other Parameters + ---------------- + nstates : int, optional + Maximum number of EPG states to be retained during simulation. + High numbers improve accuracy but decrease performance. + Defaults to ``10``. + max_chunk_size : int, optional + Maximum number of atoms to be simulated in parallel. + High numbers increase speed and memory footprint. + Defaults to ``natoms``. + verbose : bool, optional + If ``True``, prints execution time for signal (and gradient) calculations. + Defaults to ``False``. + TE : float, optional + Echo time in ``[ms]``. Defaults to ``0.0``. + B1sqrdTau : float, optional + Pulse energies in ``[uT**2 * ms]`` when ``flip = 1.0 [deg]``. + global_inversion : bool, optional + Assume nonselective (``True``) or selective (``False``) inversion. + Defaults to ``True``. + inv_B1sqrdTau : float, optional + Inversion pulse energy in ``[uT**2 * ms]`` when ``flip = 1.0 [deg]``. + B1 : float | np.ndarray | torch.Tensor , optional + Flip angle scaling factor (``1.0 := nominal flip angle``). + Defaults to ``None``. + B0 : float | np.ndarray | torch.Tensor , optional + Bulk off-resonance in [Hz]. Defaults to ``None`` + B1Tx2 : float | np.ndarray | torch.Tensor + Flip angle scaling factor for secondary RF mode (``1.0 := nominal flip angle``). + Defaults to ``None``. + B1phase : float | np.ndarray | torch.Tensor + B1 relative phase in ``[deg]``. (``0.0 := nominal rf phase``). + Defaults to ``None``. + chemshift : float | np.ndarray | torch.Tensor + Chemical shift for main pool in ``[Hz]``. + Defaults to ``None``. + T1bm : float | np.ndarray | torch.Tensor + Longitudinal relaxation time for secondary pool in ``[ms]``. + Defaults to ``None``. + T2bm : float | np.ndarray | torch.Tensor + Transverse relaxation time for main secondary in ``[ms]``. + Defaults to ``None``. + kbm : float | np.ndarray | torch.Tensor + Nondirectional exchange between main and secondary pool in ``[Hz]``. + Defaults to ``None``. + weight_bm : float | np.ndarray | torch.Tensor + Relative secondary pool fraction. + Defaults to ``None``. 
+ chemshift_bm : float | np.ndarray | torch.Tensor + Chemical shift for secondary pool in ``[Hz]``. + Defaults to ``None``. + kmt : float | np.ndarray | torch.Tensor + Nondirectional exchange between free and bound pool in ``[Hz]``. + If secondary pool is defined, exchange is between secondary and bound pools + (i.e., myelin water and macromolecular), otherwise exchange + is between main and bound pools. + Defaults to ``None``. + weight_mt : float | np.ndarray | torch.Tensor + Relative bound pool fraction. + Defaults to ``None``. + + """ + # constructor + init_params = { + "flip": flip, + "TR": TR, + "T1": T1, + "T2": T2, + "diff": diff, + "device": device, + "TI": TI, + **kwargs, + } + + # get TE + if "TE" not in init_params: + TE = 0.0 + else: + TE = init_params["TE"] + + # get verbosity + if "verbose" in init_params: + verbose = init_params["verbose"] + else: + verbose = False + + # get verbosity + if "asnumpy" in init_params: + asnumpy = init_params["asnumpy"] + else: + asnumpy = True + + # get selectivity: + if sliceprof: + selective_exc = True + else: + selective_exc = False + + # check for global inversion + if "global_inversion" in init_params: + selective_inv = not (init_params["global_inversion"]) + else: + selective_inv = False + + # check for conflicts in inversion selectivity + if selective_exc is False and selective_inv is True: + warnings.warn("3D acquisition - forcing inversion pulse to global.") + selective_inv = False + + # inversion pulse properties + if TI is None: + inv_props = {} + else: + inv_props = {"slice_selective": selective_inv} + + if "inv_B1sqrdTau" in kwargs: + inv_props["b1rms"] = kwargs["inv_B1sqrdTau"] ** 0.5 + inv_props["duration"] = 1.0 + + # check conflicts in inversion settings + if TI is None: + if inv_props: + warnings.warn( + "Inversion not enabled - ignoring inversion pulse properties." + ) + inv_props = {} + + # excitation pulse properties + rf_props = {"slice_selective": selective_exc} + if "B1sqrdTau" in kwargs: + inv_props["b1rms"] = kwargs["B1sqrdTau"] ** 0.5 + inv_props["duration"] = 1.0 + + if np.isscalar(sliceprof) is False: + rf_props["slice_profile"] = kwargs["sliceprof"] + + # get nlocs + if "nlocs" in init_params: + nlocs = init_params["nlocs"] + else: + if selective_exc: + nlocs = 15 + else: + nlocs = 1 + + # interpolate slice profile: + if "slice_profile" in rf_props: + nlocs = min(nlocs, len(rf_props["slice_profile"])) + else: + nlocs = 1 + + # assign nlocs + init_params["nlocs"] = nlocs + + # put all properties together + props = {"inv_props": inv_props, "rf_props": rf_props, "DE": DE} + + # initialize simulator + simulator = dacite.from_dict( + bSSFPMRF, init_params, config=Config(check_types=False) + ) + + # run simulator + if diff: + # actual simulation + sig, dsig = simulator(flip=flip, TR=TR, TI=TI, TE=TE, props=props) + + # post processing + if asnumpy: + sig = sig.detach().cpu().numpy() + dsig = dsig.detach().cpu().numpy() + + # prepare info + info = {"trun": simulator.trun, "tgrad": simulator.tgrad} + if verbose: + return sig, dsig, info + else: + return sig, dsig + else: + # actual simulation + sig = simulator(flip=flip, TR=TR, TI=TI, TE=TE, props=props) + + # post processing + if asnumpy: + sig = sig.cpu().numpy() + + # prepare info + info = {"trun": simulator.trun} + if verbose: + return sig, info + else: + return sig
# %% utils
class bSSFPMRF(epg.EPGSimulator):
    """Class to simulate inversion-prepared (variable flip angle) bSSFP."""

    @staticmethod
    def sequence(
        flip, TR, TI, TE, props, T1, T2, B1, df, weight, k, chemshift, states, signal
    ):
        # parsing pulses and grad parameters
        inv_props = props["inv_props"]
        rf_props = props["rf_props"]
        driven_equilibrium = props["DE"]

        # get number of repetitions
        if driven_equilibrium:
            nreps = 2
        else:
            nreps = 1

        # get number of frames and echoes
        npulses = flip.shape[0]

        # define preparation
        Prep = blocks.InversionPrep(TI, T1, T2, weight, k, inv_props)

        # prepare RF pulse
        RF = blocks.ExcPulse(states, B1, rf_props)

        # prepare free precession period (balanced SSFP step)
        Xpre, Xpost = blocks.bSSFPStep(states, TE, TR, T1, T2, weight, k, chemshift)

        for r in range(nreps):
            # magnetization prep
            states = Prep(states)

            # actual sequence loop
            for n in range(npulses):
                # apply pulse
                states = RF(states, flip[n])

                # relax, recover and record signal at TE
                states = Xpre(states)
                signal[n] = ops.observe(states, RF.phi)

                # relax and recover until next TR
                states = Xpost(states)

        return signal
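A minimal call sketch for the public wrapper (parameter values are illustrative; ``sinusoidal_fa`` is used here only to build a plausible variable flip angle train):

>>> import deepmr
>>> flip = deepmr.sinusoidal_fa((35.0, 43.0, 70.0, 45.0, 27.0), 200, 10)  # [deg]
>>> sig = deepmr.bloch.bssfpmrf(flip, TR=10.0, T1=1000.0, T2=100.0, TI=20.0)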
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/bloch/model/fse.html b/_modules/deepmr/bloch/model/fse.html new file mode 100644 index 00000000..54559af5 --- /dev/null +++ b/_modules/deepmr/bloch/model/fse.html @@ -0,0 +1,860 @@ + + + + + + + + + + + deepmr.bloch.model.fse — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.bloch.model.fse

+"""Fast Spin Echo simulator"""
+
+__all__ = ["fse"]
+
+import warnings
+import numpy as np
+
+import dacite
+from dacite import Config
+
+from .. import blocks
+from .. import ops
+from . import epg
+
+
+
[docs]def fse(flip, phases, ESP, T1, T2, sliceprof=False, diff=None, device="cpu", **kwargs): + """ + Simulate a Fast Spin Echo sequence. + + Parameters + ---------- + flip : float | np.ndarray | torch.Tensor + Flip angle in ``[deg]`` of shape ``(npulses,)`` or ``(npulses, nmodes)``. + phases : float | np.ndarray | torch.Tensor + Refocusing angle phases in ``[deg]`` of shape ``(npulses,)`` or ``(npulses, nmodes)``. + ESP : float + Echo spacing in [ms]. + T1 : float | np.ndarray | torch.Tensor + Longitudinal relaxation time for main pool in ``[ms]``. + T2 : float | np.ndarray | torch.Tensor + Transverse relaxation time for main pool in ``[ms]``. + sliceprof : float | np.ndarray | torch.Tensor + Excitation slice profile (i.e., flip angle scaling across slice). + If ``False``, pulse are non selective. If ``True``, pulses are selective but ideal profile is assumed. + If array, flip angle scaling along slice is simulated. Defaults to ``False``. + spoil_inc : float, optional + RF spoiling increment in ``[deg]``. Defaults to ``117°``. + diff : str | tuple[str], optional + String or tuple of strings, saying which arguments + to get the signal derivative with respect to. + Defaults to ``None`` (no differentation). + device : str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + Defaults to ``cpu``. + + Other Parameters + ---------------- + nstates : int, optional + Maximum number of EPG states to be retained during simulation. + High numbers improve accuracy but decrease performance. + Defaults to ``10``. + max_chunk_size : int, optional + Maximum number of atoms to be simulated in parallel. + High numbers increase speed and memory footprint. + Defaults to ``natoms``. + verbose : bool, optional + If ``True``, prints execution time for signal (and gradient) calculations. + Defaults to ``False``. + B1sqrdTau : float, optional + Refocusing pulse energies in ``[uT**2 * ms]`` when ``flip = 1.0 [deg]``. + exc_flip : float + Excitation flip angle. Defaults to ``90 [deg]``. + exc_B1sqrdTau: float + Excitation pulse energy in ``[uT**2 * ms]``. + grad_tau : float, optional + Gradient lobe duration in ``[ms]``. + grad_amplitude : float, optional + Gradient amplitude along unbalanced direction in ``[mT / m]``. + If total_dephasing is not provided, this is used to compute diffusion and flow effects. + grad_dephasing : float, optional + Total gradient-induced dephasing across a voxel (in grad direction). + If gradient_amplitude is not provided, this is used to compute diffusion and flow effects. + voxelsize : str | list | tuple | np.ndarray | torch.Tensor, optional + Voxel size (``dx``, ``dy``, ``dz``) in ``[mm]``. + If scalar, assume isotropic voxel. Defaults to ``None``. + grad_orient : str | list | tuple | np.ndarray | torch.Tensor, optional + Gradient orientation (``"x"``, ``"y"``, ``"z"`` or ``versor``). Defaults to ``"z"``. + slice_orient : str | list | tuple | np.ndarray | torch.Tensor, optional + Slice orientation (``"x"``, ``"y"``, ``"z"`` or ``versor``). + Ignored if pulses are non-selective. Defaults to ``"z"``. + B1 : float | np.ndarray | torch.Tensor , optional + Flip angle scaling factor (``1.0 := nominal flip angle``). + Defaults to ``None``. + B0 : float | np.ndarray | torch.Tensor , optional + Bulk off-resonance in [Hz]. Defaults to ``None`` + B1Tx2 : float | np.ndarray | torch.Tensor + Flip angle scaling factor for secondary RF mode (``1.0 := nominal flip angle``). + Defaults to ``None``. + B1phase : float | np.ndarray | torch.Tensor + B1 relative phase in ``[deg]``. 
(``0.0 := nominal rf phase``). + Defaults to ``None``. + T2star : float | np.ndarray | torch.Tensor + Effective relaxation time for main pool in ``[ms]``. + Defaults to ``None``. + D : float | np.ndarray | torch.Tensor + Apparent diffusion coefficient in ``[um**2 / ms]``. + Defaults to ``None``. + v : float | np.ndarray | torch.Tensor + Spin velocity ``[cm / s]``. Defaults to ``None``. + chemshift : float | np.ndarray | torch.Tensor + Chemical shift for main pool in ``[Hz]``. + Defaults to ``None``. + T1bm : float | np.ndarray | torch.Tensor + Longitudinal relaxation time for secondary pool in ``[ms]``. + Defaults to ``None``. + T2bm : float | np.ndarray | torch.Tensor + Transverse relaxation time for main secondary in ``[ms]``. + Defaults to ``None``. + kbm : float | np.ndarray | torch.Tensor + Nondirectional exchange between main and secondary pool in ``[Hz]``. + Defaults to ``None``. + weight_bm : float | np.ndarray | torch.Tensor + Relative secondary pool fraction. + Defaults to ``None``. + chemshift_bm : float | np.ndarray | torch.Tensor + Chemical shift for secondary pool in ``[Hz]``. + Defaults to ``None``. + kmt : float | np.ndarray | torch.Tensor + Nondirectional exchange between free and bound pool in ``[Hz]``. + If secondary pool is defined, exchange is between secondary and bound pools + (i.e., myelin water and macromolecular), otherwise exchange + is between main and bound pools. + Defaults to ``None``. + weight_mt : float | np.ndarray | torch.Tensor + Relative bound pool fraction. + Defaults to ``None``. + + """ + # constructor + init_params = { + "flip": flip, + "phases": phases, + "ESP": ESP, + "T1": T1, + "T2": T2, + "diff": diff, + "device": device, + **kwargs, + } + + # get verbosity + if "verbose" in init_params: + verbose = init_params["verbose"] + else: + verbose = False + + # get verbosity + if "asnumpy" in init_params: + asnumpy = init_params["asnumpy"] + else: + asnumpy = True + + # get selectivity: + if sliceprof: + selective = True + else: + selective = False + + # add moving pool if required + if selective and "v" in init_params: + init_params["moving"] = True + + # excitation pulse properties + exc_props = {"slice_selective": selective} + if "exc_flip" in kwargs: + exc_props["flip"] = kwargs["exc_flip"] + else: + exc_props["flip"] = 90.0 + if "exc_B1sqrdTau" in kwargs: + exc_props["b1rms"] = kwargs["exc_B1sqrdTau"] ** 0.5 + exc_props["duration"] = 1.0 + + # refocusing pulse properties + rf_props = {"slice_selective": selective} + if "B1sqrdTau" in kwargs: + rf_props["b1rms"] = kwargs["B1sqrdTau"] ** 0.5 + rf_props["duration"] = 1.0 + + if np.isscalar(sliceprof) is False: + rf_props["slice_profile"] = kwargs["sliceprof"] + + # get nlocs + if "nlocs" in init_params: + nlocs = init_params["nlocs"] + else: + if selective: + nlocs = 15 + else: + nlocs = 1 + + # interpolate slice profile: + if "slice_profile" in rf_props: + nlocs = min(nlocs, len(rf_props["slice_profile"])) + else: + nlocs = 1 + + # assign nlocs + init_params["nlocs"] = nlocs + + # unbalanced gradient properties + grad_props = {} + if "grad_tau" in kwargs: + grad_props["duration"] = kwargs["grad_tau"] + if "grad_dephasing" in kwargs: + grad_props["total_dephasing"] = kwargs["grad_dephasing"] + if "voxelsize" in kwargs: + grad_props["voxelsize"] = kwargs["voxelsize"] + if "grad_amplitude" in kwargs: + grad_props["grad_amplitude"] = kwargs["grad_amplitude"] + if "grad_orient" in kwargs: + grad_props["grad_direction"] = kwargs["grad_orient"] + if "slice_orient" in kwargs: + grad_props["slice_direction"] 
= kwargs["slice_orient"] + + # check for possible inconsistencies: + if "total_dephasing" in rf_props and "grad_amplitude" in rf_props: + warnings.warn( + "Both total_dephasing and grad_amplitude are provided - using the first" + ) + + # put all properties together + props = {"exc_props": exc_props, "rf_props": rf_props, "grad_props": grad_props} + + # initialize simulator + simulator = dacite.from_dict(FSE, init_params, config=Config(check_types=False)) + + # run simulator + if diff: + # actual simulation + sig, dsig = simulator(flip=flip, phases=phases, ESP=ESP, props=props) + + # post processing + if asnumpy: + sig = sig.detach().cpu().numpy() + dsig = dsig.detach().cpu().numpy() + + # prepare info + info = {"trun": simulator.trun, "tgrad": simulator.tgrad} + if verbose: + return sig, dsig, info + else: + return sig, dsig + else: + # actual simulation + sig = simulator(flip=flip, phases=phases, ESP=ESP, props=props) + + # post processing + if asnumpy: + sig = sig.cpu().numpy() + + # prepare info + info = {"trun": simulator.trun} + if verbose: + return sig, info["trun"] + else: + return sig
+ + +# %% utils +spin_defaults = {"D": None, "v": None} + + +class FSE(epg.EPGSimulator): + """Class to simulate Fast Spin Echo.""" + + @staticmethod + def sequence( + flip, + phases, + ESP, + props, + T1, + T2, + B1, + df, + weight, + k, + chemshift, + D, + v, + states, + signal, + ): + # parsing pulses and grad parameters + exc_props = props["exc_props"] + rf_props = props["rf_props"] + grad_props = props["grad_props"] + + # get number of frames and echoes + npulses = flip.shape[0] + + # define preparation + Exc = blocks.ExcPulse(states, B1, exc_props) + + # prepare RF pulse + RF = blocks.ExcPulse(states, B1, rf_props) + + # prepare free precession period + Xpre, Xpost = blocks.FSEStep( + states, ESP, T1, T2, weight, k, chemshift, D, v, grad_props + ) + + # magnetization prep + states = Exc(states, exc_props["flip"]) + + # actual sequence loop + for n in range(npulses): + # relax, recover and shift for half echo spacing + states = Xpre(states) + + # apply refocusing + states = RF(states, flip[n], phases[n]) + + # relax, recover and spoil for half echo spacing + states = Xpost(states) + + # observe magnetization + signal[n] = ops.observe(states, RF.phi) + + return signal * 1j +
+ +
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/bloch/model/memprage.html b/_modules/deepmr/bloch/model/memprage.html new file mode 100644 index 00000000..6d70a26d --- /dev/null +++ b/_modules/deepmr/bloch/model/memprage.html @@ -0,0 +1,924 @@ + + + + + + + + + + + deepmr.bloch.model.memprage — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.bloch.model.memprage

+"""MEMPRAGE simulator"""
+
+__all__ = ["memprage"]
+
+import warnings
+import numpy as np
+import torch
+
+import dacite
+from dacite import Config
+
+from .. import blocks
+from .. import ops
+from . import epg
+
+
+
[docs]def memprage( + nshots, + nechoes, + flip, + ESP, + TR, + T1, + T2, + spoil_inc=117.0, + sliceprof=False, + diff=None, + device="cpu", + TI=0.0, + **kwargs +): + """ + Simulate a Multi-Echo Magnetization Prepared (MP) Rapid Gradient Echo sequence. + + Parameters + ---------- + nshots : int + Number of pulse in the ``Inversion`` block. + nechoes : int + Number of echoes in the ``SPGR`` block. + flip : float | np.ndarray | torch.Tensor + Flip angle in ``[deg]`` of shape ``(npulses,)`` or ``(npulses, nmodes)``. + ESP : float + Echo spacing in [ms]. + TR : float + Repetition time in [ms]. + T1 : float | np.ndarray | torch.Tensor + Longitudinal relaxation time for main pool in ``[ms]``. + T2 : float | np.ndarray | torch.Tensor + Transverse relaxation time for main pool in ``[ms]``. + sliceprof : float | np.ndarray | torch.Tensor + Excitation slice profile (i.e., flip angle scaling across slice). + If ``False``, pulse are non selective. If ``True``, pulses are selective but ideal profile is assumed. + If array, flip angle scaling along slice is simulated. Defaults to ``False``. + spoil_inc : float, optional + RF spoiling increment in ``[deg]``. Defaults to ``117°``. + diff : str | tuple[str], optional + String or tuple of strings, saying which arguments + to get the signal derivative with respect to. + Defaults to ``None`` (no differentation). + device : str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + Defaults to ``cpu``. + TI : float + Inversion time in ``[ms]``. + Defaults to ``None`` (no preparation). + + Other Parameters + ---------------- + nstates : int, optional + Maximum number of EPG states to be retained during simulation. + High numbers improve accuracy but decrease performance. + Defaults to ``10``. + max_chunk_size : int, optional + Maximum number of atoms to be simulated in parallel. + High numbers increase speed and memory footprint. + Defaults to ``natoms``. + verbose : bool, optional + If ``True``, prints execution time for signal (and gradient) calculations. + Defaults to ``False``. + B1sqrdTau : float, optional + Pulse energies in ``[uT**2 * ms]`` when ``flip = 1.0 [deg]``. + global_inversion : bool, optional + Assume nonselective (``True``) or selective (``False``) inversion. + Defaults to ``True``. + inv_B1sqrdTau : float, optional + Inversion pulse energy in ``[uT**2 * ms]`` when ``flip = 1.0 [deg]``. + grad_tau : float, optional + Gradient lobe duration in ``[ms]``. + grad_amplitude : float, optional + Gradient amplitude along unbalanced direction in ``[mT / m]``. + If total_dephasing is not provided, this is used to compute diffusion and flow effects. + grad_dephasing : float, optional + Total gradient-induced dephasing across a voxel (in grad direction). + If gradient_amplitude is not provided, this is used to compute diffusion and flow effects. + voxelsize : str | list | tuple | np.ndarray | torch.Tensor, optional + Voxel size (``dx``, ``dy``, ``dz``) in ``[mm]``. + If scalar, assume isotropic voxel. Defaults to ``None``. + grad_orient : str | list | tuple | np.ndarray | torch.Tensor, optional + Gradient orientation (``"x"``, ``"y"``, ``"z"`` or ``versor``). Defaults to ``"z"``. + slice_orient : str | list | tuple | np.ndarray | torch.Tensor, optional + Slice orientation (``"x"``, ``"y"``, ``"z"`` or ``versor``). + Ignored if pulses are non-selective. Defaults to ``"z"``. + B1 : float | np.ndarray | torch.Tensor , optional + Flip angle scaling factor (``1.0 := nominal flip angle``). + Defaults to ``None``. 
+ B0 : float | np.ndarray | torch.Tensor , optional + Bulk off-resonance in [Hz]. Defaults to ``None`` + B1Tx2 : float | np.ndarray | torch.Tensor + Flip angle scaling factor for secondary RF mode (``1.0 := nominal flip angle``). + Defaults to ``None``. + B1phase : float | np.ndarray | torch.Tensor + B1 relative phase in ``[deg]``. (``0.0 := nominal rf phase``). + Defaults to ``None``. + T2star : float | np.ndarray | torch.Tensor + Effective relaxation time for main pool in ``[ms]``. + Defaults to ``None``. + D : float | np.ndarray | torch.Tensor + Apparent diffusion coefficient in ``[um**2 / ms]``. + Defaults to ``None``. + v : float | np.ndarray | torch.Tensor + Spin velocity ``[cm / s]``. Defaults to ``None``. + chemshift : float | np.ndarray | torch.Tensor + Chemical shift for main pool in ``[Hz]``. + Defaults to ``None``. + T1bm : float | np.ndarray | torch.Tensor + Longitudinal relaxation time for secondary pool in ``[ms]``. + Defaults to ``None``. + T2bm : float | np.ndarray | torch.Tensor + Transverse relaxation time for main secondary in ``[ms]``. + Defaults to ``None``. + kbm : float | np.ndarray | torch.Tensor + Nondirectional exchange between main and secondary pool in ``[Hz]``. + Defaults to ``None``. + weight_bm : float | np.ndarray | torch.Tensor + Relative secondary pool fraction. + Defaults to ``None``. + chemshift_bm : float | np.ndarray | torch.Tensor + Chemical shift for secondary pool in ``[Hz]``. + Defaults to ``None``. + kmt : float | np.ndarray | torch.Tensor + Nondirectional exchange between free and bound pool in ``[Hz]``. + If secondary pool is defined, exchange is between secondary and bound pools + (i.e., myelin water and macromolecular), otherwise exchange + is between main and bound pools. + Defaults to ``None``. + weight_mt : float | np.ndarray | torch.Tensor + Relative bound pool fraction. + Defaults to ``None``. + + """ + # constructor + init_params = { + "flip": flip, + "ESP": ESP, + "TR": TR, + "T1": T1, + "T2": T2, + "diff": diff, + "device": device, + "TI": TI, + **kwargs, + } + + # get verbosity + if "verbose" in init_params: + verbose = init_params["verbose"] + else: + verbose = False + + # get verbosity + if "asnumpy" in init_params: + asnumpy = init_params["asnumpy"] + else: + asnumpy = True + + # get selectivity: + if sliceprof: + selective_exc = True + else: + selective_exc = False + + # add moving pool if required + if selective_exc and "v" in init_params: + init_params["moving"] = True + + # check for global inversion + if "global_inversion" in init_params: + selective_inv = not (init_params["global_inversion"]) + else: + selective_inv = False + + # check for conflicts in inversion selectivity + if selective_exc is False and selective_inv is True: + warnings.warn("3D acquisition - forcing inversion pulse to global.") + selective_inv = False + + # inversion pulse properties + if TI is None: + inv_props = {} + else: + inv_props = {"slice_selective": selective_inv} + + if "inv_B1sqrdTau" in kwargs: + inv_props["b1rms"] = kwargs["inv_B1sqrdTau"] ** 0.5 + inv_props["duration"] = 1.0 + + # check conflicts in inversion settings + if TI is None: + if inv_props: + warnings.warn( + "Inversion not enabled - ignoring inversion pulse properties." 
+ ) + inv_props = {} + + # excitation pulse properties + rf_props = {"slice_selective": selective_exc} + if "B1sqrdTau" in kwargs: + inv_props["b1rms"] = kwargs["B1sqrdTau"] ** 0.5 + inv_props["duration"] = 1.0 + + if np.isscalar(sliceprof) is False: + rf_props["slice_profile"] = kwargs["sliceprof"] + + # get nlocs + if "nlocs" in init_params: + nlocs = init_params["nlocs"] + else: + if selective_exc: + nlocs = 15 + else: + nlocs = 1 + + # interpolate slice profile: + if "slice_profile" in rf_props: + nlocs = min(nlocs, len(rf_props["slice_profile"])) + else: + nlocs = 1 + + # assign nlocs + init_params["nlocs"] = nlocs + + # unbalanced gradient properties + grad_props = {} + if "grad_tau" in kwargs: + grad_props["duration"] = kwargs["grad_tau"] + if "grad_dephasing" in kwargs: + grad_props["total_dephasing"] = kwargs["grad_dephasing"] + if "voxelsize" in kwargs: + grad_props["voxelsize"] = kwargs["voxelsize"] + if "grad_amplitude" in kwargs: + grad_props["grad_amplitude"] = kwargs["grad_amplitude"] + if "grad_orient" in kwargs: + grad_props["grad_direction"] = kwargs["grad_orient"] + if "slice_orient" in kwargs: + grad_props["slice_direction"] = kwargs["slice_orient"] + + # check for possible inconsistencies: + if "total_dephasing" in rf_props and "grad_amplitude" in rf_props: + warnings.warn( + "Both total_dephasing and grad_amplitude are provided - using the first" + ) + + # put all properties together + props = { + "inv_props": inv_props, + "rf_props": rf_props, + "grad_props": grad_props, + "nechoes": nechoes, + "nshots": nshots, + "spoil_inc": spoil_inc, + } + + # initialize simulator + simulator = dacite.from_dict( + MEMPRAGE, init_params, config=Config(check_types=False) + ) + + # run simulator + if diff: + # actual simulation + sig, dsig = simulator(flip=flip, TR=TR, TI=TI, ESP=ESP, props=props) + + # post processing + if asnumpy: + sig = sig.detach().cpu().numpy() + dsig = dsig.detach().cpu().numpy() + + # prepare info + info = {"trun": simulator.trun, "tgrad": simulator.tgrad} + if verbose: + return sig, dsig, info + else: + return sig, dsig + else: + # actual simulation + sig = simulator(flip=flip, TR=TR, TI=TI, ESP=ESP, props=props) + + # post processing + if asnumpy: + sig = sig.cpu().numpy() + + # prepare info + info = {"trun": simulator.trun} + if verbose: + return sig, info + else: + return sig
+ + +# %% utils +spin_defaults = {"T2star": None, "D": None, "v": None} + + +class MEMPRAGE(epg.EPGSimulator): + """Class to simulate inversion-prepared Multi-Echo Rapid Gradient Echo.""" + + @staticmethod + def sequence( + flip, + TR, + TI, + ESP, + props, + T1, + T2, + B1, + df, + weight, + k, + chemshift, + D, + v, + states, + signal, + ): + # parsing pulses and grad parameters + inv_props = props["inv_props"] + rf_props = props["rf_props"] + grad_props = props["grad_props"] + spoil_inc = props["spoil_inc"] + npulses = props["nshots"] + nechoes = props["nechoes"] + + # define preparation + Prep = blocks.InversionPrep(TI, T1, T2, weight, k, inv_props) + + # prepare RF pulse + RF = blocks.ExcPulse(states, B1, rf_props) + + # prepare free precession period + X, XS = blocks.SSFPFidStep( + states, ESP, TR, T1, T2, weight, k, chemshift, D, v, grad_props + ) + + # initialize phase + phi = 0 + dphi = 0 + + # magnetization prep + states = Prep(states) + + # actual sequence loop + for n in range(npulses): + # update phase + dphi = (phi + spoil_inc) % 360.0 + phi = (phi + dphi) % 360.0 + + # apply pulse + states = RF(states, flip, phi) + + # relax, recover and record signal for each TE + for e in range(nechoes): + states = X(states) + signal[n, e] = ops.observe(states, RF.phi) + + # relax, recover and spoil + states = XS(states) + + return ops.susceptibility( + signal, + ESP * torch.arange(nechoes, device=df.device, dtype=torch.float32), + df, + ) +
+ +
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/bloch/model/mprage.html b/_modules/deepmr/bloch/model/mprage.html new file mode 100644 index 00000000..8eb26fb5 --- /dev/null +++ b/_modules/deepmr/bloch/model/mprage.html @@ -0,0 +1,905 @@ + + + + + + + + + + + deepmr.bloch.model.mprage — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.bloch.model.mprage

+"""MPRAGE simulator"""
+
+__all__ = ["mprage"]
+
+import warnings
+import numpy as np
+
+import dacite
+from dacite import Config
+
+from .. import blocks
+from .. import ops
+from . import epg
+
+
+
[docs]def mprage( + nshots, + flip, + TR, + T1, + T2, + spoil_inc=117.0, + sliceprof=False, + diff=None, + device="cpu", + TI=0.0, + **kwargs +): + """ + Simulate a Magnetization Prepared (MP) Rapid Gradient Echo sequence. + + Parameters + ---------- + nshots : int + Number of pulse in the ``Inversion`` block. + flip : float | np.ndarray | torch.Tensor + Flip angle in ``[deg]`` of shape ``(npulses,)`` or ``(npulses, nmodes)``. + TR : float + Repetition time in [ms]. + T1 : float | np.ndarray | torch.Tensor + Longitudinal relaxation time for main pool in ``[ms]``. + T2 : float | np.ndarray | torch.Tensor + Transverse relaxation time for main pool in ``[ms]``. + sliceprof : float | np.ndarray | torch.Tensor + Excitation slice profile (i.e., flip angle scaling across slice). + If ``False``, pulse are non selective. If ``True``, pulses are selective but ideal profile is assumed. + If array, flip angle scaling along slice is simulated. Defaults to ``False``. + spoil_inc : float, optional + RF spoiling increment in ``[deg]``. Defaults to ``117°``. + diff : str | tuple[str], optional + String or tuple of strings, saying which arguments + to get the signal derivative with respect to. + Defaults to ``None`` (no differentation). + device : str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + Defaults to ``cpu``. + TI : float + Inversion time in ``[ms]``. + Defaults to ``None`` (no preparation). + + Other Parameters + ---------------- + nstates : int, optional + Maximum number of EPG states to be retained during simulation. + High numbers improve accuracy but decrease performance. + Defaults to ``10``. + max_chunk_size : int, optional + Maximum number of atoms to be simulated in parallel. + High numbers increase speed and memory footprint. + Defaults to ``natoms``. + verbose : bool, optional + If ``True``, prints execution time for signal (and gradient) calculations. + Defaults to ``False``. + B1sqrdTau : float, optional + Pulse energies in ``[uT**2 * ms]`` when ``flip = 1.0 [deg]``. + global_inversion : bool, optional + Assume nonselective (``True``) or selective (``False``) inversion. + Defaults to ``True``. + inv_B1sqrdTau : float, optional + Inversion pulse energy in ``[uT**2 * ms]`` when ``flip = 1.0 [deg]``. + grad_tau : float, optional + Gradient lobe duration in ``[ms]``. + grad_amplitude : float, optional + Gradient amplitude along unbalanced direction in ``[mT / m]``. + If total_dephasing is not provided, this is used to compute diffusion and flow effects. + grad_dephasing : float, optional + Total gradient-induced dephasing across a voxel (in grad direction). + If gradient_amplitude is not provided, this is used to compute diffusion and flow effects. + voxelsize : str | list | tuple | np.ndarray | torch.Tensor, optional + Voxel size (``dx``, ``dy``, ``dz``) in ``[mm]``. + If scalar, assume isotropic voxel. Defaults to ``None``. + grad_orient : str | list | tuple | np.ndarray | torch.Tensor, optional + Gradient orientation (``"x"``, ``"y"``, ``"z"`` or ``versor``). Defaults to ``"z"``. + slice_orient : str | list | tuple | np.ndarray | torch.Tensor, optional + Slice orientation (``"x"``, ``"y"``, ``"z"`` or ``versor``). + Ignored if pulses are non-selective. Defaults to ``"z"``. + B1 : float | np.ndarray | torch.Tensor , optional + Flip angle scaling factor (``1.0 := nominal flip angle``). + Defaults to ``None``. + B0 : float | np.ndarray | torch.Tensor , optional + Bulk off-resonance in [Hz]. 
Defaults to ``None`` + B1Tx2 : float | np.ndarray | torch.Tensor + Flip angle scaling factor for secondary RF mode (``1.0 := nominal flip angle``). + Defaults to ``None``. + B1phase : float | np.ndarray | torch.Tensor + B1 relative phase in ``[deg]``. (``0.0 := nominal rf phase``). + Defaults to ``None``. + T2star : float | np.ndarray | torch.Tensor + Effective relaxation time for main pool in ``[ms]``. + Defaults to ``None``. + D : float | np.ndarray | torch.Tensor + Apparent diffusion coefficient in ``[um**2 / ms]``. + Defaults to ``None``. + v : float | np.ndarray | torch.Tensor + Spin velocity ``[cm / s]``. Defaults to ``None``. + chemshift : float | np.ndarray | torch.Tensor + Chemical shift for main pool in ``[Hz]``. + Defaults to ``None``. + T1bm : float | np.ndarray | torch.Tensor + Longitudinal relaxation time for secondary pool in ``[ms]``. + Defaults to ``None``. + T2bm : float | np.ndarray | torch.Tensor + Transverse relaxation time for main secondary in ``[ms]``. + Defaults to ``None``. + kbm : float | np.ndarray | torch.Tensor + Nondirectional exchange between main and secondary pool in ``[Hz]``. + Defaults to ``None``. + weight_bm : float | np.ndarray | torch.Tensor + Relative secondary pool fraction. + Defaults to ``None``. + chemshift_bm : float | np.ndarray | torch.Tensor + Chemical shift for secondary pool in ``[Hz]``. + Defaults to ``None``. + kmt : float | np.ndarray | torch.Tensor + Nondirectional exchange between free and bound pool in ``[Hz]``. + If secondary pool is defined, exchange is between secondary and bound pools + (i.e., myelin water and macromolecular), otherwise exchange + is between main and bound pools. + Defaults to ``None``. + weight_mt : float | np.ndarray | torch.Tensor + Relative bound pool fraction. + Defaults to ``None``. 
+ + """ + # constructor + init_params = { + "flip": flip, + "TR": TR, + "T1": T1, + "T2": T2, + "diff": diff, + "device": device, + "TI": TI, + **kwargs, + } + + # get TE + if "TE" not in init_params: + TE = 0.0 + else: + TE = init_params["TE"] + + # get verbosity + if "verbose" in init_params: + verbose = init_params["verbose"] + else: + verbose = False + + # get verbosity + if "asnumpy" in init_params: + asnumpy = init_params["asnumpy"] + else: + asnumpy = True + + # get selectivity: + if sliceprof: + selective_exc = True + else: + selective_exc = False + + # add moving pool if required + if selective_exc and "v" in init_params: + init_params["moving"] = True + + # check for global inversion + if "global_inversion" in init_params: + selective_inv = not (init_params["global_inversion"]) + else: + selective_inv = False + + # check for conflicts in inversion selectivity + if selective_exc is False and selective_inv is True: + warnings.warn("3D acquisition - forcing inversion pulse to global.") + selective_inv = False + + # inversion pulse properties + if TI is None: + inv_props = {} + else: + inv_props = {"slice_selective": selective_inv} + + if "inv_B1sqrdTau" in kwargs: + inv_props["b1rms"] = kwargs["inv_B1sqrdTau"] ** 0.5 + inv_props["duration"] = 1.0 + + # excitation pulse properties + rf_props = {"slice_selective": selective_exc} + if "B1sqrdTau" in kwargs: + inv_props["b1rms"] = kwargs["B1sqrdTau"] ** 0.5 + inv_props["duration"] = 1.0 + + if np.isscalar(sliceprof) is False: + rf_props["slice_profile"] = kwargs["sliceprof"] + + # get nlocs + if "nlocs" in init_params: + nlocs = init_params["nlocs"] + else: + if selective_exc: + nlocs = 15 + else: + nlocs = 1 + + # interpolate slice profile: + if "slice_profile" in rf_props: + nlocs = min(nlocs, len(rf_props["slice_profile"])) + else: + nlocs = 1 + + # assign nlocs + init_params["nlocs"] = nlocs + + # unbalanced gradient properties + grad_props = {} + if "grad_tau" in kwargs: + grad_props["duration"] = kwargs["grad_tau"] + if "grad_dephasing" in kwargs: + grad_props["total_dephasing"] = kwargs["grad_dephasing"] + if "voxelsize" in kwargs: + grad_props["voxelsize"] = kwargs["voxelsize"] + if "grad_amplitude" in kwargs: + grad_props["grad_amplitude"] = kwargs["grad_amplitude"] + if "grad_orient" in kwargs: + grad_props["grad_direction"] = kwargs["grad_orient"] + if "slice_orient" in kwargs: + grad_props["slice_direction"] = kwargs["slice_orient"] + + # check for possible inconsistencies: + if "total_dephasing" in rf_props and "grad_amplitude" in rf_props: + warnings.warn( + "Both total_dephasing and grad_amplitude are provided - using the first" + ) + + # put all properties together + props = { + "inv_props": inv_props, + "rf_props": rf_props, + "grad_props": grad_props, + "nshots": nshots, + "spoil_inc": spoil_inc, + } + + # initialize simulator + simulator = dacite.from_dict(MPRAGE, init_params, config=Config(check_types=False)) + + # run simulator + if diff: + # actual simulation + sig, dsig = simulator(flip=flip, TR=TR, TI=TI, TE=TE, props=props) + + # post processing + if asnumpy: + sig = sig.detach().cpu().numpy() + dsig = dsig.detach().cpu().numpy() + + # prepare info + info = {"trun": simulator.trun, "tgrad": simulator.tgrad} + if verbose: + return sig, dsig, info + else: + return sig, dsig + else: + # actual simulation + sig = simulator(flip=flip, TR=TR, TI=TI, TE=TE, props=props) + + # post processing + if asnumpy: + sig = sig.cpu().numpy() + + # prepare info + info = {"trun": simulator.trun} + if verbose: + return sig, info + 
else: + return sig
+ + +# %% utils +spin_defaults = {"T2star": None, "D": None, "v": None} + + +class MPRAGE(epg.EPGSimulator): + """Class to simulate inversion-prepared Rapid Gradient Echo.""" + + @staticmethod + def sequence( + flip, + TR, + TI, + TE, + props, + T1, + T2, + B1, + df, + weight, + k, + chemshift, + D, + v, + states, + signal, + ): + # parsing pulses and grad parameters + inv_props = props["inv_props"] + rf_props = props["rf_props"] + grad_props = props["grad_props"] + spoil_inc = props["spoil_inc"] + npulses = props["nshots"] + + # define preparation + Prep = blocks.InversionPrep(TI, T1, T2, weight, k, inv_props) + + # prepare RF pulse + RF = blocks.ExcPulse(states, B1, rf_props) + + # prepare free precession period + X, XS = blocks.SSFPFidStep( + states, TE, TR, T1, T2, weight, k, chemshift, D, v, grad_props + ) + + # initialize phase + phi = 0 + dphi = 0 + + # magnetization prep + states = Prep(states) + + # actual sequence loop + for n in range(npulses): + # update phase + dphi = (phi + spoil_inc) % 360.0 + phi = (phi + dphi) % 360.0 + + # apply pulse + states = RF(states, flip[n], phi) + + # relax, recover and record signal for each TE + states = X(states) + signal[n] = ops.observe(states, RF.phi) + + # relax, recover and spoil + states = XS(states) + + return ops.susceptibility(signal, TE, df) +
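A minimal call sketch for the public wrapper (parameter values are illustrative; as for ``memprage``, ``B0`` is passed because the final signal model applies the off-resonance term at the echo time):

>>> import deepmr
>>> sig = deepmr.bloch.mprage(
...     128, flip=7.0, TR=10.0, T1=1000.0, T2=100.0, TI=1000.0, TE=3.0, B0=0.0
... )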
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/bloch/model/ssfpmrf.html b/_modules/deepmr/bloch/model/ssfpmrf.html new file mode 100644 index 00000000..ad93db47 --- /dev/null +++ b/_modules/deepmr/bloch/model/ssfpmrf.html @@ -0,0 +1,913 @@ + + + + + + + + + + + deepmr.bloch.model.ssfpmrf — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.bloch.model.ssfpmrf

+"""SSFP MR Fingerprinting simulator"""
+
+__all__ = ["ssfpmrf"]
+
+import warnings
+import numpy as np
+
+import dacite
+from dacite import Config
+
+from .. import blocks
+from .. import ops
+from . import epg
+
+
+
[docs]def ssfpmrf( + flip, + TR, + T1, + T2, + sliceprof=False, + DE=False, + diff=None, + device="cpu", + TI=None, + **kwargs +): + """ + Simulate an inversion-prepared SSFP sequence with variable flip angles. + + Parameters + ---------- + flip : float | np.ndarray | torch.Tensor + Flip angle in ``[deg]`` of shape ``(npulses,)`` or ``(npulses, nmodes)``. + TR : float + Repetition time in [ms]. + T1 : float | np.ndarray | torch.Tensor + Longitudinal relaxation time for main pool in ``[ms]``. + T2 : float | np.ndarray | torch.Tensor + Transverse relaxation time for main pool in ``[ms]``. + sliceprof : float | np.ndarray | torch.Tensor + Excitation slice profile (i.e., flip angle scaling across slice). + If ``False``, pulse are non selective. If ``True``, pulses are selective but ideal profile is assumed. + If array, flip angle scaling along slice is simulated. Defaults to ``False``. + DE : bool, optional + If True, simulation is repeated two times to mimick Driven Equilibrium acquisition. + Defaults to ``False``. + diff : str | tuple[str], optional + String or tuple of strings, saying which arguments + to get the signal derivative with respect to. + Defaults to ``None`` (no differentation). + device : str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + Defaults to ``cpu``. + TI : float + Inversion time in ``[ms]``. + Defaults to ``None`` (no preparation). + + Other Parameters + ---------------- + nstates : int, optional + Maximum number of EPG states to be retained during simulation. + High numbers improve accuracy but decrease performance. + Defaults to ``10``. + max_chunk_size : int, optional + Maximum number of atoms to be simulated in parallel. + High numbers increase speed and memory footprint. + Defaults to ``natoms``. + verbose : bool, optional + If ``True``, prints execution time for signal (and gradient) calculations. + Defaults to ``False``. + TE : float, optional + Echo time in ``[ms]``. Defaults to ``0.0``. + B1sqrdTau : float, optional + Pulse energies in ``[uT**2 * ms]`` when ``flip = 1.0 [deg]``. + global_inversion : bool, optional + Assume nonselective (``True``) or selective (``False``) inversion. + Defaults to ``True``. + inv_B1sqrdTau : float, optional + Inversion pulse energy in ``[uT**2 * ms]`` when ``flip = 1.0 [deg]``. + grad_tau : float, optional + Gradient lobe duration in ``[ms]``. + grad_amplitude : float, optional + Gradient amplitude along unbalanced direction in ``[mT / m]``. + If total_dephasing is not provided, this is used to compute diffusion and flow effects. + grad_dephasing : float, optional + Total gradient-induced dephasing across a voxel (in grad direction). + If gradient_amplitude is not provided, this is used to compute diffusion and flow effects. + voxelsize : str | list | tuple | np.ndarray | torch.Tensor, optional + Voxel size (``dx``, ``dy``, ``dz``) in ``[mm]``. + If scalar, assume isotropic voxel. Defaults to ``None``. + grad_orient : str | list | tuple | np.ndarray | torch.Tensor, optional + Gradient orientation (``"x"``, ``"y"``, ``"z"`` or ``versor``). Defaults to ``"z"``. + slice_orient : str | list | tuple | np.ndarray | torch.Tensor, optional + Slice orientation (``"x"``, ``"y"``, ``"z"`` or ``versor``). + Ignored if pulses are non-selective. Defaults to ``"z"``. + B1 : float | np.ndarray | torch.Tensor , optional + Flip angle scaling factor (``1.0 := nominal flip angle``). + Defaults to ``None``. + B0 : float | np.ndarray | torch.Tensor , optional + Bulk off-resonance in [Hz]. 
Defaults to ``None`` + B1Tx2 : float | np.ndarray | torch.Tensor + Flip angle scaling factor for secondary RF mode (``1.0 := nominal flip angle``). + Defaults to ``None``. + B1phase : float | np.ndarray | torch.Tensor + B1 relative phase in ``[deg]``. (``0.0 := nominal rf phase``). + Defaults to ``None``. + T2star : float | np.ndarray | torch.Tensor + Effective relaxation time for main pool in ``[ms]``. + Defaults to ``None``. + D : float | np.ndarray | torch.Tensor + Apparent diffusion coefficient in ``[um**2 / ms]``. + Defaults to ``None``. + v : float | np.ndarray | torch.Tensor + Spin velocity ``[cm / s]``. Defaults to ``None``. + chemshift : float | np.ndarray | torch.Tensor + Chemical shift for main pool in ``[Hz]``. + Defaults to ``None``. + T1bm : float | np.ndarray | torch.Tensor + Longitudinal relaxation time for secondary pool in ``[ms]``. + Defaults to ``None``. + T2bm : float | np.ndarray | torch.Tensor + Transverse relaxation time for main secondary in ``[ms]``. + Defaults to ``None``. + kbm : float | np.ndarray | torch.Tensor + Nondirectional exchange between main and secondary pool in ``[Hz]``. + Defaults to ``None``. + weight_bm : float | np.ndarray | torch.Tensor + Relative secondary pool fraction. + Defaults to ``None``. + chemshift_bm : float | np.ndarray | torch.Tensor + Chemical shift for secondary pool in ``[Hz]``. + Defaults to ``None``. + kmt : float | np.ndarray | torch.Tensor + Nondirectional exchange between free and bound pool in ``[Hz]``. + If secondary pool is defined, exchange is between secondary and bound pools + (i.e., myelin water and macromolecular), otherwise exchange + is between main and bound pools. + Defaults to ``None``. + weight_mt : float | np.ndarray | torch.Tensor + Relative bound pool fraction. + Defaults to ``None``. + + """ + # constructor + init_params = { + "flip": flip, + "TR": TR, + "T1": T1, + "T2": T2, + "diff": diff, + "device": device, + "TI": TI, + **kwargs, + } + + # get TE + if "TE" not in init_params: + TE = 0.0 + else: + TE = init_params["TE"] + + # get verbosity + if "verbose" in init_params: + verbose = init_params["verbose"] + else: + verbose = False + + # get verbosity + if "asnumpy" in init_params: + asnumpy = init_params["asnumpy"] + else: + asnumpy = True + + # get selectivity: + if sliceprof: + selective_exc = True + else: + selective_exc = False + + # add moving pool if required + if selective_exc and "v" in init_params: + init_params["moving"] = True + + # check for global inversion + if "global_inversion" in init_params: + selective_inv = not (init_params["global_inversion"]) + else: + selective_inv = False + + # check for conflicts in inversion selectivity + if selective_exc is False and selective_inv is True: + warnings.warn("3D acquisition - forcing inversion pulse to global.") + selective_inv = False + + # inversion pulse properties + if TI is None: + inv_props = {} + else: + inv_props = {"slice_selective": selective_inv} + + if "inv_B1sqrdTau" in kwargs: + inv_props["b1rms"] = kwargs["inv_B1sqrdTau"] ** 0.5 + inv_props["duration"] = 1.0 + + # check conflicts in inversion settings + if TI is None: + if inv_props: + warnings.warn( + "Inversion not enabled - ignoring inversion pulse properties." 
+ ) + inv_props = {} + + # excitation pulse properties + rf_props = {"slice_selective": selective_exc} + if "B1sqrdTau" in kwargs: + inv_props["b1rms"] = kwargs["B1sqrdTau"] ** 0.5 + inv_props["duration"] = 1.0 + + if np.isscalar(sliceprof) is False: + rf_props["slice_profile"] = kwargs["sliceprof"] + + # get nlocs + if "nlocs" in init_params: + nlocs = init_params["nlocs"] + else: + if selective_exc: + nlocs = 15 + else: + nlocs = 1 + + # interpolate slice profile: + if "slice_profile" in rf_props: + nlocs = min(nlocs, len(rf_props["slice_profile"])) + else: + nlocs = 1 + + # assign nlocs + init_params["nlocs"] = nlocs + + # unbalanced gradient properties + grad_props = {} + if "grad_tau" in kwargs: + grad_props["duration"] = kwargs["grad_tau"] + if "grad_dephasing" in kwargs: + grad_props["total_dephasing"] = kwargs["grad_dephasing"] + if "voxelsize" in kwargs: + grad_props["voxelsize"] = kwargs["voxelsize"] + if "grad_amplitude" in kwargs: + grad_props["grad_amplitude"] = kwargs["grad_amplitude"] + if "grad_orient" in kwargs: + grad_props["grad_direction"] = kwargs["grad_orient"] + if "slice_orient" in kwargs: + grad_props["slice_direction"] = kwargs["slice_orient"] + + # check for possible inconsistencies: + if "total_dephasing" in rf_props and "grad_amplitude" in rf_props: + warnings.warn( + "Both total_dephasing and grad_amplitude are provided - using the first" + ) + + # put all properties together + props = { + "inv_props": inv_props, + "rf_props": rf_props, + "grad_props": grad_props, + "DE": DE, + } + + # initialize simulator + simulator = dacite.from_dict(SSFPMRF, init_params, config=Config(check_types=False)) + + # run simulator + if diff: + # actual simulation + sig, dsig = simulator(flip=flip, TR=TR, TI=TI, TE=TE, props=props) + + # post processing + if asnumpy: + sig = sig.detach().cpu().numpy() + dsig = dsig.detach().cpu().numpy() + + # prepare info + info = {"trun": simulator.trun, "tgrad": simulator.tgrad} + if verbose: + return sig, dsig, info + else: + return sig, dsig + else: + # actual simulation + sig = simulator(flip=flip, TR=TR, TI=TI, TE=TE, props=props) + + # post processing + if asnumpy: + sig = sig.cpu().numpy() + + # prepare info + info = {"trun": simulator.trun} + if verbose: + return sig, info + else: + return sig
+ + +# %% utils +spin_defaults = {"T2star": None, "D": None, "v": None} + + +class SSFPMRF(epg.EPGSimulator): + """Class to simulate inversion-prepared (variable flip angle) SSFP.""" + + @staticmethod + def sequence( + flip, + TR, + TI, + TE, + props, + T1, + T2, + B1, + df, + weight, + k, + chemshift, + D, + v, + states, + signal, + ): + # parsing pulses and grad parameters + inv_props = props["inv_props"] + rf_props = props["rf_props"] + grad_props = props["grad_props"] + driven_equilibrium = props["DE"] + + # get number of repetitions + if driven_equilibrium: + nreps = 2 + else: + nreps = 1 + + # get number of frames and echoes + npulses = flip.shape[0] + + # define preparation + Prep = blocks.InversionPrep(TI, T1, T2, weight, k, inv_props) + + # prepare RF pulse + RF = blocks.ExcPulse(states, B1, rf_props) + + # prepare free precession period + X, XS = blocks.SSFPFidStep( + states, TE, TR, T1, T2, weight, k, chemshift, D, v, grad_props + ) + + for r in range(nreps): + # magnetization prep + states = Prep(states) + + # actual sequence loop + for n in range(npulses): + # apply pulse + states = RF(states, flip[n]) + + # relax, recover and record signal for each TE + states = X(states) + signal[n] = ops.observe(states, RF.phi) + + # relax, recover and spoil + states = XS(states) + + return ops.susceptibility(signal, TE, df) +
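A minimal usage sketch for the function above (assumption: it is re-exported as ``deepmr.bloch.ssfpmrf``, consistent with the rest of the documentation); the flip angle schedule and tissue values are illustrative only:

import numpy as np
import deepmr

# illustrative variable flip angle train in [deg]
flip = np.concatenate((np.linspace(5.0, 60.0, 350), np.linspace(60.0, 2.0, 150)))

# signal only
sig = deepmr.bloch.ssfpmrf(flip, TR=10.0, T1=1000.0, T2=80.0, TI=20.0)

# signal plus derivatives with respect to T1 and T2
sig, dsig = deepmr.bloch.ssfpmrf(flip, TR=10.0, T1=1000.0, T2=80.0, TI=20.0, diff=("T1", "T2"))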
\ No newline at end of file
diff --git a/_modules/deepmr/bloch/model/t1t2shuffling.html b/_modules/deepmr/bloch/model/t1t2shuffling.html
new file mode 100644
index 00000000..9f389f0a
--- /dev/null
+++ b/_modules/deepmr/bloch/model/t1t2shuffling.html
@@ -0,0 +1,771 @@
+deepmr.bloch.model.t1t2shuffling — deepmr documentation
Source code for deepmr.bloch.model.t1t2shuffling

+"""T1-T2 Shuffling simulator"""
+
+__all__ = ["t1t2shuffling"]
+
+import numpy as np
+
+import dacite
+from dacite import Config
+
+from .. import blocks
+from .. import ops
+from . import epg
+
+
+
[docs]def t1t2shuffling( + flip, phases, ESP, TR, T1, T2, sliceprof=False, diff=None, device="cpu", **kwargs +): + """ + Simulate a T1T2Shuffling Spin Echo sequence. Only single-pool for now. + + Parameters + ---------- + flip : float | np.ndarray | torch.Tensor + Flip angle in ``[deg]`` of shape ``(npulses,)`` or ``(npulses, nmodes)``. + phases : float | np.ndarray | torch.Tensor + Refocusing angle phases in ``[deg]`` of shape ``(npulses,)`` or ``(npulses, nmodes)``. + ESP : float + Echo spacing in [ms]. + TR : float + Repetition time in [ms]. + T1 : float | np.ndarray | torch.Tensor + Longitudinal relaxation time for main pool in ``[ms]``. + T2 : float | np.ndarray | torch.Tensor + Transverse relaxation time for main pool in ``[ms]``. + sliceprof : float | np.ndarray | torch.Tensor + Excitation slice profile (i.e., flip angle scaling across slice). + If ``False``, pulse are non selective. If ``True``, pulses are selective but ideal profile is assumed. + If array, flip angle scaling along slice is simulated. Defaults to ``False``. + spoil_inc : float, optional + RF spoiling increment in ``[deg]``. Defaults to ``117°``. + diff : str | tuple[str], optional + String or tuple of strings, saying which arguments + to get the signal derivative with respect to. + Defaults to ``None`` (no differentation). + device : str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + Defaults to ``cpu``. + + Other Parameters + ---------------- + nstates : int, optional + Maximum number of EPG states to be retained during simulation. + High numbers improve accuracy but decrease performance. + Defaults to ``10``. + max_chunk_size : int, optional + Maximum number of atoms to be simulated in parallel. + High numbers increase speed and memory footprint. + Defaults to ``natoms``. + verbose : bool, optional + If ``True``, prints execution time for signal (and gradient) calculations. + Defaults to ``False``. + B1 : float | np.ndarray | torch.Tensor , optional + Flip angle scaling factor (``1.0 := nominal flip angle``). + Defaults to ``None``. + B0 : float | np.ndarray | torch.Tensor , optional + Bulk off-resonance in [Hz]. Defaults to ``None`` + B1Tx2 : float | np.ndarray | torch.Tensor + Flip angle scaling factor for secondary RF mode (``1.0 := nominal flip angle``). + Defaults to ``None``. + B1phase : float | np.ndarray | torch.Tensor + B1 relative phase in ``[deg]``. (``0.0 := nominal rf phase``). + Defaults to ``None``. 
+ + """ + # constructor + init_params = { + "flip": flip, + "phases": phases, + "ESP": ESP, + "TR": TR, + "T1": T1, + "T2": T2, + "diff": diff, + "device": device, + **kwargs, + } + + # get verbosity + if "verbose" in init_params: + verbose = init_params["verbose"] + else: + verbose = False + + # get verbosity + if "asnumpy" in init_params: + asnumpy = init_params["asnumpy"] + else: + asnumpy = True + + # get selectivity: + if sliceprof: + selective = True + else: + selective = False + + # add moving pool if required + if selective and "v" in init_params: + init_params["moving"] = True + + # excitation pulse properties + exc_props = {"slice_selective": selective} + if "exc_flip" in kwargs: + exc_props["flip"] = kwargs["exc_flip"] + else: + exc_props["flip"] = 90.0 + + # refocusing pulse properties + rf_props = {"slice_selective": selective} + if np.isscalar(sliceprof) is False: + rf_props["slice_profile"] = kwargs["sliceprof"] + + # get nlocs + if "nlocs" in init_params: + nlocs = init_params["nlocs"] + else: + if selective: + nlocs = 15 + else: + nlocs = 1 + + # interpolate slice profile: + if "slice_profile" in rf_props: + nlocs = min(nlocs, len(rf_props["slice_profile"])) + else: + nlocs = 1 + + # assign nlocs + init_params["nlocs"] = nlocs + + # put all properties together + props = {"exc_props": exc_props, "rf_props": rf_props} + + # initialize simulator + simulator = dacite.from_dict( + T1T2Shuffling, init_params, config=Config(check_types=False) + ) + + # run simulator + if diff: + # actual simulation + sig, dsig = simulator(flip=flip, phases=phases, ESP=ESP, TR=TR, props=props) + + # flatten + sig, dsig = sig.swapaxes(-1, -2), dsig.swapaxes(-1, -2) + sig = sig.reshape(-1, sig.shape[-1] * sig.shape[-2]) + dsig = dsig.reshape(-1, sig.shape[-1] * sig.shape[-2]) + + # post processing + if asnumpy: + sig = sig.detach().cpu().numpy() + dsig = dsig.detach().cpu().numpy() + + # prepare info + info = {"trun": simulator.trun, "tgrad": simulator.tgrad} + if verbose: + return sig, dsig, info + else: + return sig, dsig + else: + # actual simulation + sig = simulator(flip=flip, phases=phases, ESP=ESP, TR=TR, props=props) + + # flatten + sig, dsig = sig.swapaxes(-1, -2) + sig = sig.reshape(-1, sig.shape[-1] * sig.shape[-2]) + + # post processing + if asnumpy: + sig = sig.cpu().numpy() + + # prepare info + info = {"trun": simulator.trun} + if verbose: + return sig, info + else: + return sig
+ + +# %% utils +class T1T2Shuffling(epg.EPGSimulator): + """Class to simulate T1-T2 Shuffling.""" + + @staticmethod + def sequence(flip, phases, ESP, TR, props, T1, T2, B1, states, signal): + # parsing pulses and grad parameters + exc_props = props["exc_props"] + rf_props = props["rf_props"] + + # get number of frames and echoes + npulses = flip.shape[0] + + # define preparation + Exc = blocks.ExcPulse(states, B1, exc_props) + + # prepare RF pulse + RF = blocks.ExcPulse(states, B1, rf_props) + + # prepare free precession period + Xpre, Xpost = blocks.FSEStep(states, ESP, T1, T2) + + # magnetization prep + states = Exc(states, exc_props["flip"]) + + # get recovery times + rectime = TR - (npulses + 1) * ESP # T + 1 to account for fast recovery + + # actual sequence loop + for n in range(npulses): + # relax, recover and shift for half echo spacing + states = Xpre(states) + + # apply refocusing + states = RF(states, flip[n], phases[n]) + + # relax, recover and spoil for half echo spacing + states = Xpost(states) + + # observe magnetization + signal[n] = ops.observe(states, RF.phi) + + return ops.t1sat(signal * 1j, rectime, T1) +
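A minimal usage sketch for the function above (assumption: it is re-exported as ``deepmr.bloch.t1t2shuffling``); the refocusing train and timing are illustrative only:

import numpy as np
import deepmr

necho = 64
flip = 120.0 * np.ones(necho, dtype=np.float32)    # refocusing flip angles [deg]
phases = 90.0 * np.ones(necho, dtype=np.float32)   # refocusing phases [deg] (CPMG-like)

sig = deepmr.bloch.t1t2shuffling(flip, phases, ESP=5.0, TR=3000.0, T1=1000.0, T2=80.0)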
\ No newline at end of file
diff --git a/_modules/deepmr/bloch/ops/_adc_op.html b/_modules/deepmr/bloch/ops/_adc_op.html
new file mode 100644
index 00000000..1c81f1eb
--- /dev/null
+++ b/_modules/deepmr/bloch/ops/_adc_op.html
@@ -0,0 +1,642 @@
+deepmr.bloch.ops._adc_op — deepmr documentation
Source code for deepmr.bloch.ops._adc_op

+"""
+EPG signal recording operator.
+
+Can be used to record signal during simulation.
+"""
+__all__ = ["observe", "susceptibility", "t1sat"]
+
+import torch
+
+
+
[docs]def observe(states, phi=None): + """ + Store observable magnetization. + + Parameters + ---------- + states : dict + Input states matrix for free pools. + phi : torch.Tensor + Effective phase for signal demodulation. + + Returns + ------- + signal : torch.Tensor + Net observable magnetization at current timepoint. + + """ + # parse + F = states["F"] # (nstates, nlocs, npools, 3) + + # get transverse magnetization + mxy = F[0, ..., 0] # (nlocs, npools) + + # demodulate + if phi is not None: + mxy = mxy * torch.exp(-1j * phi) + + # sum across pools + mxy = mxy.sum(axis=-1).mean(axis=-1) + + return mxy
+ + +
[docs]def susceptibility(signal, time, z):
+    r"""
+    Apply static susceptibility effects (bulk decay and dephasing).
+
+    Parameters
+    ----------
+    signal : torch.Tensor
+        Net observable magnetization.
+    time : torch.Tensor
+        Time at which susceptibility effects are evaluated (e.g., echo time) in ``[ms]``.
+    z : torch.Tensor
+        Complex field ``R2* + 1j $\Delta$ B0``.
+
+    Returns
+    -------
+    signal : torch.Tensor
+        Damped and dephased net observable magnetization.
+
+    """
+    if time.shape[-1] != 1:  # multiecho
+        if signal.shape[-1] != time.shape[-1]:  # assume echo must be broadcasted
+            signal = signal[..., None]
+
+    # apply effect
+    if time.shape[-1] == 1 and time != 0:
+        signal = signal.clone() * torch.exp(-time * (z[..., 0] + 1j * z[..., 1]))
+
+    return signal
+ + +
[docs]def t1sat(signal, time, t1):
+    """
+    Apply T1 saturation effect.
+
+    Parameters
+    ----------
+    signal : torch.Tensor
+        Net observable magnetization.
+    time : torch.Tensor
+        Recovery time in ``[ms]``.
+    t1 : torch.Tensor
+        Longitudinal relaxation time in ``[ms]``.
+
+    Returns
+    -------
+    signal : torch.Tensor
+        Saturated net observable magnetization.
+
+    """
+    if time.shape[-1] != 1:  # multiecho
+        if signal.shape[-1] != time.shape[-1]:  # assume echo must be broadcasted
+            signal = signal[..., None]
+
+    # apply effect
+    if time.shape[-1] == 1 and time != 0:
+        E1 = torch.exp(-time / (t1 + 1e-15))
+        signal = signal.clone() * (1 - E1) / (1 - signal.clone() * E1)
+
+    return signal
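A small sketch of how ``susceptibility`` damps and dephases a recorded signal (assumption: it is re-exported as ``deepmr.bloch.susceptibility``). The last axis of ``z`` stacks the real (R2*) and imaginary (ΔB0) parts, as read by the function body; the numeric values below are illustrative only:

import torch
import deepmr

signal = torch.ones(1, dtype=torch.complex64)   # toy net transverse signal
TE = torch.as_tensor([5.0])                     # echo time [ms]
z = torch.as_tensor([[0.03, 0.06]])             # illustrative [R2*, dB0] pair per atom
damped = deepmr.bloch.susceptibility(signal, TE, z)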
\ No newline at end of file
diff --git a/_modules/deepmr/bloch/ops/_epg.html b/_modules/deepmr/bloch/ops/_epg.html
new file mode 100644
index 00000000..436ee969
--- /dev/null
+++ b/_modules/deepmr/bloch/ops/_epg.html
@@ -0,0 +1,671 @@
+deepmr.bloch.ops._epg — deepmr documentation
Source code for deepmr.bloch.ops._epg

+__all__ = ["EPGstates"]
+
+import numpy as np
+import torch
+
+
+
[docs]def EPGstates( + device, + batch_size, + nstates, + nlocs, + npulses, + npools=1, + weight=None, + model="single", + moving=False, +): + """ + EPG states matrix. + + Stores dephasing states for transverse and longitudinal magnetization. + + Parameters + ---------- + device : str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + batch_size : int + Number of different atoms (e.g., voxels) to be simultaneously simulated. + nstates : int + Maximum number of dephasing states. + nlocs : int + Number of spatial locations contributing to each atom states (e.g., slice points). + npulses : int + Number of RF pulses applied during the sequence. + npools : int + Number of pools contributing to signal (e.g., Free Water / Myelin Water). + weight : torch.Tensor[float], optional + Relative fraction for each pool. For single pool, this is the initial magnetization. + The default is ``None`` (i.e., ``M0 = [0, 0, 1]``). + model : str, optional + Type of signal model. If "mt" is in the model name, + include pure longitudinal states (bound pool). + The default is ``single`` (single pool model). + moving : bool, optional + Flag for moving spins. If ``True`` include a fresh magnetization pool + to replace states with ``v != 0``. The default is ``False``. + + Returns + ------- + out : dict + Dictionary with states (e.g., ``F``, ``Z``) and signal buffer. + + """ + # prepare output + out = {} + + if weight is not None: + weight = weight.clone() + if len(weight.shape) == 1: + weight = weight[None, :] + + if np.isscalar(npulses): + npulses = [npulses] + + # initialize free pool + # transverse + F = torch.zeros( + (batch_size, nstates, nlocs, npools, 2), dtype=torch.complex64, device=device + ) + F = {"real": F.real, "imag": F.imag} + + # initialize free pool + # longitudinal + Z = torch.zeros( + (batch_size, nstates, nlocs, npools), dtype=torch.complex64, device=device + ) + Z[:, 0, ...] = 1.0 + + if weight is not None: + Z = Z * weight[:, :npools][:, None, None] + Z = {"real": Z.real, "imag": Z.imag} + + # append + out["states"] = {"F": F, "Z": Z} + + # initialize moving pool + if moving: + # transverse + Fmoving = torch.zeros( + (batch_size, nstates, nlocs, npools, 2), + dtype=torch.complex64, + device=device, + ) + Fmoving = {"real": Fmoving.real, "imag": Fmoving.imag} + + # initialize free pool + # longitudinal + Zmoving = torch.zeros( + (batch_size, nstates, nlocs, npools), dtype=torch.complex64, device=device + ) + Zmoving[:, 0, ...] = 1.0 + if weight is not None: + Zmoving = Zmoving * weight[:, :npools][:, None, None] + Zmoving = {"real": Zmoving.real, "imag": Zmoving.imag} + + # append + out["states"]["moving"] = {} + out["states"]["moving"]["F"] = Fmoving + out["states"]["moving"]["Z"] = Zmoving + + # initialize bound pool + if model is not None and "mt" in model: + Zbound = torch.zeros( + (batch_size, nstates, nlocs, 1), dtype=torch.complex64, device=device + ) + Zbound[:, 0, :, :] = 1.0 + Zbound = Zbound * weight[:, -1][:, None, None, None] + Zbound = {"real": Zbound.real, "imag": Zbound.imag} + out["states"]["Zbound"] = Zbound + + if moving: + out["states"]["moving"]["Zbound"] = Zbound.clone() + + # initialize output signal + sig = torch.zeros([batch_size] + npulses, dtype=torch.complex64, device=device) + sig = {"real": sig.real, "imag": sig.imag} + + # append + out["signal"] = sig + + return out
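A minimal sketch of the buffers created by ``EPGstates`` (assumption: it is re-exported as ``deepmr.bloch.EPGstates``), allocating the state and signal storage for a single atom and a 100-pulse train:

import deepmr

buffers = deepmr.bloch.EPGstates(
    device="cpu", batch_size=1, nstates=10, nlocs=1, npulses=100
)
print(buffers["states"]["F"]["real"].shape)  # (1, 10, 1, 1, 2): batch, states, locs, pools, F+/F-
print(buffers["signal"]["real"].shape)       # (1, 100): batch, pulses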
\ No newline at end of file
diff --git a/_modules/deepmr/bloch/ops/_gradient_op.html b/_modules/deepmr/bloch/ops/_gradient_op.html
new file mode 100644
index 00000000..bb2b3bad
--- /dev/null
+++ b/_modules/deepmr/bloch/ops/_gradient_op.html
@@ -0,0 +1,616 @@
+deepmr.bloch.ops._gradient_op — deepmr documentation
Source code for deepmr.bloch.ops._gradient_op

+"""
+EPG Gradient operators.
+
+Can be used to simulate dephasing due to spoiling gradients
+and perfect crushing (e.g., after a preparation or refocusing pulse).
+"""
+
+__all__ = ["Shift", "Spoil"]
+
+import torch
+
+from ._abstract_op import Operator
+
+
+
[docs]class Shift(Operator):
+    """
+    Shift operator corresponding to a 2nπ dephasing of the magnetization.
+
+    Parameters
+    ----------
+    states : dict
+        Input states matrix for free pools.
+
+    Returns
+    -------
+    states : dict
+        Output states matrix for free pools.
+
+    """
+
+    def apply(self, states):
+        """Apply states shifting."""
+        # parse
+        F = states["F"]
+
+        # apply
+        F[..., 0] = torch.roll(F[..., 0], 1, -3)  # Shift Fp states
+        F[..., 1] = torch.roll(F[..., 1], -1, -3)  # Shift Fm states
+        F[-1, ..., 1] = 0.0  # Zero highest Fm state
+        F[0, ..., 0] = F[0, ..., 1].conj()  # Fill in lowest Fp state
+
+        # prepare for output
+        states["F"] = F
+        return states
+ + +
[docs]class Spoil(Operator): + """ + Non-physical spoiling operator that zeros all transverse states. + + Parameters + ---------- + states : dict + Input states matrix for free pools. + + + Returns + ------- + states : dict + Output states matrix for free pools. + + """ + + def apply(self, states): + """Destroy transverse magnetization.""" + # parse + F = states["F"] + + # apply + F[..., 0] = 0.0 + F[..., 1] = 0.0 + + # prepare for output + states["F"] = F + return states
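A toy sketch of the two operators above on a hand-built state matrix (assumptions: ``Shift`` and ``Spoil`` are re-exported under ``deepmr.bloch`` and the base ``Operator`` constructor needs no required arguments):

import torch
import deepmr

# toy EPG state matrix of shape (nstates, nlocs, npools, 2), with only F0+ populated
F = torch.zeros(10, 1, 1, 2, dtype=torch.complex64)
F[0, ..., 0] = 1.0
states = {"F": F}

states = deepmr.bloch.Shift().apply(states)  # F0+ is pushed to the first dephasing order
states = deepmr.bloch.Spoil().apply(states)  # all transverse states are zeroed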
\ No newline at end of file
diff --git a/_modules/deepmr/bloch/ops/_motion_op.html b/_modules/deepmr/bloch/ops/_motion_op.html
new file mode 100644
index 00000000..365089c1
--- /dev/null
+++ b/_modules/deepmr/bloch/ops/_motion_op.html
@@ -0,0 +1,1013 @@
+deepmr.bloch.ops._motion_op — deepmr documentation
Source code for deepmr.bloch.ops._motion_op

+"""
+EPG Motion operators.
+
+Can be used to simulate bulk motion and (isotropic) diffusion damping.
+"""
+__all__ = ["DiffusionDamping", "FlowDephasing", "FlowWash"]
+
+import math
+
+import torch
+
+from ._abstract_op import Operator
+from ._utils import gamma_bar
+
+gamma_bar *= 1e6  # [MHz / T] -> [Hz / T]
+
+
+
[docs]class DiffusionDamping(Operator): + """ + Simulate diffusion effects by state dependent damping of the coefficients. + + Parameters + ---------- + device (str): str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + time : torch.Tensor) + Time step in ``[ms]``. + D : torch.Tensor + Apparent diffusion coefficient ``[um**2 ms**-1]``. + nstates : int + Number of EPG dephasing orders. + total_dephasing : float, optional + Total dephasing due to unbalanced gradients in ``[rad]``. + voxelsize : float, optional + Voxel thickness along unbalanced direction in ``[mm]``. + grad_amplitude : float, optional + Gradient amplitude along unbalanced direction in ``[mT / m]``. + grad_direction : str | torch.Tensor + Gradient orientation (``"x"``, ``"y"``, ``"z"`` or ``versor``). + + Notes + ----- + User must provide either total dephasing and voxel size or gradient amplitude and duration. + + Other Parameters + ---------------- + name : str + Name of the operator. + + """ + +
[docs] def __init__( + self, + device, + time, + D, + nstates, + total_dephasing=None, + voxelsize=None, + grad_amplitude=None, + grad_direction=None, + **kwargs + ): # noqa + super().__init__(**kwargs) + + # offload (not sure if this is needed?) + time = torch.as_tensor(time, dtype=torch.float32, device=device) + time = torch.atleast_1d(time) + D = torch.as_tensor(D, dtype=torch.float32, device=device) + D = torch.atleast_1d(D) + + # initialize direction + if grad_direction is None: + grad_direction = "z" + + if grad_direction == "x": + grad_direction = (1.0, 0.0, 0.0) + if grad_direction == "y": + grad_direction = (0.0, 1.0, 0.0) + if grad_direction == "z": + grad_direction = (0.0, 0.0, 1.0) + + # prepare operators + D1, D2 = _diffusion_damp_prep( + time, + D, + nstates, + total_dephasing, + voxelsize, + grad_amplitude, + grad_direction, + ) + + # assign matrices + self.D1 = D1 + self.D2 = D2
+ + def apply(self, states): + """ + Apply diffusion damping. + + Parameters + ---------- + states : dict + Input states matrix for free pools + and, optionally, for bound pools. + + Returns + ------- + states : dict + Output states matrix for free pools + and, optionally, for bound pools. + + """ + states = diffusion_damp_apply(states, self.D1, self.D2) + + # diffusion for moving spins + if "moving" in states: + states["moving"] = diffusion_damp_apply(states["moving"], self.D1, self.D2) + + return states
+ + +
[docs]class FlowDephasing(Operator): + """ + Simulate state dependent phase accrual of the EPG coefficients due to flow. + + Parameters + ---------- + device (str): str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + time : torch.Tensor) + Time step in ``[ms]``. + v : torch.Tensor + Spin velocity of shape ``(3,)`` in ``[cm / s]``. + If scalar, assume same direction as unbalanced gradient. + nstates : int + Number of EPG dephasing orders. + total_dephasing : float, optional + Total dephasing due to unbalanced gradients in ``[rad]``. + voxelsize : float, optional + Voxel thickness along unbalanced direction in ``[mm]``. + grad_amplitude : float, optional + Gradient amplitude along unbalanced direction in ``[mT / m]``. + grad_direction : str | torch.Tensor + Gradient orientation (``"x"``, ``"y"``, ``"z"`` or ``versor``). + + Notes + ----- + User must provide either total dephasing and voxel size or gradient amplitude and duration. + + """ + +
[docs] def __init__( + self, + device, + time, + v, + nstates, + total_dephasing=None, + voxelsize=None, + grad_amplitude=None, + grad_direction=None, + **kwargs + ): # noqa + super().__init__(**kwargs) + + # offload (not sure if this is needed?) + time = torch.as_tensor(time, dtype=torch.float32, device=device) + time = torch.atleast_1d(time) + v = torch.as_tensor(v, dtype=torch.float32, device=device) + v = torch.atleast_1d(v) + + # initialize direction + if grad_direction is None: + grad_direction = "z" + + if grad_direction == "x": + grad_direction = (1.0, 0.0, 0.0) + if grad_direction == "y": + grad_direction = (0.0, 1.0, 0.0) + if grad_direction == "z": + grad_direction = (0.0, 0.0, 1.0) + + # prepare operators + J1, J2 = _flow_dephase_prep( + time, + v, + nstates, + total_dephasing, + voxelsize, + grad_amplitude, + grad_direction, + ) + + # assign matrices + self.J1 = J1 + self.J2 = J2
+ + def apply(self, states): + """ + Apply flow dephasing. + + Parameters + ---------- + states : dict + Input states matrix for free pools + and, optionally, for bound pools. + + Returns + ------- + states : dict + Output states matrix for free pools + and, optionally, for bound pools. + + """ + states = flow_dephase_apply(states, self.J1, self.J2) + + # dephasing for moving spins + if "moving" in states: + states["moving"] = flow_dephase_apply(states["moving"], self.J1, self.J2) + + return states
+ + +
[docs]class FlowWash(Operator): + """ + Simulate EPG states replacement due to flow. + + device (str): str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + time : torch.Tensor) + Time step in ``[ms]``. + v : torch.Tensor + Spin velocity of shape ``(3,)`` in ``[cm / s]``. + If scalar, assume same direction as unbalanced gradient. + voxelsize : float, optional + Voxel thickness along unbalanced direction in ``[mm]``. + slice_direction : str | torch.Tensor + Slice orientation (``"x"``, ``"y"``, ``"z"`` or ``versor``). + + """ + +
[docs] def __init__( + self, device, time, v, voxelsize, slice_direction=None, **kwargs + ): # noqa + super().__init__(**kwargs) + + # offload (not sure if this is needed?) + time = torch.as_tensor(time, dtype=torch.float32, device=device) + time = torch.atleast_1d(time) + v = torch.as_tensor(v, dtype=torch.float32, device=device) + v = torch.atleast_1d(v) + + # initialize direction + if slice_direction is None: + slice_direction = "z" + + if slice_direction == "x": + slice_direction = (1.0, 0.0, 0.0) + if slice_direction == "y": + slice_direction = (0.0, 1.0, 0.0) + if slice_direction == "z": + slice_direction = (0.0, 0.0, 1.0) + + # prepare operators + Win, Wout = _flow_washout_prep(time, v, voxelsize, slice_direction) + + # assign matrices + self.Win = Win + self.Wout = Wout
+ + def apply(self, states): + """ + Apply spin replacement. + + Parameters + ---------- + states : dict + Input states matrix for free pools + and, optionally, for bound pools. + + Returns + ------- + states : dict + Output states matrix for free pools + and, optionally, for bound pools. + + """ + states = flow_washout_apply(states, self.Win, self.Wout) + return states
+ + +# %% local utils +def _diffusion_damp_prep( + time, + D, + nstates, + total_dephasing, + voxelsize, + grad_amplitude, + grad_direction, +): + # check inputs + if total_dephasing is None or voxelsize is None: + assert ( + grad_amplitude is not None + ), "Please provide either total_dephasing/voxelsize or grad_amplitude." + if grad_amplitude is None: + assert ( + total_dephasing is not None and voxelsize is not None + ), "Please provide either total_dephasing/voxelsize or grad_amplitude." + + # if total dephasing is not provided, calculate it: + if total_dephasing is None or voxelsize is None: + gamma = 2 * math.pi * gamma_bar + k0_2 = (gamma * grad_amplitude * time * 1e-6) ** 2 + else: + voxelsize = _get_projection(voxelsize, grad_direction) + k0_2 = (total_dephasing / voxelsize / 1e-3) ** 2 + + # cast to tensor + k0_2 = torch.as_tensor(k0_2, dtype=torch.float32, device=time.device) + k0_2 = torch.atleast_1d(k0_2) + + # actual operator calculation + b_prime = k0_2 * time * 1e-3 + + # calculate dephasing order + l = torch.arange(nstates, dtype=torch.float32, device=D.device)[:, None, None] + lsq = l**2 + + # calculate b-factor + b1 = b_prime * lsq + b2 = b_prime * (lsq + l + 1.0 / 3.0) + + # actual operator calculation + D1 = torch.exp(-b1 * D * 1e-9) + D2 = torch.exp(-b2 * D * 1e-9) + + return D1, D2 + + +def diffusion_damp_apply(states, D1, D2): + # parse + F, Z = states["F"], states["Z"] + + # apply + F[..., 0] = F[..., 0].clone() * D2 # Transverse damping + F[..., 1] = F[..., 1].clone() * D2 # Transverse damping + Z = Z.clone() * D1 # Longitudinal damping + + # prepare for output + states["F"], states["Z"] = F, Z + return states + + +def _flow_dephase_prep( + time, + v, + nstates, + total_dephasing, + voxelsize, + grad_amplitude, + grad_direction, +): + # check inputs + if total_dephasing is None or voxelsize is None: + assert ( + grad_amplitude is not None + ), "Please provide either total_dephasing/voxelsize or grad_amplitude." + if grad_amplitude is None: + assert ( + total_dephasing is not None and voxelsize is not None + ), "Please provide either total_dephasing/voxelsize or grad_amplitude." 
+ + # if total dephasing is not provided, calculate it: + if total_dephasing is None or voxelsize is None: + dk = 2 * math.pi * gamma_bar * grad_amplitude * time * 1e-6 + else: + voxelsize = _get_projection(voxelsize, grad_direction) + dk = total_dephasing / voxelsize / 1e-3 + + # calculate dephasing order + l = torch.arange(nstates, dtype=torch.float32, device=v.device)[:, None, None] + k0 = dk * l + + # get velocity + v = _get_projection(v, grad_direction) + + # actual operator calculation + J1 = torch.exp(-1j * k0 * v * 1e-5 * time) # cm / s -> m / ms + J2 = torch.exp(-1j * (k0 + 0.5 * dk) * v * 1e-5 * time) # cm / s -> m / ms + + return J1, J2 + + +def flow_dephase_apply(states, J1, J2): + # parse + F, Z = states["F"], states["Z"] + + # apply + F[..., 0] = F[..., 0].clone() * J2 # Transverse dephasing + F[..., 1] = F[..., 1].clone() * J2.conj() # Transverse dephasing + Z = Z.clone() * J1 # Longitudinal dephasing + + # prepare for output + states["F"], states["Z"] = F, Z + return states + + +def _flow_washout_prep(time, v, voxelsize, slice_direction=None): + # get effective velocity and voxel size + v = _get_projection(v, slice_direction) * 1e-2 # [cm / s] -> [mm / ms] + voxelsize = _get_projection(voxelsize, slice_direction) # [mm] + + # calculate washout rate + R = torch.abs(v / voxelsize) # [1 / ms] + + # flow wash-in/out + Win = R * time + Wout = 1 - Win + + # erase unphysical entries + Win = ( + 1.0 + - torch.heaviside( + Win - 1.0, torch.as_tensor(1.0, dtype=R.dtype, device=R.device) + ) + ) * Win + torch.heaviside( + Win - 1.0, torch.as_tensor(1.0, dtype=R.dtype, device=R.device) + ) + Wout = ( + torch.heaviside(Wout, torch.as_tensor(1.0, dtype=R.dtype, device=R.device)) + * Wout + ) + + return Win, Wout + + +def flow_washout_apply(states, Win, Wout): + # parse + F, Z = states["F"], states["Z"] + Fmoving, Zmoving = states["moving"]["F"], states["moving"]["Z"] + + # apply + F[..., 0] = Wout * F[..., 0].clone() + Win * Fmoving[..., 0] + F[..., 1] = Wout * F[..., 1].clone() + Win * Fmoving[..., 1] + Z = Wout * Z.clone() + Win * Zmoving + + # prepare for output + states["F"], states["Z"] = F, Z + return states + + +def _get_projection(arr, direction): + # prepare direction + arr = torch.as_tensor(arr) + direction = torch.as_tensor(direction) + + # get device + device = arr.device + arr = arr.to(device) + direction = direction.to(device) + + # expand if required + arr = torch.atleast_1d(arr) + if arr.shape[-1] == 1: + arr = torch.cat( + (direction[0] * arr, direction[1] * arr, direction[2] * arr), dim=-1 + ) + + return arr @ direction +
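A minimal sketch of ``DiffusionDamping`` applied to a toy state matrix (assumption: the class is re-exported as ``deepmr.bloch.DiffusionDamping``); the ADC, timing and gradient values are illustrative only:

import math
import torch
import deepmr

D = deepmr.bloch.DiffusionDamping(
    device="cpu",
    time=10.0,                    # gradient lobe / TR interval [ms]
    D=1.0,                        # apparent diffusion coefficient [um**2 / ms]
    nstates=10,
    total_dephasing=2 * math.pi,  # one dephasing cycle across the voxel [rad]
    voxelsize=1.0,                # [mm]
)

F = torch.zeros(10, 1, 1, 2, dtype=torch.complex64)
Z = torch.zeros(10, 1, 1, dtype=torch.complex64)
states = D.apply({"F": F, "Z": Z})  # state-order-dependent damping of F and Z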
\ No newline at end of file
diff --git a/_modules/deepmr/bloch/ops/_relaxation_op.html b/_modules/deepmr/bloch/ops/_relaxation_op.html
new file mode 100644
index 00000000..45024644
--- /dev/null
+++ b/_modules/deepmr/bloch/ops/_relaxation_op.html
@@ -0,0 +1,843 @@
+deepmr.bloch.ops._relaxation_op — deepmr documentation
Source code for deepmr.bloch.ops._relaxation_op

+"""
+EPG Relaxation operators.
+
+Can be used to simulate longitudinal and transverse relaxation either
+in the absence or presence of exchange (Chemical Exchange or MT), as well as
+accounting for chemical shift.
+"""
+
+__all__ = ["Relaxation"]
+
+import math
+
+import torch
+
+from ._abstract_op import Operator
+from ._utils import matrix_exp
+
+
+
[docs]class Relaxation(Operator): + """ + The "decay operator" applying relaxation and "regrowth" of the magnetization components. + + Parameters + ---------- + device (str): str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + time : torch.Tensor) + Time step in ``[ms]``. + T1 : torch.Tensor + Longitudinal relaxation time in ``[ms]`` of shape ``(npools,)``. + T2 : torch.Tensor + Transverse relaxation time in ``[ms]`` of shape ``(npools,)``. + weight : torch.Tensor, optional + Relative pool fractions of shape ``(npools,)``. + k : torch.Tensor, optional + Exchange matrix of shape ``(npools, npools)`` in ``[s**-1]``. + df : torch.Tensor, optional + Chemical shift in ``[Hz]`` of shape ``(npools,)``. + + Other Parameters + ---------------- + name : str + Name of the operator. + + """ + +
[docs] def __init__( + self, device, time, T1, T2, weight=None, k=None, df=None, **kwargs + ): # noqa + super().__init__(**kwargs) + + # offload (not sure if this is needed?) + time = torch.as_tensor(time, dtype=torch.float32, device=device) + time = torch.atleast_1d(time) + T1 = torch.as_tensor(T1, dtype=torch.float32, device=device) + T1 = torch.atleast_1d(T1) + T2 = torch.as_tensor(T2, dtype=torch.float32, device=device) + T2 = torch.atleast_1d(T2) + + # cast to tensors + if weight is not None: + weight = torch.as_tensor(weight, dtype=torch.float32, device=device) + if k is not None: + k = torch.as_tensor(k, dtype=torch.float32, device=device) + k = _prepare_exchange(weight, k) + + if df is not None: + df = torch.as_tensor(df, dtype=torch.float32, device=device) + df = torch.atleast_1d(df) + + # prepare operators + if weight is None or k is None: + E2 = _transverse_relax_prep(time, T2) + E1, rE1 = _longitudinal_relax_prep(time, T1) + + # assign functions + self._transverse_relax_apply = _transverse_relax_apply + self._longitudinal_relax_apply = _longitudinal_relax_apply + + else: + E2, self._transverse_relax_apply = _transverse_relax_exchange_prep( + time, T2, k, df + ) + E1, rE1 = _longitudinal_relax_exchange_prep(time, T1, weight, k) + + # assign functions + self._longitudinal_relax_apply = _longitudinal_relax_exchange_apply + + # assign matrices + self.E1 = E1 + self.rE1 = rE1 + self.E2 = E2
+ + def apply(self, states): + """ + Apply free precession (relaxation + precession + exchange + recovery). + + Parameters + ---------- + states : dict + Input states matrix for free pools + and, optionally, for bound pools. + + Returns + ------- + states : dict + Output states matrix for free pools + and, optionally, for bound pools. + + """ + states = self._transverse_relax_apply(states, self.E2) + states = self._longitudinal_relax_apply(states, self.E1, self.rE1) + + # relaxation for moving spins + if "moving" in states: + states["moving"] = self._transverse_relax_apply(states["moving"], self.E2) + states["moving"] = self._longitudinal_relax_apply( + states["moving"], self.E1, self.rE1 + ) + + return states
+ + +# %% local utils +def _prepare_exchange(weight, k): + # prepare + if k.shape[-1] == 1: # BM or MT + k0 = 0 * k + k1 = torch.cat((k0, k * weight[..., [0]]), axis=-1) + k2 = torch.cat((k * weight[..., [1]], k0), axis=-1) + k = torch.stack((k1, k2), axis=-2) + else: # BM-MT + k0 = 0 * k[..., [0]] + k1 = torch.cat((k0, k[..., [0]] * weight[..., [0]], k0), axis=-1) + k2 = torch.cat( + (k[..., [0]] * weight[..., [1]], k0, k[..., [1]] * weight[..., [1]]), + axis=-1, + ) + k3 = torch.cat((k0, k[..., [1]] * weight[..., [2]], k0), axis=-1) + k = torch.stack((k1, k2, k3), axis=-2) + + # finalize exchange + return _particle_conservation(k) + + +def _particle_conservation(k): + """Adjust diagonal of exchange matrix by imposing particle conservation.""" + # get shape + npools = k.shape[-1] + + for n in range(npools): + k[..., n, n] = 0.0 # ignore existing diagonal + k[..., n, n] = -k[..., n].sum(dim=-1) + + return k + + +def _transverse_relax_apply(states, E2): + # parse + F = states["F"] + + # apply + F[..., 0] = F[..., 0].clone() * E2 # F+ + F[..., 1] = F[..., 1].clone() * E2.conj() # F- + + # prepare for output + states["F"] = F + return states + + +def _longitudinal_relax_apply(states, E1, rE1): + # parse + Z = states["Z"] + + # apply + Z = Z.clone() * E1 # decay + Z[0] = Z[0].clone() + rE1 # regrowth + + # prepare for output + states["Z"] = Z + return states + + +def _transverse_relax_prep(time, T2): + # compute R2 + R2 = 1 / T2 + + # calculate operators + E2 = torch.exp(-R2 * time) + + return E2 + + +def _longitudinal_relax_prep(time, T1): + # compute R2 + R1 = 1 / T1 + + # calculate operators + E1 = torch.exp(-R1 * time) + rE1 = 1 - E1 + + return E1, rE1 + + +def _transverse_relax_exchange_apply(states, E2): + # parse + F = states["F"] + + # apply + F[..., 0] = torch.einsum("...ij,...j->...i", E2, F[..., 0].clone()) + F[..., 1] = torch.einsum("...ij,...j->...i", E2.conj(), F[..., 1].clone()) + + # prepare for output + states["F"] = F + return states + + +def _longitudinal_relax_exchange_apply(states, E1, rE1): + # parse + Z = states["Z"] + + # get ztot + if "Zbound" in states: + Zbound = states["Zbound"] + Ztot = torch.cat((Z, Zbound), axis=-1) + else: + Ztot = Z + + # apply + Ztot = torch.einsum("...ij,...j->...i", E1, Ztot.clone()) + Ztot[0] = Ztot[0].clone() + rE1 + + # prepare for output + if "Zbound" in states: + states["Z"] = Ztot[..., :-1] + states["Zbound"] = Ztot[..., [-1]] + else: + states["Z"] = Ztot + return states + + +def _transverse_relax_exchange_prep(time, T2, k, df=None): + # compute R2 + R2 = 1 / T2 + + # add chemical shift + if df is not None: + R2tot = R2 + 1j * 2 * math.pi * df * 1e-3 # (account for time in [ms]) + else: + R2tot = R2 + + # get npools + npools = R2tot.shape[-1] + + # case 1: MT + if npools == 1: + return torch.exp(-R2tot * time), _transverse_relax_apply + + # case 2: BM or BM-MT + else: + # cast to complex + R2tot = R2tot.to(torch.complex64) + + # recovery + Id = torch.eye(npools, dtype=R2tot.dtype, device=R2tot.device) + + # coefficients + lambda2 = ( + k[..., :npools, :npools] * 1e-3 - R2tot[:, None] * Id + ) # assume MT pool is the last + + # actual operators + E2 = matrix_exp(lambda2 * time) + + return E2, _transverse_relax_exchange_apply + + +def _longitudinal_relax_exchange_prep(time, T1, weight, k): + # compute R2 + R1 = 1 / T1 + + # get npools + npools = R1.shape[-1] + + if weight.shape[-1] == npools + 1: # MT case + R1 = torch.cat((R1, R1[..., [0]]), axis=-1) + npools += 1 + + # cast to complex + R1 = R1.to(torch.complex64) + + # recovery + Id = 
torch.eye(npools, dtype=R1.dtype, device=R1.device) + C = weight * R1 + + # coefficients + lambda1 = k * 1e-3 - R1 * Id + + # actual operators + E1 = matrix_exp(lambda1 * time) + rE1 = torch.einsum("...ij,...j->...i", (E1 - Id), torch.linalg.solve(lambda1, C)) + + return E1, rE1 +
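A minimal single-pool sketch of the ``Relaxation`` operator (assumption: it is re-exported as ``deepmr.bloch.Relaxation``); timing and tissue values are illustrative only:

import torch
import deepmr

E = deepmr.bloch.Relaxation(device="cpu", time=10.0, T1=1000.0, T2=80.0)

F = torch.zeros(10, 1, 1, 2, dtype=torch.complex64)
Z = torch.zeros(10, 1, 1, dtype=torch.complex64)
Z[0] = 1.0                             # thermal equilibrium in the zeroth order
states = E.apply({"F": F, "Z": Z})     # F decays with T2, Z recovers towards 1 with T1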
\ No newline at end of file
diff --git a/_modules/deepmr/bloch/ops/_rf_pulses_op.html b/_modules/deepmr/bloch/ops/_rf_pulses_op.html
new file mode 100644
index 00000000..c1ac0288
--- /dev/null
+++ b/_modules/deepmr/bloch/ops/_rf_pulses_op.html
@@ -0,0 +1,1117 @@
+deepmr.bloch.ops._rf_pulses_op — deepmr documentation
Source code for deepmr.bloch.ops._rf_pulses_op

+"""
+EPG RF Pulses operators.
+
+Can be used to simulate different types of RF pulses (soft and hard)
+and multiple transmit coil modes.
+"""
+
+__all__ = ["AdiabaticPulse", "RFPulse"]
+
+import math
+
+import numpy as np
+import scipy.interpolate
+import torch
+
+from ._abstract_op import Operator
+from ._stats import pulse_analysis
+from ._utils import gamma
+
+
+class BasePulse(Operator):
+    """
+    Operator representing an RF pulse.
+
+    Parameters
+    ----------
+    device : str
+        Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``).
+    nlocs : int
+        Number of spatial locations for slice profile simulation.
+    alpha : torch.Tensor, optional
+        Pulse flip angle in ``[deg]`` of shape ``(nmodes,)``.
+        The default is ``0.0 [deg]``.
+    phi : torch.Tensor, optional
+        Pulse phase in ``[deg]`` of shape ``(nmodes,)``.
+        The default is ``0.0 [deg]``.
+
+    Other Parameters
+    ----------------
+    name : str
+        Name of the operator.
+    rf_envelope : torch.Tensor
+        Pulse time envelope.
+    duration : float
+        Pulse duration in ``[ms]``.
+    b1rms : float
+        Pulse root-mean-squared B1 in ``[uT / deg]``,
+        when the pulse is scaled such that ``flip angle = 1.0 [deg]``.
+    freq_offset : float
+        Pulse frequency offset in ``[Hz]``.
+
+    """
+
+    def __init__(self, device, alpha=0.0, phi=0.0, **props):
+        super().__init__(**props)
+
+        # save device
+        self.device = device
+
+        # get pulse stats
+        self.b1rms = None  # pulse root-mean-squared b1 (for alpha=1 deg)
+        self.freq_offset = None  # pulse frequency offset [Hz] (nbands,)
+        self.G = None
+
+        # slice selection
+        if "slice_selective" in props:
+            self.slice_selective = props["slice_selective"]
+
+        # duration
+        if "duration" in props:
+            self.tau = torch.as_tensor(
+                props["duration"], dtype=torch.float32, device=device
+            )
+
+        # frequency offset
+        if "freq_offset" in props:
+            self.freq_offset = torch.as_tensor(
+                props["freq_offset"], dtype=torch.float32, device=device
+            )
+
+        # b1rms
+        # calculate from envelope...
+        if "rf_envelope" in props and "duration" in props:
+            info, _, _, _, _ = pulse_analysis(
+                props["rf_envelope"],
+                props["duration"],
+            )
+
+            # b1rms
+            self.b1rms = torch.as_tensor(
+                info["b1rms"], dtype=torch.float32, device=device
+            )
+
+        # ...or directly get from input
+        # b1rms
+        if "b1rms" in props:
+            self.b1rms = torch.as_tensor(
+                props["b1rms"], dtype=torch.float32, device=device
+            )
+
+        # calculate absorption linewidth and local field fluctuation
+        if self.freq_offset is not None:
+            G = super_lorentzian_lineshape(self.freq_offset) * 1e3  # s -> ms
+            G = torch.as_tensor(G, dtype=torch.float32, device=device)
+            self.G = torch.atleast_1d(G)
+        else:
+            G = super_lorentzian_lineshape(0.0) * 1e3  # s -> ms
+            G = torch.as_tensor(G, dtype=torch.float32, device=device)
+            self.G = torch.atleast_1d(G)
+
+        # initialize saturation
+        self._initialize_saturation()
+
+        # default slice profile
+        slice_profile = torch.as_tensor(1.0, dtype=torch.float32, device=device)
+        self.slice_profile = torch.atleast_1d(slice_profile)
+
+        # default B1 value
+        B1 = torch.ones(1, dtype=torch.float32, device=device)
+
+        # set B1
+        B1abs = B1.abs()
+        self.B1abs = torch.atleast_1d(B1abs)
+        B1angle = B1.angle()
+        self.B1angle = torch.atleast_1d(B1angle)
+
+    def prepare_rotation(self, alpha, phi):
+        """
+        Prepare the matrix describing rotation due to RF pulse.
+
+        Parameters
+        ----------
+        alpha : torch.Tensor
+            Pulse flip angle in ``[deg]`` of shape ``(nmodes,)``.
+        phi : torch.Tensor
+            Pulse phase in ``[deg]`` of shape ``(nmodes,)``.
+
+        """
+        # get device
+        device = self.device
+
+        # get B1
+        B1abs = self.B1abs
+        B1angle = self.B1angle
+
+        # cast to tensor if needed
+        alpha = torch.as_tensor(alpha, dtype=torch.float32, device=device)
+        alpha = torch.atleast_1d(alpha)
+        phi0 = torch.as_tensor(phi, dtype=torch.float32, device=device)
+        phi0 = torch.atleast_1d(phi0)
+
+        # convert from degrees to radians
+        alpha = torch.deg2rad(alpha)
+        phi0 = torch.deg2rad(phi0)
+
+        # apply B1 effect
+        fa = (B1abs * alpha).sum(axis=-1)
+        phi = (phi0 + B1angle).sum(axis=-1)
+
+        # apply slice profile
+        if self.slice_profile is not None:
+            fa = self.slice_profile * fa
+
+        # calculate operator
+        T00 = torch.cos(fa / 2) ** 2
+        T01 = torch.exp(2 * 1j * phi) * (torch.sin(fa / 2)) ** 2
+        T02 = -1j * torch.exp(1j * phi) * torch.sin(fa)
+        T10 = T01.conj()
+        T11 = T00
+        T12 = 1j * torch.exp(-1j * phi) * torch.sin(fa)
+        T20 = -0.5 * 1j * torch.exp(-1j * phi) * torch.sin(fa)
+        T21 = 0.5 * 1j * torch.exp(1j * phi) * torch.sin(fa)
+        T22 = torch.cos(fa)
+
+        # build rows
+        T0 = [T00[..., None], T01[..., None], T02[..., None]]
+        T1 = [T10[..., None], T11[..., None], T12[..., None]]
+        T2 = [T20[..., None], T21[..., None], T22[..., None]]
+
+        # build matrix
+        T = [T0, T1, T2]
+
+        # keep matrix
+        self.T = T
+
+        # return phase for demodulation
+        self.phi = phi0.sum(axis=-1)
+
+    def prepare_saturation(self, alpha):
+        """
+        Prepare the matrix describing saturation due to RF pulse.
+
+        Parameters
+        ----------
+        alpha : torch.Tensor
+            Pulse flip angle in ``[deg]`` of shape ``(nmodes,)``.
+
+        """
+        if self.WT is not None:
+            # get device
+            device = self.device
+
+            # get B1
+            B1abs = self.B1abs
+
+            # cast to tensor if needed
+            alpha = torch.as_tensor(alpha, dtype=torch.float32, device=device)
+            alpha = torch.atleast_1d(alpha)
+
+            # convert from degrees to radians
+            alpha = torch.deg2rad(alpha)
+
+            # apply B1 effect
+            fa = (B1abs * alpha).sum(axis=-1)
+
+            # apply slice profile
+            if self.slice_profile is not None:
+                fa = self.slice_profile * fa
+
+            # get scale
+            scale = fa**2
+
+            # actual calculation
+            self.S = torch.exp(scale * self.WT)
+
+    def _initialize_saturation(self):
+        # build operator
+        try:
+            # get parameters
+            tau = self.tau  # [ms]
+            b1rms = self.b1rms  # [uT]
+            G = self.G  # [ms]
+
+            # calculate W and D
+            W = math.pi * (gamma * 1e-3) ** 2 * b1rms**2 * G
+            self.WT = -W * tau
+
+        except:
+            self.WT = None
+
+    def _check_saturation_operator(self):
+        if self.WT is None:
+            missing = []
+            msg = " - please provide tau and either pulse envelope or its b1rms and frequency offset."
+            if self.tau is None:
+                missing.append("Tau")
+            if self.b1rms is None:
+                missing.append("B1rms")
+            if self.freq_offset is None:
+                missing.append("Frequency Offset")
+            missing = ", ".join(missing)
+            raise RuntimeError(f"{missing} not provided" + msg)
+
+    def apply(self, states, alpha=None, phi=0.0):
+        """
+        Apply RF pulse (rotation + saturation).
+
+        Parameters
+        ----------
+        states : dict
+            Input states matrix for free pools
+            and, optionally, for bound pools.
+        alpha : torch.Tensor, optional
+            Flip angle in ``[deg]``.
+        phi : torch.Tensor, optional
+            RF phase in ``[deg]``.
+
+        Returns
+        -------
+        states : dict
+            Output states matrix for free pools
+            and, optionally, for bound pools.
+
+        """
+        # rotate free pools
+        if alpha is not None:
+            self.prepare_rotation(alpha, phi)
+        states = _apply_rotation(states, self.T)
+
+        # rotate moving pools
+        if "moving" in states and self.slice_selective is False:
+            states["moving"] = _apply_rotation(states["moving"], self.T)
+
+        # saturate bound pool
+        if "Zbound" in states:
+            if alpha is not None:
+                self.prepare_saturation(alpha)
+            states = _apply_saturation(states, self.S)
+
+            # saturate moving pools
+            if "moving" in states and self.slice_selective is False:
+                states["moving"] = _apply_saturation(states["moving"], self.S)
+
+        return states
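As a usage sketch, ``apply`` expects a dictionary of EPG configuration states; judging from ``_apply_rotation`` further below, ``F`` is complex with a trailing size-2 axis (F+, F-) while ``Z`` lacks it. Shapes here are illustrative only, and ``rf`` is assumed to be an already constructed pulse operator (e.g., an ``RFPulse``):

>>> import torch
>>> states = {
...     "F": torch.zeros(1, 10, 2, dtype=torch.complex64),  # illustrative: one location, 10 configuration orders
...     "Z": torch.zeros(1, 10, dtype=torch.complex64),
... }
>>> states["Z"][..., 0] = 1.0                                # start from thermal equilibrium
>>> states = rf.apply(states, alpha=90.0, phi=0.0)           # rotate free pools (and saturate a bound pool, if present)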
+
+
+
[docs]class RFPulse(BasePulse): + """ + Operator representing a RF pulse. + + Parameters + ---------- + device : str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + nlocs : int + Number of spatial locations for slice profile simulation. + alpha : torch.Tensor, optional + Pulse flip angle in ``[deg]`` of shape ``(nmodes,)``. + The default is ``0.0 [deg]``. + phi : torch.Tensor, optional + Pulse phase in ``[deg]`` of shape ``(nmodes,)``. + The default is ``0.0 [deg]``. + B1 : torch.Tensor, optional + Flip angle scaling due to B1+ inhomogeneities + of shape ``(nmodes,)``. The default is ``1.0``. + + Other Parameters + ---------------- + name : str + Name of the operator. + rf_envelope : torch.Tensor + Pulse time envelope. + duration : float + Pulse duration in ``[ms]``. + b1rms : float + Pulse root-mean-squared B1 in ``[uT / deg]``, + when pulse is scaled such as ``flip angle = 1.0 [deg]``. + freq_offset : float + Pulse frequency offset in ``[Hz]``. + + """ + +
[docs] def __init__(self, device, nlocs=None, alpha=0.0, phi=0.0, B1=1.0, **props): # noqa + # base initialization + super().__init__(device, alpha, phi, **props) + + # slice selectivity + if "slice_selective" in props: + self.slice_selective = props["slice_selective"] + elif "slice_profile" in props: + self.slice_selective = True + else: + self.slice_selective = False + + # calculate from envelope... + if "rf_envelope" in props and "duration" in props: + # slice profile + if self.slice_selective: + _, slice_profile, _, _, _ = pulse_analysis( + props["rf_envelope"], props["duration"], npts=2 * nlocs + ) + self.slice_profile = torch.as_tensor( + abs(slice_profile), dtype=torch.float32, device=device + ) + self.slice_profile = torch.atleast_1d(slice_profile.squeeze())[:nlocs] + self.slice_profile = self.slice_profile / self.slice_profile[-1] + + # ...or directly get from input + # slice profile + if self.slice_selective and "slice_profile" in props: + slice_profile = torch.as_tensor( + props["slice_profile"], dtype=torch.float32, device=device + ) + self.slice_profile = torch.atleast_1d(slice_profile) + + # number of locations + if nlocs is not None: + self.nlocs = nlocs + else: + self.nlocs = len(self.slice_profile) + + # interpolate slice profile + if len(self.slice_profile) != self.nlocs: + x = np.linspace(0, 1, len(self.slice_profile)) + xq = np.linspace(0, 1, self.nlocs) + y = self.slice_profile.detach().cpu().numpy() + yq = _spline(x, y, xq) + slice_profile = torch.as_tensor(yq, dtype=torch.float32, device=device) + self.slice_profile = torch.atleast_1d(slice_profile) + self.slice_profile = self.slice_profile / self.slice_profile[-1] + + # default B1 value + if B1 is not None: + B1 = torch.as_tensor(B1, device=device) + B1abs = B1.abs() + self.B1abs = torch.atleast_1d(B1abs) + B1angle = B1.angle() + self.B1angle = torch.atleast_1d(B1angle) + + # actual preparation (if alpha is provided) + self.prepare_rotation(alpha, phi) + self.prepare_saturation(alpha)
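A hedged construction sketch for a slice-selective pulse built directly from a user-supplied profile (the profile values below are illustrative; alternatively, ``rf_envelope`` and ``duration`` can be passed and the profile is derived internally):

>>> import torch
>>> profile = torch.tensor([0.25, 0.50, 0.75, 1.00])  # relative flip-angle scaling across the slice
>>> rf = RFPulse("cpu", nlocs=4, alpha=60.0, slice_profile=profile)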
+ + +
[docs]class AdiabaticPulse(BasePulse): + """ + Operator representing an adiabatic RF pulse. + + Parameters + ---------- + device : str + Computational device (e.g., ``cpu`` or ``cuda:n``, with ``n=0,1,2...``). + alpha : torch.Tensor, optional + Pulse flip angle in ``[deg]`` of shape ``(nmodes,)``. + The default is ``0.0 [deg]``. + phi : torch.Tensor, optional + Pulse phase in ``[deg]`` of shape ``(nmodes,)``. + The default is ``0.0 [deg]``. + efficiency : torch.Tensor, optional + Pulse efficiency of shape ``(nmodes,)``. + The default is ``1.0``. + + Other Parameters + ---------------- + name : str + Name of the operator. + rf_envelope : torch.Tensor + Pulse time envelope. + duration : float + Pulse duration in ``[ms]``. + b1rms : float + Pulse root-mean-squared B1 in ``[uT / deg]``, + when pulse is scaled such as ``flip angle = 1.0 [deg]``. + freq_offset : float + Pulse frequency offset in ``[Hz]``. + + """ + +
[docs] def __init__(self, device, alpha=0.0, phi=0.0, efficiency=1.0, **props): # noqa + super().__init__(device, alpha, phi, **props) + + # actual preparation (if alpha is provided) + self.prepare_rotation(alpha, phi) + self.prepare_saturation(alpha) + + # compute efficiency + self.efficiency = efficiency
+ + def apply(self, states, alpha=None, phi=0.0): # noqa + states = super().apply(states, alpha, phi) + # states = states * self.efficiency + return states
+ + +# %% local utils +def _apply_rotation(states, rf_mat): + """Propagate EPG states through an RF rotation.""" + # parse + Fin, Zin = states["F"], states["Z"] + + # prepare out + Fout = Fin.clone() + Zout = Zin.clone() + + # apply + Fout[..., 0] = ( + rf_mat[0][0] * Fin[..., 0] + rf_mat[0][1] * Fin[..., 1] + rf_mat[0][2] * Zin + ) + Fout[..., 1] = ( + rf_mat[1][0] * Fin[..., 0] + rf_mat[1][1] * Fin[..., 1] + rf_mat[1][2] * Zin + ) + Zout = rf_mat[2][0] * Fin[..., 0] + rf_mat[2][1] * Fin[..., 1] + rf_mat[2][2] * Zin + + # prepare for output + states["F"], states["Z"] = Fout, Zout + return states + + +def _apply_saturation(states, sat_mat): + """Propagate EPG states through an RF saturation.""" + # parse + Zbound = states["Zbound"] + + # prepare + Zbound = sat_mat * Zbound.clone() + + # prepare for output + states["Zbound"] = Zbound + return states + + +def super_lorentzian_lineshape(f, T2star=12e-6, fsample=[-30e3, 30e3]): + """ + Super Lorentzian lineshape. + + Parameters + ---------- + f : float + Frequency offset of the pulse in ``[Hz]``. + T2star : float, optional + T2 of semisolid compartment in ``[ms]``. Defaults to ``12e-3 (12 us)``. + fsample : list | tuple, optional + Frequency range at which function is to be evaluated in ``[Hz]``. + Defaults to ``[-2e3, 2e3]``. + + Returns + ------- + G(omega) : np.ndarray + Actual lineshape at arbitrary frequency ``f``. + + Examples + -------- + >>> G = SuperLorentzianLineshape(12e-3, torch.arange(-500, 500)) + + References + ---------- + Shaihan Malik (c), King's College London, April 2019 + Matteo Cencini: Python porting (December 2022) + + """ + # clone + if isinstance(f, torch.Tensor): + f = f.clone() + f = f.cpu().numpy() + else: + f = np.asarray(f, dtype=np.float32) + f = np.atleast_1d(f) + + # as suggested by Gloor, we can interpolate the lineshape across from + # ± 1kHz + nu = 100 # <-- number of points for theta integral + + # compute over a wider range of frequencies + n = 128 + if fsample[0] > -30e3: + fmin = -33e3 + else: + fmin = 1.1 * fsample[0] + + if fsample[1] < 30e3: + fmax = 33e3 + else: + fmax = 1.1 * fsample[1] + + ff = np.linspace(fmin, fmax, n, dtype=np.float32) + + # np for Super Lorentzian, predefine + u = np.linspace(0.0, 1.0, nu) + du = np.diff(u)[0] + + # get integration grid + ug, ffg = np.meshgrid(u, ff, indexing="ij") + + # prepare integrand + g = np.sqrt(2 / math.pi) * T2star / np.abs(3 * ug**2 - 1) + g = g * np.exp(-2 * (2 * math.pi * ffg * T2star / (3 * ug**2 - 1)) ** 2) + + # integrate over theta + G = du * g.sum(axis=0) + + # interpolate zero frequency + po = np.abs(ff) < 1e3 # points to interpolate + pu = np.logical_not(po) * ( + np.abs(ff) < 2e3 + ) # points to use to estimate interpolator + + Gi = _spline(ff[pu], G[pu], ff[po]) + G[po] = Gi # replace interpolated + + # calculate + if np.isscalar(f): + idx = np.argmin(abs(ff - f)) + else: + idx = [np.argmin(abs(ff - f0)) for f0 in f] + idx = np.asarray(idx) + + # get actual absorption + G = G[idx] + + return G + + +def _spline(x, y, xq): + """Same as MATLAB cubic spline interpolation.""" + # interpolate + cs = scipy.interpolate.InterpolatedUnivariateSpline(x, y) + return cs(xq) +
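A usage sketch for the lineshape helper above (frequency offsets in ``[Hz]``; values are illustrative):

>>> import numpy as np
>>> G0 = super_lorentzian_lineshape(0.0)                             # absorption at zero frequency offset
>>> G = super_lorentzian_lineshape(np.arange(-500.0, 500.0, 10.0))   # absorption over a range of offsets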
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/fft.html b/_modules/deepmr/fft.html new file mode 100644 index 00000000..2ccd8b83 --- /dev/null +++ b/_modules/deepmr/fft.html @@ -0,0 +1,826 @@ + + + + + + + + + + + deepmr.fft — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.fft

+"""Sub-package containing Fast Fourier transform routines.
+
+FFT routines include:
+    
+* centered n-dimensional FFT and iFFT;
+* n-dimensional sparse uniform FFT/iFFT with embedded low rank subspace projection;
+* n-dimensional NUFFT with embedded low rank subspace projection.
+
+"""
+
+from . import fft as _fft
+from . import sparse_fft as _sparse_fft
+from . import nufft as _nufft
+
+from .fft import *  # noqa
+from .nufft import *  # noqa
+from .sparse_fft import *  # noqa
+
+__all__ = _fft.__all__
+__all__.extend(
+    [
+        "plan_toeplitz_fft",
+        "apply_sparse_fft_selfadj",
+        "plan_toeplitz_nufft",
+        "apply_nufft_selfadj",
+    ]
+)
+__all__.extend(["sparse_fft", "sparse_ifft", "nufft", "nufft_adj"])
+
+
+
[docs]def sparse_fft( + image, + indexes, + basis_adjoint=None, + device="cpu", + threadsperblock=128, +): + """ + N-dimensional sparse Fast Fourier Transform. + + Parameters + ---------- + image : torch.Tensor + Input image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + indexes : torch.Tensor + Sampled k-space points indexes of shape ``(ncontrasts, nviews, nsamples, ndims)``. + basis_adjoint : torch.Tensor, optional + Adjoint low rank subspace projection operator + of shape ``(ncoeffs, ncontrasts)``; can be ``None``. The default is ``None``. + device : str, optional + Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``). + The default is ``cpu``. + threadsperblock : int + CUDA blocks size (for GPU only). The default is ``128``. + + Returns + ------- + kspace : torch.Tensor + Output sparse kspace of shape ``(..., ncontrasts, nviews, nsamples)``. + + Notes + ----- + Sampled points indexes axes ordering is assumed to be ``(x, y)`` for 2D signals + and ``(x, y, z)`` for 3D. Conversely, axes ordering for grid shape is assumed to be ``(z, y, x)``. + + Indexes tensor shape is ``(ncontrasts, nviews, nsamples, ndim)``. If there are less dimensions + (e.g., single-shot or single contrast trajectory), assume singleton for the missing ones: + + * ``indexes.shape = (nsamples, ndim) -> (1, 1, nsamples, ndim)`` + * ``indexes.shape = (nviews, nsamples, ndim) -> (1, nviews, nsamples, ndim)`` + + """ + # get number of dimensions + ndim = indexes.shape[-1] + + # get shape if not provided + shape = image.shape[-ndim:] + + # plan interpolator + sampling_mask = _sparse_fft.prepare_sampling(indexes, shape, device) + + # perform actual interpolation + return _sparse_fft.apply_sparse_fft( + image, sampling_mask, basis_adjoint, threadsperblock=threadsperblock + )
+ + +
[docs]def sparse_ifft(
+    kspace,
+    indexes,
+    shape,
+    basis=None,
+    device="cpu",
+    threadsperblock=128,
+):
+    """
+    N-dimensional inverse sparse Fast Fourier Transform.
+
+    Parameters
+    ----------
+    kspace : torch.Tensor
+        Input sparse kspace of shape ``(..., ncontrasts, nviews, nsamples)``.
+    indexes : torch.Tensor
+        Sampled k-space points indexes of shape ``(ncontrasts, nviews, nsamples, ndims)``.
+    shape : int | Iterable[int]
+        Cartesian grid size of shape ``(ndim,)``.
+        If scalar, isotropic matrix is assumed.
+    basis : torch.Tensor, optional
+        Low rank subspace projection operator
+        of shape ``(ncontrasts, ncoeffs)``; can be ``None``. The default is ``None``.
+    device : str, optional
+        Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``).
+        The default is ``cpu``.
+    threadsperblock : int
+        CUDA blocks size (for GPU only). The default is ``128``.
+
+    Returns
+    -------
+    image : torch.Tensor
+        Output image of shape ``(..., ncontrasts, ny, nx)`` (2D)
+        or ``(..., ncontrasts, nz, ny, nx)`` (3D).
+
+    Notes
+    -----
+    Sampled points indexes axes ordering is assumed to be ``(x, y)`` for 2D signals
+    and ``(x, y, z)`` for 3D. Conversely, axes ordering for grid shape is assumed to be ``(z, y, x)``.
+
+    Indexes tensor shape is ``(ncontrasts, nviews, nsamples, ndim)``. If there are less dimensions
+    (e.g., single-shot or single contrast trajectory), assume singleton for the missing ones:
+
+    * ``indexes.shape = (nsamples, ndim) -> (1, 1, nsamples, ndim)``
+    * ``indexes.shape = (nviews, nsamples, ndim) -> (1, nviews, nsamples, ndim)``
+
+    """
+    # plan interpolator
+    sampling_mask = _sparse_fft.prepare_sampling(indexes, shape, device)
+
+    # perform actual interpolation
+    return _sparse_fft.apply_sparse_ifft(
+        kspace, sampling_mask, basis, threadsperblock=threadsperblock
+    )
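A round-trip sketch of the two sparse transforms above, using a fully sampled Cartesian index set (sizes are illustrative):

>>> import torch
>>> import deepmr
>>> image = torch.randn(1, 32, 32, dtype=torch.complex64)                       # (ncontrasts, ny, nx)
>>> ky, kx = torch.meshgrid(torch.arange(32), torch.arange(32), indexing="ij")
>>> indexes = torch.stack((kx.flatten(), ky.flatten()), dim=-1)                 # (nsamples, 2), (x, y) ordering
>>> ksp = deepmr.fft.sparse_fft(image, indexes)                                 # (..., 1, 1, nsamples)
>>> recon = deepmr.fft.sparse_ifft(ksp, indexes, shape=32)                      # (..., 1, 32, 32)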
+ + +
[docs]def nufft( + image, + coord, + shape=None, + basis_adjoint=None, + device="cpu", + threadsperblock=128, + width=4, + oversamp=1.25, +): + """ + N-dimensional Non-Uniform Fast Fourier Transform. + + Parameters + ---------- + image : torch.Tensor + Input image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + coord : torch.Tensor + K-space coordinates of shape ``(ncontrasts, nviews, nsamples, ndims)``. + Coordinates must be normalized between ``(-0.5 * shape, 0.5 * shape)``. + shape : int | Iterable[int], optional + Cartesian grid size of shape ``(ndim,)``. + If scalar, isotropic matrix is assumed. + The default is ``None`` (grid size equals to input data size, i.e. ``osf = 1``). + basis_adjoint : torch.Tensor, optional + Adjoint low rank subspace projection operator + of shape ``(ncoeffs, ncontrasts)``; can be ``None``. The default is ``None``. + device : str, optional + Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``). + The default is ``cpu``. + threadsperblock : int + CUDA blocks size (for GPU only). The default is ``128``. + width : int | Iterable[int], optional + Interpolation kernel full-width of shape ``(ndim,)``. + If scalar, isotropic kernel is assumed. + The default is ``2``. + oversamp : float | Iterable[float], optional + Grid oversampling factor of shape ``(ndim,)``. + If scalar, isotropic oversampling is assumed. + The default is ``1.125``. + + Returns + ------- + kspace : torch.Tensor + Output Non-Cartesian kspace of shape ``(..., ncontrasts, nviews, nsamples)``. + + Notes + ----- + Non-uniform coordinates axes ordering is assumed to be ``(x, y)`` for 2D signals + and ``(x, y, z)`` for 3D. Conversely, axes ordering for grid shape, kernel width + and Kaiser Bessel parameters are assumed to be ``(z, y, x)``. + + Coordinates tensor shape is ``(ncontrasts, nviews, nsamples, ndim)``. If there are less dimensions + (e.g., single-shot or single contrast trajectory), assume singleton for the missing ones: + + * ``coord.shape = (nsamples, ndim) -> (1, 1, nsamples, ndim)`` + * ``coord.shape = (nviews, nsamples, ndim) -> (1, nviews, nsamples, ndim)`` + + """ + # get number of dimensions + ndim = coord.shape[-1] + + # get shape if not provided + if shape is None: + shape = image.shape[-ndim:] + + # plan interpolator + nufft_plan = _nufft.plan_nufft(coord, shape, width, oversamp, device) + + # perform actual interpolation + return _nufft.apply_nufft( + image, nufft_plan, basis_adjoint, threadsperblock=threadsperblock + )
+ + +
[docs]def nufft_adj( + kspace, + coord, + shape, + basis=None, + device="cpu", + threadsperblock=128, + width=4, + oversamp=1.25, +): + """ + N-dimensional adjoint Non-Uniform Fast Fourier Transform. + + Parameters + ---------- + kspace : torch.Tensor + Input Non-Cartesian kspace of shape ``(..., ncontrasts, nviews, nsamples)``. + coord : torch.Tensor + K-space coordinates of shape ``(ncontrasts, nviews, nsamples, ndims)``. + Coordinates must be normalized between ``(-0.5 * shape, 0.5 * shape)``. + shape : int | Iterable[int] + Cartesian grid size of shape ``(ndim,)``. + If scalar, isotropic matrix is assumed. + basis : torch.Tensor, optional + Low rank subspace projection operator + of shape ``(ncontrasts, ncoeffs)``; can be ``None``. The default is ``None``. + device : str, optional + Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``). + The default is ``cpu``. + threadsperblock : int + CUDA blocks size (for GPU only). The default is ``128``. + width : int | Iterable[int], optional + Interpolation kernel full-width of shape ``(ndim,)``. + If scalar, isotropic kernel is assumed. + The default is ``2``. + oversamp : float | Iterable[float], optional + Grid oversampling factor of shape ``(ndim,)``. + If scalar, isotropic oversampling is assumed. + The default is ``1.125``. + + Returns + ------- + image : torch.Tensor + Output image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + + Notes + ----- + Non-uniform coordinates axes ordering is assumed to be ``(x, y)`` for 2D signals + and ``(x, y, z)`` for 3D. Conversely, axes ordering for grid shape, kernel width + and Kaiser Bessel parameters are assumed to be ``(z, y, x)``. + + Coordinates tensor shape is ``(ncontrasts, nviews, nsamples, ndim)``. If there are less dimensions + (e.g., single-shot or single contrast trajectory), assume singleton for the missing ones: + + * ``coord.shape = (nsamples, ndim) -> (1, 1, nsamples, ndim)`` + * ``coord.shape = (nviews, nsamples, ndim) -> (1, nviews, nsamples, ndim)`` + + """ + # plan interpolator + nufft_plan = _nufft.plan_nufft(coord, shape, width, oversamp, device) + + # perform actual interpolation + return _nufft.apply_nufft_adj( + kspace, nufft_plan, basis, threadsperblock=threadsperblock + )
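Similarly, a sketch of the non-uniform pair above, using a random (non-physical) trajectory purely to illustrate the expected shapes:

>>> import torch
>>> import deepmr
>>> image = torch.randn(1, 32, 32, dtype=torch.complex64)      # (ncontrasts, ny, nx)
>>> coord = 32.0 * (torch.rand(1, 64, 128, 2) - 0.5)           # (ncontrasts, nviews, nsamples, 2), in (-16, 16)
>>> ksp = deepmr.fft.nufft(image, coord)                       # (..., 1, 64, 128)
>>> dirty = deepmr.fft.nufft_adj(ksp, coord, shape=32)         # adjoint ("zero-filled") image, (..., 1, 32, 32)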
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/fft/fft.html b/_modules/deepmr/fft/fft.html new file mode 100644 index 00000000..b0af54e9 --- /dev/null +++ b/_modules/deepmr/fft/fft.html @@ -0,0 +1,715 @@ + + + + + + + + + + + deepmr.fft.fft — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.fft.fft

+"""FFT subroutines."""
+
+__all__ = ["fft", "ifft"]
+
+import numpy as np
+import torch
+
+
+
[docs]def fft(input, axes=None, norm="ortho", centered=True):
+    """
+    Centered Fast Fourier Transform.
+
+    Adapted from [1].
+
+    Parameters
+    ----------
+    input : np.ndarray | torch.Tensor
+        Input signal.
+    axes : Iterable[int], optional
+        Axes over which to compute the FFT.
+        If not specified, apply FFT over all the axes.
+    norm : str, optional
+        FFT normalization. The default is ``ortho``.
+    centered : bool, optional
+        FFT centering. The default is ``True``.
+
+    Returns
+    -------
+    output : np.ndarray | torch.Tensor
+        Output signal.
+
+    Examples
+    --------
+    >>> import torch
+    >>> import deepmr
+
+    First, create test image:
+
+    >>> image = torch.zeros(32, 32, dtype=torch.complex64)
+    >>> image[16, 16] = 1.0
+
+    We now perform a 2D FFT:
+
+    >>> kspace = deepmr.fft.fft(image)
+
+    We can visualize the data:
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots(1, 2)
+    >>> im = ax[0].imshow(abs(image))
+    >>> ax[0].set_title("Image", color="orangered", fontweight="bold")
+    >>> ax[0].axis("off")
+    >>> ax[0].set_alpha(0.0)
+    >>> fig.colorbar(im, ax=ax[0], shrink=0.5)
+    >>> ksp = ax[1].imshow(abs(kspace))
+    >>> ax[1].set_title("k-Space", color="orangered", fontweight="bold")
+    >>> ax[1].axis("off")
+    >>> ax[1].set_alpha(0.0)
+    >>> fig.colorbar(ksp, ax=ax[1], shrink=0.5)
+    >>> plt.show()
+
+    References
+    ----------
+    [1] https://github.com/mikgroup/sigpy
+
+    """
+    # check if we are using numpy arrays
+    if isinstance(input, np.ndarray):
+        isnumpy = True
+    else:
+        isnumpy = False
+
+    # make sure this is a tensor
+    input = torch.as_tensor(input)
+    ax = _normalize_axes(axes, input.ndim)
+    if centered:
+        output = torch.fft.fftshift(
+            torch.fft.fftn(torch.fft.ifftshift(input, dim=ax), dim=ax, norm=norm),
+            dim=ax,
+        )
+    else:
+        output = torch.fft.fftn(input, dim=ax, norm=norm)
+
+    if isnumpy:
+        output = np.asarray(output)
+
+    return output
+ + +
[docs]def ifft(input, axes=None, norm="ortho", centered=True):
+    """
+    Centered inverse Fast Fourier Transform.
+
+    Adapted from [1].
+
+    Parameters
+    ----------
+    input : np.ndarray | torch.Tensor
+        Input signal.
+    axes : Iterable[int], optional
+        Axes over which to compute the iFFT.
+        If not specified, apply iFFT over all the axes.
+    norm : str, optional
+        FFT normalization. The default is ``ortho``.
+    centered : bool, optional
+        FFT centering. The default is ``True``.
+
+    Returns
+    -------
+    output : np.ndarray | torch.Tensor
+        Output signal.
+
+    Examples
+    --------
+    >>> import torch
+    >>> import deepmr
+
+    First, create test k-space data:
+
+    >>> kspace = torch.ones(32, 32, dtype=torch.complex64)
+
+    We now perform a 2D iFFT:
+
+    >>> image = deepmr.fft.ifft(kspace)
+
+    We can visualize the data:
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots(1, 2)
+    >>> ksp = ax[0].imshow(abs(kspace))
+    >>> ax[0].set_title("k-Space", color="orangered", fontweight="bold")
+    >>> ax[0].axis("off")
+    >>> ax[0].set_alpha(0.0)
+    >>> fig.colorbar(ksp, ax=ax[0], shrink=0.5)
+    >>> im = ax[1].imshow(abs(image))
+    >>> ax[1].set_title("Image", color="orangered", fontweight="bold")
+    >>> ax[1].axis("off")
+    >>> ax[1].set_alpha(0.0)
+    >>> fig.colorbar(im, ax=ax[1], shrink=0.5)
+    >>> plt.show()
+
+    References
+    ----------
+    [1] https://github.com/mikgroup/sigpy
+
+    """
+    # check if we are using numpy arrays
+    if isinstance(input, np.ndarray):
+        isnumpy = True
+    else:
+        isnumpy = False
+
+    # make sure this is a tensor
+    input = torch.as_tensor(input)
+    ax = _normalize_axes(axes, input.ndim)
+    if centered:
+        output = torch.fft.fftshift(
+            torch.fft.ifftn(torch.fft.ifftshift(input, dim=ax), dim=ax, norm=norm),
+            dim=ax,
+        )
+    else:
+        output = torch.fft.ifftn(input, dim=ax, norm=norm)
+
+    if isnumpy:
+        output = np.asarray(output)
+
+    return output
+ + +# %% local subroutines +def _normalize_axes(axes, ndim): + if axes is None: + return tuple(range(ndim)) + else: + return tuple(a % ndim for a in sorted(axes)) +
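For ``centered=False`` the transforms above reduce to plain ``torch.fft`` calls, which gives a quick sanity check (illustrative):

>>> import torch
>>> import deepmr
>>> x = torch.randn(8, 8, dtype=torch.complex64)
>>> torch.allclose(deepmr.fft.fft(x, centered=False), torch.fft.fftn(x, norm="ortho"))
True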
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/fft/nufft.html b/_modules/deepmr/fft/nufft.html new file mode 100644 index 00000000..5dfe0933 --- /dev/null +++ b/_modules/deepmr/fft/nufft.html @@ -0,0 +1,1209 @@ + + + + + + + + + + + deepmr.fft.nufft — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.fft.nufft

+"""NUFFT subroutines."""
+
+__all__ = [
+    "plan_nufft",
+    "plan_toeplitz_nufft",
+    "apply_nufft",
+    "apply_nufft_adj",
+    "apply_nufft_selfadj",
+]
+
+import gc
+import math
+
+from dataclasses import dataclass
+
+import numpy as np
+import torch
+import torch.autograd as autograd
+
+from .._signal import resize as _resize
+
+from . import fft as _fft
+from . import _interp
+from . import toeplitz as _toeplitz
+
+
+def plan_nufft(coord, shape, width=4, oversamp=1.25, device="cpu"):
+    """
+    Precompute NUFFT object.
+
+    Parameters
+    ----------
+    coord : torch.Tensor
+        K-space coordinates of shape ``(ncontrasts, nviews, nsamples, ndims)``.
+        Coordinates must be normalized between ``(-0.5 * shape, 0.5 * shape)``.
+    shape : int | Iterable[int]
+        Cartesian grid size of shape ``(ndim,)``.
+        If scalar, isotropic matrix is assumed.
+    width : int | Iterable[int], optional
+        Interpolation kernel full-width of shape ``(ndim,)``.
+        If scalar, isotropic kernel is assumed.
+        The default is ``4``.
+    oversamp : float | Iterable[float], optional
+        Grid oversampling factor of shape ``(ndim,)``.
+        If scalar, isotropic oversampling is assumed.
+        The default is ``1.25``.
+    device : str, optional
+        Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``).
+        The default is ``cpu``.
+
+    Returns
+    -------
+    interpolator : NUFFTPlan
+        Structure containing sparse interpolator matrix:
+
+        * ndim (``int``): number of spatial dimensions.
+        * oversampling (``Iterable[float]``): grid oversampling factor (z, y, x).
+        * width (``Iterable[int]``): kernel width (z, y, x).
+        * beta (``Iterable[float]``): Kaiser Bessel parameter (z, y, x).
+        * os_shape (``Iterable[int]``): oversampled grid shape (z, y, x).
+        * shape (``Iterable[int]``): grid shape (z, y, x).
+        * interpolator (``Interpolator``): precomputed interpolator object.
+        * device (``str``): computational device.
+
+    Notes
+    -----
+    Non-uniform coordinates axes ordering is assumed to be ``(x, y)`` for 2D signals
+    and ``(x, y, z)`` for 3D. Conversely, axes ordering for grid shape, kernel width
+    and oversampling factors is assumed to be ``(y, x)`` (2D) and ``(z, y, x)`` (3D).
+
+    Coordinates tensor shape is ``(ncontrasts, nviews, nsamples, ndim)``. If there are less dimensions
+    (e.g., single-shot or single contrast trajectory), assume singleton for the missing ones:
+
+    * ``coord.shape = (nsamples, ndim) -> (1, 1, nsamples, ndim)``
+    * ``coord.shape = (nviews, nsamples, ndim) -> (1, nviews, nsamples, ndim)``
+
+    """
+    # make sure this is a tensor
+    coord = torch.as_tensor(coord)
+
+    # copy coord and switch to cpu
+    coord = coord.clone().cpu().to(torch.float32)
+
+    # get parameters
+    ndim = coord.shape[-1]
+
+    if np.isscalar(width):
+        width = np.asarray([width] * ndim, dtype=np.int16)
+    else:
+        width = np.asarray(width, dtype=np.int16)
+
+    if np.isscalar(oversamp):
+        oversamp = np.asarray([oversamp] * ndim, dtype=np.float32)
+    else:
+        oversamp = np.asarray(oversamp, dtype=np.float32)
+
+    # calculate Kaiser-Bessel beta parameter
+    beta = math.pi * (((width / oversamp) * (oversamp - 0.5)) ** 2 - 0.8) ** 0.5
+    if np.isscalar(shape):
+        shape = np.asarray([shape] * ndim, dtype=np.int16)
+    else:
+        shape = np.asarray(shape, dtype=np.int16)[-ndim:]
+
+    # check for Cartesian axes
+    is_cart = [
+        np.allclose(shape[ax] * coord[..., ax], np.round(shape[ax] * coord[..., ax]))
+        for ax in range(ndim)
+    ]
+    is_cart = np.asarray(is_cart[::-1])  # (z, y, x)
+
+    # Cartesian axes have osf = 1.0 and kernel width = 1 (no interpolation)
+    oversamp[is_cart] = 1.0
+    width[is_cart] = 1
+
+    # get oversampled grid shape
+    os_shape = _get_oversamp_shape(shape, oversamp, ndim)
+
+    # rescale trajectory
+    coord = _scale_coord(coord, shape[::-1], oversamp[::-1])
+
+    # compute interpolator
+    interpolator = _interp.plan_interpolator(coord, os_shape, width, beta, device)
+
+    # transform to tuples
+    ndim = int(ndim)
+    oversamp = tuple(oversamp)
+    width = tuple(width)
+    beta = tuple(beta)
+    os_shape = tuple(os_shape)
+    shape = tuple(shape)
+
+    return NUFFTPlan(ndim, oversamp, width, beta, os_shape, shape, interpolator, device)
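A sketch of planning once and reusing the plan with ``apply_nufft`` (defined later in this module); trajectory and sizes are illustrative:

>>> import torch
>>> coord = 32.0 * (torch.rand(1, 64, 128, 2) - 0.5)       # (ncontrasts, nviews, nsamples, 2)
>>> plan = plan_nufft(coord, shape=32)                     # precompute the interpolator once
>>> image = torch.randn(1, 32, 32, dtype=torch.complex64)
>>> ksp = apply_nufft(image, plan)                         # reuse the plan for every forward evaluation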
+
+
+
[docs]def plan_toeplitz_nufft(coord, shape, basis=None, dcf=None, width=4, device="cpu"): + """ + Compute spatio-temporal kernel for fast self-adjoint operation. + + Parameters + ---------- + coord : torch.Tensor + K-space coordinates of shape ``(ncontrasts, nviews, nsamples, ndims)``. + Coordinates must be normalized between ``(-0.5 * shape[i], 0.5 * shape[i])``, + with ``i = (z, y, x)``. + shape : int | Iterable[int] + Oversampled grid size of shape ``(ndim,)``. + If scalar, isotropic matrix is assumed. + basis : torch.Tensor, optional + Low rank subspace projection operator + of shape ``(ncontrasts, ncoeffs)``; can be ``None``. The default is ``None``. + dcf : torch.Tensor, optional + Density compensation function of shape ``(ncontrasts, nviews, nsamples)``. + The default is a tensor of ``1.0``. + width : int | Iterable[int], optional + Interpolation kernel full-width of shape ``(ndim,)``. + If scalar, isotropic kernel is assumed. + The default is ``3``. + device : str, optional + Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``). + The default is ``cpu``. + + Returns + ------- + toeplitz_kernel : GramMatrix + Structure containing Toeplitz kernel (i.e., Fourier transform of system tPSF). + + """ + return _toeplitz.plan_toeplitz(coord, shape, basis, dcf, width, device)
+ + +class ApplyNUFFT(autograd.Function): + @staticmethod + def forward(image, nufft_plan, basis_adjoint, weight, device, threadsperblock): + return _apply_nufft( + image, nufft_plan, basis_adjoint, weight, device, threadsperblock + ) + + @staticmethod + def setup_context(ctx, inputs, output): + _, nufft_plan, basis_adjoint, weight, device, threadsperblock = inputs + ctx.set_materialize_grads(False) + ctx.nufft_plan = nufft_plan + ctx.basis_adjoint = basis_adjoint + ctx.weight = weight + ctx.device = device + ctx.threadsperblock = threadsperblock + + @staticmethod + def backward(ctx, kspace): + nufft_plan = ctx.nufft_plan + basis_adjoint = ctx.basis_adjoint + if basis_adjoint is not None: + basis = basis_adjoint.conj().t() + else: + basis = None + weight = ctx.weight + device = ctx.device + threadsperblock = ctx.threadsperblock + + return ( + _apply_nufft_adj( + kspace, nufft_plan, basis, weight, device, threadsperblock + ), + None, + None, + None, + None, + None, + ) + + +def apply_nufft( + image, nufft_plan, basis_adjoint=None, weight=None, device=None, threadsperblock=128 +): + """ + Apply Non-Uniform Fast Fourier Transform. + + Parameters + ---------- + image : np.ndarray | torch.Tensor + Input image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + nufft_plan : NUFFTPlan + Pre-calculated NUFFT plan coefficients in sparse COO format. + basis_adjoint : torch.Tensor, optional + Adjoint low rank subspace projection operator + of shape ``(ncontrasts, ncoeffs)``; can be ``None``. The default is ``None``. + weight : np.ndarray | torch.Tensor, optional + Optional weight for output data samples. Useful to force adjointeness. + The default is ``None``. + device : str, optional + Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``). + The default is ``None`` (same as interpolator). + threadsperblock : int + CUDA blocks size (for GPU only). The default is ``128``. + + Returns + ------- + kspace : np.ndarray | torch.Tensor + Output Non-Cartesian kspace of shape ``(..., ncontrasts, nviews, nsamples)``. + + """ + return ApplyNUFFT.apply( + image, nufft_plan, basis_adjoint, weight, device, threadsperblock + ) + + +class ApplyNUFFTAdjoint(autograd.Function): + @staticmethod + def forward(kspace, nufft_plan, basis, weight, device, threadsperblock): + return _apply_nufft_adj( + kspace, nufft_plan, basis, weight, device, threadsperblock + ) + + @staticmethod + def setup_context(ctx, inputs, output): + _, nufft_plan, basis, weight, device, threadsperblock = inputs + ctx.set_materialize_grads(False) + ctx.nufft_plan = nufft_plan + ctx.basis = basis + ctx.weight = weight + ctx.device = device + ctx.threadsperblock = threadsperblock + + @staticmethod + def backward(ctx, image): + nufft_plan = ctx.nufft_plan + basis = ctx.basis + if basis is not None: + basis_adjoint = basis.conj().t() + else: + basis_adjoint = None + weight = ctx.weight + device = ctx.device + threadsperblock = ctx.threadsperblock + + return ( + _apply_nufft( + image, nufft_plan, basis_adjoint, weight, device, threadsperblock + ), + None, + None, + None, + None, + None, + ) + + +def apply_nufft_adj( + kspace, nufft_plan, basis=None, weight=None, device=None, threadsperblock=128 +): + """ + Apply adjoint Non-Uniform Fast Fourier Transform. + + Parameters + ---------- + kspace : torch.Tensor + Input kspace of shape ``(..., ncontrasts, nviews, nsamples)``. + nufft_plan : NUFFTPlan + Pre-calculated NUFFT plan coefficients in sparse COO format. 
+ basis : torch.Tensor, optional + Low rank subspace projection operator + of shape ``(ncontrasts, ncoeffs)``; can be ``None``. The default is ``None``. + weight : np.ndarray | torch.Tensor, optional + Optional weight for output data samples. Useful to force adjointeness. + The default is ``None``. + device : str, optional + Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``). + The default is ``None ``(same as interpolator). + threadsperblock : int + CUDA blocks size (for GPU only). The default is ``128``. + + Returns + ------- + image : torch.Tensor + Output image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + + """ + return ApplyNUFFTAdjoint.apply( + kspace, nufft_plan, basis, weight, device, threadsperblock + ) + + +class ApplyNUFFTSelfAdjoint(autograd.Function): + @staticmethod + def forward(image, toeplitz_kern, device, threadsperblock): + return _apply_nufft_selfadj(image, toeplitz_kern, device, threadsperblock) + + @staticmethod + def setup_context(ctx, inputs, output): + _, toeplitz_kern, device, threadsperblock = inputs + ctx.set_materialize_grads(False) + ctx.toeplitz_kern = toeplitz_kern + ctx.device = device + ctx.threadsperblock = threadsperblock + + @staticmethod + def backward(ctx, image): + toeplitz_kern = ctx.toeplitz_kern + device = ctx.device + threadsperblock = ctx.threadsperblock + return ( + _apply_nufft_selfadj(image, toeplitz_kern, device, threadsperblock), + None, + None, + None, + ) + + +
[docs]def apply_nufft_selfadj(image, toeplitz_kern, device=None, threadsperblock=128): + """ + Apply self-adjoint Non-Uniform Fast Fourier Transform via Toeplitz Convolution. + + Parameters + ---------- + image : torch.Tensor + Input image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + toeplitz_kern : GramMatrix + Pre-calculated Toeplitz kernel. + device : str, optional + Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``). + The default is ``None ``(same as interpolator). + threadsperblock : int + CUDA blocks size (for GPU only). The default is ``128``. + + Returns + ------- + image : torch.Tensor + Output image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + + """ + return ApplyNUFFTSelfAdjoint.apply(image, toeplitz_kern, device, threadsperblock)
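The self-adjoint path avoids explicit gridding/degridding by convolving with a precomputed Toeplitz kernel; a hedged sketch using the functions defined in this module (sizes illustrative):

>>> import torch
>>> coord = 32.0 * (torch.rand(1, 64, 128, 2) - 0.5)
>>> kern = plan_toeplitz_nufft(coord, shape=32)            # Fourier transform of the system tPSF
>>> image = torch.randn(1, 32, 32, dtype=torch.complex64)
>>> out = apply_nufft_selfadj(image, kern)                 # approximately nufft_adj(nufft(image))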
+ + +# %% local utils +@dataclass +class NUFFTPlan: + ndim: int + oversamp: tuple + width: tuple + beta: tuple + os_shape: tuple + shape: tuple + interpolator: object + device: str + + def to(self, device): + """ + Dispatch internal attributes to selected device. + + Parameters + ---------- + device : str + Computational device ("cpu" or "cuda:n", with n=0, 1,...nGPUs). + + """ + if device != self.device: + self.interpolator.to(device) + self.device = device + + return self + + +def _get_oversamp_shape(shape, oversamp, ndim): + return np.ceil(oversamp * shape).astype(np.int16) + + +def _scale_coord(coord, shape, oversamp): + ndim = coord.shape[-1] + output = coord.clone() + for i in range(-ndim, 0): + scale = np.ceil(oversamp[i] * shape[i]) / shape[i] + shift = np.ceil(oversamp[i] * shape[i]) // 2 + output[..., i] *= scale + output[..., i] += shift + + return output + + +def _apodize(data_in, ndim, oversamp, width, beta): + data_out = data_in + for n in range(1, ndim + 1): + axis = -n + if width[axis] != 1: + i = data_out.shape[axis] + os_i = np.ceil(oversamp[axis] * i) + idx = torch.arange(i, dtype=torch.float32, device=data_in.device) + + # Calculate apodization + apod = ( + beta[axis] ** 2 - (math.pi * width[axis] * (idx - i // 2) / os_i) ** 2 + ) ** 0.5 + apod /= torch.sinh(apod) + + # normalize by DC + apod = apod / apod[int(i // 2)] + + # avoid NaN + apod = torch.nan_to_num(apod, nan=1.0) + + # apply to axis + data_out *= apod.reshape([i] + [1] * (-axis - 1)) + + return data_out + + +def _apply_nufft(image, nufft_plan, basis_adjoint, weight, device, threadsperblock): + # check if it is numpy + if isinstance(image, np.ndarray): + isnumpy = True + else: + isnumpy = False + + # convert to tensor if nececessary + image = torch.as_tensor(image) + + # make sure datatype is correct + if image.dtype in (torch.float16, torch.float32, torch.float64): + image = image.to(torch.float32) + else: + image = image.to(torch.complex64) + + # handle basis + if basis_adjoint is not None: + basis_adjoint = torch.as_tensor(basis_adjoint) + + # make sure datatype is correct + if basis_adjoint.dtype in (torch.float16, torch.float32, torch.float64): + basis_adjoint = basis_adjoint.to(torch.float32) + else: + basis_adjoint = basis_adjoint.to(torch.complex64) + + # cast tp device is necessary + if device is not None: + nufft_plan.to(device) + + # unpack plan + ndim = nufft_plan.ndim + oversamp = nufft_plan.oversamp + width = nufft_plan.width + beta = nufft_plan.beta + os_shape = nufft_plan.os_shape + interpolator = nufft_plan.interpolator + device = nufft_plan.device + + # copy input to avoid original data modification + image = image.clone() + + # original device + odevice = image.device + + # offload to computational device + image = image.to(device) + + # apodize + _apodize(image, ndim, oversamp, width, beta) + + # zero-pad + image = _resize(image, list(image.shape[:-ndim]) + list(os_shape)) + + # FFT + kspace = _fft.fft(image, axes=range(-ndim, 0), norm=None) + + # interpolate + kspace = _interp.apply_interpolation( + kspace, interpolator, basis_adjoint, device, threadsperblock + ) + + # apply weight + if weight is not None: + weight = torch.as_tensor(weight, dtype=torch.float32, device=kspace.device) + kspace = weight * kspace + + # bring back to original device + kspace = kspace.to(odevice) + image = image.to(odevice) + + # transform back to numpy if required + if isnumpy: + kspace = kspace.numpy(force=True) + + # collect garbage + gc.collect() + + return kspace + + +def _apply_nufft_adj(kspace, 
nufft_plan, basis, weight, device, threadsperblock): + # check if it is numpy + if isinstance(kspace, np.ndarray): + isnumpy = True + else: + isnumpy = False + + # convert to tensor if nececessary + kspace = torch.as_tensor(kspace) + + # make sure datatype is correct + if kspace.dtype in (torch.float16, torch.float32, torch.float64): + kspace = kspace.to(torch.float32) + else: + kspace = kspace.to(torch.complex64) + + # handle basis + if basis is not None: + basis = torch.as_tensor(basis) + + # make sure datatype is correct + if basis.dtype in (torch.float16, torch.float32, torch.float64): + basis = basis.to(torch.float32) + else: + basis = basis.to(torch.complex64) + + # cast to device is necessary + if device is not None: + nufft_plan.to(device) + + # unpack plan + ndim = nufft_plan.ndim + oversamp = nufft_plan.oversamp + width = nufft_plan.width + beta = nufft_plan.beta + shape = nufft_plan.shape + interpolator = nufft_plan.interpolator + device = nufft_plan.device + + # original device + odevice = kspace.device + + # offload to computational device + kspace = kspace.to(device) + + # apply weight + if weight is not None: + weight = torch.as_tensor(weight, dtype=torch.float32, device=kspace.device) + kspace = weight * kspace + + # gridding + kspace = _interp.apply_gridding( + kspace, interpolator, basis, device, threadsperblock + ) + + # IFFT + image = _fft.ifft(kspace, axes=range(-ndim, 0), norm=None) + + # crop + image = _resize(image, list(image.shape[:-ndim]) + list(shape)) + + # apodize + _apodize(image, ndim, oversamp, width, beta) + + # bring back to original device + kspace = kspace.to(odevice) + image = image.to(odevice) + + # transform back to numpy if required + if isnumpy: + image = image.numpy(force=True) + + # collect garbage + gc.collect() + + return image + + +def _apply_nufft_selfadj(image, toeplitz_kern, device, threadsperblock): + # check if it is numpy + if isinstance(image, np.ndarray): + isnumpy = True + else: + isnumpy = False + + # convert to tensor if nececessary + image = torch.as_tensor(image) + + # make sure datatype is correct + if image.dtype in (torch.float16, torch.float32, torch.float64): + image = image.to(torch.float32) + else: + image = image.to(torch.complex64) + + # cast to device is necessary + if device is not None: + toeplitz_kern.to(device) + + # unpack plan + shape = toeplitz_kern.shape + ndim = toeplitz_kern.ndim + device = toeplitz_kern.device + + # original shape + oshape = image.shape[-ndim:] + + # original device + odevice = image.device + + # offload to computational device + image = image.to(device) + + # zero-pad + image = _resize(image, list(image.shape[:-ndim]) + list(shape)) + + # FFT + kspace = _fft.fft(image, axes=range(-ndim, 0), norm="ortho", centered=False) + + # Toeplitz convolution + tmp = torch.zeros_like(kspace) + tmp = _interp.apply_toeplitz(tmp, kspace, toeplitz_kern, device, threadsperblock) + + # IFFT + image = _fft.ifft(tmp, axes=range(-ndim, 0), norm="ortho", centered=False) + + # crop + image = _resize(image, list(image.shape[:-ndim]) + list(oshape)) + + # bring back to original device + image = image.to(odevice) + + # transform back to numpy if required + if isnumpy: + image = image.numpy(force=True) + + # collect garbage + gc.collect() + + return image +
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/fft/sparse_fft.html b/_modules/deepmr/fft/sparse_fft.html new file mode 100644 index 00000000..77ed332b --- /dev/null +++ b/_modules/deepmr/fft/sparse_fft.html @@ -0,0 +1,1045 @@ + + + + + + + + + + + deepmr.fft.sparse_fft — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.fft.sparse_fft

+"""Sparse FFT subroutines."""
+
+__all__ = [
+    "prepare_sampling",
+    "plan_toeplitz_fft",
+    "apply_sparse_fft",
+    "apply_sparse_ifft",
+    "apply_sparse_fft_selfadj",
+]
+
+import gc
+
+import numpy as np
+import torch
+import torch.autograd as autograd
+
+from . import fft as _fft
+from . import _sparse
+
+
+def prepare_sampling(indexes, shape, device="cpu"):
+    """
+    Precompute sparse sampling mask object.
+
+    Parameters
+    ----------
+    indexes : torch.Tensor
+        Sampled k-space points indexes of shape ``(ncontrasts, nviews, nsamples, ndims)``.
+    shape : int | Iterable[int]
+        Cartesian grid size of shape ``(ndim,)``.
+        If scalar, isotropic matrix is assumed.
+    device : str, optional
+        Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``).
+        The default is ``cpu``.
+
+    Returns
+    -------
+    interpolator : dict
+        Structure containing sparse interpolator matrix:
+
+            * index (``torch.Tensor[int]``): indexes of the non-zero entries of interpolator sparse matrix of shape (ndim, ncoord, width).
+            * value (``torch.Tensor[float32]``): values of the non-zero entries of interpolator sparse matrix of shape (ndim, ncoord, width).
+            * dshape (``Iterable[int]``): oversampled grid shape of shape (ndim,). Order of axes is (z, y, x).
+            * ishape (``Iterable[int]``): interpolator shape (ncontrasts, nviews, nsamples).
+            * ndim (``int``): number of spatial dimensions.
+            * device (``str``): computational device.
+
+    Notes
+    -----
+    Sampled point indexes axes ordering is assumed to be ``(x, y)`` for 2D signals
+    and ``(x, y, z)`` for 3D. Conversely, axes ordering for grid shape is
+    assumed to be ``(z, y, x)``.
+
+    Indexes tensor shape is ``(ncontrasts, nviews, nsamples, ndim)``. If there are less dimensions
+    (e.g., single-shot or single contrast trajectory), assume singleton for the missing ones:
+
+        * ``indexes.shape = (nsamples, ndim) -> (1, 1, nsamples, ndim)``
+        * ``indexes.shape = (nviews, nsamples, ndim) -> (1, nviews, nsamples, ndim)``
+
+    """
+    # get parameters
+    ndim = indexes.shape[-1]
+
+    if np.isscalar(shape):
+        shape = np.asarray([shape] * ndim, dtype=np.int16)
+    else:
+        shape = np.array(shape, dtype=np.int16)
+
+    return _sparse.plan_sampling(indexes, shape, device)
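As with the NUFFT plan, the sampling mask can be built once and reused across calls; a sketch with random integer indexes (illustrative only):

>>> import torch
>>> indexes = torch.randint(0, 32, (1, 1, 500, 2))         # (ncontrasts, nviews, nsamples, 2)
>>> mask = prepare_sampling(indexes, shape=32)
>>> image = torch.randn(1, 32, 32, dtype=torch.complex64)
>>> ksp = apply_sparse_fft(image, mask)                    # (..., 1, 1, 500)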
+
+
+
[docs]def plan_toeplitz_fft(coord, shape, basis=None, device="cpu"): + """ + Compute spatio-temporal kernel for fast self-adjoint operation. + + Parameters + ---------- + coord : torch.Tensor + K-space coordinates of shape ``(ncontrasts, nviews, nsamples, ndims)``. + Coordinates must be normalized between ``(-0.5 * shape[i], 0.5 * shape[i])``, + with ``i = (z, y, x)``. + shape : int | Iterable[int] + Oversampled grid size of shape ``(ndim,)``. + If scalar, isotropic matrix is assumed. + basis : torch.Tensor, optional + Low rank subspace projection operator + of shape ``(ncontrasts, ncoeffs)``; can be ``None``. The default is ``None``. + device : str, optional + Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``). + The default is ``cpu``. + + Returns + ------- + toeplitz_kernel : GramMatrix + Structure containing Toeplitz kernel (i.e., Fourier transform of system tPSF). + + """ + return _sparse.plan_toeplitz(coord, shape, basis, device)
+ + +class ApplySparseFFT(autograd.Function): + @staticmethod + def forward(image, sampling_mask, basis_adjoint, weight, device, threadsperblock): + return _apply_sparse_fft( + image, sampling_mask, basis_adjoint, weight, device, threadsperblock + ) + + @staticmethod + def setup_context(ctx, inputs, output): + _, sampling_mask, basis_adjoint, weight, device, threadsperblock = inputs + ctx.set_materialize_grads(False) + ctx.sampling_mask = sampling_mask + ctx.basis_adjoint = basis_adjoint + ctx.weight = weight + ctx.device = device + ctx.threadsperblock = threadsperblock + + @staticmethod + def backward(ctx, kspace): + sampling_mask = ctx.sampling_mask + basis_adjoint = ctx.basis_adjoint + if basis_adjoint is not None: + basis = basis_adjoint.conj().t() + else: + basis = None + weight = ctx.weight + device = ctx.device + threadsperblock = ctx.threadsperblock + + return ( + _apply_sparse_ifft( + kspace, sampling_mask, basis, weight, device, threadsperblock + ), + None, + None, + None, + None, + None, + ) + + +def apply_sparse_fft( + image, + sampling_mask, + basis_adjoint=None, + weight=None, + device=None, + threadsperblock=128, +): + """ + Apply sparse Fast Fourier Transform. + + Parameters + ---------- + image : torch.Tensor + Input image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + sampling_mask : dict + Pre-formatted sampling mask in sparse COO format. + basis_adjoint : torch.Tensor, optional + Adjoint low rank subspace projection operator + of shape ``(ncoeffs, ncontrasts)``; can be ``None``. The default is ``None``. + weight : np.ndarray | torch.Tensor, optional + Optional weight for output data samples. Useful to force adjointeness. + The default is ``None``. + device : str, optional + Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``). + The default is ``None`` (same as interpolator). + threadsperblock : int + CUDA blocks size (for GPU only). The default is ``128``. + + Returns + ------- + kspace : torch.Tensor + Output sparse kspace of shape ``(..., ncontrasts, nviews, nsamples)``. + + """ + return ApplySparseFFT.apply( + image, sampling_mask, basis_adjoint, weight, device, threadsperblock + ) + + +class ApplySparseIFFT(autograd.Function): + @staticmethod + def forward(kspace, sampling_mask, basis, weight, device, threadsperblock): + return _apply_sparse_ifft( + kspace, sampling_mask, basis, weight, device, threadsperblock + ) + + @staticmethod + def setup_context(ctx, inputs, output): + _, sampling_mask, basis, weight, device, threadsperblock = inputs + ctx.set_materialize_grads(False) + ctx.sampling_mask = sampling_mask + ctx.basis = basis + ctx.weight = weight + ctx.device = device + ctx.threadsperblock = threadsperblock + + @staticmethod + def backward(ctx, image): + sampling_mask = ctx.sampling_mask + basis = ctx.basis + if basis is not None: + basis_adjoint = basis.conj().t() + else: + basis_adjoint = None + weight = ctx.weight + device = ctx.device + threadsperblock = ctx.threadsperblock + + return ( + _apply_sparse_fft( + image, sampling_mask, basis_adjoint, weight, device, threadsperblock + ), + None, + None, + None, + None, + None, + ) + + +def apply_sparse_ifft( + kspace, sampling_mask, basis=None, weight=None, device=None, threadsperblock=128 +): + """ + Apply adjoint Non-Uniform Fast Fourier Transform. + + Parameters + ---------- + kspace : torch.Tensor + Input sparse kspace of shape ``(..., ncontrasts, nviews, nsamples)``. + sampling_mask : dict + Pre-formatted sampling mask in sparse COO format. 
+ basis : torch.Tensor, optional + Low rank subspace projection operator + of shape ``(ncontrasts, ncoeffs)``; can be ``None``. The default is ``None``. + weight : np.ndarray | torch.Tensor, optional + Optional weight for output data samples. Useful to force adjointeness. + The default is ``None``. + device : str, optional + Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``). + The default is ``None ``(same as interpolator). + threadsperblock : int + CUDA blocks size (for GPU only). The default is ``128``. + + Returns + ------- + image : torch.Tensor + Output image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + + """ + return ApplySparseIFFT.apply( + kspace, sampling_mask, basis, weight, device, threadsperblock + ) + + +class ApplySparseFFTSelfadjoint(autograd.Function): + @staticmethod + def forward(image, toeplitz_kern, device, threadsperblock): + return _apply_sparse_fft_selfadj(image, toeplitz_kern, device, threadsperblock) + + @staticmethod + def setup_context(ctx, inputs, output): + _, toeplitz_kern, device, threadsperblock = inputs + ctx.set_materialize_grads(False) + ctx.toeplitz_kern = toeplitz_kern + ctx.device = device + ctx.threadsperblock = threadsperblock + + @staticmethod + def backward(ctx, image): + toeplitz_kern = ctx.toeplitz_kern + device = ctx.device + threadsperblock = ctx.threadsperblock + return ( + _apply_sparse_fft_selfadj(image, toeplitz_kern, device, threadsperblock), + None, + None, + None, + ) + + +
[docs]def apply_sparse_fft_selfadj(image, toeplitz_kern, device=None, threadsperblock=128): + """ + Apply self-adjoint Fast Fourier Transform via Toeplitz convolution. + + Parameters + ---------- + image : torch.Tensor + Input image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + toeplitz_kern : GramMatrix + Pre-calculated Toeplitz kernel. + device : str, optional + Computational device (``cpu`` or ``cuda:n``, with ``n=0, 1,...nGPUs``). + The default is ``None ``(same as interpolator). + threadsperblock : int + CUDA blocks size (for GPU only). The default is ``128``. + + Returns + ------- + image : torch.Tensor + Output image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + + """ + return ApplySparseFFTSelfadjoint.apply( + image, toeplitz_kern, device, threadsperblock + )
+ + +# %% local utils +def _apply_sparse_fft( + image, sampling_mask, basis_adjoint, weight, device, threadsperblock +): + # check if it is numpy + if isinstance(image, np.ndarray): + isnumpy = True + else: + isnumpy = False + + # convert to tensor if nececessary + image = torch.as_tensor(image) + + # make sure datatype is correct + if image.dtype in (torch.float16, torch.float32, torch.float64): + image = image.to(torch.float32) + else: + image = image.to(torch.complex64) + + # handle basis + if basis_adjoint is not None: + basis_adjoint = torch.as_tensor(basis_adjoint) + + # make sure datatype is correct + if basis_adjoint.dtype in (torch.float16, torch.float32, torch.float64): + basis_adjoint = basis_adjoint.to(torch.float32) + else: + basis_adjoint = basis_adjoint.to(torch.complex64) + + # cast tp device is necessary + if device is not None: + sampling_mask.to(device) + + # unpack plan + ndim = sampling_mask.ndim + device = sampling_mask.device + + # Copy input to avoid original data modification + image = image.clone() + + # Original device + odevice = image.device + + # Offload to computational device + image = image.to(device) + + # FFT + kspace = _fft.fft(image, axes=range(-ndim, 0), norm=None) + + # Interpolate + kspace = _sparse.apply_sampling( + kspace, sampling_mask, basis_adjoint, device, threadsperblock + ) + + # apply weight + if weight is not None: + weight = torch.as_tensor(weight, dtype=torch.float32, device=kspace.device) + kspace = weight * kspace + + # Bring back to original device + kspace = kspace.to(odevice) + image = image.to(odevice) + + # transform back to numpy if required + if isnumpy: + kspace = kspace.numpy(force=True) + + # collect garbage + gc.collect() + + return kspace + + +def _apply_sparse_ifft(kspace, sampling_mask, basis, weight, device, threadsperblock): + # check if it is numpy + if isinstance(kspace, np.ndarray): + isnumpy = True + else: + isnumpy = False + + # convert to tensor if nececessary + kspace = torch.as_tensor(kspace) + + # make sure datatype is correct + if kspace.dtype in (torch.float16, torch.float32, torch.float64): + kspace = kspace.to(torch.float32) + else: + kspace = kspace.to(torch.complex64) + + # handle basis + if basis is not None: + basis = torch.as_tensor(basis) + + # make sure datatype is correct + if basis.dtype in (torch.float16, torch.float32, torch.float64): + basis = basis.to(torch.float32) + else: + basis = basis.to(torch.complex64) + + # cast to device is necessary + if device is not None: + sampling_mask.to(device) + + # unpack plan + ndim = sampling_mask.ndim + device = sampling_mask.device + + # Original device + odevice = kspace.device + + # Offload to computational device + kspace = kspace.to(device) + + # apply weight + if weight is not None: + weight = torch.as_tensor(weight, dtype=torch.float32, device=kspace.device) + kspace = weight * kspace + + # Gridding + kspace = _sparse.apply_zerofill( + kspace, sampling_mask, basis, device, threadsperblock + ) + + # IFFT + image = _fft.ifft(kspace, axes=range(-ndim, 0), norm=None) + + # Bring back to original device + kspace = kspace.to(odevice) + image = image.to(odevice) + + # transform back to numpy if required + if isnumpy: + image = image.numpy(force=True) + + # collect garbage + gc.collect() + + return image + + +def _apply_sparse_fft_selfadj(image, toeplitz_kern, device, threadsperblock): + # check if it is numpy + if isinstance(image, np.ndarray): + isnumpy = True + else: + isnumpy = False + + # convert to tensor if nececessary + image = torch.as_tensor(image) 
+ + # make sure datatype is correct + if image.dtype in (torch.float16, torch.float32, torch.float64): + image = image.to(torch.float32) + else: + image = image.to(torch.complex64) + + # cast to device is necessary + if device is not None: + toeplitz_kern.to(device) + + # unpack plan + ndim = toeplitz_kern.ndim + device = toeplitz_kern.device + + # original device + odevice = image.device + + # offload to computational device + image = image.to(device) + + # FFT + kspace = _fft.fft(image, axes=range(-ndim, 0), norm="ortho", centered=False) + + # Toeplitz convolution + tmp = torch.zeros_like(kspace) + tmp = _sparse.apply_toeplitz(tmp, kspace, toeplitz_kern, device, threadsperblock) + + # IFFT + image = _fft.ifft(tmp, axes=range(-ndim, 0), norm="ortho", centered=False) + + # bring back to original device + image = image.to(odevice) + + # transform back to numpy if required + if isnumpy: + image = image.numpy(force=True) + + # collect garbage + gc.collect() + + return image +
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/io/generic/hdf5.html b/_modules/deepmr/io/generic/hdf5.html new file mode 100644 index 00000000..63c2841a --- /dev/null +++ b/_modules/deepmr/io/generic/hdf5.html @@ -0,0 +1,675 @@ + + + + + + + + + + + deepmr.io.generic.hdf5 — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for deepmr.io.generic.hdf5

+"""I/O Routines for HDF5 files."""
+
+__all__ = ["read_hdf5", "write_hdf5"]
+
+import copy
+
+import h5py
+import numpy as np
+
+from .pathlib import get_filepath
+
+dtypes = (
+    np.uint8,
+    np.uint16,
+    np.uint32,
+    np.uint64,
+    int,
+    np.int16,
+    np.int32,
+    np.int64,
+    float,
+    np.float16,
+    np.float32,
+    np.float64,
+)
+
+
+
[docs]def read_hdf5(filepath):
+    """
+    Read HDF5 file as a Python dictionary.
+
+    Parameters
+    ----------
+    filepath : str
+        Path to file on disk.
+
+    Returns
+    -------
+    dict
+        Deserialized HDF5 file.
+
+    Example
+    -------
+    Define an example dictionary and save it to file:
+
+    >>> import os
+    >>> import numpy as np
+    >>> import deepmr.io
+    >>> pydict = {'headerstr': 'someinfo', 'testdouble': np.ones(3, dtype=np.float32)}
+    >>> filepath = os.path.realpath('test.h5')
+    >>> deepmr.io.write_hdf5(pydict, filepath)
+
+    Load from disk:
+
+    >>> loaded_dict = deepmr.io.read_hdf5(filepath)
+
+    The result is the same dictionary we created before:
+
+    >>> loaded_dict.keys()
+    ['headerstr', 'testdouble']
+    >>> loaded_dict['testdouble']
+    array([1.0, 1.0, 1.0])
+    >>> loaded_dict['headerstr']
+    'someinfo'
+
+    """
+    filepath = get_filepath(filepath, True, "h5")
+    with h5py.File(filepath, "r") as h5file:
+        return _recursively_load_dict_contents_from_group(h5file, "/")
+ + +
[docs]def write_hdf5(input, filepath):
+    """
+    Write a given dictionary to HDF5 file.
+
+    Parameters
+    ----------
+    input : dict
+        Input dictionary.
+    filepath : str
+        Path to file on disk.
+
+    Example
+    -------
+    Define an example dictionary:
+
+    >>> import numpy as np
+    >>> pydict = {'headerstr': 'someinfo', 'testdouble': np.ones(3, dtype=np.float32)}
+
+    Save the dictionary to disk:
+
+    >>> import os
+    >>> import deepmr.io
+    >>> filepath = os.path.realpath('test.h5')
+    >>> deepmr.io.write_hdf5(pydict, filepath)
+
+    """
+    input = copy.deepcopy(input)
+    with h5py.File(filepath, "w") as h5file:
+        _recursively_save_dict_contents_to_group(h5file, "/", input)
+ + +def _recursively_load_dict_contents_from_group(h5file, path): + ans = {} + for key, item in h5file[path].items(): + if isinstance(item, h5py._hl.dataset.Dataset): + tmp = item[()] + if isinstance(tmp, bytes): + tmp = tmp.decode() + if isinstance(tmp, np.ndarray): + tmp = tmp.squeeze() + ans[key] = tmp + elif isinstance(item, h5py._hl.group.Group): + ans[key] = _recursively_load_dict_contents_from_group( + h5file, path + key + "/" + ) + return ans + + +def _recursively_save_dict_contents_to_group(h5file, path, dic): + for key, item in dic.items(): + if isinstance(item, (*dtypes, str, bytes)): + h5file[path + key] = item + elif np.isscalar(item): + h5file[path + key] = (item,) + elif isinstance(item, (list, tuple)): + h5file[path + key] = np.asarray(item) + elif isinstance(item, np.ndarray): + h5file[path + key] = item + elif isinstance(item, dict): + _recursively_save_dict_contents_to_group(h5file, path + key + "/", item) + elif item is None: + pass + else: + raise ValueError(f"Cannot save {type(item)} type") +
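A brief usage sketch of the routines above: nested dictionaries are stored as HDF5 groups by the recursive helpers, so a write/read round trip preserves the structure. The file name and contents here are illustrative only.

import numpy as np
import deepmr.io

pydict = {
    "info": {"te": np.asarray([20.0, 40.0, 60.0], dtype=np.float32)},  # becomes a group
    "label": "example",                                                # becomes a dataset
}
deepmr.io.write_hdf5(pydict, "example.h5")
loaded = deepmr.io.read_hdf5("example.h5")
assert loaded["label"] == "example"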
\ No newline at end of file diff --git a/_modules/deepmr/io/generic/matlab.html b/_modules/deepmr/io/generic/matlab.html new file mode 100644 index 00000000..08261e1d --- /dev/null +++ b/_modules/deepmr/io/generic/matlab.html @@ -0,0 +1,612 @@ + deepmr.io.generic.matlab — deepmr documentation
Source code for deepmr.io.generic.matlab

+"""I/O Routines for MATLAB files."""
+
+__all__ = ["read_matfile"]
+
+import scipy.io
+import mat73
+
+from .pathlib import get_filepath
+
+
+
[docs]def read_matfile(filepath, return_fullpath=False):
+    """
+    Read matfile as a Python dictionary.
+
+    Automatically handles both legacy and HDF5 (i.e., -v7.3) formats.
+
+    Parameters
+    ----------
+    filepath : str
+        Path of the file on disk.
+    return_fullpath : bool, optional
+        If True, also return expanded file path. The default is False.
+
+    Returns
+    -------
+    dict
+        Deserialized matfile.
+
+    Example
+    -------
+    >>> from os.path import dirname, join as pjoin
+    >>> import scipy.io as sio
+
+    Get the filename for an example .mat file from the tests/data directory.
+
+    >>> dir = pjoin(dirname(sio.__file__), 'matlab', 'tests', 'data')
+    >>> filepath = pjoin(dir, 'testdouble_7.4_GLNX86.mat')
+
+    Load the .mat file contents.
+
+    >>> import deepmr.io
+    >>> matfile = deepmr.io.read_matfile(filepath)
+
+    The result is a dictionary, one key/value pair for each variable:
+
+    >>> matfile.keys()
+    ['testdouble']
+    >>> matfile['testdouble']
+    array([[0.        , 0.78539816, 1.57079633, 2.35619449, 3.14159265,
+            3.92699082, 4.71238898, 5.49778714, 6.28318531]])
+
+    Unlike ``scipy.io``, this also loads MATLAB v7.3 files, using the ``mat73`` library [1].
+    In addition, ``__globals__``, ``__header__`` and ``__version__`` fields are automatically
+    discarded.
+
+    References
+    ----------
+    [1]: https://github.com/skjerns/mat7.3/tree/master
+
+    """
+    filepath = get_filepath(filepath, True, "mat")
+    try:
+        matfile = scipy.io.loadmat(filepath)
+        matfile.pop("__globals__", None)
+        matfile.pop("__header__", None)
+        matfile.pop("__version__", None)
+    except Exception:
+        matfile = mat73.loadmat(filepath)
+
+    if return_fullpath:
+        return matfile, filepath
+    return matfile
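A short usage sketch, with an illustrative file name: the same call covers both legacy and -v7.3 files, since a failure of the ``scipy.io`` loader above falls through to ``mat73``.

import deepmr.io

# plain dictionary of variables, regardless of the underlying .mat version
data = deepmr.io.read_matfile("acquisition.mat")

# optionally also return the expanded path that was actually read
data, fullpath = deepmr.io.read_matfile("acquisition.mat", return_fullpath=True)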
\ No newline at end of file diff --git a/_modules/deepmr/io/header/api.html b/_modules/deepmr/io/header/api.html new file mode 100644 index 00000000..aa749054 --- /dev/null +++ b/_modules/deepmr/io/header/api.html @@ -0,0 +1,822 @@ + deepmr.io.header.api — deepmr documentation
Source code for deepmr.io.header.api

+"""Acquisition Header API."""
+
+__all__ = ["read_acqheader", "write_acqheader"]
+
+import copy
+import os
+import time
+
+import numpy as np
+
+from . import base as _base
+
+# from . import bart as _bart
+from . import matlab as _matlab
+from . import mrd as _mrd
+
+
+
[docs]def read_acqheader(filepath, *args, device="cpu", verbose=False, **kwargs): + """ + Read acquisition header from file. + + The header info (e.g., k-space trajectory, shape) can be used to + simulate acquisitions or to inform raw data loading (e.g., via ordering) + to reshape from acquisition to reconstruction ordering and image post-processing + (transposition, flipping) and exporting. + + Parameters + ---------- + filepath : str + Path to acquisition header file. Supports wildcard (e.g., ``/path-to-data-folder/*.h5``). + *args + Variable length argument list passed to the specific subroutines + for the different datatypes (see ``Keyword Arguments``). + device : str, optional + Computational device for internal attributes. The default is ``cpu``. + verbose : int, optional + Verbosity level ``(0=Silent, 1=Less, 2=More)``. The default is ``0``. + + + Keyword Arguments + ----------------- + dcfpath : str, optional + Path to the dcf file (:func:`deepmr.io.matlab.read_matlab_acqhead`). + The default is None. + methodpath : str, optional + Path to the schedule description file (:func:`deepmr.io.matlab.read_matlab_acqhead`). + The default is None. + sliceprofpath : str, optional + Path to the slice profile file (:func:`deepmr.io.matlab.read_matlab_acqhead`). + The default is None. + + Returns + ------- + head : Header + Deserialized acquisition header. + + Notes + ----- + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * resolution (torch.Tensor): + This is the expected image resolution in mm of shape ``(dz, dy, dx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. + * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5 * shape, 0.5 * shape)`` + with shape ``(ncontrasts, nviews, nsamples, ndims)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. + * FA (torch.Tensor, float): + This is either the acquisition flip angle in degrees or the list + of flip angles of shape ``(ncontrasts,)`` for each image in the series. + * TR (torch.Tensor, float): + This is either the repetition time in ms or the list + of repetition times of shape ``(ncontrasts,)`` for each image in the series. + * TE (torch.Tensor, float): + This is either the echo time in ms or the list + of echo times of shape ``(ncontrasts,)`` for each image in the series. + * TI (torch.Tensor, float): + This is either the inversion time in ms or the list + of inversion times of shape ``(ncontrasts,)`` for each image in the series. + * user (dict): + User parameters. Some examples are: + + * ordering (torch.Tensor): + Indices for reordering (acquisition to reconstruction) + of acquired k-space data, shaped ``(3, nslices * ncontrasts * nview)``, whose rows are + ``contrast_index``, ``slice_index`` and ``view_index``, respectively. + * mode (str): + Acquisition mode (``2Dcart``, ``3Dcart``, ``2Dnoncart``, ``3Dnoncart``). + * separable (bool): + Whether the acquisition can be decoupled by fft along ``slice`` / ``readout`` directions + (3D stack-of-noncartesian / 3D cartesian, respectively) or not (3D noncartesian and 2D acquisitions). + * slice_profile (torch.Tensor): + Flip angle scaling along slice profile of shape ``(nlocs,)``. 
+ * basis (torch.Tensor): + Low rank subspace basis for subspace reconstruction of shape ``(ncontrasts, ncoeff)``. + + * affine (np.ndarray): + Affine matrix describing image spacing, orientation and origin of shape ``(4, 4)``. + * ref_dicom (pydicom.Dataset): + Template dicom for image export. + * flip (list): + List of spatial axis to be flipped after image reconstruction. + The default is an empty list (no flipping). + * transpose (list): + Permutation of image dimensions after reconstruction, depending on acquisition mode: + + * **2Dcart:** reconstructed image has ``(nslices, ncontrasts, ny, nx) -> transpose = [1, 0, 2, 3]`` + * **2Dnoncart:** reconstructed image has ``(nslices, ncontrasts, ny, nx) -> transpose = [1, 0, 2, 3]`` + * **3Dcart:** reconstructed image has ``(ncontrasts, nz, ny, nx) -> transpose = [0, 1, 2, 3]`` + * **3Dnoncart:** reconstructed image has ``(nx, ncontrasts, nz, ny) -> transpose = [1, 2, 3, 0]`` + + The default is an empty list (no transposition). + + """ + tstart = time.time() + if verbose >= 1: + print(f"Reading acquisition header from file {filepath}...", end="\t") + + done = False + + # mrd + if filepath.endswith(".h5"): + try: + head = _mrd.read_mrd_acqhead(filepath) + done = True + except Exception as e: + msg0 = e + else: + msg0 = "" + + # matfile + if filepath.endswith(".mat") and not (done): + try: + head = _matlab.read_matlab_acqhead(filepath, *args, **kwargs) + done = True + except Exception as e: + msg1 = _get_error(e) + else: + msg1 = "" + + # bart + # if filepath.endswith(".cfl") and not(done): + # try: + # head = _bart.read_bart_acqhead(filepath) + # done = True + # except Exception: + # pass + + # fallback + if filepath.endswith(".h5") and not (done): + try: + done = True + head = _base.read_base_acqheader(filepath) + except Exception as e: + msg2 = _get_error(e) + else: + msg2 = "" + + # check if we loaded data + if not (done): + raise RuntimeError( + f"File (={filepath}) not recognized! 
Error:\nMRD {msg0}\nMATLAB {msg1}\nBase {msg2}" + ) + + # normalize trajectory + if head.traj is not None: + ndim = head.traj.shape[-1] + traj_max = ((head.traj**2).sum(axis=-1) ** 0.5).max() + head.traj = head.traj / (2 * traj_max) # normalize to (-0.5, 0.5) + head.traj = head.traj * head.shape[-ndim:] + + # cast + head.torch(device) + + # final report + if verbose == 2: + print(f"Readout time: {round(float(head.t[-1]), 2)} ms") + if head.traj is not None: + print(f"Trajectory range: ({head.traj.min()},{head.traj.max()})") + print( + f"Trajectory shape: (ncontrasts={head.traj.shape[0]}, nviews={head.traj.shape[1]}, nsamples={head.traj.shape[2]}, ndim={head.traj.shape[-1]})" + ) + if head.dcf is not None: + print( + f"DCF shape: (ncontrasts={head.dcf.shape[0]}, nviews={head.dcf.shape[1]}, nsamples={head.dcf.shape[2]})" + ) + if head.FA is not None: + if len(np.unique(head.FA)) > 1: + if verbose == 2: + print(f"Flip Angle train length: {len(head.FA)}") + else: + FA = float(np.unique(head.FA)[0]) + head.FA = FA + if verbose == 2: + print(f"Constant FA: {round(abs(FA), 2)} deg") + if head.TR is not None: + if len(np.unique(head.TR)) > 1: + if verbose == 2: + print(f"TR train length: {len(head.TR)}") + else: + TR = float(np.unique(head.TR)[0]) + head.TR = TR + if verbose == 2: + print(f"Constant TR: {round(TR, 2)} ms") + if head.TE is not None: + if len(np.unique(head.TE)) > 1: + if verbose == 2: + print(f"Echo train length: {len(head.TE)}") + else: + TE = float(np.unique(head.TE)[0]) + head.TE = TE + if verbose == 2: + print(f"Constant TE: {round(TE, 2)} ms") + if head.TI is not None and np.allclose(head.TI, 0.0) is False: + if len(np.unique(head.TI)) > 1: + if verbose == 2: + print(f"Inversion train length: {len(head.TI)}") + else: + TI = float(np.unique(head.TI)[0]) + head.TI = TI + if verbose == 2: + print(f"Constant TI: {round(TI, 2)} ms") + + tend = time.time() + if verbose >= 1: + print(f"done! Elapsed time: {round(tend-tstart, 2)} s...") + + return head
+ + +
[docs]def write_acqheader(filename, head, filepath="./", dataformat="hdf5"):
+    """
+    Write acquisition header to file.
+
+    Supported output formats are ``hdf5`` (faster I/O) and ``mrd``.
+
+    Parameters
+    ----------
+    filename : str
+        Name of the file.
+    head : Header
+        Structure containing trajectory of shape ``(ncontrasts, nviews, npts, ndim)``
+        and meta information (shape, resolution, spacing, etc).
+    filepath : str, optional
+        Path to file. The default is ``./``.
+    dataformat : str, optional
+        Available formats (``mrd`` or ``hdf5``). The default is ``hdf5``.
+
+    """
+    head = copy.deepcopy(head)
+    head.ref_dicom = None
+    if dataformat == "hdf5":
+        _base.write_base_acqheader(head, os.path.join(filepath, filename))
+    elif dataformat == "mrd":
+        _mrd.write_mrd_acqhead(head, os.path.join(filepath, filename))
+    else:
+        raise RuntimeError(
+            f"Data format = {dataformat} not recognized! Please use 'mrd' or 'hdf5'"
+        )
+ + +# %% sub routines +def _get_error(ex): + trace = [] + tb = ex.__traceback__ + while tb is not None: + trace.append( + { + "filename": tb.tb_frame.f_code.co_filename, + "name": tb.tb_frame.f_code.co_name, + "lineno": tb.tb_lineno, + } + ) + tb = tb.tb_next + + return str({"type": type(ex).__name__, "message": str(ex), "trace": trace}) +
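A hedged usage sketch of the two functions above: read an acquisition header from one supported format and store a copy in the (faster) HDF5 format. The file names are illustrative.

import deepmr.io

head = deepmr.io.read_acqheader("traj.h5", verbose=1)               # e.g. an MRD or HDF5 header
deepmr.io.write_acqheader("traj_copy.h5", head, dataformat="hdf5")  # round trip via HDF5
head_copy = deepmr.io.read_acqheader("traj_copy.h5")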
\ No newline at end of file diff --git a/_modules/deepmr/io/image/api.html b/_modules/deepmr/io/image/api.html new file mode 100644 index 00000000..dd42a772 --- /dev/null +++ b/_modules/deepmr/io/image/api.html @@ -0,0 +1,1011 @@ + deepmr.io.image.api — deepmr documentation
Source code for deepmr.io.image.api

+"""Image IO API."""
+
+__all__ = ["read_image", "write_image"]
+
+import time
+
+import numpy as np
+import torch
+
+from . import dicom as _dicom
+from . import nifti as _nifti
+
+# from .dicom import *  # noqa
+# from .nifti import *  # noqa
+
+
+
[docs]def read_image(filepath, acqheader=None, device="cpu", verbose=0): + """ + Read image data from file. + + Supported formats are ``DICOM`` and ``NIfTI``. + + Parameters + ---------- + filepath : str + Path to image file. Supports wildcard (e.g., ``/path-to-dicom-exam/*``, ``/path-to-BIDS/*.nii``). + acqheader : Header, optional + Acquisition header loaded from trajectory. + If not provided, assume Cartesian acquisition and infer from data. + The default is ``None``. + device : str, optional + Computational device for internal attributes. The default is ``cpu``. + verbose : int, optional + Verbosity level ``(0=Silent, 1=Less, 2=More)``. The default is ``0``. + + Returns + ------- + image : torch.tensor + Complex image data. + head : Header + Metadata for image reconstruction. + + Example + ------- + >>> import deepmr + + Get the filenames for exemplary DICOM and NIfTI files. + + >>> dcmpath = deepmr.testdata("dicom") + >>> niftipath = deepmr.testdata("nifti") + + Load the file contents. + + >>> image_dcm, head_dcm = deepmr.io.read_image(dcmpath) + >>> image_nii, head_nii = deepmr.io.read_image(niftipath) + + The result is a image/header pair. ``Image`` contains image-space data. + Here, it represents a 2D cartesian acquisition with 3 echoes, 2 slices and 192x192 matrix size. + + >>> image_dcm.shape + torch.Size([3, 2, 192, 192]) + >>> image_nii.shape + torch.Size([3, 2, 192, 192]) + + ``Head`` contains the acquisition information. We can inspect the image shape and resolution: + + >>> head_dcm.shape + tensor([ 2, 192, 192]) + >>> head_nii.shape + tensor([ 2, 192, 192]) + >>> head_dcm.ref_dicom.SpacingBetweenSlices + '10.5' + >>> head_nii.ref_dicom.SpacingBetweenSlices + '10.5' + >>> head_dcm.ref_dicom.SliceThickness + '7.0' + >>> head_nii.ref_dicom.SliceThickness + '7.0' + >>> head_dcm.ref_dicom.PixelSpacing + [0.67, 0.67] + >>> head_nii.ref_dicom.PixelSpacing + [0.67,0.67] + + ``Head`` also contains contrast information (for forward simulation and parameter inference): + + >>> head_dcm.FA + 180.0 + >>> head_nii.FA + 180.0 + >>> head_dcm.TE + tensor([ 20.0, 40.0, 60.0]) + >>> head_nii.TE + tensor([ 20.0, 40.0, 60.0]) + >>> head_dcm.TR + 3000.0 + >>> head_nii.TR + 3000.0 + + Notes + ----- + The returned ``image`` tensor contains image space data. Dimensions are defined as following: + + * **2D:** ``(ncontrasts, nslices, ny, nx)``. + * **3D:** ``(ncontrasts, nz, ny, nx)``. + + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * resolution (torch.Tensor): + This is the expected image resolution in mm of shape ``(dz, dy, dx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. + * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5, 0.5)`` + with shape ``(ncontrasts, nviews, nsamples, ndims)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. + * FA (torch.Tensor, float): + This is either the acquisition flip angle in degrees or the list + of flip angles of shape ``(ncontrasts,)`` for each image in the series. + * TR (torch.Tensor, float): + This is either the repetition time in ms or the list + of repetition times of shape ``(ncontrasts,)`` for each image in the series. 
+ * TE (torch.Tensor, float): + This is either the echo time in ms or the list + of echo times of shape ``(ncontrasts,)`` for each image in the series. + * TI (torch.Tensor, float): + This is either the inversion time in ms or the list + of inversion times of shape ``(ncontrasts,)`` for each image in the series. + * user (dict): + User parameters. Some examples are: + + * ordering (torch.Tensor): + Indices for reordering (acquisition to reconstruction) + of acquired k-space data, shaped ``(3, nslices * ncontrasts * nview)``, whose rows are + ``contrast_index``, ``slice_index`` and ``view_index``, respectively. + * mode (str): + Acquisition mode (``2Dcart``, ``3Dcart``, ``2Dnoncart``, ``3Dnoncart``). + * separable (bool): + Whether the acquisition can be decoupled by fft along ``slice`` / ``readout`` directions + (3D stack-of-noncartesian / 3D cartesian, respectively) or not (3D noncartesian and 2D acquisitions). + * slice_profile (torch.Tensor): + Flip angle scaling along slice profile of shape ``(nlocs,)``. + * basis (torch.Tensor): + Low rank subspace basis for subspace reconstruction of shape ``(ncoeff, ncontrasts)``. + + * affine (np.ndarray): + Affine matrix describing image spacing, orientation and origin of shape ``(4, 4)``. + * ref_dicom (pydicom.Dataset): + Template dicom for image export. + + """ + tstart = time.time() + if verbose >= 1: + print(f"Reading image from file {filepath}...", end="\t") + + done = False + + # convert header to numpy + if acqheader is not None: + acqheader.numpy() + + # dicom + if verbose == 2: + t0 = time.time() + try: + image, head = _dicom.read_dicom(filepath) + done = True + except Exception as e: + msg0 = _get_error(e) + + # nifti + if verbose == 2: + t0 = time.time() + try: + image, head = _nifti.read_nifti(filepath) + done = True + except Exception as e: + msg1 = _get_error(e) + + if not (done): + raise RuntimeError( + f"File (={filepath}) not recognized! Error:\nDICOM {msg0}\nNIfTI {msg1}" + ) + if verbose == 2: + t1 = time.time() + print(f"done! 
Elapsed time: {round(t1-t0, 2)} s") + + # load trajectory info from acqheader if present + if acqheader is not None: + if acqheader.traj is not None: + head.traj = acqheader.traj + if acqheader.dcf is not None: + head.dcf = acqheader.dcf + if acqheader.t is not None: + head.t = acqheader.t + if acqheader.user is not None: + head.user = acqheader.user + if acqheader.FA is not None: + head.FA = acqheader.FA + if acqheader.TR is not None: + head.TR = acqheader.TR + if acqheader.TE is not None: + head.TE = acqheader.TE + if acqheader.TI is not None: + head.TI = acqheader.TI + + # remove flip and transpose + head.transpose = None + head.flip = None + + # final report + if verbose == 2: + if len(image.shape) == 3: + print( + f"Image shape: (nz={image.shape[-3]}, ny={image.shape[-2]}, nx={image.shape[-1]})" + ) + else: + print( + f"Image shape: (ncontrasts={image.shape[0]}, nz={image.shape[-3]}, ny={image.shape[-2]}, nx={image.shape[-1]})" + ) + if head.t is not None: + print(f"Readout time: {round(float(head.t[-1]), 2)} ms") + if head.traj is not None: + print(f"Trajectory range: ({head.traj.min()},{head.traj.max()})") + print( + f"Trajectory shape: (ncontrasts={head.traj.shape[0]}, nviews={head.traj.shape[1]}, nsamples={head.traj.shape[2]}, ndim={head.traj.shape[-1]})" + ) + if head.dcf is not None: + print( + f"DCF shape: (ncontrasts={head.dcf.shape[0]}, nviews={head.dcf.shape[1]}, nsamples={head.dcf.shape[2]})" + ) + if head.FA is not None: + if len(np.unique(head.FA)) > 1: + if verbose == 2: + print(f"Flip Angle train length: {len(head.FA)}") + else: + FA = float(np.unique(head.FA)[0]) + head.FA = FA + if verbose == 2: + print(f"Constant FA: {round(abs(FA), 2)} deg") + if head.TR is not None: + if len(np.unique(head.TR)) > 1: + if verbose == 2: + print(f"TR train length: {len(head.TR)}") + else: + TR = float(np.unique(head.TR)[0]) + head.TR = TR + if verbose == 2: + print(f"Constant TR: {round(TR, 2)} ms") + if head.TE is not None: + if len(np.unique(head.TE)) > 1: + if verbose == 2: + print(f"Echo train length: {len(head.TE)}") + else: + TE = float(np.unique(head.TE)[0]) + head.TE = TE + if verbose == 2: + print(f"Constant TE: {round(TE, 2)} ms") + if head.TI is not None and np.allclose(head.TI, 0.0) is False: + if len(np.unique(head.TI)) > 1: + if verbose == 2: + print(f"Inversion train length: {len(head.TI)}") + else: + TI = float(np.unique(head.TI)[0]) + head.TI = TI + if verbose == 2: + print(f"Constant TI: {round(TI, 2)} ms") + + # cast + image = torch.as_tensor( + np.ascontiguousarray(image), dtype=torch.complex64, device=device + ) + head.torch(device) + + tend = time.time() + if verbose == 1: + print(f"done! Elapsed time: {round(tend-tstart, 2)} s") + elif verbose == 2: + print(f"Total elapsed time: {round(tend-tstart, 2)} s") + + return image, head
+ + +
[docs]def write_image(
+    filename,
+    image,
+    head=None,
+    dataformat="nifti",
+    filepath="./",
+    series_description="",
+    series_number_offset=0,
+    series_number_scale=1000,
+    rescale=False,
+    anonymize=False,
+    verbose=False,
+):
+    """
+    Write image to disk.
+
+    Parameters
+    ----------
+    filename : str
+        Name of the file.
+    image : np.ndarray
+        Complex image data of shape ``(ncontrasts, nslices, ny, nx)``.
+        See ``'Notes'`` for additional information.
+    head : Header, optional
+        Structure containing trajectory of shape ``(ncontrasts, nviews, npts, ndim)``
+        and meta information (shape, resolution, spacing, etc). If None,
+        assume 1mm isotropic resolution, contiguous slices and axial orientation.
+        The default is None.
+    dataformat : str, optional
+        Available formats (``dicom`` or ``nifti``). The default is ``nifti``.
+    filepath : str, optional
+        Path to file. The default is ``./``.
+    series_description : str, optional
+        Custom series description. The default is ``""`` (empty string).
+    series_number_offset : int, optional
+        Series number offset with respect to the acquired one.
+        Final series number is ``series_number_scale * acquired_series_number + series_number_offset``.
+        The default is ``0``.
+    series_number_scale : int, optional
+        Series number multiplicative scaling with respect to the acquired one.
+        Final series number is ``series_number_scale * acquired_series_number + series_number_offset``.
+        The default is ``1000``.
+    rescale : bool, optional
+        If True, rescale image intensity between ``0`` and ``int16_max``.
+        Beware! Avoid this if you are working with quantitative maps.
+        The default is ``False``.
+    anonymize : bool, optional
+        If True, remove sensitive info from header. The default is ``False``.
+    verbose : bool, optional
+        Verbosity flag. The default is ``False``.
+
+    Example
+    -------
+    >>> import os
+    >>> import deepmr
+    >>> import tempfile
+
+    Get the filename for an example DICOM file.
+
+    >>> filepath = deepmr.testdata("dicom")
+
+    Load the file contents.
+
+    >>> image_orig, head_orig = deepmr.io.read_image(filepath)
+    >>> with tempfile.TemporaryDirectory() as tempdir:
+    >>>     dcmpath = os.path.join(tempdir, "dicomtest")
+    >>>     niftipath = os.path.join(tempdir, "niftitest.nii")
+    >>>     deepmr.io.write_image(dcmpath, image_orig, head_orig, dataformat="dicom")
+    >>>     deepmr.io.write_image(niftipath, image_orig, head_orig, dataformat="nifti")
+    >>>     image_dcm, head_dcm = deepmr.io.read_image(dcmpath)
+    >>>     image_nii, head_nii = deepmr.io.read_image(niftipath)
+
+    The result is an image/header pair. ``Image`` contains image-space data.
+    Here, it represents a 2D cartesian acquisition with 3 echoes, 2 slices and 192x192 matrix size.
+
+    >>> image_dcm.shape
+    torch.Size([3, 2, 192, 192])
+    >>> image_nii.shape
+    torch.Size([3, 2, 192, 192])
+
+    ``Head`` contains the acquisition information.
We can inspect the image shape and resolution:
+
+    >>> head_dcm.shape
+    tensor([ 2, 192, 192])
+    >>> head_nii.shape
+    tensor([ 2, 192, 192])
+    >>> head_dcm.ref_dicom.SpacingBetweenSlices
+    '10.5'
+    >>> head_nii.ref_dicom.SpacingBetweenSlices
+    '10.5'
+    >>> head_dcm.ref_dicom.SliceThickness
+    '7.0'
+    >>> head_nii.ref_dicom.SliceThickness
+    '7.0'
+    >>> head_dcm.ref_dicom.PixelSpacing
+    [0.67, 0.67]
+    >>> head_nii.ref_dicom.PixelSpacing
+    [0.67, 0.67]
+
+    ``Head`` also contains contrast information (for forward simulation and parameter inference):
+
+    >>> head_dcm.FA
+    180.0
+    >>> head_nii.FA
+    180.0
+    >>> head_dcm.TE
+    tensor([ 20.0, 40.0, 60.0])
+    >>> head_nii.TE
+    tensor([ 20.0, 40.0, 60.0])
+    >>> head_dcm.TR
+    3000.0
+    >>> head_nii.TR
+    3000.0
+
+    Notes
+    -----
+    When the image to be written is the result of a reconstruction performed on k-space data loaded using :func:`deepmr.io.read_rawdata`,
+    axis order depends on acquisition mode:
+
+    * **2Dcart:** ``(nslices, ncontrasts, ny, nx)``
+    * **2Dnoncart:** ``(nslices, ncontrasts, ny, nx)``
+    * **3Dcart:** ``(ncontrasts, nz, ny, nx)``
+    * **3Dnoncart:** ``(nx, ncontrasts, nz, ny)``
+
+    In this case, the image should be transposed to ``(ncontrasts, nslices, ny, nx)`` or ``(ncontrasts, nz, ny, nx)`` for 2D/3D acquisitions, respectively.
+    If provided, ``head`` will contain the appropriate permutation order (:func:`head.transpose`):
+
+    * **2Dcart:** ``head.transpose = [1, 0, 2, 3]``
+    * **2Dnoncart:** ``head.transpose = [1, 0, 2, 3]``
+    * **3Dcart:** ``head.transpose = [0, 1, 2, 3]``
+    * **3Dnoncart:** ``head.transpose = [1, 2, 3, 0]``
+
+    The default is an empty list (no transposition).
+
+    If ``head`` is not provided, the user should manually transpose the image tensor to match the required shape.
+
+    """
+    if dataformat == "dicom":
+        _dicom.write_dicom(
+            filename,
+            image,
+            filepath,
+            head,
+            series_description,
+            series_number_offset,
+            series_number_scale,
+            rescale,
+            anonymize,
+            verbose,
+        )
+    elif dataformat == "nifti":
+        _nifti.write_nifti(
+            filename,
+            image,
+            filepath,
+            head,
+            series_description,
+            series_number_offset,
+            series_number_scale,
+            rescale,
+            anonymize,
+            verbose,
+        )
+    else:
+        raise RuntimeError(
+            f"Data format = {dataformat} not recognized! Please use 'dicom' or 'nifti'"
+        )
+ + +# %% sub routines +def _get_error(ex): + trace = [] + tb = ex.__traceback__ + while tb is not None: + trace.append( + { + "filename": tb.tb_frame.f_code.co_filename, + "name": tb.tb_frame.f_code.co_name, + "lineno": tb.tb_lineno, + } + ) + tb = tb.tb_next + + return str({"type": type(ex).__name__, "message": str(ex), "trace": trace}) +
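A hedged sketch of the export workflow described in the ``write_image`` Notes above: a reconstructed volume is permuted and flipped according to the header before export. Here ``recon`` stands in for an actual reconstruction result, and the guards cover the default empty or None ``transpose`` and ``flip`` fields.

import torch
import deepmr
import deepmr.io

image, head = deepmr.io.read_image(deepmr.testdata("dicom"))
recon = image  # placeholder for the output of an actual reconstruction step

if head.transpose is not None and len(head.transpose) > 0:
    # e.g. [1, 0, 2, 3] for 2D acquisitions, [1, 2, 3, 0] for 3D non-Cartesian
    recon = recon.permute(*[int(ax) for ax in head.transpose])
if head.flip is not None and len(head.flip) > 0:
    recon = torch.flip(recon, dims=[int(ax) for ax in head.flip])

deepmr.io.write_image("recon", recon, head, dataformat="nifti")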
\ No newline at end of file diff --git a/_modules/deepmr/io/kspace/api.html b/_modules/deepmr/io/kspace/api.html new file mode 100644 index 00000000..83c3ab1e --- /dev/null +++ b/_modules/deepmr/io/kspace/api.html @@ -0,0 +1,1000 @@ + deepmr.io.kspace.api — deepmr documentation
Source code for deepmr.io.kspace.api

+"""KSpace IO API."""
+
+__all__ = ["read_rawdata"]
+
+import math
+import time
+
+import numpy as np
+import torch
+
+from . import gehc as _gehc
+from . import mrd as _mrd
+
+# from . import siemens as _siemens
+
+
+
[docs]def read_rawdata(filepath, acqheader=None, device="cpu", verbose=0): + """ + Read kspace data from file. + + Currently, handles data written in ISMRMD format [1] (vendor agnostic) + and GEHC proprietary raw data (requires access to a private repository). + + Parameters + ---------- + filepath : str + Path to kspace file. Supports wildcard (e.g., ``/path-to-data-folder/*.h5``). + acqheader : Header, optional + Acquisition header loaded from trajectory. + If not provided, assume Cartesian acquisition and infer from data. + The default is ``None``. + device : str, optional + Computational device for internal attributes. The default is ``cpu``. + verbose : int, optional + Verbosity level ``(0=Silent, 1=Less, 2=More)``. The default is ``0``. + + Returns + ------- + data : torch.tensor + Complex k-space data. + head : Header + Metadata for image reconstruction. + + Example + ------- + >>> import deepmr + + Get the filename for an example ``.mrd`` file. + + >>> filepath = deepmr.testdata("mrd") + + Load the file contents. + + >>> data, head = deepmr.io.read_rawdata(filepath) + + The result is a data/header pair. ``Data`` contains k-space data. + Here, it represents a 2D spiral acquisition with 1 slice, 36 coils, 32 arms and 1284 samples per arm: + + >>> data.shape + torch.Size([1, 36, 1, 32, 1284]) + + ``Head`` contains the acquisition information. We can inspect the k-space trajectory and dcf size, + the expected image shape and resolution: + + >>> head.traj.shape + torch.Size([1, 32, 1284, 2]) + >>> head.dcf.shape + torch.Size([1, 32, 1284]) + >>> head.shape + tensor([ 1, 192, 192]) + >>> head.ref_dicom.SliceThickness + '5.0' + >>> head.ref_dicom.PixelSpacing + [1.56, 1.56] + + ``Head`` also contains contrast information (for forward simulation and parameter inference): + + >>> head.FA + 10.0 + >>> head.TE + 0.86 + >>> head.TR + 4.96 + + Notes + ----- + The returned ``data`` tensor contains raw k-space data. Dimensions are defined as following: + + * **2Dcart:** ``(nslices, ncoils, ncontrasts, ny, nx)``. + * **2Dnoncart:** ``(nslices, ncoils, ncontrasts, nviews, nsamples)``. + * **3Dcart:** ``(nx, ncoils, ncontrasts, nz, ny)``. + * **3Dnoncart:** ``(ncoils, ncontrasts, nviews, nsamples)``. + + When possible, data are already pre-processed: + + * For Cartesian data (2D and 3D) readout oversampling is removed if the number of samples along readout is larger than the number of rows in the image space (shape[-1]). + * For Non-Cartesian (2D and 3D), fov is centered according to trajectory and isocenter info from the header. + * For separable acquisitions (3D stack-of-Non-Cartesians and 3D Cartesians), k-space is decoupled via FFT (along slice and readout axes, respectively). + + The returned ``head`` (:func:`deepmr.Header`) is a structure with the following fields: + + * shape (torch.Tensor): + This is the expected image size of shape ``(nz, ny, nx)``. + * resolution (torch.Tensor): + This is the expected image resolution in mm of shape ``(dz, dy, dx)``. + * t (torch.Tensor): + This is the readout sampling time ``(0, t_read)`` in ``ms``. + with shape ``(nsamples,)``. + * traj (torch.Tensor): + This is the k-space trajectory normalized as ``(-0.5, 0.5)`` + with shape ``(ncontrasts, nviews, nsamples, ndims)``. + * dcf (torch.Tensor): + This is the k-space sampling density compensation factor + with shape ``(ncontrasts, nviews, nsamples)``. 
+ * FA (torch.Tensor, float): + This is either the acquisition flip angle in degrees or the list + of flip angles of shape ``(ncontrasts,)`` for each image in the series. + * TR (torch.Tensor, float): + This is either the repetition time in ms or the list + of repetition times of shape ``(ncontrasts,)`` for each image in the series. + * TE (torch.Tensor, float): + This is either the echo time in ms or the list + of echo times of shape ``(ncontrasts,)`` for each image in the series. + * TI (torch.Tensor, float): + This is either the inversion time in ms or the list + of inversion times of shape ``(ncontrasts,)`` for each image in the series. + * user (dict): + User parameters. Some examples are: + + * ordering (torch.Tensor): + Indices for reordering (acquisition to reconstruction) + of acquired k-space data, shaped ``(3, nslices * ncontrasts * nview)``, whose rows are + ``contrast_index``, ``slice_index`` and ``view_index``, respectively. + * mode (str): + Acquisition mode (``2Dcart``, ``3Dcart``, ``2Dnoncart``, ``3Dnoncart``). + * separable (bool): + Whether the acquisition can be decoupled by fft along ``slice``/ ``readout`` directions + (3D stack-of-noncartesian / 3D cartesian, respectively) or not (3D noncartesian and 2D acquisitions). + * slice_profile (torch.Tensor): + Flip angle scaling along slice profile of shape ``(nlocs,)``. + * basis (torch.Tensor): + Low rank subspace basis for subspace reconstruction of shape ``(ncoeff, ncontrasts)``. + + * affine (np.ndarray): + Affine matrix describing image spacing, orientation and origin of shape ``(4, 4)``. + * ref_dicom (pydicom.Dataset): + Template dicom for image export. + * flip (list): + List of spatial axis to be flipped after image reconstruction. + The default is an empty list (no flipping). + * transpose (list): + Permutation of image dimensions after reconstruction, depending on acquisition mode: + + * **2Dcart:** reconstructed image has ``(nslices, ncontrasts, ny, nx) -> transpose = [1, 0, 2, 3]`` + * **2Dnoncart:** reconstructed image has ``(nslices, ncontrasts, ny, nx) -> transpose = [1, 0, 2, 3]`` + * **3Dcart:** reconstructed image has ``(ncontrasts, nz, ny, nx) -> transpose = [0, 1, 2, 3]`` + * **3Dnoncart:** reconstructed image has ``(nx, ncontrasts, nz, ny) -> transpose = [1, 2, 3, 0]`` + + The default is an empty list (no transposition). + + References + ---------- + [1]: Inati, S.J., Naegele, J.D., Zwart, N.R., Roopchansingh, V., Lizak, M.J., Hansen, D.C., Liu, C.-Y., Atkinson, D., + Kellman, P., Kozerke, S., Xue, H., Campbell-Washburn, A.E., Sørensen, T.S. and Hansen, M.S. (2017), + ISMRM Raw data format: A proposed standard for MRI raw datasets. Magn. Reson. Med., 77: 411-421. + https://doi.org/10.1002/mrm.26089 + + """ + tstart = time.time() + if verbose >= 1: + print(f"Reading raw k-space from file {filepath}...", end="\t") + + done = False + + # convert header to numpy + if acqheader is not None: + acqheader.numpy() + + # mrd + if verbose == 2: + t0 = time.time() + try: + data, head = _mrd.read_mrd_rawdata(filepath) + done = True + except Exception as e: + msg0 = _get_error(e) + + # gehc + if not (done): + try: + data, head = _gehc.read_gehc_rawdata(filepath, acqheader) + done = True + except Exception as e: + msg1 = _get_error(e) + + # siemens + # if not(done): + # try: + # head = _siemens.read_siemens_rawdata(filepath, acqheader) + # done = True + # except Exception: + # pass + + # check if we loaded data + if not (done): + raise RuntimeError( + f"File (={filepath}) not recognized! 
Errors:\nMRD: {msg0}\nGEHC: {msg1}" + ) + if verbose == 2: + t1 = time.time() + print(f"done! Elapsed time: {round(t1-t0, 2)} s") + + # transpose + data = data.transpose(2, 0, 1, 3, 4) # (slice, coil, contrast, view, sample) + + # select actual readout + if verbose == 2: + nsamples = data.shape[-1] + print("Selecting actual readout samples...", end="\t") + t0 = time.time() + data = _select_readout(data, head) + if verbose == 2: + t1 = time.time() + print( + f"done! Selected {data.shape[-1]} out of {nsamples} samples. Elapsed time: {round(t1-t0, 2)} s" + ) + + # center fov + if verbose == 2: + if head.traj is not None: + t0 = time.time() + ndim = head.traj.shape[-1] + shift = head._shift[:ndim] + if ndim == 2: + print(f"Shifting FoV by (dx={shift[0]}, dy={shift[1]}) mm", end="\t") + if ndim == 3: + print( + f"Shifting FoV by (dx={shift[0]}, dy={shift[1]}, dz={shift[2]}) mm", + end="\t", + ) + data = _fov_centering(data, head) + if verbose == 2: + if head.traj is not None: + t1 = time.time() + print(f"done! Elapsed time: {round(t1-t0, 2)} s") + + # remove oversampling for Cartesian + if "mode" in head.user: + if head.user["mode"][2:] == "cart": + if verbose == 2: + t0 = time.time() + ns1 = data.shape[0] + ns2 = head.shape[0] + print( + f"Removing oversampling along readout ({round(ns1/ns2, 2)})...", + end="\t", + ) + data, head = _remove_oversampling(data, head) + if verbose == 2: + t1 = time.time() + print(f"done! Elapsed time: {round(t1-t0, 2)} s") + + # transpose readout in slice direction for 3D Cartesian + if "mode" in head.user: + if head.user["mode"] == "3Dcart": + data = data.transpose( + -1, 1, 2, 0, 3 + ) # (z, ch, e, y, x) -> (x, ch, e, z, y) + + # decouple separable acquisition + if "separable" in head.user and head.user["separable"]: + if verbose == 2: + t0 = time.time() + print("Separable 3D acquisition, performing FFT along slice...", end="\t") + data = _fft(data, 0) + if verbose == 2: + t1 = time.time() + print(f"done! 
Elapsed time: {round(t1-t0, 4)} s") + + # set-up transposition + if "mode" in head.user: + if head.user["mode"] == "2Dcart": + head.transpose = [1, 0, 2, 3] + if verbose == 2: + print("Acquisition mode: 2D Cartesian") + print( + f"K-space shape: (nslices={data.shape[0]}, nchannels={data.shape[1]}, ncontrasts={data.shape[2]}, ny={data.shape[3]}, nx={data.shape[4]})" + ) + print( + f"Expected image shape: (nslices={data.shape[0]}, nchannels={data.shape[1]}, ncontrasts={data.shape[2]}, ny={head.shape[1]}, nx={head.shape[2]})" + ) + elif head.user["mode"] == "2Dnoncart": + head.transpose = [1, 0, 2, 3] + if verbose == 2: + print("Acquisition mode: 2D Non-Cartesian") + print( + f"K-space shape: (nslices={data.shape[0]}, nchannels={data.shape[1]}, ncontrasts={data.shape[2]}, nviews={data.shape[3]}, nsamples={data.shape[4]})" + ) + print( + f"Expected image shape: (nslices={data.shape[0]}, nchannels={data.shape[1]}, ncontrasts={data.shape[2]}, ny={head.shape[1]}, nx={head.shape[2]})" + ) + elif head.user["mode"] == "3Dnoncart": + data = data[0] + head.transpose = [1, 0, 2, 3] + if verbose == 2: + print("Acquisition mode: 3D Non-Cartesian") + print( + f"K-space shape: (nchannels={data.shape[0]}, ncontrasts={data.shape[1]}, nviews={data.shape[2]}, nsamples={data.shape[3]})" + ) + print( + f"Expected image shape: (nchannels={data.shape[0]}, ncontrasts={data.shape[1]}, nz={head.shape[0]}, ny={head.shape[1]}, nx={head.shape[2]})" + ) + elif head.user["mode"] == "3Dcart": + head.transpose = [1, 2, 3, 0] + if verbose == 2: + print("Acquisition mode: 3D Cartesian") + print( + f"K-space shape: (nx={data.shape[0]}, nchannels={data.shape[1]}, ncontrasts={data.shape[2]}, nz={data.shape[3]}, ny={data.shape[4]})" + ) + print( + f"Expected image shape: (nx={head.shape[2]}, nchannels={data.shape[1]}, ncontrasts={data.shape[2]}, nz={head.shape[0]}, ny={head.shape[1]})" + ) + + # remove unused trajectory for cartesian + if head.user["mode"][2:] == "cart": + head.traj = None + head.dcf = None + + # clean header + head.user.pop("mode", None) + head.user.pop("separable", None) + + # final report + if verbose == 2: + print(f"Readout time: {round(float(head.t[-1]), 2)} ms") + if head.traj is not None: + print(f"Trajectory range: ({head.traj.min()},{head.traj.max()})") + print( + f"Trajectory shape: (ncontrasts={head.traj.shape[0]}, nviews={head.traj.shape[1]}, nsamples={head.traj.shape[2]}, ndim={head.traj.shape[-1]})" + ) + if head.dcf is not None: + if len(head.dcf.shape) == 1: + print(f"DCF shape: (nsamples={head.dcf.shape[-1]})") + else: + print( + f"DCF shape: (ncontrasts={head.dcf.shape[0]}, nviews={head.dcf.shape[1]}, nsamples={head.dcf.shape[2]})" + ) + if head.FA is not None: + if len(np.unique(head.FA)) > 1: + if verbose == 2: + print(f"Flip Angle train length: {len(head.FA)}") + else: + FA = float(np.unique(head.FA)[0]) + head.FA = FA + if verbose == 2: + print(f"Constant FA: {round(abs(FA), 2)} deg") + if head.TR is not None: + if len(np.unique(head.TR)) > 1: + if verbose == 2: + print(f"TR train length: {len(head.TR)}") + else: + TR = float(np.unique(head.TR)[0]) + head.TR = TR + if verbose == 2: + print(f"Constant TR: {round(TR, 2)} ms") + if head.TE is not None: + if len(np.unique(head.TE)) > 1: + if verbose == 2: + print(f"Echo train length: {len(head.TE)}") + else: + TE = float(np.unique(head.TE)[0]) + head.TE = TE + if verbose == 2: + print(f"Constant TE: {round(TE, 2)} ms") + if head.TI is not None and np.allclose(head.TI, 0.0) is False: + if len(np.unique(head.TI)) > 1: + if verbose == 2: + 
print(f"Inversion train length: {len(head.TI)}") + else: + TI = float(np.unique(head.TI)[0]) + head.TI = TI + if verbose == 2: + print(f"Constant TI: {round(TI, 2)} ms") + + # cast + data = torch.as_tensor( + np.ascontiguousarray(data), dtype=torch.complex64, device=device + ) + head.torch(device) + + tend = time.time() + if verbose == 1: + print(f"done! Elapsed time: {round(tend-tstart, 2)} s") + elif verbose == 2: + print(f"Total elapsed time: {round(tend-tstart, 2)} s") + + return data, head
+ + +# %% sub routines +def _get_error(ex): + trace = [] + tb = ex.__traceback__ + while tb is not None: + trace.append( + { + "filename": tb.tb_frame.f_code.co_filename, + "name": tb.tb_frame.f_code.co_name, + "lineno": tb.tb_lineno, + } + ) + tb = tb.tb_next + + return str({"type": type(ex).__name__, "message": str(ex), "trace": trace}) + + +def _select_readout(data, head): + if head._adc is not None: + if head._adc[-1] == data.shape[-1]: + data = data[..., head._adc[0] :] + else: + data = data[..., head._adc[0] : head._adc[1] + 1] + return data + + +def _fov_centering(data, head): + if head.traj is not None and np.allclose(head._shift, 0.0) is False: + # ndimensions + ndim = head.traj.shape[-1] + + # shift (mm) + dr = np.asarray(head._shift)[:ndim] + + # convert in units of voxels + dr = dr / head.resolution[::-1][:ndim] / head.shape[::-1][:ndim] + + # apply + data *= np.exp(1j * 2 * math.pi * (head.traj * dr).sum(axis=-1)) + + return data + + +def _remove_oversampling(data, head): + if data.shape[-1] != head.shape[-1]: # oversampled + center = int(data.shape[-1] // 2) + hwidth = int(head.shape[-1] // 2) + data = _fft(data, -1) + data = data[..., center - hwidth : center + hwidth] + data = _fft(data, -1) + head.t = np.linspace(0, head.t[-1], data.shape[-1]) + + return data, head + + +def _fft(data, axis): + tmp = torch.as_tensor(data) + tmp = torch.fft.fftshift( + torch.fft.fft(torch.fft.fftshift(tmp, dim=axis), dim=axis), dim=axis + ) + return tmp.numpy() +
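A short usage sketch of ``read_rawdata``, using the test dataset referenced in the docstring above: the density compensation factors in the header broadcast directly over the trailing ``(views, samples)`` dimensions of the k-space tensor.

import deepmr
import deepmr.io

data, head = deepmr.io.read_rawdata(deepmr.testdata("mrd"), verbose=1)
# data: (nslices, ncoils, ncontrasts, nviews, nsamples); head.dcf: (ncontrasts, nviews, nsamples)
if head.dcf is not None:
    data = data * head.dcf  # density-compensated k-space, e.g. for an adjoint NUFFT reconstruction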
\ No newline at end of file diff --git a/_modules/deepmr/linops/coil.html b/_modules/deepmr/linops/coil.html new file mode 100644 index 00000000..bf19506c --- /dev/null +++ b/_modules/deepmr/linops/coil.html @@ -0,0 +1,745 @@ + deepmr.linops.coil — deepmr documentation
Source code for deepmr.linops.coil

+"""Sensitivity coil linear operator."""
+
+__all__ = ["SenseOp", "SenseAdjointOp"]
+
+import numpy as np
+import torch
+
+from . import base
+
+
+
[docs]class SenseOp(base.Linop): + """ + Multiply input image by coil sensitivity profile. + + Coil sensitivity profiles are expected to have the following dimensions: + + * 2D MRI: ``(nsets, nslices, ncoils, ny, nx)`` + * 3D Cartesian MRI: ``(nsets, nx, ncoils, nz, ny)`` + * 3D NonCartesian MRI: ``(nsets, ncoils, nz, ny, nx)`` + + where ``nsets`` represents multiple sets of coil sensitivity estimation + for soft-SENSE implementations (e.g., ESPIRIT), equal to ``1`` for conventional SENSE + and ``ncoils`` represents the number of receiver channels in the coil array. + + """ + +
[docs]    def __init__(self, ndim, sensmap, device=None, multicontrast=True):
+        super().__init__(ndim)
+
+        # cast map to tensor
+        self.sensmap = torch.as_tensor(sensmap)
+
+        # assign device
+        if device is None:
+            self.device = self.sensmap.device
+        else:
+            self.device = device
+
+        # offload to device
+        self.sensmap = self.sensmap.to(self.device)
+
+        # multicontrast
+        self.multicontrast = multicontrast
+
+        if self.multicontrast and self.ndim == 2:
+            self.sensmap = self.sensmap.unsqueeze(-3)
+        if self.multicontrast and self.ndim == 3:
+            self.sensmap = self.sensmap.unsqueeze(-4)
+ + def forward(self, x): + """ + Forward coil operator. + + Parameters + ---------- + x : np.ndarray | torch.Tensor + Input combined images of shape ``(nslices, ..., ny, nx)``. + (2D MRI / 3D Cartesian MRI) or ``(..., nz, ny, nx)`` (3D NonCartesian MRI). + + Returns + ------- + y : np.ndarray | torch.Tensor + Output images of shape ``(nsets, nslices, ncoils, ..., ny, nx)``. + (2D MRI / 3D Cartesian) or ``(nsets, ncoils, ..., nz, ny, nx)`` (3D NonCartesian MRI) + modulated by coil sensitivity profiles. + + """ + if isinstance(x, np.ndarray): + isnumpy = True + else: + isnumpy = False + + # convert to tensor + x = torch.as_tensor(x) + + # transfer to device + self.sensmap = self.sensmap.to(x.device) + + # unsqueeze + if self.multicontrast: + if self.ndim == 2: + x = x.unsqueeze(-4) + elif self.ndim == 3: + x = x.unsqueeze(-5) + else: + if self.ndim == 2: + x = x.unsqueeze(-3) + elif self.ndim == 3: + x = x.unsqueeze(-4) + + # project + y = self.sensmap * x + + # convert back to numpy if required + if isnumpy: + y = y.numpy(force=True) + + return y + + def _adjoint_linop(self): + if self.multicontrast and self.ndim == 2: + sensmap = self.sensmap.squeeze(-3) + if self.multicontrast and self.ndim == 3: + sensmap = self.sensmap.squeeze(-4) + if self.multicontrast is False: + sensmap = self.sensmap + return SenseAdjointOp(self.ndim, sensmap, self.device, self.multicontrast)
+ + +
[docs]class SenseAdjointOp(base.Linop):
+    """
+    Perform coil combination.
+
+    Coil sensitivity profiles are expected to have the following dimensions:
+
+    * 2D MRI: ``(nsets, nslices, ncoils, ny, nx)``
+    * 3D MRI: ``(nsets, ncoils, nz, ny, nx)``
+
+    where ``nsets`` represents multiple sets of coil sensitivity estimation
+    for soft-SENSE implementations (e.g., ESPIRIT), equal to ``1`` for conventional SENSE
+    and ``ncoils`` represents the number of receiver channels in the coil array.
+
+    """
+
[docs]    def __init__(self, ndim, sensmap, device=None, multicontrast=True):
+        super().__init__(ndim)
+
+        # cast map to tensor
+        self.sensmap = torch.as_tensor(sensmap)
+
+        # assign device
+        if device is None:
+            self.device = self.sensmap.device
+        else:
+            self.device = device
+
+        # offload to device
+        self.sensmap = self.sensmap.to(self.device)
+
+        # multicontrast
+        self.multicontrast = multicontrast
+
+        if self.multicontrast and self.ndim == 2:
+            self.sensmap = self.sensmap.unsqueeze(-3)
+        if self.multicontrast and self.ndim == 3:
+            self.sensmap = self.sensmap.unsqueeze(-4)
+ + def forward(self, y): + """ + Adjoint coil operator (coil combination). + + Parameters + ---------- + y : np.ndarray | torch.Tensor + Output images of shape ``(nsets, nslices, ncoils, ..., ny, nx)``. + (2D MRI / 3D Cartesian MRI) or ``(nsets, ncoils, ..., nz, ny, nx)`` + (3D NonCartesian MRI) modulated by coil sensitivity profiles. + + Returns + ------- + x : np.ndarray | torch.Tensor + Output combined images of shape ``(nslices, ..., ny, nx)``. + (2D MRI / 3D Cartesian MRI) or ``(..., nz, ny, nx)`` (3D NonCartesian MRI). + + """ + if isinstance(y, np.ndarray): + isnumpy = True + else: + isnumpy = False + + # convert to tensor + y = torch.as_tensor(y) + + # transfer to device + self.sensmap = self.sensmap.to(y.device) + + # apply sensitivity + tmp = self.sensmap.conj() * y + + # combine (over channels and sets) + if self.multicontrast: + if self.ndim == 2: + x = tmp.sum(axis=-4).sum(axis=0) + elif self.ndim == 3: + x = tmp.sum(axis=-5).sum(axis=0) + else: + if self.ndim == 2: + x = tmp.sum(axis=-3).sum(axis=0) + elif self.ndim == 3: + x = tmp.sum(axis=-4).sum(axis=0) + + # convert back to numpy if required + if isnumpy: + x = x.numpy(force=True) + + return x + + def _adjoint_linop(self): + if self.multicontrast and self.ndim == 2: + sensmap = self.sensmap.squeeze(-3) + if self.multicontrast and self.ndim == 3: + sensmap = self.sensmap.squeeze(-4) + if self.multicontrast is False: + sensmap = self.sensmap + return SenseOp(self.ndim, sensmap, self.device, self.multicontrast)
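A hedged numerical check of the pair above: ``SenseOp`` and ``SenseAdjointOp`` should satisfy the adjoint identity <A x, y> = <x, A.H y>. Shapes follow the 2D multicontrast convention documented above; all sizes are illustrative.

import torch
from deepmr.linops.coil import SenseOp, SenseAdjointOp

# coil sensitivities: (nsets=1, nslices=1, ncoils=4, ny=32, nx=32)
smaps = torch.randn(1, 1, 4, 32, 32, dtype=torch.complex64)
A = SenseOp(2, smaps)
AH = SenseAdjointOp(2, smaps)

x = torch.randn(1, 3, 32, 32, dtype=torch.complex64)        # (nslices, ncontrasts, ny, nx)
y = torch.randn(1, 1, 4, 3, 32, 32, dtype=torch.complex64)  # (nsets, nslices, ncoils, ncontrasts, ny, nx)

lhs = (A.forward(x) * y.conj()).sum()
rhs = (x * AH.forward(y).conj()).sum()
assert torch.allclose(lhs, rhs, rtol=1e-2)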
\ No newline at end of file diff --git a/_modules/deepmr/linops/fft.html b/_modules/deepmr/linops/fft.html new file mode 100644 index 00000000..ae6df5da --- /dev/null +++ b/_modules/deepmr/linops/fft.html @@ -0,0 +1,887 @@ + deepmr.linops.fft — deepmr documentation
Source code for deepmr.linops.fft

+"""Fast Fourier Transform linear operator."""
+
+__all__ = ["FFTOp", "IFFTOp", "FFTGramOp"]
+
+import numpy as np
+import torch
+
+from .. import fft as _fft
+
+from . import base
+
+
+
[docs]class FFTOp(base.Linop):
+    """
+    Fast Fourier Transform operator.
+
+    K-space sampling mask, if provided, is expected to have the following dimensions:
+
+    * 2D MRI: ``(ncontrasts, ny, nx)``
+    * 3D MRI: ``(ncontrasts, nz, ny)``
+
+    Input images are expected to have the following dimensions:
+
+    * 2D MRI: ``(nslices, nsets, ncoils, ncontrasts, ny, nx)``
+    * 3D MRI: ``(nsets, ncoils, ncontrasts, nz, ny)``
+
+    where ``nsets`` represents multiple sets of coil sensitivity estimation
+    for soft-SENSE implementations (e.g., ESPIRIT), equal to ``1`` for conventional SENSE
+    and ``ncoils`` represents the number of receiver channels in the coil array.
+
+    Similarly, output k-space data are expected to have the following dimensions:
+
+    * 2D MRI: ``(nslices, nsets, ncoils, ncontrasts, ny, nx)``
+    * 3D MRI: ``(nsets, ncoils, ncontrasts, nz, ny)``
+
+    """
+
[docs] def __init__(self, mask=None, basis_adjoint=None, device=None): + super().__init__(ndim=2) + if device is None: + device = "cpu" + self.device = device + + if mask is not None: + self.mask = torch.as_tensor(mask, device=device) + else: + self.mask = None + if basis_adjoint is not None: + self.basis_adjoint = torch.as_tensor(basis_adjoint, device=device) + else: + self.basis_adjoint = None
+ + def forward(self, x): + """ + Apply Sparse Fast Fourier Transform. + + Parameters + ---------- + x : np.ndarray | torch.Tensor + Input image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny)`` (3D). + + Returns + ------- + y : np.ndarray | torch.Tensor + Output sparse kspace of shape ``(..., ncontrasts, nviews, nsamples)``. + + """ + if isinstance(x, np.ndarray): + isnumpy = True + else: + isnumpy = False + + # convert to tensor + x = torch.as_tensor(x) + + if self.device is None: + self.device = x.device + + # cast + x = x.to(self.device) + # get adjoint basis + if self.basis_adjoint is not None: + basis_adjoint = self.basis_adjoint + else: + basis_adjoint = None + + # apply Fourier transform + y = _fft.fft(x, axes=(-1, -2), norm="ortho") + + # project + if basis_adjoint is not None: + y = y[..., None] + y = y.swapaxes(-4, -1) + yshape = list(y.shape) + y = y.reshape(-1, y.shape[-1]) # (prod(y.shape[:-1]), ncoeff) + y = y @ basis_adjoint # (prod(y.shape[:-1]), ncontrasts) + y = y.reshape(*yshape[:-1], y.shape[-1]) + y = y.swapaxes(-4, -1) + y = y[..., 0] + + # mask if required + if self.mask is not None: + y = self.mask * y + + # cast back to numpy if required + if isnumpy: + y = y.numpy(force=True) + + return y + + def _adjoint_linop(self): + # get adjoint basis + if self.basis_adjoint is not None: + basis = self.basis_adjoint.conj().t() + else: + basis = None + return IFFTOp(self.mask, basis, self.device)
+ + +
[docs]class IFFTOp(base.Linop):
+    """
+    Inverse Fast Fourier Transform operator.
+
+    K-space sampling mask, if provided, is expected to have the following dimensions:
+
+    * 2D MRI: ``(ncontrasts, ny, nx)``
+    * 3D MRI: ``(ncontrasts, nz, ny)``
+
+    Input k-space data are expected to have the following dimensions:
+
+    * 2D MRI: ``(nslices, nsets, ncoils, ncontrasts, ny, nx)``
+    * 3D MRI: ``(nsets, ncoils, ncontrasts, nz, ny)``
+
+    where ``nsets`` represents multiple sets of coil sensitivity estimation
+    for soft-SENSE implementations (e.g., ESPIRIT), equal to ``1`` for conventional SENSE
+    and ``ncoils`` represents the number of receiver channels in the coil array.
+
+    Similarly, output images are expected to have the following dimensions:
+
+    * 2D MRI: ``(nslices, nsets, ncoils, ncontrasts, ny, nx)``
+    * 3D MRI: ``(nsets, ncoils, ncontrasts, nz, ny)``
+
+    """
+
[docs] def __init__(self, mask=None, basis=None, device=None, **kwargs): + super().__init__(ndim=2, **kwargs) + if device is None: + device = "cpu" + self.device = device + + if mask is not None: + self.mask = torch.as_tensor(mask, device=device) + else: + self.mask = None + if basis is not None: + self.basis = torch.as_tensor(basis, device=device) + else: + self.basis = None
+ + def forward(self, y): + """ + Apply adjoint Non-Uniform Fast Fourier Transform. + + Parameters + ---------- + y : torch.Tensor + Input sparse kspace of shape ``(..., ncontrasts, nviews, nsamples)``. + + Returns + ------- + x : np.ndarray | torch.Tensor + Output image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny)`` (3D). + + """ + if isinstance(y, np.ndarray): + isnumpy = True + else: + isnumpy = False + + # convert to tensor + y = torch.as_tensor(y) + + if self.device is None: + self.device = y.device + + # cast + y = y.to(self.device) + if self.mask is not None: + self.mask = self.mask.to(self.device) + if self.basis is not None: + self.basis = self.basis.to(self.device) + + # mask if required + if self.mask is not None: + y = self.mask * y + + # project + if self.basis is not None: + y = y[..., None] + y = y.swapaxes(-4, -1) + yshape = list(y.shape) + y = y.reshape(-1, y.shape[-1]) # (prod(y.shape[:-1]), ncoeff) + y = y @ self.basis # (prod(y.shape[:-1]), ncontrasts) + y = y.reshape(*yshape[:-1], y.shape[-1]) + y = y.swapaxes(-4, -1) + y = y[..., 0] + + # apply Fourier transform + x = _fft.ifft(y, axes=(-1, -2), norm="ortho") + + # cast back to numpy if required + if isnumpy: + x = x.numpy(force=True) + + return x + + def _adjoint_linop(self): + # get adjoint basis + if self.basis is not None: + basis_adjoint = self.basis.conj().t() + else: + basis_adjoint = None + return FFTOp(self.mask, basis_adjoint, self.device)
+ + +
[docs]class FFTGramOp(base.Linop): + """ + Self-adjoint Sparse Fast Fourier Transform operator. + + K-space sampling mask, if provided, is expected to have the following dimensions: + + * 2D MRI: ``(ncontrasts, ny, nx)`` + * 3D MRI: ``(ncontrasts, nz, ny)`` + + Input and output images are expected to have the following dimensions: + + * 2D MRI: ``(nslices, nsets, ncoils, ncontrasts, ny, nx)`` + * 3D MRI: ``(nsets, ncoils, ncontrasts, nz, ny)`` + + where ``nsets`` represents multiple sets of coil sensitivity estimation + for soft-SENSE implementations (e.g., ESPIRIT), equal to ``1`` for conventional SENSE + and ``ncoils`` represents the number of receiver channels in the coil array. + + """ + +
[docs] def __init__(self, mask=None, basis=None, device=None, **kwargs): + super().__init__(ndim=2, **kwargs) + self.device = device + if device is None: + device = "cpu" + if basis is not None: + basis = torch.as_tensor(basis, device=device) + else: + basis = None + if mask is not None: + mask = torch.as_tensor(mask, device=device) + else: + mask = None + + # calculate space-time kernel + if basis is not None and mask is not None: + T, K = basis.shape + nt, nz, ny = mask.shape # or (nt, ny, nx) for 2D + assert nt == T + tmp = mask.permute(2, 1, 0).reshape((ny, nz, T, 1, 1)) * basis.reshape( + (1, 1, nt, 1, K) + ) # (ny, nz, T, 1, K) / (nx, ny, T, 1, K) + toeplitz_kern = (tmp * basis.reshape(1, 1, T, K, 1)).sum( + axis=2 + ) # (ny, nz, K, K) / (nx, ny, K, K) + toeplitz_kern = torch.fft.fftshift( + torch.fft.fftshift(toeplitz_kern, dim=0), dim=1 + ) + self._toeplitz_kern = ( + toeplitz_kern.swapaxes(0, 1).reshape(-1, K, K).contiguous() + ) # (nz*ny, K, K) / (ny*nx, K, K) + else: + self._toeplitz_kern = None
+ + def forward(self, x): + """ + Apply Toeplitz convolution (``SparseFFT.H * SparseFFT``). + + Parameters + ---------- + x : np.ndarray | torch.Tensor + Input image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny)`` (3D). + + Returns + ------- + y : np.ndarray | torch.Tensor + Output image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny)`` (3D). + + """ + if isinstance(x, np.ndarray): + isnumpy = True + else: + isnumpy = False + + # convert to tensor + x = torch.as_tensor(x) + + if self.device is None: + self.device = x.device + + # cast + x = x.to(self.device) + if self._toeplitz_kern is not None: + self._toeplitz_kern = self._toeplitz_kern.to(self.device) + + # fourier transform + y = _fft.fft(x, axes=(-1, -2), norm="ortho", centered=False) + + # project if required + if self._toeplitz_kern is not None: + y = y[..., None] # (..., ncoeff, nz, ny, 1) / (..., ncoeff, ny, nx, 1) + y = y.swapaxes( + -4, -1 + ) # (..., 1, nz, ny, ncoeff) / (..., 1, ny, nx, ncoeff) + yshape = list(y.shape) + y = y.reshape( + int(np.prod(yshape[:-4])), -1, y.shape[-1] + ) # (prod(y.shape[:-4]), nz*ny, ncoeff) / (prod(y.shape[:-4]), ny*nx, ncoeff) + y = torch.einsum("...bi,bij->...bj", y, self._toeplitz_kern) + y = y.reshape( + *yshape + ) # (..., 1, nz, ny, ncoeff) / # (..., 1, ny, nx, ncoeff) + y = y.swapaxes( + -4, -1 + ) # (..., ncoeff, nz, ny, 1) / # (..., ncoeff, ny, nx, 1) + y = y[..., 0] # (..., ncoeff, nz, ny) / # (..., ncoeff, ny, nx) + + # apply Fourier transform + x = _fft.ifft(y, axes=(-1, -2), norm="ortho", centered=False) + + # cast back to numpy if required + if isnumpy: + x = x.numpy(force=True) + + return x + + def _adjoint_linop(self): + return self
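The ``basis`` / ``basis_adjoint`` arguments used by the three operators above implement a low-rank subspace projection along the contrast axis: a ``(ncontrasts, ncoeff)`` basis expands coefficient images into contrast images, and its conjugate transpose projects back. A small self-contained sketch of that bookkeeping (plain PyTorch, independent of deepmr):

import torch

T, K, ny, nx = 16, 4, 32, 32                                          # contrasts, subspace size, matrix
basis = torch.linalg.qr(torch.randn(T, K, dtype=torch.complex64))[0]  # (T, K) with orthonormal columns

coeff = torch.randn(K, ny, nx, dtype=torch.complex64)                 # subspace coefficient images
frames = torch.einsum("tk,kyx->tyx", basis, coeff)                    # expand to T contrast images (uses basis)
coeff2 = torch.einsum("kt,tyx->kyx", basis.conj().T, frames)          # project back (uses basis_adjoint)
print(torch.allclose(coeff, coeff2, atol=1e-4))                       # True for an orthonormal basis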
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/linops/nufft.html b/_modules/deepmr/linops/nufft.html new file mode 100644 index 00000000..d35fa652 --- /dev/null +++ b/_modules/deepmr/linops/nufft.html @@ -0,0 +1,781 @@ + + + + + + + + + + + deepmr.linops.nufft — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.linops.nufft

+"""Non-Uniform Fast Fourier Transform linear operator."""
+
+__all__ = ["NUFFTOp", "NUFFTAdjointOp", "NUFFTGramOp"]
+
+import torch
+
+from .. import fft as _fft
+
+from . import base
+
+
+
[docs]class NUFFTOp(base.Linop): + """ + Non-Uniform Fast Fourier Transform operator. + + K-space sampling trajectory is expected to be shaped ``(ncontrasts, nviews, nsamples, ndims)``. + + Input images are expected to have the following dimensions: + + * 2D MRI: ``(nslices, nsets, ncoils, ncontrasts, ny, nx)`` + * 3D MRI: ``(nsets, ncoils, ncontrasts, nz, ny, nx)`` + + where ``nsets`` represents multiple sets of coil sensitivity estimation + for soft-SENSE implementations (e.g., ESPIRIT), equal to ``1`` for conventional SENSE + and ``ncoils`` represents the number of receiver channels in the coil array. + + Similarly, output k-space data are expected to be shaped ``(nslices, nsets, ncoils, ncontrasts, nviews, nsamples)``. + + """ + +
[docs] def __init__( + self, + coord=None, + shape=None, + basis_adjoint=None, + weight=None, + device="cpu", + threadsperblock=128, + width=4, + oversamp=1.25, + ): + if coord is not None and shape is not None: + super().__init__(ndim=coord.shape[-1]) + self.nufft_plan = _fft.plan_nufft(coord, shape, width, oversamp, device) + else: + super().__init__(ndim=None) + self.nufft_plan = None + if weight is not None: + self.weight = torch.as_tensor(weight**0.5, device=device) + else: + self.weight = None + if basis_adjoint is not None: + self.basis_adjoint = torch.as_tensor(basis_adjoint, device=device) + else: + self.basis_adjoint = None + self.threadsperblock = threadsperblock
+ + def forward(self, x): + """ + Apply Non-Uniform Fast Fourier Transform. + + Parameters + ---------- + x : np.ndarray | torch.Tensor + Input image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + + Returns + ------- + y : np.ndarray | torch.Tensor + Output Non-Cartesian kspace of shape ``(..., ncontrasts, nviews, nsamples)``. + + """ + return _fft.apply_nufft( + x, + self.nufft_plan, + self.basis_adjoint, + self.weight, + threadsperblock=self.threadsperblock, + ) + + def _adjoint_linop(self): + if self.basis_adjoint is not None: + basis = self.basis_adjoint.conj().T + else: + basis = None + if self.weight is not None: + weight = self.weight**2 + else: + weight = None + adjOp = NUFFTAdjointOp( + basis=basis, weight=weight, threadsperblock=self.threadsperblock + ) + adjOp.ndim = self.ndim + adjOp.nufft_plan = self.nufft_plan + return adjOp
+ + +
[docs]class NUFFTAdjointOp(base.Linop): + """ + Adjoint Non-Uniform Fast Fourier Transform operator. + + K-space sampling trajectory is expected to be shaped ``(ncontrasts, nviews, nsamples, ndims)``. + + Input k-space data are expected to be shaped ``(nslices, nsets, ncoils, ncontrasts, nviews, nsamples)``, + where ``nsets`` represents multiple sets of coil sensitivity estimation + for soft-SENSE implementations (e.g., ESPIRIT), equal to ``1`` for conventional SENSE + and ``ncoils`` represents the number of receiver channels in the coil array. + + Similarly, output images are expected to have the following dimensions: + + * 2D MRI: ``(nslices, nsets, ncoils, ncontrasts, ny, nx)`` + * 3D MRI: ``(nsets, ncoils, ncontrasts, nz, ny, nx)`` + + """ + +
[docs] def __init__( + self, + coord=None, + shape=None, + basis=None, + weight=None, + device="cpu", + threadsperblock=128, + width=4, + oversamp=1.25, + **kwargs + ): + if coord is not None and shape is not None: + super().__init__(ndim=coord.shape[-1]) + self.nufft_plan = _fft.plan_nufft(coord, shape, width, oversamp, device) + else: + super().__init__(ndim=None) + self.nufft_plan = None + if weight is not None: + self.weight = torch.as_tensor(weight**0.5, device=device) + else: + self.weight = None + if basis is not None: + self.basis = torch.as_tensor(basis, device=device) + else: + self.basis = None + self.threadsperblock = threadsperblock
+ + def forward(self, y): + """ + Apply adjoint Non-Uniform Fast Fourier Transform. + + Parameters + ---------- + y : torch.Tensor + Input Non-Cartesian kspace of shape ``(..., ncontrasts, nviews, nsamples)``. + + Returns + ------- + x : np.ndarray | torch.Tensor + Output image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + + """ + return _fft.apply_nufft_adj( + y, + self.nufft_plan, + self.basis, + self.weight, + threadsperblock=self.threadsperblock, + ) + + def _adjoint_linop(self): + if self.basis is not None: + basis_adjoint = self.basis.conj().T + else: + basis_adjoint = None + if self.weight is not None: + weight = self.weight**2 + else: + weight = None + adjOp = NUFFTOp( + basis_adjoint=basis_adjoint, + weight=weight, + threadsperblock=self.threadsperblock, + ) + adjOp.ndim = self.ndim + adjOp.nufft_plan = self.nufft_plan + return adjOp
+ + +
[docs]class NUFFTGramOp(base.Linop): + """ + Self-adjoint Non-Uniform Fast Fourier Transform operator. + + K-space sampling trajectory is expected to be shaped ``(ncontrasts, nviews, nsamples, ndims)``. + + Input and output images are expected to have the following dimensions: + + * 2D MRI: ``(nslices, nsets, ncoils, ncontrasts, ny, nx)`` + * 3D MRI: ``(nsets, ncoils, ncontrasts, nz, ny, nx)`` + + where ``nsets`` represents multiple sets of coil sensitivity estimation + for soft-SENSE implementations (e.g., ESPIRIT), equal to ``1`` for conventional SENSE + and ``ncoils`` represents the number of receiver channels in the coil array. + + """ + +
[docs] def __init__( + self, + coord, + shape, + basis=None, + weight=None, + device="cpu", + threadsperblock=128, + width=6, + **kwargs + ): + super().__init__(ndim=coord.shape[-1], **kwargs) + self.toeplitz_kern = _fft.plan_toeplitz_nufft( + coord, shape, basis, weight, width, device + ) + self.threadsperblock = threadsperblock
+ + def forward(self, x): + """ + Apply Toeplitz convolution (``NUFFT.H * NUFFT``). + + Parameters + ---------- + x : np.ndarray | torch.Tensor + Input image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + + Returns + ------- + y : np.ndarray | torch.Tensor + Output image of shape ``(..., ncontrasts, ny, nx)`` (2D) + or ``(..., ncontrasts, nz, ny, nx)`` (3D). + + """ + return _fft.apply_nufft_selfadj( + x, self.toeplitz_kern, threadsperblock=self.threadsperblock + ) + + def _adjoint_linop(self): + return self
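As for the Cartesian operators, the forward/adjoint NUFFT pairing can be checked with a dot-product test, since both directions share the same ``nufft_plan``. A hedged sketch: the ``deepmr.linops`` import path and the trajectory scaling (here assumed to be in cycles/FOV, spanning roughly ``[-n/2, n/2]``) are assumptions to be checked against the deepmr documentation.

import math
import torch
from deepmr import linops  # assumed import path

n, nviews, nsamples = 32, 13, 64
kr = torch.linspace(-n / 2, n / 2, nsamples)                  # readout coordinate
phi = torch.arange(nviews) * math.pi / nviews                 # view angles
coord = torch.stack(
    (kr[None, :] * torch.cos(phi)[:, None], kr[None, :] * torch.sin(phi)[:, None]), dim=-1
)[None, ...]                                                  # (ncontrasts=1, nviews, nsamples, 2)

A = linops.NUFFTOp(coord, shape=(n, n))                       # forward NUFFT
AH = A._adjoint_linop()                                       # adjoint sharing the same plan

x = torch.randn(1, n, n, dtype=torch.complex64)               # image
y = torch.randn(1, nviews, nsamples, dtype=torch.complex64)   # non-Cartesian k-space

lhs = (A.forward(x).conj() * y).sum()                         # <A x, y>
rhs = (x.conj() * AH.forward(y)).sum()                        # <x, A^H y>
print(lhs, rhs)                                               # should agree up to numerical precision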
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/optim/admm.html b/_modules/deepmr/optim/admm.html new file mode 100644 index 00000000..f8651b51 --- /dev/null +++ b/_modules/deepmr/optim/admm.html @@ -0,0 +1,728 @@ + + + + + + + + + + + deepmr.optim.admm — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.optim.admm

+"""Alternate Direction of Multipliers Method iteration."""
+
+__all__ = ["admm_solve", "ADMMStep"]
+
+import numpy as np
+import torch
+
+import torch.nn as nn
+
+from .cg import cg_solve
+
+from .. import linops as _linops
+
+
+@torch.no_grad()
+def admm_solve(
+    input, step, AHA, D, niter=10, device=None, dc_niter=10, dc_tol=1e-4, dc_ndim=None
+):
+    """
+    Solve inverse problem using the Alternating Direction Method of Multipliers (ADMM).
+
+    Parameters
+    ----------
+    input : np.ndarray | torch.Tensor
+        Signal to be reconstructed. Assume it is the adjoint AH of measurement
+        operator A applied to the measured data y (i.e., input = AHy).
+    step : float
+        ADMM step size; should be <= 1 / max(eig(AHA)).
+    AHA : Callable | torch.Tensor | np.ndarray
+        Normal operator AHA = AH * A.
+    D : Callable
+        Signal denoiser for plug-n-play restoration.
+    niter : int, optional
+        Number of iterations. The default is ``10``.
+    device : str, optional
+        Computational device.
+        The default is ``None`` (infer from input).
+    dc_niter : int, optional
+        Number of iterations of inner data consistency step.
+        The default is ``10``.
+    dc_tol : float, optional
+        Stopping condition for inner data consistency step.
+        The default is ``1e-4``.
+    dc_ndim : int, optional
+        Number of spatial dimensions of the problem for inner data consistency step.
+        It is used to infer the batch axes. If ``AHA`` is a ``deepmr.linops.Linop``
+        operator, this is inferred from ``AHA.ndim`` and ``ndim`` is ignored.
+
+    Returns
+    -------
+    output : np.ndarray | torch.Tensor
+        Reconstructed signal.
+
+    """
+    # cast to numpy if required
+    if isinstance(input, np.ndarray):
+        isnumpy = True
+        input = torch.as_tensor(input)
+    else:
+        isnumpy = False
+
+    # keep original device
+    idevice = input.device
+    if device is None:
+        device = idevice
+
+    # put on device
+    input = input.to(device)
+    if isinstance(AHA, _linops.Linop):
+        AHA = AHA.to(device)
+    elif callable(AHA) is False:
+        AHA = torch.as_tensor(AHA, dtype=input.dtype, device=device)
+
+    # assume input is AH(y), i.e., adjoint of measurement operator
+    # applied on measured data
+    AHy = input.clone()
+
+    # initialize algorithm
+    ADMM = ADMMStep(step, AHA, AHy, D, niter=dc_niter, tol=dc_tol, ndim=dc_ndim)
+
+    # initialize
+    input = 0 * input
+
+    # run algorithm
+    for n in range(niter):
+        output = ADMM(input)
+        input = output.clone()
+
+    # back to original device
+    output = output.to(idevice)
+
+    # cast back to numpy if required
+    if isnumpy:
+        output = output.numpy(force=True)
+
+    return output
+
+
+
[docs]class ADMMStep(nn.Module): + """ + Alternating Direction Method of Multipliers (ADMM) step. + + This represents propagation through a single iteration of an + ADMM algorithm; can be used to build + unrolled architectures. + + Attributes + ---------- + step : float + ADMM step size; should be <= 1 / max(eig(AHA)). + AHA : Callable | torch.Tensor + Normal operator AHA = AH * A. + AHy : torch.Tensor + Adjoint AH of measurement + operator A applied to the measured data y. + D : Iterable(Callable) + Signal denoiser(s) for plug-n-play restoration. + trainable : bool, optional + If ``True``, gradient update step is trainable, otherwise it is not. + The default is ``False``. + niter : int, optional + Number of iterations of inner data consistency step. + tol : float, optional + Stopping condition for inner data consistency step. + ndim : int, optional + Number of spatial dimensions of the problem for inner data consistency step. + It is used to infer the batch axes. If ``AHA`` is a ``deepmr.linops.Linop`` + operator, this is inferred from ``AHA.ndim`` and ``ndim`` is ignored. + + """ + +
[docs] def __init__( + self, step, AHA, AHy, D, trainable=False, niter=10, tol=1e-4, ndim=None + ): + super().__init__() + if trainable: + self.step = nn.Parameter(step) + else: + self.step = step + + # set up problem dims + try: + self.ndim = AHA.ndim + except Exception: + self.ndim = ndim + + # assign operators + self.AHA = AHA + self.AHy = AHy + + # assign denoisers + if hasattr(D, "__iter__"): + self.D = list(D) + else: + self.D = [D] + + # prepare auxiliary + self.xi = torch.zeros( + [1 + len(self.D)] + list(AHy.shape), + dtype=AHy.dtype, + device=AHy.device, + ) + self.ui = torch.zeros_like(self.xi) + + # dc solver settings + self.niter = niter + self.tol = tol
+ + def forward(self, input): + # data consistency step: zk = (AHA + gamma * I).solve(AHy) + self.xi[0] = cg_solve( + self.AHy + self.step * (input - self.ui[0]), + self.AHA, + niter=self.niter, + tol=self.tol, + lamda=self.step, + ndim=self.ndim, + ) + + # denoise using each regularizer + for n in range(len(self.D)): + self.xi[n + 1] = self.D[n](input - self.ui[n + 1]) + + # average consensus + output = self.xi.mean(axis=0) + self.ui += self.xi - output[None, ...] + + return output
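A hedged usage sketch of ``admm_solve`` on a toy denoising problem (``A`` is the identity, so the normal operator is a passthrough and the input is simply the noisy image). The ``deepmr.optim`` import path and the use of a plain soft-threshold as the plug-and-play denoiser are illustrative assumptions:

import torch
from deepmr import optim  # assumed import path

noisy = torch.randn(64, 64)   # plays the role of AHy for A = Identity

def soft(x, ths=0.1):
    # simple soft-threshold, standing in for any plug-and-play denoiser D
    return torch.sgn(x) * torch.clamp(x.abs() - ths, min=0.0)

# dc_ndim is needed here because the bare callable AHA carries no .ndim attribute
out = optim.admm_solve(noisy, step=1.0, AHA=lambda x: x, D=soft, niter=10, dc_ndim=2)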
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/optim/cg.html b/_modules/deepmr/optim/cg.html new file mode 100644 index 00000000..e16b0317 --- /dev/null +++ b/_modules/deepmr/optim/cg.html @@ -0,0 +1,719 @@ + + + + + + + + + + + deepmr.optim.cg — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.optim.cg

+"""Conjugate Gradient iteration."""
+
+__all__ = ["cg_solve", "CGStep"]
+
+import numpy as np
+import torch
+
+import torch.nn as nn
+
+from .. import linops as _linops
+
+
+@torch.no_grad()
+def cg_solve(
+    input,
+    AHA,
+    niter=10,
+    device=None,
+    tol=1e-4,
+    lamda=0.0,
+    ndim=None,
+):
+    """
+    Solve inverse problem using Conjugate Gradient method.
+
+    Parameters
+    ----------
+    input : np.ndarray | torch.Tensor
+        Signal to be reconstructed. Assume it is the adjoint AH of measurement
+        operator A applied to the measured data y (i.e., input = AHy).
+    AHA : Callable | torch.Tensor | np.ndarray
+        Normal operator AHA = AH * A.
+    niter : int, optional
+        Number of iterations. The default is ``10``.
+    device : str, optional
+        Computational device.
+        The default is ``None`` (infer from input).
+    tol : float, optional
+        Stopping condition. The default is ``1e-4``.
+    lamda : float, optional
+        Tikhonov regularization strength. The default is ``0.0``.
+    ndim : int, optional
+        Number of spatial dimensions of the problem.
+        It is used to infer the batch axes. If ``AHA`` is a ``deepmr.linops.Linop``
+        operator, this is inferred from ``AHA.ndim`` and ``ndim`` is ignored.
+
+
+    Returns
+    -------
+    output : np.ndarray | torch.Tensor
+        Reconstructed signal.
+
+    """
+    # cast to numpy if required
+    if isinstance(input, np.ndarray):
+        isnumpy = True
+        input = torch.as_tensor(input)
+    else:
+        isnumpy = False
+
+    # keep original device
+    idevice = input.device
+    if device is None:
+        device = idevice
+
+    # put on device
+    input = input.to(device)
+    if isinstance(AHA, _linops.Linop):
+        AHA = AHA.to(device)
+    elif callable(AHA) is False:
+        AHA = torch.as_tensor(AHA, dtype=input.dtype, device=device)
+
+    # assume input is AH(y), i.e., adjoint of measurement operator
+    # applied on measured data
+    AHy = input.clone()
+
+    # add Tikhonov regularization
+    if lamda != 0.0:
+        if isinstance(AHA, _linops.Linop):
+            _AHA = AHA + lamda * _linops.Identity(AHA.ndim)
+        elif callable(AHA):
+            _AHA = lambda x: AHA(x) + lamda * x
+        else:
+            _AHA = lambda x: AHA @ x + lamda * x
+    else:
+        _AHA = AHA
+
+    # initialize algorithm
+    CG = CGStep(_AHA, AHy, ndim, tol)
+
+    # initialize
+    input = 0 * input
+
+    # run algorithm
+    for n in range(niter):
+        output = CG(input)
+        if CG.check_convergence():
+            break
+        input = output.clone()
+
+    # back to original device
+    output = output.to(idevice)
+
+    # cast back to numpy if required
+    if isnumpy:
+        output = output.numpy(force=True)
+
+    return output
+
+
+
[docs]class CGStep(nn.Module): + """ + Conjugate Gradient method step. + + This represents propagation through a single iteration of a + CG algorithm; can be used to build + unrolled architectures. + + Attributes + ---------- + AHA : Callable | torch.Tensor + Normal operator AHA = AH * A. + AHy : torch.Tensor + Adjoint AH of measurement + operator A applied to the measured data y. + ndim : int + Number of spatial dimensions of the problem. + It is used to infer the batch axes. If ``AHA`` is a ``deepmr.linops.Linop`` + operator, this is inferred from ``AHA.ndim`` and ``ndim`` is ignored. + tol : float, optional + Stopping condition. + The default is ``None`` (run until niter). + + """ + +
[docs] def __init__(self, AHA, AHy, ndim=None, tol=None): + super().__init__() + # set up problem dims + try: + self.ndim = AHA.ndim + except Exception: + self.ndim = ndim + + # assign operators + self.AHA = AHA + self.AHy = AHy + + # preallocate + self.r = self.AHy.clone() + self.p = self.r + self.rsold = self.dot(self.r, self.r) + self.rsnew = None + self.tol = tol
+ + def dot(self, s1, s2): + dot = s1.conj() * s2 + dot = dot.reshape(*s1.shape[: -self.ndim], -1).sum(axis=-1) + + return dot + + def forward(self, input): + AHAp = self.AHA(self.p) + alpha = self.rsold / self.dot(self.p, AHAp) + output = input + self.p * alpha + self.r = self.r + AHAp * (-alpha) + self.rsnew = torch.real(self.dot(self.r, self.r)) + self.p = self.r + self.p * (self.rsnew / self.rsold) + self.rsold = self.rsnew + + return output + + def check_convergence(self): + if self.tol is not None: + if self.rsnew.sqrt() < self.tol: + return True + else: + return False + else: + return False
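A minimal sketch of ``cg_solve`` on a small least-squares problem, with the normal operator supplied as a callable (the import path is an assumption):

import torch
from deepmr.optim import cg_solve  # assumed import path

torch.manual_seed(0)
A = torch.randn(20, 8)
xtrue = torch.randn(8)
y = A @ xtrue

AHA = lambda x: A.T @ (A @ x)   # normal operator as a callable
AHy = A.T @ y                   # cg_solve expects AH applied to the data

xhat = cg_solve(AHy, AHA, niter=50, tol=1e-8, ndim=1)
print(torch.allclose(xhat, xtrue, atol=1e-3))   # expected True: noiseless, well-conditioned system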
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/optim/pgd.html b/_modules/deepmr/optim/pgd.html new file mode 100644 index 00000000..0a201081 --- /dev/null +++ b/_modules/deepmr/optim/pgd.html @@ -0,0 +1,722 @@ + + + + + + + + + + + deepmr.optim.pgd — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.optim.pgd

+"""Proximal Gradient Method iteration."""
+
+__all__ = ["pgd_solve", "PGDStep"]
+
+import numpy as np
+import torch
+
+import torch.nn as nn
+
+from .. import linops as _linops
+
+
+@torch.no_grad()
+def pgd_solve(input, step, AHA, D, niter=10, accelerate=True, device=None, tol=None):
+    """
+    Solve inverse problem using Proximal Gradient Method.
+
+    Parameters
+    ----------
+    input : np.ndarray | torch.Tensor
+        Signal to be reconstructed. Assume it is the adjoint AH of measurement
+        operator A applied to the measured data y (i.e., input = AHy).
+    step : float
+        Gradient step size; should be <= 1 / max(eig(AHA)).
+    AHA : Callable | torch.Tensor | np.ndarray
+        Normal operator AHA = AH * A.
+    D : Callable
+        Signal denoiser for plug-n-play restoration.
+    niter : int, optional
+        Number of iterations. The default is ``10``.
+    accelerate : bool, optional
+        Toggle Nesterov acceleration (``True``, i.e., FISTA) or
+        not (``False``, ISTA). The default is ``True``.
+    device : str, optional
+        Computational device.
+        The default is ``None`` (infer from input).
+    tol : float, optional
+        Stopping condition.
+        The default is ``None`` (run until niter).
+
+    Returns
+    -------
+    output : np.ndarray | torch.Tensor
+        Reconstructed signal.
+
+    """
+    # cast to numpy if required
+    if isinstance(input, np.ndarray):
+        isnumpy = True
+        input = torch.as_tensor(input)
+    else:
+        isnumpy = False
+
+    # keep original device
+    idevice = input.device
+    if device is None:
+        device = idevice
+
+    # put on device
+    input = input.to(device)
+    if isinstance(AHA, _linops.Linop):
+        AHA = AHA.to(device)
+    elif callable(AHA) is False:
+        AHA = torch.as_tensor(AHA, dtype=input.dtype, device=device)
+
+    # assume input is AH(y), i.e., adjoint of measurement operator
+    # applied on measured data
+    AHy = input.clone()
+
+    # initialize Nesterov acceleration
+    if accelerate:
+        q = _get_acceleration(niter)
+    else:
+        q = [0.0] * niter
+
+    # initialize algorithm
+    PGD = PGDStep(step, AHA, AHy, D)
+
+    # initialize
+    input = 0 * input
+
+    # run algorithm
+    for n in range(niter):
+        output = PGD(input, q[n])
+
+        # if required, compute residual and check if we reached convergence
+        if PGD.check_convergence(output, input, step):
+            break
+
+        # update variable
+        input = output.clone()
+
+    # back to original device
+    output = output.to(idevice)
+
+    # cast back to numpy if required
+    if isnumpy:
+        output = output.numpy(force=True)
+
+    return output
+
+
+
[docs]class PGDStep(nn.Module): + """ + Proximal Gradient Method step. + + This represents propagation through a single iteration of a + Proximal Gradient Descent algorithm; can be used to build + unrolled architectures. + + Attributes + ---------- + step : float + Gradient step size; should be <= 1 / max(eig(AHA)). + AHA : Callable | torch.Tensor + Normal operator AHA = AH * A. + AHy : torch.Tensor + Adjoint AH of measurement + operator A applied to the measured data y. + D : Callable + Signal denoiser for plug-n-play restoration. + trainable : bool, optional + If ``True``, gradient update step is trainable, otherwise it is not. + The default is ``False``. + tol : float, optional + Stopping condition. + The default is ``None`` (run until niter). + + """ + +
[docs] def __init__(self, step, AHA, AHy, D, trainable=False, tol=None): + super().__init__() + if trainable: + self.step = nn.Parameter(step) + else: + self.step = step + + # assign + self.AHA = AHA + self.AHy = AHy + self.D = D + self.s = AHy.clone() + self.tol = tol
+ + def forward(self, input, q=0.0): + # gradient step : zk = xk-1 - gamma * AH(A(xk-1) - y != FISTA (accelerated) + z = input - self.step * (self.AHA(input) - self.AHy) + + # denoise: sk = D(zk) + s = self.D(z) + + # update: xk = sk + [(qk-1 - 1) / qk] * (sk - sk-1) + if q != 0.0: + output = s + q * (s - self.s) + self.s = s.clone() + else: + output = s # q1...qn = 1.0 != ISTA (non-accelerated) + + return output + + def check_convergence(self, output, input, step): + if self.tol is not None: + resid = torch.linalg.norm(output - input).item() / step + if resid < self.tol: + return True + else: + return False + else: + return False
+ + +# %% local utils +def _get_acceleration(niter): + t = [] + t_new = 1 + + for n in range(niter): + t_old = t_new + t_new = (1 + (1 + 4 * t_old**2) ** 0.5) / 2 + t.append((t_old - 1) / t_new) + + return t +
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/prox/llr.html b/_modules/deepmr/prox/llr.html new file mode 100644 index 00000000..6d23235a --- /dev/null +++ b/_modules/deepmr/prox/llr.html @@ -0,0 +1,706 @@ + + + + + + + + + + + deepmr.prox.llr — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.prox.llr

+"""Local Low Rank denoisining."""
+
+__all__ = ["LLRDenoiser", "llr_denoise"]
+
+import numpy as np
+
+import torch
+import torch.nn as nn
+
+from .. import _signal
+
+from . import threshold
+
+
+
[docs]class LLRDenoiser(nn.Module): + r""" + Local Low Rank denoising. + + The solution is available in closed-form, thus the denoiser is cheap to compute. + + Attributes + ---------- + ndim : int, + Number of spatial dimensions. + W : int, optional + Patch size (assume isotropic). + ths : float, optional + Denoise threshold. The default is ``0.1``. + trainable : bool, optional + If ``True``, threshold value is trainable, otherwise it is not. + The default is ``False``. + S : int, optional + Patch stride (assume isotropic). + If not provided, use non-overlapping patches. + rand_shift : bool, optional + If True, randomly shift across spatial dimensions before denoising. + axis : bool, optional + Axis assumed as coefficient axis (e.g., coils or contrasts). + If not provided, use first axis to the left of spatial dimensions. + device : str, optional + Device on which the wavelet transform is computed. + The default is ``None`` (infer from input). + + """ + +
[docs] def __init__( + self, + ndim, + W, + ths=0.1, + trainable=False, + S=None, + rand_shift=True, + axis=None, + device=None, + ): + super().__init__() + + if trainable: + self.ths = nn.Parameter(ths) + else: + self.ths = ths + + self.ndim = ndim + self.W = [W] * ndim + if S is None: + self.S = [W] * ndim + else: + self.S = [S] * ndim + self.rand_shift = rand_shift + if axis is None: + self.axis = -self.ndim - 1 + else: + self.axis = axis + self.device = device
+ + def forward(self, x): + # default device + idevice = x.device + if self.device is None: + device = idevice + else: + device = self.device + x = x.to(device) + + # circshift randomly + if self.rand_shift is True: + shift = tuple(np.random.randint(0, self.W, size=self.ndim)) + axes = tuple(range(-self.ndim, 0)) + x = torch.roll(x, shift, axes) + + # reshape to (..., ncoeff, ny, nx), (..., ncoeff, nz, ny, nx) + x = x.swapaxes(self.axis, -self.ndim - 1) + x0shape = x.shape + x = x.reshape(-1, *x0shape[-self.ndim - 1 :]) + x1shape = x.shape + + # build patches + patches = _signal.tensor2patches(x, self.W, self.S) + pshape = patches.shape + patches = patches.reshape(*pshape[:1], -1, int(np.prod(pshape[-self.ndim :]))) + + # perform SVD and soft-threshold S matrix + u, s, vh = torch.linalg.svd(patches, full_matrices=False) + s_st = threshold.soft_thresh(s, self.ths) + patches = u * s_st[..., None, :] @ vh + patches = patches.reshape(*pshape) + output = _signal.patches2tensor(patches, x1shape[-self.ndim :], self.W, self.S) + output = output.reshape(x0shape) + output = output.swapaxes(self.axis, -self.ndim - 1) + + # randshift back + if self.rand_shift is True: + shift = tuple([-s for s in shift]) + output = torch.roll(output, shift, axes) + + return output.to(idevice)
+ + +
[docs]def llr_denoise(input, ndim, ths, W, S=None, rand_shift=True, axis=None, device=None): + """ + Apply Local Low Rank denoising. + + The solution is available in closed-form, thus the denoiser is cheap to compute. + + Attributes + ---------- + ndim : int, + Number of spatial dimensions. + W : int + Patch size (assume isotropic). + ths : float, optional + Denoise threshold. The default is ``0.1``. + S : int, optional + Patch stride (assume isotropic). + If not provided, use non-overlapping patches. + rand_shift : bool, optional + If True, randomly shift across spatial dimensions before denoising. + axis : bool, optional + Axis assumed as coefficient axis (e.g., coils or contrasts). + If not provided, use first axis to the left of spatial dimensions. + device : str, optional + Device on which the wavelet transform is computed. + The default is ``None``. + + Returns + ------- + output : np.ndarray | torch.Tensor + Denoised image of shape (..., n_ndim, ..., n_0). + + """ + # cast to torch if required + if isinstance(input, np.ndarray): + isnumpy = True + input = torch.as_tensor(input) + else: + isnumpy = False + + LLR = LLRDenoiser(ndim, W, ths, False, S, rand_shift, axis, device) + output = LLR(input) + + # cast back to numpy if requried + if isnumpy: + output = output.numpy(force=True) + + return output
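A hedged usage sketch of ``llr_denoise`` on a multi-contrast image series (the ``deepmr.prox`` import path is an assumption):

import torch
from deepmr.prox import llr_denoise  # assumed import path

x = torch.randn(8, 64, 64, dtype=torch.complex64)   # (ncontrasts, ny, nx) image series
y = llr_denoise(x, ndim=2, ths=0.05, W=8)            # 8x8 non-overlapping patches, thresholding along the contrast axis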
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/prox/tgv.html b/_modules/deepmr/prox/tgv.html new file mode 100644 index 00000000..1e6d6aa8 --- /dev/null +++ b/_modules/deepmr/prox/tgv.html @@ -0,0 +1,1058 @@ + + + + + + + + + + + deepmr.prox.tgv — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.prox.tgv

+"""Total generalized variation denoising prior."""
+
+__all__ = ["TGVDenoiser", "tgv_denoise"]
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+
+
[docs]class TGVDenoiser(nn.Module): + r""" + Proximal operator of (2nd order) Total Generalised Variation operator. + + (see K. Bredies, K. Kunisch, and T. Pock, "Total generalized variation," SIAM J. Imaging Sci., 3(3), 492-526, 2010.) + + This algorithm converges to the unique image :math:`x` (and the auxiliary vector field :math:`r`) minimizing + + .. math:: + + \underset{x, r}{\arg\min} \; \frac{1}{2}\|x-y\|_2^2 + \lambda_1 \|r\|_{1,2} + \lambda_2 \|J(Dx-r)\|_{1,F} + + where :math:`D` maps an image to its gradient field and :math:`J` maps a vector field to its Jacobian. + For a large value of :math:`\lambda_2`, the TGV behaves like the TV. + For a small value, it behaves like the :math:`\ell_1`-Frobenius norm of the Hessian. + + The problem is solved with an over-relaxed Chambolle-Pock algorithm (see L. Condat, "A primal-dual splitting method + for convex optimization involving Lipschitzian, proximable and linear composite terms", J. Optimization Theory and + Applications, vol. 158, no. 2, pp. 460-479, 2013. + + Code (and description) adapted from Laurent Condat's matlab version (https://lcondat.github.io/software.html) and + Daniil Smolyakov's `code <https://github.com/RoundedGlint585/TGVDenoising/blob/master/TGV%20WithoutHist.ipynb>`_. + + Attributes + ---------- + ndim : int + Number of spatial dimensions, can be either ``2`` or ``3``. + ths : float, optional + Denoise threshold. The default is ``0.1``. + trainable : bool, optional + If ``True``, threshold value is trainable, otherwise it is not. + The default is ``False``. + device : str, optional + Device on which the wavelet transform is computed. + The default is ``None`` (infer from input). + verbose : bool, optional + Whether to print computation details or not. The default is ``False``. + niter : int, optional, + Maximum number of iterations. The default is ``1000``. + crit : float, optional + Convergence criterion. The default is 1e-5. + x2 : torch.Tensor, optional + Primary variable for warm restart. The default is ``None``. + u2 : torch.Tensor, optional + Dual variable for warm restart. The default is ``None``. + r2 : torch.Tensor, optional + Auxiliary variable for warm restart. The default is ``None``. + + Notes + ----- + The regularization term :math:`\|r\|_{1,2} + \|J(Dx-r)\|_{1,F}` is implicitly normalized by its Lipschitz + constant, i.e. :math:`\sqrt{72}`, see e.g. K. Bredies et al., "Total generalized variation," SIAM J. Imaging + Sci., 3(3), 492-526, 2010. + + """ + +
[docs] def __init__( + self, + ndim, + ths=0.1, + trainable=False, + device=None, + verbose=False, + niter=100, + crit=1e-5, + x2=None, + u2=None, + r2=None, + ): + super().__init__() + + if trainable: + self.ths = nn.Parameter(ths) + else: + self.ths = ths + + self.denoiser = _TGVDenoiser( + ndim=ndim, + device=device, + verbose=verbose, + n_it_max=niter, + crit=crit, + x2=x2, + u2=u2, + r2=r2, + ) + self.denoiser.device = device
+ + def forward(self, input): + # get complex + if torch.is_complex(input): + iscomplex = True + else: + iscomplex = False + + # default device + idevice = input.device + if self.denoiser.device is None: + device = idevice + else: + device = self.denoiser.device + + # get input shape + ndim = self.denoiser.ndim + ishape = input.shape + + # reshape for computation + input = input.reshape(-1, *ishape[-ndim:]) + if iscomplex: + input = torch.stack((input.real, input.imag), axis=1) + input = input.reshape(-1, *ishape[-ndim:]) + + # apply denoising + output = self.denoiser(input[:, None, ...].to(device), self.ths).to( + idevice + ) # perform the denoising on the real-valued tensor + + # reshape back + if iscomplex: + output = ( + output[::2, ...] + 1j * output[1::2, ...] + ) # build the denoised complex data + output = output.reshape(ishape) + + return output.to(idevice)
+ + +
[docs]def tgv_denoise( + input, + ndim, + ths=0.1, + device=None, + verbose=False, + niter=100, + crit=1e-5, + x2=None, + u2=None, +): + r""" + Apply Total Generalized Variation denoising. + + (see K. Bredies, K. Kunisch, and T. Pock, "Total generalized variation," SIAM J. Imaging Sci., 3(3), 492-526, 2010.) + + This algorithm converges to the unique image :math:`x` (and the auxiliary vector field :math:`r`) minimizing + + .. math:: + + \underset{x, r}{\arg\min} \; \frac{1}{2}\|x-y\|_2^2 + \lambda_1 \|r\|_{1,2} + \lambda_2 \|J(Dx-r)\|_{1,F} + + where :math:`D` maps an image to its gradient field and :math:`J` maps a vector field to its Jacobian. + For a large value of :math:`\lambda_2`, the TGV behaves like the TV. + For a small value, it behaves like the :math:`\ell_1`-Frobenius norm of the Hessian. + + The problem is solved with an over-relaxed Chambolle-Pock algorithm (see L. Condat, "A primal-dual splitting method + for convex optimization involving Lipschitzian, proximable and linear composite terms", J. Optimization Theory and + Applications, vol. 158, no. 2, pp. 460-479, 2013. + + Code (and description) adapted from Laurent Condat's matlab version (https://lcondat.github.io/software.html) and + Daniil Smolyakov's `code <https://github.com/RoundedGlint585/TGVDenoising/blob/master/TGV%20WithoutHist.ipynb>`_. + + Arguments + --------- + input : np.ndarray | torch.Tensor + Input image of shape (..., n_ndim, ..., n_0). + ndim : int + Number of spatial dimensions, can be either ``2`` or ``3``. + ths : float, optional + Denoise threshold. Default is ``0.1``. + ndim : int + Number of spatial dimensions, can be either ``2`` or ``3``. + ths : float, optional + Denoise threshold. The default is ``0.1``. + trainable : bool, optional + If ``True``, threshold value is trainable, otherwise it is not. + The default is ``False``. + device : str, optional + Device on which the wavelet transform is computed. + The default is ``None`` (infer from input). + verbose : bool, optional + Whether to print computation details or not. The default is ``False``. + niter : int, optional, + Maximum number of iterations. The default is ``1000``. + crit : float, optional + Convergence criterion. The default is 1e-5. + x2 : torch.Tensor, optional + Primary variable for warm restart. The default is ``None``. + u2 : torch.Tensor, optional + Dual variable for warm restart. The default is ``None``. + r2 : torch.Tensor, optional + Auxiliary variable for warm restart. The default is ``None``. + + Returns + ------- + output : np.ndarray | torch.Tensor + Denoised image of shape (..., n_ndim, ..., n_0). + + """ + # cast to numpy if required + if isinstance(input, np.ndarray): + isnumpy = True + input = torch.as_tensor(input) + else: + isnumpy = False + + # initialize denoiser + TV = TGVDenoiser(ndim, ths, False, device, verbose, niter, crit, x2, u2) + output = TV(input) + + # cast back to numpy if requried + if isnumpy: + output = output.numpy(force=True) + + return output
+ + +# %% local utils +class _TGVDenoiser(nn.Module): + def __init__( + self, + ndim, + device, + verbose=False, + n_it_max=1000, + crit=1e-5, + x2=None, + u2=None, + r2=None, + ): + super().__init__() + self.device = device + self.ndim = ndim + + if ndim == 2: + self.nabla = self.nabla2 + self.nabla_adjoint = self.nabla2_adjoint + self.epsilon = self.epsilon2 + self.epsilon_adjoint = self.epsilon2_adjoint + elif ndim == 3: + self.nabla = self.nabla3 + self.nabla_adjoint = self.nabla3_adjoint + self.epsilon = self.epsilon3 + self.epsilon_adjoint = self.epsilon3_adjoint + + self.verbose = verbose + self.n_it_max = n_it_max + self.crit = crit + self.restart = True + + self.tau = 0.01 # > 0 + + self.rho = 1.99 # in 1,2 + self.sigma = 1 / self.tau / 72 + + self.x2 = x2 + self.r2 = r2 + self.u2 = u2 + + self.has_converged = False + + def prox_tau_fx(self, x, y): + return (x + self.tau * y) / (1 + self.tau) + + def prox_tau_fr(self, r, lambda1): + left = torch.sqrt(torch.sum(r**2, axis=-1)) / (self.tau * lambda1) + tmp = r - r / ( + torch.maximum( + left, torch.tensor([1], device=left.device).type(left.dtype) + ).unsqueeze(-1) + ) + return tmp + + def prox_sigma_g_conj(self, u, lambda2): + return u / ( + torch.maximum( + torch.sqrt(torch.sum(u**2, axis=-1)) / lambda2, + torch.tensor([1], device=u.device).type(u.dtype), + ).unsqueeze(-1) + ) + + def forward(self, y, ths=None): + restart = ( + True + if (self.restart or self.x2 is None or self.x2.shape != y.shape) + else False + ) + + if restart: + self.x2 = y.clone() + self.r2 = torch.zeros((*self.x2.shape, 2), device=self.x2.device).type( + self.x2.dtype + ) + self.u2 = torch.zeros((*self.x2.shape, 4), device=self.x2.device).type( + self.x2.dtype + ) + self.restart = False + + if ths is not None: + lambda1 = ths * 0.1 + lambda2 = ths * 0.15 + + cy = (y**2).sum() / 2 + primalcostlowerbound = 0 + + for _ in range(self.n_it_max): + x_prev = self.x2.clone() + tmp = self.tau * self.epsilon_adjoint(self.u2) + x = self.prox_tau_fx(self.x2 - self.nabla_adjoint(tmp), y) + r = self.prox_tau_fr(self.r2 + tmp, lambda1) + u = self.prox_sigma_g_conj( + self.u2 + + self.sigma + * self.epsilon(self.nabla(2 * x - self.x2) - (2 * r - self.r2)), + lambda2, + ) + self.x2 = self.x2 + self.rho * (x - self.x2) + self.r2 = self.r2 + self.rho * (r - self.r2) + self.u2 = self.u2 + self.rho * (u - self.u2) + + rel_err = torch.linalg.norm( + x_prev.flatten() - self.x2.flatten() + ) / torch.linalg.norm(self.x2.flatten() + 1e-12) + + if _ > 1 and rel_err < self.crit: + self.has_converged = True + if self.verbose: + print("TGV prox reached convergence") + break + + if self.verbose and _ % 100 == 0: + primalcost = ( + torch.linalg.norm(x.flatten() - y.flatten()) ** 2 + + lambda1 * torch.sum(torch.sqrt(torch.sum(r**2, axis=-1))) + + lambda2 + * torch.sum( + torch.sqrt( + torch.sum(self.epsilon(self.nabla(x) - r) ** 2, axis=-1) + ) + ) + ) + # dualcost = cy - ((y - nablaT(epsilonT(u))) ** 2).sum() / 2.0 + tmp = torch.max( + torch.sqrt(torch.sum(self.epsilon_adjoint(u) ** 2, axis=-1)) + ) # to check feasibility: the value will be <= lambda1 only at convergence. Since u is not feasible, the dual cost is not reliable: the gap=primalcost-dualcost can be <0 and cannot be used as stopping criterion. + u3 = u / torch.maximum( + tmp / lambda1, torch.tensor([1], device=tmp.device).type(tmp.dtype) + ) # u3 is a scaled version of u, which is feasible. so, its dual cost is a valid, but very rough lower bound of the primal cost. 
+ dualcost2 = ( + cy + - torch.sum((y - self.nabla_adjoint(self.epsilon_adjoint(u3))) ** 2) + / 2.0 + ) # we display the best value of dualcost2 computed so far. + primalcostlowerbound = max(primalcostlowerbound, dualcost2.item()) + if self.verbose: + print( + "Iter: ", + _, + " Primal cost: ", + primalcost.item(), + " Rel err:", + rel_err, + ) + + if _ == self.n_it_max - 1: + if self.verbose: + print( + "The algorithm did not converge, stopped after " + + str(_ + 1) + + " iterations." + ) + + return self.x2 + + @staticmethod + def nabla2(x): + r""" + Applies the finite differences operator associated with tensors of the same shape as x. + """ + b, c, h, w = x.shape + u = torch.zeros((b, c, h, w, 2), device=x.device).type(x.dtype) + u[:, :, :-1, :, 0] = u[:, :, :-1, :, 0] - x[:, :, :-1] + u[:, :, :-1, :, 0] = u[:, :, :-1, :, 0] + x[:, :, 1:] + u[:, :, :, :-1, 1] = u[:, :, :, :-1, 1] - x[..., :-1] + u[:, :, :, :-1, 1] = u[:, :, :, :-1, 1] + x[..., 1:] + return u + + @staticmethod + def nabla2_adjoint(x): + r""" + Applies the adjoint of the finite difference operator. + """ + b, c, h, w = x.shape[:-1] + u = torch.zeros((b, c, h, w), device=x.device).type( + x.dtype + ) # note that we just reversed left and right sides of each line to obtain the transposed operator + u[:, :, :-1] = u[:, :, :-1] - x[:, :, :-1, :, 0] + u[:, :, 1:] = u[:, :, 1:] + x[:, :, :-1, :, 0] + u[..., :-1] = u[..., :-1] - x[..., :-1, 1] + u[..., 1:] = u[..., 1:] + x[..., :-1, 1] + return u + + @staticmethod + def nabla3(x): + r""" + Applies the finite differences operator associated with tensors of the same shape as x. + """ + b, c, d, h, w = x.shape + u = torch.zeros((b, c, d, h, w, 3), device=x.device).type(x.dtype) + u[:, :, :-1, :, :, 0] = u[:, :, :-1, :, :, 0] - x[:, :, :-1] + u[:, :, :-1, :, :, 0] = u[:, :, :-1, :, :, 0] + x[:, :, 1:] + u[:, :, :, :-1, :, 1] = u[:, :, :, :-1, :, 1] - x[:, :, :, :-1] + u[:, :, :, :-1, :, 1] = u[:, :, :, :-1, :, 1] + x[:, :, :, 1:] + u[:, :, :, :, :-1, 2] = u[:, :, :, :, :-1, 2] - x[:, :, :, :, :-1] + u[:, :, :, :, :-1, 2] = u[:, :, :, :, :-1, 2] + x[:, :, :, :, 1:] + + return u + + @staticmethod + def nabla3_adjoint(x): + r""" + Applies the adjoint of the finite difference operator. + """ + b, c, d, h, w = x.shape + u = torch.zeros((b, c, d, h, w), device=x.device).type(x.dtype) + u[:, :, :-1, :, 0] = u[:, :, :-1, :, 0] - x[:, :, :-1] + u[:, :, 1:, :, 0] = u[:, :, 1:, :, 0] + x[:, :, :-1] + u[:, :, :, :-1, 1] = u[:, :, :, :-1, 1] - x[:, :, :, :-1] + u[:, :, :, 1:, 1] = u[:, :, :, 1:, 1] + x[:, :, :, :-1] + u[:, :, :, :, :-1, 2] = u[:, :, :, :, :-1, 2] - x[:, :, :, :, :-1] + u[:, :, :, :, 1:, 2] = u[:, :, :, :, 1:, 2] + x[:, :, :, :, :-1] + + return u + + @staticmethod + def epsilon2(I): # Simplified + r""" + Applies the jacobian of a vector field. + """ + b, c, h, w, _ = I.shape + G = torch.zeros((b, c, h, w, 4), device=I.device).type(I.dtype) + G[:, :, 1:, :, 0] = G[:, :, 1:, :, 0] - I[:, :, :-1, :, 0] # xdy + G[..., 0] = G[..., 0] + I[..., 0] + G[..., 1:, 1] = G[..., 1:, 1] - I[..., :-1, 0] # xdx + G[..., 1:, 1] = G[..., 1:, 1] + I[..., 1:, 0] + G[..., 1:, 2] = G[..., 1:, 2] - I[..., :-1, 1] # xdx + G[..., 2] = G[..., 2] + I[..., 1] + G[:, :, :-1, :, 3] = G[:, :, :-1, :, 3] - I[:, :, :-1, :, 1] # xdy + G[:, :, :-1, :, 3] = G[:, :, :-1, :, 3] + I[:, :, 1:, :, 1] + return G + + @staticmethod + def epsilon2_adjoint(G): + r""" + Applies the adjoint of the jacobian of a vector field. 
+ """ + b, c, h, w, _ = G.shape + I = torch.zeros((b, c, h, w, 2), device=G.device).type(G.dtype) + I[:, :, :-1, :, 0] = I[:, :, :-1, :, 0] - G[:, :, 1:, :, 0] + I[..., 0] = I[..., 0] + G[..., 0] + I[..., :-1, 0] = I[..., :-1, 0] - G[..., 1:, 1] + I[..., 1:, 0] = I[..., 1:, 0] + G[..., 1:, 1] + I[..., :-1, 1] = I[..., :-1, 1] - G[..., 1:, 2] + I[..., 1] = I[..., 1] + G[..., 2] + I[:, :, :-1, :, 1] = I[:, :, :-1, :, 1] - G[:, :, :-1, :, 3] + I[:, :, 1:, :, 1] = I[:, :, 1:, :, 1] + G[:, :, :-1, :, 3] + return I + + @staticmethod + def epsilon3(I): # Adapted for 3D matrices + r""" + Applies the jacobian of a vector field. + """ + b, c, d, h, w = I.shape + G = torch.zeros((b, c, d, h, w, 6), device=I.device).type(I.dtype) + G[:, :, :, 1:, :, 0] = G[:, :, :, 1:, :, 0] - I[:, :, :, :-1, :, 0] # xdy + G[..., 0] = G[..., 0] + I[..., 0] + G[..., 1:, :, 1] = G[..., 1:, :, 1] - I[..., :, :-1, 0] # xdx + G[..., 1:, :, 1] = G[..., 1:, :, 1] + I[..., :, 1:, 0] + G[..., 1:, :, 2] = G[..., 1:, :, 2] - I[..., :, :-1, 1] # xdz + G[..., 2] = G[..., 2] + I[..., :, 1, 0] + G[:, :, :, :-1, :, 3] = G[:, :, :, :-1, :, 3] - I[:, :, :, :-1, :, 1] # xdy + G[:, :, :, :-1, :, 3] = G[:, :, :, :-1, :, 3] + I[:, :, :, 1:, :, 1] + G[..., 3] = G[..., 3] + I[..., 0] + G[..., 1:, :, 4] = G[..., 1:, :, 4] - I[..., 1:, :, :-1, 2] # xdz + G[..., 4] = G[..., 4] + I[..., 1, :, :, 0] + G[:, :, :, :, :-1, 5] = G[:, :, :, :, :-1, 5] - I[:, :, :, :, :-1, 2] # xdy + G[:, :, :, 1:, :, 5] = G[:, :, :, 1:, :, 5] + I[:, :, :, :, :-1, 2] + return G + + @staticmethod + def epsilon3_adjoint(G): # Adapted for 3D matrices + r""" + Applies the adjoint of the jacobian of a vector field. + """ + b, c, d, h, w, _ = G.shape + I = torch.zeros((b, c, d, h, w, 3), device=G.device).type(G.dtype) + I[:, :, :, :-1, :, 0] = I[:, :, :, :-1, :, 0] - G[:, :, :, 1:, :, 0] + I[..., 0] = I[..., 0] + G[..., 0] + I[..., :-1, :, 0] = I[..., :-1, :, 0] - G[..., 1:, :, 1] + I[..., 1:, :, 0] = I[..., 1:, :, 0] + G[..., 1:, :, 1] + I[..., :-1, :, 1] = I[..., :-1, :, 1] - G[..., 1:, :, 2] + I[..., 0] = I[..., 0] + G[..., 2] + I[:, :, :, :-1, :, 1] = I[:, :, :, :-1, :, 1] - G[:, :, :, :-1, :, 3] + I[:, :, :, 1:, :, 1] = I[:, :, :, 1:, :, 1] + G[:, :, :, :-1, :, 3] + I[..., 1] = I[..., 1] + G[..., 3] + I[..., :-1, :, 2] = I[..., :-1, :, 2] - G[..., 1:, :, 4] + I[..., 0] = I[..., 0] + G[..., 4] + I[:, :, :, :, :-1, 2] = I[:, :, :, :, :-1, 2] - G[:, :, :, :, :-1, 5] + I[:, :, :, 1:, :, 2] = I[:, :, :, 1:, :, 2] + G[:, :, :, :, :-1, 5] + return I +
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/prox/tv.html b/_modules/deepmr/prox/tv.html new file mode 100644 index 00000000..eb5cc12b --- /dev/null +++ b/_modules/deepmr/prox/tv.html @@ -0,0 +1,903 @@ + + + + + + + + + + + deepmr.prox.tv — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.prox.tv

+"""Total variation denoising prior."""
+
+__all__ = ["TVDenoiser", "tv_denoise"]
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+
+
[docs]class TVDenoiser(nn.Module): + r""" + Proximal operator of the isotropic Total Variation operator. + + This algorithm converges to the unique image :math:`x` that is the solution of + + .. math:: + + \underset{x}{\arg\min} \; \frac{1}{2}\|x-y\|_2^2 + \gamma \|Dx\|_{1,2}, + + where :math:`D` maps an image to its gradient field. + + The problem is solved with an over-relaxed Chambolle-Pock algorithm (see L. Condat, "A primal-dual splitting method + for convex optimization involving Lipschitzian, proximable and linear composite terms", J. Optimization Theory and + Applications, vol. 158, no. 2, pp. 460-479, 2013. + + Code (and description) adapted from ``deepinv``, in turn adapted from + Laurent Condat's matlab version (https://lcondat.github.io/software.html) and + Daniil Smolyakov's `code <https://github.com/RoundedGlint585/TGVDenoising/blob/master/TGV%20WithoutHist.ipynb>`_. + + This algorithm is implemented with warm restart, i.e. the primary and dual variables are kept in memory + between calls to the forward method. This speeds up the computation when using this class in an iterative algorithm. + + Attributes + --------- + ndim : int + Number of spatial dimensions, can be either ``2`` or ``3``. + ths : float, optional + Denoise threshold. The default is ``0.1``. + trainable : bool, optional + If ``True``, threshold value is trainable, otherwise it is not. + The default is ``False``. + device : str, optional + Device on which the wavelet transform is computed. + The default is ``None`` (infer from input). + verbose : bool, optional + Whether to print computation details or not. The default is ``False``. + niter : int, optional, + Maximum number of iterations. The default is ``1000``. + crit : float, optional + Convergence criterion. The default is 1e-5. + x2 : torch.Tensor, optional + Primary variable for warm restart. The default is ``None``. + u2 : torch.Tensor, optional + Dual variable for warm restart. The default is ``None``. + + Notes + ----- + The regularization term :math:`\|Dx\|_{1,2}` is implicitly normalized by its Lipschitz constant, i.e. + :math:`\sqrt{8}`, see e.g. A. Beck and M. Teboulle, "Fast gradient-based algorithms for constrained total + variation image denoising and deblurring problems", IEEE T. on Image Processing. 18(11), 2419-2434, 2009. + + """ + +
[docs] def __init__( + self, + ndim, + ths=0.1, + trainable=False, + device=None, + verbose=False, + niter=100, + crit=1e-5, + x2=None, + u2=None, + ): + super().__init__() + + if trainable: + self.ths = nn.Parameter(ths) + else: + self.ths = ths + + self.denoiser = _TVDenoiser( + ndim=ndim, + device=device, + verbose=verbose, + n_it_max=niter, + crit=crit, + x2=x2, + u2=u2, + ) + self.denoiser.device = device
+ + def forward(self, input): + # get complex + if torch.is_complex(input): + iscomplex = True + else: + iscomplex = False + + # default device + idevice = input.device + if self.denoiser.device is None: + device = idevice + else: + device = self.denoiser.device + + # get input shape + ndim = self.denoiser.ndim + ishape = input.shape + + # reshape for computation + input = input.reshape(-1, *ishape[-ndim:]) + if iscomplex: + input = torch.stack((input.real, input.imag), axis=1) + input = input.reshape(-1, *ishape[-ndim:]) + + # apply denoising + output = self.denoiser(input[:, None, ...].to(device), self.ths).to( + idevice + ) # perform the denoising on the real-valued tensor + + # reshape back + if iscomplex: + output = ( + output[::2, ...] + 1j * output[1::2, ...] + ) # build the denoised complex data + output = output.reshape(ishape) + + return output.to(idevice)
+ + +
[docs]def tv_denoise( + input, + ndim, + ths=0.1, + device=None, + verbose=False, + niter=100, + crit=1e-5, + x2=None, + u2=None, +): + r""" + Apply isotropic Total Variation denoising. + + This algorithm converges to the unique image :math:`x` that is the solution of + + .. math:: + + \underset{x}{\arg\min} \; \frac{1}{2}\|x-y\|_2^2 + \gamma \|Dx\|_{1,2}, + + where :math:`D` maps an image to its gradient field. + + The problem is solved with an over-relaxed Chambolle-Pock algorithm (see L. Condat, "A primal-dual splitting method + for convex optimization involving Lipschitzian, proximable and linear composite terms", J. Optimization Theory and + Applications, vol. 158, no. 2, pp. 460-479, 2013. + + Code (and description) adapted from ``deepinv``, in turn adapted from + Laurent Condat's matlab version (https://lcondat.github.io/software.html) and + Daniil Smolyakov's `code <https://github.com/RoundedGlint585/TGVDenoising/blob/master/TGV%20WithoutHist.ipynb>`_. + + This algorithm is implemented with warm restart, i.e. the primary and dual variables are kept in memory + between calls to the forward method. This speeds up the computation when using this class in an iterative algorithm. + + Arguments + --------- + input : np.ndarray | torch.Tensor + Input image of shape (..., n_ndim, ..., n_0). + ndim : int + Number of spatial dimensions, can be either ``2`` or ``3``. + ths : float, optional + Denoise threshold. The default is``0.1``. + device : str, optional + Device on which the wavelet transform is computed. + The default is ``None`` (infer from input). + verbose : bool, optional + Whether to print computation details or not. The default is ``False``. + niter : int, optional, + Maximum number of iterations. The default is ``1000``. + crit : float, optional + Convergence criterion. The default is 1e-5. + x2 : torch.Tensor, optional + Primary variable for warm restart. The default is ``None``. + u2 : torch.Tensor, optional + Dual variable for warm restart. The default is ``None``. + + Notes + ----- + The regularization term :math:`\|Dx\|_{1,2}` is implicitly normalized by its Lipschitz constant, i.e. + :math:`\sqrt{8}`, see e.g. A. Beck and M. Teboulle, "Fast gradient-based algorithms for constrained total + variation image denoising and deblurring problems", IEEE T. on Image Processing. 18(11), 2419-2434, 2009. + + Returns + ------- + output : np.ndarray | torch.Tensor + Denoised image of shape (..., n_ndim, ..., n_0). + + """ + # cast to numpy if required + if isinstance(input, np.ndarray): + isnumpy = True + input = torch.as_tensor(input) + else: + isnumpy = False + + # initialize denoiser + TV = TVDenoiser(ndim, ths, False, device, verbose, niter, crit, x2, u2) + output = TV(input) + + # cast back to numpy if requried + if isnumpy: + output = output.numpy(force=True) + + return output
+ + +# %% local utils +class _TVDenoiser(nn.Module): + def __init__( + self, + ndim, + device=None, + verbose=False, + n_it_max=1000, + crit=1e-5, + x2=None, + u2=None, + ): + super().__init__() + self.device = device + self.ndim = ndim + + if ndim == 2: + self.nabla = self.nabla2 + self.nabla_adjoint = self.nabla2_adjoint + elif ndim == 3: + self.nabla = self.nabla3 + self.nabla_adjoint = self.nabla3_adjoint + + self.verbose = verbose + self.n_it_max = n_it_max + self.crit = crit + self.restart = True + + self.tau = 0.01 # > 0 + + self.rho = 1.99 # in 1,2 + self.sigma = 1 / self.tau / 8 + + self.x2 = x2 + self.u2 = u2 + + self.has_converged = False + + def prox_tau_fx(self, x, y): + return (x + self.tau * y) / (1 + self.tau) + + def prox_sigma_g_conj(self, u, lambda2): + return u / ( + torch.maximum( + torch.sqrt(torch.sum(u**2, axis=-1)) / lambda2, + torch.tensor([1], device=u.device).type(u.dtype), + ).unsqueeze(-1) + ) + + def forward(self, y, ths=None): + restart = ( + True + if (self.restart or self.x2 is None or self.x2.shape != y.shape) + else False + ) + + if restart: + self.x2 = y.clone() + self.u2 = torch.zeros((*self.x2.shape, 2), device=self.x2.device).type( + self.x2.dtype + ) + self.restart = False + + if ths is not None: + lambd = ths + + for _ in range(self.n_it_max): + x_prev = self.x2.clone() + + x = self.prox_tau_fx(self.x2 - self.tau * self.nabla_adjoint(self.u2), y) + u = self.prox_sigma_g_conj( + self.u2 + self.sigma * self.nabla(2 * x - self.x2), lambd + ) + self.x2 = self.x2 + self.rho * (x - self.x2) + self.u2 = self.u2 + self.rho * (u - self.u2) + + rel_err = torch.linalg.norm( + x_prev.flatten() - self.x2.flatten() + ) / torch.linalg.norm(self.x2.flatten() + 1e-12) + + if _ > 1 and rel_err < self.crit: + if self.verbose: + print("TV prox reached convergence") + break + + return self.x2 + + @staticmethod + def nabla2(x): + r""" + Applies the finite differences operator associated with tensors of the same shape as x. + """ + b, c, h, w = x.shape + u = torch.zeros((b, c, h, w, 2), device=x.device).type(x.dtype) + u[:, :, :-1, :, 0] = u[:, :, :-1, :, 0] - x[:, :, :-1] + u[:, :, :-1, :, 0] = u[:, :, :-1, :, 0] + x[:, :, 1:] + u[:, :, :, :-1, 1] = u[:, :, :, :-1, 1] - x[..., :-1] + u[:, :, :, :-1, 1] = u[:, :, :, :-1, 1] + x[..., 1:] + return u + + @staticmethod + def nabla2_adjoint(x): + r""" + Applies the adjoint of the finite difference operator. + """ + b, c, h, w = x.shape[:-1] + u = torch.zeros((b, c, h, w), device=x.device).type( + x.dtype + ) # note that we just reversed left and right sides of each line to obtain the transposed operator + u[:, :, :-1] = u[:, :, :-1] - x[:, :, :-1, :, 0] + u[:, :, 1:] = u[:, :, 1:] + x[:, :, :-1, :, 0] + u[..., :-1] = u[..., :-1] - x[..., :-1, 1] + u[..., 1:] = u[..., 1:] + x[..., :-1, 1] + return u + + @staticmethod + def nabla3(x): + r""" + Applies the finite differences operator associated with tensors of the same shape as x. 
+ """ + b, c, d, h, w = x.shape + u = torch.zeros((b, c, d, h, w, 3), device=x.device).type(x.dtype) + u[:, :, :-1, :, :, 0] = u[:, :, :-1, :, :, 0] - x[:, :, :-1] + u[:, :, :-1, :, :, 0] = u[:, :, :-1, :, :, 0] + x[:, :, 1:] + u[:, :, :, :-1, :, 1] = u[:, :, :, :-1, :, 1] - x[:, :, :, :-1] + u[:, :, :, :-1, :, 1] = u[:, :, :, :-1, :, 1] + x[:, :, :, 1:] + u[:, :, :, :, :-1, 2] = u[:, :, :, :, :-1, 2] - x[:, :, :, :, :-1] + u[:, :, :, :, :-1, 2] = u[:, :, :, :, :-1, 2] + x[:, :, :, :, 1:] + + return u + + @staticmethod + def nabla3_adjoint(x): + r""" + Applies the adjoint of the finite difference operator. + """ + b, c, d, h, w = x.shape + u = torch.zeros((b, c, d, h, w), device=x.device).type(x.dtype) + u[:, :, :-1, :, 0] = u[:, :, :-1, :, 0] - x[:, :, :-1] + u[:, :, 1:, :, 0] = u[:, :, 1:, :, 0] + x[:, :, :-1] + u[:, :, :, :-1, 1] = u[:, :, :, :-1, 1] - x[:, :, :, :-1] + u[:, :, :, 1:, 1] = u[:, :, :, 1:, 1] + x[:, :, :, :-1] + u[:, :, :, :, :-1, 2] = u[:, :, :, :, :-1, 2] - x[:, :, :, :, :-1] + u[:, :, :, :, 1:, 2] = u[:, :, :, :, 1:, 2] + x[:, :, :, :, :-1] + + return u +
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/prox/wavelet.html b/_modules/deepmr/prox/wavelet.html new file mode 100644 index 00000000..e81d0e81 --- /dev/null +++ b/_modules/deepmr/prox/wavelet.html @@ -0,0 +1,1291 @@ + + + + + + + + + + + deepmr.prox.wavelet — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.prox.wavelet

+"""Wavelet denoising."""
+
+__all__ = [
+    "WaveletDenoiser",
+    "wavelet_denoise",
+    "WaveletDictDenoiser",
+    "wavelet_dict_denoise",
+]
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+import ptwt
+import pywt
+
+from . import threshold
+
+
+
[docs]class WaveletDenoiser(nn.Module): + r""" + Orthogonal Wavelet denoising with the :math:`\ell_1` norm. + + Adapted from :func:`deepinv.denoisers.WaveletDenoiser` + to support complex-valued inputs. + + This denoiser is defined as the solution to the optimization problem: + + .. math:: + + \underset{x}{\arg\min} \; \|x-y\|^2 + \gamma \|\Psi x\|_n + + where :math:`\Psi` is an orthonormal wavelet transform, :math:`\gamma>0` is a hyperparameter, and where + :math:`\|\cdot\|_n` is either the :math:`\ell_1` norm (``non_linearity="soft"``) or + the :math:`\ell_0` norm (``non_linearity="hard"``). A variant of the :math:`\ell_0` norm is also available + (``non_linearity="topk"``), where the thresholding is done by keeping the :math:`k` largest coefficients + in each wavelet subband and setting the others to zero. + + The solution is available in closed-form, thus the denoiser is cheap to compute. + + Notes + ----- + Following common practice in signal processing, only detail coefficients are regularized, and the approximation + coefficients are left untouched. + + Warning + ------- + For 3D data, the computational complexity of the wavelet transform scales cubically with the size of the support. For + large 3D data, it is recommended to use wavelets with small support (e.g. db1 to db4). + + Attributes + ---------- + ndim : int + Number of spatial dimensions, can be either ``2`` or ``3``. + ths : float, optional + Denoise threshold. The default is ``0.1``. + trainable : bool, optional + If ``True``, threshold value is trainable, otherwise it is not. + The default is ``False``. + wv : str, optional + Wavelet name to choose among those available in `pywt <https://pywavelets.readthedocs.io/en/latest/>`_. + The default is ``"db4"``. + device : str, optional + Device on which the wavelet transform is computed. + The default is ``None`` (infer from input). + non_linearity : str, optional + ``"soft"``, ``"hard"`` or ``"topk"`` thresholding. + The default is ``"soft"``. + level : int, optional + Level of the wavelet transform. The default is ``None``. + + """ + +
[docs] def __init__( + self, + ndim, + ths=0.1, + trainable=False, + wv="db4", + device=None, + non_linearity="soft", + level=None, + *args, + **kwargs + ): + super().__init__() + + if trainable: + self.ths = nn.Parameter(ths) + else: + self.ths = ths + + self.denoiser = _WaveletDenoiser( + level=level, + wv=wv, + device=device, + non_linearity=non_linearity, + wvdim=ndim, + *args, + **kwargs + ) + self.denoiser.device = device
+ + def forward(self, input): + # get complex + if torch.is_complex(input): + iscomplex = True + else: + iscomplex = False + + # default device + idevice = input.device + if self.denoiser.device is None: + device = idevice + else: + device = self.denoiser.device + + # get input shape + ndim = self.denoiser.dimension + ishape = input.shape + + # reshape for computation + input = input.reshape(-1, *ishape[-ndim:]) + if iscomplex: + input = torch.stack((input.real, input.imag), axis=1) + input = input.reshape(-1, *ishape[-ndim:]) + + # apply denoising + output = self.denoiser(input[:, None, ...].to(device), self.ths).to( + idevice + ) # perform the denoising on the real-valued tensor + + # reshape back + if iscomplex: + output = ( + output[::2, ...] + 1j * output[1::2, ...] + ) # build the denoised complex data + output = output.reshape(ishape) + + return output.to(idevice)
+ + +
[docs]def wavelet_denoise( + input, ndim, ths, wv="db4", device=None, non_linearity="soft", level=None +): + r""" + Apply orthogonal Wavelet denoising with the :math:`\ell_1` norm. + + Adapted from :func:``deepinv.denoisers.WaveletDenoiser`` + to support complex-valued inputs. + + This denoiser is defined as the solution to the optimization problem: + + .. math:: + + \underset{x}{\arg\min} \; \|x-y\|^2 + \gamma \|\Psi x\|_n + + where :math:`\Psi` is an orthonormal wavelet transform, :math:`\lambda>0` is a hyperparameter, and where + :math:`\|\cdot\|_n` is either the :math:`\ell_1` norm (``non_linearity="soft"``) or + the :math:`\ell_0` norm (``non_linearity="hard"``). A variant of the :math:`\ell_0` norm is also available + (``non_linearity="topk"``), where the thresholding is done by keeping the :math:`k` largest coefficients + in each wavelet subband and setting the others to zero. + + The solution is available in closed-form, thus the denoiser is cheap to compute. + + Arguments + --------- + input : np.ndarray | torch.Tensor + Input image of shape (..., n_ndim, ..., n_0). + ndim : int + Number of spatial dimensions, can be either ``2`` or ``3``. + ths : float + Denoise threshold. + wv : str, optional + Wavelet name to choose among those available in `pywt <https://pywavelets.readthedocs.io/en/latest/>`_. + The default is ``"db4"``. + device : str, optional + Device on which the wavelet transform is computed. + The default is ``None`` (infer from input). + non_linearity : str, optional + ``"soft"``, ``"hard"`` or ``"topk"`` thresholding. + The default is ``"soft"``. + level: int, optional + Level of the wavelet transform. The default is ``None``. + + Returns + ------- + output : np.ndarray | torch.Tensor + Denoised image of shape (..., n_ndim, ..., n_0). + + """ + # cast to numpy if required + if isinstance(input, np.ndarray): + isnumpy = True + input = torch.as_tensor(input) + else: + isnumpy = False + + # initialize denoiser + W = WaveletDenoiser(ndim, ths, False, wv, device, non_linearity, level) + output = W(input) + + # cast back to numpy if requried + if isnumpy: + output = output.numpy(force=True) + + return output
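A minimal usage sketch of ``wavelet_denoise``, assuming it is importable from ``deepmr.prox``; shapes, wavelet and threshold are illustrative only. Complex-valued inputs are supported, as described above.

```python
import torch
from deepmr.prox import wavelet_denoise

# hypothetical complex-valued 2D image stack: (nslices, ny, nx)
img = torch.randn(2, 128, 128, dtype=torch.complex64)
noise = 0.1 * torch.randn(2, 128, 128, dtype=torch.complex64)

# soft-threshold the detail coefficients of a db4 decomposition
denoised = wavelet_denoise(img + noise, ndim=2, ths=0.05, wv="db4", non_linearity="soft")
print(denoised.shape, denoised.dtype)  # (2, 128, 128), still complex-valued
```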
+ + +
[docs]class WaveletDictDenoiser(nn.Module): + r""" + Overcomplete Wavelet denoising with the :math:`\ell_1` norm. + + This denoiser is defined as the solution to the optimization problem: + + .. math:: + + \underset{x}{\arg\min} \; \|x-y\|^2 + \lambda \|\Psi x\|_n + + where :math:`\Psi` is an overcomplete wavelet transform, composed of 2 or more wavelets, i.e., + :math:`\Psi=[\Psi_1,\Psi_2,\dots,\Psi_L]`, :math:`\lambda>0` is a hyperparameter, and where + :math:`\|\cdot\|_n` is either the :math:`\ell_1` norm (``non_linearity="soft"``), + the :math:`\ell_0` norm (``non_linearity="hard"``) or a variant of the :math:`\ell_0` norm + (``non_linearity="topk"``) where only the top-k coefficients are kept; see :meth:`deepinv.models.WaveletDenoiser` for + more details. + + The solution is not available in closed-form, thus the denoiser runs an optimization algorithm for each test image. + + Attributes + ---------- + ndim : int + Number of spatial dimensions, can be either ``2`` or ``3``. + ths : float, optional + Denoise threshold. The default is ``0.1``. + trainable : bool, optional + If ``True``, threshold value is trainable, otherwise it is not. + The default is ``False``. + wv : Iterable[str], optional + List of mother wavelets. The names of the wavelets can be found in `here <https://wavelets.pybytes.com/>`_. + The default is ``["db8", "db4"]``. + device : str, optional + Device on which the wavelet transform is computed. + The default is ``None`` (infer from input). + non_linearity : str, optional + ``"soft"``, ``"hard"`` or ``"topk"`` thresholding. + The default is ``"soft"``. + level: int, optional + Level of the wavelet transform. The default is ``None``. + max_iter : int, optional + Number of iterations of the optimization algorithm. + The default is ``10``. + + """ + +
[docs] def __init__( + self, + ndim, + ths=0.1, + trainable=False, + wv=None, + device=None, + non_linearity="soft", + level=None, + max_iter=10, + *args, + **kwargs + ): + super().__init__() + + if trainable: + self.ths = nn.Parameter(ths) + else: + self.ths = ths + + self.denoiser = _WaveletDictDenoiser( + level=level, + wv=wv, + device=device, + max_iter=max_iter, + non_linearity=non_linearity, + wvdim=ndim, + *args, + **kwargs + ) + + self.denoiser.device = device
+ + def forward(self, input): + # get complex + if torch.is_complex(input): + iscomplex = True + else: + iscomplex = False + + # default device + idevice = input.device + if self.denoiser.device is None: + device = idevice + else: + device = self.denoiser.device + + # get input shape + ndim = self.denoiser.dimension + ishape = input.shape + + # reshape for computation + input = input.reshape(-1, *ishape[-ndim:]) + if iscomplex: + input = torch.stack((input.real, input.imag), axis=1) + input = input.reshape(-1, *ishape[-ndim:]) + + # apply denoising + output = self.denoiser(input[:, None, ...].to(device), self.ths).to( + idevice + ) # perform the denoising on the real-valued tensor + + # reshape back + if iscomplex: + output = ( + output[::2, ...] + 1j * output[1::2, ...] + ) # build the denoised complex data + output = output.reshape(ishape) + + return output.to(idevice)
+ + +
[docs]def wavelet_dict_denoise( + input, + ndim, + ths, + wv=None, + device=None, + non_linearity="soft", + level=None, + max_iter=10, +): + r""" + Apply overcomplete Wavelet denoising with the :math:`\ell_1` norm. + + This denoiser is defined as the solution to the optimization problem: + + .. math:: + + \underset{x}{\arg\min} \; \|x-y\|^2 + \lambda \|\Psi x\|_n + + where :math:`\Psi` is an overcomplete wavelet transform, composed of 2 or more wavelets, i.e., + :math:`\Psi=[\Psi_1,\Psi_2,\dots,\Psi_L]`, :math:`\lambda>0` is a hyperparameter, and where + :math:`\|\cdot\|_n` is either the :math:`\ell_1` norm (``non_linearity="soft"``), + the :math:`\ell_0` norm (``non_linearity="hard"``) or a variant of the :math:`\ell_0` norm + (``non_linearity="topk"``) where only the top-k coefficients are kept; see :meth:`deepinv.models.WaveletDenoiser` for + more details. + + The solution is not available in closed-form, thus the denoiser runs an optimization algorithm for each test image. + + Attributes + ---------- + input : np.ndarray | torch.Tensor + Input image of shape (..., n_ndim, ..., n_0). + ndim : int + Number of spatial dimensions, can be either ``2`` or ``3``. + ths : float + Denoise threshold. + wv : Iterable[str], optional + List of mother wavelets. The names of the wavelets can be found in `here <https://wavelets.pybytes.com/>`_. + The default is ``["db8", "db4"]``. + device : str, optional + Device on which the wavelet transform is computed. + The default is ``None`` (infer from input). + non_linearity : str, optional + ``"soft"``, ``"hard"`` or ``"topk"`` thresholding. + The default is ``"soft"``. + level: int, optional + Level of the wavelet transform. The default is ``None``. + max_iter : int, optional + Number of iterations of the optimization algorithm. + The default is ``10``. + + Returns + ------- + output : np.ndarray | torch.Tensor + Denoised image of shape (..., n_ndim, ..., n_0). + + """ + # cast to numpy if required + if isinstance(input, np.ndarray): + isnumpy = True + input = torch.as_tensor(input) + else: + isnumpy = False + + # initialize denoiser + WD = WaveletDictDenoiser( + ndim, ths, False, wv, device, non_linearity, level, max_iter + ) + output = WD(input) + + # cast back to numpy if requried + if isnumpy: + output = output.numpy(force=True) + + return output
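A usage sketch following the documented signature of ``wavelet_dict_denoise`` (imported here from ``deepmr.prox``, which is an assumption); shapes, wavelets and threshold are illustrative only.

```python
import torch
from deepmr.prox import wavelet_dict_denoise

# hypothetical 3D volume: (nz, ny, nx)
vol = torch.randn(32, 64, 64)
noisy = vol + 0.1 * torch.randn_like(vol)

# denoise in an overcomplete dictionary built from two mother wavelets
denoised = wavelet_dict_denoise(noisy, ndim=3, ths=0.05, wv=["db8", "db4"], max_iter=10)
```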
+ + +# %% local utils +class _WaveletDenoiser(nn.Module): + def __init__( + self, level=None, wv="db4", device="cpu", non_linearity="soft", wvdim=2 + ): + super().__init__() + self.level = level + self.wv = wv + self.device = device + self.non_linearity = non_linearity + self.dimension = wvdim + + def dwt(self, x): + r""" + Applies the wavelet analysis. + """ + if self.level is None: + level = pywt.dwtn_max_level(x.shape[-self.dimension :], self.wv) + self.level = level + else: + level = self.level + if self.dimension == 2: + dec = ptwt.wavedec2(x, pywt.Wavelet(self.wv), mode="zero", level=level) + elif self.dimension == 3: + dec = ptwt.wavedec3(x, pywt.Wavelet(self.wv), mode="zero", level=level) + dec = [list(t) if isinstance(t, tuple) else t for t in dec] + return dec + + def flatten_coeffs(self, dec): + r""" + Flattens the wavelet coefficients and returns them in a single torch vector of shape (n_coeffs,). + """ + if self.dimension == 2: + flat = torch.hstack( + [dec[0].flatten()] + + [decl.flatten() for l in range(1, len(dec)) for decl in dec[l]] + ) + elif self.dimension == 3: + flat = torch.hstack( + [dec[0].flatten()] + + [dec[l][key].flatten() for l in range(1, len(dec)) for key in dec[l]] + ) + return flat + + @staticmethod + def psi(x, wavelet="db4", level=None, dimension=2): + r""" + Returns a flattened list containing the wavelet coefficients. + """ + if level is None: + level = pywt.dwtn_max_level(x.shape[-dimension:], wavelet) + if dimension == 2: + dec = ptwt.wavedec2(x, pywt.Wavelet(wavelet), mode="zero", level=level) + dec = [list(t) if isinstance(t, tuple) else t for t in dec] + vec = [decl.flatten() for l in range(1, len(dec)) for decl in dec[l]] + elif dimension == 3: + dec = ptwt.wavedec3(x, pywt.Wavelet(wavelet), mode="zero", level=level) + dec = [list(t) if isinstance(t, tuple) else t for t in dec] + vec = [dec[l][key].flatten() for l in range(1, len(dec)) for key in dec[l]] + return vec + + def iwt(self, coeffs): + r""" + Applies the wavelet synthesis. + """ + coeffs = [tuple(t) if isinstance(t, list) else t for t in coeffs] + if self.dimension == 2: + rec = ptwt.waverec2(coeffs, pywt.Wavelet(self.wv)) + elif self.dimension == 3: + rec = ptwt.waverec3(coeffs, pywt.Wavelet(self.wv)) + return rec + + def prox_l1(self, x, ths): + r""" + Soft thresholding of the wavelet coefficients. + + Arguments + --------- + x : torch.Tensor + Wavelet coefficients. + ths : float, optional + Threshold. It can be element-wise, in which case + it is assumed to be broadcastable with ``input``. + The default is ``0.1``. + + Returns + ------- + torch.Tensor + Thresholded wavelet coefficients. + + """ + return threshold.soft_thresh(x, ths) + + def prox_l0(self, x, ths): + r""" + Hard thresholding of the wavelet coefficients. + + Arguments + --------- + x : torch.Tensor + Wavelet coefficients. + ths : float, optional + Threshold. It can be element-wise, in which case + it is assumed to be broadcastable with ``input``. + The default is ``0.1``. + + Returns + ------- + torch.Tensor + Thresholded wavelet coefficients. + + """ + if isinstance(ths, float): + ths_map = ths + else: + ths_map = ths.repeat( + 1, 1, 1, x.shape[-2], x.shape[-1] + ) # Reshaping to image wavelet shape + out = x.clone() + out[abs(out) < ths_map] = 0 + return out + + def hard_threshold_topk(self, x, ths): + r""" + Hard thresholding of the wavelet coefficients by keeping only the top-k coefficients and setting the others to 0. + + Arguments + --------- + x : torch.Tensor + Wavelet coefficients. 
+ ths : float | int, optional + Top k coefficients to keep. If ``float``, it is interpreted as a proportion of the total + number of coefficients. If ``int``, it is interpreted as the number of coefficients to keep. + The default is ``0.1`. + + Returns + ------- + torch.Tensor + Thresholded wavelet coefficients. + + """ + if isinstance(ths, float): + k = int(ths * x.shape[-3] * x.shape[-2] * x.shape[-1]) + else: + k = int(ths) + + # Reshape arrays to 2D and initialize output to 0 + x_flat = x.reshape(x.shape[0], -1) + out = torch.zeros_like(x_flat) + + topk_indices_flat = torch.topk(abs(x_flat), k, dim=-1)[1] + + # Convert the flattened indices to the original indices of x + batch_indices = ( + torch.arange(x.shape[0], device=x.device).unsqueeze(1).repeat(1, k) + ) + topk_indices = torch.stack([batch_indices, topk_indices_flat], dim=-1) + + # Set output's top-k elements to values from original x + out[tuple(topk_indices.view(-1, 2).t())] = x_flat[ + tuple(topk_indices.view(-1, 2).t()) + ] + return torch.reshape(out, x.shape) + + def thresold_func(self, x, ths): + r""" " + Apply thresholding to the wavelet coefficients. + """ + if self.non_linearity == "soft": + y = self.prox_l1(x, ths) + elif self.non_linearity == "hard": + y = self.prox_l0(x, ths) + elif self.non_linearity == "topk": + y = self.hard_threshold_topk(x, ths) + return y + + def thresold_2D(self, coeffs, ths): + r""" + Thresholds coefficients of the 2D wavelet transform. + """ + for level in range(1, self.level + 1): + ths_cur = self.reshape_ths(ths, level) + for c in range(3): + coeffs[level][c] = self.thresold_func(coeffs[level][c], ths_cur[c]) + return coeffs + + def threshold_3D(self, coeffs, ths): + r""" + Thresholds coefficients of the 3D wavelet transform. + """ + for level in range(1, self.level + 1): + ths_cur = self.reshape_ths(ths, level) + for c, key in enumerate(["aad", "ada", "daa", "add", "dad", "dda", "ddd"]): + coeffs[level][key] = self.prox_l1(coeffs[level][key], ths_cur[c]) + return coeffs + + def threshold_ND(self, coeffs, ths): + r""" + Apply thresholding to the wavelet coefficients of arbitrary dimension. + """ + if self.dimension == 2: + coeffs = self.thresold_2D(coeffs, ths) + elif self.dimension == 3: + coeffs = self.threshold_3D(coeffs, ths) + else: + raise ValueError("Only 2D and 3D wavelet transforms are supported") + + return coeffs + + def pad_input(self, x): + r""" + Pad the input to make it compatible with the wavelet transform. + """ + if self.dimension == 2: + h, w = x.size()[-2:] + padding_bottom = h % 2 + padding_right = w % 2 + p = (padding_bottom, padding_right) + x = torch.nn.ReplicationPad2d((0, p[0], 0, p[1]))(x) + elif self.dimension == 3: + d, h, w = x.size()[-3:] + padding_depth = d % 2 + padding_bottom = h % 2 + padding_right = w % 2 + p = (padding_depth, padding_bottom, padding_right) + x = torch.nn.ReplicationPad3d((0, p[0], 0, p[1], 0, p[2]))(x) + return x, p + + def crop_output(self, x, padding): + r""" + Crop the output to make it compatible with the wavelet transform. + """ + d, h, w = x.size()[-3:] + if len(padding) == 2: + out = x[..., : h - padding[0], : w - padding[1]] + elif len(padding) == 3: + out = x[..., : d - padding[0], : h - padding[1], : w - padding[2]] + return out + + def reshape_ths(self, ths, level): + r""" + Reshape the thresholding parameter in the appropriate format, i.e. either: + - a list of 3 elements, or + - a tensor of 3 elements. 
+ + Since the approximation coefficients are not thresholded, we do not need to provide a thresholding parameter, + ths has shape (n_levels-1, 3). + """ + numel = 3 if self.dimension == 2 else 7 + if not torch.is_tensor(ths): + if isinstance(ths, int) or isinstance(ths, float): + ths_cur = [ths] * numel + elif len(ths) == 1: + ths_cur = [ths[0]] * numel + else: + ths_cur = ths[level] + if len(ths_cur) == 1: + ths_cur = [ths_cur[0]] * numel + else: + if len(ths.shape) == 1: # Needs to reshape to shape (n_levels-1, 3) + ths_cur = ths.squeeze().repeat(numel) + else: + ths_cur = ths[level - 2] + + return ths_cur + + def forward(self, x, ths): + # Pad data + x, padding = self.pad_input(x) + + # Apply wavelet transform + coeffs = self.dwt(x) + + # Threshold coefficients (we do not threshold the approximation coefficients) + coeffs = self.threshold_ND(coeffs, ths) + + # Inverse wavelet transform + y = self.iwt(coeffs) + + # Crop data + y = self.crop_output(y, padding) + return y + + +class _WaveletDictDenoiser(nn.Module): + r""" + Overcomplete Wavelet denoising with the :math:`\ell_1` norm. + + This denoiser is defined as the solution to the optimization problem: + + .. math:: + + \underset{x}{\arg\min} \; \|x-y\|^2 + \lambda \|\Psi x\|_n + + where :math:`\Psi` is an overcomplete wavelet transform, composed of 2 or more wavelets, i.e., + :math:`\Psi=[\Psi_1,\Psi_2,\dots,\Psi_L]`, :math:`\lambda>0` is a hyperparameter, and where + :math:`\|\cdot\|_n` is either the :math:`\ell_1` norm (``non_linearity="soft"``), + the :math:`\ell_0` norm (``non_linearity="hard"``) or a variant of the :math:`\ell_0` norm + (``non_linearity="topk"``) where only the top-k coefficients are kept; see :meth:`deepinv.models.WaveletDenoiser` for + more details. + + The solution is not available in closed-form, thus the denoiser runs an optimization algorithm for each test image. + + :param int level: decomposition level of the wavelet transform. + :param list[str] wv: list of mother wavelets. The names of the wavelets can be found in `here + <https://wavelets.pybytes.com/>`_. (default: ["db8", "db4"]). + :param str device: cpu or gpu. + :param int max_iter: number of iterations of the optimization algorithm (default: 10). + :param str non_linearity: "soft", "hard" or "topk" thresholding (default: "soft") + """ + + def __init__( + self, + level=None, + list_wv=["db8", "db4"], + max_iter=10, + non_linearity="soft", + wvdim=2, + ): + super().__init__() + self.level = level + self.list_prox = nn.ModuleList( + [ + _WaveletDenoiser( + level=level, wv=wv, non_linearity=non_linearity, wvdim=wvdim + ) + for wv in list_wv + ] + ) + self.max_iter = max_iter + + def forward(self, y, ths): + z_p = y.repeat(len(self.list_prox), *([1] * (len(y.shape)))) + p_p = torch.zeros_like(z_p) + x = p_p.clone() + for it in range(self.max_iter): + x_prev = x.clone() + for p in range(len(self.list_prox)): + p_p[p, ...] = self.list_prox[p](z_p[p, ...], ths) + x = torch.mean(p_p.clone(), axis=0) + for p in range(len(self.list_prox)): + z_p[p, ...] = x + z_p[p, ...].clone() - p_p[p, ...] + rel_crit = torch.linalg.norm((x - x_prev).flatten()) / torch.linalg.norm( + x.flatten() + 1e-6 + ) + if rel_crit < 1e-3: + break + return x +
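For reference, the soft-thresholding operator used by ``prox_l1`` (via ``threshold.soft_thresh``) has a simple closed form. Below is a minimal standalone sketch of that operator, not the library implementation; its behavior on complex coefficients is an assumption here.

```python
import torch

def soft_thresh(x, ths):
    # magnitude shrinkage; valid for real and complex coefficients
    mag = x.abs()
    scale = torch.clamp(mag - ths, min=0.0) / (mag + 1e-12)
    return scale * x

coeffs = torch.tensor([-2.0, -0.05, 0.3, 1.5])
print(soft_thresh(coeffs, 0.1))  # small entries are zeroed, large ones shrunk by 0.1
```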
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/recon/alg/classic_recon.html b/_modules/deepmr/recon/alg/classic_recon.html new file mode 100644 index 00000000..681c76ca --- /dev/null +++ b/_modules/deepmr/recon/alg/classic_recon.html @@ -0,0 +1,777 @@ + + + + + + + + + + + deepmr.recon.alg.classic_recon — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.recon.alg.classic_recon

+"""Classical iterative reconstruction wrapper."""
+
+__all__ = ["recon_lstsq"]
+
+import numpy as np
+import torch
+
+from ... import optim as _optim
+from ... import prox as _prox
+from .. import calib as _calib
+from ... import linops as _linops
+
+from . import linop as _linop
+
+from numba.core.errors import NumbaPerformanceWarning
+import warnings
+
+warnings.simplefilter("ignore", category=NumbaPerformanceWarning)
+
+
+
[docs]def recon_lstsq( + data, + head, + mask=None, + niter=1, + prior=None, + prior_ths=0.01, + prior_params=None, + solver_params=None, + lamda=0.0, + stepsize=1.0, + basis=None, + nsets=1, + device=None, + cal_data=None, + toeplitz=True, + use_dcf=True, +): + """ + Classical MR reconstruction. + + Parameters + ---------- + data : np.ndarray | torch.Tensor + Input k-space data of shape ``(nslices, ncoils, ncontrasts, nviews, nsamples)``. + head : deepmr.Header + DeepMR acquisition header, containing ``traj``, ``shape`` and ``dcf``. + mask : np.ndarray | torch.Tensor, optional + Sampling mask for Cartesian imaging. + Expected shape is ``(ncontrasts, nviews, nsamples)``. + The default is ``None``. + niter : int, optional + Number of recon iterations. If single iteration, + perform simple zero-filled recon. The default is ``1``. + prior : str | deepinv.optim.Prior, optional + Prior for image regularization. If string, it must be one of the following: + + * ``"L1Wave"``: L1 Wavelet regularization. + * ``"TV"``: Total Variation regularization. + * ``"LLR"``: Locally Low Rank regularization. + + The default is ``None`` (no regularizer). + prior_ths : float, optional + Threshold for denoising in regularizer. The default is ``0.01``. + prior_params : dict, optional + Parameters for Prior initializations. + See :func:`deepmr.prox`. + The default is ``None`` (use each regularizer's default parameters). + solver_params : dict, optional + Parameters for Solver initializations. + See :func:`deepmr.optim`. + The default is ``None`` (use each solver's default parameters). + lamda : float, optional + Regularization strength. If 0.0, do not apply regularization. + The default is ``0.0``. + stepsize : float, optional + Iterations step size, normalized by the maximum eigenvalue of the Encoding + operator. The default is ``1.0``. + basis : np.ndarray | torch.Tensor, optional + Low rank subspace basis of shape ``(ncontrasts, ncoeffs)``. The default is ``None``. + nsets : int, optional + Number of coil sensitivity sets of maps. The default is ``1``. + device : str, optional + Computational device. The default is ``None`` (same as ``data``). + cal_data : np.ndarray | torch.Tensor, optional + Calibration dataset for coil sensitivity estimation. + The default is ``None`` (use center region of ``data``). + toeplitz : bool, optional + Use Toeplitz approach for normal equation. The default is ``True``. + use_dcf : bool, optional + Use dcf to accelerate convergence. The default is ``True``. + + Returns + ------- + img : np.ndarray | torch.Tensor + Reconstructed image of shape: + + * 2D Cartesian: ``(nslices, ncontrasts, ny, nx)``. + * 2D Non Cartesian: ``(nslices, ncontrasts, ny, nx)``. + * 3D Non Cartesian: ``(ncontrasts, nz, ny, nx)``.
+ + """ + if isinstance(data, np.ndarray): + data = torch.as_tensor(data) + isnumpy = True + else: + isnumpy = False + + if device is None: + device = data.device + data = data.to(device) + + if use_dcf and head.dcf is not None: + dcf = head.dcf.to(device) + else: + dcf = None + + # toggle off Topelitz for non-iterative + if niter == 1: + toeplitz = False + + # get ndim + if head.traj is not None: + ndim = head.traj.shape[-1] + else: + ndim = 2 # assume 3D data already decoupled along readout + + # build encoding operator + E, EHE = _linop.EncodingOp( + data, + mask, + head.traj, + dcf, + head.shape, + nsets, + basis, + device, + cal_data, + toeplitz, + ) + + # transfer + E = E.to(device) + EHE = EHE.to(device) + + # perform zero-filled reconstruction + if dcf is not None: + img = E.H(dcf**0.5 * data[:, None, ...]) + else: + img = E.H(data[:, None, ...]) + + # if non-iterative, just perform linear recon + if niter == 1: + output = img + if isnumpy: + output = output.numpy(force=True) + return output + + # default solver params + if solver_params is None: + solver_params = {} + + # rescale + img = _calib.intensity_scaling(img, ndim=ndim) + + # if no prior is specified, use CG recon + if prior is None: + output = _optim.cg_solve( + img, EHE, niter=niter, lamda=lamda, ndim=ndim, **solver_params + ) + if isnumpy: + output = output.numpy(force=True) + return output + + # modify EHE + if lamda != 0.0: + _EHE = EHE + lamda * _linops.Identity(ndim) + else: + _EHE = EHE + + # compute spectral norm + xhat = torch.rand(img.shape, dtype=img.dtype, device=img.device) + max_eig = _optim.power_method(None, xhat, AHA=_EHE, device=device, niter=30) + if max_eig != 0.0: + stepsize = stepsize / max_eig + + # if a single prior is specified, use PDG + if isinstance(prior, (list, tuple)) is False: + # default prior params + if prior_params is None: + prior_params = {} + + # get prior + D = _get_prior(prior, ndim, lamda, device, **prior_params) + + # solve + output = _optim.pgd_solve( + img, stepsize, _EHE, D, niter=niter, accelerate=True, **solver_params + ) + else: + npriors = len(prior) + if prior_params is None: + prior_params = [{} for n in range(npriors)] + else: + assert ( + isinstance(prior_params, (list, tuple)) and len(prior_params) == npriors + ), "Please provide parameters for each regularizer (or leave completely empty to use default)" + + # get priors + D = [] + for n in range(npriors): + d = _get_prior(prior[n], ndim, lamda, device, **prior_params[n]) + D.append(d) + + # solve + output = _optim.admm_solve(img, stepsize, _EHE, D, niter=niter, **solver_params) + if isnumpy: + output = output.numpy(force=True) + + return output
+ + +# %% local utils +def _get_prior(ptype, ndim, lamda, device, **params): + if isinstance(ptype, str): + if ptype == "L1Wave": + return _prox.WaveletDenoiser(ndim, ths=lamda, device=device, **params) + elif ptype == "TV": + return _prox.TVDenoiser(ndim, ths=lamda, device=device, **params) + elif ptype == "LLR": + return _prox.LLRDenoiser(ndim, ths=lamda, device=device, **params) + else: + raise ValueError( + f"Prior type = {ptype} not recognized; either specify 'L1Wave', 'TV' or 'LLR', or 'nn.Module' object." + ) + else: + raise NotImplementedError("Direct prior object not implemented.") +
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/recon/alg/linop.html b/_modules/deepmr/recon/alg/linop.html new file mode 100644 index 00000000..c3e84ae5 --- /dev/null +++ b/_modules/deepmr/recon/alg/linop.html @@ -0,0 +1,685 @@ + + + + + + + + + + + deepmr.recon.alg.linop — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.recon.alg.linop

+"""Sub-package containing MR Encoding Operator builder."""
+
+__all__ = ["EncodingOp"]
+
+from ... import linops as _linops
+from .. import calib as _calib
+
+
+
[docs]def EncodingOp( + data, + mask=None, + traj=None, + dcf=None, + shape=None, + nsets=1, + basis=None, + device=None, + cal_data=None, + toeplitz=False, +): + """ + Prepare MR encoding operator for Cartesian / Non-Cartesian imaging. + + Parameters + ---------- + data : np.ndarray | torch.Tensor + Input k-space data of shape ``(nslices, ncoils, ncontrasts, nviews, nsamples)``. + mask : np.ndarray | torch.Tensor, optional + Sampling mask for Cartesian imaging. + Expected shape is ``(ncontrasts, nviews, nsamples)``. + The default is ``None``. + traj : np.ndarray | torch.Tensor, optional + K-space trajectory for Non Cartesian imaging. + Expected shape is ``(ncontrasts, nviews, nsamples, ndims)``. + The default is ``None``. + dcf : np.ndarray | torch.Tensor, optional + K-space density compensation. + Expected shape is ``(ncontrasts, nviews, nsamples)``. + The default is ``None``. + shape : Iterable[int], optional + Cartesian grid size of shape ``(nz, ny, nx)``. + The default is ``None``. + nsets : int, optional + Number of coil sensitivity sets of maps. The default is ``1. + basis : np.ndarray | torch.Tensor, optional + Low rank subspace basis of shape ``(ncontrasts, ncoeffs)``. The default is ``None``. + device : str, optional + Computational device. The default is ``None`` (same as ``data``). + cal_data : np.ndarray | torch.Tensor, optional + Calibration dataset for coil sensitivity estimation. + The default is ``None`` (use center region of ``data``). + toeplitz : bool, optional + Use Toeplitz approach for normal equation. The default is ``False``. + + Returns + ------- + E : deepmr.linops.Linop + MR encoding operator (i.e., ksp = E(img)). + EHE : deepmr.linops.NormalLinop + MR normal operator (i.e., img_n = EHE(img_n-1)). + + """ + # parse number of coils + ncoils = data.shape[-4] + + # get device + if device is None: + device = data.device + + if mask is not None and traj is not None: + raise ValueError("Please provide either mask or traj, not both.") + if mask is not None: # Cartesian + # Fourier + F = _linops.FFTOp(mask, basis, device) + + # Normal operator + if toeplitz: + FHF = _linops.FFTGramOp(mask, basis, device) + else: + FHF = F.H * F + + # Sensititivy + if ncoils == 1: + return F, FHF + else: + if cal_data is not None: + sensmap, _ = _calib.espirit_cal(cal_data.to(device), nsets=nsets) + else: + sensmap, _ = _calib.espirit_cal(data.to(device), nsets=nsets) + + # infer from mask shape whether we are using multicontrast or not + if len(mask.shape) == 2: + multicontrast = False # (ny, nx) / (nz, ny) + else: + multicontrast = True # (ncontrast, ny, nx) / (ncontrast, nz, ny) + + # Coil operator + C = _linops.SenseOp(2, sensmap, multicontrast=multicontrast) + + # Full encoding operator + E = F * C + EHE = C.H * FHF * C + + return E, EHE + + if traj is not None: + assert shape is not None, "Please provide shape for Non-Cartesian imaging." 
+        ndim = traj.shape[-1] + + # Fourier + F = _linops.NUFFTOp(traj, shape[-ndim:], basis, dcf, device=device) + + # Normal operator + if toeplitz: + FHF = _linops.NUFFTGramOp(traj, shape[-ndim:], basis, dcf, device=device) + else: + FHF = F.H * F + + # Sensitivity + if ncoils == 1: + return F, FHF + else: + if cal_data is not None: + sensmap, _ = _calib.espirit_cal( + cal_data.to(device), nsets=nsets, coord=traj, shape=shape, dcf=dcf + ) + else: + sensmap, _ = _calib.espirit_cal( + data.to(device), nsets=nsets, coord=traj, shape=shape, dcf=dcf + ) + + # infer from trajectory shape whether we are using multicontrast or not + if len(traj.shape) < 4: + multicontrast = False # (nviews, nsamples, naxes) / (nsamples, naxes) + else: + multicontrast = True # (ncontrast, nviews, nsamples, naxes) + + # Coil operator + C = _linops.SenseOp(ndim, sensmap, multicontrast=multicontrast) + + # Full encoding operator + E = F * C + EHE = C.H * FHF * C + + return E, EHE
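A minimal sketch of building the encoding operator for a single-coil, single-contrast non-Cartesian acquisition; the trajectory normalization and shapes are assumptions for illustration only.

```python
import torch
from deepmr.recon.alg.linop import EncodingOp

ncontrasts, nviews, nsamples = 1, 64, 256
data = torch.randn(1, 1, ncontrasts, nviews, nsamples, dtype=torch.complex64)
traj = torch.rand(ncontrasts, nviews, nsamples, 2) - 0.5  # 2D trajectory (units assumed normalized)

E, EHE = EncodingOp(data, traj=traj, shape=(256, 256))
# forward model: ksp = E(img); normal operator for iterative solvers: EHE(img)
```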
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/recon/calib/espirit.html b/_modules/deepmr/recon/calib/espirit.html new file mode 100644 index 00000000..05673010 --- /dev/null +++ b/_modules/deepmr/recon/calib/espirit.html @@ -0,0 +1,753 @@ + + + + + + + + + + + deepmr.recon.calib.espirit — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.recon.calib.espirit

+"""Pytorch ESPIRIT implementation. Adapted for convenience from https://github.com/mikgroup/espirit-python/tree/master"""
+
+__all__ = ["espirit_cal"]
+
+import numpy as np
+import torch
+
+from ... import fft as _fft
+from ... import _signal
+
+from . import acs as _acs
+
+
+
[docs]def espirit_cal( + data, coord=None, dcf=None, shape=None, k=6, r=24, t=0.02, c=0.0, nsets=1 +): + """ + Derives the ESPIRiT [1] operator. + + Parameters + ---------- + data : np.ndarray | torch.Tensor + Multi channel k-space data. + coord : np.ndarray | torch.Tensor, optional + K-space trajectory of ``shape = (ncontrasts, nviews, nsamples, ndim)``. + The default is ``None`` (Cartesian acquisition). + dcf : np.ndarray | torch.Tensor, optional + K-space density compensation of ``shape = (ncontrasts, nviews, nsamples)``. + The default is ``None`` (no compensation). + shape : Iterable[int] | optional + Shape of the k-space after gridding. If not provided, estimate from + input data (assumed on a Cartesian grid already). + The default is ``None`` (Cartesian acquisition). + k : int, optional + k-space kernel size. The default is ``6``. + r : int, optional + Calibration region size. The default is ``24``. + t : float, optional + Rank of the auto-calibration matrix (A). + The default is ``0.02``. + c : float, optional + Crop threshold that determines eigenvalues "=1". + The defaults is ``0.95``. + nsets : int, optional + Number of set of maps to be returned. + The default is ``1`` (conventional SENSE recon). + + Returns + ------- + maps : np.ndarray | torch.Tensor + Output coil sensitivity maps. + + Notes + ----- + The input k-space ``data`` tensor is assumed to have the following shape: + + * **2Dcart:** ``(nslices, ncoils, ..., ny, nx)``. + * **2Dnoncart:** ``(nslices, ncoils, ..., nviews, nsamples)``. + * **3Dcart:** ``(nx, ncoils, ..., nz, ny)``. + * **3Dnoncart:** ``(ncoils, ..., nviews, nsamples)``. + + For multi-contrast acquisitions, calibration is obtained by averaging over + contrast dimensions. + + The output sensitivity maps are assumed to have the following shape: + + * **2Dcart:** ``(nsets, nslices, ncoils, ny, nx)``. + * **2Dnoncart:** ``(nsets, nslices, ncoils, ny, nx)``. + * **3Dcart:** ``(nsets, nx, ncoils, nz, ny)``. + * **3Dnoncart:** ``(nsets, ncoils, nz, ny, nx)``. + + References + ---------- + .. [1] Uecker M, Lai P, Murphy MJ, Virtue P, Elad M, Pauly JM, Vasanawala SS, Lustig M. + ESPIRiT--an eigenvalue approach to autocalibrating parallel MRI: where SENSE meets GRAPPA. + Magn Reson Med. 2014 Mar;71(3):990-1001. doi: 10.1002/mrm.24751. PMID: 23649942; PMCID: PMC4142121. + + """ + if isinstance(data, np.ndarray): + isnumpy = True + else: + isnumpy = False + + while len(data.shape) < 5: + data = data[None, ...] + + # keep shape + if coord is not None: + ndim = coord.shape[-1] + if np.isscalar(shape): + shape = ndim * [shape] + else: + shape = list(shape)[-ndim:] + shape = [int(s) for s in shape] + else: + ndim = 2 + shape = list(data.shape[-2:]) + + # extract calibration region + cshape = list(np.asarray(shape, dtype=int) // 2) + cal_data = _acs.find_acs(data, cshape, coord, dcf) + + # calculate maps + maps = _espirit(cal_data.clone(), k, r, t, c) + + # select maps + if nsets == 1: + maps = maps[[0]] + else: + maps = maps[:nsets] + + # resample + maps = _signal.resample(maps, shape) # (nsets, ncoils, nz, ny, nx) + + # normalize + maps_rss = _signal.rss(maps, axis=1, keepdim=True) + maps = maps / maps_rss[[0]] + + # reformat + if ndim == 2: # Cartesian or 2D Non-Cartesian + maps = maps.swapaxes( + 1, 2 + ) # (nsets, nslices, ncoils, ny, nx) / (nsets, nx, ncoils, nz, ny) + + # cast back to numpy if required + if isnumpy: + maps = maps.numpy(force=True) + + return maps, _signal.resize(cal_data, ndim * [r])
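A minimal usage sketch of ``espirit_cal`` on synthetic 2D Cartesian multi-coil data; the shapes follow the conventions in the Notes above, and all parameter values are illustrative.

```python
import torch
from deepmr.recon.calib.espirit import espirit_cal

# hypothetical 2D Cartesian data: (nslices, ncoils, ncontrasts, ny, nx)
data = torch.randn(1, 8, 1, 192, 192, dtype=torch.complex64)

maps, cal_region = espirit_cal(data, nsets=1)
# maps: (nsets, nslices, ncoils, ny, nx) coil sensitivities; cal_region: extracted calibration data
```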
+ + +# %% local utils +def _espirit(X, k, r, t, c): + # transpose + X = X.permute(3, 2, 1, 0) + + # get shape + sx, sy, sz, nc = X.shape + + sxt = (sx // 2 - r // 2, sx // 2 + r // 2) if (sx > 1) else (0, 1) + syt = (sy // 2 - r // 2, sy // 2 + r // 2) if (sy > 1) else (0, 1) + szt = (sz // 2 - r // 2, sz // 2 + r // 2) if (sz > 1) else (0, 1) + + # Extract calibration region. + C = X[sxt[0] : sxt[1], syt[0] : syt[1], szt[0] : szt[1], :].to( + dtype=torch.complex64 + ) + + # Construct Hankel matrix. + p = (sx > 1) + (sy > 1) + (sz > 1) + A = torch.zeros( + [(r - k + 1) ** p, k**p * nc], dtype=torch.complex64, device=X.device + ) + + idx = 0 + for xdx in range(max(1, C.shape[0] - k + 1)): + for ydx in range(max(1, C.shape[1] - k + 1)): + for zdx in range(max(1, C.shape[2] - k + 1)): + block = C[xdx : xdx + k, ydx : ydx + k, zdx : zdx + k, :].to( + dtype=torch.complex64 + ) + A[idx, :] = block.flatten() + idx += 1 + + # Take the Singular Value Decomposition. + U, S, VH = torch.linalg.svd(A, full_matrices=True) + V = VH.conj().t() + + # Select kernels + n = torch.sum(S >= t * S[0]) + V = V[:, :n] + + kxt = (sx // 2 - k // 2, sx // 2 + k // 2) if (sx > 1) else (0, 1) + kyt = (sy // 2 - k // 2, sy // 2 + k // 2) if (sy > 1) else (0, 1) + kzt = (sz // 2 - k // 2, sz // 2 + k // 2) if (sz > 1) else (0, 1) + + # Reshape into k-space kernel, flips it and takes the conjugate + kernels = torch.zeros((sx, sy, sz, nc, n), dtype=torch.complex64, device=X.device) + kerdims = [ + ((sx > 1) * k + (sx == 1) * 1), + ((sy > 1) * k + (sy == 1) * 1), + ((sz > 1) * k + (sz == 1) * 1), + nc, + ] + for idx in range(n): + kernels[kxt[0] : kxt[1], kyt[0] : kyt[1], kzt[0] : kzt[1], :, idx] = V[ + :, idx + ].reshape(kerdims) + + # Take the iucfft + axes = (0, 1, 2) + kerimgs = ( + _fft.fft(kernels.flip(0).flip(1).flip(2).conj(), axes) + * (sx * sy * sz) ** 0.5 + / (k**p) ** 0.5 + ) + + # Take the point-wise eigenvalue decomposition and keep eigenvalues greater than c + u, s, vh = torch.linalg.svd( + kerimgs.view(sx, sy, sz, nc, n).reshape(-1, nc, n), full_matrices=True + ) + mask = s**2 > c + + # mask u (nvoxels, neigen, neigen) + u = mask[:, None, :] * u + + # Reshape back to the original shape and assign to maps + maps = u.view(sx, sy, sz, nc, nc) + + # transpose + maps = maps.permute(4, 3, 2, 1, 0) + + return maps +
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/recon/calib/scaling.html b/_modules/deepmr/recon/calib/scaling.html new file mode 100644 index 00000000..62b3ad40 --- /dev/null +++ b/_modules/deepmr/recon/calib/scaling.html @@ -0,0 +1,582 @@ + + + + + + + + + + + deepmr.recon.calib.scaling — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.recon.calib.scaling

+"""Utils for image  intensity rescaling."""
+
+__all__ = ["intensity_scaling"]
+
+import torch
+
+from ... import fft as _fft
+from ... import _signal
+
+
+
[docs]def intensity_scaling(input, ndim): + """ + Rescale intensity range of the input image. + + This has the main purpose of enabling easier tuning of + regularization strength in iterative reconstructions. + + Parameters + ---------- + input : np.ndarray | torch.Tensor + Input signal of shape ``(..., ny, nx)`` (2D) or + ``(..., nz, ny, nx)`` (3D). + ndim : int + Number of spatial dimensions. + + Returns + ------- + output : np.ndarray | torch.Tensor + Rescaled signal. + + """ + ksp = _fft.fft(torch.as_tensor(input), axes=range(-ndim, 0)) + ksp_lores = _signal.resize(ksp, ndim * [32]) + img_lores = _fft.ifft(ksp_lores, axes=range(-ndim, 0)) + for n in range(len(img_lores.shape) - ndim): + img_lores = torch.linalg.norm(img_lores, axis=0) + + # get scaling + img_lores = torch.nan_to_num(img_lores, posinf=0.0, neginf=0.0, nan=0.0) + scale = torch.quantile(abs(img_lores.ravel()), 0.95) + + return input / scale
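A minimal usage sketch; the input shape is illustrative only.

```python
import torch
from deepmr.recon.calib.scaling import intensity_scaling

# hypothetical 2D multi-contrast image stack: (ncontrasts, ny, nx)
img = torch.randn(4, 128, 128, dtype=torch.complex64)

img_scaled = intensity_scaling(img, ndim=2)
# after scaling, the 95th percentile of a low-resolution magnitude image is ~1,
# which makes regularization strengths easier to tune across datasets
```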
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/recon/inference/fse.html b/_modules/deepmr/recon/inference/fse.html new file mode 100644 index 00000000..53c5bcf6 --- /dev/null +++ b/_modules/deepmr/recon/inference/fse.html @@ -0,0 +1,609 @@ + + + + + + + + + + + deepmr.recon.inference.fse — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.recon.inference.fse

+"""FSE T2 mapping fitting routines."""
+
+__all__ = ["fse_fit"]
+
+import numpy as np
+import torch
+
+from ... import bloch
+
+from . import solvers
+
+
+
[docs]def fse_fit(input, t2grid, flip, ESP, phases=None): + """ + Fit T2 from input Fast Spin Echo data. + + Parameters + ---------- + input : np.ndarray | torch.Tensor + Input image series of shape (ncontrasts, nz, ny, nx). + t2grid : Iterable[float] + T2 grid (start, stop, nsteps) in [ms]. + flip : np.ndarray | torch.Tensor + Refocusing flip angles in [deg]. + ESP : float + Echo spacing in [ms]. + phases : np.ndarray | torch.Tensor, optional + Refocusing pulses phases. The default is 0 * flip. + + Returns + ------- + m0 : np.ndarray | torch.Tensor + Proton Density map of shape (nz, ny, nx). + t2map : np.ndarray | torch.Tensor + T2 map of shape (nz, ny, nx) in [ms]. + + """ + + if isinstance(input, torch.Tensor): + istorch = True + device = input.device + input = input.numpy(force=True) + else: + istorch = False + + # default + if phases is None: + phases = 0.0 * flip + + # first build grid + t2lut = np.linspace(t2grid[0], t2grid[1], t2grid[2]) + t1 = 1100.0 + + # build dictionary + atoms = bloch.fse(flip, phases, ESP, t1, t2lut) + blochdict = solvers.BlochDictionary(atoms, t2lut[:, None], ["T2"]) + + # perform matching + m0, maps = solvers.tsmi2map(blochdict, input) + + # here, we only have T2 + t2map = maps["T2"] + + # cast back + if istorch: + m0 = torch.as_tensor(m0, device=device) + t2map = torch.as_tensor(t2map, device=device) + + return m0, t2map
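A minimal usage sketch of ``fse_fit`` with synthetic data; the flip angle train, echo spacing and T2 grid are illustrative assumptions.

```python
import numpy as np
from deepmr.recon.inference.fse import fse_fit

# hypothetical FSE echo train: 16 echoes, 180 deg refocusing pulses, 8 ms spacing
flip = 180.0 * np.ones(16, dtype=np.float32)
echo_series = np.abs(np.random.randn(16, 1, 64, 64)).astype(np.float32)  # (ncontrasts, nz, ny, nx)

m0, t2map = fse_fit(echo_series, t2grid=(10.0, 300.0, 200), flip=flip, ESP=8.0)
# m0, t2map: (nz, ny, nx) proton density and T2 [ms] maps
```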
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/deepmr/recon/inference/mpnrage.html b/_modules/deepmr/recon/inference/mpnrage.html new file mode 100644 index 00000000..40c5f43b --- /dev/null +++ b/_modules/deepmr/recon/inference/mpnrage.html @@ -0,0 +1,607 @@ + + + + + + + + + + + deepmr.recon.inference.mpnrage — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for deepmr.recon.inference.mpnrage

+"""MPnRAGE T1 mapping fitting routines."""
+
+__all__ = ["mpnrage_fit"]
+
+import numpy as np
+import torch
+
+from ... import bloch
+
+from . import solvers
+
+
+
[docs]def mpnrage_fit(input, t1grid, flip, TR, TI): + """ + Fit T1 from input MPnRAGE data. + + Parameters + ---------- + input : np.ndarray | torch.Tensor + Input image series of shape (ncontrasts, nz, ny, nx). + t1grid : Iterable[float] + T1 grid (start, stop, nsteps) in [ms]. + flip : np.ndarray | torch.Tensor + Excitation flip angles in [deg]. + TR : float + Repetition Time in [ms]. + TI : float + Inversion Time in [ms]. + + Returns + ------- + m0 : np.ndarray | torch.Tensor + Proton Density map of shape (nz, ny, nx). + t1map : np.ndarray | torch.Tensor + T1 map of shape (nz, ny, nx) in [ms]. + + """ + + if isinstance(input, torch.Tensor): + istorch = True + device = input.device + input = input.numpy(force=True) + else: + istorch = False + + # first build grid + t1lut = np.linspace(t1grid[0], t1grid[1], t1grid[2]) + t2 = 10.0 + + # build dictionary + nshots = input.shape[0] + flip = flip * np.ones(nshots) + atoms = bloch.mprage(nshots, flip, TR, t1lut, t2, TI=TI) + blochdict = solvers.BlochDictionary(atoms, t1lut[:, None], ["T1"]) + + # perform matching + m0, maps = solvers.tsmi2map(blochdict, input) + + # here, we only have T1 + t1map = maps["T1"] + + # cast back + if istorch: + m0 = torch.as_tensor(m0, device=device) + t1map = torch.as_tensor(t1map, device=device) + + return m0, t1map
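A minimal usage sketch of ``mpnrage_fit`` with synthetic data; the flip angle, timing and T1 grid are illustrative assumptions.

```python
import numpy as np
from deepmr.recon.inference.mpnrage import mpnrage_fit

# hypothetical MPnRAGE series: 32 readouts after inversion
img_series = np.abs(np.random.randn(32, 1, 64, 64)).astype(np.float32)  # (ncontrasts, nz, ny, nx)

m0, t1map = mpnrage_fit(img_series, t1grid=(100.0, 3000.0, 200), flip=5.0, TR=8.0, TI=120.0)
# m0, t1map: (nz, ny, nx) proton density and T1 [ms] maps
```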
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/index.html b/_modules/index.html new file mode 100644 index 00000000..5ff2310f --- /dev/null +++ b/_modules/index.html @@ -0,0 +1,600 @@ + + + + + + + + + + + Overview: module code — deepmr documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ + + + +
+
+ + + + + +
+ + + + + + + + + + + + + +
+ +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

All modules for which code is available

+ + +
+ + + + + + +
+ +
+
+
+ +
+ + + +
+ + +
+ + + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_sources/core/bloch.md b/_sources/core/bloch.md new file mode 100644 index 00000000..adaaf153 --- /dev/null +++ b/_sources/core/bloch.md @@ -0,0 +1,139 @@ +# Bloch Simulation + +```{eval-rst} +.. automodule:: deepmr.bloch +``` + +## Numerical Models +```{eval-rst} +.. currentmodule:: deepmr.bloch +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.bloch.mprage + deepmr.bloch.memprage + deepmr.bloch.bssfpmrf + deepmr.bloch.ssfpmrf + deepmr.bloch.fse + deepmr.bloch.t1t2shuffling +``` + +## Custom signal models + +DeepMR also contains helper classes to define custom signal models. The main class for analytical and numerical models are `deepmr.bloch.AnalyticalSimulator` +and `deepmr.bloch.EPGSimulator`, respectively. + +Users can define a custom signal model by subclassing it and overloading ``sequence`` method. Base class already handle spin parameters (e.g., ``T1``, ``T2``, ...) +as well as simulation properties (e.g., computational ``device``, maximum ``number of batches``...) so the user has only to care about specific sequence arguments (e.g., ``flip angle``, ``TR``, ... for GRE or ``flip angle``, ``ETL``, for FSE). In order to work properly, ``sequence`` method must be a ``staticmethod`` and the arguments must follow this order: + +1. sequence parameters (``flip angle``, ``TE``, ``TR``, ``nrepetitions``, ...) +2. spin parameters (``T1``, ``T2``, ``B1``, ...) +3. (mandatory) buffer for output signal (analytical) or EPG states and output signal (numerical): ``signal`` / `` states``, ``signal`` + +```python +from deepmr import bloch +from deepmr.bloch import ops + +class SSFP(bloch.EPGSimulator): + + @staticmethod + def signal(flip, TR, T1, T2, states, signal): + + # get device and sequence length + device = flip.device + npulses = flip.shape[-1] + + # define operators + T = ops.RFPulse(device, alpha=flip) # RF pulse + E = ops.Relaxation(device, TR, T1, T2) # relaxation until TR + S = ops.Shift() # gradient spoil + + # apply sequence + for n in range(npulses): + states = T(states) + signal[n] = ops.observe(states) + states = E(states) + states = S(states) + + # return output + return signal +``` + +The resulting class can be used to perform simulation by instantiating an object (spin properties as input)and using the ``__call__`` method (sequence properties as input): + +```python +ssfp = SSFP(device=device, T1=T1, T2=T2) # build simulator +signal = ssfp(flip=flip, TR=TR) # run simulation +``` + +For convenience, simulator instantiation and actual simulation can (and should) be wrapped in a wrapper function: + +```python +def simulate_ssfp(flip, TR, T1, T2, device="cpu"): + mysim = SSFP(device=device, T1=T1, T2=T2) + return ssfp(flip=flip, TR=TR) +``` + +The class also enable automatic forward differentiation wrt to input spin parameters via ``diff`` argument: + +```python +import numpy as np + +def simulate_ssfp(flip, TR, T1, T2, diff=None, device="cpu"): + ssfp = SSFP(device=device, T1=T1, T2=T2, diff=diff) + return ssfp(flip=flip, TR=TR) + +# this will return signal only (evolution towards steady state of unbalanced SSFP sequence) +signal = simulate_ssfp(flip=10.0*np.ones(1000, dtype=np.float32), TR=4.5, T1=500.0, T2=50.0) + +# this will also return derivatives +signal, dsignal = simulate_ssfp(flip=10.0*np.ones(1000, dtype=np.float32), TR=8.5, T1=500.0, T2=50.0, diff=("T1", "T2")) + +# dsignal[0] = dsignal / dT1 (derivative of signal wrt T1) +# dsignal[1] = dsignal / dT2 (derivative of signal wrt T2) +``` + +This is 
useful e.g. for nonlinear fitting and for calculating objective functions (CRLB) for sequence optimization. + +To facilitate the development of signal models, we include basic sequence building blocks (e.g., Inversion Preparation, SSFP Propagator) and low-level Extended Phase Graphs operators: + + +## Sequence Blocks + +```{eval-rst} +.. currentmodule:: deepmr.bloch +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.bloch.InversionPrep + deepmr.bloch.T2Prep + deepmr.bloch.ExcPulse + deepmr.bloch.bSSFPStep + deepmr.bloch.SSFPFidStep + deepmr.bloch.SSFPEchoStep + deepmr.bloch.FSEStep +``` + +## Low-level Operators + +```{eval-rst} +.. currentmodule:: deepmr.bloch +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.bloch.EPGstates + deepmr.bloch.RFPulse + deepmr.bloch.AdiabaticPulse + deepmr.bloch.Relaxation + deepmr.bloch.Shift + deepmr.bloch.Spoil + deepmr.bloch.DiffusionDamping + deepmr.bloch.FlowDephasing + deepmr.bloch.FlowWash + deepmr.bloch.observe + deepmr.bloch.susceptibility + deepmr.bloch.t1sat +``` diff --git a/_sources/core/fft.md b/_sources/core/fft.md new file mode 100644 index 00000000..11891a7d --- /dev/null +++ b/_sources/core/fft.md @@ -0,0 +1,39 @@ +# Fourier Transform + +```{eval-rst} +.. automodule:: deepmr.fft +``` + +## Centered FFT +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.fft.fft + deepmr.fft.ifft +``` + +## Sparse FFT +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.fft.sparse_fft + deepmr.fft.sparse_ifft + deepmr.fft.plan_toeplitz_fft + deepmr.fft.apply_sparse_fft_selfadj +``` + +## Non-Uniform FFT (NUFFT) +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.fft.nufft + deepmr.fft.nufft_adj + deepmr.fft.plan_toeplitz_nufft + deepmr.fft.apply_nufft_selfadj +``` diff --git a/_sources/core/generated/deepmr.b0field.rst b/_sources/core/generated/deepmr.b0field.rst new file mode 100644 index 00000000..5691bad7 --- /dev/null +++ b/_sources/core/generated/deepmr.b0field.rst @@ -0,0 +1,6 @@ +deepmr.b0field +============== + +.. currentmodule:: deepmr + +.. autofunction:: b0field \ No newline at end of file diff --git a/_sources/core/generated/deepmr.b1field.rst b/_sources/core/generated/deepmr.b1field.rst new file mode 100644 index 00000000..b4989504 --- /dev/null +++ b/_sources/core/generated/deepmr.b1field.rst @@ -0,0 +1,6 @@ +deepmr.b1field +============== + +.. currentmodule:: deepmr + +.. autofunction:: b1field \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.AdiabaticPulse.rst b/_sources/core/generated/deepmr.bloch.AdiabaticPulse.rst new file mode 100644 index 00000000..ef431366 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.AdiabaticPulse.rst @@ -0,0 +1,25 @@ +deepmr.bloch.AdiabaticPulse +=========================== + +.. currentmodule:: deepmr.bloch + +.. autoclass:: AdiabaticPulse + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~AdiabaticPulse.__init__ + ~AdiabaticPulse.apply + ~AdiabaticPulse.prepare_rotation + ~AdiabaticPulse.prepare_saturation + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.DiffusionDamping.rst b/_sources/core/generated/deepmr.bloch.DiffusionDamping.rst new file mode 100644 index 00000000..99600d3f --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.DiffusionDamping.rst @@ -0,0 +1,23 @@ +deepmr.bloch.DiffusionDamping +============================= + +.. currentmodule:: deepmr.bloch + +.. 
autoclass:: DiffusionDamping + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~DiffusionDamping.__init__ + ~DiffusionDamping.apply + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.EPGstates.rst b/_sources/core/generated/deepmr.bloch.EPGstates.rst new file mode 100644 index 00000000..5ad643a9 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.EPGstates.rst @@ -0,0 +1,6 @@ +deepmr.bloch.EPGstates +====================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: EPGstates \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.ExcPulse.rst b/_sources/core/generated/deepmr.bloch.ExcPulse.rst new file mode 100644 index 00000000..8f0d42d1 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.ExcPulse.rst @@ -0,0 +1,6 @@ +deepmr.bloch.ExcPulse +===================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: ExcPulse \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.FSEStep.rst b/_sources/core/generated/deepmr.bloch.FSEStep.rst new file mode 100644 index 00000000..cd09f19c --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.FSEStep.rst @@ -0,0 +1,6 @@ +deepmr.bloch.FSEStep +==================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: FSEStep \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.FlowDephasing.rst b/_sources/core/generated/deepmr.bloch.FlowDephasing.rst new file mode 100644 index 00000000..3fe42b72 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.FlowDephasing.rst @@ -0,0 +1,23 @@ +deepmr.bloch.FlowDephasing +========================== + +.. currentmodule:: deepmr.bloch + +.. autoclass:: FlowDephasing + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~FlowDephasing.__init__ + ~FlowDephasing.apply + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.FlowWash.rst b/_sources/core/generated/deepmr.bloch.FlowWash.rst new file mode 100644 index 00000000..05247039 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.FlowWash.rst @@ -0,0 +1,23 @@ +deepmr.bloch.FlowWash +===================== + +.. currentmodule:: deepmr.bloch + +.. autoclass:: FlowWash + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~FlowWash.__init__ + ~FlowWash.apply + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.InversionPrep.rst b/_sources/core/generated/deepmr.bloch.InversionPrep.rst new file mode 100644 index 00000000..41a84a33 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.InversionPrep.rst @@ -0,0 +1,6 @@ +deepmr.bloch.InversionPrep +========================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: InversionPrep \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.RFPulse.rst b/_sources/core/generated/deepmr.bloch.RFPulse.rst new file mode 100644 index 00000000..a7e8f79c --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.RFPulse.rst @@ -0,0 +1,25 @@ +deepmr.bloch.RFPulse +==================== + +.. currentmodule:: deepmr.bloch + +.. autoclass:: RFPulse + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. 
autosummary:: + + ~RFPulse.__init__ + ~RFPulse.apply + ~RFPulse.prepare_rotation + ~RFPulse.prepare_saturation + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.Relaxation.rst b/_sources/core/generated/deepmr.bloch.Relaxation.rst new file mode 100644 index 00000000..94ef26f1 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.Relaxation.rst @@ -0,0 +1,23 @@ +deepmr.bloch.Relaxation +======================= + +.. currentmodule:: deepmr.bloch + +.. autoclass:: Relaxation + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~Relaxation.__init__ + ~Relaxation.apply + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.SSFPEchoStep.rst b/_sources/core/generated/deepmr.bloch.SSFPEchoStep.rst new file mode 100644 index 00000000..3291cff8 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.SSFPEchoStep.rst @@ -0,0 +1,6 @@ +deepmr.bloch.SSFPEchoStep +========================= + +.. currentmodule:: deepmr.bloch + +.. autofunction:: SSFPEchoStep \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.SSFPFidStep.rst b/_sources/core/generated/deepmr.bloch.SSFPFidStep.rst new file mode 100644 index 00000000..aa871ab0 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.SSFPFidStep.rst @@ -0,0 +1,6 @@ +deepmr.bloch.SSFPFidStep +======================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: SSFPFidStep \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.Shift.rst b/_sources/core/generated/deepmr.bloch.Shift.rst new file mode 100644 index 00000000..46233078 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.Shift.rst @@ -0,0 +1,23 @@ +deepmr.bloch.Shift +================== + +.. currentmodule:: deepmr.bloch + +.. autoclass:: Shift + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~Shift.__init__ + ~Shift.apply + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.Spoil.rst b/_sources/core/generated/deepmr.bloch.Spoil.rst new file mode 100644 index 00000000..c8d4423e --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.Spoil.rst @@ -0,0 +1,23 @@ +deepmr.bloch.Spoil +================== + +.. currentmodule:: deepmr.bloch + +.. autoclass:: Spoil + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~Spoil.__init__ + ~Spoil.apply + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.T2Prep.rst b/_sources/core/generated/deepmr.bloch.T2Prep.rst new file mode 100644 index 00000000..dc68ff5c --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.T2Prep.rst @@ -0,0 +1,6 @@ +deepmr.bloch.T2Prep +=================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: T2Prep \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.bSSFPStep.rst b/_sources/core/generated/deepmr.bloch.bSSFPStep.rst new file mode 100644 index 00000000..bfc63184 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.bSSFPStep.rst @@ -0,0 +1,6 @@ +deepmr.bloch.bSSFPStep +====================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: bSSFPStep \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.bssfpmrf.rst b/_sources/core/generated/deepmr.bloch.bssfpmrf.rst new file mode 100644 index 00000000..e2ccaf55 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.bssfpmrf.rst @@ -0,0 +1,6 @@ +deepmr.bloch.bssfpmrf +===================== + +.. 
currentmodule:: deepmr.bloch + +.. autofunction:: bssfpmrf \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.fse.rst b/_sources/core/generated/deepmr.bloch.fse.rst new file mode 100644 index 00000000..3b17b3fe --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.fse.rst @@ -0,0 +1,6 @@ +deepmr.bloch.fse +================ + +.. currentmodule:: deepmr.bloch + +.. autofunction:: fse \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.memprage.rst b/_sources/core/generated/deepmr.bloch.memprage.rst new file mode 100644 index 00000000..bd2ec854 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.memprage.rst @@ -0,0 +1,6 @@ +deepmr.bloch.memprage +===================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: memprage \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.mprage.rst b/_sources/core/generated/deepmr.bloch.mprage.rst new file mode 100644 index 00000000..d6936d5d --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.mprage.rst @@ -0,0 +1,6 @@ +deepmr.bloch.mprage +=================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: mprage \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.observe.rst b/_sources/core/generated/deepmr.bloch.observe.rst new file mode 100644 index 00000000..78211644 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.observe.rst @@ -0,0 +1,6 @@ +deepmr.bloch.observe +==================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: observe \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.ssfpmrf.rst b/_sources/core/generated/deepmr.bloch.ssfpmrf.rst new file mode 100644 index 00000000..901379d2 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.ssfpmrf.rst @@ -0,0 +1,6 @@ +deepmr.bloch.ssfpmrf +==================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: ssfpmrf \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.susceptibility.rst b/_sources/core/generated/deepmr.bloch.susceptibility.rst new file mode 100644 index 00000000..d8ad9469 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.susceptibility.rst @@ -0,0 +1,6 @@ +deepmr.bloch.susceptibility +=========================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: susceptibility \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.t1sat.rst b/_sources/core/generated/deepmr.bloch.t1sat.rst new file mode 100644 index 00000000..50821645 --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.t1sat.rst @@ -0,0 +1,6 @@ +deepmr.bloch.t1sat +================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: t1sat \ No newline at end of file diff --git a/_sources/core/generated/deepmr.bloch.t1t2shuffling.rst b/_sources/core/generated/deepmr.bloch.t1t2shuffling.rst new file mode 100644 index 00000000..98b348cb --- /dev/null +++ b/_sources/core/generated/deepmr.bloch.t1t2shuffling.rst @@ -0,0 +1,6 @@ +deepmr.bloch.t1t2shuffling +========================== + +.. currentmodule:: deepmr.bloch + +.. autofunction:: t1t2shuffling \ No newline at end of file diff --git a/_sources/core/generated/deepmr.brainweb.rst b/_sources/core/generated/deepmr.brainweb.rst new file mode 100644 index 00000000..900fe585 --- /dev/null +++ b/_sources/core/generated/deepmr.brainweb.rst @@ -0,0 +1,6 @@ +deepmr.brainweb +=============== + +.. currentmodule:: deepmr + +.. 
autofunction:: brainweb \ No newline at end of file diff --git a/_sources/core/generated/deepmr.cartesian2D.rst b/_sources/core/generated/deepmr.cartesian2D.rst new file mode 100644 index 00000000..ec68c7d7 --- /dev/null +++ b/_sources/core/generated/deepmr.cartesian2D.rst @@ -0,0 +1,6 @@ +deepmr.cartesian2D +================== + +.. currentmodule:: deepmr + +.. autofunction:: cartesian2D \ No newline at end of file diff --git a/_sources/core/generated/deepmr.cartesian3D.rst b/_sources/core/generated/deepmr.cartesian3D.rst new file mode 100644 index 00000000..4db577af --- /dev/null +++ b/_sources/core/generated/deepmr.cartesian3D.rst @@ -0,0 +1,6 @@ +deepmr.cartesian3D +================== + +.. currentmodule:: deepmr + +.. autofunction:: cartesian3D \ No newline at end of file diff --git a/_sources/core/generated/deepmr.custom_phantom.rst b/_sources/core/generated/deepmr.custom_phantom.rst new file mode 100644 index 00000000..e7f26d9e --- /dev/null +++ b/_sources/core/generated/deepmr.custom_phantom.rst @@ -0,0 +1,6 @@ +deepmr.custom\_phantom +====================== + +.. currentmodule:: deepmr + +.. autofunction:: custom_phantom \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fermi.rst b/_sources/core/generated/deepmr.fermi.rst new file mode 100644 index 00000000..8e2642d3 --- /dev/null +++ b/_sources/core/generated/deepmr.fermi.rst @@ -0,0 +1,6 @@ +deepmr.fermi +============ + +.. currentmodule:: deepmr + +.. autofunction:: fermi \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fft.apply_nufft_selfadj.rst b/_sources/core/generated/deepmr.fft.apply_nufft_selfadj.rst new file mode 100644 index 00000000..7d2d5f78 --- /dev/null +++ b/_sources/core/generated/deepmr.fft.apply_nufft_selfadj.rst @@ -0,0 +1,6 @@ +deepmr.fft.apply\_nufft\_selfadj +================================ + +.. currentmodule:: deepmr.fft + +.. autofunction:: apply_nufft_selfadj \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fft.apply_sparse_fft_selfadj.rst b/_sources/core/generated/deepmr.fft.apply_sparse_fft_selfadj.rst new file mode 100644 index 00000000..f0ba04a1 --- /dev/null +++ b/_sources/core/generated/deepmr.fft.apply_sparse_fft_selfadj.rst @@ -0,0 +1,6 @@ +deepmr.fft.apply\_sparse\_fft\_selfadj +====================================== + +.. currentmodule:: deepmr.fft + +.. autofunction:: apply_sparse_fft_selfadj \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fft.fft.rst b/_sources/core/generated/deepmr.fft.fft.rst new file mode 100644 index 00000000..9c42d98f --- /dev/null +++ b/_sources/core/generated/deepmr.fft.fft.rst @@ -0,0 +1,6 @@ +deepmr.fft.fft +============== + +.. currentmodule:: deepmr.fft + +.. autofunction:: fft \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fft.ifft.rst b/_sources/core/generated/deepmr.fft.ifft.rst new file mode 100644 index 00000000..2e9afa71 --- /dev/null +++ b/_sources/core/generated/deepmr.fft.ifft.rst @@ -0,0 +1,6 @@ +deepmr.fft.ifft +=============== + +.. currentmodule:: deepmr.fft + +.. autofunction:: ifft \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fft.nufft.rst b/_sources/core/generated/deepmr.fft.nufft.rst new file mode 100644 index 00000000..fb4ce9ae --- /dev/null +++ b/_sources/core/generated/deepmr.fft.nufft.rst @@ -0,0 +1,6 @@ +deepmr.fft.nufft +================ + +.. currentmodule:: deepmr.fft + +.. 
autofunction:: nufft \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fft.nufft_adj.rst b/_sources/core/generated/deepmr.fft.nufft_adj.rst new file mode 100644 index 00000000..0b4cddea --- /dev/null +++ b/_sources/core/generated/deepmr.fft.nufft_adj.rst @@ -0,0 +1,6 @@ +deepmr.fft.nufft\_adj +===================== + +.. currentmodule:: deepmr.fft + +.. autofunction:: nufft_adj \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fft.plan_toeplitz_fft.rst b/_sources/core/generated/deepmr.fft.plan_toeplitz_fft.rst new file mode 100644 index 00000000..75843a26 --- /dev/null +++ b/_sources/core/generated/deepmr.fft.plan_toeplitz_fft.rst @@ -0,0 +1,6 @@ +deepmr.fft.plan\_toeplitz\_fft +============================== + +.. currentmodule:: deepmr.fft + +.. autofunction:: plan_toeplitz_fft \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fft.plan_toeplitz_nufft.rst b/_sources/core/generated/deepmr.fft.plan_toeplitz_nufft.rst new file mode 100644 index 00000000..a89a1d24 --- /dev/null +++ b/_sources/core/generated/deepmr.fft.plan_toeplitz_nufft.rst @@ -0,0 +1,6 @@ +deepmr.fft.plan\_toeplitz\_nufft +================================ + +.. currentmodule:: deepmr.fft + +.. autofunction:: plan_toeplitz_nufft \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fft.sparse_fft.rst b/_sources/core/generated/deepmr.fft.sparse_fft.rst new file mode 100644 index 00000000..88f971b1 --- /dev/null +++ b/_sources/core/generated/deepmr.fft.sparse_fft.rst @@ -0,0 +1,6 @@ +deepmr.fft.sparse\_fft +====================== + +.. currentmodule:: deepmr.fft + +.. autofunction:: sparse_fft \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fft.sparse_ifft.rst b/_sources/core/generated/deepmr.fft.sparse_ifft.rst new file mode 100644 index 00000000..90262a68 --- /dev/null +++ b/_sources/core/generated/deepmr.fft.sparse_ifft.rst @@ -0,0 +1,6 @@ +deepmr.fft.sparse\_ifft +======================= + +.. currentmodule:: deepmr.fft + +.. autofunction:: sparse_ifft \ No newline at end of file diff --git a/_sources/core/generated/deepmr.fwt.rst b/_sources/core/generated/deepmr.fwt.rst new file mode 100644 index 00000000..00d7f0ba --- /dev/null +++ b/_sources/core/generated/deepmr.fwt.rst @@ -0,0 +1,6 @@ +deepmr.fwt +========== + +.. currentmodule:: deepmr + +.. autofunction:: fwt \ No newline at end of file diff --git a/_sources/core/generated/deepmr.io.read_acqheader.rst b/_sources/core/generated/deepmr.io.read_acqheader.rst new file mode 100644 index 00000000..7d3d2591 --- /dev/null +++ b/_sources/core/generated/deepmr.io.read_acqheader.rst @@ -0,0 +1,6 @@ +deepmr.io.read\_acqheader +========================= + +.. currentmodule:: deepmr.io + +.. autofunction:: read_acqheader \ No newline at end of file diff --git a/_sources/core/generated/deepmr.io.read_hdf5.rst b/_sources/core/generated/deepmr.io.read_hdf5.rst new file mode 100644 index 00000000..4ef0b366 --- /dev/null +++ b/_sources/core/generated/deepmr.io.read_hdf5.rst @@ -0,0 +1,6 @@ +deepmr.io.read\_hdf5 +==================== + +.. currentmodule:: deepmr.io + +.. autofunction:: read_hdf5 \ No newline at end of file diff --git a/_sources/core/generated/deepmr.io.read_image.rst b/_sources/core/generated/deepmr.io.read_image.rst new file mode 100644 index 00000000..5967972a --- /dev/null +++ b/_sources/core/generated/deepmr.io.read_image.rst @@ -0,0 +1,6 @@ +deepmr.io.read\_image +===================== + +.. currentmodule:: deepmr.io + +.. 
autofunction:: read_image \ No newline at end of file diff --git a/_sources/core/generated/deepmr.io.read_matfile.rst b/_sources/core/generated/deepmr.io.read_matfile.rst new file mode 100644 index 00000000..e5eb2105 --- /dev/null +++ b/_sources/core/generated/deepmr.io.read_matfile.rst @@ -0,0 +1,6 @@ +deepmr.io.read\_matfile +======================= + +.. currentmodule:: deepmr.io + +.. autofunction:: read_matfile \ No newline at end of file diff --git a/_sources/core/generated/deepmr.io.read_rawdata.rst b/_sources/core/generated/deepmr.io.read_rawdata.rst new file mode 100644 index 00000000..8b3db758 --- /dev/null +++ b/_sources/core/generated/deepmr.io.read_rawdata.rst @@ -0,0 +1,6 @@ +deepmr.io.read\_rawdata +======================= + +.. currentmodule:: deepmr.io + +.. autofunction:: read_rawdata \ No newline at end of file diff --git a/_sources/core/generated/deepmr.io.write_acqheader.rst b/_sources/core/generated/deepmr.io.write_acqheader.rst new file mode 100644 index 00000000..0b02e867 --- /dev/null +++ b/_sources/core/generated/deepmr.io.write_acqheader.rst @@ -0,0 +1,6 @@ +deepmr.io.write\_acqheader +========================== + +.. currentmodule:: deepmr.io + +.. autofunction:: write_acqheader \ No newline at end of file diff --git a/_sources/core/generated/deepmr.io.write_hdf5.rst b/_sources/core/generated/deepmr.io.write_hdf5.rst new file mode 100644 index 00000000..e3218773 --- /dev/null +++ b/_sources/core/generated/deepmr.io.write_hdf5.rst @@ -0,0 +1,6 @@ +deepmr.io.write\_hdf5 +===================== + +.. currentmodule:: deepmr.io + +.. autofunction:: write_hdf5 \ No newline at end of file diff --git a/_sources/core/generated/deepmr.io.write_image.rst b/_sources/core/generated/deepmr.io.write_image.rst new file mode 100644 index 00000000..fabba62f --- /dev/null +++ b/_sources/core/generated/deepmr.io.write_image.rst @@ -0,0 +1,6 @@ +deepmr.io.write\_image +====================== + +.. currentmodule:: deepmr.io + +.. autofunction:: write_image \ No newline at end of file diff --git a/_sources/core/generated/deepmr.iwt.rst b/_sources/core/generated/deepmr.iwt.rst new file mode 100644 index 00000000..0127d4ce --- /dev/null +++ b/_sources/core/generated/deepmr.iwt.rst @@ -0,0 +1,6 @@ +deepmr.iwt +========== + +.. currentmodule:: deepmr + +.. autofunction:: iwt \ No newline at end of file diff --git a/_sources/core/generated/deepmr.linops.FFTGramOp.rst b/_sources/core/generated/deepmr.linops.FFTGramOp.rst new file mode 100644 index 00000000..0aad4382 --- /dev/null +++ b/_sources/core/generated/deepmr.linops.FFTGramOp.rst @@ -0,0 +1,30 @@ +deepmr.linops.FFTGramOp +======================= + +.. currentmodule:: deepmr.linops + +.. autoclass:: FFTGramOp + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~FFTGramOp.__init__ + ~FFTGramOp.forward + ~FFTGramOp.to + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~FFTGramOp.H + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.linops.FFTOp.rst b/_sources/core/generated/deepmr.linops.FFTOp.rst new file mode 100644 index 00000000..8b2ba00c --- /dev/null +++ b/_sources/core/generated/deepmr.linops.FFTOp.rst @@ -0,0 +1,30 @@ +deepmr.linops.FFTOp +=================== + +.. currentmodule:: deepmr.linops + +.. autoclass:: FFTOp + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~FFTOp.__init__ + ~FFTOp.forward + ~FFTOp.to + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~FFTOp.H + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.linops.IFFTOp.rst b/_sources/core/generated/deepmr.linops.IFFTOp.rst new file mode 100644 index 00000000..be82e2cc --- /dev/null +++ b/_sources/core/generated/deepmr.linops.IFFTOp.rst @@ -0,0 +1,30 @@ +deepmr.linops.IFFTOp +==================== + +.. currentmodule:: deepmr.linops + +.. autoclass:: IFFTOp + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~IFFTOp.__init__ + ~IFFTOp.forward + ~IFFTOp.to + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~IFFTOp.H + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.linops.NUFFTAdjointOp.rst b/_sources/core/generated/deepmr.linops.NUFFTAdjointOp.rst new file mode 100644 index 00000000..7fa7b42c --- /dev/null +++ b/_sources/core/generated/deepmr.linops.NUFFTAdjointOp.rst @@ -0,0 +1,30 @@ +deepmr.linops.NUFFTAdjointOp +============================ + +.. currentmodule:: deepmr.linops + +.. autoclass:: NUFFTAdjointOp + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~NUFFTAdjointOp.__init__ + ~NUFFTAdjointOp.forward + ~NUFFTAdjointOp.to + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~NUFFTAdjointOp.H + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.linops.NUFFTGramOp.rst b/_sources/core/generated/deepmr.linops.NUFFTGramOp.rst new file mode 100644 index 00000000..244178af --- /dev/null +++ b/_sources/core/generated/deepmr.linops.NUFFTGramOp.rst @@ -0,0 +1,30 @@ +deepmr.linops.NUFFTGramOp +========================= + +.. currentmodule:: deepmr.linops + +.. autoclass:: NUFFTGramOp + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~NUFFTGramOp.__init__ + ~NUFFTGramOp.forward + ~NUFFTGramOp.to + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~NUFFTGramOp.H + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.linops.NUFFTOp.rst b/_sources/core/generated/deepmr.linops.NUFFTOp.rst new file mode 100644 index 00000000..4cc4a490 --- /dev/null +++ b/_sources/core/generated/deepmr.linops.NUFFTOp.rst @@ -0,0 +1,30 @@ +deepmr.linops.NUFFTOp +===================== + +.. currentmodule:: deepmr.linops + +.. autoclass:: NUFFTOp + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~NUFFTOp.__init__ + ~NUFFTOp.forward + ~NUFFTOp.to + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~NUFFTOp.H + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.linops.SenseAdjointOp.rst b/_sources/core/generated/deepmr.linops.SenseAdjointOp.rst new file mode 100644 index 00000000..967450b2 --- /dev/null +++ b/_sources/core/generated/deepmr.linops.SenseAdjointOp.rst @@ -0,0 +1,30 @@ +deepmr.linops.SenseAdjointOp +============================ + +.. currentmodule:: deepmr.linops + +.. autoclass:: SenseAdjointOp + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~SenseAdjointOp.__init__ + ~SenseAdjointOp.forward + ~SenseAdjointOp.to + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SenseAdjointOp.H + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.linops.SenseOp.rst b/_sources/core/generated/deepmr.linops.SenseOp.rst new file mode 100644 index 00000000..a76aec37 --- /dev/null +++ b/_sources/core/generated/deepmr.linops.SenseOp.rst @@ -0,0 +1,30 @@ +deepmr.linops.SenseOp +===================== + +.. currentmodule:: deepmr.linops + +.. autoclass:: SenseOp + + + .. 
automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~SenseOp.__init__ + ~SenseOp.forward + ~SenseOp.to + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SenseOp.H + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.optim.ADMMStep.rst b/_sources/core/generated/deepmr.optim.ADMMStep.rst new file mode 100644 index 00000000..a7c47847 --- /dev/null +++ b/_sources/core/generated/deepmr.optim.ADMMStep.rst @@ -0,0 +1,23 @@ +deepmr.optim.ADMMStep +===================== + +.. currentmodule:: deepmr.optim + +.. autoclass:: ADMMStep + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~ADMMStep.__init__ + ~ADMMStep.forward + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.optim.CGStep.rst b/_sources/core/generated/deepmr.optim.CGStep.rst new file mode 100644 index 00000000..59811c7d --- /dev/null +++ b/_sources/core/generated/deepmr.optim.CGStep.rst @@ -0,0 +1,25 @@ +deepmr.optim.CGStep +=================== + +.. currentmodule:: deepmr.optim + +.. autoclass:: CGStep + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~CGStep.__init__ + ~CGStep.check_convergence + ~CGStep.dot + ~CGStep.forward + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.optim.PGDStep.rst b/_sources/core/generated/deepmr.optim.PGDStep.rst new file mode 100644 index 00000000..7d941d48 --- /dev/null +++ b/_sources/core/generated/deepmr.optim.PGDStep.rst @@ -0,0 +1,24 @@ +deepmr.optim.PGDStep +==================== + +.. currentmodule:: deepmr.optim + +.. autoclass:: PGDStep + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~PGDStep.__init__ + ~PGDStep.check_convergence + ~PGDStep.forward + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.optim.admm_solve.rst b/_sources/core/generated/deepmr.optim.admm_solve.rst new file mode 100644 index 00000000..a1c48f1e --- /dev/null +++ b/_sources/core/generated/deepmr.optim.admm_solve.rst @@ -0,0 +1,6 @@ +deepmr.optim.admm\_solve +======================== + +.. currentmodule:: deepmr.optim + +.. autodata:: admm_solve \ No newline at end of file diff --git a/_sources/core/generated/deepmr.optim.cg_solve.rst b/_sources/core/generated/deepmr.optim.cg_solve.rst new file mode 100644 index 00000000..336f96ba --- /dev/null +++ b/_sources/core/generated/deepmr.optim.cg_solve.rst @@ -0,0 +1,6 @@ +deepmr.optim.cg\_solve +====================== + +.. currentmodule:: deepmr.optim + +.. autodata:: cg_solve \ No newline at end of file diff --git a/_sources/core/generated/deepmr.optim.pgd_solve.rst b/_sources/core/generated/deepmr.optim.pgd_solve.rst new file mode 100644 index 00000000..d87d6bbc --- /dev/null +++ b/_sources/core/generated/deepmr.optim.pgd_solve.rst @@ -0,0 +1,6 @@ +deepmr.optim.pgd\_solve +======================= + +.. currentmodule:: deepmr.optim + +.. autodata:: pgd_solve \ No newline at end of file diff --git a/_sources/core/generated/deepmr.optim.power_method.rst b/_sources/core/generated/deepmr.optim.power_method.rst new file mode 100644 index 00000000..6a2b1a5b --- /dev/null +++ b/_sources/core/generated/deepmr.optim.power_method.rst @@ -0,0 +1,6 @@ +deepmr.optim.power\_method +========================== + +.. currentmodule:: deepmr.optim + +.. 
autodata:: power_method \ No newline at end of file diff --git a/_sources/core/generated/deepmr.patches2tensor.rst b/_sources/core/generated/deepmr.patches2tensor.rst new file mode 100644 index 00000000..5b013f65 --- /dev/null +++ b/_sources/core/generated/deepmr.patches2tensor.rst @@ -0,0 +1,6 @@ +deepmr.patches2tensor +===================== + +.. currentmodule:: deepmr + +.. autofunction:: patches2tensor \ No newline at end of file diff --git a/_sources/core/generated/deepmr.phase_cycling.rst b/_sources/core/generated/deepmr.phase_cycling.rst new file mode 100644 index 00000000..28f9049f --- /dev/null +++ b/_sources/core/generated/deepmr.phase_cycling.rst @@ -0,0 +1,6 @@ +deepmr.phase\_cycling +===================== + +.. currentmodule:: deepmr + +.. autofunction:: phase_cycling \ No newline at end of file diff --git a/_sources/core/generated/deepmr.piecewise_fa.rst b/_sources/core/generated/deepmr.piecewise_fa.rst new file mode 100644 index 00000000..bd83b768 --- /dev/null +++ b/_sources/core/generated/deepmr.piecewise_fa.rst @@ -0,0 +1,6 @@ +deepmr.piecewise\_fa +==================== + +.. currentmodule:: deepmr + +.. autofunction:: piecewise_fa \ No newline at end of file diff --git a/_sources/core/generated/deepmr.prox.LLRDenoiser.rst b/_sources/core/generated/deepmr.prox.LLRDenoiser.rst new file mode 100644 index 00000000..7a9f3aa4 --- /dev/null +++ b/_sources/core/generated/deepmr.prox.LLRDenoiser.rst @@ -0,0 +1,23 @@ +deepmr.prox.LLRDenoiser +======================= + +.. currentmodule:: deepmr.prox + +.. autoclass:: LLRDenoiser + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~LLRDenoiser.__init__ + ~LLRDenoiser.forward + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.prox.TGVDenoiser.rst b/_sources/core/generated/deepmr.prox.TGVDenoiser.rst new file mode 100644 index 00000000..fa1286f5 --- /dev/null +++ b/_sources/core/generated/deepmr.prox.TGVDenoiser.rst @@ -0,0 +1,23 @@ +deepmr.prox.TGVDenoiser +======================= + +.. currentmodule:: deepmr.prox + +.. autoclass:: TGVDenoiser + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~TGVDenoiser.__init__ + ~TGVDenoiser.forward + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.prox.TVDenoiser.rst b/_sources/core/generated/deepmr.prox.TVDenoiser.rst new file mode 100644 index 00000000..56c11e87 --- /dev/null +++ b/_sources/core/generated/deepmr.prox.TVDenoiser.rst @@ -0,0 +1,23 @@ +deepmr.prox.TVDenoiser +====================== + +.. currentmodule:: deepmr.prox + +.. autoclass:: TVDenoiser + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~TVDenoiser.__init__ + ~TVDenoiser.forward + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.prox.WaveletDenoiser.rst b/_sources/core/generated/deepmr.prox.WaveletDenoiser.rst new file mode 100644 index 00000000..909460e9 --- /dev/null +++ b/_sources/core/generated/deepmr.prox.WaveletDenoiser.rst @@ -0,0 +1,23 @@ +deepmr.prox.WaveletDenoiser +=========================== + +.. currentmodule:: deepmr.prox + +.. autoclass:: WaveletDenoiser + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. 
autosummary:: + + ~WaveletDenoiser.__init__ + ~WaveletDenoiser.forward + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.prox.WaveletDictDenoiser.rst b/_sources/core/generated/deepmr.prox.WaveletDictDenoiser.rst new file mode 100644 index 00000000..b691f274 --- /dev/null +++ b/_sources/core/generated/deepmr.prox.WaveletDictDenoiser.rst @@ -0,0 +1,23 @@ +deepmr.prox.WaveletDictDenoiser +=============================== + +.. currentmodule:: deepmr.prox + +.. autoclass:: WaveletDictDenoiser + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. autosummary:: + + ~WaveletDictDenoiser.__init__ + ~WaveletDictDenoiser.forward + + + + + + \ No newline at end of file diff --git a/_sources/core/generated/deepmr.prox.llr_denoise.rst b/_sources/core/generated/deepmr.prox.llr_denoise.rst new file mode 100644 index 00000000..12f0063a --- /dev/null +++ b/_sources/core/generated/deepmr.prox.llr_denoise.rst @@ -0,0 +1,6 @@ +deepmr.prox.llr\_denoise +======================== + +.. currentmodule:: deepmr.prox + +.. autofunction:: llr_denoise \ No newline at end of file diff --git a/_sources/core/generated/deepmr.prox.tgv_denoise.rst b/_sources/core/generated/deepmr.prox.tgv_denoise.rst new file mode 100644 index 00000000..a601a7ea --- /dev/null +++ b/_sources/core/generated/deepmr.prox.tgv_denoise.rst @@ -0,0 +1,6 @@ +deepmr.prox.tgv\_denoise +======================== + +.. currentmodule:: deepmr.prox + +.. autofunction:: tgv_denoise \ No newline at end of file diff --git a/_sources/core/generated/deepmr.prox.tv_denoise.rst b/_sources/core/generated/deepmr.prox.tv_denoise.rst new file mode 100644 index 00000000..c36b4ae8 --- /dev/null +++ b/_sources/core/generated/deepmr.prox.tv_denoise.rst @@ -0,0 +1,6 @@ +deepmr.prox.tv\_denoise +======================= + +.. currentmodule:: deepmr.prox + +.. autofunction:: tv_denoise \ No newline at end of file diff --git a/_sources/core/generated/deepmr.prox.wavelet_denoise.rst b/_sources/core/generated/deepmr.prox.wavelet_denoise.rst new file mode 100644 index 00000000..0dfd6652 --- /dev/null +++ b/_sources/core/generated/deepmr.prox.wavelet_denoise.rst @@ -0,0 +1,6 @@ +deepmr.prox.wavelet\_denoise +============================ + +.. currentmodule:: deepmr.prox + +.. autofunction:: wavelet_denoise \ No newline at end of file diff --git a/_sources/core/generated/deepmr.prox.wavelet_dict_denoise.rst b/_sources/core/generated/deepmr.prox.wavelet_dict_denoise.rst new file mode 100644 index 00000000..09ca8cdf --- /dev/null +++ b/_sources/core/generated/deepmr.prox.wavelet_dict_denoise.rst @@ -0,0 +1,6 @@ +deepmr.prox.wavelet\_dict\_denoise +================================== + +.. currentmodule:: deepmr.prox + +.. autofunction:: wavelet_dict_denoise \ No newline at end of file diff --git a/_sources/core/generated/deepmr.radial.rst b/_sources/core/generated/deepmr.radial.rst new file mode 100644 index 00000000..7e893925 --- /dev/null +++ b/_sources/core/generated/deepmr.radial.rst @@ -0,0 +1,6 @@ +deepmr.radial +============= + +.. currentmodule:: deepmr + +.. autofunction:: radial \ No newline at end of file diff --git a/_sources/core/generated/deepmr.radial_proj.rst b/_sources/core/generated/deepmr.radial_proj.rst new file mode 100644 index 00000000..60ad94c6 --- /dev/null +++ b/_sources/core/generated/deepmr.radial_proj.rst @@ -0,0 +1,6 @@ +deepmr.radial\_proj +=================== + +.. currentmodule:: deepmr + +.. 
autofunction:: radial_proj \ No newline at end of file diff --git a/_sources/core/generated/deepmr.radial_stack.rst b/_sources/core/generated/deepmr.radial_stack.rst new file mode 100644 index 00000000..eab10c0e --- /dev/null +++ b/_sources/core/generated/deepmr.radial_stack.rst @@ -0,0 +1,6 @@ +deepmr.radial\_stack +==================== + +.. currentmodule:: deepmr + +.. autofunction:: radial_stack \ No newline at end of file diff --git a/_sources/core/generated/deepmr.resample.rst b/_sources/core/generated/deepmr.resample.rst new file mode 100644 index 00000000..ff934c5f --- /dev/null +++ b/_sources/core/generated/deepmr.resample.rst @@ -0,0 +1,6 @@ +deepmr.resample +=============== + +.. currentmodule:: deepmr + +.. autofunction:: resample \ No newline at end of file diff --git a/_sources/core/generated/deepmr.resize.rst b/_sources/core/generated/deepmr.resize.rst new file mode 100644 index 00000000..5979419e --- /dev/null +++ b/_sources/core/generated/deepmr.resize.rst @@ -0,0 +1,6 @@ +deepmr.resize +============= + +.. currentmodule:: deepmr + +.. autofunction:: resize \ No newline at end of file diff --git a/_sources/core/generated/deepmr.rf_spoiling.rst b/_sources/core/generated/deepmr.rf_spoiling.rst new file mode 100644 index 00000000..f37ddd41 --- /dev/null +++ b/_sources/core/generated/deepmr.rf_spoiling.rst @@ -0,0 +1,6 @@ +deepmr.rf\_spoiling +=================== + +.. currentmodule:: deepmr + +.. autofunction:: rf_spoiling \ No newline at end of file diff --git a/_sources/core/generated/deepmr.rigid_motion.rst b/_sources/core/generated/deepmr.rigid_motion.rst new file mode 100644 index 00000000..405a9303 --- /dev/null +++ b/_sources/core/generated/deepmr.rigid_motion.rst @@ -0,0 +1,6 @@ +deepmr.rigid\_motion +==================== + +.. currentmodule:: deepmr + +.. autofunction:: rigid_motion \ No newline at end of file diff --git a/_sources/core/generated/deepmr.rosette.rst b/_sources/core/generated/deepmr.rosette.rst new file mode 100644 index 00000000..9ca9c207 --- /dev/null +++ b/_sources/core/generated/deepmr.rosette.rst @@ -0,0 +1,6 @@ +deepmr.rosette +============== + +.. currentmodule:: deepmr + +.. autofunction:: rosette \ No newline at end of file diff --git a/_sources/core/generated/deepmr.rosette_proj.rst b/_sources/core/generated/deepmr.rosette_proj.rst new file mode 100644 index 00000000..38d994ad --- /dev/null +++ b/_sources/core/generated/deepmr.rosette_proj.rst @@ -0,0 +1,6 @@ +deepmr.rosette\_proj +==================== + +.. currentmodule:: deepmr + +.. autofunction:: rosette_proj \ No newline at end of file diff --git a/_sources/core/generated/deepmr.rosette_stack.rst b/_sources/core/generated/deepmr.rosette_stack.rst new file mode 100644 index 00000000..60e07ebf --- /dev/null +++ b/_sources/core/generated/deepmr.rosette_stack.rst @@ -0,0 +1,6 @@ +deepmr.rosette\_stack +===================== + +.. currentmodule:: deepmr + +.. autofunction:: rosette_stack \ No newline at end of file diff --git a/_sources/core/generated/deepmr.rss.rst b/_sources/core/generated/deepmr.rss.rst new file mode 100644 index 00000000..87c79813 --- /dev/null +++ b/_sources/core/generated/deepmr.rss.rst @@ -0,0 +1,6 @@ +deepmr.rss +========== + +.. currentmodule:: deepmr + +.. autofunction:: rss \ No newline at end of file diff --git a/_sources/core/generated/deepmr.sensmap.rst b/_sources/core/generated/deepmr.sensmap.rst new file mode 100644 index 00000000..02ff5261 --- /dev/null +++ b/_sources/core/generated/deepmr.sensmap.rst @@ -0,0 +1,6 @@ +deepmr.sensmap +============== + +.. 
currentmodule:: deepmr + +.. autofunction:: sensmap \ No newline at end of file diff --git a/_sources/core/generated/deepmr.shepp_logan.rst b/_sources/core/generated/deepmr.shepp_logan.rst new file mode 100644 index 00000000..0081c515 --- /dev/null +++ b/_sources/core/generated/deepmr.shepp_logan.rst @@ -0,0 +1,6 @@ +deepmr.shepp\_logan +=================== + +.. currentmodule:: deepmr + +.. autofunction:: shepp_logan \ No newline at end of file diff --git a/_sources/core/generated/deepmr.sinusoidal_fa.rst b/_sources/core/generated/deepmr.sinusoidal_fa.rst new file mode 100644 index 00000000..8c682984 --- /dev/null +++ b/_sources/core/generated/deepmr.sinusoidal_fa.rst @@ -0,0 +1,6 @@ +deepmr.sinusoidal\_fa +===================== + +.. currentmodule:: deepmr + +.. autofunction:: sinusoidal_fa \ No newline at end of file diff --git a/_sources/core/generated/deepmr.spiral.rst b/_sources/core/generated/deepmr.spiral.rst new file mode 100644 index 00000000..206b3804 --- /dev/null +++ b/_sources/core/generated/deepmr.spiral.rst @@ -0,0 +1,6 @@ +deepmr.spiral +============= + +.. currentmodule:: deepmr + +.. autofunction:: spiral \ No newline at end of file diff --git a/_sources/core/generated/deepmr.spiral_proj.rst b/_sources/core/generated/deepmr.spiral_proj.rst new file mode 100644 index 00000000..5ab1d430 --- /dev/null +++ b/_sources/core/generated/deepmr.spiral_proj.rst @@ -0,0 +1,6 @@ +deepmr.spiral\_proj +=================== + +.. currentmodule:: deepmr + +.. autofunction:: spiral_proj \ No newline at end of file diff --git a/_sources/core/generated/deepmr.spiral_stack.rst b/_sources/core/generated/deepmr.spiral_stack.rst new file mode 100644 index 00000000..ebcdac67 --- /dev/null +++ b/_sources/core/generated/deepmr.spiral_stack.rst @@ -0,0 +1,6 @@ +deepmr.spiral\_stack +==================== + +.. currentmodule:: deepmr + +.. autofunction:: spiral_stack \ No newline at end of file diff --git a/_sources/core/generated/deepmr.svd.rst b/_sources/core/generated/deepmr.svd.rst new file mode 100644 index 00000000..e2bdd030 --- /dev/null +++ b/_sources/core/generated/deepmr.svd.rst @@ -0,0 +1,6 @@ +deepmr.svd +========== + +.. currentmodule:: deepmr + +.. autofunction:: svd \ No newline at end of file diff --git a/_sources/core/generated/deepmr.tensor2patches.rst b/_sources/core/generated/deepmr.tensor2patches.rst new file mode 100644 index 00000000..640440d9 --- /dev/null +++ b/_sources/core/generated/deepmr.tensor2patches.rst @@ -0,0 +1,6 @@ +deepmr.tensor2patches +===================== + +.. currentmodule:: deepmr + +.. autofunction:: tensor2patches \ No newline at end of file diff --git a/_sources/core/io.md b/_sources/core/io.md new file mode 100644 index 00000000..dfd55612 --- /dev/null +++ b/_sources/core/io.md @@ -0,0 +1,62 @@ +# File Input/Output + +```{eval-rst} +.. automodule:: deepmr.io +``` + +## Image I/O routines +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.io.read_image + deepmr.io.write_image +``` + +## K-Space I/O routines +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.io.read_rawdata +``` + +## Header I/O routines +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.io.read_acqheader + deepmr.io.write_acqheader +``` + +## Generic I/O routines + +DeepMR also contains generic I/O routines that can be used to write custom I/O functions for other formats. 
As a general rule, functions should have these signatures: + +```python +# Custom reading routines +image, head = read_custom_image(filepath, *args, head=None, **kwargs) # image +data, head = read_custom_rawdata(filepath, *args, head=None, **kwargs) # k-space +head = read_custom_acqheader(filepath, *args, **kwargs) # header + +# Custom writing routines +write_custom_image(filepath, image, *args, **kwargs) # image +write_custom_rawdata(filepath, data, *args, **kwargs) # k-space +write_custom_acqheader(filepath, head, *args, **kwargs) # header +``` + + + +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.io.read_matfile + deepmr.io.read_hdf5 + deepmr.io.write_hdf5 +``` diff --git a/_sources/core/linops.md b/_sources/core/linops.md new file mode 100644 index 00000000..ba9a0563 --- /dev/null +++ b/_sources/core/linops.md @@ -0,0 +1,37 @@ +# Linear Operators + +```{eval-rst} +.. automodule:: deepmr.linops +``` + +## Coil Sensitivity +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.linops.SenseOp + deepmr.linops.SenseAdjointOp +``` + +## FFT +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.linops.FFTOp + deepmr.linops.IFFTOp + deepmr.linops.FFTGramOp +``` + +## Non-Uniform FFT (NUFFT) +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.linops.NUFFTOp + deepmr.linops.NUFFTAdjointOp + deepmr.linops.NUFFTGramOp +``` diff --git a/_sources/core/optim.md b/_sources/core/optim.md new file mode 100644 index 00000000..51789599 --- /dev/null +++ b/_sources/core/optim.md @@ -0,0 +1,39 @@ +# Iterative Algorithms + +```{eval-rst} +.. automodule:: deepmr.optim +``` + +## Optimization Steps + +Operators representing single iterations of classical optimization algorithms. + +DeepMR exposes these operators as ``torch.nn`` objects that can be chained, e.g., in unrolled neural network architectures, +and also provides the corresponding functional versions for standalone usage. + +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.optim.CGStep + deepmr.optim.ADMMStep + deepmr.optim.PGDStep + + deepmr.optim.cg_solve + deepmr.optim.admm_solve + deepmr.optim.pgd_solve +``` + +In addition, we provide utilities to estimate properties of matrix-free operators, such as the maximum eigenvalue. + +## Linop linear algebra + +```{eval-rst} +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.optim.power_method +``` + diff --git a/_sources/core/prox.md b/_sources/core/prox.md new file mode 100644 index 00000000..3ff6cf17 --- /dev/null +++ b/_sources/core/prox.md @@ -0,0 +1,38 @@ +# Proximal Operators + +```{eval-rst} +.. automodule:: deepmr.prox +``` + +## Classical Denoisers + +Classical (i.e., non-trainable) denoisers can be used inside +iterative reconstructions as regularizers (i.e., Plug-and-Play, or PnP, reconstruction). + +DeepMR exposes denoisers both as ``torch.nn`` objects (to be chained in DL-based reconstructions) +and as functional forms (e.g., for simple standalone denoising). + +Currently available denoisers are Wavelet (``WaveletDenoiser``, ``WaveletDictDenoiser``), +Local Low Rank (``LLRDenoiser``), and Total (Generalized) Variation (``TVDenoiser``, ``TGVDenoiser``). + +Both ``WaveletDenoiser`` / ``WaveletDictDenoiser`` and ``TVDenoiser`` / ``TGVDenoiser`` are adapted for complex-valued inputs from the corresponding [DeepInverse](https://deepinv.github.io/deepinv/) implementations.
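+ +For illustration, the following minimal sketch shows this dual usage; the threshold argument name (``ths``) is an assumption carried over from the DeepInverse interface and may differ from the actual DeepMR signature, so refer to the reference list below: + +```python +import torch +import deepmr.prox + +# complex-valued test image (e.g., a single reconstructed 2D slice) +img = torch.randn(256, 256, dtype=torch.complex64) + +# object-oriented interface: instantiate once, then apply (e.g., inside an unrolled network) +denoiser = deepmr.prox.TVDenoiser(ths=0.1)  # "ths" is an assumed argument name +output = denoiser(img) + +# functional interface for one-off, standalone denoising (assumed signature) +output = deepmr.prox.tv_denoise(img, ths=0.1) +``` + +```{eval-rst} +.. 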
autosummary:: + :toctree: generated + :nosignatures: + + deepmr.prox.WaveletDenoiser + deepmr.prox.WaveletDictDenoiser + deepmr.prox.TVDenoiser + deepmr.prox.TGVDenoiser + deepmr.prox.LLRDenoiser + + deepmr.prox.wavelet_dict_denoise + deepmr.prox.wavelet_denoise + deepmr.prox.tv_denoise + deepmr.prox.tgv_denoise + deepmr.prox.llr_denoise +``` + + diff --git a/_sources/core/signal.md b/_sources/core/signal.md new file mode 100644 index 00000000..81bc1da6 --- /dev/null +++ b/_sources/core/signal.md @@ -0,0 +1,60 @@ +# Signal Processing + +```{eval-rst} +.. automodule:: deepmr._signal +``` + +## Resize and resampling +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.resize + deepmr.resample +``` + +## Filtering +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.fermi +``` + +## Tensor folding and unfolding +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.tensor2patches + deepmr.patches2tensor +``` + +## Wavelet decomposition +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.fwt + deepmr.iwt +``` + +## Signal compression +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.rss + deepmr.svd +``` + diff --git a/_sources/core/vobj.md b/_sources/core/vobj.md new file mode 100644 index 00000000..fb61f2c3 --- /dev/null +++ b/_sources/core/vobj.md @@ -0,0 +1,74 @@ +# Virtual Objects + +```{eval-rst} +.. automodule:: deepmr._vobj +``` + +## Phantoms +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.shepp_logan + deepmr.brainweb + deepmr.custom_phantom +``` + +## Fields +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.b0field + deepmr.b1field + deepmr.sensmap +``` + +## Non-idealities +Miscellaneous non-idealities generation routines (e.g., motion patterns). +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.rigid_motion +``` + +## Sampling +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.cartesian2D + deepmr.cartesian3D + deepmr.radial + deepmr.radial_stack + deepmr.radial_proj + deepmr.rosette + deepmr.rosette_stack + deepmr.rosette_proj + deepmr.spiral + deepmr.spiral_stack + deepmr.spiral_proj +``` + +## Trains +Variable parameters (e.g., flip angle, rf phase, echo time) train generators. +```{eval-rst} +.. currentmodule:: deepmr +.. 
autosummary:: + :toctree: generated + :nosignatures: + + deepmr.piecewise_fa + deepmr.sinusoidal_fa + deepmr.phase_cycling + deepmr.rf_spoiling +``` \ No newline at end of file diff --git a/_sources/index.md b/_sources/index.md new file mode 100644 index 00000000..f53f8448 --- /dev/null +++ b/_sources/index.md @@ -0,0 +1,58 @@ +# DeepMR +```{include} ../../_README.md +``` + + + +```{toctree} +:hidden: +:caption: Core Modules + +core/signal.md +core/fft.md +core/bloch.md +core/vobj.md +core/io.md +core/linops.md +core/prox.md +core/optim.md + +``` + +```{toctree} +:hidden: +:caption: MR Reconstruction + +recon/calib.md +recon/alg.md +recon/inference.md + +``` + +```{nbgallery} +:hidden: +:caption: Tutorials + +tutorials/index.md + +``` + +```{toctree} +:hidden: +:caption: Miscellaneous + +misc/changelog.md +misc/conduct.md +misc/contributing.md + +``` + diff --git a/_sources/misc/changelog.md b/_sources/misc/changelog.md new file mode 100644 index 00000000..4288bf8e --- /dev/null +++ b/_sources/misc/changelog.md @@ -0,0 +1,2 @@ +```{include} ../../../CHANGELOG.md +``` \ No newline at end of file diff --git a/_sources/misc/conduct.md b/_sources/misc/conduct.md new file mode 100644 index 00000000..d414633d --- /dev/null +++ b/_sources/misc/conduct.md @@ -0,0 +1,2 @@ +```{include} ../../../CONDUCT.md +``` \ No newline at end of file diff --git a/_sources/misc/contributing.md b/_sources/misc/contributing.md new file mode 100644 index 00000000..9eb1fae7 --- /dev/null +++ b/_sources/misc/contributing.md @@ -0,0 +1,2 @@ +```{include} ../../../CONTRIBUTING.md +``` \ No newline at end of file diff --git a/_sources/recon/alg.md b/_sources/recon/alg.md new file mode 100644 index 00000000..5953a241 --- /dev/null +++ b/_sources/recon/alg.md @@ -0,0 +1,39 @@ +# Image Reconstructors + +```{eval-rst} +.. automodule:: deepmr.recon.alg +``` + +## Building blocks + +DeepMR contains convenient builder routines to automatically initialize the MR encoding +operator for different reconstruction problems +(e.g., Cartesian vs Non-Cartesian, single- vs multi-channel, single- vs multi-contrast). + +The resulting operator can be used inside conventional reconstruction algorithms or inside neural network architectures. + +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.recon.EncodingOp +``` + +## Classical Image Reconstruction + +DeepMR contains convenient wrappers around standard image reconstruction algorithms. + +The provided routine can perform zero-filled or iterative reconstruction +with Tikhonov, L1-Wavelet, or Total Variation regularization for both Cartesian +and Non-Cartesian (single-/multi-contrast, single-/multi-channel) data, depending on the input arguments. + +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.recon.recon_lstsq +``` \ No newline at end of file diff --git a/_sources/recon/calib.md b/_sources/recon/calib.md new file mode 100644 index 00000000..ebcf0e21 --- /dev/null +++ b/_sources/recon/calib.md @@ -0,0 +1,25 @@ +# Reconstruction Preprocessing + +```{eval-rst} +.. automodule:: deepmr.recon.calib +``` + +## Signal intensity scaling +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.recon.intensity_scaling +``` + +## Coil sensitivity estimation
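+A hypothetical call pattern is sketched below for illustration only; the argument layout is an assumption rather than the verified signature, so consult the reference entry that follows: + +```python +import torch +import deepmr + +# dummy multi-channel calibration k-space data (assumed layout: channels, y, x) +ksp = torch.randn(8, 32, 32, dtype=torch.complex64) + +# estimate ESPIRiT coil sensitivity maps (argument names and defaults are assumed) +smaps = deepmr.recon.espirit_cal(ksp) +``` + +```{eval-rst} +.. currentmodule:: deepmr +.. 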
autosummary:: + :toctree: generated + :nosignatures: + + deepmr.recon.espirit_cal +``` \ No newline at end of file diff --git a/_sources/recon/generated/deepmr.recon.EncodingOp.rst b/_sources/recon/generated/deepmr.recon.EncodingOp.rst new file mode 100644 index 00000000..aae9d8b9 --- /dev/null +++ b/_sources/recon/generated/deepmr.recon.EncodingOp.rst @@ -0,0 +1,6 @@ +deepmr.recon.EncodingOp +======================= + +.. currentmodule:: deepmr.recon + +.. autofunction:: EncodingOp \ No newline at end of file diff --git a/_sources/recon/generated/deepmr.recon.espirit_cal.rst b/_sources/recon/generated/deepmr.recon.espirit_cal.rst new file mode 100644 index 00000000..febbb853 --- /dev/null +++ b/_sources/recon/generated/deepmr.recon.espirit_cal.rst @@ -0,0 +1,6 @@ +deepmr.recon.espirit\_cal +========================= + +.. currentmodule:: deepmr.recon + +.. autofunction:: espirit_cal \ No newline at end of file diff --git a/_sources/recon/generated/deepmr.recon.fse_fit.rst b/_sources/recon/generated/deepmr.recon.fse_fit.rst new file mode 100644 index 00000000..57bae5d2 --- /dev/null +++ b/_sources/recon/generated/deepmr.recon.fse_fit.rst @@ -0,0 +1,6 @@ +deepmr.recon.fse\_fit +===================== + +.. currentmodule:: deepmr.recon + +.. autofunction:: fse_fit \ No newline at end of file diff --git a/_sources/recon/generated/deepmr.recon.intensity_scaling.rst b/_sources/recon/generated/deepmr.recon.intensity_scaling.rst new file mode 100644 index 00000000..7aeca340 --- /dev/null +++ b/_sources/recon/generated/deepmr.recon.intensity_scaling.rst @@ -0,0 +1,6 @@ +deepmr.recon.intensity\_scaling +=============================== + +.. currentmodule:: deepmr.recon + +.. autofunction:: intensity_scaling \ No newline at end of file diff --git a/_sources/recon/generated/deepmr.recon.mpnrage_fit.rst b/_sources/recon/generated/deepmr.recon.mpnrage_fit.rst new file mode 100644 index 00000000..b0a5dca6 --- /dev/null +++ b/_sources/recon/generated/deepmr.recon.mpnrage_fit.rst @@ -0,0 +1,6 @@ +deepmr.recon.mpnrage\_fit +========================= + +.. currentmodule:: deepmr.recon + +.. autofunction:: mpnrage_fit \ No newline at end of file diff --git a/_sources/recon/generated/deepmr.recon.recon_lstsq.rst b/_sources/recon/generated/deepmr.recon.recon_lstsq.rst new file mode 100644 index 00000000..3f59566d --- /dev/null +++ b/_sources/recon/generated/deepmr.recon.recon_lstsq.rst @@ -0,0 +1,6 @@ +deepmr.recon.recon\_lstsq +========================= + +.. currentmodule:: deepmr.recon + +.. autofunction:: recon_lstsq \ No newline at end of file diff --git a/_sources/recon/inference.md b/_sources/recon/inference.md new file mode 100644 index 00000000..69598926 --- /dev/null +++ b/_sources/recon/inference.md @@ -0,0 +1,29 @@ +# Parametric Mapping + +```{eval-rst} +.. automodule:: deepmr.recon.inference +``` + +## T1 mapping + +```{eval-rst} +.. currentmodule:: deepmr +.. autosummary:: + :toctree: generated + :nosignatures: + + deepmr.recon.mpnrage_fit + +``` + +## T2 mapping + +```{eval-rst} +.. currentmodule:: deepmr +.. 
autosummary:: + :toctree: generated + :nosignatures: + + deepmr.recon.fse_fit + +``` \ No newline at end of file diff --git a/_sources/tutorials/index.md b/_sources/tutorials/index.md new file mode 100644 index 00000000..879c02e7 --- /dev/null +++ b/_sources/tutorials/index.md @@ -0,0 +1,6 @@ +# Basics + +```{nbgallery} +../../../tutorials/basics/demo_basics.ipynb +../../../tutorials/basics/demo_linops.ipynb +``` \ No newline at end of file diff --git a/_sources/user_guide/getting_started.md b/_sources/user_guide/getting_started.md new file mode 100644 index 00000000..8b3a7945 --- /dev/null +++ b/_sources/user_guide/getting_started.md @@ -0,0 +1 @@ +# Getting Started \ No newline at end of file diff --git a/_sources/user_guide/overview.md b/_sources/user_guide/overview.md new file mode 100644 index 00000000..4bba659e --- /dev/null +++ b/_sources/user_guide/overview.md @@ -0,0 +1 @@ +# Overview \ No newline at end of file diff --git a/_static/basic.css b/_static/basic.css new file mode 100644 index 00000000..61572969 --- /dev/null +++ b/_static/basic.css @@ -0,0 +1,903 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 270px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + 
margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + 
+div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: 
decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; 
+} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/_static/binder_badge_logo.svg b/_static/binder_badge_logo.svg new file mode 100644 index 00000000..327f6b63 --- /dev/null +++ b/_static/binder_badge_logo.svg @@ -0,0 +1 @@ + launchlaunchbinderbinder \ No newline at end of file diff --git a/_static/broken_example.png b/_static/broken_example.png new file mode 100644 index 00000000..4fea24e7 Binary files /dev/null and b/_static/broken_example.png differ diff --git a/_static/doctools.js b/_static/doctools.js new file mode 100644 index 00000000..d06a71d7 --- /dev/null +++ b/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 
0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/_static/documentation_options.js b/_static/documentation_options.js new file mode 100644 index 00000000..05f76e70 --- /dev/null +++ b/_static/documentation_options.js @@ -0,0 +1,14 @@ +var DOCUMENTATION_OPTIONS = { + 
URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/_static/file.png b/_static/file.png new file mode 100644 index 00000000..a858a410 Binary files /dev/null and b/_static/file.png differ diff --git a/_static/images/logo_binder.svg b/_static/images/logo_binder.svg new file mode 100644 index 00000000..45fecf75 --- /dev/null +++ b/_static/images/logo_binder.svg @@ -0,0 +1,19 @@ + + + + +logo + + + + + + + + diff --git a/_static/images/logo_colab.png b/_static/images/logo_colab.png new file mode 100644 index 00000000..b7560ec2 Binary files /dev/null and b/_static/images/logo_colab.png differ diff --git a/_static/images/logo_deepnote.svg b/_static/images/logo_deepnote.svg new file mode 100644 index 00000000..fa77ebfc --- /dev/null +++ b/_static/images/logo_deepnote.svg @@ -0,0 +1 @@ + diff --git a/_static/images/logo_jupyterhub.svg b/_static/images/logo_jupyterhub.svg new file mode 100644 index 00000000..60cfe9f2 --- /dev/null +++ b/_static/images/logo_jupyterhub.svg @@ -0,0 +1 @@ +logo_jupyterhubHub diff --git a/_static/jupyterlite_badge_logo.svg b/_static/jupyterlite_badge_logo.svg new file mode 100644 index 00000000..5de36d7f --- /dev/null +++ b/_static/jupyterlite_badge_logo.svg @@ -0,0 +1,3 @@ + + +launchlaunchlitelite \ No newline at end of file diff --git a/_static/language_data.js b/_static/language_data.js new file mode 100644 index 00000000..250f5665 --- /dev/null +++ b/_static/language_data.js @@ -0,0 +1,199 @@ +/* + * language_data.js + * ~~~~~~~~~~~~~~~~ + * + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, is available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/_static/locales/ar/LC_MESSAGES/booktheme.mo b/_static/locales/ar/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..15541a6a Binary files /dev/null and b/_static/locales/ar/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/ar/LC_MESSAGES/booktheme.po b/_static/locales/ar/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..edae2ec4 --- /dev/null +++ b/_static/locales/ar/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ar\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "موضوع بواسطة" + +msgid "Open an issue" +msgstr "افتح قضية" + +msgid "Contents" +msgstr "محتويات" + +msgid "Download notebook file" +msgstr "تنزيل ملف دفتر 
الملاحظات" + +msgid "Sphinx Book Theme" +msgstr "موضوع كتاب أبو الهول" + +msgid "Fullscreen mode" +msgstr "وضع ملء الشاشة" + +msgid "Edit this page" +msgstr "قم بتحرير هذه الصفحة" + +msgid "By" +msgstr "بواسطة" + +msgid "Copyright" +msgstr "حقوق النشر" + +msgid "Source repository" +msgstr "مستودع المصدر" + +msgid "previous page" +msgstr "الصفحة السابقة" + +msgid "next page" +msgstr "الصفحة التالية" + +msgid "Toggle navigation" +msgstr "تبديل التنقل" + +msgid "repository" +msgstr "مخزن" + +msgid "suggest edit" +msgstr "أقترح تحرير" + +msgid "open issue" +msgstr "قضية مفتوحة" + +msgid "Launch" +msgstr "إطلاق" + +msgid "Print to PDF" +msgstr "طباعة إلى PDF" + +msgid "By the" +msgstr "بواسطة" + +msgid "Last updated on" +msgstr "آخر تحديث في" + +msgid "Download source file" +msgstr "تنزيل ملف المصدر" + +msgid "Download this page" +msgstr "قم بتنزيل هذه الصفحة" diff --git a/_static/locales/bg/LC_MESSAGES/booktheme.mo b/_static/locales/bg/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..da951200 Binary files /dev/null and b/_static/locales/bg/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/bg/LC_MESSAGES/booktheme.po b/_static/locales/bg/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..1f363b9d --- /dev/null +++ b/_static/locales/bg/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: bg\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Тема от" + +msgid "Open an issue" +msgstr "Отворете проблем" + +msgid "Contents" +msgstr "Съдържание" + +msgid "Download notebook file" +msgstr "Изтеглете файла на бележника" + +msgid "Sphinx Book Theme" +msgstr "Тема на книгата Sphinx" + +msgid "Fullscreen mode" +msgstr "Режим на цял екран" + +msgid "Edit this page" +msgstr "Редактирайте тази страница" + +msgid "By" +msgstr "От" + +msgid "Copyright" +msgstr "Авторско право" + +msgid "Source repository" +msgstr "Хранилище на източника" + +msgid "previous page" +msgstr "предишна страница" + +msgid "next page" +msgstr "Следваща страница" + +msgid "Toggle navigation" +msgstr "Превключване на навигацията" + +msgid "repository" +msgstr "хранилище" + +msgid "suggest edit" +msgstr "предложи редактиране" + +msgid "open issue" +msgstr "отворен брой" + +msgid "Launch" +msgstr "Стартиране" + +msgid "Print to PDF" +msgstr "Печат в PDF" + +msgid "By the" +msgstr "По" + +msgid "Last updated on" +msgstr "Последна актуализация на" + +msgid "Download source file" +msgstr "Изтеглете изходния файл" + +msgid "Download this page" +msgstr "Изтеглете тази страница" diff --git a/_static/locales/bn/LC_MESSAGES/booktheme.mo b/_static/locales/bn/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..6b96639b Binary files /dev/null and b/_static/locales/bn/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/bn/LC_MESSAGES/booktheme.po b/_static/locales/bn/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..fa543728 --- /dev/null +++ b/_static/locales/bn/LC_MESSAGES/booktheme.po @@ -0,0 +1,63 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: bn\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "থিম দ্বারা" + +msgid "Open an issue" +msgstr "একটি সমস্যা খুলুন" + +msgid "Download notebook file" +msgstr "নোটবুক 
ফাইল ডাউনলোড করুন" + +msgid "Sphinx Book Theme" +msgstr "স্পিনিক্স বুক থিম" + +msgid "Edit this page" +msgstr "এই পৃষ্ঠাটি সম্পাদনা করুন" + +msgid "By" +msgstr "দ্বারা" + +msgid "Copyright" +msgstr "কপিরাইট" + +msgid "Source repository" +msgstr "উত্স সংগ্রহস্থল" + +msgid "previous page" +msgstr "আগের পৃষ্ঠা" + +msgid "next page" +msgstr "পরবর্তী পৃষ্ঠা" + +msgid "Toggle navigation" +msgstr "নেভিগেশন টগল করুন" + +msgid "open issue" +msgstr "খোলা সমস্যা" + +msgid "Launch" +msgstr "শুরু করা" + +msgid "Print to PDF" +msgstr "পিডিএফ প্রিন্ট করুন" + +msgid "By the" +msgstr "দ্বারা" + +msgid "Last updated on" +msgstr "সর্বশেষ আপডেট" + +msgid "Download source file" +msgstr "উত্স ফাইল ডাউনলোড করুন" + +msgid "Download this page" +msgstr "এই পৃষ্ঠাটি ডাউনলোড করুন" diff --git a/_static/locales/ca/LC_MESSAGES/booktheme.mo b/_static/locales/ca/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..a4dd30e9 Binary files /dev/null and b/_static/locales/ca/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/ca/LC_MESSAGES/booktheme.po b/_static/locales/ca/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..22f1569a --- /dev/null +++ b/_static/locales/ca/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ca\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema del" + +msgid "Open an issue" +msgstr "Obriu un número" + +msgid "Download notebook file" +msgstr "Descarregar fitxer de quadern" + +msgid "Sphinx Book Theme" +msgstr "Tema del llibre Esfinx" + +msgid "Edit this page" +msgstr "Editeu aquesta pàgina" + +msgid "By" +msgstr "Per" + +msgid "Copyright" +msgstr "Copyright" + +msgid "Source repository" +msgstr "Dipòsit de fonts" + +msgid "previous page" +msgstr "Pàgina anterior" + +msgid "next page" +msgstr "pàgina següent" + +msgid "Toggle navigation" +msgstr "Commuta la navegació" + +msgid "suggest edit" +msgstr "suggerir edició" + +msgid "open issue" +msgstr "número obert" + +msgid "Launch" +msgstr "Llançament" + +msgid "Print to PDF" +msgstr "Imprimeix a PDF" + +msgid "By the" +msgstr "Per la" + +msgid "Last updated on" +msgstr "Darrera actualització el" + +msgid "Download source file" +msgstr "Baixeu el fitxer font" + +msgid "Download this page" +msgstr "Descarregueu aquesta pàgina" diff --git a/_static/locales/cs/LC_MESSAGES/booktheme.mo b/_static/locales/cs/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..c39e01a6 Binary files /dev/null and b/_static/locales/cs/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/cs/LC_MESSAGES/booktheme.po b/_static/locales/cs/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..afecd9e7 --- /dev/null +++ b/_static/locales/cs/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: cs\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Téma od" + +msgid "Open an issue" +msgstr "Otevřete problém" + +msgid "Contents" +msgstr "Obsah" + +msgid "Download notebook file" +msgstr "Stáhnout soubor poznámkového bloku" + +msgid "Sphinx Book Theme" +msgstr "Téma knihy Sfinga" + +msgid "Fullscreen mode" +msgstr "Režim celé obrazovky" + +msgid "Edit this page" +msgstr "Upravit tuto stránku" + +msgid "By" +msgstr "Podle" + 
+msgid "Copyright" +msgstr "autorská práva" + +msgid "Source repository" +msgstr "Zdrojové úložiště" + +msgid "previous page" +msgstr "předchozí stránka" + +msgid "next page" +msgstr "další strana" + +msgid "Toggle navigation" +msgstr "Přepnout navigaci" + +msgid "repository" +msgstr "úložiště" + +msgid "suggest edit" +msgstr "navrhnout úpravy" + +msgid "open issue" +msgstr "otevřené číslo" + +msgid "Launch" +msgstr "Zahájení" + +msgid "Print to PDF" +msgstr "Tisk do PDF" + +msgid "By the" +msgstr "Podle" + +msgid "Last updated on" +msgstr "Naposledy aktualizováno" + +msgid "Download source file" +msgstr "Stáhněte si zdrojový soubor" + +msgid "Download this page" +msgstr "Stáhněte si tuto stránku" diff --git a/_static/locales/da/LC_MESSAGES/booktheme.mo b/_static/locales/da/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..f43157d7 Binary files /dev/null and b/_static/locales/da/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/da/LC_MESSAGES/booktheme.po b/_static/locales/da/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..649c78a8 --- /dev/null +++ b/_static/locales/da/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: da\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema af" + +msgid "Open an issue" +msgstr "Åbn et problem" + +msgid "Contents" +msgstr "Indhold" + +msgid "Download notebook file" +msgstr "Download notesbog-fil" + +msgid "Sphinx Book Theme" +msgstr "Sphinx bogtema" + +msgid "Fullscreen mode" +msgstr "Fuldskærmstilstand" + +msgid "Edit this page" +msgstr "Rediger denne side" + +msgid "By" +msgstr "Ved" + +msgid "Copyright" +msgstr "ophavsret" + +msgid "Source repository" +msgstr "Kildelager" + +msgid "previous page" +msgstr "forrige side" + +msgid "next page" +msgstr "Næste side" + +msgid "Toggle navigation" +msgstr "Skift navigation" + +msgid "repository" +msgstr "lager" + +msgid "suggest edit" +msgstr "foreslå redigering" + +msgid "open issue" +msgstr "åbent nummer" + +msgid "Launch" +msgstr "Start" + +msgid "Print to PDF" +msgstr "Udskriv til PDF" + +msgid "By the" +msgstr "Ved" + +msgid "Last updated on" +msgstr "Sidst opdateret den" + +msgid "Download source file" +msgstr "Download kildefil" + +msgid "Download this page" +msgstr "Download denne side" diff --git a/_static/locales/de/LC_MESSAGES/booktheme.mo b/_static/locales/de/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..648b565c Binary files /dev/null and b/_static/locales/de/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/de/LC_MESSAGES/booktheme.po b/_static/locales/de/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..f51d2ecc --- /dev/null +++ b/_static/locales/de/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: de\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Thema von der" + +msgid "Open an issue" +msgstr "Öffnen Sie ein Problem" + +msgid "Contents" +msgstr "Inhalt" + +msgid "Download notebook file" +msgstr "Notebook-Datei herunterladen" + +msgid "Sphinx Book Theme" +msgstr "Sphinx-Buch-Thema" + +msgid "Fullscreen mode" +msgstr "Vollbildmodus" + +msgid "Edit this page" +msgstr "Bearbeite diese Seite" + +msgid "By" 
+msgstr "Durch" + +msgid "Copyright" +msgstr "Urheberrechte ©" + +msgid "Source repository" +msgstr "Quell-Repository" + +msgid "previous page" +msgstr "vorherige Seite" + +msgid "next page" +msgstr "Nächste Seite" + +msgid "Toggle navigation" +msgstr "Navigation umschalten" + +msgid "repository" +msgstr "Repository" + +msgid "suggest edit" +msgstr "vorschlagen zu bearbeiten" + +msgid "open issue" +msgstr "offenes Thema" + +msgid "Launch" +msgstr "Starten" + +msgid "Print to PDF" +msgstr "In PDF drucken" + +msgid "By the" +msgstr "Bis zum" + +msgid "Last updated on" +msgstr "Zuletzt aktualisiert am" + +msgid "Download source file" +msgstr "Quelldatei herunterladen" + +msgid "Download this page" +msgstr "Laden Sie diese Seite herunter" diff --git a/_static/locales/el/LC_MESSAGES/booktheme.mo b/_static/locales/el/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..fca6e935 Binary files /dev/null and b/_static/locales/el/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/el/LC_MESSAGES/booktheme.po b/_static/locales/el/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..8bec7905 --- /dev/null +++ b/_static/locales/el/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: el\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Θέμα από το" + +msgid "Open an issue" +msgstr "Ανοίξτε ένα ζήτημα" + +msgid "Contents" +msgstr "Περιεχόμενα" + +msgid "Download notebook file" +msgstr "Λήψη αρχείου σημειωματάριου" + +msgid "Sphinx Book Theme" +msgstr "Θέμα βιβλίου Sphinx" + +msgid "Fullscreen mode" +msgstr "ΛΕΙΤΟΥΡΓΙΑ ΠΛΗΡΟΥΣ ΟΘΟΝΗΣ" + +msgid "Edit this page" +msgstr "Επεξεργαστείτε αυτήν τη σελίδα" + +msgid "By" +msgstr "Με" + +msgid "Copyright" +msgstr "Πνευματική ιδιοκτησία" + +msgid "Source repository" +msgstr "Αποθήκη πηγής" + +msgid "previous page" +msgstr "προηγούμενη σελίδα" + +msgid "next page" +msgstr "επόμενη σελίδα" + +msgid "Toggle navigation" +msgstr "Εναλλαγή πλοήγησης" + +msgid "repository" +msgstr "αποθήκη" + +msgid "suggest edit" +msgstr "προτείνω επεξεργασία" + +msgid "open issue" +msgstr "ανοιχτό ζήτημα" + +msgid "Launch" +msgstr "Εκτόξευση" + +msgid "Print to PDF" +msgstr "Εκτύπωση σε PDF" + +msgid "By the" +msgstr "Από το" + +msgid "Last updated on" +msgstr "Τελευταία ενημέρωση στις" + +msgid "Download source file" +msgstr "Λήψη αρχείου προέλευσης" + +msgid "Download this page" +msgstr "Λήψη αυτής της σελίδας" diff --git a/_static/locales/eo/LC_MESSAGES/booktheme.mo b/_static/locales/eo/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..d1072bbe Binary files /dev/null and b/_static/locales/eo/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/eo/LC_MESSAGES/booktheme.po b/_static/locales/eo/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..d72a0481 --- /dev/null +++ b/_static/locales/eo/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: eo\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Temo de la" + +msgid "Open an issue" +msgstr "Malfermu numeron" + +msgid "Contents" +msgstr "Enhavo" + +msgid "Download notebook file" +msgstr "Elŝutu kajeran dosieron" + +msgid "Sphinx Book Theme" +msgstr "Sfinksa Libro-Temo" + 
+msgid "Fullscreen mode" +msgstr "Plenekrana reĝimo" + +msgid "Edit this page" +msgstr "Redaktu ĉi tiun paĝon" + +msgid "By" +msgstr "De" + +msgid "Copyright" +msgstr "Kopirajto" + +msgid "Source repository" +msgstr "Fonto-deponejo" + +msgid "previous page" +msgstr "antaŭa paĝo" + +msgid "next page" +msgstr "sekva paĝo" + +msgid "Toggle navigation" +msgstr "Ŝalti navigadon" + +msgid "repository" +msgstr "deponejo" + +msgid "suggest edit" +msgstr "sugesti redaktadon" + +msgid "open issue" +msgstr "malferma numero" + +msgid "Launch" +msgstr "Lanĉo" + +msgid "Print to PDF" +msgstr "Presi al PDF" + +msgid "By the" +msgstr "Per la" + +msgid "Last updated on" +msgstr "Laste ĝisdatigita la" + +msgid "Download source file" +msgstr "Elŝutu fontodosieron" + +msgid "Download this page" +msgstr "Elŝutu ĉi tiun paĝon" diff --git a/_static/locales/es/LC_MESSAGES/booktheme.mo b/_static/locales/es/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..ba2ee4dc Binary files /dev/null and b/_static/locales/es/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/es/LC_MESSAGES/booktheme.po b/_static/locales/es/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..611834b2 --- /dev/null +++ b/_static/locales/es/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: es\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema por el" + +msgid "Open an issue" +msgstr "Abrir un problema" + +msgid "Contents" +msgstr "Contenido" + +msgid "Download notebook file" +msgstr "Descargar archivo de cuaderno" + +msgid "Sphinx Book Theme" +msgstr "Tema del libro de la esfinge" + +msgid "Fullscreen mode" +msgstr "Modo de pantalla completa" + +msgid "Edit this page" +msgstr "Edita esta página" + +msgid "By" +msgstr "Por" + +msgid "Copyright" +msgstr "Derechos de autor" + +msgid "Source repository" +msgstr "Repositorio de origen" + +msgid "previous page" +msgstr "pagina anterior" + +msgid "next page" +msgstr "siguiente página" + +msgid "Toggle navigation" +msgstr "Navegación de palanca" + +msgid "repository" +msgstr "repositorio" + +msgid "suggest edit" +msgstr "sugerir editar" + +msgid "open issue" +msgstr "Tema abierto" + +msgid "Launch" +msgstr "Lanzamiento" + +msgid "Print to PDF" +msgstr "Imprimir en PDF" + +msgid "By the" +msgstr "Por el" + +msgid "Last updated on" +msgstr "Ultima actualización en" + +msgid "Download source file" +msgstr "Descargar archivo fuente" + +msgid "Download this page" +msgstr "Descarga esta pagina" diff --git a/_static/locales/et/LC_MESSAGES/booktheme.mo b/_static/locales/et/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..983b8239 Binary files /dev/null and b/_static/locales/et/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/et/LC_MESSAGES/booktheme.po b/_static/locales/et/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..345088f0 --- /dev/null +++ b/_static/locales/et/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: et\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Teema" + +msgid "Open an issue" +msgstr "Avage probleem" + +msgid "Contents" +msgstr "Sisu" + +msgid "Download notebook file" +msgstr "Laadige sülearvuti fail alla" 
+ +msgid "Sphinx Book Theme" +msgstr "Sfinksiraamatu teema" + +msgid "Fullscreen mode" +msgstr "Täisekraanirežiim" + +msgid "Edit this page" +msgstr "Muutke seda lehte" + +msgid "By" +msgstr "Kõrval" + +msgid "Copyright" +msgstr "Autoriõigus" + +msgid "Source repository" +msgstr "Allikahoidla" + +msgid "previous page" +msgstr "eelmine leht" + +msgid "next page" +msgstr "järgmine leht" + +msgid "Toggle navigation" +msgstr "Lülita navigeerimine sisse" + +msgid "repository" +msgstr "hoidla" + +msgid "suggest edit" +msgstr "soovita muuta" + +msgid "open issue" +msgstr "avatud küsimus" + +msgid "Launch" +msgstr "Käivitage" + +msgid "Print to PDF" +msgstr "Prindi PDF-i" + +msgid "By the" +msgstr "Autor" + +msgid "Last updated on" +msgstr "Viimati uuendatud" + +msgid "Download source file" +msgstr "Laadige alla lähtefail" + +msgid "Download this page" +msgstr "Laadige see leht alla" diff --git a/_static/locales/fi/LC_MESSAGES/booktheme.mo b/_static/locales/fi/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..d8ac0545 Binary files /dev/null and b/_static/locales/fi/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/fi/LC_MESSAGES/booktheme.po b/_static/locales/fi/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..d97a08dc --- /dev/null +++ b/_static/locales/fi/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: fi\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Teeman tekijä" + +msgid "Open an issue" +msgstr "Avaa ongelma" + +msgid "Contents" +msgstr "Sisällys" + +msgid "Download notebook file" +msgstr "Lataa muistikirjatiedosto" + +msgid "Sphinx Book Theme" +msgstr "Sphinx-kirjan teema" + +msgid "Fullscreen mode" +msgstr "Koko näytön tila" + +msgid "Edit this page" +msgstr "Muokkaa tätä sivua" + +msgid "By" +msgstr "Tekijä" + +msgid "Copyright" +msgstr "Tekijänoikeus" + +msgid "Source repository" +msgstr "Lähteen arkisto" + +msgid "previous page" +msgstr "Edellinen sivu" + +msgid "next page" +msgstr "seuraava sivu" + +msgid "Toggle navigation" +msgstr "Vaihda navigointia" + +msgid "repository" +msgstr "arkisto" + +msgid "suggest edit" +msgstr "ehdottaa muokkausta" + +msgid "open issue" +msgstr "avoin ongelma" + +msgid "Launch" +msgstr "Tuoda markkinoille" + +msgid "Print to PDF" +msgstr "Tulosta PDF-tiedostoon" + +msgid "By the" +msgstr "Mukaan" + +msgid "Last updated on" +msgstr "Viimeksi päivitetty" + +msgid "Download source file" +msgstr "Lataa lähdetiedosto" + +msgid "Download this page" +msgstr "Lataa tämä sivu" diff --git a/_static/locales/fr/LC_MESSAGES/booktheme.mo b/_static/locales/fr/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..f663d39f Binary files /dev/null and b/_static/locales/fr/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/fr/LC_MESSAGES/booktheme.po b/_static/locales/fr/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..88f35173 --- /dev/null +++ b/_static/locales/fr/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: fr\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Thème par le" + +msgid "Open an issue" +msgstr "Ouvrez un problème" + +msgid "Contents" +msgstr "Contenu" + +msgid "Download 
notebook file" +msgstr "Télécharger le fichier notebook" + +msgid "Sphinx Book Theme" +msgstr "Thème du livre Sphinx" + +msgid "Fullscreen mode" +msgstr "Mode plein écran" + +msgid "Edit this page" +msgstr "Modifier cette page" + +msgid "By" +msgstr "Par" + +msgid "Copyright" +msgstr "droits d'auteur" + +msgid "Source repository" +msgstr "Dépôt source" + +msgid "previous page" +msgstr "page précédente" + +msgid "next page" +msgstr "page suivante" + +msgid "Toggle navigation" +msgstr "Basculer la navigation" + +msgid "repository" +msgstr "dépôt" + +msgid "suggest edit" +msgstr "suggestion de modification" + +msgid "open issue" +msgstr "signaler un problème" + +msgid "Launch" +msgstr "lancement" + +msgid "Print to PDF" +msgstr "Imprimer au format PDF" + +msgid "By the" +msgstr "Par le" + +msgid "Last updated on" +msgstr "Dernière mise à jour le" + +msgid "Download source file" +msgstr "Télécharger le fichier source" + +msgid "Download this page" +msgstr "Téléchargez cette page" diff --git a/_static/locales/hr/LC_MESSAGES/booktheme.mo b/_static/locales/hr/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..eca4a1a2 Binary files /dev/null and b/_static/locales/hr/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/hr/LC_MESSAGES/booktheme.po b/_static/locales/hr/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..fb9440ac --- /dev/null +++ b/_static/locales/hr/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: hr\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema autora" + +msgid "Open an issue" +msgstr "Otvorite izdanje" + +msgid "Contents" +msgstr "Sadržaj" + +msgid "Download notebook file" +msgstr "Preuzmi datoteku bilježnice" + +msgid "Sphinx Book Theme" +msgstr "Tema knjige Sphinx" + +msgid "Fullscreen mode" +msgstr "Način preko cijelog zaslona" + +msgid "Edit this page" +msgstr "Uredite ovu stranicu" + +msgid "By" +msgstr "Po" + +msgid "Copyright" +msgstr "Autorska prava" + +msgid "Source repository" +msgstr "Izvorno spremište" + +msgid "previous page" +msgstr "Prethodna stranica" + +msgid "next page" +msgstr "sljedeća stranica" + +msgid "Toggle navigation" +msgstr "Uključi / isključi navigaciju" + +msgid "repository" +msgstr "spremište" + +msgid "suggest edit" +msgstr "predloži uređivanje" + +msgid "open issue" +msgstr "otvoreno izdanje" + +msgid "Launch" +msgstr "Pokrenite" + +msgid "Print to PDF" +msgstr "Ispis u PDF" + +msgid "By the" +msgstr "Od strane" + +msgid "Last updated on" +msgstr "Posljednje ažuriranje:" + +msgid "Download source file" +msgstr "Preuzmi izvornu datoteku" + +msgid "Download this page" +msgstr "Preuzmite ovu stranicu" diff --git a/_static/locales/id/LC_MESSAGES/booktheme.mo b/_static/locales/id/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..d07a06a9 Binary files /dev/null and b/_static/locales/id/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/id/LC_MESSAGES/booktheme.po b/_static/locales/id/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..9ffb56f7 --- /dev/null +++ b/_static/locales/id/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: id\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" 
+msgstr "Tema oleh" + +msgid "Open an issue" +msgstr "Buka masalah" + +msgid "Contents" +msgstr "Isi" + +msgid "Download notebook file" +msgstr "Unduh file notebook" + +msgid "Sphinx Book Theme" +msgstr "Tema Buku Sphinx" + +msgid "Fullscreen mode" +msgstr "Mode layar penuh" + +msgid "Edit this page" +msgstr "Edit halaman ini" + +msgid "By" +msgstr "Oleh" + +msgid "Copyright" +msgstr "hak cipta" + +msgid "Source repository" +msgstr "Repositori sumber" + +msgid "previous page" +msgstr "halaman sebelumnya" + +msgid "next page" +msgstr "halaman selanjutnya" + +msgid "Toggle navigation" +msgstr "Alihkan navigasi" + +msgid "repository" +msgstr "gudang" + +msgid "suggest edit" +msgstr "menyarankan edit" + +msgid "open issue" +msgstr "masalah terbuka" + +msgid "Launch" +msgstr "Meluncurkan" + +msgid "Print to PDF" +msgstr "Cetak ke PDF" + +msgid "By the" +msgstr "Oleh" + +msgid "Last updated on" +msgstr "Terakhir diperbarui saat" + +msgid "Download source file" +msgstr "Unduh file sumber" + +msgid "Download this page" +msgstr "Unduh halaman ini" diff --git a/_static/locales/it/LC_MESSAGES/booktheme.mo b/_static/locales/it/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..53ba476e Binary files /dev/null and b/_static/locales/it/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/it/LC_MESSAGES/booktheme.po b/_static/locales/it/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..04308dd2 --- /dev/null +++ b/_static/locales/it/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: it\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema di" + +msgid "Open an issue" +msgstr "Apri un problema" + +msgid "Contents" +msgstr "Contenuti" + +msgid "Download notebook file" +msgstr "Scarica il file del taccuino" + +msgid "Sphinx Book Theme" +msgstr "Tema del libro della Sfinge" + +msgid "Fullscreen mode" +msgstr "Modalità schermo intero" + +msgid "Edit this page" +msgstr "Modifica questa pagina" + +msgid "By" +msgstr "Di" + +msgid "Copyright" +msgstr "Diritto d'autore" + +msgid "Source repository" +msgstr "Repository di origine" + +msgid "previous page" +msgstr "pagina precedente" + +msgid "next page" +msgstr "pagina successiva" + +msgid "Toggle navigation" +msgstr "Attiva / disattiva la navigazione" + +msgid "repository" +msgstr "repository" + +msgid "suggest edit" +msgstr "suggerisci modifica" + +msgid "open issue" +msgstr "questione aperta" + +msgid "Launch" +msgstr "Lanciare" + +msgid "Print to PDF" +msgstr "Stampa in PDF" + +msgid "By the" +msgstr "Dal" + +msgid "Last updated on" +msgstr "Ultimo aggiornamento il" + +msgid "Download source file" +msgstr "Scarica il file sorgente" + +msgid "Download this page" +msgstr "Scarica questa pagina" diff --git a/_static/locales/iw/LC_MESSAGES/booktheme.mo b/_static/locales/iw/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..a45c6575 Binary files /dev/null and b/_static/locales/iw/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/iw/LC_MESSAGES/booktheme.po b/_static/locales/iw/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..4ea190d3 --- /dev/null +++ b/_static/locales/iw/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: 
iw\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "נושא מאת" + +msgid "Open an issue" +msgstr "פתח גיליון" + +msgid "Contents" +msgstr "תוכן" + +msgid "Download notebook file" +msgstr "הורד קובץ מחברת" + +msgid "Sphinx Book Theme" +msgstr "נושא ספר ספינקס" + +msgid "Fullscreen mode" +msgstr "מצב מסך מלא" + +msgid "Edit this page" +msgstr "ערוך דף זה" + +msgid "By" +msgstr "על ידי" + +msgid "Copyright" +msgstr "זכויות יוצרים" + +msgid "Source repository" +msgstr "מאגר המקורות" + +msgid "previous page" +msgstr "עמוד קודם" + +msgid "next page" +msgstr "עמוד הבא" + +msgid "Toggle navigation" +msgstr "החלף ניווט" + +msgid "repository" +msgstr "מאגר" + +msgid "suggest edit" +msgstr "מציע לערוך" + +msgid "open issue" +msgstr "בעיה פתוחה" + +msgid "Launch" +msgstr "לְהַשִׁיק" + +msgid "Print to PDF" +msgstr "הדפס לקובץ PDF" + +msgid "By the" +msgstr "דרך" + +msgid "Last updated on" +msgstr "עודכן לאחרונה ב" + +msgid "Download source file" +msgstr "הורד את קובץ המקור" + +msgid "Download this page" +msgstr "הורד דף זה" diff --git a/_static/locales/ja/LC_MESSAGES/booktheme.mo b/_static/locales/ja/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..1cefd29c Binary files /dev/null and b/_static/locales/ja/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/ja/LC_MESSAGES/booktheme.po b/_static/locales/ja/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..77d5a097 --- /dev/null +++ b/_static/locales/ja/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ja\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "のテーマ" + +msgid "Open an issue" +msgstr "問題を報告" + +msgid "Contents" +msgstr "目次" + +msgid "Download notebook file" +msgstr "ノートブックファイルをダウンロード" + +msgid "Sphinx Book Theme" +msgstr "スフィンクスの本のテーマ" + +msgid "Fullscreen mode" +msgstr "全画面モード" + +msgid "Edit this page" +msgstr "このページを編集" + +msgid "By" +msgstr "著者" + +msgid "Copyright" +msgstr "Copyright" + +msgid "Source repository" +msgstr "ソースリポジトリ" + +msgid "previous page" +msgstr "前のページ" + +msgid "next page" +msgstr "次のページ" + +msgid "Toggle navigation" +msgstr "ナビゲーションを切り替え" + +msgid "repository" +msgstr "リポジトリ" + +msgid "suggest edit" +msgstr "編集を提案する" + +msgid "open issue" +msgstr "未解決の問題" + +msgid "Launch" +msgstr "起動" + +msgid "Print to PDF" +msgstr "PDFに印刷" + +msgid "By the" +msgstr "によって" + +msgid "Last updated on" +msgstr "最終更新日" + +msgid "Download source file" +msgstr "ソースファイルをダウンロード" + +msgid "Download this page" +msgstr "このページをダウンロード" diff --git a/_static/locales/ko/LC_MESSAGES/booktheme.mo b/_static/locales/ko/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..06c7ec93 Binary files /dev/null and b/_static/locales/ko/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/ko/LC_MESSAGES/booktheme.po b/_static/locales/ko/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..6ee3d781 --- /dev/null +++ b/_static/locales/ko/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ko\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "테마별" + +msgid "Open an issue" +msgstr "이슈 열기" + +msgid "Contents" +msgstr "내용" + +msgid "Download notebook file" +msgstr "노트북 파일 
다운로드" + +msgid "Sphinx Book Theme" +msgstr "스핑크스 도서 테마" + +msgid "Fullscreen mode" +msgstr "전체 화면으로보기" + +msgid "Edit this page" +msgstr "이 페이지 편집" + +msgid "By" +msgstr "으로" + +msgid "Copyright" +msgstr "저작권" + +msgid "Source repository" +msgstr "소스 저장소" + +msgid "previous page" +msgstr "이전 페이지" + +msgid "next page" +msgstr "다음 페이지" + +msgid "Toggle navigation" +msgstr "탐색 전환" + +msgid "repository" +msgstr "저장소" + +msgid "suggest edit" +msgstr "편집 제안" + +msgid "open issue" +msgstr "열린 문제" + +msgid "Launch" +msgstr "시작하다" + +msgid "Print to PDF" +msgstr "PDF로 인쇄" + +msgid "By the" +msgstr "에 의해" + +msgid "Last updated on" +msgstr "마지막 업데이트" + +msgid "Download source file" +msgstr "소스 파일 다운로드" + +msgid "Download this page" +msgstr "이 페이지 다운로드" diff --git a/_static/locales/lt/LC_MESSAGES/booktheme.mo b/_static/locales/lt/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..4468ba04 Binary files /dev/null and b/_static/locales/lt/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/lt/LC_MESSAGES/booktheme.po b/_static/locales/lt/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..01be2679 --- /dev/null +++ b/_static/locales/lt/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: lt\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema" + +msgid "Open an issue" +msgstr "Atidarykite problemą" + +msgid "Contents" +msgstr "Turinys" + +msgid "Download notebook file" +msgstr "Atsisiųsti nešiojamojo kompiuterio failą" + +msgid "Sphinx Book Theme" +msgstr "Sfinkso knygos tema" + +msgid "Fullscreen mode" +msgstr "Pilno ekrano režimas" + +msgid "Edit this page" +msgstr "Redaguoti šį puslapį" + +msgid "By" +msgstr "Iki" + +msgid "Copyright" +msgstr "Autorių teisės" + +msgid "Source repository" +msgstr "Šaltinio saugykla" + +msgid "previous page" +msgstr "Ankstesnis puslapis" + +msgid "next page" +msgstr "Kitas puslapis" + +msgid "Toggle navigation" +msgstr "Perjungti naršymą" + +msgid "repository" +msgstr "saugykla" + +msgid "suggest edit" +msgstr "pasiūlyti redaguoti" + +msgid "open issue" +msgstr "atviras klausimas" + +msgid "Launch" +msgstr "Paleiskite" + +msgid "Print to PDF" +msgstr "Spausdinti į PDF" + +msgid "By the" +msgstr "Prie" + +msgid "Last updated on" +msgstr "Paskutinį kartą atnaujinta" + +msgid "Download source file" +msgstr "Atsisiųsti šaltinio failą" + +msgid "Download this page" +msgstr "Atsisiųskite šį puslapį" diff --git a/_static/locales/lv/LC_MESSAGES/booktheme.mo b/_static/locales/lv/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..74aa4d89 Binary files /dev/null and b/_static/locales/lv/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/lv/LC_MESSAGES/booktheme.po b/_static/locales/lv/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..993a1e41 --- /dev/null +++ b/_static/locales/lv/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: lv\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Autora tēma" + +msgid "Open an issue" +msgstr "Atveriet problēmu" + +msgid "Contents" +msgstr "Saturs" + +msgid "Download notebook file" +msgstr "Lejupielādēt piezīmju grāmatiņu" + +msgid "Sphinx Book Theme" +msgstr "Sfinksa 
grāmatas tēma" + +msgid "Fullscreen mode" +msgstr "Pilnekrāna režīms" + +msgid "Edit this page" +msgstr "Rediģēt šo lapu" + +msgid "By" +msgstr "Autors" + +msgid "Copyright" +msgstr "Autortiesības" + +msgid "Source repository" +msgstr "Avota krātuve" + +msgid "previous page" +msgstr "iepriekšējā lapa" + +msgid "next page" +msgstr "nākamā lapaspuse" + +msgid "Toggle navigation" +msgstr "Pārslēgt navigāciju" + +msgid "repository" +msgstr "krātuve" + +msgid "suggest edit" +msgstr "ieteikt rediģēt" + +msgid "open issue" +msgstr "atklāts jautājums" + +msgid "Launch" +msgstr "Uzsākt" + +msgid "Print to PDF" +msgstr "Drukāt PDF formātā" + +msgid "By the" +msgstr "Ar" + +msgid "Last updated on" +msgstr "Pēdējoreiz atjaunināts" + +msgid "Download source file" +msgstr "Lejupielādēt avota failu" + +msgid "Download this page" +msgstr "Lejupielādējiet šo lapu" diff --git a/_static/locales/ml/LC_MESSAGES/booktheme.mo b/_static/locales/ml/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..2736e8fc Binary files /dev/null and b/_static/locales/ml/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/ml/LC_MESSAGES/booktheme.po b/_static/locales/ml/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..81daf7c8 --- /dev/null +++ b/_static/locales/ml/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ml\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "പ്രമേയം" + +msgid "Open an issue" +msgstr "ഒരു പ്രശ്നം തുറക്കുക" + +msgid "Download notebook file" +msgstr "നോട്ട്ബുക്ക് ഫയൽ ഡൺലോഡ് ചെയ്യുക" + +msgid "Sphinx Book Theme" +msgstr "സ്ഫിങ്ക്സ് പുസ്തക തീം" + +msgid "Edit this page" +msgstr "ഈ പേജ് എഡിറ്റുചെയ്യുക" + +msgid "By" +msgstr "എഴുതിയത്" + +msgid "Copyright" +msgstr "പകർപ്പവകാശം" + +msgid "Source repository" +msgstr "ഉറവിട ശേഖരം" + +msgid "previous page" +msgstr "മുൻപത്തെ താൾ" + +msgid "next page" +msgstr "അടുത്ത പേജ്" + +msgid "Toggle navigation" +msgstr "നാവിഗേഷൻ ടോഗിൾ ചെയ്യുക" + +msgid "suggest edit" +msgstr "എഡിറ്റുചെയ്യാൻ നിർദ്ദേശിക്കുക" + +msgid "open issue" +msgstr "തുറന്ന പ്രശ്നം" + +msgid "Launch" +msgstr "സമാരംഭിക്കുക" + +msgid "Print to PDF" +msgstr "PDF- ലേക്ക് പ്രിന്റുചെയ്യുക" + +msgid "By the" +msgstr "എഴുതിയത്" + +msgid "Last updated on" +msgstr "അവസാനം അപ്‌ഡേറ്റുചെയ്‌തത്" + +msgid "Download source file" +msgstr "ഉറവിട ഫയൽ ഡൗൺലോഡുചെയ്യുക" + +msgid "Download this page" +msgstr "ഈ പേജ് ഡൗൺലോഡുചെയ്യുക" diff --git a/_static/locales/mr/LC_MESSAGES/booktheme.mo b/_static/locales/mr/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..fe530100 Binary files /dev/null and b/_static/locales/mr/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/mr/LC_MESSAGES/booktheme.po b/_static/locales/mr/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..fd857bff --- /dev/null +++ b/_static/locales/mr/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: mr\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "द्वारा थीम" + +msgid "Open an issue" +msgstr "एक मुद्दा उघडा" + +msgid "Download notebook file" +msgstr "नोटबुक फाईल डाउनलोड करा" + +msgid "Sphinx Book Theme" +msgstr "स्फिंक्स बुक थीम" + +msgid "Edit this page" +msgstr "हे पृष्ठ संपादित करा" + +msgid 
"By" +msgstr "द्वारा" + +msgid "Copyright" +msgstr "कॉपीराइट" + +msgid "Source repository" +msgstr "स्त्रोत भांडार" + +msgid "previous page" +msgstr "मागील पान" + +msgid "next page" +msgstr "पुढील पृष्ठ" + +msgid "Toggle navigation" +msgstr "नेव्हिगेशन टॉगल करा" + +msgid "suggest edit" +msgstr "संपादन सुचवा" + +msgid "open issue" +msgstr "खुला मुद्दा" + +msgid "Launch" +msgstr "लाँच करा" + +msgid "Print to PDF" +msgstr "पीडीएफवर मुद्रित करा" + +msgid "By the" +msgstr "द्वारा" + +msgid "Last updated on" +msgstr "अखेरचे अद्यतनित" + +msgid "Download source file" +msgstr "स्त्रोत फाइल डाउनलोड करा" + +msgid "Download this page" +msgstr "हे पृष्ठ डाउनलोड करा" diff --git a/_static/locales/ms/LC_MESSAGES/booktheme.mo b/_static/locales/ms/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..f02603fa Binary files /dev/null and b/_static/locales/ms/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/ms/LC_MESSAGES/booktheme.po b/_static/locales/ms/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..b616d70f --- /dev/null +++ b/_static/locales/ms/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ms\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema oleh" + +msgid "Open an issue" +msgstr "Buka masalah" + +msgid "Download notebook file" +msgstr "Muat turun fail buku nota" + +msgid "Sphinx Book Theme" +msgstr "Tema Buku Sphinx" + +msgid "Edit this page" +msgstr "Edit halaman ini" + +msgid "By" +msgstr "Oleh" + +msgid "Copyright" +msgstr "hak cipta" + +msgid "Source repository" +msgstr "Repositori sumber" + +msgid "previous page" +msgstr "halaman sebelumnya" + +msgid "next page" +msgstr "muka surat seterusnya" + +msgid "Toggle navigation" +msgstr "Togol navigasi" + +msgid "suggest edit" +msgstr "cadangkan edit" + +msgid "open issue" +msgstr "isu terbuka" + +msgid "Launch" +msgstr "Lancarkan" + +msgid "Print to PDF" +msgstr "Cetak ke PDF" + +msgid "By the" +msgstr "Oleh" + +msgid "Last updated on" +msgstr "Terakhir dikemas kini pada" + +msgid "Download source file" +msgstr "Muat turun fail sumber" + +msgid "Download this page" +msgstr "Muat turun halaman ini" diff --git a/_static/locales/nl/LC_MESSAGES/booktheme.mo b/_static/locales/nl/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..e59e7ecb Binary files /dev/null and b/_static/locales/nl/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/nl/LC_MESSAGES/booktheme.po b/_static/locales/nl/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..f16f4bcc --- /dev/null +++ b/_static/locales/nl/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: nl\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Thema door de" + +msgid "Open an issue" +msgstr "Open een probleem" + +msgid "Contents" +msgstr "Inhoud" + +msgid "Download notebook file" +msgstr "Download notebookbestand" + +msgid "Sphinx Book Theme" +msgstr "Sphinx-boekthema" + +msgid "Fullscreen mode" +msgstr "Volledig scherm" + +msgid "Edit this page" +msgstr "bewerk deze pagina" + +msgid "By" +msgstr "Door" + +msgid "Copyright" +msgstr "auteursrechten" + +msgid "Source repository" +msgstr "Bronopslagplaats" + +msgid "previous page" +msgstr 
"vorige pagina" + +msgid "next page" +msgstr "volgende bladzijde" + +msgid "Toggle navigation" +msgstr "Schakel navigatie" + +msgid "repository" +msgstr "repository" + +msgid "suggest edit" +msgstr "suggereren bewerken" + +msgid "open issue" +msgstr "open probleem" + +msgid "Launch" +msgstr "Lancering" + +msgid "Print to PDF" +msgstr "Afdrukken naar pdf" + +msgid "By the" +msgstr "Door de" + +msgid "Last updated on" +msgstr "Laatst geupdate op" + +msgid "Download source file" +msgstr "Download het bronbestand" + +msgid "Download this page" +msgstr "Download deze pagina" diff --git a/_static/locales/no/LC_MESSAGES/booktheme.mo b/_static/locales/no/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..6cd15c88 Binary files /dev/null and b/_static/locales/no/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/no/LC_MESSAGES/booktheme.po b/_static/locales/no/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..b1d304ee --- /dev/null +++ b/_static/locales/no/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: no\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema av" + +msgid "Open an issue" +msgstr "Åpne et problem" + +msgid "Contents" +msgstr "Innhold" + +msgid "Download notebook file" +msgstr "Last ned notatbokfilen" + +msgid "Sphinx Book Theme" +msgstr "Sphinx boktema" + +msgid "Fullscreen mode" +msgstr "Fullskjerm-modus" + +msgid "Edit this page" +msgstr "Rediger denne siden" + +msgid "By" +msgstr "Av" + +msgid "Copyright" +msgstr "opphavsrett" + +msgid "Source repository" +msgstr "Kildedepot" + +msgid "previous page" +msgstr "forrige side" + +msgid "next page" +msgstr "neste side" + +msgid "Toggle navigation" +msgstr "Bytt navigasjon" + +msgid "repository" +msgstr "oppbevaringssted" + +msgid "suggest edit" +msgstr "foreslå redigering" + +msgid "open issue" +msgstr "åpent nummer" + +msgid "Launch" +msgstr "Start" + +msgid "Print to PDF" +msgstr "Skriv ut til PDF" + +msgid "By the" +msgstr "Ved" + +msgid "Last updated on" +msgstr "Sist oppdatert den" + +msgid "Download source file" +msgstr "Last ned kildefilen" + +msgid "Download this page" +msgstr "Last ned denne siden" diff --git a/_static/locales/pl/LC_MESSAGES/booktheme.mo b/_static/locales/pl/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..9ebb584f Binary files /dev/null and b/_static/locales/pl/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/pl/LC_MESSAGES/booktheme.po b/_static/locales/pl/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..80d2c896 --- /dev/null +++ b/_static/locales/pl/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: pl\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Motyw autorstwa" + +msgid "Open an issue" +msgstr "Otwórz problem" + +msgid "Contents" +msgstr "Zawartość" + +msgid "Download notebook file" +msgstr "Pobierz plik notatnika" + +msgid "Sphinx Book Theme" +msgstr "Motyw książki Sphinx" + +msgid "Fullscreen mode" +msgstr "Pełny ekran" + +msgid "Edit this page" +msgstr "Edytuj tę strone" + +msgid "By" +msgstr "Przez" + +msgid "Copyright" +msgstr "prawa autorskie" + +msgid "Source repository" +msgstr "Repozytorium źródłowe" + +msgid 
"previous page" +msgstr "Poprzednia strona" + +msgid "next page" +msgstr "Następna strona" + +msgid "Toggle navigation" +msgstr "Przełącz nawigację" + +msgid "repository" +msgstr "magazyn" + +msgid "suggest edit" +msgstr "zaproponuj edycję" + +msgid "open issue" +msgstr "otwarty problem" + +msgid "Launch" +msgstr "Uruchomić" + +msgid "Print to PDF" +msgstr "Drukuj do PDF" + +msgid "By the" +msgstr "Przez" + +msgid "Last updated on" +msgstr "Ostatnia aktualizacja" + +msgid "Download source file" +msgstr "Pobierz plik źródłowy" + +msgid "Download this page" +msgstr "Pobierz tę stronę" diff --git a/_static/locales/pt/LC_MESSAGES/booktheme.mo b/_static/locales/pt/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..d0ddb872 Binary files /dev/null and b/_static/locales/pt/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/pt/LC_MESSAGES/booktheme.po b/_static/locales/pt/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..45ac847f --- /dev/null +++ b/_static/locales/pt/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: pt\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema por" + +msgid "Open an issue" +msgstr "Abra um problema" + +msgid "Contents" +msgstr "Conteúdo" + +msgid "Download notebook file" +msgstr "Baixar arquivo de notebook" + +msgid "Sphinx Book Theme" +msgstr "Tema do livro Sphinx" + +msgid "Fullscreen mode" +msgstr "Modo tela cheia" + +msgid "Edit this page" +msgstr "Edite essa página" + +msgid "By" +msgstr "De" + +msgid "Copyright" +msgstr "direito autoral" + +msgid "Source repository" +msgstr "Repositório fonte" + +msgid "previous page" +msgstr "página anterior" + +msgid "next page" +msgstr "próxima página" + +msgid "Toggle navigation" +msgstr "Alternar de navegação" + +msgid "repository" +msgstr "repositório" + +msgid "suggest edit" +msgstr "sugerir edição" + +msgid "open issue" +msgstr "questão aberta" + +msgid "Launch" +msgstr "Lançamento" + +msgid "Print to PDF" +msgstr "Imprimir em PDF" + +msgid "By the" +msgstr "Pelo" + +msgid "Last updated on" +msgstr "Última atualização em" + +msgid "Download source file" +msgstr "Baixar arquivo fonte" + +msgid "Download this page" +msgstr "Baixe esta página" diff --git a/_static/locales/ro/LC_MESSAGES/booktheme.mo b/_static/locales/ro/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..3c36ab1d Binary files /dev/null and b/_static/locales/ro/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/ro/LC_MESSAGES/booktheme.po b/_static/locales/ro/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..532b3b84 --- /dev/null +++ b/_static/locales/ro/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ro\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema de" + +msgid "Open an issue" +msgstr "Deschideți o problemă" + +msgid "Contents" +msgstr "Cuprins" + +msgid "Download notebook file" +msgstr "Descărcați fișierul notebook" + +msgid "Sphinx Book Theme" +msgstr "Tema Sphinx Book" + +msgid "Fullscreen mode" +msgstr "Modul ecran întreg" + +msgid "Edit this page" +msgstr "Editați această pagină" + +msgid "By" +msgstr "De" + +msgid "Copyright" +msgstr "Drepturi de autor" + +msgid 
"Source repository" +msgstr "Depozit sursă" + +msgid "previous page" +msgstr "pagina anterioară" + +msgid "next page" +msgstr "pagina următoare" + +msgid "Toggle navigation" +msgstr "Comutare navigare" + +msgid "repository" +msgstr "repertoriu" + +msgid "suggest edit" +msgstr "sugerează editare" + +msgid "open issue" +msgstr "problema deschisă" + +msgid "Launch" +msgstr "Lansa" + +msgid "Print to PDF" +msgstr "Imprimați în PDF" + +msgid "By the" +msgstr "Langa" + +msgid "Last updated on" +msgstr "Ultima actualizare la" + +msgid "Download source file" +msgstr "Descărcați fișierul sursă" + +msgid "Download this page" +msgstr "Descarcă această pagină" diff --git a/_static/locales/ru/LC_MESSAGES/booktheme.mo b/_static/locales/ru/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..6b8ca41f Binary files /dev/null and b/_static/locales/ru/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/ru/LC_MESSAGES/booktheme.po b/_static/locales/ru/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..b718b482 --- /dev/null +++ b/_static/locales/ru/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ru\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Тема от" + +msgid "Open an issue" +msgstr "Открыть вопрос" + +msgid "Contents" +msgstr "Содержание" + +msgid "Download notebook file" +msgstr "Скачать файл записной книжки" + +msgid "Sphinx Book Theme" +msgstr "Тема книги Сфинкс" + +msgid "Fullscreen mode" +msgstr "Полноэкранный режим" + +msgid "Edit this page" +msgstr "Редактировать эту страницу" + +msgid "By" +msgstr "По" + +msgid "Copyright" +msgstr "авторское право" + +msgid "Source repository" +msgstr "Исходный репозиторий" + +msgid "previous page" +msgstr "Предыдущая страница" + +msgid "next page" +msgstr "Следующая страница" + +msgid "Toggle navigation" +msgstr "Переключить навигацию" + +msgid "repository" +msgstr "хранилище" + +msgid "suggest edit" +msgstr "предложить редактировать" + +msgid "open issue" +msgstr "открытый вопрос" + +msgid "Launch" +msgstr "Запуск" + +msgid "Print to PDF" +msgstr "Распечатать в PDF" + +msgid "By the" +msgstr "Посредством" + +msgid "Last updated on" +msgstr "Последнее обновление" + +msgid "Download source file" +msgstr "Скачать исходный файл" + +msgid "Download this page" +msgstr "Загрузите эту страницу" diff --git a/_static/locales/sk/LC_MESSAGES/booktheme.mo b/_static/locales/sk/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..59bd0ddf Binary files /dev/null and b/_static/locales/sk/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/sk/LC_MESSAGES/booktheme.po b/_static/locales/sk/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..f6c423b6 --- /dev/null +++ b/_static/locales/sk/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sk\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Téma od" + +msgid "Open an issue" +msgstr "Otvorte problém" + +msgid "Contents" +msgstr "Obsah" + +msgid "Download notebook file" +msgstr "Stiahnite si zošit" + +msgid "Sphinx Book Theme" +msgstr "Téma knihy Sfinga" + +msgid "Fullscreen mode" +msgstr "Režim celej obrazovky" + +msgid "Edit this page" +msgstr "Upraviť túto 
stránku" + +msgid "By" +msgstr "Autor:" + +msgid "Copyright" +msgstr "Autorské práva" + +msgid "Source repository" +msgstr "Zdrojové úložisko" + +msgid "previous page" +msgstr "predchádzajúca strana" + +msgid "next page" +msgstr "ďalšia strana" + +msgid "Toggle navigation" +msgstr "Prepnúť navigáciu" + +msgid "repository" +msgstr "Úložisko" + +msgid "suggest edit" +msgstr "navrhnúť úpravu" + +msgid "open issue" +msgstr "otvorené vydanie" + +msgid "Launch" +msgstr "Spustiť" + +msgid "Print to PDF" +msgstr "Tlač do PDF" + +msgid "By the" +msgstr "Podľa" + +msgid "Last updated on" +msgstr "Posledná aktualizácia dňa" + +msgid "Download source file" +msgstr "Stiahnite si zdrojový súbor" + +msgid "Download this page" +msgstr "Stiahnite si túto stránku" diff --git a/_static/locales/sl/LC_MESSAGES/booktheme.mo b/_static/locales/sl/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..87bf26de Binary files /dev/null and b/_static/locales/sl/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/sl/LC_MESSAGES/booktheme.po b/_static/locales/sl/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..9822dc58 --- /dev/null +++ b/_static/locales/sl/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sl\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema avtorja" + +msgid "Open an issue" +msgstr "Odprite številko" + +msgid "Contents" +msgstr "Vsebina" + +msgid "Download notebook file" +msgstr "Prenesite datoteko zvezka" + +msgid "Sphinx Book Theme" +msgstr "Tema knjige Sphinx" + +msgid "Fullscreen mode" +msgstr "Celozaslonski način" + +msgid "Edit this page" +msgstr "Uredite to stran" + +msgid "By" +msgstr "Avtor" + +msgid "Copyright" +msgstr "avtorske pravice" + +msgid "Source repository" +msgstr "Izvorno skladišče" + +msgid "previous page" +msgstr "Prejšnja stran" + +msgid "next page" +msgstr "Naslednja stran" + +msgid "Toggle navigation" +msgstr "Preklopi navigacijo" + +msgid "repository" +msgstr "odlagališče" + +msgid "suggest edit" +msgstr "predlagajte urejanje" + +msgid "open issue" +msgstr "odprto vprašanje" + +msgid "Launch" +msgstr "Kosilo" + +msgid "Print to PDF" +msgstr "Natisni v PDF" + +msgid "By the" +msgstr "Avtor" + +msgid "Last updated on" +msgstr "Nazadnje posodobljeno dne" + +msgid "Download source file" +msgstr "Prenesite izvorno datoteko" + +msgid "Download this page" +msgstr "Prenesite to stran" diff --git a/_static/locales/sr/LC_MESSAGES/booktheme.mo b/_static/locales/sr/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..ec740f48 Binary files /dev/null and b/_static/locales/sr/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/sr/LC_MESSAGES/booktheme.po b/_static/locales/sr/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..e809230c --- /dev/null +++ b/_static/locales/sr/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sr\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Тхеме би" + +msgid "Open an issue" +msgstr "Отворите издање" + +msgid "Contents" +msgstr "Садржај" + +msgid "Download notebook file" +msgstr "Преузмите датотеку бележнице" + +msgid "Sphinx Book Theme" +msgstr "Тема књиге Спхинк" + +msgid "Fullscreen 
mode" +msgstr "Режим целог екрана" + +msgid "Edit this page" +msgstr "Уредите ову страницу" + +msgid "By" +msgstr "Од стране" + +msgid "Copyright" +msgstr "Ауторско право" + +msgid "Source repository" +msgstr "Изворно спремиште" + +msgid "previous page" +msgstr "Претходна страница" + +msgid "next page" +msgstr "Следећа страна" + +msgid "Toggle navigation" +msgstr "Укључи / искључи навигацију" + +msgid "repository" +msgstr "спремиште" + +msgid "suggest edit" +msgstr "предложи уређивање" + +msgid "open issue" +msgstr "отворено издање" + +msgid "Launch" +msgstr "Лансирање" + +msgid "Print to PDF" +msgstr "Испис у ПДФ" + +msgid "By the" +msgstr "Од" + +msgid "Last updated on" +msgstr "Последње ажурирање" + +msgid "Download source file" +msgstr "Преузми изворну датотеку" + +msgid "Download this page" +msgstr "Преузмите ову страницу" diff --git a/_static/locales/sv/LC_MESSAGES/booktheme.mo b/_static/locales/sv/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..b07dc76f Binary files /dev/null and b/_static/locales/sv/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/sv/LC_MESSAGES/booktheme.po b/_static/locales/sv/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..2421b001 --- /dev/null +++ b/_static/locales/sv/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: sv\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema av" + +msgid "Open an issue" +msgstr "Öppna en problemrapport" + +msgid "Contents" +msgstr "Innehåll" + +msgid "Download notebook file" +msgstr "Ladda ner notebook-fil" + +msgid "Sphinx Book Theme" +msgstr "Sphinx Boktema" + +msgid "Fullscreen mode" +msgstr "Fullskärmsläge" + +msgid "Edit this page" +msgstr "Redigera den här sidan" + +msgid "By" +msgstr "Av" + +msgid "Copyright" +msgstr "Upphovsrätt" + +msgid "Source repository" +msgstr "Källkodsrepositorium" + +msgid "previous page" +msgstr "föregående sida" + +msgid "next page" +msgstr "nästa sida" + +msgid "Toggle navigation" +msgstr "Växla navigering" + +msgid "repository" +msgstr "repositorium" + +msgid "suggest edit" +msgstr "föreslå ändring" + +msgid "open issue" +msgstr "öppna problemrapport" + +msgid "Launch" +msgstr "Öppna" + +msgid "Print to PDF" +msgstr "Skriv ut till PDF" + +msgid "By the" +msgstr "Av den" + +msgid "Last updated on" +msgstr "Senast uppdaterad den" + +msgid "Download source file" +msgstr "Ladda ner källfil" + +msgid "Download this page" +msgstr "Ladda ner den här sidan" diff --git a/_static/locales/ta/LC_MESSAGES/booktheme.mo b/_static/locales/ta/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..29f52e1f Binary files /dev/null and b/_static/locales/ta/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/ta/LC_MESSAGES/booktheme.po b/_static/locales/ta/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..500042f4 --- /dev/null +++ b/_static/locales/ta/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ta\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "வழங்கிய தீம்" + +msgid "Open an issue" +msgstr "சிக்கலைத் திறக்கவும்" + +msgid "Download notebook file" +msgstr "நோட்புக் கோப்பைப் பதிவிறக்கவும்" + +msgid "Sphinx Book Theme" +msgstr 
"ஸ்பிங்க்ஸ் புத்தக தீம்" + +msgid "Edit this page" +msgstr "இந்தப் பக்கத்தைத் திருத்தவும்" + +msgid "By" +msgstr "வழங்கியவர்" + +msgid "Copyright" +msgstr "பதிப்புரிமை" + +msgid "Source repository" +msgstr "மூல களஞ்சியம்" + +msgid "previous page" +msgstr "முந்தைய பக்கம்" + +msgid "next page" +msgstr "அடுத்த பக்கம்" + +msgid "Toggle navigation" +msgstr "வழிசெலுத்தலை நிலைமாற்று" + +msgid "suggest edit" +msgstr "திருத்த பரிந்துரைக்கவும்" + +msgid "open issue" +msgstr "திறந்த பிரச்சினை" + +msgid "Launch" +msgstr "தொடங்க" + +msgid "Print to PDF" +msgstr "PDF இல் அச்சிடுக" + +msgid "By the" +msgstr "மூலம்" + +msgid "Last updated on" +msgstr "கடைசியாக புதுப்பிக்கப்பட்டது" + +msgid "Download source file" +msgstr "மூல கோப்பைப் பதிவிறக்குக" + +msgid "Download this page" +msgstr "இந்தப் பக்கத்தைப் பதிவிறக்கவும்" diff --git a/_static/locales/te/LC_MESSAGES/booktheme.mo b/_static/locales/te/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..0a5f4b46 Binary files /dev/null and b/_static/locales/te/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/te/LC_MESSAGES/booktheme.po b/_static/locales/te/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..b1afebba --- /dev/null +++ b/_static/locales/te/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: te\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "ద్వారా థీమ్" + +msgid "Open an issue" +msgstr "సమస్యను తెరవండి" + +msgid "Download notebook file" +msgstr "నోట్బుక్ ఫైల్ను డౌన్లోడ్ చేయండి" + +msgid "Sphinx Book Theme" +msgstr "సింహిక పుస్తక థీమ్" + +msgid "Edit this page" +msgstr "ఈ పేజీని సవరించండి" + +msgid "By" +msgstr "ద్వారా" + +msgid "Copyright" +msgstr "కాపీరైట్" + +msgid "Source repository" +msgstr "మూల రిపోజిటరీ" + +msgid "previous page" +msgstr "ముందు పేజి" + +msgid "next page" +msgstr "తరువాతి పేజీ" + +msgid "Toggle navigation" +msgstr "నావిగేషన్‌ను టోగుల్ చేయండి" + +msgid "suggest edit" +msgstr "సవరించమని సూచించండి" + +msgid "open issue" +msgstr "ఓపెన్ ఇష్యూ" + +msgid "Launch" +msgstr "ప్రారంభించండి" + +msgid "Print to PDF" +msgstr "PDF కి ముద్రించండి" + +msgid "By the" +msgstr "ద్వారా" + +msgid "Last updated on" +msgstr "చివరిగా నవీకరించబడింది" + +msgid "Download source file" +msgstr "మూల ఫైల్‌ను డౌన్‌లోడ్ చేయండి" + +msgid "Download this page" +msgstr "ఈ పేజీని డౌన్‌లోడ్ చేయండి" diff --git a/_static/locales/tg/LC_MESSAGES/booktheme.mo b/_static/locales/tg/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..b21c6c63 Binary files /dev/null and b/_static/locales/tg/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/tg/LC_MESSAGES/booktheme.po b/_static/locales/tg/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..29b8237b --- /dev/null +++ b/_static/locales/tg/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tg\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Мавзӯъи аз" + +msgid "Open an issue" +msgstr "Масъаларо кушоед" + +msgid "Contents" +msgstr "Мундариҷа" + +msgid "Download notebook file" +msgstr "Файли дафтарро зеркашӣ кунед" + +msgid "Sphinx Book Theme" +msgstr "Сфинкс Мавзӯи китоб" + +msgid "Fullscreen mode" +msgstr "Ҳолати экрани пурра" + +msgid "Edit this page" 
+msgstr "Ин саҳифаро таҳрир кунед" + +msgid "By" +msgstr "Бо" + +msgid "Copyright" +msgstr "Ҳуқуқи муаллиф" + +msgid "Source repository" +msgstr "Анбори манбаъ" + +msgid "previous page" +msgstr "саҳифаи қаблӣ" + +msgid "next page" +msgstr "саҳифаи оянда" + +msgid "Toggle navigation" +msgstr "Гузаришро иваз кунед" + +msgid "repository" +msgstr "анбор" + +msgid "suggest edit" +msgstr "пешниҳод вироиш" + +msgid "open issue" +msgstr "барориши кушод" + +msgid "Launch" +msgstr "Оғоз" + +msgid "Print to PDF" +msgstr "Чоп ба PDF" + +msgid "By the" +msgstr "Бо" + +msgid "Last updated on" +msgstr "Last навсозӣ дар" + +msgid "Download source file" +msgstr "Файли манбаъро зеркашӣ кунед" + +msgid "Download this page" +msgstr "Ин саҳифаро зеркашӣ кунед" diff --git a/_static/locales/th/LC_MESSAGES/booktheme.mo b/_static/locales/th/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..abede98a Binary files /dev/null and b/_static/locales/th/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/th/LC_MESSAGES/booktheme.po b/_static/locales/th/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..ac65ee05 --- /dev/null +++ b/_static/locales/th/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: th\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "ธีมโดย" + +msgid "Open an issue" +msgstr "เปิดปัญหา" + +msgid "Contents" +msgstr "สารบัญ" + +msgid "Download notebook file" +msgstr "ดาวน์โหลดไฟล์สมุดบันทึก" + +msgid "Sphinx Book Theme" +msgstr "ธีมหนังสือสฟิงซ์" + +msgid "Fullscreen mode" +msgstr "โหมดเต็มหน้าจอ" + +msgid "Edit this page" +msgstr "แก้ไขหน้านี้" + +msgid "By" +msgstr "โดย" + +msgid "Copyright" +msgstr "ลิขสิทธิ์" + +msgid "Source repository" +msgstr "ที่เก็บซอร์ส" + +msgid "previous page" +msgstr "หน้าที่แล้ว" + +msgid "next page" +msgstr "หน้าต่อไป" + +msgid "Toggle navigation" +msgstr "ไม่ต้องสลับช่องทาง" + +msgid "repository" +msgstr "ที่เก็บ" + +msgid "suggest edit" +msgstr "แนะนำแก้ไข" + +msgid "open issue" +msgstr "เปิดปัญหา" + +msgid "Launch" +msgstr "เปิด" + +msgid "Print to PDF" +msgstr "พิมพ์เป็น PDF" + +msgid "By the" +msgstr "โดย" + +msgid "Last updated on" +msgstr "ปรับปรุงล่าสุดเมื่อ" + +msgid "Download source file" +msgstr "ดาวน์โหลดไฟล์ต้นฉบับ" + +msgid "Download this page" +msgstr "ดาวน์โหลดหน้านี้" diff --git a/_static/locales/tl/LC_MESSAGES/booktheme.mo b/_static/locales/tl/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..8df1b733 Binary files /dev/null and b/_static/locales/tl/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/tl/LC_MESSAGES/booktheme.po b/_static/locales/tl/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..662d66ca --- /dev/null +++ b/_static/locales/tl/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tl\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tema ng" + +msgid "Open an issue" +msgstr "Magbukas ng isyu" + +msgid "Download notebook file" +msgstr "Mag-download ng file ng notebook" + +msgid "Sphinx Book Theme" +msgstr "Tema ng Sphinx Book" + +msgid "Edit this page" +msgstr "I-edit ang pahinang ito" + +msgid "By" +msgstr "Ni" + +msgid "Copyright" +msgstr "Copyright" + +msgid "Source 
repository" +msgstr "Pinagmulan ng imbakan" + +msgid "previous page" +msgstr "Nakaraang pahina" + +msgid "next page" +msgstr "Susunod na pahina" + +msgid "Toggle navigation" +msgstr "I-toggle ang pag-navigate" + +msgid "suggest edit" +msgstr "iminumungkahi i-edit" + +msgid "open issue" +msgstr "bukas na isyu" + +msgid "Launch" +msgstr "Ilunsad" + +msgid "Print to PDF" +msgstr "I-print sa PDF" + +msgid "By the" +msgstr "Sa pamamagitan ng" + +msgid "Last updated on" +msgstr "Huling na-update noong" + +msgid "Download source file" +msgstr "Mag-download ng file ng pinagmulan" + +msgid "Download this page" +msgstr "I-download ang pahinang ito" diff --git a/_static/locales/tr/LC_MESSAGES/booktheme.mo b/_static/locales/tr/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..029ae18a Binary files /dev/null and b/_static/locales/tr/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/tr/LC_MESSAGES/booktheme.po b/_static/locales/tr/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..d1ae7233 --- /dev/null +++ b/_static/locales/tr/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: tr\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Tarafından tema" + +msgid "Open an issue" +msgstr "Bir sorunu açın" + +msgid "Contents" +msgstr "İçindekiler" + +msgid "Download notebook file" +msgstr "Defter dosyasını indirin" + +msgid "Sphinx Book Theme" +msgstr "Sfenks Kitap Teması" + +msgid "Fullscreen mode" +msgstr "Tam ekran modu" + +msgid "Edit this page" +msgstr "Bu sayfayı düzenle" + +msgid "By" +msgstr "Tarafından" + +msgid "Copyright" +msgstr "Telif hakkı" + +msgid "Source repository" +msgstr "Kaynak kod deposu" + +msgid "previous page" +msgstr "önceki sayfa" + +msgid "next page" +msgstr "sonraki Sayfa" + +msgid "Toggle navigation" +msgstr "Gezinmeyi değiştir" + +msgid "repository" +msgstr "depo" + +msgid "suggest edit" +msgstr "düzenleme öner" + +msgid "open issue" +msgstr "Açık konu" + +msgid "Launch" +msgstr "Başlatmak" + +msgid "Print to PDF" +msgstr "PDF olarak yazdır" + +msgid "By the" +msgstr "Tarafından" + +msgid "Last updated on" +msgstr "Son güncelleme tarihi" + +msgid "Download source file" +msgstr "Kaynak dosyayı indirin" + +msgid "Download this page" +msgstr "Bu sayfayı indirin" diff --git a/_static/locales/uk/LC_MESSAGES/booktheme.mo b/_static/locales/uk/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..16ab7890 Binary files /dev/null and b/_static/locales/uk/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/uk/LC_MESSAGES/booktheme.po b/_static/locales/uk/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..be49ab85 --- /dev/null +++ b/_static/locales/uk/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: uk\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Тема від" + +msgid "Open an issue" +msgstr "Відкрийте випуск" + +msgid "Contents" +msgstr "Зміст" + +msgid "Download notebook file" +msgstr "Завантажте файл блокнота" + +msgid "Sphinx Book Theme" +msgstr "Тема книги \"Сфінкс\"" + +msgid "Fullscreen mode" +msgstr "Повноекранний режим" + +msgid "Edit this page" +msgstr "Редагувати цю сторінку" + +msgid "By" +msgstr "Автор" + 
+msgid "Copyright" +msgstr "Авторське право" + +msgid "Source repository" +msgstr "Джерело сховища" + +msgid "previous page" +msgstr "Попередня сторінка" + +msgid "next page" +msgstr "Наступна сторінка" + +msgid "Toggle navigation" +msgstr "Переключити навігацію" + +msgid "repository" +msgstr "сховище" + +msgid "suggest edit" +msgstr "запропонувати редагувати" + +msgid "open issue" +msgstr "відкритий випуск" + +msgid "Launch" +msgstr "Запуск" + +msgid "Print to PDF" +msgstr "Друк у форматі PDF" + +msgid "By the" +msgstr "По" + +msgid "Last updated on" +msgstr "Останнє оновлення:" + +msgid "Download source file" +msgstr "Завантажити вихідний файл" + +msgid "Download this page" +msgstr "Завантажте цю сторінку" diff --git a/_static/locales/ur/LC_MESSAGES/booktheme.mo b/_static/locales/ur/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..de8c84b9 Binary files /dev/null and b/_static/locales/ur/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/ur/LC_MESSAGES/booktheme.po b/_static/locales/ur/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..94bcab33 --- /dev/null +++ b/_static/locales/ur/LC_MESSAGES/booktheme.po @@ -0,0 +1,66 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: ur\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "کے ذریعہ تھیم" + +msgid "Open an issue" +msgstr "ایک مسئلہ کھولیں" + +msgid "Download notebook file" +msgstr "نوٹ بک فائل ڈاؤن لوڈ کریں" + +msgid "Sphinx Book Theme" +msgstr "سپنکس بک تھیم" + +msgid "Edit this page" +msgstr "اس صفحے میں ترمیم کریں" + +msgid "By" +msgstr "بذریعہ" + +msgid "Copyright" +msgstr "کاپی رائٹ" + +msgid "Source repository" +msgstr "ماخذ ذخیرہ" + +msgid "previous page" +msgstr "سابقہ ​​صفحہ" + +msgid "next page" +msgstr "اگلا صفحہ" + +msgid "Toggle navigation" +msgstr "نیویگیشن ٹوگل کریں" + +msgid "suggest edit" +msgstr "ترمیم کی تجویز کریں" + +msgid "open issue" +msgstr "کھلا مسئلہ" + +msgid "Launch" +msgstr "لانچ کریں" + +msgid "Print to PDF" +msgstr "پی ڈی ایف پرنٹ کریں" + +msgid "By the" +msgstr "کی طرف" + +msgid "Last updated on" +msgstr "آخری بار تازہ کاری ہوئی" + +msgid "Download source file" +msgstr "سورس فائل ڈاؤن لوڈ کریں" + +msgid "Download this page" +msgstr "اس صفحے کو ڈاؤن لوڈ کریں" diff --git a/_static/locales/vi/LC_MESSAGES/booktheme.mo b/_static/locales/vi/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..2bb32555 Binary files /dev/null and b/_static/locales/vi/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/vi/LC_MESSAGES/booktheme.po b/_static/locales/vi/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..116236dc --- /dev/null +++ b/_static/locales/vi/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: vi\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "Chủ đề của" + +msgid "Open an issue" +msgstr "Mở một vấn đề" + +msgid "Contents" +msgstr "Nội dung" + +msgid "Download notebook file" +msgstr "Tải xuống tệp sổ tay" + +msgid "Sphinx Book Theme" +msgstr "Chủ đề sách nhân sư" + +msgid "Fullscreen mode" +msgstr "Chế độ toàn màn hình" + +msgid "Edit this page" +msgstr "chỉnh sửa trang này" + +msgid "By" +msgstr "Bởi" + +msgid "Copyright" +msgstr "Bản quyền" + +msgid "Source repository" +msgstr 
"Kho nguồn" + +msgid "previous page" +msgstr "trang trước" + +msgid "next page" +msgstr "Trang tiếp theo" + +msgid "Toggle navigation" +msgstr "Chuyển đổi điều hướng thành" + +msgid "repository" +msgstr "kho" + +msgid "suggest edit" +msgstr "đề nghị chỉnh sửa" + +msgid "open issue" +msgstr "vấn đề mở" + +msgid "Launch" +msgstr "Phóng" + +msgid "Print to PDF" +msgstr "In sang PDF" + +msgid "By the" +msgstr "Bằng" + +msgid "Last updated on" +msgstr "Cập nhật lần cuối vào" + +msgid "Download source file" +msgstr "Tải xuống tệp nguồn" + +msgid "Download this page" +msgstr "Tải xuống trang này" diff --git a/_static/locales/zh_CN/LC_MESSAGES/booktheme.mo b/_static/locales/zh_CN/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..0e3235d0 Binary files /dev/null and b/_static/locales/zh_CN/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/zh_CN/LC_MESSAGES/booktheme.po b/_static/locales/zh_CN/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..4f4ab579 --- /dev/null +++ b/_static/locales/zh_CN/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_CN\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "主题作者:" + +msgid "Open an issue" +msgstr "创建议题" + +msgid "Contents" +msgstr "目录" + +msgid "Download notebook file" +msgstr "下载笔记本文件" + +msgid "Sphinx Book Theme" +msgstr "Sphinx Book 主题" + +msgid "Fullscreen mode" +msgstr "全屏模式" + +msgid "Edit this page" +msgstr "编辑此页面" + +msgid "By" +msgstr "作者:" + +msgid "Copyright" +msgstr "版权" + +msgid "Source repository" +msgstr "源码库" + +msgid "previous page" +msgstr "上一页" + +msgid "next page" +msgstr "下一页" + +msgid "Toggle navigation" +msgstr "显示或隐藏导航栏" + +msgid "repository" +msgstr "仓库" + +msgid "suggest edit" +msgstr "提出修改建议" + +msgid "open issue" +msgstr "创建议题" + +msgid "Launch" +msgstr "启动" + +msgid "Print to PDF" +msgstr "列印成 PDF" + +msgid "By the" +msgstr "作者:" + +msgid "Last updated on" +msgstr "上次更新时间:" + +msgid "Download source file" +msgstr "下载源文件" + +msgid "Download this page" +msgstr "下载此页面" diff --git a/_static/locales/zh_TW/LC_MESSAGES/booktheme.mo b/_static/locales/zh_TW/LC_MESSAGES/booktheme.mo new file mode 100644 index 00000000..9116fa95 Binary files /dev/null and b/_static/locales/zh_TW/LC_MESSAGES/booktheme.mo differ diff --git a/_static/locales/zh_TW/LC_MESSAGES/booktheme.po b/_static/locales/zh_TW/LC_MESSAGES/booktheme.po new file mode 100644 index 00000000..42b43b86 --- /dev/null +++ b/_static/locales/zh_TW/LC_MESSAGES/booktheme.po @@ -0,0 +1,75 @@ + +msgid "" +msgstr "" +"Project-Id-Version: Sphinx-Book-Theme\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: zh_TW\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgid "Theme by the" +msgstr "佈景主題作者:" + +msgid "Open an issue" +msgstr "開啟議題" + +msgid "Contents" +msgstr "目錄" + +msgid "Download notebook file" +msgstr "下載 Notebook 檔案" + +msgid "Sphinx Book Theme" +msgstr "Sphinx Book 佈景主題" + +msgid "Fullscreen mode" +msgstr "全螢幕模式" + +msgid "Edit this page" +msgstr "編輯此頁面" + +msgid "By" +msgstr "作者:" + +msgid "Copyright" +msgstr "Copyright" + +msgid "Source repository" +msgstr "來源儲存庫" + +msgid "previous page" +msgstr "上一頁" + +msgid "next page" +msgstr "下一頁" + +msgid "Toggle navigation" +msgstr "顯示或隱藏導覽列" + +msgid "repository" +msgstr "儲存庫" + +msgid "suggest edit" +msgstr "提出修改建議" + +msgid 
"open issue" +msgstr "公開的問題" + +msgid "Launch" +msgstr "啟動" + +msgid "Print to PDF" +msgstr "列印成 PDF" + +msgid "By the" +msgstr "作者:" + +msgid "Last updated on" +msgstr "最後更新時間:" + +msgid "Download source file" +msgstr "下載原始檔" + +msgid "Download this page" +msgstr "下載此頁面" diff --git a/_static/minus.png b/_static/minus.png new file mode 100644 index 00000000..d96755fd Binary files /dev/null and b/_static/minus.png differ diff --git a/_static/nbsphinx-broken-thumbnail.svg b/_static/nbsphinx-broken-thumbnail.svg new file mode 100644 index 00000000..4919ca88 --- /dev/null +++ b/_static/nbsphinx-broken-thumbnail.svg @@ -0,0 +1,9 @@ + + + + diff --git a/_static/nbsphinx-code-cells.css b/_static/nbsphinx-code-cells.css new file mode 100644 index 00000000..a3fb27c3 --- /dev/null +++ b/_static/nbsphinx-code-cells.css @@ -0,0 +1,259 @@ +/* remove conflicting styling from Sphinx themes */ +div.nbinput.container div.prompt *, +div.nboutput.container div.prompt *, +div.nbinput.container div.input_area pre, +div.nboutput.container div.output_area pre, +div.nbinput.container div.input_area .highlight, +div.nboutput.container div.output_area .highlight { + border: none; + padding: 0; + margin: 0; + box-shadow: none; +} + +div.nbinput.container > div[class*=highlight], +div.nboutput.container > div[class*=highlight] { + margin: 0; +} + +div.nbinput.container div.prompt *, +div.nboutput.container div.prompt * { + background: none; +} + +div.nboutput.container div.output_area .highlight, +div.nboutput.container div.output_area pre { + background: unset; +} + +div.nboutput.container div.output_area div.highlight { + color: unset; /* override Pygments text color */ +} + +/* avoid gaps between output lines */ +div.nboutput.container div[class*=highlight] pre { + line-height: normal; +} + +/* input/output containers */ +div.nbinput.container, +div.nboutput.container { + display: -webkit-flex; + display: flex; + align-items: flex-start; + margin: 0; + width: 100%; +} +@media (max-width: 540px) { + div.nbinput.container, + div.nboutput.container { + flex-direction: column; + } +} + +/* input container */ +div.nbinput.container { + padding-top: 5px; +} + +/* last container */ +div.nblast.container { + padding-bottom: 5px; +} + +/* input prompt */ +div.nbinput.container div.prompt pre, +/* for sphinx_immaterial theme: */ +div.nbinput.container div.prompt pre > code { + color: #307FC1; +} + +/* output prompt */ +div.nboutput.container div.prompt pre, +/* for sphinx_immaterial theme: */ +div.nboutput.container div.prompt pre > code { + color: #BF5B3D; +} + +/* all prompts */ +div.nbinput.container div.prompt, +div.nboutput.container div.prompt { + width: 4.5ex; + padding-top: 5px; + position: relative; + user-select: none; +} + +div.nbinput.container div.prompt > div, +div.nboutput.container div.prompt > div { + position: absolute; + right: 0; + margin-right: 0.3ex; +} + +@media (max-width: 540px) { + div.nbinput.container div.prompt, + div.nboutput.container div.prompt { + width: unset; + text-align: left; + padding: 0.4em; + } + div.nboutput.container div.prompt.empty { + padding: 0; + } + + div.nbinput.container div.prompt > div, + div.nboutput.container div.prompt > div { + position: unset; + } +} + +/* disable scrollbars and line breaks on prompts */ +div.nbinput.container div.prompt pre, +div.nboutput.container div.prompt pre { + overflow: hidden; + white-space: pre; +} + +/* input/output area */ +div.nbinput.container div.input_area, +div.nboutput.container div.output_area { + -webkit-flex: 1; + flex: 1; + 
overflow: auto; +} +@media (max-width: 540px) { + div.nbinput.container div.input_area, + div.nboutput.container div.output_area { + width: 100%; + } +} + +/* input area */ +div.nbinput.container div.input_area { + border: 1px solid #e0e0e0; + border-radius: 2px; + /*background: #f5f5f5;*/ +} + +/* override MathJax center alignment in output cells */ +div.nboutput.container div[class*=MathJax] { + text-align: left !important; +} + +/* override sphinx.ext.imgmath center alignment in output cells */ +div.nboutput.container div.math p { + text-align: left; +} + +/* standard error */ +div.nboutput.container div.output_area.stderr { + background: #fdd; +} + +/* ANSI colors */ +.ansi-black-fg { color: #3E424D; } +.ansi-black-bg { background-color: #3E424D; } +.ansi-black-intense-fg { color: #282C36; } +.ansi-black-intense-bg { background-color: #282C36; } +.ansi-red-fg { color: #E75C58; } +.ansi-red-bg { background-color: #E75C58; } +.ansi-red-intense-fg { color: #B22B31; } +.ansi-red-intense-bg { background-color: #B22B31; } +.ansi-green-fg { color: #00A250; } +.ansi-green-bg { background-color: #00A250; } +.ansi-green-intense-fg { color: #007427; } +.ansi-green-intense-bg { background-color: #007427; } +.ansi-yellow-fg { color: #DDB62B; } +.ansi-yellow-bg { background-color: #DDB62B; } +.ansi-yellow-intense-fg { color: #B27D12; } +.ansi-yellow-intense-bg { background-color: #B27D12; } +.ansi-blue-fg { color: #208FFB; } +.ansi-blue-bg { background-color: #208FFB; } +.ansi-blue-intense-fg { color: #0065CA; } +.ansi-blue-intense-bg { background-color: #0065CA; } +.ansi-magenta-fg { color: #D160C4; } +.ansi-magenta-bg { background-color: #D160C4; } +.ansi-magenta-intense-fg { color: #A03196; } +.ansi-magenta-intense-bg { background-color: #A03196; } +.ansi-cyan-fg { color: #60C6C8; } +.ansi-cyan-bg { background-color: #60C6C8; } +.ansi-cyan-intense-fg { color: #258F8F; } +.ansi-cyan-intense-bg { background-color: #258F8F; } +.ansi-white-fg { color: #C5C1B4; } +.ansi-white-bg { background-color: #C5C1B4; } +.ansi-white-intense-fg { color: #A1A6B2; } +.ansi-white-intense-bg { background-color: #A1A6B2; } + +.ansi-default-inverse-fg { color: #FFFFFF; } +.ansi-default-inverse-bg { background-color: #000000; } + +.ansi-bold { font-weight: bold; } +.ansi-underline { text-decoration: underline; } + + +div.nbinput.container div.input_area div[class*=highlight] > pre, +div.nboutput.container div.output_area div[class*=highlight] > pre, +div.nboutput.container div.output_area div[class*=highlight].math, +div.nboutput.container div.output_area.rendered_html, +div.nboutput.container div.output_area > div.output_javascript, +div.nboutput.container div.output_area:not(.rendered_html) > img{ + padding: 5px; + margin: 0; +} + +/* fix copybtn overflow problem in chromium (needed for 'sphinx_copybutton') */ +div.nbinput.container div.input_area > div[class^='highlight'], +div.nboutput.container div.output_area > div[class^='highlight']{ + overflow-y: hidden; +} + +/* hide copy button on prompts for 'sphinx_copybutton' extension ... */ +.prompt .copybtn, +/* ... 
and 'sphinx_immaterial' theme */ +.prompt .md-clipboard.md-icon { + display: none; +} + +/* Some additional styling taken form the Jupyter notebook CSS */ +.jp-RenderedHTMLCommon table, +div.rendered_html table { + border: none; + border-collapse: collapse; + border-spacing: 0; + color: black; + font-size: 12px; + table-layout: fixed; +} +.jp-RenderedHTMLCommon thead, +div.rendered_html thead { + border-bottom: 1px solid black; + vertical-align: bottom; +} +.jp-RenderedHTMLCommon tr, +.jp-RenderedHTMLCommon th, +.jp-RenderedHTMLCommon td, +div.rendered_html tr, +div.rendered_html th, +div.rendered_html td { + text-align: right; + vertical-align: middle; + padding: 0.5em 0.5em; + line-height: normal; + white-space: normal; + max-width: none; + border: none; +} +.jp-RenderedHTMLCommon th, +div.rendered_html th { + font-weight: bold; +} +.jp-RenderedHTMLCommon tbody tr:nth-child(odd), +div.rendered_html tbody tr:nth-child(odd) { + background: #f5f5f5; +} +.jp-RenderedHTMLCommon tbody tr:hover, +div.rendered_html tbody tr:hover { + background: rgba(66, 165, 245, 0.2); +} + diff --git a/_static/nbsphinx-gallery.css b/_static/nbsphinx-gallery.css new file mode 100644 index 00000000..365c27a9 --- /dev/null +++ b/_static/nbsphinx-gallery.css @@ -0,0 +1,31 @@ +.nbsphinx-gallery { + display: grid; + grid-template-columns: repeat(auto-fill, minmax(160px, 1fr)); + gap: 5px; + margin-top: 1em; + margin-bottom: 1em; +} + +.nbsphinx-gallery > a { + padding: 5px; + border: 1px dotted currentColor; + border-radius: 2px; + text-align: center; +} + +.nbsphinx-gallery > a:hover { + border-style: solid; +} + +.nbsphinx-gallery img { + max-width: 100%; + max-height: 100%; +} + +.nbsphinx-gallery > a > div:first-child { + display: flex; + align-items: start; + justify-content: center; + height: 120px; + margin-bottom: 5px; +} diff --git a/_static/nbsphinx-no-thumbnail.svg b/_static/nbsphinx-no-thumbnail.svg new file mode 100644 index 00000000..9dca7588 --- /dev/null +++ b/_static/nbsphinx-no-thumbnail.svg @@ -0,0 +1,9 @@ + + + + diff --git a/_static/no_image.png b/_static/no_image.png new file mode 100644 index 00000000..8c2d48d5 Binary files /dev/null and b/_static/no_image.png differ diff --git a/_static/plot_directive.css b/_static/plot_directive.css new file mode 100644 index 00000000..d45593c9 --- /dev/null +++ b/_static/plot_directive.css @@ -0,0 +1,16 @@ +/* + * plot_directive.css + * ~~~~~~~~~~~~ + * + * Stylesheet controlling images created using the `plot` directive within + * Sphinx. + * + * :copyright: Copyright 2020-* by the Matplotlib development team. + * :license: Matplotlib, see LICENSE for details. 
+ * + */ + +img.plot-directive { + border: 0; + max-width: 100%; +} diff --git a/_static/plus.png b/_static/plus.png new file mode 100644 index 00000000..7107cec9 Binary files /dev/null and b/_static/plus.png differ diff --git a/_static/pygments.css b/_static/pygments.css new file mode 100644 index 00000000..997797f2 --- /dev/null +++ b/_static/pygments.css @@ -0,0 +1,152 @@ +html[data-theme="light"] .highlight pre { line-height: 125%; } +html[data-theme="light"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="light"] .highlight .hll { background-color: #7971292e } +html[data-theme="light"] .highlight { background: #fefefe; color: #545454 } +html[data-theme="light"] .highlight .c { color: #797129 } /* Comment */ +html[data-theme="light"] .highlight .err { color: #d91e18 } /* Error */ +html[data-theme="light"] .highlight .k { color: #7928a1 } /* Keyword */ +html[data-theme="light"] .highlight .l { color: #797129 } /* Literal */ +html[data-theme="light"] .highlight .n { color: #545454 } /* Name */ +html[data-theme="light"] .highlight .o { color: #008000 } /* Operator */ +html[data-theme="light"] .highlight .p { color: #545454 } /* Punctuation */ +html[data-theme="light"] .highlight .ch { color: #797129 } /* Comment.Hashbang */ +html[data-theme="light"] .highlight .cm { color: #797129 } /* Comment.Multiline */ +html[data-theme="light"] .highlight .cp { color: #797129 } /* Comment.Preproc */ +html[data-theme="light"] .highlight .cpf { color: #797129 } /* Comment.PreprocFile */ +html[data-theme="light"] .highlight .c1 { color: #797129 } /* Comment.Single */ +html[data-theme="light"] .highlight .cs { color: #797129 } /* Comment.Special */ +html[data-theme="light"] .highlight .gd { color: #007faa } /* Generic.Deleted */ +html[data-theme="light"] .highlight .ge { font-style: italic } /* Generic.Emph */ +html[data-theme="light"] .highlight .gh { color: #007faa } /* Generic.Heading */ +html[data-theme="light"] .highlight .gs { font-weight: bold } /* Generic.Strong */ +html[data-theme="light"] .highlight .gu { color: #007faa } /* Generic.Subheading */ +html[data-theme="light"] .highlight .kc { color: #7928a1 } /* Keyword.Constant */ +html[data-theme="light"] .highlight .kd { color: #7928a1 } /* Keyword.Declaration */ +html[data-theme="light"] .highlight .kn { color: #7928a1 } /* Keyword.Namespace */ +html[data-theme="light"] .highlight .kp { color: #7928a1 } /* Keyword.Pseudo */ +html[data-theme="light"] .highlight .kr { color: #7928a1 } /* Keyword.Reserved */ +html[data-theme="light"] .highlight .kt { color: #797129 } /* Keyword.Type */ +html[data-theme="light"] .highlight .ld { color: #797129 } /* Literal.Date */ +html[data-theme="light"] .highlight .m { color: #797129 } /* Literal.Number */ +html[data-theme="light"] .highlight .s { color: #008000 } /* Literal.String */ +html[data-theme="light"] .highlight .na { color: #797129 } /* Name.Attribute */ +html[data-theme="light"] .highlight .nb { color: #797129 } /* Name.Builtin */ +html[data-theme="light"] .highlight .nc { color: #007faa } /* 
Name.Class */ +html[data-theme="light"] .highlight .no { color: #007faa } /* Name.Constant */ +html[data-theme="light"] .highlight .nd { color: #797129 } /* Name.Decorator */ +html[data-theme="light"] .highlight .ni { color: #008000 } /* Name.Entity */ +html[data-theme="light"] .highlight .ne { color: #7928a1 } /* Name.Exception */ +html[data-theme="light"] .highlight .nf { color: #007faa } /* Name.Function */ +html[data-theme="light"] .highlight .nl { color: #797129 } /* Name.Label */ +html[data-theme="light"] .highlight .nn { color: #545454 } /* Name.Namespace */ +html[data-theme="light"] .highlight .nx { color: #545454 } /* Name.Other */ +html[data-theme="light"] .highlight .py { color: #007faa } /* Name.Property */ +html[data-theme="light"] .highlight .nt { color: #007faa } /* Name.Tag */ +html[data-theme="light"] .highlight .nv { color: #d91e18 } /* Name.Variable */ +html[data-theme="light"] .highlight .ow { color: #7928a1 } /* Operator.Word */ +html[data-theme="light"] .highlight .pm { color: #545454 } /* Punctuation.Marker */ +html[data-theme="light"] .highlight .w { color: #545454 } /* Text.Whitespace */ +html[data-theme="light"] .highlight .mb { color: #797129 } /* Literal.Number.Bin */ +html[data-theme="light"] .highlight .mf { color: #797129 } /* Literal.Number.Float */ +html[data-theme="light"] .highlight .mh { color: #797129 } /* Literal.Number.Hex */ +html[data-theme="light"] .highlight .mi { color: #797129 } /* Literal.Number.Integer */ +html[data-theme="light"] .highlight .mo { color: #797129 } /* Literal.Number.Oct */ +html[data-theme="light"] .highlight .sa { color: #008000 } /* Literal.String.Affix */ +html[data-theme="light"] .highlight .sb { color: #008000 } /* Literal.String.Backtick */ +html[data-theme="light"] .highlight .sc { color: #008000 } /* Literal.String.Char */ +html[data-theme="light"] .highlight .dl { color: #008000 } /* Literal.String.Delimiter */ +html[data-theme="light"] .highlight .sd { color: #008000 } /* Literal.String.Doc */ +html[data-theme="light"] .highlight .s2 { color: #008000 } /* Literal.String.Double */ +html[data-theme="light"] .highlight .se { color: #008000 } /* Literal.String.Escape */ +html[data-theme="light"] .highlight .sh { color: #008000 } /* Literal.String.Heredoc */ +html[data-theme="light"] .highlight .si { color: #008000 } /* Literal.String.Interpol */ +html[data-theme="light"] .highlight .sx { color: #008000 } /* Literal.String.Other */ +html[data-theme="light"] .highlight .sr { color: #d91e18 } /* Literal.String.Regex */ +html[data-theme="light"] .highlight .s1 { color: #008000 } /* Literal.String.Single */ +html[data-theme="light"] .highlight .ss { color: #007faa } /* Literal.String.Symbol */ +html[data-theme="light"] .highlight .bp { color: #797129 } /* Name.Builtin.Pseudo */ +html[data-theme="light"] .highlight .fm { color: #007faa } /* Name.Function.Magic */ +html[data-theme="light"] .highlight .vc { color: #d91e18 } /* Name.Variable.Class */ +html[data-theme="light"] .highlight .vg { color: #d91e18 } /* Name.Variable.Global */ +html[data-theme="light"] .highlight .vi { color: #d91e18 } /* Name.Variable.Instance */ +html[data-theme="light"] .highlight .vm { color: #797129 } /* Name.Variable.Magic */ +html[data-theme="light"] .highlight .il { color: #797129 } /* Literal.Number.Integer.Long */ +html[data-theme="dark"] .highlight pre { line-height: 125%; } +html[data-theme="dark"] .highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] 
.highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +html[data-theme="dark"] .highlight .hll { background-color: #ffd9002e } +html[data-theme="dark"] .highlight { background: #2b2b2b; color: #f8f8f2 } +html[data-theme="dark"] .highlight .c { color: #ffd900 } /* Comment */ +html[data-theme="dark"] .highlight .err { color: #ffa07a } /* Error */ +html[data-theme="dark"] .highlight .k { color: #dcc6e0 } /* Keyword */ +html[data-theme="dark"] .highlight .l { color: #ffd900 } /* Literal */ +html[data-theme="dark"] .highlight .n { color: #f8f8f2 } /* Name */ +html[data-theme="dark"] .highlight .o { color: #abe338 } /* Operator */ +html[data-theme="dark"] .highlight .p { color: #f8f8f2 } /* Punctuation */ +html[data-theme="dark"] .highlight .ch { color: #ffd900 } /* Comment.Hashbang */ +html[data-theme="dark"] .highlight .cm { color: #ffd900 } /* Comment.Multiline */ +html[data-theme="dark"] .highlight .cp { color: #ffd900 } /* Comment.Preproc */ +html[data-theme="dark"] .highlight .cpf { color: #ffd900 } /* Comment.PreprocFile */ +html[data-theme="dark"] .highlight .c1 { color: #ffd900 } /* Comment.Single */ +html[data-theme="dark"] .highlight .cs { color: #ffd900 } /* Comment.Special */ +html[data-theme="dark"] .highlight .gd { color: #00e0e0 } /* Generic.Deleted */ +html[data-theme="dark"] .highlight .ge { font-style: italic } /* Generic.Emph */ +html[data-theme="dark"] .highlight .gh { color: #00e0e0 } /* Generic.Heading */ +html[data-theme="dark"] .highlight .gs { font-weight: bold } /* Generic.Strong */ +html[data-theme="dark"] .highlight .gu { color: #00e0e0 } /* Generic.Subheading */ +html[data-theme="dark"] .highlight .kc { color: #dcc6e0 } /* Keyword.Constant */ +html[data-theme="dark"] .highlight .kd { color: #dcc6e0 } /* Keyword.Declaration */ +html[data-theme="dark"] .highlight .kn { color: #dcc6e0 } /* Keyword.Namespace */ +html[data-theme="dark"] .highlight .kp { color: #dcc6e0 } /* Keyword.Pseudo */ +html[data-theme="dark"] .highlight .kr { color: #dcc6e0 } /* Keyword.Reserved */ +html[data-theme="dark"] .highlight .kt { color: #ffd900 } /* Keyword.Type */ +html[data-theme="dark"] .highlight .ld { color: #ffd900 } /* Literal.Date */ +html[data-theme="dark"] .highlight .m { color: #ffd900 } /* Literal.Number */ +html[data-theme="dark"] .highlight .s { color: #abe338 } /* Literal.String */ +html[data-theme="dark"] .highlight .na { color: #ffd900 } /* Name.Attribute */ +html[data-theme="dark"] .highlight .nb { color: #ffd900 } /* Name.Builtin */ +html[data-theme="dark"] .highlight .nc { color: #00e0e0 } /* Name.Class */ +html[data-theme="dark"] .highlight .no { color: #00e0e0 } /* Name.Constant */ +html[data-theme="dark"] .highlight .nd { color: #ffd900 } /* Name.Decorator */ +html[data-theme="dark"] .highlight .ni { color: #abe338 } /* Name.Entity */ +html[data-theme="dark"] .highlight .ne { color: #dcc6e0 } /* Name.Exception */ +html[data-theme="dark"] .highlight .nf { color: #00e0e0 } /* Name.Function */ +html[data-theme="dark"] .highlight .nl { color: #ffd900 } /* Name.Label */ +html[data-theme="dark"] .highlight .nn { color: #f8f8f2 } /* Name.Namespace */ +html[data-theme="dark"] .highlight .nx { color: #f8f8f2 } /* Name.Other */ 
+html[data-theme="dark"] .highlight .py { color: #00e0e0 } /* Name.Property */ +html[data-theme="dark"] .highlight .nt { color: #00e0e0 } /* Name.Tag */ +html[data-theme="dark"] .highlight .nv { color: #ffa07a } /* Name.Variable */ +html[data-theme="dark"] .highlight .ow { color: #dcc6e0 } /* Operator.Word */ +html[data-theme="dark"] .highlight .pm { color: #f8f8f2 } /* Punctuation.Marker */ +html[data-theme="dark"] .highlight .w { color: #f8f8f2 } /* Text.Whitespace */ +html[data-theme="dark"] .highlight .mb { color: #ffd900 } /* Literal.Number.Bin */ +html[data-theme="dark"] .highlight .mf { color: #ffd900 } /* Literal.Number.Float */ +html[data-theme="dark"] .highlight .mh { color: #ffd900 } /* Literal.Number.Hex */ +html[data-theme="dark"] .highlight .mi { color: #ffd900 } /* Literal.Number.Integer */ +html[data-theme="dark"] .highlight .mo { color: #ffd900 } /* Literal.Number.Oct */ +html[data-theme="dark"] .highlight .sa { color: #abe338 } /* Literal.String.Affix */ +html[data-theme="dark"] .highlight .sb { color: #abe338 } /* Literal.String.Backtick */ +html[data-theme="dark"] .highlight .sc { color: #abe338 } /* Literal.String.Char */ +html[data-theme="dark"] .highlight .dl { color: #abe338 } /* Literal.String.Delimiter */ +html[data-theme="dark"] .highlight .sd { color: #abe338 } /* Literal.String.Doc */ +html[data-theme="dark"] .highlight .s2 { color: #abe338 } /* Literal.String.Double */ +html[data-theme="dark"] .highlight .se { color: #abe338 } /* Literal.String.Escape */ +html[data-theme="dark"] .highlight .sh { color: #abe338 } /* Literal.String.Heredoc */ +html[data-theme="dark"] .highlight .si { color: #abe338 } /* Literal.String.Interpol */ +html[data-theme="dark"] .highlight .sx { color: #abe338 } /* Literal.String.Other */ +html[data-theme="dark"] .highlight .sr { color: #ffa07a } /* Literal.String.Regex */ +html[data-theme="dark"] .highlight .s1 { color: #abe338 } /* Literal.String.Single */ +html[data-theme="dark"] .highlight .ss { color: #00e0e0 } /* Literal.String.Symbol */ +html[data-theme="dark"] .highlight .bp { color: #ffd900 } /* Name.Builtin.Pseudo */ +html[data-theme="dark"] .highlight .fm { color: #00e0e0 } /* Name.Function.Magic */ +html[data-theme="dark"] .highlight .vc { color: #ffa07a } /* Name.Variable.Class */ +html[data-theme="dark"] .highlight .vg { color: #ffa07a } /* Name.Variable.Global */ +html[data-theme="dark"] .highlight .vi { color: #ffa07a } /* Name.Variable.Instance */ +html[data-theme="dark"] .highlight .vm { color: #ffd900 } /* Name.Variable.Magic */ +html[data-theme="dark"] .highlight .il { color: #ffd900 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/_static/sbt-webpack-macros.html b/_static/sbt-webpack-macros.html new file mode 100644 index 00000000..6cbf559f --- /dev/null +++ b/_static/sbt-webpack-macros.html @@ -0,0 +1,11 @@ + +{% macro head_pre_bootstrap() %} + +{% endmacro %} + +{% macro body_post() %} + +{% endmacro %} diff --git a/_static/scripts/bootstrap.js b/_static/scripts/bootstrap.js new file mode 100644 index 00000000..4e209b0e --- /dev/null +++ b/_static/scripts/bootstrap.js @@ -0,0 +1,3 @@ +/*! 
For license information please see bootstrap.js.LICENSE.txt */ +(()=>{"use strict";var t={d:(e,i)=>{for(var n in i)t.o(i,n)&&!t.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:i[n]})},o:(t,e)=>Object.prototype.hasOwnProperty.call(t,e),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},e={};t.r(e),t.d(e,{afterMain:()=>E,afterRead:()=>v,afterWrite:()=>C,applyStyles:()=>$,arrow:()=>J,auto:()=>a,basePlacements:()=>l,beforeMain:()=>y,beforeRead:()=>_,beforeWrite:()=>A,bottom:()=>s,clippingParents:()=>d,computeStyles:()=>it,createPopper:()=>Dt,createPopperBase:()=>St,createPopperLite:()=>$t,detectOverflow:()=>_t,end:()=>h,eventListeners:()=>st,flip:()=>bt,hide:()=>wt,left:()=>r,main:()=>w,modifierPhases:()=>O,offset:()=>Et,placements:()=>g,popper:()=>f,popperGenerator:()=>Lt,popperOffsets:()=>At,preventOverflow:()=>Tt,read:()=>b,reference:()=>p,right:()=>o,start:()=>c,top:()=>n,variationPlacements:()=>m,viewport:()=>u,write:()=>T});var i={};t.r(i),t.d(i,{Alert:()=>Oe,Button:()=>ke,Carousel:()=>ri,Collapse:()=>yi,Dropdown:()=>Vi,Modal:()=>xn,Offcanvas:()=>Vn,Popover:()=>fs,ScrollSpy:()=>Ts,Tab:()=>Ks,Toast:()=>lo,Tooltip:()=>hs});var n="top",s="bottom",o="right",r="left",a="auto",l=[n,s,o,r],c="start",h="end",d="clippingParents",u="viewport",f="popper",p="reference",m=l.reduce((function(t,e){return t.concat([e+"-"+c,e+"-"+h])}),[]),g=[].concat(l,[a]).reduce((function(t,e){return t.concat([e,e+"-"+c,e+"-"+h])}),[]),_="beforeRead",b="read",v="afterRead",y="beforeMain",w="main",E="afterMain",A="beforeWrite",T="write",C="afterWrite",O=[_,b,v,y,w,E,A,T,C];function x(t){return t?(t.nodeName||"").toLowerCase():null}function k(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function L(t){return t instanceof k(t).Element||t instanceof Element}function S(t){return t instanceof k(t).HTMLElement||t instanceof HTMLElement}function D(t){return"undefined"!=typeof ShadowRoot&&(t instanceof k(t).ShadowRoot||t instanceof ShadowRoot)}const $={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];S(s)&&x(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});S(n)&&x(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function I(t){return t.split("-")[0]}var N=Math.max,P=Math.min,M=Math.round;function j(){var t=navigator.userAgentData;return null!=t&&t.brands&&Array.isArray(t.brands)?t.brands.map((function(t){return t.brand+"/"+t.version})).join(" "):navigator.userAgent}function F(){return!/^((?!chrome|android).)*safari/i.test(j())}function H(t,e,i){void 0===e&&(e=!1),void 0===i&&(i=!1);var 
n=t.getBoundingClientRect(),s=1,o=1;e&&S(t)&&(s=t.offsetWidth>0&&M(n.width)/t.offsetWidth||1,o=t.offsetHeight>0&&M(n.height)/t.offsetHeight||1);var r=(L(t)?k(t):window).visualViewport,a=!F()&&i,l=(n.left+(a&&r?r.offsetLeft:0))/s,c=(n.top+(a&&r?r.offsetTop:0))/o,h=n.width/s,d=n.height/o;return{width:h,height:d,top:c,right:l+h,bottom:c+d,left:l,x:l,y:c}}function B(t){var e=H(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function W(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&D(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function z(t){return k(t).getComputedStyle(t)}function R(t){return["table","td","th"].indexOf(x(t))>=0}function q(t){return((L(t)?t.ownerDocument:t.document)||window.document).documentElement}function V(t){return"html"===x(t)?t:t.assignedSlot||t.parentNode||(D(t)?t.host:null)||q(t)}function Y(t){return S(t)&&"fixed"!==z(t).position?t.offsetParent:null}function K(t){for(var e=k(t),i=Y(t);i&&R(i)&&"static"===z(i).position;)i=Y(i);return i&&("html"===x(i)||"body"===x(i)&&"static"===z(i).position)?e:i||function(t){var e=/firefox/i.test(j());if(/Trident/i.test(j())&&S(t)&&"fixed"===z(t).position)return null;var i=V(t);for(D(i)&&(i=i.host);S(i)&&["html","body"].indexOf(x(i))<0;){var n=z(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function Q(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}function X(t,e,i){return N(t,P(e,i))}function U(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function G(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}const J={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,i=t.state,a=t.name,c=t.options,h=i.elements.arrow,d=i.modifiersData.popperOffsets,u=I(i.placement),f=Q(u),p=[r,o].indexOf(u)>=0?"height":"width";if(h&&d){var m=function(t,e){return U("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:G(t,l))}(c.padding,i),g=B(h),_="y"===f?n:r,b="y"===f?s:o,v=i.rects.reference[p]+i.rects.reference[f]-d[f]-i.rects.popper[p],y=d[f]-i.rects.reference[f],w=K(h),E=w?"y"===f?w.clientHeight||0:w.clientWidth||0:0,A=v/2-y/2,T=m[_],C=E-g[p]-m[b],O=E/2-g[p]/2+A,x=X(T,O,C),k=f;i.modifiersData[a]=((e={})[k]=x,e.centerOffset=x-O,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&W(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function Z(t){return t.split("-")[1]}var tt={top:"auto",right:"auto",bottom:"auto",left:"auto"};function et(t){var e,i=t.popper,a=t.popperRect,l=t.placement,c=t.variation,d=t.offsets,u=t.position,f=t.gpuAcceleration,p=t.adaptive,m=t.roundOffsets,g=t.isFixed,_=d.x,b=void 0===_?0:_,v=d.y,y=void 0===v?0:v,w="function"==typeof m?m({x:b,y}):{x:b,y};b=w.x,y=w.y;var E=d.hasOwnProperty("x"),A=d.hasOwnProperty("y"),T=r,C=n,O=window;if(p){var 
x=K(i),L="clientHeight",S="clientWidth";x===k(i)&&"static"!==z(x=q(i)).position&&"absolute"===u&&(L="scrollHeight",S="scrollWidth"),(l===n||(l===r||l===o)&&c===h)&&(C=s,y-=(g&&x===O&&O.visualViewport?O.visualViewport.height:x[L])-a.height,y*=f?1:-1),l!==r&&(l!==n&&l!==s||c!==h)||(T=o,b-=(g&&x===O&&O.visualViewport?O.visualViewport.width:x[S])-a.width,b*=f?1:-1)}var D,$=Object.assign({position:u},p&&tt),I=!0===m?function(t,e){var i=t.x,n=t.y,s=e.devicePixelRatio||1;return{x:M(i*s)/s||0,y:M(n*s)/s||0}}({x:b,y},k(i)):{x:b,y};return b=I.x,y=I.y,f?Object.assign({},$,((D={})[C]=A?"0":"",D[T]=E?"0":"",D.transform=(O.devicePixelRatio||1)<=1?"translate("+b+"px, "+y+"px)":"translate3d("+b+"px, "+y+"px, 0)",D)):Object.assign({},$,((e={})[C]=A?y+"px":"",e[T]=E?b+"px":"",e.transform="",e))}const it={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:I(e.placement),variation:Z(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s,isFixed:"fixed"===e.options.strategy};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,et(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,et(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}};var nt={passive:!0};const st={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=k(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,nt)})),a&&l.addEventListener("resize",i.update,nt),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,nt)})),a&&l.removeEventListener("resize",i.update,nt)}},data:{}};var ot={left:"right",right:"left",bottom:"top",top:"bottom"};function rt(t){return t.replace(/left|right|bottom|top/g,(function(t){return ot[t]}))}var at={start:"end",end:"start"};function lt(t){return t.replace(/start|end/g,(function(t){return at[t]}))}function ct(t){var e=k(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function ht(t){return H(q(t)).left+ct(t).scrollLeft}function dt(t){var e=z(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function ut(t){return["html","body","#document"].indexOf(x(t))>=0?t.ownerDocument.body:S(t)&&dt(t)?t:ut(V(t))}function ft(t,e){var i;void 0===e&&(e=[]);var n=ut(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=k(n),r=s?[o].concat(o.visualViewport||[],dt(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(ft(V(r)))}function pt(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function mt(t,e,i){return e===u?pt(function(t,e){var i=k(t),n=q(t),s=i.visualViewport,o=n.clientWidth,r=n.clientHeight,a=0,l=0;if(s){o=s.width,r=s.height;var c=F();(c||!c&&"fixed"===e)&&(a=s.offsetLeft,l=s.offsetTop)}return{width:o,height:r,x:a+ht(t),y:l}}(t,i)):L(e)?function(t,e){var i=H(t,!1,"fixed"===e);return 
i.top=i.top+t.clientTop,i.left=i.left+t.clientLeft,i.bottom=i.top+t.clientHeight,i.right=i.left+t.clientWidth,i.width=t.clientWidth,i.height=t.clientHeight,i.x=i.left,i.y=i.top,i}(e,i):pt(function(t){var e,i=q(t),n=ct(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=N(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=N(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+ht(t),l=-n.scrollTop;return"rtl"===z(s||i).direction&&(a+=N(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(q(t)))}function gt(t){var e,i=t.reference,a=t.element,l=t.placement,d=l?I(l):null,u=l?Z(l):null,f=i.x+i.width/2-a.width/2,p=i.y+i.height/2-a.height/2;switch(d){case n:e={x:f,y:i.y-a.height};break;case s:e={x:f,y:i.y+i.height};break;case o:e={x:i.x+i.width,y:p};break;case r:e={x:i.x-a.width,y:p};break;default:e={x:i.x,y:i.y}}var m=d?Q(d):null;if(null!=m){var g="y"===m?"height":"width";switch(u){case c:e[m]=e[m]-(i[g]/2-a[g]/2);break;case h:e[m]=e[m]+(i[g]/2-a[g]/2)}}return e}function _t(t,e){void 0===e&&(e={});var i=e,r=i.placement,a=void 0===r?t.placement:r,c=i.strategy,h=void 0===c?t.strategy:c,m=i.boundary,g=void 0===m?d:m,_=i.rootBoundary,b=void 0===_?u:_,v=i.elementContext,y=void 0===v?f:v,w=i.altBoundary,E=void 0!==w&&w,A=i.padding,T=void 0===A?0:A,C=U("number"!=typeof T?T:G(T,l)),O=y===f?p:f,k=t.rects.popper,D=t.elements[E?O:y],$=function(t,e,i,n){var s="clippingParents"===e?function(t){var e=ft(V(t)),i=["absolute","fixed"].indexOf(z(t).position)>=0&&S(t)?K(t):t;return L(i)?e.filter((function(t){return L(t)&&W(t,i)&&"body"!==x(t)})):[]}(t):[].concat(e),o=[].concat(s,[i]),r=o[0],a=o.reduce((function(e,i){var s=mt(t,i,n);return e.top=N(s.top,e.top),e.right=P(s.right,e.right),e.bottom=P(s.bottom,e.bottom),e.left=N(s.left,e.left),e}),mt(t,r,n));return a.width=a.right-a.left,a.height=a.bottom-a.top,a.x=a.left,a.y=a.top,a}(L(D)?D:D.contextElement||q(t.elements.popper),g,b,h),I=H(t.elements.reference),M=gt({reference:I,element:k,strategy:"absolute",placement:a}),j=pt(Object.assign({},k,M)),F=y===f?j:I,B={top:$.top-F.top+C.top,bottom:F.bottom-$.bottom+C.bottom,left:$.left-F.left+C.left,right:F.right-$.right+C.right},R=t.modifiersData.offset;if(y===f&&R){var Y=R[a];Object.keys(B).forEach((function(t){var e=[o,s].indexOf(t)>=0?1:-1,i=[n,s].indexOf(t)>=0?"y":"x";B[t]+=Y[i]*e}))}return B}const bt={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,h=t.name;if(!e.modifiersData[h]._skip){for(var d=i.mainAxis,u=void 0===d||d,f=i.altAxis,p=void 0===f||f,_=i.fallbackPlacements,b=i.padding,v=i.boundary,y=i.rootBoundary,w=i.altBoundary,E=i.flipVariations,A=void 0===E||E,T=i.allowedAutoPlacements,C=e.options.placement,O=I(C),x=_||(O!==C&&A?function(t){if(I(t)===a)return[];var e=rt(t);return[lt(t),e,lt(e)]}(C):[rt(C)]),k=[C].concat(x).reduce((function(t,i){return t.concat(I(i)===a?function(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,a=i.flipVariations,c=i.allowedAutoPlacements,h=void 0===c?g:c,d=Z(n),u=d?a?m:m.filter((function(t){return Z(t)===d})):l,f=u.filter((function(t){return h.indexOf(t)>=0}));0===f.length&&(f=u);var p=f.reduce((function(e,i){return e[i]=_t(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[I(i)],e}),{});return Object.keys(p).sort((function(t,e){return p[t]-p[e]}))}(e,{placement:i,boundary:v,rootBoundary:y,padding:b,flipVariations:A,allowedAutoPlacements:T}):i)}),[]),L=e.rects.reference,S=e.rects.popper,D=new 
Map,$=!0,N=k[0],P=0;P<k.length;P++){var M=k[P],j=I(M),F=Z(M)===c,H=[n,s].indexOf(j)>=0,B=H?"width":"height",W=_t(e,{placement:M,boundary:v,rootBoundary:y,altBoundary:w,padding:b}),z=H?F?o:r:F?s:n;L[B]>S[B]&&(z=rt(z));var R=rt(z),q=[];if(u&&q.push(W[j]<=0),p&&q.push(W[z]<=0,W[R]<=0),q.every((function(t){return t}))){N=M,$=!1;break}D.set(M,q)}if($)for(var V=function(t){var e=k.find((function(e){var i=D.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return N=e,"break"},Y=A?3:1;Y>0&&"break"!==V(Y);Y--);e.placement!==N&&(e.modifiersData[h]._skip=!0,e.placement=N,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function vt(t,e,i){return void 0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function yt(t){return[n,o,s,r].some((function(e){return t[e]>=0}))}const wt={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=_t(e,{elementContext:"reference"}),a=_t(e,{altBoundary:!0}),l=vt(r,n),c=vt(a,s,o),h=yt(l),d=yt(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:d},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":d})}},Et={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,i=t.options,s=t.name,a=i.offset,l=void 0===a?[0,0]:a,c=g.reduce((function(t,i){return t[i]=function(t,e,i){var s=I(t),a=[r,n].indexOf(s)>=0?-1:1,l="function"==typeof i?i(Object.assign({},e,{placement:t})):i,c=l[0],h=l[1];return c=c||0,h=(h||0)*a,[r,o].indexOf(s)>=0?{x:h,y:c}:{x:c,y:h}}(i,e.rects,l),t}),{}),h=c[e.placement],d=h.x,u=h.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=d,e.modifiersData.popperOffsets.y+=u),e.modifiersData[s]=c}},At={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=gt({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},Tt={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,i=t.options,a=t.name,l=i.mainAxis,h=void 0===l||l,d=i.altAxis,u=void 0!==d&&d,f=i.boundary,p=i.rootBoundary,m=i.altBoundary,g=i.padding,_=i.tether,b=void 0===_||_,v=i.tetherOffset,y=void 0===v?0:v,w=_t(e,{boundary:f,rootBoundary:p,padding:g,altBoundary:m}),E=I(e.placement),A=Z(e.placement),T=!A,C=Q(E),O="x"===C?"y":"x",x=e.modifiersData.popperOffsets,k=e.rects.reference,L=e.rects.popper,S="function"==typeof y?y(Object.assign({},e.rects,{placement:e.placement})):y,D="number"==typeof S?{mainAxis:S,altAxis:S}:Object.assign({mainAxis:0,altAxis:0},S),$=e.modifiersData.offset?e.modifiersData.offset[e.placement]:null,M={x:0,y:0};if(x){if(h){var j,F="y"===C?n:r,H="y"===C?s:o,W="y"===C?"height":"width",z=x[C],R=z+w[F],q=z-w[H],V=b?-L[W]/2:0,Y=A===c?k[W]:L[W],U=A===c?-L[W]:-k[W],G=e.elements.arrow,J=b&&G?B(G):{width:0,height:0},tt=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},et=tt[F],it=tt[H],nt=X(0,k[W],J[W]),st=T?k[W]/2-V-nt-et-D.mainAxis:Y-nt-et-D.mainAxis,ot=T?-k[W]/2+V+nt+it+D.mainAxis:U+nt+it+D.mainAxis,rt=e.elements.arrow&&K(e.elements.arrow),at=rt?"y"===C?rt.clientTop||0:rt.clientLeft||0:0,lt=null!=(j=null==$?void 0:$[C])?j:0,ct=z+ot-lt,ht=X(b?P(R,z+st-lt-at):R,z,b?N(q,ct):q);x[C]=ht,M[C]=ht-z}if(u){var
dt,ut="x"===C?n:r,ft="x"===C?s:o,pt=x[O],mt="y"===O?"height":"width",gt=pt+w[ut],bt=pt-w[ft],vt=-1!==[n,r].indexOf(E),yt=null!=(dt=null==$?void 0:$[O])?dt:0,wt=vt?gt:pt-k[mt]-L[mt]-yt+D.altAxis,Et=vt?pt+k[mt]+L[mt]-yt-D.altAxis:bt,At=b&&vt?function(t,e,i){var n=X(t,e,i);return n>i?i:n}(wt,pt,Et):X(b?wt:gt,pt,b?Et:bt);x[O]=At,M[O]=At-pt}e.modifiersData[a]=M}},requiresIfExists:["offset"]};function Ct(t,e,i){void 0===i&&(i=!1);var n,s,o=S(e),r=S(e)&&function(t){var e=t.getBoundingClientRect(),i=M(e.width)/t.offsetWidth||1,n=M(e.height)/t.offsetHeight||1;return 1!==i||1!==n}(e),a=q(e),l=H(t,r,i),c={scrollLeft:0,scrollTop:0},h={x:0,y:0};return(o||!o&&!i)&&(("body"!==x(e)||dt(a))&&(c=(n=e)!==k(n)&&S(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:ct(n)),S(e)?((h=H(e,!0)).x+=e.clientLeft,h.y+=e.clientTop):a&&(h.x=ht(a))),{x:l.left+c.scrollLeft-h.x,y:l.top+c.scrollTop-h.y,width:l.width,height:l.height}}function Ot(t){var e=new Map,i=new Set,n=[];function s(t){i.add(t.name),[].concat(t.requires||[],t.requiresIfExists||[]).forEach((function(t){if(!i.has(t)){var n=e.get(t);n&&s(n)}})),n.push(t)}return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||s(t)})),n}var xt={placement:"bottom",modifiers:[],strategy:"absolute"};function kt(){for(var t=arguments.length,e=new Array(t),i=0;iIt.has(t)&&It.get(t).get(e)||null,remove(t,e){if(!It.has(t))return;const i=It.get(t);i.delete(e),0===i.size&&It.delete(t)}},Pt="transitionend",Mt=t=>(t&&window.CSS&&window.CSS.escape&&(t=t.replace(/#([^\s"#']+)/g,((t,e)=>`#${CSS.escape(e)}`))),t),jt=t=>{t.dispatchEvent(new Event(Pt))},Ft=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),Ht=t=>Ft(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(Mt(t)):null,Bt=t=>{if(!Ft(t)||0===t.getClientRects().length)return!1;const e="visible"===getComputedStyle(t).getPropertyValue("visibility"),i=t.closest("details:not([open])");if(!i)return e;if(i!==t){const e=t.closest("summary");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return e},Wt=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),zt=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?zt(t.parentNode):null},Rt=()=>{},qt=t=>{t.offsetHeight},Vt=()=>window.jQuery&&!document.body.hasAttribute("data-bs-no-jquery")?window.jQuery:null,Yt=[],Kt=()=>"rtl"===document.documentElement.dir,Qt=t=>{var e;e=()=>{const e=Vt();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(Yt.length||document.addEventListener("DOMContentLoaded",(()=>{for(const t of Yt)t()})),Yt.push(e)):e()},Xt=(t,e=[],i=t)=>"function"==typeof t?t(...e):i,Ut=(t,e,i=!0)=>{if(!i)return void Xt(t);const n=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let s=!1;const o=({target:i})=>{i===e&&(s=!0,e.removeEventListener(Pt,o),Xt(t))};e.addEventListener(Pt,o),setTimeout((()=>{s||jt(e)}),n)},Gt=(t,e,i,n)=>{const s=t.length;let 
o=t.indexOf(e);return-1===o?!i&&n?t[s-1]:t[0]:(o+=i?1:-1,n&&(o=(o+s)%s),t[Math.max(0,Math.min(o,s-1))])},Jt=/[^.]*(?=\..*)\.|.*/,Zt=/\..*/,te=/::\d+$/,ee={};let ie=1;const ne={mouseenter:"mouseover",mouseleave:"mouseout"},se=new Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function oe(t,e){return e&&`${e}::${ie++}`||t.uidEvent||ie++}function re(t){const e=oe(t);return t.uidEvent=e,ee[e]=ee[e]||{},ee[e]}function ae(t,e,i=null){return Object.values(t).find((t=>t.callable===e&&t.delegationSelector===i))}function le(t,e,i){const n="string"==typeof e,s=n?i:e||i;let o=ue(t);return se.has(o)||(o=t),[n,s,o]}function ce(t,e,i,n,s){if("string"!=typeof e||!t)return;let[o,r,a]=le(e,i,n);if(e in ne){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};r=t(r)}const l=re(t),c=l[a]||(l[a]={}),h=ae(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const d=oe(r,e.replace(Jt,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return pe(s,{delegateTarget:r}),n.oneOff&&fe.off(t,s.type,e,i),i.apply(r,[s])}}(t,i,r):function(t,e){return function i(n){return pe(n,{delegateTarget:t}),i.oneOff&&fe.off(t,n.type,e),e.apply(t,[n])}}(t,r);u.delegationSelector=o?i:null,u.callable=r,u.oneOff=s,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function he(t,e,i,n,s){const o=ae(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function de(t,e,i,n){const s=e[i]||{};for(const[o,r]of Object.entries(s))o.includes(n)&&he(t,e,i,r.callable,r.delegationSelector)}function ue(t){return t=t.replace(Zt,""),ne[t]||t}const fe={on(t,e,i,n){ce(t,e,i,n,!1)},one(t,e,i,n){ce(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=le(e,i,n),a=r!==e,l=re(t),c=l[r]||{},h=e.startsWith(".");if(void 0===o){if(h)for(const i of Object.keys(l))de(t,l,i,e.slice(1));for(const[i,n]of Object.entries(c)){const s=i.replace(te,"");a&&!e.includes(s)||he(t,l,r,n.callable,n.delegationSelector)}}else{if(!Object.keys(c).length)return;he(t,l,r,o,s?i:null)}},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=Vt();let s=null,o=!0,r=!0,a=!1;e!==ue(e)&&n&&(s=n.Event(e,i),n(t).trigger(s),o=!s.isPropagationStopped(),r=!s.isImmediatePropagationStopped(),a=s.isDefaultPrevented());const l=pe(new Event(e,{bubbles:o,cancelable:!0}),i);return a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&s&&s.preventDefault(),l}};function pe(t,e={}){for(const[i,n]of Object.entries(e))try{t[i]=n}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>n})}return t}function me(t){if("true"===t)return!0;if("false"===t)return!1;if(t===Number(t).toString())return Number(t);if(""===t||"null"===t)return null;if("string"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function ge(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}const 
_e={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${ge(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${ge(e)}`)},getDataAttributes(t){if(!t)return{};const e={},i=Object.keys(t.dataset).filter((t=>t.startsWith("bs")&&!t.startsWith("bsConfig")));for(const n of i){let i=n.replace(/^bs/,"");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=me(t.dataset[n])}return e},getDataAttribute:(t,e)=>me(t.getAttribute(`data-bs-${ge(e)}`))};class be{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}_getConfig(t){return t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=Ft(e)?_e.getDataAttribute(e,"config"):{};return{...this.constructor.Default,..."object"==typeof i?i:{},...Ft(e)?_e.getDataAttributes(e):{},..."object"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const[n,s]of Object.entries(e)){const e=t[n],o=Ft(e)?"element":null==(i=e)?`${i}`:Object.prototype.toString.call(i).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(s).test(o))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option "${n}" provided type "${o}" but expected type "${s}".`)}var i}}class ve extends be{constructor(t,e){super(),(t=Ht(t))&&(this._element=t,this._config=this._getConfig(e),Nt.set(this._element,this.constructor.DATA_KEY,this))}dispose(){Nt.remove(this._element,this.constructor.DATA_KEY),fe.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){Ut(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return Nt.get(Ht(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.3.2"}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}static eventName(t){return`${t}${this.EVENT_KEY}`}}const ye=t=>{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i=`#${i.split("#")[1]}`),e=i&&"#"!==i?Mt(i.trim()):null}return e},we={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let n=t.parentNode.closest(e);for(;n;)i.push(n),n=n.parentNode.closest(e);return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map((t=>`${t}:not([tabindex^="-"])`)).join(",");return this.find(e,t).filter((t=>!Wt(t)&&Bt(t)))},getSelectorFromElement(t){const e=ye(t);return e&&we.findOne(e)?e:null},getElementFromSelector(t){const e=ye(t);return e?we.findOne(e):null},getMultipleElementsFromSelector(t){const e=ye(t);return e?we.find(e):[]}},Ee=(t,e="hide")=>{const 
i=`click.dismiss${t.EVENT_KEY}`,n=t.NAME;fe.on(document,i,`[data-bs-dismiss="${n}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),Wt(this))return;const s=we.getElementFromSelector(this)||this.closest(`.${n}`);t.getOrCreateInstance(s)[e]()}))},Ae=".bs.alert",Te=`close${Ae}`,Ce=`closed${Ae}`;class Oe extends ve{static get NAME(){return"alert"}close(){if(fe.trigger(this._element,Te).defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),fe.trigger(this._element,Ce),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=Oe.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}Ee(Oe,"close"),Qt(Oe);const xe='[data-bs-toggle="button"]';class ke extends ve{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=ke.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}fe.on(document,"click.bs.button.data-api",xe,(t=>{t.preventDefault();const e=t.target.closest(xe);ke.getOrCreateInstance(e).toggle()})),Qt(ke);const Le=".bs.swipe",Se=`touchstart${Le}`,De=`touchmove${Le}`,$e=`touchend${Le}`,Ie=`pointerdown${Le}`,Ne=`pointerup${Le}`,Pe={endCallback:null,leftCallback:null,rightCallback:null},Me={endCallback:"(function|null)",leftCallback:"(function|null)",rightCallback:"(function|null)"};class je extends be{constructor(t,e){super(),this._element=t,t&&je.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return Pe}static get DefaultType(){return Me}static get NAME(){return"swipe"}dispose(){fe.off(this._element,Le)}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),Xt(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&Xt(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(fe.on(this._element,Ie,(t=>this._start(t))),fe.on(this._element,Ne,(t=>this._end(t))),this._element.classList.add("pointer-event")):(fe.on(this._element,Se,(t=>this._start(t))),fe.on(this._element,De,(t=>this._move(t))),fe.on(this._element,$e,(t=>this._end(t))))}_eventIsPointerPenTouch(t){return this._supportPointerEvents&&("pen"===t.pointerType||"touch"===t.pointerType)}static isSupported(){return"ontouchstart"in document.documentElement||navigator.maxTouchPoints>0}}const Fe=".bs.carousel",He=".data-api",Be="next",We="prev",ze="left",Re="right",qe=`slide${Fe}`,Ve=`slid${Fe}`,Ye=`keydown${Fe}`,Ke=`mouseenter${Fe}`,Qe=`mouseleave${Fe}`,Xe=`dragstart${Fe}`,Ue=`load${Fe}${He}`,Ge=`click${Fe}${He}`,Je="carousel",Ze="active",ti=".active",ei=".carousel-item",ii=ti+ei,ni={ArrowLeft:Re,ArrowRight:ze},si={interval:5e3,keyboard:!0,pause:"hover",ride:!1,touch:!0,wrap:!0},oi={interval:"(number|boolean)",keyboard:"boolean",pause:"(string|boolean)",ride:"(boolean|string)",touch:"boolean",wrap:"boolean"};class ri 
extends ve{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=we.findOne(".carousel-indicators",this._element),this._addEventListeners(),this._config.ride===Je&&this.cycle()}static get Default(){return si}static get DefaultType(){return oi}static get NAME(){return"carousel"}next(){this._slide(Be)}nextWhenVisible(){!document.hidden&&Bt(this._element)&&this.next()}prev(){this._slide(We)}pause(){this._isSliding&&jt(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval((()=>this.nextWhenVisible()),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?fe.one(this._element,Ve,(()=>this.cycle())):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void fe.one(this._element,Ve,(()=>this.to(t)));const i=this._getItemIndex(this._getActive());if(i===t)return;const n=t>i?Be:We;this._slide(n,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&fe.on(this._element,Ye,(t=>this._keydown(t))),"hover"===this._config.pause&&(fe.on(this._element,Ke,(()=>this.pause())),fe.on(this._element,Qe,(()=>this._maybeEnableCycle()))),this._config.touch&&je.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of we.find(".carousel-item img",this._element))fe.on(t,Xe,(t=>t.preventDefault()));const t={leftCallback:()=>this._slide(this._directionToOrder(ze)),rightCallback:()=>this._slide(this._directionToOrder(Re)),endCallback:()=>{"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((()=>this._maybeEnableCycle()),500+this._config.interval))}};this._swipeHelper=new je(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=ni[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=we.findOne(ti,this._indicatorsElement);e.classList.remove(Ze),e.removeAttribute("aria-current");const i=we.findOne(`[data-bs-slide-to="${t}"]`,this._indicatorsElement);i&&(i.classList.add(Ze),i.setAttribute("aria-current","true"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),n=t===Be,s=e||Gt(this._getItems(),i,n,this._config.wrap);if(s===i)return;const o=this._getItemIndex(s),r=e=>fe.trigger(this._element,e,{relatedTarget:s,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r(qe).defaultPrevented)return;if(!i||!s)return;const a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=s;const l=n?"carousel-item-start":"carousel-item-end",c=n?"carousel-item-next":"carousel-item-prev";s.classList.add(c),qt(s),i.classList.add(l),s.classList.add(l),this._queueCallback((()=>{s.classList.remove(l,c),s.classList.add(Ze),i.classList.remove(Ze,c,l),this._isSliding=!1,r(Ve)}),i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains("slide")}_getActive(){return 
we.findOne(ii,this._element)}_getItems(){return we.find(ei,this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return Kt()?t===ze?We:Be:t===ze?Be:We}_orderToDirection(t){return Kt()?t===We?ze:Re:t===We?Re:ze}static jQueryInterface(t){return this.each((function(){const e=ri.getOrCreateInstance(this,t);if("number"!=typeof t){if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}else e.to(t)}))}}fe.on(document,Ge,"[data-bs-slide], [data-bs-slide-to]",(function(t){const e=we.getElementFromSelector(this);if(!e||!e.classList.contains(Je))return;t.preventDefault();const i=ri.getOrCreateInstance(e),n=this.getAttribute("data-bs-slide-to");return n?(i.to(n),void i._maybeEnableCycle()):"next"===_e.getDataAttribute(this,"slide")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),fe.on(window,Ue,(()=>{const t=we.find('[data-bs-ride="carousel"]');for(const e of t)ri.getOrCreateInstance(e)})),Qt(ri);const ai=".bs.collapse",li=`show${ai}`,ci=`shown${ai}`,hi=`hide${ai}`,di=`hidden${ai}`,ui=`click${ai}.data-api`,fi="show",pi="collapse",mi="collapsing",gi=`:scope .${pi} .${pi}`,_i='[data-bs-toggle="collapse"]',bi={parent:null,toggle:!0},vi={parent:"(null|element)",toggle:"boolean"};class yi extends ve{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=we.find(_i);for(const t of i){const e=we.getSelectorFromElement(t),i=we.find(e).filter((t=>t===this._element));null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return bi}static get DefaultType(){return vi}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(".collapse.show, .collapse.collapsing").filter((t=>t!==this._element)).map((t=>yi.getOrCreateInstance(t,{toggle:!1})))),t.length&&t[0]._isTransitioning)return;if(fe.trigger(this._element,li).defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove(pi),this._element.classList.add(mi),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const i=`scroll${e[0].toUpperCase()+e.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(mi),this._element.classList.add(pi,fi),this._element.style[e]="",fe.trigger(this._element,ci)}),this._element,!0),this._element.style[e]=`${this._element[i]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(fe.trigger(this._element,hi).defaultPrevented)return;const t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,qt(this._element),this._element.classList.add(mi),this._element.classList.remove(pi,fi);for(const t of this._triggerArray){const e=we.getElementFromSelector(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0,this._element.style[t]="",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(mi),this._element.classList.add(pi),fe.trigger(this._element,di)}),this._element,!0)}_isShown(t=this._element){return t.classList.contains(fi)}_configAfterMerge(t){return 
t.toggle=Boolean(t.toggle),t.parent=Ht(t.parent),t}_getDimension(){return this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren(_i);for(const e of t){const t=we.getElementFromSelector(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const e=we.find(gi,this._config.parent);return we.find(t,this._config.parent).filter((t=>!e.includes(t)))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle("collapsed",!e),i.setAttribute("aria-expanded",e)}static jQueryInterface(t){const e={};return"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=yi.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}fe.on(document,ui,_i,(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();for(const t of we.getMultipleElementsFromSelector(this))yi.getOrCreateInstance(t,{toggle:!1}).toggle()})),Qt(yi);const wi="dropdown",Ei=".bs.dropdown",Ai=".data-api",Ti="ArrowUp",Ci="ArrowDown",Oi=`hide${Ei}`,xi=`hidden${Ei}`,ki=`show${Ei}`,Li=`shown${Ei}`,Si=`click${Ei}${Ai}`,Di=`keydown${Ei}${Ai}`,$i=`keyup${Ei}${Ai}`,Ii="show",Ni='[data-bs-toggle="dropdown"]:not(.disabled):not(:disabled)',Pi=`${Ni}.${Ii}`,Mi=".dropdown-menu",ji=Kt()?"top-end":"top-start",Fi=Kt()?"top-start":"top-end",Hi=Kt()?"bottom-end":"bottom-start",Bi=Kt()?"bottom-start":"bottom-end",Wi=Kt()?"left-start":"right-start",zi=Kt()?"right-start":"left-start",Ri={autoClose:!0,boundary:"clippingParents",display:"dynamic",offset:[0,2],popperConfig:null,reference:"toggle"},qi={autoClose:"(boolean|string)",boundary:"(string|element)",display:"string",offset:"(array|string|function)",popperConfig:"(null|object|function)",reference:"(string|element|object)"};class Vi extends ve{constructor(t,e){super(t,e),this._popper=null,this._parent=this._element.parentNode,this._menu=we.next(this._element,Mi)[0]||we.prev(this._element,Mi)[0]||we.findOne(Mi,this._parent),this._inNavbar=this._detectNavbar()}static get Default(){return Ri}static get DefaultType(){return qi}static get NAME(){return wi}toggle(){return this._isShown()?this.hide():this.show()}show(){if(Wt(this._element)||this._isShown())return;const t={relatedTarget:this._element};if(!fe.trigger(this._element,ki,t).defaultPrevented){if(this._createPopper(),"ontouchstart"in document.documentElement&&!this._parent.closest(".navbar-nav"))for(const t of[].concat(...document.body.children))fe.on(t,"mouseover",Rt);this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.add(Ii),this._element.classList.add(Ii),fe.trigger(this._element,Li,t)}}hide(){if(Wt(this._element)||!this._isShown())return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){if(!fe.trigger(this._element,Oi,t).defaultPrevented){if("ontouchstart"in document.documentElement)for(const t 
of[].concat(...document.body.children))fe.off(t,"mouseover",Rt);this._popper&&this._popper.destroy(),this._menu.classList.remove(Ii),this._element.classList.remove(Ii),this._element.setAttribute("aria-expanded","false"),_e.removeDataAttribute(this._menu,"popper"),fe.trigger(this._element,xi,t)}}_getConfig(t){if("object"==typeof(t=super._getConfig(t)).reference&&!Ft(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new TypeError(`${wi.toUpperCase()}: Option "reference" provided type "object" without a required "getBoundingClientRect" method.`);return t}_createPopper(){if(void 0===e)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");let t=this._element;"parent"===this._config.reference?t=this._parent:Ft(this._config.reference)?t=Ht(this._config.reference):"object"==typeof this._config.reference&&(t=this._config.reference);const i=this._getPopperConfig();this._popper=Dt(t,this._menu,i)}_isShown(){return this._menu.classList.contains(Ii)}_getPlacement(){const t=this._parent;if(t.classList.contains("dropend"))return Wi;if(t.classList.contains("dropstart"))return zi;if(t.classList.contains("dropup-center"))return"top";if(t.classList.contains("dropdown-center"))return"bottom";const e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?Fi:ji:e?Bi:Hi}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return(this._inNavbar||"static"===this._config.display)&&(_e.setDataAttribute(this._menu,"popper","static"),t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,...Xt(this._config.popperConfig,[t])}}_selectMenuItem({key:t,target:e}){const i=we.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter((t=>Bt(t)));i.length&&Gt(i,e,t===Ci,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=Vi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(2===t.button||"keyup"===t.type&&"Tab"!==t.key)return;const e=we.find(Pi);for(const i of e){const e=Vi.getInstance(i);if(!e||!1===e._config.autoClose)continue;const n=t.composedPath(),s=n.includes(e._menu);if(n.includes(e._element)||"inside"===e._config.autoClose&&!s||"outside"===e._config.autoClose&&s)continue;if(e._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const o={relatedTarget:e._element};"click"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i="Escape"===t.key,n=[Ti,Ci].includes(t.key);if(!n&&!i)return;if(e&&!i)return;t.preventDefault();const s=this.matches(Ni)?this:we.prev(this,Ni)[0]||we.next(this,Ni)[0]||we.findOne(Ni,t.delegateTarget.parentNode),o=Vi.getOrCreateInstance(s);if(n)return t.stopPropagation(),o.show(),void 
o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),s.focus())}}fe.on(document,Di,Ni,Vi.dataApiKeydownHandler),fe.on(document,Di,Mi,Vi.dataApiKeydownHandler),fe.on(document,Si,Vi.clearMenus),fe.on(document,$i,Vi.clearMenus),fe.on(document,Si,Ni,(function(t){t.preventDefault(),Vi.getOrCreateInstance(this).toggle()})),Qt(Vi);const Yi="backdrop",Ki="show",Qi=`mousedown.bs.${Yi}`,Xi={className:"modal-backdrop",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:"body"},Ui={className:"string",clickCallback:"(function|null)",isAnimated:"boolean",isVisible:"boolean",rootElement:"(element|string)"};class Gi extends be{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return Xi}static get DefaultType(){return Ui}static get NAME(){return Yi}show(t){if(!this._config.isVisible)return void Xt(t);this._append();const e=this._getElement();this._config.isAnimated&&qt(e),e.classList.add(Ki),this._emulateAnimation((()=>{Xt(t)}))}hide(t){this._config.isVisible?(this._getElement().classList.remove(Ki),this._emulateAnimation((()=>{this.dispose(),Xt(t)}))):Xt(t)}dispose(){this._isAppended&&(fe.off(this._element,Qi),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_configAfterMerge(t){return t.rootElement=Ht(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),fe.on(t,Qi,(()=>{Xt(this._config.clickCallback)})),this._isAppended=!0}_emulateAnimation(t){Ut(t,this._getElement(),this._config.isAnimated)}}const Ji=".bs.focustrap",Zi=`focusin${Ji}`,tn=`keydown.tab${Ji}`,en="backward",nn={autofocus:!0,trapElement:null},sn={autofocus:"boolean",trapElement:"element"};class on extends be{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return nn}static get DefaultType(){return sn}static get NAME(){return"focustrap"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),fe.off(document,Ji),fe.on(document,Zi,(t=>this._handleFocusin(t))),fe.on(document,tn,(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,fe.off(document,Ji))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const i=we.focusableChildren(e);0===i.length?e.focus():this._lastTabNavDirection===en?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?en:"forward")}}const rn=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",an=".sticky-top",ln="padding-right",cn="margin-right";class hn{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,ln,(e=>e+t)),this._setElementAttributes(rn,ln,(e=>e+t)),this._setElementAttributes(an,cn,(e=>e-t))}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,ln),this._resetElementAttributes(rn,ln),this._resetElementAttributes(an,cn)}isOverflowing(){return this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const 
n=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,`${i(Number.parseFloat(s))}px`)}))}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&_e.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=_e.getDataAttribute(t,e);null!==i?(_e.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)}))}_applyManipulationCallback(t,e){if(Ft(t))e(t);else for(const i of we.find(t,this._element))e(i)}}const dn=".bs.modal",un=`hide${dn}`,fn=`hidePrevented${dn}`,pn=`hidden${dn}`,mn=`show${dn}`,gn=`shown${dn}`,_n=`resize${dn}`,bn=`click.dismiss${dn}`,vn=`mousedown.dismiss${dn}`,yn=`keydown.dismiss${dn}`,wn=`click${dn}.data-api`,En="modal-open",An="show",Tn="modal-static",Cn={backdrop:!0,focus:!0,keyboard:!0},On={backdrop:"(boolean|string)",focus:"boolean",keyboard:"boolean"};class xn extends ve{constructor(t,e){super(t,e),this._dialog=we.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new hn,this._addEventListeners()}static get Default(){return Cn}static get DefaultType(){return On}static get NAME(){return"modal"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||fe.trigger(this._element,mn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add(En),this._adjustDialog(),this._backdrop.show((()=>this._showElement(t))))}hide(){this._isShown&&!this._isTransitioning&&(fe.trigger(this._element,un).defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove(An),this._queueCallback((()=>this._hideModal()),this._element,this._isAnimated())))}dispose(){fe.off(window,dn),fe.off(this._dialog,dn),this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new Gi({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new on({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0;const 
e=we.findOne(".modal-body",this._dialog);e&&(e.scrollTop=0),qt(this._element),this._element.classList.add(An),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,fe.trigger(this._element,gn,{relatedTarget:t})}),this._dialog,this._isAnimated())}_addEventListeners(){fe.on(this._element,yn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():this._triggerBackdropTransition())})),fe.on(window,_n,(()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()})),fe.on(this._element,vn,(t=>{fe.one(this._element,bn,(e=>{this._element===t.target&&this._element===e.target&&("static"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())}))}))}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(En),this._resetAdjustments(),this._scrollBar.reset(),fe.trigger(this._element,pn)}))}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(fe.trigger(this._element,fn).defaultPrevented)return;const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;"hidden"===e||this._element.classList.contains(Tn)||(t||(this._element.style.overflowY="hidden"),this._element.classList.add(Tn),this._queueCallback((()=>{this._element.classList.remove(Tn),this._queueCallback((()=>{this._element.style.overflowY=e}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=Kt()?"paddingLeft":"paddingRight";this._element.style[t]=`${e}px`}if(!i&&t){const t=Kt()?"paddingRight":"paddingLeft";this._element.style[t]=`${e}px`}}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=xn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}fe.on(document,wn,'[data-bs-toggle="modal"]',(function(t){const e=we.getElementFromSelector(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),fe.one(e,mn,(t=>{t.defaultPrevented||fe.one(e,pn,(()=>{Bt(this)&&this.focus()}))}));const i=we.findOne(".modal.show");i&&xn.getInstance(i).hide(),xn.getOrCreateInstance(e).toggle(this)})),Ee(xn),Qt(xn);const kn=".bs.offcanvas",Ln=".data-api",Sn=`load${kn}${Ln}`,Dn="show",$n="showing",In="hiding",Nn=".offcanvas.show",Pn=`show${kn}`,Mn=`shown${kn}`,jn=`hide${kn}`,Fn=`hidePrevented${kn}`,Hn=`hidden${kn}`,Bn=`resize${kn}`,Wn=`click${kn}${Ln}`,zn=`keydown.dismiss${kn}`,Rn={backdrop:!0,keyboard:!0,scroll:!1},qn={backdrop:"(boolean|string)",keyboard:"boolean",scroll:"boolean"};class Vn extends ve{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return Rn}static get DefaultType(){return qn}static get NAME(){return"offcanvas"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||fe.trigger(this._element,Pn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._backdrop.show(),this._config.scroll||(new 
hn).hide(),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add($n),this._queueCallback((()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add(Dn),this._element.classList.remove($n),fe.trigger(this._element,Mn,{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(fe.trigger(this._element,jn).defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add(In),this._backdrop.hide(),this._queueCallback((()=>{this._element.classList.remove(Dn,In),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._config.scroll||(new hn).reset(),fe.trigger(this._element,Hn)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new Gi({className:"offcanvas-backdrop",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{"static"!==this._config.backdrop?this.hide():fe.trigger(this._element,Fn)}:null})}_initializeFocusTrap(){return new on({trapElement:this._element})}_addEventListeners(){fe.on(this._element,zn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():fe.trigger(this._element,Fn))}))}static jQueryInterface(t){return this.each((function(){const e=Vn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}fe.on(document,Wn,'[data-bs-toggle="offcanvas"]',(function(t){const e=we.getElementFromSelector(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),Wt(this))return;fe.one(e,Hn,(()=>{Bt(this)&&this.focus()}));const i=we.findOne(Nn);i&&i!==e&&Vn.getInstance(i).hide(),Vn.getOrCreateInstance(e).toggle(this)})),fe.on(window,Sn,(()=>{for(const t of we.find(Nn))Vn.getOrCreateInstance(t).show()})),fe.on(window,Bn,(()=>{for(const t of we.find("[aria-modal][class*=show][class*=offcanvas-]"))"fixed"!==getComputedStyle(t).position&&Vn.getOrCreateInstance(t).hide()})),Ee(Vn),Qt(Vn);const Yn={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},Kn=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Qn=/^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i,Xn=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!Kn.has(i)||Boolean(Qn.test(t.nodeValue)):e.filter((t=>t instanceof RegExp)).some((t=>t.test(i)))},Un={allowList:Yn,content:{},extraClass:"",html:!1,sanitize:!0,sanitizeFn:null,template:"
"},Gn={allowList:"object",content:"object",extraClass:"(string|function)",html:"boolean",sanitize:"boolean",sanitizeFn:"(null|function)",template:"string"},Jn={entry:"(string|element|function|null)",selector:"(string|element)"};class Zn extends be{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return Un}static get DefaultType(){return Gn}static get NAME(){return"TemplateFactory"}getContent(){return Object.values(this._config.content).map((t=>this._resolvePossibleFunction(t))).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement("div");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(" ")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},Jn)}_setContent(t,e,i){const n=we.findOne(i,t);n&&((e=this._resolvePossibleFunction(e))?Ft(e)?this._putElementInTemplate(Ht(e),n):this._config.html?n.innerHTML=this._maybeSanitize(e):n.textContent=e:n.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(const t of s){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const n=[].concat(...t.attributes),s=[].concat(e["*"]||[],e[i]||[]);for(const e of n)Xn(e,s)||t.removeAttribute(e.nodeName)}return n.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return Xt(t,[this])}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML="",void e.append(t);e.textContent=t.textContent}}const ts=new Set(["sanitize","allowList","sanitizeFn"]),es="fade",is="show",ns=".modal",ss="hide.bs.modal",os="hover",rs="focus",as={AUTO:"auto",TOP:"top",RIGHT:Kt()?"left":"right",BOTTOM:"bottom",LEFT:Kt()?"right":"left"},ls={allowList:Yn,animation:!0,boundary:"clippingParents",container:!1,customClass:"",delay:0,fallbackPlacements:["top","right","bottom","left"],html:!1,offset:[0,6],placement:"top",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'',title:"",trigger:"hover focus"},cs={allowList:"object",animation:"boolean",boundary:"(string|element)",container:"(string|element|boolean)",customClass:"(string|function)",delay:"(number|object)",fallbackPlacements:"array",html:"boolean",offset:"(array|string|function)",placement:"(string|function)",popperConfig:"(null|object|function)",sanitize:"boolean",sanitizeFn:"(null|function)",selector:"(string|boolean)",template:"string",title:"(string|element|function)",trigger:"string"};class hs extends ve{constructor(t,i){if(void 0===e)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t,i),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return ls}static get DefaultType(){return cs}static get 
NAME(){return"tooltip"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),fe.off(this._element.closest(ns),ss,this._hideModalHandler),this._element.getAttribute("data-bs-original-title")&&this._element.setAttribute("title",this._element.getAttribute("data-bs-original-title")),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this._isWithContent()||!this._isEnabled)return;const t=fe.trigger(this._element,this.constructor.eventName("show")),e=(zt(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this._disposePopper();const i=this._getTipElement();this._element.setAttribute("aria-describedby",i.getAttribute("id"));const{container:n}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(n.append(i),fe.trigger(this._element,this.constructor.eventName("inserted"))),this._popper=this._createPopper(i),i.classList.add(is),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.on(t,"mouseover",Rt);this._queueCallback((()=>{fe.trigger(this._element,this.constructor.eventName("shown")),!1===this._isHovered&&this._leave(),this._isHovered=!1}),this.tip,this._isAnimated())}hide(){if(this._isShown()&&!fe.trigger(this._element,this.constructor.eventName("hide")).defaultPrevented){if(this._getTipElement().classList.remove(is),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))fe.off(t,"mouseover",Rt);this._activeTrigger.click=!1,this._activeTrigger[rs]=!1,this._activeTrigger[os]=!1,this._isHovered=null,this._queueCallback((()=>{this._isWithActiveTrigger()||(this._isHovered||this._disposePopper(),this._element.removeAttribute("aria-describedby"),fe.trigger(this._element,this.constructor.eventName("hidden")))}),this.tip,this._isAnimated())}}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove(es,is),e.classList.add(`bs-${this.constructor.NAME}-auto`);const i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute("id",i),this._isAnimated()&&e.classList.add(es),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new Zn({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{".tooltip-inner":this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute("data-bs-original-title")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return this._config.animation||this.tip&&this.tip.classList.contains(es)}_isShown(){return this.tip&&this.tip.classList.contains(is)}_createPopper(t){const 
e=Xt(this._config.placement,[this,t,this._element]),i=as[e.toUpperCase()];return Dt(this._element,t,this._getPopperConfig(i))}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return Xt(t,[this._element])}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"preSetPlacement",enabled:!0,phase:"beforeMain",fn:t=>{this._getTipElement().setAttribute("data-popper-placement",t.state.placement)}}]};return{...e,...Xt(this._config.popperConfig,[e])}}_setListeners(){const t=this._config.trigger.split(" ");for(const e of t)if("click"===e)fe.on(this._element,this.constructor.eventName("click"),this._config.selector,(t=>{this._initializeOnDelegatedTarget(t).toggle()}));else if("manual"!==e){const t=e===os?this.constructor.eventName("mouseenter"):this.constructor.eventName("focusin"),i=e===os?this.constructor.eventName("mouseleave"):this.constructor.eventName("focusout");fe.on(this._element,t,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusin"===t.type?rs:os]=!0,e._enter()})),fe.on(this._element,i,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusout"===t.type?rs:os]=e._element.contains(t.relatedTarget),e._leave()}))}this._hideModalHandler=()=>{this._element&&this.hide()},fe.on(this._element.closest(ns),ss,this._hideModalHandler)}_fixTitle(){const t=this._element.getAttribute("title");t&&(this._element.getAttribute("aria-label")||this._element.textContent.trim()||this._element.setAttribute("aria-label",t),this._element.setAttribute("data-bs-original-title",t),this._element.removeAttribute("title"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout((()=>{this._isHovered&&this.show()}),this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout((()=>{this._isHovered||this.hide()}),this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const e=_e.getDataAttributes(this._element);for(const t of Object.keys(e))ts.has(t)&&delete e[t];return t={...e,..."object"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:Ht(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const[e,i]of Object.entries(this._config))this.constructor.Default[e]!==i&&(t[e]=i);return t.selector=!1,t.trigger="manual",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null),this.tip&&(this.tip.remove(),this.tip=null)}static jQueryInterface(t){return this.each((function(){const e=hs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Qt(hs);const 
ds={...hs.Default,content:"",offset:[0,8],placement:"right",template:'',trigger:"click"},us={...hs.DefaultType,content:"(null|string|element|function)"};class fs extends hs{static get Default(){return ds}static get DefaultType(){return us}static get NAME(){return"popover"}_isWithContent(){return this._getTitle()||this._getContent()}_getContentForTemplate(){return{".popover-header":this._getTitle(),".popover-body":this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=fs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Qt(fs);const ps=".bs.scrollspy",ms=`activate${ps}`,gs=`click${ps}`,_s=`load${ps}.data-api`,bs="active",vs="[href]",ys=".nav-link",ws=`${ys}, .nav-item > ${ys}, .list-group-item`,Es={offset:null,rootMargin:"0px 0px -25%",smoothScroll:!1,target:null,threshold:[.1,.5,1]},As={offset:"(number|null)",rootMargin:"string",smoothScroll:"boolean",target:"element",threshold:"array"};class Ts extends ve{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement="visible"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return Es}static get DefaultType(){return As}static get NAME(){return"scrollspy"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=Ht(t.target)||document.body,t.rootMargin=t.offset?`${t.offset}px 0px -30%`:t.rootMargin,"string"==typeof t.threshold&&(t.threshold=t.threshold.split(",").map((t=>Number.parseFloat(t)))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(fe.off(this._config.target,gs),fe.on(this._config.target,gs,vs,(t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const i=this._rootElement||window,n=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:n,behavior:"smooth"});i.scrollTop=n}})))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver((t=>this._observerCallback(t)),t)}_observerCallback(t){const e=t=>this._targetLinks.get(`#${t.target.id}`),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},n=(this._rootElement||document.documentElement).scrollTop,s=n>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=n;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(s&&t){if(i(o),!n)return}else s||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=we.find(vs,this._config.target);for(const e of t){if(!e.hash||Wt(e))continue;const 
t=we.findOne(decodeURI(e.hash),this._element);Bt(t)&&(this._targetLinks.set(decodeURI(e.hash),e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add(bs),this._activateParents(t),fe.trigger(this._element,ms,{relatedTarget:t}))}_activateParents(t){if(t.classList.contains("dropdown-item"))we.findOne(".dropdown-toggle",t.closest(".dropdown")).classList.add(bs);else for(const e of we.parents(t,".nav, .list-group"))for(const t of we.prev(e,ws))t.classList.add(bs)}_clearActiveClass(t){t.classList.remove(bs);const e=we.find(`${vs}.${bs}`,t);for(const t of e)t.classList.remove(bs)}static jQueryInterface(t){return this.each((function(){const e=Ts.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}fe.on(window,_s,(()=>{for(const t of we.find('[data-bs-spy="scroll"]'))Ts.getOrCreateInstance(t)})),Qt(Ts);const Cs=".bs.tab",Os=`hide${Cs}`,xs=`hidden${Cs}`,ks=`show${Cs}`,Ls=`shown${Cs}`,Ss=`click${Cs}`,Ds=`keydown${Cs}`,$s=`load${Cs}`,Is="ArrowLeft",Ns="ArrowRight",Ps="ArrowUp",Ms="ArrowDown",js="Home",Fs="End",Hs="active",Bs="fade",Ws="show",zs=".dropdown-toggle",Rs=`:not(${zs})`,qs='[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',Vs=`.nav-link${Rs}, .list-group-item${Rs}, [role="tab"]${Rs}, ${qs}`,Ys=`.${Hs}[data-bs-toggle="tab"], .${Hs}[data-bs-toggle="pill"], .${Hs}[data-bs-toggle="list"]`;class Ks extends ve{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role="tablist"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),fe.on(this._element,Ds,(t=>this._keydown(t))))}static get NAME(){return"tab"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?fe.trigger(e,Os,{relatedTarget:t}):null;fe.trigger(t,ks,{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){t&&(t.classList.add(Hs),this._activate(we.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.removeAttribute("tabindex"),t.setAttribute("aria-selected",!0),this._toggleDropDown(t,!0),fe.trigger(t,Ls,{relatedTarget:e})):t.classList.add(Ws)}),t,t.classList.contains(Bs)))}_deactivate(t,e){t&&(t.classList.remove(Hs),t.blur(),this._deactivate(we.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.setAttribute("aria-selected",!1),t.setAttribute("tabindex","-1"),this._toggleDropDown(t,!1),fe.trigger(t,xs,{relatedTarget:e})):t.classList.remove(Ws)}),t,t.classList.contains(Bs)))}_keydown(t){if(![Is,Ns,Ps,Ms,js,Fs].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=this._getChildren().filter((t=>!Wt(t)));let i;if([js,Fs].includes(t.key))i=e[t.key===js?0:e.length-1];else{const n=[Ns,Ms].includes(t.key);i=Gt(e,t.target,n,!0)}i&&(i.focus({preventScroll:!0}),Ks.getOrCreateInstance(i).show())}_getChildren(){return we.find(Vs,this._parent)}_getActiveElem(){return this._getChildren().find((t=>this._elemIsActive(t)))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,"role","tablist");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const 
e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute("aria-selected",e),i!==t&&this._setAttributeIfNotExists(i,"role","presentation"),e||t.setAttribute("tabindex","-1"),this._setAttributeIfNotExists(t,"role","tab"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const e=we.getElementFromSelector(t);e&&(this._setAttributeIfNotExists(e,"role","tabpanel"),t.id&&this._setAttributeIfNotExists(e,"aria-labelledby",`${t.id}`))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains("dropdown"))return;const n=(t,n)=>{const s=we.findOne(t,i);s&&s.classList.toggle(n,e)};n(zs,Hs),n(".dropdown-menu",Ws),i.setAttribute("aria-expanded",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains(Hs)}_getInnerElement(t){return t.matches(Vs)?t:we.findOne(Vs,t)}_getOuterElement(t){return t.closest(".nav-item, .list-group-item")||t}static jQueryInterface(t){return this.each((function(){const e=Ks.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}fe.on(document,Ss,qs,(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),Wt(this)||Ks.getOrCreateInstance(this).show()})),fe.on(window,$s,(()=>{for(const t of we.find(Ys))Ks.getOrCreateInstance(t)})),Qt(Ks);const Qs=".bs.toast",Xs=`mouseover${Qs}`,Us=`mouseout${Qs}`,Gs=`focusin${Qs}`,Js=`focusout${Qs}`,Zs=`hide${Qs}`,to=`hidden${Qs}`,eo=`show${Qs}`,io=`shown${Qs}`,no="hide",so="show",oo="showing",ro={animation:"boolean",autohide:"boolean",delay:"number"},ao={animation:!0,autohide:!0,delay:5e3};class lo extends ve{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return ao}static get DefaultType(){return ro}static get NAME(){return"toast"}show(){fe.trigger(this._element,eo).defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove(no),qt(this._element),this._element.classList.add(so,oo),this._queueCallback((()=>{this._element.classList.remove(oo),fe.trigger(this._element,io),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this.isShown()&&(fe.trigger(this._element,Zs).defaultPrevented||(this._element.classList.add(oo),this._queueCallback((()=>{this._element.classList.add(no),this._element.classList.remove(oo,so),fe.trigger(this._element,to)}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove(so),super.dispose()}isShown(){return this._element.classList.contains(so)}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){fe.on(this._element,Xs,(t=>this._onInteraction(t,!0))),fe.on(this._element,Us,(t=>this._onInteraction(t,!1))),fe.on(this._element,Gs,(t=>this._onInteraction(t,!0))),fe.on(this._element,Js,(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return 
this.each((function(){const e=lo.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}function co(t){"loading"!=document.readyState?t():document.addEventListener("DOMContentLoaded",t)}Ee(lo),Qt(lo),co((function(){[].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')).map((function(t){return new hs(t,{delay:{show:500,hide:100}})}))})),co((function(){document.getElementById("pst-back-to-top").addEventListener("click",(function(){document.body.scrollTop=0,document.documentElement.scrollTop=0}))})),co((function(){var t=document.getElementById("pst-back-to-top"),e=document.getElementsByClassName("bd-header")[0].getBoundingClientRect();window.addEventListener("scroll",(function(){this.oldScroll>this.scrollY&&this.scrollY>e.bottom?t.style.display="block":t.style.display="none",this.oldScroll=this.scrollY}))})),window.bootstrap=i})(); +//# sourceMappingURL=bootstrap.js.map \ No newline at end of file diff --git a/_static/scripts/bootstrap.js.LICENSE.txt b/_static/scripts/bootstrap.js.LICENSE.txt new file mode 100644 index 00000000..10f979d0 --- /dev/null +++ b/_static/scripts/bootstrap.js.LICENSE.txt @@ -0,0 +1,5 @@ +/*! + * Bootstrap v5.3.2 (https://getbootstrap.com/) + * Copyright 2011-2023 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) + */ diff --git a/_static/scripts/bootstrap.js.map b/_static/scripts/bootstrap.js.map new file mode 100644 index 00000000..64e212b1 --- /dev/null +++ b/_static/scripts/bootstrap.js.map @@ -0,0 +1 @@ +{"version":3,"file":"scripts/bootstrap.js","mappings":";mBACA,IAAIA,EAAsB,CCA1BA,EAAwB,CAACC,EAASC,KACjC,IAAI,IAAIC,KAAOD,EACXF,EAAoBI,EAAEF,EAAYC,KAASH,EAAoBI,EAAEH,EAASE,IAC5EE,OAAOC,eAAeL,EAASE,EAAK,CAAEI,YAAY,EAAMC,IAAKN,EAAWC,IAE1E,ECNDH,EAAwB,CAACS,EAAKC,IAAUL,OAAOM,UAAUC,eAAeC,KAAKJ,EAAKC,GCClFV,EAAyBC,IACH,oBAAXa,QAA0BA,OAAOC,aAC1CV,OAAOC,eAAeL,EAASa,OAAOC,YAAa,CAAEC,MAAO,WAE7DX,OAAOC,eAAeL,EAAS,aAAc,CAAEe,OAAO,GAAO,01BCLvD,IAAI,EAAM,MACNC,EAAS,SACTC,EAAQ,QACRC,EAAO,OACPC,EAAO,OACPC,EAAiB,CAAC,EAAKJ,EAAQC,EAAOC,GACtCG,EAAQ,QACRC,EAAM,MACNC,EAAkB,kBAClBC,EAAW,WACXC,EAAS,SACTC,EAAY,YACZC,EAAmCP,EAAeQ,QAAO,SAAUC,EAAKC,GACjF,OAAOD,EAAIE,OAAO,CAACD,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAChE,GAAG,IACQ,EAA0B,GAAGS,OAAOX,EAAgB,CAACD,IAAOS,QAAO,SAAUC,EAAKC,GAC3F,OAAOD,EAAIE,OAAO,CAACD,EAAWA,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAC3E,GAAG,IAEQU,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAc,cACdC,EAAQ,QACRC,EAAa,aACbC,EAAiB,CAACT,EAAYC,EAAMC,EAAWC,EAAYC,EAAMC,EAAWC,EAAaC,EAAOC,GC9B5F,SAASE,EAAYC,GAClC,OAAOA,GAAWA,EAAQC,UAAY,IAAIC,cAAgB,IAC5D,CCFe,SAASC,EAAUC,GAChC,GAAY,MAARA,EACF,OAAOC,OAGT,GAAwB,oBAApBD,EAAKE,WAAkC,CACzC,IAAIC,EAAgBH,EAAKG,cACzB,OAAOA,GAAgBA,EAAcC,aAAwBH,MAC/D,CAEA,OAAOD,CACT,CCTA,SAASK,EAAUL,GAEjB,OAAOA,aADUD,EAAUC,GAAMM,SACIN,aAAgBM,OACvD,CAEA,SAASC,EAAcP,GAErB,OAAOA,aADUD,EAAUC,GAAMQ,aACIR,aAAgBQ,WACvD,CAEA,SAASC,EAAaT,GAEpB,MAA0B,oBAAfU,aAKJV,aADUD,EAAUC,GAAMU,YACIV,aAAgBU,WACvD,CCwDA,SACEC,KAAM,cACNC,SAAS,EACTC,MAAO,QACPC,GA5EF,SAAqBC,GACnB,IAAIC,EAAQD,EAAKC,MACjB3D,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIS,EAAQJ,EAAMK,OAAOV,IAAS,CAAC,EAC/BW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EACxCf,EAAUoB,EAAME,SAASP,GAExBJ,EAAcX,IAAaD,EAAYC,KAO5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUR,GACxC,IAAI3C,EAAQsD,EAAWX,IAET,IAAV3C,EACF4B,EAAQ4B,gBAAgBb,GAExBf,EAAQ6B,aAAad,GAAgB,IAAV3C,EAAiB,GAAKA,EAErD,IACF,GACF,EAoDE0D,OAlDF,SAAgBC,GACd,IAA
IX,EAAQW,EAAMX,MACdY,EAAgB,CAClBlD,OAAQ,CACNmD,SAAUb,EAAMc,QAAQC,SACxB5D,KAAM,IACN6D,IAAK,IACLC,OAAQ,KAEVC,MAAO,CACLL,SAAU,YAEZlD,UAAW,CAAC,GASd,OAPAtB,OAAOkE,OAAOP,EAAME,SAASxC,OAAO0C,MAAOQ,EAAclD,QACzDsC,EAAMK,OAASO,EAEXZ,EAAME,SAASgB,OACjB7E,OAAOkE,OAAOP,EAAME,SAASgB,MAAMd,MAAOQ,EAAcM,OAGnD,WACL7E,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIf,EAAUoB,EAAME,SAASP,GACzBW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EAGxCS,EAFkB/D,OAAO4D,KAAKD,EAAMK,OAAOzD,eAAe+C,GAAQK,EAAMK,OAAOV,GAAQiB,EAAcjB,IAE7E9B,QAAO,SAAUuC,EAAOe,GAElD,OADAf,EAAMe,GAAY,GACXf,CACT,GAAG,CAAC,GAECb,EAAcX,IAAaD,EAAYC,KAI5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUiB,GACxCxC,EAAQ4B,gBAAgBY,EAC1B,IACF,GACF,CACF,EASEC,SAAU,CAAC,kBCjFE,SAASC,EAAiBvD,GACvC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCHO,IAAI,EAAMC,KAAKC,IACX,EAAMD,KAAKE,IACXC,EAAQH,KAAKG,MCFT,SAASC,IACtB,IAAIC,EAASC,UAAUC,cAEvB,OAAc,MAAVF,GAAkBA,EAAOG,QAAUC,MAAMC,QAAQL,EAAOG,QACnDH,EAAOG,OAAOG,KAAI,SAAUC,GACjC,OAAOA,EAAKC,MAAQ,IAAMD,EAAKE,OACjC,IAAGC,KAAK,KAGHT,UAAUU,SACnB,CCTe,SAASC,IACtB,OAAQ,iCAAiCC,KAAKd,IAChD,CCCe,SAASe,EAAsB/D,EAASgE,EAAcC,QAC9C,IAAjBD,IACFA,GAAe,QAGO,IAApBC,IACFA,GAAkB,GAGpB,IAAIC,EAAalE,EAAQ+D,wBACrBI,EAAS,EACTC,EAAS,EAETJ,GAAgBrD,EAAcX,KAChCmE,EAASnE,EAAQqE,YAAc,GAAItB,EAAMmB,EAAWI,OAAStE,EAAQqE,aAAmB,EACxFD,EAASpE,EAAQuE,aAAe,GAAIxB,EAAMmB,EAAWM,QAAUxE,EAAQuE,cAAoB,GAG7F,IACIE,GADOhE,EAAUT,GAAWG,EAAUH,GAAWK,QAC3BoE,eAEtBC,GAAoBb,KAAsBI,EAC1CU,GAAKT,EAAW3F,MAAQmG,GAAoBD,EAAiBA,EAAeG,WAAa,IAAMT,EAC/FU,GAAKX,EAAW9B,KAAOsC,GAAoBD,EAAiBA,EAAeK,UAAY,IAAMV,EAC7FE,EAAQJ,EAAWI,MAAQH,EAC3BK,EAASN,EAAWM,OAASJ,EACjC,MAAO,CACLE,MAAOA,EACPE,OAAQA,EACRpC,IAAKyC,EACLvG,MAAOqG,EAAIL,EACXjG,OAAQwG,EAAIL,EACZjG,KAAMoG,EACNA,EAAGA,EACHE,EAAGA,EAEP,CCrCe,SAASE,EAAc/E,GACpC,IAAIkE,EAAaH,EAAsB/D,GAGnCsE,EAAQtE,EAAQqE,YAChBG,EAASxE,EAAQuE,aAUrB,OARI3B,KAAKoC,IAAId,EAAWI,MAAQA,IAAU,IACxCA,EAAQJ,EAAWI,OAGjB1B,KAAKoC,IAAId,EAAWM,OAASA,IAAW,IAC1CA,EAASN,EAAWM,QAGf,CACLG,EAAG3E,EAAQ4E,WACXC,EAAG7E,EAAQ8E,UACXR,MAAOA,EACPE,OAAQA,EAEZ,CCvBe,SAASS,EAASC,EAAQC,GACvC,IAAIC,EAAWD,EAAME,aAAeF,EAAME,cAE1C,GAAIH,EAAOD,SAASE,GAClB,OAAO,EAEJ,GAAIC,GAAYvE,EAAauE,GAAW,CACzC,IAAIE,EAAOH,EAEX,EAAG,CACD,GAAIG,GAAQJ,EAAOK,WAAWD,GAC5B,OAAO,EAITA,EAAOA,EAAKE,YAAcF,EAAKG,IACjC,OAASH,EACX,CAGF,OAAO,CACT,CCrBe,SAAS,EAAiBtF,GACvC,OAAOG,EAAUH,GAAS0F,iBAAiB1F,EAC7C,CCFe,SAAS2F,EAAe3F,GACrC,MAAO,CAAC,QAAS,KAAM,MAAM4F,QAAQ7F,EAAYC,KAAa,CAChE,CCFe,SAAS6F,EAAmB7F,GAEzC,QAASS,EAAUT,GAAWA,EAAQO,cACtCP,EAAQ8F,WAAazF,OAAOyF,UAAUC,eACxC,CCFe,SAASC,EAAchG,GACpC,MAA6B,SAAzBD,EAAYC,GACPA,EAMPA,EAAQiG,cACRjG,EAAQwF,aACR3E,EAAab,GAAWA,EAAQyF,KAAO,OAEvCI,EAAmB7F,EAGvB,CCVA,SAASkG,EAAoBlG,GAC3B,OAAKW,EAAcX,IACoB,UAAvC,EAAiBA,GAASiC,SAInBjC,EAAQmG,aAHN,IAIX,CAwCe,SAASC,EAAgBpG,GAItC,IAHA,IAAIK,EAASF,EAAUH,GACnBmG,EAAeD,EAAoBlG,GAEhCmG,GAAgBR,EAAeQ,IAA6D,WAA5C,EAAiBA,GAAclE,UACpFkE,EAAeD,EAAoBC,GAGrC,OAAIA,IAA+C,SAA9BpG,EAAYoG,IAA0D,SAA9BpG,EAAYoG,IAAwE,WAA5C,EAAiBA,GAAclE,UAC3H5B,EAGF8F,GAhDT,SAA4BnG,GAC1B,IAAIqG,EAAY,WAAWvC,KAAKd,KAGhC,GAFW,WAAWc,KAAKd,MAEfrC,EAAcX,IAII,UAFX,EAAiBA,GAEnBiC,SACb,OAAO,KAIX,IAAIqE,EAAcN,EAAchG,GAMhC,IAJIa,EAAayF,KACfA,EAAcA,EAAYb,MAGrB9E,EAAc2F,IAAgB,CAAC,OAAQ,QAAQV,QAAQ7F,EAAYuG,IAAgB,GAAG,CAC3F,IAAIC,EAAM,EAAiBD,GAI3B,GAAsB,SAAlBC,EAAIC,WAA4C,SAApBD,EAAIE,aAA0C,UAAhBF,EAAIG,UAAiF,IAA1D,CAAC,YAAa,eAAed,QAAQW,EAAII,aAAsBN,GAAgC,WAAnBE,EAAII,YAA2BN,GAAaE,EAAIK,QAAyB,SAAfL,EAAIK,OACjO,OAAON,EAEPA,EAAcA,EAAYd,UAE9B,CAEA,OAAO,IACT,CAgByBqB,CAAmB7G,IAAYK,CACxD,CCpEe,SAASyG,EAAyB3H,GAC/C,MAAO,CAAC,MAAO,UAAUyG,QAAQzG,IAAc,EAAI,IAAM,GAC3D,CCDO,SAAS4H,EAAOjE,EAAK1E,EAAOyE,GACjC,OAAO,EA
AQC,EAAK,EAAQ1E,EAAOyE,GACrC,CCFe,SAASmE,EAAmBC,GACzC,OAAOxJ,OAAOkE,OAAO,CAAC,ECDf,CACLS,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GDHuC0I,EACjD,CEHe,SAASC,EAAgB9I,EAAOiD,GAC7C,OAAOA,EAAKpC,QAAO,SAAUkI,EAAS5J,GAEpC,OADA4J,EAAQ5J,GAAOa,EACR+I,CACT,GAAG,CAAC,EACN,CC4EA,SACEpG,KAAM,QACNC,SAAS,EACTC,MAAO,OACPC,GApEF,SAAeC,GACb,IAAIiG,EAEAhG,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZmB,EAAUf,EAAKe,QACfmF,EAAejG,EAAME,SAASgB,MAC9BgF,EAAgBlG,EAAMmG,cAAcD,cACpCE,EAAgB9E,EAAiBtB,EAAMjC,WACvCsI,EAAOX,EAAyBU,GAEhCE,EADa,CAACnJ,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAClC,SAAW,QAElC,GAAKH,GAAiBC,EAAtB,CAIA,IAAIL,EAxBgB,SAAyBU,EAASvG,GAItD,OAAO4F,EAAsC,iBAH7CW,EAA6B,mBAAZA,EAAyBA,EAAQlK,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CAC/EzI,UAAWiC,EAAMjC,aACbwI,GACkDA,EAAUT,EAAgBS,EAASlJ,GAC7F,CAmBsBoJ,CAAgB3F,EAAQyF,QAASvG,GACjD0G,EAAY/C,EAAcsC,GAC1BU,EAAmB,MAATN,EAAe,EAAMlJ,EAC/ByJ,EAAmB,MAATP,EAAepJ,EAASC,EAClC2J,EAAU7G,EAAMwG,MAAM7I,UAAU2I,GAAOtG,EAAMwG,MAAM7I,UAAU0I,GAAQH,EAAcG,GAAQrG,EAAMwG,MAAM9I,OAAO4I,GAC9GQ,EAAYZ,EAAcG,GAAQrG,EAAMwG,MAAM7I,UAAU0I,GACxDU,EAAoB/B,EAAgBiB,GACpCe,EAAaD,EAA6B,MAATV,EAAeU,EAAkBE,cAAgB,EAAIF,EAAkBG,aAAe,EAAI,EAC3HC,EAAoBN,EAAU,EAAIC,EAAY,EAG9CpF,EAAMmE,EAAcc,GACpBlF,EAAMuF,EAAaN,EAAUJ,GAAOT,EAAce,GAClDQ,EAASJ,EAAa,EAAIN,EAAUJ,GAAO,EAAIa,EAC/CE,EAAS1B,EAAOjE,EAAK0F,EAAQ3F,GAE7B6F,EAAWjB,EACfrG,EAAMmG,cAAcxG,KAASqG,EAAwB,CAAC,GAAyBsB,GAAYD,EAAQrB,EAAsBuB,aAAeF,EAASD,EAAQpB,EAnBzJ,CAoBF,EAkCEtF,OAhCF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MAEdwH,EADU7G,EAAMG,QACWlC,QAC3BqH,OAAoC,IAArBuB,EAA8B,sBAAwBA,EAErD,MAAhBvB,IAKwB,iBAAjBA,IACTA,EAAejG,EAAME,SAASxC,OAAO+J,cAAcxB,MAOhDpC,EAAS7D,EAAME,SAASxC,OAAQuI,KAIrCjG,EAAME,SAASgB,MAAQ+E,EACzB,EASE5E,SAAU,CAAC,iBACXqG,iBAAkB,CAAC,oBCxFN,SAASC,EAAa5J,GACnC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCOA,IAAIqG,GAAa,CACf5G,IAAK,OACL9D,MAAO,OACPD,OAAQ,OACRE,KAAM,QAeD,SAAS0K,GAAYlH,GAC1B,IAAImH,EAEApK,EAASiD,EAAMjD,OACfqK,EAAapH,EAAMoH,WACnBhK,EAAY4C,EAAM5C,UAClBiK,EAAYrH,EAAMqH,UAClBC,EAAUtH,EAAMsH,QAChBpH,EAAWF,EAAME,SACjBqH,EAAkBvH,EAAMuH,gBACxBC,EAAWxH,EAAMwH,SACjBC,EAAezH,EAAMyH,aACrBC,EAAU1H,EAAM0H,QAChBC,EAAaL,EAAQ1E,EACrBA,OAAmB,IAAf+E,EAAwB,EAAIA,EAChCC,EAAaN,EAAQxE,EACrBA,OAAmB,IAAf8E,EAAwB,EAAIA,EAEhCC,EAAgC,mBAAjBJ,EAA8BA,EAAa,CAC5D7E,EAAGA,EACHE,IACG,CACHF,EAAGA,EACHE,GAGFF,EAAIiF,EAAMjF,EACVE,EAAI+E,EAAM/E,EACV,IAAIgF,EAAOR,EAAQrL,eAAe,KAC9B8L,EAAOT,EAAQrL,eAAe,KAC9B+L,EAAQxL,EACRyL,EAAQ,EACRC,EAAM5J,OAEV,GAAIkJ,EAAU,CACZ,IAAIpD,EAAeC,EAAgBtH,GAC/BoL,EAAa,eACbC,EAAY,cAEZhE,IAAiBhG,EAAUrB,IAGmB,WAA5C,EAFJqH,EAAeN,EAAmB/G,IAECmD,UAAsC,aAAbA,IAC1DiI,EAAa,eACbC,EAAY,gBAOZhL,IAAc,IAAQA,IAAcZ,GAAQY,IAAcb,IAAU8K,IAAczK,KACpFqL,EAAQ3L,EAGRwG,IAFc4E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeD,OACzF2B,EAAa+D,IACEf,EAAW3E,OAC1BK,GAAKyE,EAAkB,GAAK,GAG1BnK,IAAcZ,IAASY,IAAc,GAAOA,IAAcd,GAAW+K,IAAczK,KACrFoL,EAAQzL,EAGRqG,IAFc8E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeH,MACzF6B,EAAagE,IACEhB,EAAW7E,MAC1BK,GAAK2E,EAAkB,GAAK,EAEhC,CAEA,IAgBMc,EAhBFC,EAAe5M,OAAOkE,OAAO,CAC/BM,SAAUA,GACTsH,GAAYP,IAEXsB,GAAyB,IAAjBd,EAlFd,SAA2BrI,EAAM8I,GAC/B,IAAItF,EAAIxD,EAAKwD,EACTE,EAAI1D,EAAK0D,EACT0F,EAAMN,EAAIO,kBAAoB,EAClC,MAAO,CACL7F,EAAG5B,EAAM4B,EAAI4F,GAAOA,GAAO,EAC3B1F,EAAG9B,EAAM8B,EAAI0F,GAAOA,GAAO,EAE/B,CA0EsCE,CAAkB,CACpD9F,EAAGA,EACHE,GACC1E,EAAUrB,IAAW,CACtB6F,EAAGA,EACHE,GAMF,OAHAF,EAAI2F,EAAM3F,EACVE,EAAIyF,EAAMzF,EAENyE,EAGK7L,OAAOkE,OAAO,CAAC,EAAG0I,IAAeD,EAAiB,CAAC,GAAkBJ,GAASF,EAAO,IAAM,GAAIM,EAAeL,GAASF,EAAO,IAAM,GAAIO,EAAe5D,WAAayD,EAAIO,kBAAoB,IAAM,EAAI,aAAe7F,EAAI,OAASE,EAAI,MAAQ,eAAiBF,EAAI,OAASE,EAAI,SAAUuF,IAG5R3M,OAAOkE,OAAO,CAAC,EAAG0I,IAAenB,EAAkB,CAAC,GAAmBc,GAASF,E
AAOjF,EAAI,KAAO,GAAIqE,EAAgBa,GAASF,EAAOlF,EAAI,KAAO,GAAIuE,EAAgB1C,UAAY,GAAI0C,GAC9L,CA4CA,UACEnI,KAAM,gBACNC,SAAS,EACTC,MAAO,cACPC,GA9CF,SAAuBwJ,GACrB,IAAItJ,EAAQsJ,EAAMtJ,MACdc,EAAUwI,EAAMxI,QAChByI,EAAwBzI,EAAQoH,gBAChCA,OAA4C,IAA1BqB,GAA0CA,EAC5DC,EAAoB1I,EAAQqH,SAC5BA,OAAiC,IAAtBqB,GAAsCA,EACjDC,EAAwB3I,EAAQsH,aAChCA,OAAyC,IAA1BqB,GAA0CA,EACzDR,EAAe,CACjBlL,UAAWuD,EAAiBtB,EAAMjC,WAClCiK,UAAWL,EAAa3H,EAAMjC,WAC9BL,OAAQsC,EAAME,SAASxC,OACvBqK,WAAY/H,EAAMwG,MAAM9I,OACxBwK,gBAAiBA,EACjBG,QAAoC,UAA3BrI,EAAMc,QAAQC,UAGgB,MAArCf,EAAMmG,cAAcD,gBACtBlG,EAAMK,OAAO3C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAO3C,OAAQmK,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACvGhB,QAASjI,EAAMmG,cAAcD,cAC7BrF,SAAUb,EAAMc,QAAQC,SACxBoH,SAAUA,EACVC,aAAcA,OAIe,MAA7BpI,EAAMmG,cAAcjF,QACtBlB,EAAMK,OAAOa,MAAQ7E,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAOa,MAAO2G,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACrGhB,QAASjI,EAAMmG,cAAcjF,MAC7BL,SAAU,WACVsH,UAAU,EACVC,aAAcA,OAIlBpI,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,wBAAyBsC,EAAMjC,WAEnC,EAQE2L,KAAM,CAAC,GCrKT,IAAIC,GAAU,CACZA,SAAS,GAsCX,UACEhK,KAAM,iBACNC,SAAS,EACTC,MAAO,QACPC,GAAI,WAAe,EACnBY,OAxCF,SAAgBX,GACd,IAAIC,EAAQD,EAAKC,MACb4J,EAAW7J,EAAK6J,SAChB9I,EAAUf,EAAKe,QACf+I,EAAkB/I,EAAQgJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAkBjJ,EAAQkJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7C9K,EAASF,EAAUiB,EAAME,SAASxC,QAClCuM,EAAgB,GAAGjM,OAAOgC,EAAMiK,cAActM,UAAWqC,EAAMiK,cAAcvM,QAYjF,OAVIoM,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaC,iBAAiB,SAAUP,EAASQ,OAAQT,GAC3D,IAGEK,GACF/K,EAAOkL,iBAAiB,SAAUP,EAASQ,OAAQT,IAG9C,WACDG,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaG,oBAAoB,SAAUT,EAASQ,OAAQT,GAC9D,IAGEK,GACF/K,EAAOoL,oBAAoB,SAAUT,EAASQ,OAAQT,GAE1D,CACF,EASED,KAAM,CAAC,GC/CT,IAAIY,GAAO,CACTnN,KAAM,QACND,MAAO,OACPD,OAAQ,MACR+D,IAAK,UAEQ,SAASuJ,GAAqBxM,GAC3C,OAAOA,EAAUyM,QAAQ,0BAA0B,SAAUC,GAC3D,OAAOH,GAAKG,EACd,GACF,CCVA,IAAI,GAAO,CACTnN,MAAO,MACPC,IAAK,SAEQ,SAASmN,GAA8B3M,GACpD,OAAOA,EAAUyM,QAAQ,cAAc,SAAUC,GAC/C,OAAO,GAAKA,EACd,GACF,CCPe,SAASE,GAAgB3L,GACtC,IAAI6J,EAAM9J,EAAUC,GAGpB,MAAO,CACL4L,WAHe/B,EAAIgC,YAInBC,UAHcjC,EAAIkC,YAKtB,CCNe,SAASC,GAAoBpM,GAQ1C,OAAO+D,EAAsB8B,EAAmB7F,IAAUzB,KAAOwN,GAAgB/L,GAASgM,UAC5F,CCXe,SAASK,GAAerM,GAErC,IAAIsM,EAAoB,EAAiBtM,GACrCuM,EAAWD,EAAkBC,SAC7BC,EAAYF,EAAkBE,UAC9BC,EAAYH,EAAkBG,UAElC,MAAO,6BAA6B3I,KAAKyI,EAAWE,EAAYD,EAClE,CCLe,SAASE,GAAgBtM,GACtC,MAAI,CAAC,OAAQ,OAAQ,aAAawF,QAAQ7F,EAAYK,KAAU,EAEvDA,EAAKG,cAAcoM,KAGxBhM,EAAcP,IAASiM,GAAejM,GACjCA,EAGFsM,GAAgB1G,EAAc5F,GACvC,CCJe,SAASwM,GAAkB5M,EAAS6M,GACjD,IAAIC,OAES,IAATD,IACFA,EAAO,IAGT,IAAIvB,EAAeoB,GAAgB1M,GAC/B+M,EAASzB,KAAqE,OAAlDwB,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,MACpH1C,EAAM9J,EAAUmL,GAChB0B,EAASD,EAAS,CAAC9C,GAAK7K,OAAO6K,EAAIxF,gBAAkB,GAAI4H,GAAef,GAAgBA,EAAe,IAAMA,EAC7G2B,EAAcJ,EAAKzN,OAAO4N,GAC9B,OAAOD,EAASE,EAChBA,EAAY7N,OAAOwN,GAAkB5G,EAAcgH,IACrD,CCzBe,SAASE,GAAiBC,GACvC,OAAO1P,OAAOkE,OAAO,CAAC,EAAGwL,EAAM,CAC7B5O,KAAM4O,EAAKxI,EACXvC,IAAK+K,EAAKtI,EACVvG,MAAO6O,EAAKxI,EAAIwI,EAAK7I,MACrBjG,OAAQ8O,EAAKtI,EAAIsI,EAAK3I,QAE1B,CCqBA,SAAS4I,GAA2BpN,EAASqN,EAAgBlL,GAC3D,OAAOkL,IAAmBxO,EAAWqO,GCzBxB,SAAyBlN,EAASmC,GAC/C,IAAI8H,EAAM9J,EAAUH,GAChBsN,EAAOzH,EAAmB7F,GAC1ByE,EAAiBwF,EAAIxF,eACrBH,EAAQgJ,EAAKhF,YACb9D,EAAS8I,EAAKjF,aACd1D,EAAI,EACJE,EAAI,EAER,GAAIJ,EAAgB,CAClBH,EAAQG,EAAeH,MACvBE,EAASC,EAAeD,OACxB,IAAI+I,EAAiB1J,KAEjB0J,IAAmBA,GAA+B,UAAbpL,KACvCwC,EAAIF,EAAeG,WACnBC,EAAIJ,EAAeK,UAEvB,CAEA,MAAO,CACLR,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EAAIyH,GAAoBpM,GAC3B6E,EAAGA,EAEP,CDDwD2I,CAAgBxN,EAASmC,IAAa1B,EAAU4M,GAdxG,SAAoCrN,EAASmC,GAC3C,IAAIgL,EAAOpJ,EAAsB/D,GAAS,EAAoB,UAAbmC,GASjD,OARAgL,EAAK/K,IA
AM+K,EAAK/K,IAAMpC,EAAQyN,UAC9BN,EAAK5O,KAAO4O,EAAK5O,KAAOyB,EAAQ0N,WAChCP,EAAK9O,OAAS8O,EAAK/K,IAAMpC,EAAQqI,aACjC8E,EAAK7O,MAAQ6O,EAAK5O,KAAOyB,EAAQsI,YACjC6E,EAAK7I,MAAQtE,EAAQsI,YACrB6E,EAAK3I,OAASxE,EAAQqI,aACtB8E,EAAKxI,EAAIwI,EAAK5O,KACd4O,EAAKtI,EAAIsI,EAAK/K,IACP+K,CACT,CAG0HQ,CAA2BN,EAAgBlL,GAAY+K,GEtBlK,SAAyBlN,GACtC,IAAI8M,EAEAQ,EAAOzH,EAAmB7F,GAC1B4N,EAAY7B,GAAgB/L,GAC5B2M,EAA0D,OAAlDG,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,KAChGrI,EAAQ,EAAIgJ,EAAKO,YAAaP,EAAKhF,YAAaqE,EAAOA,EAAKkB,YAAc,EAAGlB,EAAOA,EAAKrE,YAAc,GACvG9D,EAAS,EAAI8I,EAAKQ,aAAcR,EAAKjF,aAAcsE,EAAOA,EAAKmB,aAAe,EAAGnB,EAAOA,EAAKtE,aAAe,GAC5G1D,GAAKiJ,EAAU5B,WAAaI,GAAoBpM,GAChD6E,GAAK+I,EAAU1B,UAMnB,MAJiD,QAA7C,EAAiBS,GAAQW,GAAMS,YACjCpJ,GAAK,EAAI2I,EAAKhF,YAAaqE,EAAOA,EAAKrE,YAAc,GAAKhE,GAGrD,CACLA,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EACHE,EAAGA,EAEP,CFCkMmJ,CAAgBnI,EAAmB7F,IACrO,CG1Be,SAASiO,GAAe9M,GACrC,IAOIkI,EAPAtK,EAAYoC,EAAKpC,UACjBiB,EAAUmB,EAAKnB,QACfb,EAAYgC,EAAKhC,UACjBqI,EAAgBrI,EAAYuD,EAAiBvD,GAAa,KAC1DiK,EAAYjK,EAAY4J,EAAa5J,GAAa,KAClD+O,EAAUnP,EAAU4F,EAAI5F,EAAUuF,MAAQ,EAAItE,EAAQsE,MAAQ,EAC9D6J,EAAUpP,EAAU8F,EAAI9F,EAAUyF,OAAS,EAAIxE,EAAQwE,OAAS,EAGpE,OAAQgD,GACN,KAAK,EACH6B,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI7E,EAAQwE,QAE3B,MAEF,KAAKnG,EACHgL,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI9F,EAAUyF,QAE7B,MAEF,KAAKlG,EACH+K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI5F,EAAUuF,MAC3BO,EAAGsJ,GAEL,MAEF,KAAK5P,EACH8K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI3E,EAAQsE,MACzBO,EAAGsJ,GAEL,MAEF,QACE9E,EAAU,CACR1E,EAAG5F,EAAU4F,EACbE,EAAG9F,EAAU8F,GAInB,IAAIuJ,EAAW5G,EAAgBV,EAAyBU,GAAiB,KAEzE,GAAgB,MAAZ4G,EAAkB,CACpB,IAAI1G,EAAmB,MAAb0G,EAAmB,SAAW,QAExC,OAAQhF,GACN,KAAK1K,EACH2K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAC7E,MAEF,KAAK/I,EACH0K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAKnF,CAEA,OAAO2B,CACT,CC3De,SAASgF,GAAejN,EAAOc,QAC5B,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACXqM,EAAqBD,EAASnP,UAC9BA,OAAmC,IAAvBoP,EAAgCnN,EAAMjC,UAAYoP,EAC9DC,EAAoBF,EAASnM,SAC7BA,OAAiC,IAAtBqM,EAA+BpN,EAAMe,SAAWqM,EAC3DC,EAAoBH,EAASI,SAC7BA,OAAiC,IAAtBD,EAA+B7P,EAAkB6P,EAC5DE,EAAwBL,EAASM,aACjCA,OAAyC,IAA1BD,EAAmC9P,EAAW8P,EAC7DE,EAAwBP,EAASQ,eACjCA,OAA2C,IAA1BD,EAAmC/P,EAAS+P,EAC7DE,EAAuBT,EAASU,YAChCA,OAAuC,IAAzBD,GAA0CA,EACxDE,EAAmBX,EAAS3G,QAC5BA,OAA+B,IAArBsH,EAA8B,EAAIA,EAC5ChI,EAAgBD,EAAsC,iBAAZW,EAAuBA,EAAUT,EAAgBS,EAASlJ,IACpGyQ,EAAaJ,IAAmBhQ,EAASC,EAAYD,EACrDqK,EAAa/H,EAAMwG,MAAM9I,OACzBkB,EAAUoB,EAAME,SAAS0N,EAAcE,EAAaJ,GACpDK,EJkBS,SAAyBnP,EAAS0O,EAAUE,EAAczM,GACvE,IAAIiN,EAAmC,oBAAbV,EAlB5B,SAA4B1O,GAC1B,IAAIpB,EAAkBgO,GAAkB5G,EAAchG,IAElDqP,EADoB,CAAC,WAAY,SAASzJ,QAAQ,EAAiB5F,GAASiC,WAAa,GACnDtB,EAAcX,GAAWoG,EAAgBpG,GAAWA,EAE9F,OAAKS,EAAU4O,GAKRzQ,EAAgBgI,QAAO,SAAUyG,GACtC,OAAO5M,EAAU4M,IAAmBpI,EAASoI,EAAgBgC,IAAmD,SAAhCtP,EAAYsN,EAC9F,IANS,EAOX,CAK6DiC,CAAmBtP,GAAW,GAAGZ,OAAOsP,GAC/F9P,EAAkB,GAAGQ,OAAOgQ,EAAqB,CAACR,IAClDW,EAAsB3Q,EAAgB,GACtC4Q,EAAe5Q,EAAgBK,QAAO,SAAUwQ,EAASpC,GAC3D,IAAIF,EAAOC,GAA2BpN,EAASqN,EAAgBlL,GAK/D,OAJAsN,EAAQrN,IAAM,EAAI+K,EAAK/K,IAAKqN,EAAQrN,KACpCqN,EAAQnR,MAAQ,EAAI6O,EAAK7O,MAAOmR,EAAQnR,OACxCmR,EAAQpR,OAAS,EAAI8O,EAAK9O,OAAQoR,EAAQpR,QAC1CoR,EAAQlR,KAAO,EAAI4O,EAAK5O,KAAMkR,EAAQlR,MAC/BkR,CACT,GAAGrC,GAA2BpN,EAASuP,EAAqBpN,IAK5D,OAJAqN,EAAalL,MAAQkL,EAAalR,MAAQkR,EAAajR,KACvDiR,EAAahL,OAASgL,EAAanR,OAASmR,EAAapN,IACzDoN,EAAa7K,EAAI6K,EAAajR,KAC9BiR,EAAa3K,EAAI2K,EAAapN,IACvBoN,CACT,CInC2BE,CAAgBjP,EAAUT,GAAWA,EAAUA,EAAQ2P,gBAAkB9J,EAAmBzE,EAAME,SAASxC,QAAS4P,EAAUE,EAAczM,GACjKyN,EAAsB7L,EAAsB3C,EAAME,SAASvC,WAC3DuI,EAAgB2G,GAAe,CACjClP,UAAW6Q,EACX5P,QAASmJ,EACThH,SAAU,
WACVhD,UAAWA,IAET0Q,EAAmB3C,GAAiBzP,OAAOkE,OAAO,CAAC,EAAGwH,EAAY7B,IAClEwI,EAAoBhB,IAAmBhQ,EAAS+Q,EAAmBD,EAGnEG,EAAkB,CACpB3N,IAAK+M,EAAmB/M,IAAM0N,EAAkB1N,IAAM6E,EAAc7E,IACpE/D,OAAQyR,EAAkBzR,OAAS8Q,EAAmB9Q,OAAS4I,EAAc5I,OAC7EE,KAAM4Q,EAAmB5Q,KAAOuR,EAAkBvR,KAAO0I,EAAc1I,KACvED,MAAOwR,EAAkBxR,MAAQ6Q,EAAmB7Q,MAAQ2I,EAAc3I,OAExE0R,EAAa5O,EAAMmG,cAAckB,OAErC,GAAIqG,IAAmBhQ,GAAUkR,EAAY,CAC3C,IAAIvH,EAASuH,EAAW7Q,GACxB1B,OAAO4D,KAAK0O,GAAiBxO,SAAQ,SAAUhE,GAC7C,IAAI0S,EAAW,CAAC3R,EAAOD,GAAQuH,QAAQrI,IAAQ,EAAI,GAAK,EACpDkK,EAAO,CAAC,EAAKpJ,GAAQuH,QAAQrI,IAAQ,EAAI,IAAM,IACnDwS,EAAgBxS,IAAQkL,EAAOhB,GAAQwI,CACzC,GACF,CAEA,OAAOF,CACT,CCyEA,UACEhP,KAAM,OACNC,SAAS,EACTC,MAAO,OACPC,GA5HF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KAEhB,IAAIK,EAAMmG,cAAcxG,GAAMmP,MAA9B,CAoCA,IAhCA,IAAIC,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAqCA,EACpDG,EAA8BtO,EAAQuO,mBACtC9I,EAAUzF,EAAQyF,QAClB+G,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtB0B,EAAwBxO,EAAQyO,eAChCA,OAA2C,IAA1BD,GAA0CA,EAC3DE,EAAwB1O,EAAQ0O,sBAChCC,EAAqBzP,EAAMc,QAAQ/C,UACnCqI,EAAgB9E,EAAiBmO,GAEjCJ,EAAqBD,IADHhJ,IAAkBqJ,GACqCF,EAjC/E,SAAuCxR,GACrC,GAAIuD,EAAiBvD,KAAeX,EAClC,MAAO,GAGT,IAAIsS,EAAoBnF,GAAqBxM,GAC7C,MAAO,CAAC2M,GAA8B3M,GAAY2R,EAAmBhF,GAA8BgF,GACrG,CA0B6IC,CAA8BF,GAA3E,CAAClF,GAAqBkF,KAChHG,EAAa,CAACH,GAAoBzR,OAAOqR,GAAoBxR,QAAO,SAAUC,EAAKC,GACrF,OAAOD,EAAIE,OAAOsD,EAAiBvD,KAAeX,ECvCvC,SAA8B4C,EAAOc,QAClC,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACX/C,EAAYmP,EAASnP,UACrBuP,EAAWJ,EAASI,SACpBE,EAAeN,EAASM,aACxBjH,EAAU2G,EAAS3G,QACnBgJ,EAAiBrC,EAASqC,eAC1BM,EAAwB3C,EAASsC,sBACjCA,OAAkD,IAA1BK,EAAmC,EAAgBA,EAC3E7H,EAAYL,EAAa5J,GACzB6R,EAAa5H,EAAYuH,EAAiB3R,EAAsBA,EAAoB4H,QAAO,SAAUzH,GACvG,OAAO4J,EAAa5J,KAAeiK,CACrC,IAAK3K,EACDyS,EAAoBF,EAAWpK,QAAO,SAAUzH,GAClD,OAAOyR,EAAsBhL,QAAQzG,IAAc,CACrD,IAEiC,IAA7B+R,EAAkBC,SACpBD,EAAoBF,GAItB,IAAII,EAAYF,EAAkBjS,QAAO,SAAUC,EAAKC,GAOtD,OANAD,EAAIC,GAAakP,GAAejN,EAAO,CACrCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,IACRjF,EAAiBvD,IACbD,CACT,GAAG,CAAC,GACJ,OAAOzB,OAAO4D,KAAK+P,GAAWC,MAAK,SAAUC,EAAGC,GAC9C,OAAOH,EAAUE,GAAKF,EAAUG,EAClC,GACF,CDC6DC,CAAqBpQ,EAAO,CACnFjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTgJ,eAAgBA,EAChBC,sBAAuBA,IACpBzR,EACP,GAAG,IACCsS,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzB4S,EAAY,IAAIC,IAChBC,GAAqB,EACrBC,EAAwBb,EAAW,GAE9Bc,EAAI,EAAGA,EAAId,EAAWG,OAAQW,IAAK,CAC1C,IAAI3S,EAAY6R,EAAWc,GAEvBC,EAAiBrP,EAAiBvD,GAElC6S,EAAmBjJ,EAAa5J,KAAeT,EAC/CuT,EAAa,CAAC,EAAK5T,GAAQuH,QAAQmM,IAAmB,EACtDrK,EAAMuK,EAAa,QAAU,SAC7B1F,EAAW8B,GAAejN,EAAO,CACnCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdI,YAAaA,EACbrH,QAASA,IAEPuK,EAAoBD,EAAaD,EAAmB1T,EAAQC,EAAOyT,EAAmB3T,EAAS,EAE/FoT,EAAc/J,GAAOyB,EAAWzB,KAClCwK,EAAoBvG,GAAqBuG,IAG3C,IAAIC,EAAmBxG,GAAqBuG,GACxCE,EAAS,GAUb,GARIhC,GACFgC,EAAOC,KAAK9F,EAASwF,IAAmB,GAGtCxB,GACF6B,EAAOC,KAAK9F,EAAS2F,IAAsB,EAAG3F,EAAS4F,IAAqB,GAG1EC,EAAOE,OAAM,SAAUC,GACzB,OAAOA,CACT,IAAI,CACFV,EAAwB1S,EACxByS,GAAqB,EACrB,KACF,CAEAF,EAAUc,IAAIrT,EAAWiT,EAC3B,CAEA,GAAIR,EAqBF,IAnBA,IAEIa,EAAQ,SAAeC,GACzB,IAAIC,EAAmB3B,EAAW4B,MAAK,SAAUzT,GAC/C,IAAIiT,EAASV,EAAU9T,IAAIuB,GAE3B,GAAIiT,EACF,OAAOA,EAAOS,MAAM,EAAGH,GAAIJ,OAAM,SAAUC,GACzC,OAAOA,CACT,GAEJ,IAEA,GAAII,EAEF,OADAd,EAAwBc,EACjB,OAEX,EAESD,EAnBY/B,EAAiB,EAAI,EAmBZ+B,EAAK,GAGpB,UAFFD,EAAMC,GADmBA,KAOpCtR,EAAMjC,YAAc0S,IACtBzQ,EAAMmG,cAAcxG,GAAMmP,OAAQ,EAClC9O,EAAMjC,UAAY0S,EAClBzQ,EAAM0R,OAAQ,EA5GhB,CA8GF,EAQEhK,iBAAkB,CAAC,UACnBgC,KAAM,CACJoF,OAAO,IE7IX,SAAS6C,GAAexG,EAAUY,EAAM6F,GAQtC,YAPyB,IAArBA,IACFA,EAAmB,CACjBrO,EAAG
,EACHE,EAAG,IAIA,CACLzC,IAAKmK,EAASnK,IAAM+K,EAAK3I,OAASwO,EAAiBnO,EACnDvG,MAAOiO,EAASjO,MAAQ6O,EAAK7I,MAAQ0O,EAAiBrO,EACtDtG,OAAQkO,EAASlO,OAAS8O,EAAK3I,OAASwO,EAAiBnO,EACzDtG,KAAMgO,EAAShO,KAAO4O,EAAK7I,MAAQ0O,EAAiBrO,EAExD,CAEA,SAASsO,GAAsB1G,GAC7B,MAAO,CAAC,EAAKjO,EAAOD,EAAQE,GAAM2U,MAAK,SAAUC,GAC/C,OAAO5G,EAAS4G,IAAS,CAC3B,GACF,CA+BA,UACEpS,KAAM,OACNC,SAAS,EACTC,MAAO,OACP6H,iBAAkB,CAAC,mBACnB5H,GAlCF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZ0Q,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBkU,EAAmB5R,EAAMmG,cAAc6L,gBACvCC,EAAoBhF,GAAejN,EAAO,CAC5C0N,eAAgB,cAEdwE,EAAoBjF,GAAejN,EAAO,CAC5C4N,aAAa,IAEXuE,EAA2BR,GAAeM,EAAmB5B,GAC7D+B,EAAsBT,GAAeO,EAAmBnK,EAAY6J,GACpES,EAAoBR,GAAsBM,GAC1CG,EAAmBT,GAAsBO,GAC7CpS,EAAMmG,cAAcxG,GAAQ,CAC1BwS,yBAA0BA,EAC1BC,oBAAqBA,EACrBC,kBAAmBA,EACnBC,iBAAkBA,GAEpBtS,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,+BAAgC2U,EAChC,sBAAuBC,GAE3B,GCJA,IACE3S,KAAM,SACNC,SAAS,EACTC,MAAO,OACPwB,SAAU,CAAC,iBACXvB,GA5BF,SAAgBa,GACd,IAAIX,EAAQW,EAAMX,MACdc,EAAUH,EAAMG,QAChBnB,EAAOgB,EAAMhB,KACb4S,EAAkBzR,EAAQuG,OAC1BA,OAA6B,IAApBkL,EAA6B,CAAC,EAAG,GAAKA,EAC/C7I,EAAO,EAAW7L,QAAO,SAAUC,EAAKC,GAE1C,OADAD,EAAIC,GA5BD,SAAiCA,EAAWyI,EAAOa,GACxD,IAAIjB,EAAgB9E,EAAiBvD,GACjCyU,EAAiB,CAACrV,EAAM,GAAKqH,QAAQ4B,IAAkB,GAAK,EAAI,EAEhErG,EAAyB,mBAAXsH,EAAwBA,EAAOhL,OAAOkE,OAAO,CAAC,EAAGiG,EAAO,CACxEzI,UAAWA,KACPsJ,EACFoL,EAAW1S,EAAK,GAChB2S,EAAW3S,EAAK,GAIpB,OAFA0S,EAAWA,GAAY,EACvBC,GAAYA,GAAY,GAAKF,EACtB,CAACrV,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAAI,CACjD7C,EAAGmP,EACHjP,EAAGgP,GACD,CACFlP,EAAGkP,EACHhP,EAAGiP,EAEP,CASqBC,CAAwB5U,EAAWiC,EAAMwG,MAAOa,GAC1DvJ,CACT,GAAG,CAAC,GACA8U,EAAwBlJ,EAAK1J,EAAMjC,WACnCwF,EAAIqP,EAAsBrP,EAC1BE,EAAImP,EAAsBnP,EAEW,MAArCzD,EAAMmG,cAAcD,gBACtBlG,EAAMmG,cAAcD,cAAc3C,GAAKA,EACvCvD,EAAMmG,cAAcD,cAAczC,GAAKA,GAGzCzD,EAAMmG,cAAcxG,GAAQ+J,CAC9B,GC1BA,IACE/J,KAAM,gBACNC,SAAS,EACTC,MAAO,OACPC,GApBF,SAAuBC,GACrB,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KAKhBK,EAAMmG,cAAcxG,GAAQkN,GAAe,CACzClP,UAAWqC,EAAMwG,MAAM7I,UACvBiB,QAASoB,EAAMwG,MAAM9I,OACrBqD,SAAU,WACVhD,UAAWiC,EAAMjC,WAErB,EAQE2L,KAAM,CAAC,GCgHT,IACE/J,KAAM,kBACNC,SAAS,EACTC,MAAO,OACPC,GA/HF,SAAyBC,GACvB,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KACZoP,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAsCA,EACrD3B,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtBrH,EAAUzF,EAAQyF,QAClBsM,EAAkB/R,EAAQgS,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAwBjS,EAAQkS,aAChCA,OAAyC,IAA1BD,EAAmC,EAAIA,EACtD5H,EAAW8B,GAAejN,EAAO,CACnCsN,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTqH,YAAaA,IAEXxH,EAAgB9E,EAAiBtB,EAAMjC,WACvCiK,EAAYL,EAAa3H,EAAMjC,WAC/BkV,GAAmBjL,EACnBgF,EAAWtH,EAAyBU,GACpC8I,ECrCY,MDqCSlC,ECrCH,IAAM,IDsCxB9G,EAAgBlG,EAAMmG,cAAcD,cACpCmK,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBwV,EAA4C,mBAAjBF,EAA8BA,EAAa3W,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CACvGzI,UAAWiC,EAAMjC,aACbiV,EACFG,EAA2D,iBAAtBD,EAAiC,CACxElG,SAAUkG,EACVhE,QAASgE,GACP7W,OAAOkE,OAAO,CAChByM,SAAU,EACVkC,QAAS,GACRgE,GACCE,EAAsBpT,EAAMmG,cAAckB,OAASrH,EAAMmG,cAAckB,OAAOrH,EAAMjC,WAAa,KACjG2L,EAAO,CACTnG,EAAG,EACHE,EAAG,GAGL,GAAKyC,EAAL,CAIA,GAAI8I,EAAe,CACjB,IAAIqE,EAEAC,EAAwB,MAAbtG,EAAmB,EAAM7P,EACpCoW,EAAuB,MAAbvG,EAAmB/P,EAASC,EACtCoJ,EAAmB,MAAb0G,EAAmB,SAAW,QACpC3F,EAASnB,EAAc8G,GACvBtL,EAAM2F,EAAS8D,EAASmI,GACxB7R,EAAM4F,EAAS8D,EAASoI,GACxBC,EAAWV,GAAU/K,EAAWzB,GAAO,EAAI,EAC3CmN,EAASzL,IAAc1K,EAAQ+S,EAAc/J,GAAOyB,EAAWzB,GAC/DoN,EAAS1L,IAAc1K,GAASyK,EAAWzB,IAAQ+J,EAAc/J,GAGjEL,EAAejG,EAAME,SAASgB,MAC9BwF,EAAYoM,GAAU7M,EAAetC,EAAcsC,GAAgB,CACrE/C,MAAO,EA
CPE,OAAQ,GAENuQ,GAAqB3T,EAAMmG,cAAc,oBAAsBnG,EAAMmG,cAAc,oBAAoBI,QxBhFtG,CACLvF,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GwB6EFyW,GAAkBD,GAAmBL,GACrCO,GAAkBF,GAAmBJ,GAMrCO,GAAWnO,EAAO,EAAG0K,EAAc/J,GAAMI,EAAUJ,IACnDyN,GAAYd,EAAkB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWF,GAAkBT,EAA4BnG,SAAWyG,EAASK,GAAWF,GAAkBT,EAA4BnG,SACxMgH,GAAYf,GAAmB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWD,GAAkBV,EAA4BnG,SAAW0G,EAASI,GAAWD,GAAkBV,EAA4BnG,SACzMjG,GAAoB/G,EAAME,SAASgB,OAAS8D,EAAgBhF,EAAME,SAASgB,OAC3E+S,GAAelN,GAAiC,MAAbiG,EAAmBjG,GAAkBsF,WAAa,EAAItF,GAAkBuF,YAAc,EAAI,EAC7H4H,GAAwH,OAAjGb,EAA+C,MAAvBD,OAA8B,EAASA,EAAoBpG,IAAqBqG,EAAwB,EAEvJc,GAAY9M,EAAS2M,GAAYE,GACjCE,GAAkBzO,EAAOmN,EAAS,EAAQpR,EAF9B2F,EAAS0M,GAAYG,GAAsBD,IAEKvS,EAAK2F,EAAQyL,EAAS,EAAQrR,EAAK0S,IAAa1S,GAChHyE,EAAc8G,GAAYoH,GAC1B1K,EAAKsD,GAAYoH,GAAkB/M,CACrC,CAEA,GAAI8H,EAAc,CAChB,IAAIkF,GAEAC,GAAyB,MAAbtH,EAAmB,EAAM7P,EAErCoX,GAAwB,MAAbvH,EAAmB/P,EAASC,EAEvCsX,GAAUtO,EAAcgJ,GAExBuF,GAAmB,MAAZvF,EAAkB,SAAW,QAEpCwF,GAAOF,GAAUrJ,EAASmJ,IAE1BK,GAAOH,GAAUrJ,EAASoJ,IAE1BK,IAAuD,IAAxC,CAAC,EAAKzX,GAAMqH,QAAQ4B,GAEnCyO,GAAyH,OAAjGR,GAAgD,MAAvBjB,OAA8B,EAASA,EAAoBlE,IAAoBmF,GAAyB,EAEzJS,GAAaF,GAAeF,GAAOF,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAEzI6F,GAAaH,GAAeJ,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAAUyF,GAE5IK,GAAmBlC,GAAU8B,G1BzH9B,SAAwBlT,EAAK1E,EAAOyE,GACzC,IAAIwT,EAAItP,EAAOjE,EAAK1E,EAAOyE,GAC3B,OAAOwT,EAAIxT,EAAMA,EAAMwT,CACzB,C0BsHoDC,CAAeJ,GAAYN,GAASO,IAAcpP,EAAOmN,EAASgC,GAAaJ,GAAMF,GAAS1B,EAASiC,GAAaJ,IAEpKzO,EAAcgJ,GAAW8F,GACzBtL,EAAKwF,GAAW8F,GAAmBR,EACrC,CAEAxU,EAAMmG,cAAcxG,GAAQ+J,CAvE5B,CAwEF,EAQEhC,iBAAkB,CAAC,WE1HN,SAASyN,GAAiBC,EAAyBrQ,EAAcsD,QAC9D,IAAZA,IACFA,GAAU,GAGZ,ICnBoCrJ,ECJOJ,EFuBvCyW,EAA0B9V,EAAcwF,GACxCuQ,EAAuB/V,EAAcwF,IAf3C,SAAyBnG,GACvB,IAAImN,EAAOnN,EAAQ+D,wBACfI,EAASpB,EAAMoK,EAAK7I,OAAStE,EAAQqE,aAAe,EACpDD,EAASrB,EAAMoK,EAAK3I,QAAUxE,EAAQuE,cAAgB,EAC1D,OAAkB,IAAXJ,GAA2B,IAAXC,CACzB,CAU4DuS,CAAgBxQ,GACtEJ,EAAkBF,EAAmBM,GACrCgH,EAAOpJ,EAAsByS,EAAyBE,EAAsBjN,GAC5EyB,EAAS,CACXc,WAAY,EACZE,UAAW,GAET7C,EAAU,CACZ1E,EAAG,EACHE,EAAG,GAkBL,OAfI4R,IAA4BA,IAA4BhN,MACxB,SAA9B1J,EAAYoG,IAChBkG,GAAetG,MACbmF,GCnCgC9K,EDmCT+F,KClCdhG,EAAUC,IAAUO,EAAcP,GCJxC,CACL4L,YAFyChM,EDQbI,GCNR4L,WACpBE,UAAWlM,EAAQkM,WDGZH,GAAgB3L,IDoCnBO,EAAcwF,KAChBkD,EAAUtF,EAAsBoC,GAAc,IACtCxB,GAAKwB,EAAauH,WAC1BrE,EAAQxE,GAAKsB,EAAasH,WACjB1H,IACTsD,EAAQ1E,EAAIyH,GAAoBrG,KAI7B,CACLpB,EAAGwI,EAAK5O,KAAO2M,EAAOc,WAAa3C,EAAQ1E,EAC3CE,EAAGsI,EAAK/K,IAAM8I,EAAOgB,UAAY7C,EAAQxE,EACzCP,MAAO6I,EAAK7I,MACZE,OAAQ2I,EAAK3I,OAEjB,CGvDA,SAASoS,GAAMC,GACb,IAAItT,EAAM,IAAIoO,IACVmF,EAAU,IAAIC,IACdC,EAAS,GAKb,SAAS3F,EAAK4F,GACZH,EAAQI,IAAID,EAASlW,MACN,GAAG3B,OAAO6X,EAASxU,UAAY,GAAIwU,EAASnO,kBAAoB,IACtEvH,SAAQ,SAAU4V,GACzB,IAAKL,EAAQM,IAAID,GAAM,CACrB,IAAIE,EAAc9T,EAAI3F,IAAIuZ,GAEtBE,GACFhG,EAAKgG,EAET,CACF,IACAL,EAAO3E,KAAK4E,EACd,CAQA,OAzBAJ,EAAUtV,SAAQ,SAAU0V,GAC1B1T,EAAIiP,IAAIyE,EAASlW,KAAMkW,EACzB,IAiBAJ,EAAUtV,SAAQ,SAAU0V,GACrBH,EAAQM,IAAIH,EAASlW,OAExBsQ,EAAK4F,EAET,IACOD,CACT,CCvBA,IAAIM,GAAkB,CACpBnY,UAAW,SACX0X,UAAW,GACX1U,SAAU,YAGZ,SAASoV,KACP,IAAK,IAAI1B,EAAO2B,UAAUrG,OAAQsG,EAAO,IAAIpU,MAAMwS,GAAO6B,EAAO,EAAGA,EAAO7B,EAAM6B,IAC/ED,EAAKC,GAAQF,UAAUE,GAGzB,OAAQD,EAAKvE,MAAK,SAAUlT,GAC1B,QAASA,GAAoD,mBAAlCA,EAAQ+D,sBACrC,GACF,CAEO,SAAS4T,GAAgBC,QACL,IAArBA,IACFA,EAAmB,CAAC,GAGtB,IAAIC,EAAoBD,EACpBE,EAAwBD,EAAkBE,iBAC1CA,OAA6C,IAA1BD,EAAmC,GAAKA,EAC3DE,EAAyBH,EAAkBI,eAC3CA,OAA4C,IAA3BD,EAAoCV,GAAkBU,EAC3E,OAAO,SAAsBjZ,EAAWD,EAAQoD,QAC9B,IAAZA,IACFA,EAAU+V,GAGZ,ICxC6B/W,EAC3BgX,EDuCE9W,EAAQ,CACVjC,UAAW,SACXgZ,iBAAkB,GAClBjW,QAASzE,OAAOkE,OAAO,CAA
C,EAAG2V,GAAiBW,GAC5C1Q,cAAe,CAAC,EAChBjG,SAAU,CACRvC,UAAWA,EACXD,OAAQA,GAEV4C,WAAY,CAAC,EACbD,OAAQ,CAAC,GAEP2W,EAAmB,GACnBC,GAAc,EACdrN,EAAW,CACb5J,MAAOA,EACPkX,WAAY,SAAoBC,GAC9B,IAAIrW,EAAsC,mBAArBqW,EAAkCA,EAAiBnX,EAAMc,SAAWqW,EACzFC,IACApX,EAAMc,QAAUzE,OAAOkE,OAAO,CAAC,EAAGsW,EAAgB7W,EAAMc,QAASA,GACjEd,EAAMiK,cAAgB,CACpBtM,UAAW0B,EAAU1B,GAAa6N,GAAkB7N,GAAaA,EAAU4Q,eAAiB/C,GAAkB7N,EAAU4Q,gBAAkB,GAC1I7Q,OAAQ8N,GAAkB9N,IAI5B,IElE4B+X,EAC9B4B,EFiEMN,EDhCG,SAAwBtB,GAErC,IAAIsB,EAAmBvB,GAAMC,GAE7B,OAAO/W,EAAeb,QAAO,SAAUC,EAAK+B,GAC1C,OAAO/B,EAAIE,OAAO+Y,EAAiBvR,QAAO,SAAUqQ,GAClD,OAAOA,EAAShW,QAAUA,CAC5B,IACF,GAAG,GACL,CCuB+ByX,EElEK7B,EFkEsB,GAAGzX,OAAO2Y,EAAkB3W,EAAMc,QAAQ2U,WEjE9F4B,EAAS5B,EAAU5X,QAAO,SAAUwZ,EAAQE,GAC9C,IAAIC,EAAWH,EAAOE,EAAQ5X,MAK9B,OAJA0X,EAAOE,EAAQ5X,MAAQ6X,EAAWnb,OAAOkE,OAAO,CAAC,EAAGiX,EAAUD,EAAS,CACrEzW,QAASzE,OAAOkE,OAAO,CAAC,EAAGiX,EAAS1W,QAASyW,EAAQzW,SACrD4I,KAAMrN,OAAOkE,OAAO,CAAC,EAAGiX,EAAS9N,KAAM6N,EAAQ7N,QAC5C6N,EACEF,CACT,GAAG,CAAC,GAEGhb,OAAO4D,KAAKoX,GAAQlV,KAAI,SAAUhG,GACvC,OAAOkb,EAAOlb,EAChB,MF4DM,OAJA6D,EAAM+W,iBAAmBA,EAAiBvR,QAAO,SAAUiS,GACzD,OAAOA,EAAE7X,OACX,IA+FFI,EAAM+W,iBAAiB5W,SAAQ,SAAUJ,GACvC,IAAIJ,EAAOI,EAAKJ,KACZ+X,EAAe3X,EAAKe,QACpBA,OAA2B,IAAjB4W,EAA0B,CAAC,EAAIA,EACzChX,EAASX,EAAKW,OAElB,GAAsB,mBAAXA,EAAuB,CAChC,IAAIiX,EAAYjX,EAAO,CACrBV,MAAOA,EACPL,KAAMA,EACNiK,SAAUA,EACV9I,QAASA,IAKXkW,EAAiB/F,KAAK0G,GAFT,WAAmB,EAGlC,CACF,IA/GS/N,EAASQ,QAClB,EAMAwN,YAAa,WACX,IAAIX,EAAJ,CAIA,IAAIY,EAAkB7X,EAAME,SACxBvC,EAAYka,EAAgBla,UAC5BD,EAASma,EAAgBna,OAG7B,GAAKyY,GAAiBxY,EAAWD,GAAjC,CAKAsC,EAAMwG,MAAQ,CACZ7I,UAAWwX,GAAiBxX,EAAWqH,EAAgBtH,GAAoC,UAA3BsC,EAAMc,QAAQC,UAC9ErD,OAAQiG,EAAcjG,IAOxBsC,EAAM0R,OAAQ,EACd1R,EAAMjC,UAAYiC,EAAMc,QAAQ/C,UAKhCiC,EAAM+W,iBAAiB5W,SAAQ,SAAU0V,GACvC,OAAO7V,EAAMmG,cAAc0P,EAASlW,MAAQtD,OAAOkE,OAAO,CAAC,EAAGsV,EAASnM,KACzE,IAEA,IAAK,IAAIoO,EAAQ,EAAGA,EAAQ9X,EAAM+W,iBAAiBhH,OAAQ+H,IACzD,IAAoB,IAAhB9X,EAAM0R,MAAV,CAMA,IAAIqG,EAAwB/X,EAAM+W,iBAAiBe,GAC/ChY,EAAKiY,EAAsBjY,GAC3BkY,EAAyBD,EAAsBjX,QAC/CoM,OAAsC,IAA3B8K,EAAoC,CAAC,EAAIA,EACpDrY,EAAOoY,EAAsBpY,KAEf,mBAAPG,IACTE,EAAQF,EAAG,CACTE,MAAOA,EACPc,QAASoM,EACTvN,KAAMA,EACNiK,SAAUA,KACN5J,EAdR,MAHEA,EAAM0R,OAAQ,EACdoG,GAAS,CAzBb,CATA,CAqDF,EAGA1N,QC1I2BtK,ED0IV,WACf,OAAO,IAAImY,SAAQ,SAAUC,GAC3BtO,EAASgO,cACTM,EAAQlY,EACV,GACF,EC7IG,WAUL,OATK8W,IACHA,EAAU,IAAImB,SAAQ,SAAUC,GAC9BD,QAAQC,UAAUC,MAAK,WACrBrB,OAAUsB,EACVF,EAAQpY,IACV,GACF,KAGKgX,CACT,GDmIIuB,QAAS,WACPjB,IACAH,GAAc,CAChB,GAGF,IAAKd,GAAiBxY,EAAWD,GAC/B,OAAOkM,EAmCT,SAASwN,IACPJ,EAAiB7W,SAAQ,SAAUL,GACjC,OAAOA,GACT,IACAkX,EAAmB,EACrB,CAEA,OAvCApN,EAASsN,WAAWpW,GAASqX,MAAK,SAAUnY,IACrCiX,GAAenW,EAAQwX,eAC1BxX,EAAQwX,cAActY,EAE1B,IAmCO4J,CACT,CACF,CACO,IAAI2O,GAA4BhC,KGzLnC,GAA4BA,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,EAAa,GAAQ,GAAM,GAAiB,EAAO,MCJrH,GAA4BjC,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,KCatE,MAAMC,GAAa,IAAIlI,IACjBmI,GAAO,CACX,GAAAtH,CAAIxS,EAASzC,EAAKyN,GACX6O,GAAWzC,IAAIpX,IAClB6Z,GAAWrH,IAAIxS,EAAS,IAAI2R,KAE9B,MAAMoI,EAAcF,GAAWjc,IAAIoC,GAI9B+Z,EAAY3C,IAAI7Z,IAA6B,IAArBwc,EAAYC,KAKzCD,EAAYvH,IAAIjV,EAAKyN,GAHnBiP,QAAQC,MAAM,+EAA+E7W,MAAM8W,KAAKJ,EAAY1Y,QAAQ,MAIhI,EACAzD,IAAG,CAACoC,EAASzC,IACPsc,GAAWzC,IAAIpX,IACV6Z,GAAWjc,IAAIoC,GAASpC,IAAIL,IAE9B,KAET,MAAA6c,CAAOpa,EAASzC,GACd,IAAKsc,GAAWzC,IAAIpX,GAClB,OAEF,MAAM+Z,EAAcF,GAAWjc,IAAIoC,GACnC+Z,EAAYM,OAAO9c,GAGM,IAArBwc,EAAYC,MACdH,GAAWQ,OAAOra,EAEtB,GAYIsa,GAAiB,gBAOjBC,GAAgBC,IAChBA,GAAYna,OAAOoa,KAAOpa,OAAOoa,IAAIC,SAEvCF,EAAWA,EAAS5O,QAAQ,iBAAiB,CAAC+O,EAAOC,IAAO,IAAIH,IAAIC,OAAOE,QAEtEJ,GA4CHK,GAAuB7a,IAC3BA,EAAQ8a,cAAc,IAAIC,MAAMT,IAAgB,EAE5C
,GAAYU,MACXA,GAA4B,iBAAXA,UAGO,IAAlBA,EAAOC,SAChBD,EAASA,EAAO,SAEgB,IAApBA,EAAOE,UAEjBC,GAAaH,GAEb,GAAUA,GACLA,EAAOC,OAASD,EAAO,GAAKA,EAEf,iBAAXA,GAAuBA,EAAO7J,OAAS,EACzCrL,SAAS+C,cAAc0R,GAAcS,IAEvC,KAEHI,GAAYpb,IAChB,IAAK,GAAUA,IAAgD,IAApCA,EAAQqb,iBAAiBlK,OAClD,OAAO,EAET,MAAMmK,EAAgF,YAA7D5V,iBAAiB1F,GAASub,iBAAiB,cAE9DC,EAAgBxb,EAAQyb,QAAQ,uBACtC,IAAKD,EACH,OAAOF,EAET,GAAIE,IAAkBxb,EAAS,CAC7B,MAAM0b,EAAU1b,EAAQyb,QAAQ,WAChC,GAAIC,GAAWA,EAAQlW,aAAegW,EACpC,OAAO,EAET,GAAgB,OAAZE,EACF,OAAO,CAEX,CACA,OAAOJ,CAAgB,EAEnBK,GAAa3b,IACZA,GAAWA,EAAQkb,WAAaU,KAAKC,gBAGtC7b,EAAQ8b,UAAU7W,SAAS,mBAGC,IAArBjF,EAAQ+b,SACV/b,EAAQ+b,SAEV/b,EAAQgc,aAAa,aAAoD,UAArChc,EAAQic,aAAa,aAE5DC,GAAiBlc,IACrB,IAAK8F,SAASC,gBAAgBoW,aAC5B,OAAO,KAIT,GAAmC,mBAAxBnc,EAAQqF,YAA4B,CAC7C,MAAM+W,EAAOpc,EAAQqF,cACrB,OAAO+W,aAAgBtb,WAAasb,EAAO,IAC7C,CACA,OAAIpc,aAAmBc,WACdd,EAIJA,EAAQwF,WAGN0W,GAAelc,EAAQwF,YAFrB,IAEgC,EAErC6W,GAAO,OAUPC,GAAStc,IACbA,EAAQuE,YAAY,EAGhBgY,GAAY,IACZlc,OAAOmc,SAAW1W,SAAS6G,KAAKqP,aAAa,qBACxC3b,OAAOmc,OAET,KAEHC,GAA4B,GAgB5BC,GAAQ,IAAuC,QAAjC5W,SAASC,gBAAgB4W,IACvCC,GAAqBC,IAhBAC,QAiBN,KACjB,MAAMC,EAAIR,KAEV,GAAIQ,EAAG,CACL,MAAMhc,EAAO8b,EAAOG,KACdC,EAAqBF,EAAE7b,GAAGH,GAChCgc,EAAE7b,GAAGH,GAAQ8b,EAAOK,gBACpBH,EAAE7b,GAAGH,GAAMoc,YAAcN,EACzBE,EAAE7b,GAAGH,GAAMqc,WAAa,KACtBL,EAAE7b,GAAGH,GAAQkc,EACNJ,EAAOK,gBAElB,GA5B0B,YAAxBpX,SAASuX,YAENZ,GAA0BtL,QAC7BrL,SAASyF,iBAAiB,oBAAoB,KAC5C,IAAK,MAAMuR,KAAYL,GACrBK,GACF,IAGJL,GAA0BpK,KAAKyK,IAE/BA,GAkBA,EAEEQ,GAAU,CAACC,EAAkB9F,EAAO,GAAI+F,EAAeD,IACxB,mBAArBA,EAAkCA,KAAoB9F,GAAQ+F,EAExEC,GAAyB,CAACX,EAAUY,EAAmBC,GAAoB,KAC/E,IAAKA,EAEH,YADAL,GAAQR,GAGV,MACMc,EAhKiC5d,KACvC,IAAKA,EACH,OAAO,EAIT,IAAI,mBACF6d,EAAkB,gBAClBC,GACEzd,OAAOqF,iBAAiB1F,GAC5B,MAAM+d,EAA0BC,OAAOC,WAAWJ,GAC5CK,EAAuBF,OAAOC,WAAWH,GAG/C,OAAKC,GAA4BG,GAKjCL,EAAqBA,EAAmBlb,MAAM,KAAK,GACnDmb,EAAkBA,EAAgBnb,MAAM,KAAK,GAtDf,KAuDtBqb,OAAOC,WAAWJ,GAAsBG,OAAOC,WAAWH,KANzD,CAMoG,EA2IpFK,CAAiCT,GADlC,EAExB,IAAIU,GAAS,EACb,MAAMC,EAAU,EACdrR,aAEIA,IAAW0Q,IAGfU,GAAS,EACTV,EAAkBjS,oBAAoB6O,GAAgB+D,GACtDf,GAAQR,GAAS,EAEnBY,EAAkBnS,iBAAiB+O,GAAgB+D,GACnDC,YAAW,KACJF,GACHvD,GAAqB6C,EACvB,GACCE,EAAiB,EAYhBW,GAAuB,CAAC1R,EAAM2R,EAAeC,EAAeC,KAChE,MAAMC,EAAa9R,EAAKsE,OACxB,IAAI+H,EAAQrM,EAAKjH,QAAQ4Y,GAIzB,OAAe,IAAXtF,GACMuF,GAAiBC,EAAiB7R,EAAK8R,EAAa,GAAK9R,EAAK,IAExEqM,GAASuF,EAAgB,GAAK,EAC1BC,IACFxF,GAASA,EAAQyF,GAAcA,GAE1B9R,EAAKjK,KAAKC,IAAI,EAAGD,KAAKE,IAAIoW,EAAOyF,EAAa,KAAI,EAerDC,GAAiB,qBACjBC,GAAiB,OACjBC,GAAgB,SAChBC,GAAgB,CAAC,EACvB,IAAIC,GAAW,EACf,MAAMC,GAAe,CACnBC,WAAY,YACZC,WAAY,YAERC,GAAe,IAAIrI,IAAI,CAAC,QAAS,WAAY,UAAW,YAAa,cAAe,aAAc,iBAAkB,YAAa,WAAY,YAAa,cAAe,YAAa,UAAW,WAAY,QAAS,oBAAqB,aAAc,YAAa,WAAY,cAAe,cAAe,cAAe,YAAa,eAAgB,gBAAiB,eAAgB,gBAAiB,aAAc,QAAS,OAAQ,SAAU,QAAS,SAAU,SAAU,UAAW,WAAY,OAAQ,SAAU,eAAgB,SAAU,OAAQ,mBAAoB,mBAAoB,QAAS,QAAS,WAM/lB,SAASsI,GAAarf,EAASsf,GAC7B,OAAOA,GAAO,GAAGA,MAAQN,QAAgBhf,EAAQgf,UAAYA,IAC/D,CACA,SAASO,GAAiBvf,GACxB,MAAMsf,EAAMD,GAAarf,GAGzB,OAFAA,EAAQgf,SAAWM,EACnBP,GAAcO,GAAOP,GAAcO,IAAQ,CAAC,EACrCP,GAAcO,EACvB,CAiCA,SAASE,GAAYC,EAAQC,EAAUC,EAAqB,MAC1D,OAAOliB,OAAOmiB,OAAOH,GAAQ7M,MAAKiN,GAASA,EAAMH,WAAaA,GAAYG,EAAMF,qBAAuBA,GACzG,CACA,SAASG,GAAoBC,EAAmB1B,EAAS2B,GACvD,MAAMC,EAAiC,iBAAZ5B,EAErBqB,EAAWO,EAAcD,EAAqB3B,GAAW2B,EAC/D,IAAIE,EAAYC,GAAaJ,GAI7B,OAHKX,GAAahI,IAAI8I,KACpBA,EAAYH,GAEP,CAACE,EAAaP,EAAUQ,EACjC,CACA,SAASE,GAAWpgB,EAAS+f,EAAmB1B,EAAS2B,EAAoBK,GAC3E,GAAiC,iBAAtBN,IAAmC/f,EAC5C,OAEF,IAAKigB,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GAIzF,GAAID,KAAqBd,GAAc,CACrC,MAAMqB,EAAepf,GACZ,SAAU2e,GACf,IAAKA,EAAMU,eAAiBV,EAAMU,gBAAkBV,EAAMW,iBAAmBX,EAAMW,eAAev
b,SAAS4a,EAAMU,eAC/G,OAAOrf,EAAGjD,KAAKwiB,KAAMZ,EAEzB,EAEFH,EAAWY,EAAaZ,EAC1B,CACA,MAAMD,EAASF,GAAiBvf,GAC1B0gB,EAAWjB,EAAOS,KAAeT,EAAOS,GAAa,CAAC,GACtDS,EAAmBnB,GAAYkB,EAAUhB,EAAUO,EAAc5B,EAAU,MACjF,GAAIsC,EAEF,YADAA,EAAiBN,OAASM,EAAiBN,QAAUA,GAGvD,MAAMf,EAAMD,GAAaK,EAAUK,EAAkBnU,QAAQgT,GAAgB,KACvE1d,EAAK+e,EA5Db,SAAoCjgB,EAASwa,EAAUtZ,GACrD,OAAO,SAASmd,EAAQwB,GACtB,MAAMe,EAAc5gB,EAAQ6gB,iBAAiBrG,GAC7C,IAAK,IAAI,OACPxN,GACE6S,EAAO7S,GAAUA,IAAWyT,KAAMzT,EAASA,EAAOxH,WACpD,IAAK,MAAMsb,KAAcF,EACvB,GAAIE,IAAe9T,EASnB,OANA+T,GAAWlB,EAAO,CAChBW,eAAgBxT,IAEdqR,EAAQgC,QACVW,GAAaC,IAAIjhB,EAAS6f,EAAMqB,KAAM1G,EAAUtZ,GAE3CA,EAAGigB,MAAMnU,EAAQ,CAAC6S,GAG/B,CACF,CAwC2BuB,CAA2BphB,EAASqe,EAASqB,GAvExE,SAA0B1f,EAASkB,GACjC,OAAO,SAASmd,EAAQwB,GAOtB,OANAkB,GAAWlB,EAAO,CAChBW,eAAgBxgB,IAEdqe,EAAQgC,QACVW,GAAaC,IAAIjhB,EAAS6f,EAAMqB,KAAMhgB,GAEjCA,EAAGigB,MAAMnhB,EAAS,CAAC6f,GAC5B,CACF,CA6DoFwB,CAAiBrhB,EAAS0f,GAC5Gxe,EAAGye,mBAAqBM,EAAc5B,EAAU,KAChDnd,EAAGwe,SAAWA,EACdxe,EAAGmf,OAASA,EACZnf,EAAG8d,SAAWM,EACdoB,EAASpB,GAAOpe,EAChBlB,EAAQuL,iBAAiB2U,EAAWhf,EAAI+e,EAC1C,CACA,SAASqB,GAActhB,EAASyf,EAAQS,EAAW7B,EAASsB,GAC1D,MAAMze,EAAKse,GAAYC,EAAOS,GAAY7B,EAASsB,GAC9Cze,IAGLlB,EAAQyL,oBAAoByU,EAAWhf,EAAIqgB,QAAQ5B,WAC5CF,EAAOS,GAAWhf,EAAG8d,UAC9B,CACA,SAASwC,GAAyBxhB,EAASyf,EAAQS,EAAWuB,GAC5D,MAAMC,EAAoBjC,EAAOS,IAAc,CAAC,EAChD,IAAK,MAAOyB,EAAY9B,KAAUpiB,OAAOmkB,QAAQF,GAC3CC,EAAWE,SAASJ,IACtBH,GAActhB,EAASyf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAGtE,CACA,SAASQ,GAAaN,GAGpB,OADAA,EAAQA,EAAMjU,QAAQiT,GAAgB,IAC/BI,GAAaY,IAAUA,CAChC,CACA,MAAMmB,GAAe,CACnB,EAAAc,CAAG9hB,EAAS6f,EAAOxB,EAAS2B,GAC1BI,GAAWpgB,EAAS6f,EAAOxB,EAAS2B,GAAoB,EAC1D,EACA,GAAA+B,CAAI/hB,EAAS6f,EAAOxB,EAAS2B,GAC3BI,GAAWpgB,EAAS6f,EAAOxB,EAAS2B,GAAoB,EAC1D,EACA,GAAAiB,CAAIjhB,EAAS+f,EAAmB1B,EAAS2B,GACvC,GAAiC,iBAAtBD,IAAmC/f,EAC5C,OAEF,MAAOigB,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GACrFgC,EAAc9B,IAAcH,EAC5BN,EAASF,GAAiBvf,GAC1B0hB,EAAoBjC,EAAOS,IAAc,CAAC,EAC1C+B,EAAclC,EAAkBmC,WAAW,KACjD,QAAwB,IAAbxC,EAAX,CAQA,GAAIuC,EACF,IAAK,MAAME,KAAgB1kB,OAAO4D,KAAKoe,GACrC+B,GAAyBxhB,EAASyf,EAAQ0C,EAAcpC,EAAkBlN,MAAM,IAGpF,IAAK,MAAOuP,EAAavC,KAAUpiB,OAAOmkB,QAAQF,GAAoB,CACpE,MAAMC,EAAaS,EAAYxW,QAAQkT,GAAe,IACjDkD,IAAejC,EAAkB8B,SAASF,IAC7CL,GAActhB,EAASyf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAEpE,CAXA,KAPA,CAEE,IAAKliB,OAAO4D,KAAKqgB,GAAmBvQ,OAClC,OAEFmQ,GAActhB,EAASyf,EAAQS,EAAWR,EAAUO,EAAc5B,EAAU,KAE9E,CAYF,EACA,OAAAgE,CAAQriB,EAAS6f,EAAOpI,GACtB,GAAqB,iBAAVoI,IAAuB7f,EAChC,OAAO,KAET,MAAM+c,EAAIR,KAGV,IAAI+F,EAAc,KACdC,GAAU,EACVC,GAAiB,EACjBC,GAAmB,EAJH5C,IADFM,GAAaN,IAMZ9C,IACjBuF,EAAcvF,EAAEhC,MAAM8E,EAAOpI,GAC7BsF,EAAE/c,GAASqiB,QAAQC,GACnBC,GAAWD,EAAYI,uBACvBF,GAAkBF,EAAYK,gCAC9BF,EAAmBH,EAAYM,sBAEjC,MAAMC,EAAM9B,GAAW,IAAIhG,MAAM8E,EAAO,CACtC0C,UACAO,YAAY,IACVrL,GAUJ,OATIgL,GACFI,EAAIE,iBAEFP,GACFxiB,EAAQ8a,cAAc+H,GAEpBA,EAAIJ,kBAAoBH,GAC1BA,EAAYS,iBAEPF,CACT,GAEF,SAAS9B,GAAWljB,EAAKmlB,EAAO,CAAC,GAC/B,IAAK,MAAOzlB,EAAKa,KAAUX,OAAOmkB,QAAQoB,GACxC,IACEnlB,EAAIN,GAAOa,CACb,CAAE,MAAO6kB,GACPxlB,OAAOC,eAAeG,EAAKN,EAAK,CAC9B2lB,cAAc,EACdtlB,IAAG,IACMQ,GAGb,CAEF,OAAOP,CACT,CASA,SAASslB,GAAc/kB,GACrB,GAAc,SAAVA,EACF,OAAO,EAET,GAAc,UAAVA,EACF,OAAO,EAET,GAAIA,IAAU4f,OAAO5f,GAAOkC,WAC1B,OAAO0d,OAAO5f,GAEhB,GAAc,KAAVA,GAA0B,SAAVA,EAClB,OAAO,KAET,GAAqB,iBAAVA,EACT,OAAOA,EAET,IACE,OAAOglB,KAAKC,MAAMC,mBAAmBllB,GACvC,CAAE,MAAO6kB,GACP,OAAO7kB,CACT,CACF,CACA,SAASmlB,GAAiBhmB,GACxB,OAAOA,EAAIqO,QAAQ,UAAU4X,GAAO,IAAIA,EAAItjB,iBAC9C,CACA,MAAMujB,GAAc,CAClB,gBAAAC,CAAiB1jB,EAASzC,EAAKa,GAC7B4B,EAAQ6B,aAAa,WAAW0hB,GAAiBhmB,KAAQa,EAC3D,EACA,mBAAAulB,CAAoB3jB,EAASzC,GAC3ByC,EAAQ
4B,gBAAgB,WAAW2hB,GAAiBhmB,KACtD,EACA,iBAAAqmB,CAAkB5jB,GAChB,IAAKA,EACH,MAAO,CAAC,EAEV,MAAM0B,EAAa,CAAC,EACdmiB,EAASpmB,OAAO4D,KAAKrB,EAAQ8jB,SAASld,QAAOrJ,GAAOA,EAAI2kB,WAAW,QAAU3kB,EAAI2kB,WAAW,cAClG,IAAK,MAAM3kB,KAAOsmB,EAAQ,CACxB,IAAIE,EAAUxmB,EAAIqO,QAAQ,MAAO,IACjCmY,EAAUA,EAAQC,OAAO,GAAG9jB,cAAgB6jB,EAAQlR,MAAM,EAAGkR,EAAQ5S,QACrEzP,EAAWqiB,GAAWZ,GAAcnjB,EAAQ8jB,QAAQvmB,GACtD,CACA,OAAOmE,CACT,EACAuiB,iBAAgB,CAACjkB,EAASzC,IACjB4lB,GAAcnjB,EAAQic,aAAa,WAAWsH,GAAiBhmB,QAgB1E,MAAM2mB,GAEJ,kBAAWC,GACT,MAAO,CAAC,CACV,CACA,sBAAWC,GACT,MAAO,CAAC,CACV,CACA,eAAWpH,GACT,MAAM,IAAIqH,MAAM,sEAClB,CACA,UAAAC,CAAWC,GAIT,OAHAA,EAAS9D,KAAK+D,gBAAgBD,GAC9BA,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CACA,iBAAAE,CAAkBF,GAChB,OAAOA,CACT,CACA,eAAAC,CAAgBD,EAAQvkB,GACtB,MAAM2kB,EAAa,GAAU3kB,GAAWyjB,GAAYQ,iBAAiBjkB,EAAS,UAAY,CAAC,EAE3F,MAAO,IACFygB,KAAKmE,YAAYT,WACM,iBAAfQ,EAA0BA,EAAa,CAAC,KAC/C,GAAU3kB,GAAWyjB,GAAYG,kBAAkB5jB,GAAW,CAAC,KAC7C,iBAAXukB,EAAsBA,EAAS,CAAC,EAE/C,CACA,gBAAAG,CAAiBH,EAAQM,EAAcpE,KAAKmE,YAAYR,aACtD,IAAK,MAAO7hB,EAAUuiB,KAAkBrnB,OAAOmkB,QAAQiD,GAAc,CACnE,MAAMzmB,EAAQmmB,EAAOhiB,GACfwiB,EAAY,GAAU3mB,GAAS,UAjiBrC4c,OADSA,EAkiB+C5c,GAhiBnD,GAAG4c,IAELvd,OAAOM,UAAUuC,SAASrC,KAAK+c,GAAQL,MAAM,eAAe,GAAGza,cA+hBlE,IAAK,IAAI8kB,OAAOF,GAAehhB,KAAKihB,GAClC,MAAM,IAAIE,UAAU,GAAGxE,KAAKmE,YAAY5H,KAAKkI,0BAA0B3iB,qBAA4BwiB,yBAAiCD,MAExI,CAtiBW9J,KAuiBb,EAqBF,MAAMmK,WAAsBjB,GAC1B,WAAAU,CAAY5kB,EAASukB,GACnBa,SACAplB,EAAUmb,GAAWnb,MAIrBygB,KAAK4E,SAAWrlB,EAChBygB,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/BzK,GAAKtH,IAAIiO,KAAK4E,SAAU5E,KAAKmE,YAAYW,SAAU9E,MACrD,CAGA,OAAA+E,GACE1L,GAAKM,OAAOqG,KAAK4E,SAAU5E,KAAKmE,YAAYW,UAC5CvE,GAAaC,IAAIR,KAAK4E,SAAU5E,KAAKmE,YAAYa,WACjD,IAAK,MAAMC,KAAgBjoB,OAAOkoB,oBAAoBlF,MACpDA,KAAKiF,GAAgB,IAEzB,CACA,cAAAE,CAAe9I,EAAU9c,EAAS6lB,GAAa,GAC7CpI,GAAuBX,EAAU9c,EAAS6lB,EAC5C,CACA,UAAAvB,CAAWC,GAIT,OAHAA,EAAS9D,KAAK+D,gBAAgBD,EAAQ9D,KAAK4E,UAC3Cd,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CAGA,kBAAOuB,CAAY9lB,GACjB,OAAO8Z,GAAKlc,IAAIud,GAAWnb,GAAUygB,KAAK8E,SAC5C,CACA,0BAAOQ,CAAoB/lB,EAASukB,EAAS,CAAC,GAC5C,OAAO9D,KAAKqF,YAAY9lB,IAAY,IAAIygB,KAAKzgB,EAA2B,iBAAXukB,EAAsBA,EAAS,KAC9F,CACA,kBAAWyB,GACT,MA5CY,OA6Cd,CACA,mBAAWT,GACT,MAAO,MAAM9E,KAAKzD,MACpB,CACA,oBAAWyI,GACT,MAAO,IAAIhF,KAAK8E,UAClB,CACA,gBAAOU,CAAUllB,GACf,MAAO,GAAGA,IAAO0f,KAAKgF,WACxB,EAUF,MAAMS,GAAclmB,IAClB,IAAIwa,EAAWxa,EAAQic,aAAa,kBACpC,IAAKzB,GAAyB,MAAbA,EAAkB,CACjC,IAAI2L,EAAgBnmB,EAAQic,aAAa,QAMzC,IAAKkK,IAAkBA,EAActE,SAAS,OAASsE,EAAcjE,WAAW,KAC9E,OAAO,KAILiE,EAActE,SAAS,OAASsE,EAAcjE,WAAW,OAC3DiE,EAAgB,IAAIA,EAAcxjB,MAAM,KAAK,MAE/C6X,EAAW2L,GAAmC,MAAlBA,EAAwB5L,GAAc4L,EAAcC,QAAU,IAC5F,CACA,OAAO5L,CAAQ,EAEX6L,GAAiB,CACrBzT,KAAI,CAAC4H,EAAUxa,EAAU8F,SAASC,kBACzB,GAAG3G,UAAUsB,QAAQ3C,UAAU8iB,iBAAiB5iB,KAAK+B,EAASwa,IAEvE8L,QAAO,CAAC9L,EAAUxa,EAAU8F,SAASC,kBAC5BrF,QAAQ3C,UAAU8K,cAAc5K,KAAK+B,EAASwa,GAEvD+L,SAAQ,CAACvmB,EAASwa,IACT,GAAGpb,UAAUY,EAAQumB,UAAU3f,QAAOzB,GAASA,EAAMqhB,QAAQhM,KAEtE,OAAAiM,CAAQzmB,EAASwa,GACf,MAAMiM,EAAU,GAChB,IAAIC,EAAW1mB,EAAQwF,WAAWiW,QAAQjB,GAC1C,KAAOkM,GACLD,EAAQpU,KAAKqU,GACbA,EAAWA,EAASlhB,WAAWiW,QAAQjB,GAEzC,OAAOiM,CACT,EACA,IAAAE,CAAK3mB,EAASwa,GACZ,IAAIoM,EAAW5mB,EAAQ6mB,uBACvB,KAAOD,GAAU,CACf,GAAIA,EAASJ,QAAQhM,GACnB,MAAO,CAACoM,GAEVA,EAAWA,EAASC,sBACtB,CACA,MAAO,EACT,EAEA,IAAAvhB,CAAKtF,EAASwa,GACZ,IAAIlV,EAAOtF,EAAQ8mB,mBACnB,KAAOxhB,GAAM,CACX,GAAIA,EAAKkhB,QAAQhM,GACf,MAAO,CAAClV,GAEVA,EAAOA,EAAKwhB,kBACd,CACA,MAAO,EACT,EACA,iBAAAC,CAAkB/mB,GAChB,MAAMgnB,EAAa,CAAC,IAAK,SAAU,QAAS,WAAY,SAAU,UAAW,aAAc,4BAA4BzjB,KAAIiX,GAAY,GAAGA,2BAAiC7W,KAAK,KAC
hL,OAAO8c,KAAK7N,KAAKoU,EAAYhnB,GAAS4G,QAAOqgB,IAAOtL,GAAWsL,IAAO7L,GAAU6L,IAClF,EACA,sBAAAC,CAAuBlnB,GACrB,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAIwa,GACK6L,GAAeC,QAAQ9L,GAAYA,EAErC,IACT,EACA,sBAAA2M,CAAuBnnB,GACrB,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAOwa,EAAW6L,GAAeC,QAAQ9L,GAAY,IACvD,EACA,+BAAA4M,CAAgCpnB,GAC9B,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAOwa,EAAW6L,GAAezT,KAAK4H,GAAY,EACpD,GAUI6M,GAAuB,CAACC,EAAWC,EAAS,UAChD,MAAMC,EAAa,gBAAgBF,EAAU7B,YACvC1kB,EAAOumB,EAAUtK,KACvBgE,GAAac,GAAGhc,SAAU0hB,EAAY,qBAAqBzmB,OAAU,SAAU8e,GAI7E,GAHI,CAAC,IAAK,QAAQgC,SAASpB,KAAKgH,UAC9B5H,EAAMkD,iBAEJpH,GAAW8E,MACb,OAEF,MAAMzT,EAASqZ,GAAec,uBAAuB1G,OAASA,KAAKhF,QAAQ,IAAI1a,KAC9DumB,EAAUvB,oBAAoB/Y,GAGtCua,IACX,GAAE,EAiBEG,GAAc,YACdC,GAAc,QAAQD,KACtBE,GAAe,SAASF,KAQ9B,MAAMG,WAAc1C,GAElB,eAAWnI,GACT,MAfW,OAgBb,CAGA,KAAA8K,GAEE,GADmB9G,GAAaqB,QAAQ5B,KAAK4E,SAAUsC,IACxClF,iBACb,OAEFhC,KAAK4E,SAASvJ,UAAU1B,OAlBF,QAmBtB,MAAMyL,EAAapF,KAAK4E,SAASvJ,UAAU7W,SApBrB,QAqBtBwb,KAAKmF,gBAAe,IAAMnF,KAAKsH,mBAAmBtH,KAAK4E,SAAUQ,EACnE,CAGA,eAAAkC,GACEtH,KAAK4E,SAASjL,SACd4G,GAAaqB,QAAQ5B,KAAK4E,SAAUuC,IACpCnH,KAAK+E,SACP,CAGA,sBAAOtI,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO+c,GAAM9B,oBAAoBtF,MACvC,GAAsB,iBAAX8D,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KAJb,CAKF,GACF,EAOF4G,GAAqBQ,GAAO,SAM5BjL,GAAmBiL,IAcnB,MAKMI,GAAyB,4BAO/B,MAAMC,WAAe/C,GAEnB,eAAWnI,GACT,MAfW,QAgBb,CAGA,MAAAmL,GAEE1H,KAAK4E,SAASxjB,aAAa,eAAgB4e,KAAK4E,SAASvJ,UAAUqM,OAjB3C,UAkB1B,CAGA,sBAAOjL,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAOod,GAAOnC,oBAAoBtF,MACzB,WAAX8D,GACFzZ,EAAKyZ,IAET,GACF,EAOFvD,GAAac,GAAGhc,SAjCe,2BAiCmBmiB,IAAwBpI,IACxEA,EAAMkD,iBACN,MAAMqF,EAASvI,EAAM7S,OAAOyO,QAAQwM,IACvBC,GAAOnC,oBAAoBqC,GACnCD,QAAQ,IAOfvL,GAAmBsL,IAcnB,MACMG,GAAc,YACdC,GAAmB,aAAaD,KAChCE,GAAkB,YAAYF,KAC9BG,GAAiB,WAAWH,KAC5BI,GAAoB,cAAcJ,KAClCK,GAAkB,YAAYL,KAK9BM,GAAY,CAChBC,YAAa,KACbC,aAAc,KACdC,cAAe,MAEXC,GAAgB,CACpBH,YAAa,kBACbC,aAAc,kBACdC,cAAe,mBAOjB,MAAME,WAAc9E,GAClB,WAAAU,CAAY5kB,EAASukB,GACnBa,QACA3E,KAAK4E,SAAWrlB,EACXA,GAAYgpB,GAAMC,gBAGvBxI,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKyI,QAAU,EACfzI,KAAK0I,sBAAwB5H,QAAQlhB,OAAO+oB,cAC5C3I,KAAK4I,cACP,CAGA,kBAAWlF,GACT,OAAOwE,EACT,CACA,sBAAWvE,GACT,OAAO2E,EACT,CACA,eAAW/L,GACT,MA/CW,OAgDb,CAGA,OAAAwI,GACExE,GAAaC,IAAIR,KAAK4E,SAAUgD,GAClC,CAGA,MAAAiB,CAAOzJ,GACAY,KAAK0I,sBAIN1I,KAAK8I,wBAAwB1J,KAC/BY,KAAKyI,QAAUrJ,EAAM2J,SAJrB/I,KAAKyI,QAAUrJ,EAAM4J,QAAQ,GAAGD,OAMpC,CACA,IAAAE,CAAK7J,GACCY,KAAK8I,wBAAwB1J,KAC/BY,KAAKyI,QAAUrJ,EAAM2J,QAAU/I,KAAKyI,SAEtCzI,KAAKkJ,eACLrM,GAAQmD,KAAK6E,QAAQsD,YACvB,CACA,KAAAgB,CAAM/J,GACJY,KAAKyI,QAAUrJ,EAAM4J,SAAW5J,EAAM4J,QAAQtY,OAAS,EAAI,EAAI0O,EAAM4J,QAAQ,GAAGD,QAAU/I,KAAKyI,OACjG,CACA,YAAAS,GACE,MAAME,EAAYjnB,KAAKoC,IAAIyb,KAAKyI,SAChC,GAAIW,GAnEgB,GAoElB,OAEF,MAAM9b,EAAY8b,EAAYpJ,KAAKyI,QACnCzI,KAAKyI,QAAU,EACVnb,GAGLuP,GAAQvP,EAAY,EAAI0S,KAAK6E,QAAQwD,cAAgBrI,KAAK6E,QAAQuD,aACpE,CACA,WAAAQ,GACM5I,KAAK0I,uBACPnI,GAAac,GAAGrB,KAAK4E,SAAUoD,IAAmB5I,GAASY,KAAK6I,OAAOzJ,KACvEmB,GAAac,GAAGrB,KAAK4E,SAAUqD,IAAiB7I,GAASY,KAAKiJ,KAAK7J,KACnEY,KAAK4E,SAASvJ,UAAU5E,IAlFG,mBAoF3B8J,GAAac,GAAGrB,KAAK4E,SAAUiD,IAAkBzI,GAASY,KAAK6I,OAAOzJ,KACtEmB,GAAac,GAAGrB,KAAK4E,SAAUkD,IAAiB1I,GAASY,KAAKmJ,MAAM/J,KACpEmB,GAAac,GAAGrB,KAAK4E,SAAUmD,IAAgB3I,GAASY,KAAKiJ,KAAK7J,KAEtE,CACA,uBAAA0J,CAAwB1J,GACtB,OAAOY,KAAK0I,wBA3FS,QA2FiBtJ,EAAMiK,aA5FrB,UA4FyDjK,EAAMiK,YACxF,CAGA,kBAAOb,GACL,MAAO,iBAAkBnjB,SAASC,iBAAmB7C,UAAU6mB,eAAiB,CAClF,EAeF,MAEMC,GAAc,eACdC,GAAiB,YAKjBC,GAAa,OACbC,GAAa,OACbC,GAAiB,OACjBC,GAAkB,QAClBC,GAAc,QAAQN,KA
CtBO,GAAa,OAAOP,KACpBQ,GAAkB,UAAUR,KAC5BS,GAAqB,aAAaT,KAClCU,GAAqB,aAAaV,KAClCW,GAAmB,YAAYX,KAC/BY,GAAwB,OAAOZ,KAAcC,KAC7CY,GAAyB,QAAQb,KAAcC,KAC/Ca,GAAsB,WACtBC,GAAsB,SAMtBC,GAAkB,UAClBC,GAAgB,iBAChBC,GAAuBF,GAAkBC,GAKzCE,GAAmB,CACvB,UAAoBd,GACpB,WAAqBD,IAEjBgB,GAAY,CAChBC,SAAU,IACVC,UAAU,EACVC,MAAO,QACPC,MAAM,EACNC,OAAO,EACPC,MAAM,GAEFC,GAAgB,CACpBN,SAAU,mBAEVC,SAAU,UACVC,MAAO,mBACPC,KAAM,mBACNC,MAAO,UACPC,KAAM,WAOR,MAAME,WAAiBzG,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKoL,UAAY,KACjBpL,KAAKqL,eAAiB,KACtBrL,KAAKsL,YAAa,EAClBtL,KAAKuL,aAAe,KACpBvL,KAAKwL,aAAe,KACpBxL,KAAKyL,mBAAqB7F,GAAeC,QArCjB,uBAqC8C7F,KAAK4E,UAC3E5E,KAAK0L,qBACD1L,KAAK6E,QAAQkG,OAASV,IACxBrK,KAAK2L,OAET,CAGA,kBAAWjI,GACT,OAAOiH,EACT,CACA,sBAAWhH,GACT,OAAOuH,EACT,CACA,eAAW3O,GACT,MAnFW,UAoFb,CAGA,IAAA1X,GACEmb,KAAK4L,OAAOnC,GACd,CACA,eAAAoC,IAIOxmB,SAASymB,QAAUnR,GAAUqF,KAAK4E,WACrC5E,KAAKnb,MAET,CACA,IAAAqhB,GACElG,KAAK4L,OAAOlC,GACd,CACA,KAAAoB,GACM9K,KAAKsL,YACPlR,GAAqB4F,KAAK4E,UAE5B5E,KAAK+L,gBACP,CACA,KAAAJ,GACE3L,KAAK+L,iBACL/L,KAAKgM,kBACLhM,KAAKoL,UAAYa,aAAY,IAAMjM,KAAK6L,mBAAmB7L,KAAK6E,QAAQ+F,SAC1E,CACA,iBAAAsB,GACOlM,KAAK6E,QAAQkG,OAGd/K,KAAKsL,WACP/K,GAAae,IAAItB,KAAK4E,SAAUkF,IAAY,IAAM9J,KAAK2L,UAGzD3L,KAAK2L,QACP,CACA,EAAAQ,CAAG1T,GACD,MAAM2T,EAAQpM,KAAKqM,YACnB,GAAI5T,EAAQ2T,EAAM1b,OAAS,GAAK+H,EAAQ,EACtC,OAEF,GAAIuH,KAAKsL,WAEP,YADA/K,GAAae,IAAItB,KAAK4E,SAAUkF,IAAY,IAAM9J,KAAKmM,GAAG1T,KAG5D,MAAM6T,EAActM,KAAKuM,cAAcvM,KAAKwM,cAC5C,GAAIF,IAAgB7T,EAClB,OAEF,MAAMtC,EAAQsC,EAAQ6T,EAAc7C,GAAaC,GACjD1J,KAAK4L,OAAOzV,EAAOiW,EAAM3T,GAC3B,CACA,OAAAsM,GACM/E,KAAKwL,cACPxL,KAAKwL,aAAazG,UAEpBJ,MAAMI,SACR,CAGA,iBAAAf,CAAkBF,GAEhB,OADAA,EAAO2I,gBAAkB3I,EAAO8G,SACzB9G,CACT,CACA,kBAAA4H,GACM1L,KAAK6E,QAAQgG,UACftK,GAAac,GAAGrB,KAAK4E,SAAUmF,IAAiB3K,GAASY,KAAK0M,SAAStN,KAE9C,UAAvBY,KAAK6E,QAAQiG,QACfvK,GAAac,GAAGrB,KAAK4E,SAAUoF,IAAoB,IAAMhK,KAAK8K,UAC9DvK,GAAac,GAAGrB,KAAK4E,SAAUqF,IAAoB,IAAMjK,KAAKkM,uBAE5DlM,KAAK6E,QAAQmG,OAASzC,GAAMC,eAC9BxI,KAAK2M,yBAET,CACA,uBAAAA,GACE,IAAK,MAAMC,KAAOhH,GAAezT,KArIX,qBAqImC6N,KAAK4E,UAC5DrE,GAAac,GAAGuL,EAAK1C,IAAkB9K,GAASA,EAAMkD,mBAExD,MAmBMuK,EAAc,CAClBzE,aAAc,IAAMpI,KAAK4L,OAAO5L,KAAK8M,kBAAkBnD,KACvDtB,cAAe,IAAMrI,KAAK4L,OAAO5L,KAAK8M,kBAAkBlD,KACxDzB,YAtBkB,KACS,UAAvBnI,KAAK6E,QAAQiG,QAYjB9K,KAAK8K,QACD9K,KAAKuL,cACPwB,aAAa/M,KAAKuL,cAEpBvL,KAAKuL,aAAe1N,YAAW,IAAMmC,KAAKkM,qBAjLjB,IAiL+DlM,KAAK6E,QAAQ+F,UAAS,GAOhH5K,KAAKwL,aAAe,IAAIjD,GAAMvI,KAAK4E,SAAUiI,EAC/C,CACA,QAAAH,CAAStN,GACP,GAAI,kBAAkB/b,KAAK+b,EAAM7S,OAAOya,SACtC,OAEF,MAAM1Z,EAAYod,GAAiBtL,EAAMtiB,KACrCwQ,IACF8R,EAAMkD,iBACNtC,KAAK4L,OAAO5L,KAAK8M,kBAAkBxf,IAEvC,CACA,aAAAif,CAAchtB,GACZ,OAAOygB,KAAKqM,YAAYlnB,QAAQ5F,EAClC,CACA,0BAAAytB,CAA2BvU,GACzB,IAAKuH,KAAKyL,mBACR,OAEF,MAAMwB,EAAkBrH,GAAeC,QAAQ0E,GAAiBvK,KAAKyL,oBACrEwB,EAAgB5R,UAAU1B,OAAO2Q,IACjC2C,EAAgB9rB,gBAAgB,gBAChC,MAAM+rB,EAAqBtH,GAAeC,QAAQ,sBAAsBpN,MAAWuH,KAAKyL,oBACpFyB,IACFA,EAAmB7R,UAAU5E,IAAI6T,IACjC4C,EAAmB9rB,aAAa,eAAgB,QAEpD,CACA,eAAA4qB,GACE,MAAMzsB,EAAUygB,KAAKqL,gBAAkBrL,KAAKwM,aAC5C,IAAKjtB,EACH,OAEF,MAAM4tB,EAAkB5P,OAAO6P,SAAS7tB,EAAQic,aAAa,oBAAqB,IAClFwE,KAAK6E,QAAQ+F,SAAWuC,GAAmBnN,KAAK6E,QAAQ4H,eAC1D,CACA,MAAAb,CAAOzV,EAAO5W,EAAU,MACtB,GAAIygB,KAAKsL,WACP,OAEF,MAAMvN,EAAgBiC,KAAKwM,aACrBa,EAASlX,IAAUsT,GACnB6D,EAAc/tB,GAAWue,GAAqBkC,KAAKqM,YAAatO,EAAesP,EAAQrN,KAAK6E,QAAQoG,MAC1G,GAAIqC,IAAgBvP,EAClB,OAEF,MAAMwP,EAAmBvN,KAAKuM,cAAce,GACtCE,EAAehI,GACZjF,GAAaqB,QAAQ5B,KAAK4E,SAAUY,EAAW,CACpD1F,cAAewN,EACfhgB,UAAW0S,KAAKyN,kBAAkBtX,GAClCuD,KAAMsG,KAAKuM,cAAcxO,GACzBoO,GAAIoB,IAIR,GADmBC,EAAa3D,IACjB7H,iBACb,OAEF,IA
AKjE,IAAkBuP,EAGrB,OAEF,MAAMI,EAAY5M,QAAQd,KAAKoL,WAC/BpL,KAAK8K,QACL9K,KAAKsL,YAAa,EAClBtL,KAAKgN,2BAA2BO,GAChCvN,KAAKqL,eAAiBiC,EACtB,MAAMK,EAAuBN,EA3OR,sBADF,oBA6ObO,EAAiBP,EA3OH,qBACA,qBA2OpBC,EAAYjS,UAAU5E,IAAImX,GAC1B/R,GAAOyR,GACPvP,EAAc1C,UAAU5E,IAAIkX,GAC5BL,EAAYjS,UAAU5E,IAAIkX,GAQ1B3N,KAAKmF,gBAPoB,KACvBmI,EAAYjS,UAAU1B,OAAOgU,EAAsBC,GACnDN,EAAYjS,UAAU5E,IAAI6T,IAC1BvM,EAAc1C,UAAU1B,OAAO2Q,GAAqBsD,EAAgBD,GACpE3N,KAAKsL,YAAa,EAClBkC,EAAa1D,GAAW,GAEY/L,EAAeiC,KAAK6N,eACtDH,GACF1N,KAAK2L,OAET,CACA,WAAAkC,GACE,OAAO7N,KAAK4E,SAASvJ,UAAU7W,SAhQV,QAiQvB,CACA,UAAAgoB,GACE,OAAO5G,GAAeC,QAAQ4E,GAAsBzK,KAAK4E,SAC3D,CACA,SAAAyH,GACE,OAAOzG,GAAezT,KAAKqY,GAAexK,KAAK4E,SACjD,CACA,cAAAmH,GACM/L,KAAKoL,YACP0C,cAAc9N,KAAKoL,WACnBpL,KAAKoL,UAAY,KAErB,CACA,iBAAA0B,CAAkBxf,GAChB,OAAI2O,KACK3O,IAAcqc,GAAiBD,GAAaD,GAE9Cnc,IAAcqc,GAAiBF,GAAaC,EACrD,CACA,iBAAA+D,CAAkBtX,GAChB,OAAI8F,KACK9F,IAAUuT,GAAaC,GAAiBC,GAE1CzT,IAAUuT,GAAaE,GAAkBD,EAClD,CAGA,sBAAOlN,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO8gB,GAAS7F,oBAAoBtF,KAAM8D,GAChD,GAAsB,iBAAXA,GAIX,GAAsB,iBAAXA,EAAqB,CAC9B,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IACP,OAREzZ,EAAK8hB,GAAGrI,EASZ,GACF,EAOFvD,GAAac,GAAGhc,SAAU+kB,GAvSE,uCAuS2C,SAAUhL,GAC/E,MAAM7S,EAASqZ,GAAec,uBAAuB1G,MACrD,IAAKzT,IAAWA,EAAO8O,UAAU7W,SAAS6lB,IACxC,OAEFjL,EAAMkD,iBACN,MAAMyL,EAAW5C,GAAS7F,oBAAoB/Y,GACxCyhB,EAAahO,KAAKxE,aAAa,oBACrC,OAAIwS,GACFD,EAAS5B,GAAG6B,QACZD,EAAS7B,qBAGyC,SAAhDlJ,GAAYQ,iBAAiBxD,KAAM,UACrC+N,EAASlpB,YACTkpB,EAAS7B,sBAGX6B,EAAS7H,YACT6H,EAAS7B,oBACX,IACA3L,GAAac,GAAGzhB,OAAQuqB,IAAuB,KAC7C,MAAM8D,EAAYrI,GAAezT,KA5TR,6BA6TzB,IAAK,MAAM4b,KAAYE,EACrB9C,GAAS7F,oBAAoByI,EAC/B,IAOF5R,GAAmBgP,IAcnB,MAEM+C,GAAc,eAEdC,GAAe,OAAOD,KACtBE,GAAgB,QAAQF,KACxBG,GAAe,OAAOH,KACtBI,GAAiB,SAASJ,KAC1BK,GAAyB,QAAQL,cACjCM,GAAoB,OACpBC,GAAsB,WACtBC,GAAwB,aAExBC,GAA6B,WAAWF,OAAwBA,KAKhEG,GAAyB,8BACzBC,GAAY,CAChBpqB,OAAQ,KACRijB,QAAQ,GAEJoH,GAAgB,CACpBrqB,OAAQ,iBACRijB,OAAQ,WAOV,MAAMqH,WAAiBrK,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKgP,kBAAmB,EACxBhP,KAAKiP,cAAgB,GACrB,MAAMC,EAAatJ,GAAezT,KAAKyc,IACvC,IAAK,MAAMO,KAAQD,EAAY,CAC7B,MAAMnV,EAAW6L,GAAea,uBAAuB0I,GACjDC,EAAgBxJ,GAAezT,KAAK4H,GAAU5T,QAAOkpB,GAAgBA,IAAiBrP,KAAK4E,WAChF,OAAb7K,GAAqBqV,EAAc1e,QACrCsP,KAAKiP,cAAcrd,KAAKud,EAE5B,CACAnP,KAAKsP,sBACAtP,KAAK6E,QAAQpgB,QAChBub,KAAKuP,0BAA0BvP,KAAKiP,cAAejP,KAAKwP,YAEtDxP,KAAK6E,QAAQ6C,QACf1H,KAAK0H,QAET,CAGA,kBAAWhE,GACT,OAAOmL,EACT,CACA,sBAAWlL,GACT,OAAOmL,EACT,CACA,eAAWvS,GACT,MA9DW,UA+Db,CAGA,MAAAmL,GACM1H,KAAKwP,WACPxP,KAAKyP,OAELzP,KAAK0P,MAET,CACA,IAAAA,GACE,GAAI1P,KAAKgP,kBAAoBhP,KAAKwP,WAChC,OAEF,IAAIG,EAAiB,GAQrB,GALI3P,KAAK6E,QAAQpgB,SACfkrB,EAAiB3P,KAAK4P,uBAhEH,wCAgE4CzpB,QAAO5G,GAAWA,IAAYygB,KAAK4E,WAAU9hB,KAAIvD,GAAWwvB,GAASzJ,oBAAoB/lB,EAAS,CAC/JmoB,QAAQ,OAGRiI,EAAejf,QAAUif,EAAe,GAAGX,iBAC7C,OAGF,GADmBzO,GAAaqB,QAAQ5B,KAAK4E,SAAUuJ,IACxCnM,iBACb,OAEF,IAAK,MAAM6N,KAAkBF,EAC3BE,EAAeJ,OAEjB,MAAMK,EAAY9P,KAAK+P,gBACvB/P,KAAK4E,SAASvJ,UAAU1B,OAAO8U,IAC/BzO,KAAK4E,SAASvJ,UAAU5E,IAAIiY,IAC5B1O,KAAK4E,SAAS7jB,MAAM+uB,GAAa,EACjC9P,KAAKuP,0BAA0BvP,KAAKiP,eAAe,GACnDjP,KAAKgP,kBAAmB,EACxB,MAQMgB,EAAa,SADUF,EAAU,GAAGrL,cAAgBqL,EAAU1d,MAAM,KAE1E4N,KAAKmF,gBATY,KACfnF,KAAKgP,kBAAmB,EACxBhP,KAAK4E,SAASvJ,UAAU1B,OAAO+U,IAC/B1O,KAAK4E,SAASvJ,UAAU5E,IAAIgY,GAAqBD,IACjDxO,KAAK4E,SAAS7jB,MAAM+uB,GAAa,GACjCvP,GAAaqB,QAAQ5B,KAAK4E,SAAUwJ,GAAc,GAItBpO,KAAK4E,UAAU,GAC7C5E,KAAK4E,SAAS7jB,MAAM+uB,GAAa,GAAG9P,KAAK4E,SAASoL,MACpD,CACA,IAAAP,GACE,GAAIzP,KAAKgP,mBAAqBhP,KAAKwP,WACjC,OAGF,GADmBjP,GAAaqB,QAA
Q5B,KAAK4E,SAAUyJ,IACxCrM,iBACb,OAEF,MAAM8N,EAAY9P,KAAK+P,gBACvB/P,KAAK4E,SAAS7jB,MAAM+uB,GAAa,GAAG9P,KAAK4E,SAASthB,wBAAwBwsB,OAC1EjU,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIiY,IAC5B1O,KAAK4E,SAASvJ,UAAU1B,OAAO8U,GAAqBD,IACpD,IAAK,MAAM5M,KAAW5B,KAAKiP,cAAe,CACxC,MAAM1vB,EAAUqmB,GAAec,uBAAuB9E,GAClDriB,IAAYygB,KAAKwP,SAASjwB,IAC5BygB,KAAKuP,0BAA0B,CAAC3N,IAAU,EAE9C,CACA5B,KAAKgP,kBAAmB,EAOxBhP,KAAK4E,SAAS7jB,MAAM+uB,GAAa,GACjC9P,KAAKmF,gBAPY,KACfnF,KAAKgP,kBAAmB,EACxBhP,KAAK4E,SAASvJ,UAAU1B,OAAO+U,IAC/B1O,KAAK4E,SAASvJ,UAAU5E,IAAIgY,IAC5BlO,GAAaqB,QAAQ5B,KAAK4E,SAAU0J,GAAe,GAGvBtO,KAAK4E,UAAU,EAC/C,CACA,QAAA4K,CAASjwB,EAAUygB,KAAK4E,UACtB,OAAOrlB,EAAQ8b,UAAU7W,SAASgqB,GACpC,CAGA,iBAAAxK,CAAkBF,GAGhB,OAFAA,EAAO4D,OAAS5G,QAAQgD,EAAO4D,QAC/B5D,EAAOrf,OAASiW,GAAWoJ,EAAOrf,QAC3Bqf,CACT,CACA,aAAAiM,GACE,OAAO/P,KAAK4E,SAASvJ,UAAU7W,SA3IL,uBAChB,QACC,QA0Ib,CACA,mBAAA8qB,GACE,IAAKtP,KAAK6E,QAAQpgB,OAChB,OAEF,MAAMqhB,EAAW9F,KAAK4P,uBAAuBhB,IAC7C,IAAK,MAAMrvB,KAAWumB,EAAU,CAC9B,MAAMmK,EAAWrK,GAAec,uBAAuBnnB,GACnD0wB,GACFjQ,KAAKuP,0BAA0B,CAAChwB,GAAUygB,KAAKwP,SAASS,GAE5D,CACF,CACA,sBAAAL,CAAuB7V,GACrB,MAAM+L,EAAWF,GAAezT,KAAKwc,GAA4B3O,KAAK6E,QAAQpgB,QAE9E,OAAOmhB,GAAezT,KAAK4H,EAAUiG,KAAK6E,QAAQpgB,QAAQ0B,QAAO5G,IAAYumB,EAAS1E,SAAS7hB,IACjG,CACA,yBAAAgwB,CAA0BW,EAAcC,GACtC,GAAKD,EAAaxf,OAGlB,IAAK,MAAMnR,KAAW2wB,EACpB3wB,EAAQ8b,UAAUqM,OArKK,aAqKyByI,GAChD5wB,EAAQ6B,aAAa,gBAAiB+uB,EAE1C,CAGA,sBAAO1T,CAAgBqH,GACrB,MAAMe,EAAU,CAAC,EAIjB,MAHsB,iBAAXf,GAAuB,YAAYzgB,KAAKygB,KACjDe,EAAQ6C,QAAS,GAEZ1H,KAAKuH,MAAK,WACf,MAAMld,EAAO0kB,GAASzJ,oBAAoBtF,KAAM6E,GAChD,GAAsB,iBAAXf,EAAqB,CAC9B,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IACP,CACF,GACF,EAOFvD,GAAac,GAAGhc,SAAUkpB,GAAwBK,IAAwB,SAAUxP,IAErD,MAAzBA,EAAM7S,OAAOya,SAAmB5H,EAAMW,gBAAmD,MAAjCX,EAAMW,eAAeiH,UAC/E5H,EAAMkD,iBAER,IAAK,MAAM/iB,KAAWqmB,GAAee,gCAAgC3G,MACnE+O,GAASzJ,oBAAoB/lB,EAAS,CACpCmoB,QAAQ,IACPA,QAEP,IAMAvL,GAAmB4S,IAcnB,MAAMqB,GAAS,WAETC,GAAc,eACdC,GAAiB,YAGjBC,GAAiB,UACjBC,GAAmB,YAGnBC,GAAe,OAAOJ,KACtBK,GAAiB,SAASL,KAC1BM,GAAe,OAAON,KACtBO,GAAgB,QAAQP,KACxBQ,GAAyB,QAAQR,KAAcC,KAC/CQ,GAAyB,UAAUT,KAAcC,KACjDS,GAAuB,QAAQV,KAAcC,KAC7CU,GAAoB,OAMpBC,GAAyB,4DACzBC,GAA6B,GAAGD,MAA0BD,KAC1DG,GAAgB,iBAIhBC,GAAgBnV,KAAU,UAAY,YACtCoV,GAAmBpV,KAAU,YAAc,UAC3CqV,GAAmBrV,KAAU,aAAe,eAC5CsV,GAAsBtV,KAAU,eAAiB,aACjDuV,GAAkBvV,KAAU,aAAe,cAC3CwV,GAAiBxV,KAAU,cAAgB,aAG3CyV,GAAY,CAChBC,WAAW,EACX1jB,SAAU,kBACV2jB,QAAS,UACT5pB,OAAQ,CAAC,EAAG,GACZ6pB,aAAc,KACdvzB,UAAW,UAEPwzB,GAAgB,CACpBH,UAAW,mBACX1jB,SAAU,mBACV2jB,QAAS,SACT5pB,OAAQ,0BACR6pB,aAAc,yBACdvzB,UAAW,2BAOb,MAAMyzB,WAAiBrN,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKgS,QAAU,KACfhS,KAAKiS,QAAUjS,KAAK4E,SAAS7f,WAE7Bib,KAAKkS,MAAQtM,GAAe/gB,KAAKmb,KAAK4E,SAAUuM,IAAe,IAAMvL,GAAeM,KAAKlG,KAAK4E,SAAUuM,IAAe,IAAMvL,GAAeC,QAAQsL,GAAenR,KAAKiS,SACxKjS,KAAKmS,UAAYnS,KAAKoS,eACxB,CAGA,kBAAW1O,GACT,OAAOgO,EACT,CACA,sBAAW/N,GACT,OAAOmO,EACT,CACA,eAAWvV,GACT,OAAO6T,EACT,CAGA,MAAA1I,GACE,OAAO1H,KAAKwP,WAAaxP,KAAKyP,OAASzP,KAAK0P,MAC9C,CACA,IAAAA,GACE,GAAIxU,GAAW8E,KAAK4E,WAAa5E,KAAKwP,WACpC,OAEF,MAAM1P,EAAgB,CACpBA,cAAeE,KAAK4E,UAGtB,IADkBrE,GAAaqB,QAAQ5B,KAAK4E,SAAU+L,GAAc7Q,GACtDkC,iBAAd,CASA,GANAhC,KAAKqS,gBAMD,iBAAkBhtB,SAASC,kBAAoB0a,KAAKiS,QAAQjX,QAzExC,eA0EtB,IAAK,MAAMzb,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK4Z,UAC/CvF,GAAac,GAAG9hB,EAAS,YAAaqc,IAG1CoE,KAAK4E,SAAS0N,QACdtS,KAAK4E,SAASxjB,aAAa,iBAAiB,GAC5C4e,KAAKkS,MAAM7W,UAAU5E,IAAIua,IACzBhR,KAAK4E,SAASvJ,UAAU5E,IAAIua,IAC5BzQ,GAAaqB,QAAQ5B,KAAK4E,SAAUgM,GAAe9Q,EAhBnD,CAiBF,CACA,IAAA2P,GACE,GAAIvU,GAAW8E,KAAK4E,YAAc5E,KAAKwP,W
ACrC,OAEF,MAAM1P,EAAgB,CACpBA,cAAeE,KAAK4E,UAEtB5E,KAAKuS,cAAczS,EACrB,CACA,OAAAiF,GACM/E,KAAKgS,SACPhS,KAAKgS,QAAQhZ,UAEf2L,MAAMI,SACR,CACA,MAAAha,GACEiV,KAAKmS,UAAYnS,KAAKoS,gBAClBpS,KAAKgS,SACPhS,KAAKgS,QAAQjnB,QAEjB,CAGA,aAAAwnB,CAAczS,GAEZ,IADkBS,GAAaqB,QAAQ5B,KAAK4E,SAAU6L,GAAc3Q,GACtDkC,iBAAd,CAMA,GAAI,iBAAkB3c,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK4Z,UAC/CvF,GAAaC,IAAIjhB,EAAS,YAAaqc,IAGvCoE,KAAKgS,SACPhS,KAAKgS,QAAQhZ,UAEfgH,KAAKkS,MAAM7W,UAAU1B,OAAOqX,IAC5BhR,KAAK4E,SAASvJ,UAAU1B,OAAOqX,IAC/BhR,KAAK4E,SAASxjB,aAAa,gBAAiB,SAC5C4hB,GAAYE,oBAAoBlD,KAAKkS,MAAO,UAC5C3R,GAAaqB,QAAQ5B,KAAK4E,SAAU8L,GAAgB5Q,EAhBpD,CAiBF,CACA,UAAA+D,CAAWC,GAET,GAAgC,iBADhCA,EAASa,MAAMd,WAAWC,IACRxlB,YAA2B,GAAUwlB,EAAOxlB,YAAgE,mBAA3CwlB,EAAOxlB,UAAUgF,sBAElG,MAAM,IAAIkhB,UAAU,GAAG4L,GAAO3L,+GAEhC,OAAOX,CACT,CACA,aAAAuO,GACE,QAAsB,IAAX,EACT,MAAM,IAAI7N,UAAU,gEAEtB,IAAIgO,EAAmBxS,KAAK4E,SACG,WAA3B5E,KAAK6E,QAAQvmB,UACfk0B,EAAmBxS,KAAKiS,QACf,GAAUjS,KAAK6E,QAAQvmB,WAChCk0B,EAAmB9X,GAAWsF,KAAK6E,QAAQvmB,WACA,iBAA3B0hB,KAAK6E,QAAQvmB,YAC7Bk0B,EAAmBxS,KAAK6E,QAAQvmB,WAElC,MAAMuzB,EAAe7R,KAAKyS,mBAC1BzS,KAAKgS,QAAU,GAAoBQ,EAAkBxS,KAAKkS,MAAOL,EACnE,CACA,QAAArC,GACE,OAAOxP,KAAKkS,MAAM7W,UAAU7W,SAASwsB,GACvC,CACA,aAAA0B,GACE,MAAMC,EAAiB3S,KAAKiS,QAC5B,GAAIU,EAAetX,UAAU7W,SArKN,WAsKrB,OAAOgtB,GAET,GAAImB,EAAetX,UAAU7W,SAvKJ,aAwKvB,OAAOitB,GAET,GAAIkB,EAAetX,UAAU7W,SAzKA,iBA0K3B,MA5JsB,MA8JxB,GAAImuB,EAAetX,UAAU7W,SA3KE,mBA4K7B,MA9JyB,SAkK3B,MAAMouB,EAAkF,QAA1E3tB,iBAAiB+a,KAAKkS,OAAOpX,iBAAiB,iBAAiB6K,OAC7E,OAAIgN,EAAetX,UAAU7W,SArLP,UAsLbouB,EAAQvB,GAAmBD,GAE7BwB,EAAQrB,GAAsBD,EACvC,CACA,aAAAc,GACE,OAAkD,OAA3CpS,KAAK4E,SAAS5J,QAnLD,UAoLtB,CACA,UAAA6X,GACE,MAAM,OACJ7qB,GACEgY,KAAK6E,QACT,MAAsB,iBAAX7c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAAS4f,OAAO6P,SAASzvB,EAAO,MAEzC,mBAAXqK,EACF8qB,GAAc9qB,EAAO8qB,EAAY9S,KAAK4E,UAExC5c,CACT,CACA,gBAAAyqB,GACE,MAAMM,EAAwB,CAC5Br0B,UAAWshB,KAAK0S,gBAChBtc,UAAW,CAAC,CACV9V,KAAM,kBACNmB,QAAS,CACPwM,SAAU+R,KAAK6E,QAAQ5W,WAExB,CACD3N,KAAM,SACNmB,QAAS,CACPuG,OAAQgY,KAAK6S,iBAanB,OAPI7S,KAAKmS,WAAsC,WAAzBnS,KAAK6E,QAAQ+M,WACjC5O,GAAYC,iBAAiBjD,KAAKkS,MAAO,SAAU,UACnDa,EAAsB3c,UAAY,CAAC,CACjC9V,KAAM,cACNC,SAAS,KAGN,IACFwyB,KACAlW,GAAQmD,KAAK6E,QAAQgN,aAAc,CAACkB,IAE3C,CACA,eAAAC,EAAgB,IACdl2B,EAAG,OACHyP,IAEA,MAAM6f,EAAQxG,GAAezT,KAhOF,8DAgO+B6N,KAAKkS,OAAO/rB,QAAO5G,GAAWob,GAAUpb,KAC7F6sB,EAAM1b,QAMXoN,GAAqBsO,EAAO7f,EAAQzP,IAAQ0zB,IAAmBpE,EAAMhL,SAAS7U,IAAS+lB,OACzF,CAGA,sBAAO7V,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO0nB,GAASzM,oBAAoBtF,KAAM8D,GAChD,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,CACA,iBAAOmP,CAAW7T,GAChB,GA5QuB,IA4QnBA,EAAMuI,QAAgD,UAAfvI,EAAMqB,MA/QnC,QA+QuDrB,EAAMtiB,IACzE,OAEF,MAAMo2B,EAActN,GAAezT,KAAK+e,IACxC,IAAK,MAAMxJ,KAAUwL,EAAa,CAChC,MAAMC,EAAUpB,GAAS1M,YAAYqC,GACrC,IAAKyL,IAAyC,IAA9BA,EAAQtO,QAAQ8M,UAC9B,SAEF,MAAMyB,EAAehU,EAAMgU,eACrBC,EAAeD,EAAahS,SAAS+R,EAAQjB,OACnD,GAAIkB,EAAahS,SAAS+R,EAAQvO,WAA2C,WAA9BuO,EAAQtO,QAAQ8M,YAA2B0B,GAA8C,YAA9BF,EAAQtO,QAAQ8M,WAA2B0B,EACnJ,SAIF,GAAIF,EAAQjB,MAAM1tB,SAAS4a,EAAM7S,UAA2B,UAAf6S,EAAMqB,MA/RvC,QA+R2DrB,EAAMtiB,KAAqB,qCAAqCuG,KAAK+b,EAAM7S,OAAOya,UACvJ,SAEF,MAAMlH,EAAgB,CACpBA,cAAeqT,EAAQvO,UAEN,UAAfxF,EAAMqB,OACRX,EAAciH,WAAa3H,GAE7B+T,EAAQZ,cAAczS,EACxB,CACF,CACA,4BAAOwT,CAAsBlU,GAI3B,MAAMmU,EAAU,kBAAkBlwB,KAAK+b,EAAM7S,OAAOya,SAC9CwM,EAjTW,WAiTKpU,EAAMtiB,IACtB22B,EAAkB,CAAClD,GAAgBC,IAAkBpP,SAAShC,EAAMtiB,KAC1E,IAAK22B,IAAoBD,EACvB,OAEF,GAAID,IAAYC,EACd,OAEFpU,EAAMkD,iBAGN,MAAMoR,EAAkB1T,KAAK+F,QAAQkL,IAA0BjR,KAAO4F,GAAeM,KAAKlG,KAAMiR,I
AAwB,IAAMrL,GAAe/gB,KAAKmb,KAAMiR,IAAwB,IAAMrL,GAAeC,QAAQoL,GAAwB7R,EAAMW,eAAehb,YACpPwF,EAAWwnB,GAASzM,oBAAoBoO,GAC9C,GAAID,EAIF,OAHArU,EAAMuU,kBACNppB,EAASmlB,YACTnlB,EAASyoB,gBAAgB5T,GAGvB7U,EAASilB,aAEXpQ,EAAMuU,kBACNppB,EAASklB,OACTiE,EAAgBpB,QAEpB,EAOF/R,GAAac,GAAGhc,SAAUyrB,GAAwBG,GAAwBc,GAASuB,uBACnF/S,GAAac,GAAGhc,SAAUyrB,GAAwBK,GAAeY,GAASuB,uBAC1E/S,GAAac,GAAGhc,SAAUwrB,GAAwBkB,GAASkB,YAC3D1S,GAAac,GAAGhc,SAAU0rB,GAAsBgB,GAASkB,YACzD1S,GAAac,GAAGhc,SAAUwrB,GAAwBI,IAAwB,SAAU7R,GAClFA,EAAMkD,iBACNyP,GAASzM,oBAAoBtF,MAAM0H,QACrC,IAMAvL,GAAmB4V,IAcnB,MAAM6B,GAAS,WAETC,GAAoB,OACpBC,GAAkB,gBAAgBF,KAClCG,GAAY,CAChBC,UAAW,iBACXC,cAAe,KACf7O,YAAY,EACZzK,WAAW,EAEXuZ,YAAa,QAGTC,GAAgB,CACpBH,UAAW,SACXC,cAAe,kBACf7O,WAAY,UACZzK,UAAW,UACXuZ,YAAa,oBAOf,MAAME,WAAiB3Q,GACrB,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKqU,aAAc,EACnBrU,KAAK4E,SAAW,IAClB,CAGA,kBAAWlB,GACT,OAAOqQ,EACT,CACA,sBAAWpQ,GACT,OAAOwQ,EACT,CACA,eAAW5X,GACT,OAAOqX,EACT,CAGA,IAAAlE,CAAKrT,GACH,IAAK2D,KAAK6E,QAAQlK,UAEhB,YADAkC,GAAQR,GAGV2D,KAAKsU,UACL,MAAM/0B,EAAUygB,KAAKuU,cACjBvU,KAAK6E,QAAQO,YACfvJ,GAAOtc,GAETA,EAAQ8b,UAAU5E,IAAIod,IACtB7T,KAAKwU,mBAAkB,KACrB3X,GAAQR,EAAS,GAErB,CACA,IAAAoT,CAAKpT,GACE2D,KAAK6E,QAAQlK,WAIlBqF,KAAKuU,cAAclZ,UAAU1B,OAAOka,IACpC7T,KAAKwU,mBAAkB,KACrBxU,KAAK+E,UACLlI,GAAQR,EAAS,KANjBQ,GAAQR,EAQZ,CACA,OAAA0I,GACO/E,KAAKqU,cAGV9T,GAAaC,IAAIR,KAAK4E,SAAUkP,IAChC9T,KAAK4E,SAASjL,SACdqG,KAAKqU,aAAc,EACrB,CAGA,WAAAE,GACE,IAAKvU,KAAK4E,SAAU,CAClB,MAAM6P,EAAWpvB,SAASqvB,cAAc,OACxCD,EAAST,UAAYhU,KAAK6E,QAAQmP,UAC9BhU,KAAK6E,QAAQO,YACfqP,EAASpZ,UAAU5E,IArFD,QAuFpBuJ,KAAK4E,SAAW6P,CAClB,CACA,OAAOzU,KAAK4E,QACd,CACA,iBAAAZ,CAAkBF,GAGhB,OADAA,EAAOoQ,YAAcxZ,GAAWoJ,EAAOoQ,aAChCpQ,CACT,CACA,OAAAwQ,GACE,GAAItU,KAAKqU,YACP,OAEF,MAAM90B,EAAUygB,KAAKuU,cACrBvU,KAAK6E,QAAQqP,YAAYS,OAAOp1B,GAChCghB,GAAac,GAAG9hB,EAASu0B,IAAiB,KACxCjX,GAAQmD,KAAK6E,QAAQoP,cAAc,IAErCjU,KAAKqU,aAAc,CACrB,CACA,iBAAAG,CAAkBnY,GAChBW,GAAuBX,EAAU2D,KAAKuU,cAAevU,KAAK6E,QAAQO,WACpE,EAeF,MAEMwP,GAAc,gBACdC,GAAkB,UAAUD,KAC5BE,GAAoB,cAAcF,KAGlCG,GAAmB,WACnBC,GAAY,CAChBC,WAAW,EACXC,YAAa,MAGTC,GAAgB,CACpBF,UAAW,UACXC,YAAa,WAOf,MAAME,WAAkB3R,GACtB,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKqV,WAAY,EACjBrV,KAAKsV,qBAAuB,IAC9B,CAGA,kBAAW5R,GACT,OAAOsR,EACT,CACA,sBAAWrR,GACT,OAAOwR,EACT,CACA,eAAW5Y,GACT,MAtCW,WAuCb,CAGA,QAAAgZ,GACMvV,KAAKqV,YAGLrV,KAAK6E,QAAQoQ,WACfjV,KAAK6E,QAAQqQ,YAAY5C,QAE3B/R,GAAaC,IAAInb,SAAUuvB,IAC3BrU,GAAac,GAAGhc,SAAUwvB,IAAiBzV,GAASY,KAAKwV,eAAepW,KACxEmB,GAAac,GAAGhc,SAAUyvB,IAAmB1V,GAASY,KAAKyV,eAAerW,KAC1EY,KAAKqV,WAAY,EACnB,CACA,UAAAK,GACO1V,KAAKqV,YAGVrV,KAAKqV,WAAY,EACjB9U,GAAaC,IAAInb,SAAUuvB,IAC7B,CAGA,cAAAY,CAAepW,GACb,MAAM,YACJ8V,GACElV,KAAK6E,QACT,GAAIzF,EAAM7S,SAAWlH,UAAY+Z,EAAM7S,SAAW2oB,GAAeA,EAAY1wB,SAAS4a,EAAM7S,QAC1F,OAEF,MAAM1L,EAAW+kB,GAAeU,kBAAkB4O,GAC1B,IAApBr0B,EAAS6P,OACXwkB,EAAY5C,QACHtS,KAAKsV,uBAAyBP,GACvCl0B,EAASA,EAAS6P,OAAS,GAAG4hB,QAE9BzxB,EAAS,GAAGyxB,OAEhB,CACA,cAAAmD,CAAerW,GA1ED,QA2ERA,EAAMtiB,MAGVkjB,KAAKsV,qBAAuBlW,EAAMuW,SAAWZ,GA7EzB,UA8EtB,EAeF,MAAMa,GAAyB,oDACzBC,GAA0B,cAC1BC,GAAmB,gBACnBC,GAAkB,eAMxB,MAAMC,GACJ,WAAA7R,GACEnE,KAAK4E,SAAWvf,SAAS6G,IAC3B,CAGA,QAAA+pB,GAEE,MAAMC,EAAgB7wB,SAASC,gBAAgBuC,YAC/C,OAAO1F,KAAKoC,IAAI3E,OAAOu2B,WAAaD,EACtC,CACA,IAAAzG,GACE,MAAM5rB,EAAQmc,KAAKiW,WACnBjW,KAAKoW,mBAELpW,KAAKqW,sBAAsBrW,KAAK4E,SAAUkR,IAAkBQ,GAAmBA,EAAkBzyB,IAEjGmc,KAAKqW,sBAAsBT,GAAwBE,IAAkBQ,GAAmBA,EAAkBzyB,IAC1Gmc,KAAKqW,sBAAsBR,GAAyBE,IAAiBO,GAAmBA,EAAkBzyB,GAC5G,CACA,KAAAwO,GACE2N,KAAKuW,wBAAwBvW,KAAK4E,SAAU,YAC5C5E,KAAKuW,wBAAwBvW,KAAK4E,SAAU
kR,IAC5C9V,KAAKuW,wBAAwBX,GAAwBE,IACrD9V,KAAKuW,wBAAwBV,GAAyBE,GACxD,CACA,aAAAS,GACE,OAAOxW,KAAKiW,WAAa,CAC3B,CAGA,gBAAAG,GACEpW,KAAKyW,sBAAsBzW,KAAK4E,SAAU,YAC1C5E,KAAK4E,SAAS7jB,MAAM+K,SAAW,QACjC,CACA,qBAAAuqB,CAAsBtc,EAAU2c,EAAera,GAC7C,MAAMsa,EAAiB3W,KAAKiW,WAS5BjW,KAAK4W,2BAA2B7c,GARHxa,IAC3B,GAAIA,IAAYygB,KAAK4E,UAAYhlB,OAAOu2B,WAAa52B,EAAQsI,YAAc8uB,EACzE,OAEF3W,KAAKyW,sBAAsBl3B,EAASm3B,GACpC,MAAMJ,EAAkB12B,OAAOqF,iBAAiB1F,GAASub,iBAAiB4b,GAC1En3B,EAAQwB,MAAM81B,YAAYH,EAAe,GAAGra,EAASkB,OAAOC,WAAW8Y,QAAsB,GAGjG,CACA,qBAAAG,CAAsBl3B,EAASm3B,GAC7B,MAAMI,EAAcv3B,EAAQwB,MAAM+Z,iBAAiB4b,GAC/CI,GACF9T,GAAYC,iBAAiB1jB,EAASm3B,EAAeI,EAEzD,CACA,uBAAAP,CAAwBxc,EAAU2c,GAWhC1W,KAAK4W,2BAA2B7c,GAVHxa,IAC3B,MAAM5B,EAAQqlB,GAAYQ,iBAAiBjkB,EAASm3B,GAEtC,OAAV/4B,GAIJqlB,GAAYE,oBAAoB3jB,EAASm3B,GACzCn3B,EAAQwB,MAAM81B,YAAYH,EAAe/4B,IAJvC4B,EAAQwB,MAAMg2B,eAAeL,EAIgB,GAGnD,CACA,0BAAAE,CAA2B7c,EAAUid,GACnC,GAAI,GAAUjd,GACZid,EAASjd,QAGX,IAAK,MAAMkd,KAAOrR,GAAezT,KAAK4H,EAAUiG,KAAK4E,UACnDoS,EAASC,EAEb,EAeF,MAEMC,GAAc,YAGdC,GAAe,OAAOD,KACtBE,GAAyB,gBAAgBF,KACzCG,GAAiB,SAASH,KAC1BI,GAAe,OAAOJ,KACtBK,GAAgB,QAAQL,KACxBM,GAAiB,SAASN,KAC1BO,GAAsB,gBAAgBP,KACtCQ,GAA0B,oBAAoBR,KAC9CS,GAA0B,kBAAkBT,KAC5CU,GAAyB,QAAQV,cACjCW,GAAkB,aAElBC,GAAoB,OACpBC,GAAoB,eAKpBC,GAAY,CAChBvD,UAAU,EACVnC,OAAO,EACPzH,UAAU,GAENoN,GAAgB,CACpBxD,SAAU,mBACVnC,MAAO,UACPzH,SAAU,WAOZ,MAAMqN,WAAcxT,GAClB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKmY,QAAUvS,GAAeC,QArBV,gBAqBmC7F,KAAK4E,UAC5D5E,KAAKoY,UAAYpY,KAAKqY,sBACtBrY,KAAKsY,WAAatY,KAAKuY,uBACvBvY,KAAKwP,UAAW,EAChBxP,KAAKgP,kBAAmB,EACxBhP,KAAKwY,WAAa,IAAIxC,GACtBhW,KAAK0L,oBACP,CAGA,kBAAWhI,GACT,OAAOsU,EACT,CACA,sBAAWrU,GACT,OAAOsU,EACT,CACA,eAAW1b,GACT,MA1DW,OA2Db,CAGA,MAAAmL,CAAO5H,GACL,OAAOE,KAAKwP,SAAWxP,KAAKyP,OAASzP,KAAK0P,KAAK5P,EACjD,CACA,IAAA4P,CAAK5P,GACCE,KAAKwP,UAAYxP,KAAKgP,kBAGRzO,GAAaqB,QAAQ5B,KAAK4E,SAAU0S,GAAc,CAClExX,kBAEYkC,mBAGdhC,KAAKwP,UAAW,EAChBxP,KAAKgP,kBAAmB,EACxBhP,KAAKwY,WAAW/I,OAChBpqB,SAAS6G,KAAKmP,UAAU5E,IAAIohB,IAC5B7X,KAAKyY,gBACLzY,KAAKoY,UAAU1I,MAAK,IAAM1P,KAAK0Y,aAAa5Y,KAC9C,CACA,IAAA2P,GACOzP,KAAKwP,WAAYxP,KAAKgP,mBAGTzO,GAAaqB,QAAQ5B,KAAK4E,SAAUuS,IACxCnV,mBAGdhC,KAAKwP,UAAW,EAChBxP,KAAKgP,kBAAmB,EACxBhP,KAAKsY,WAAW5C,aAChB1V,KAAK4E,SAASvJ,UAAU1B,OAAOme,IAC/B9X,KAAKmF,gBAAe,IAAMnF,KAAK2Y,cAAc3Y,KAAK4E,SAAU5E,KAAK6N,gBACnE,CACA,OAAA9I,GACExE,GAAaC,IAAI5gB,OAAQs3B,IACzB3W,GAAaC,IAAIR,KAAKmY,QAASjB,IAC/BlX,KAAKoY,UAAUrT,UACf/E,KAAKsY,WAAW5C,aAChB/Q,MAAMI,SACR,CACA,YAAA6T,GACE5Y,KAAKyY,eACP,CAGA,mBAAAJ,GACE,OAAO,IAAIjE,GAAS,CAClBzZ,UAAWmG,QAAQd,KAAK6E,QAAQ4P,UAEhCrP,WAAYpF,KAAK6N,eAErB,CACA,oBAAA0K,GACE,OAAO,IAAInD,GAAU,CACnBF,YAAalV,KAAK4E,UAEtB,CACA,YAAA8T,CAAa5Y,GAENza,SAAS6G,KAAK1H,SAASwb,KAAK4E,WAC/Bvf,SAAS6G,KAAKyoB,OAAO3U,KAAK4E,UAE5B5E,KAAK4E,SAAS7jB,MAAM6wB,QAAU,QAC9B5R,KAAK4E,SAASzjB,gBAAgB,eAC9B6e,KAAK4E,SAASxjB,aAAa,cAAc,GACzC4e,KAAK4E,SAASxjB,aAAa,OAAQ,UACnC4e,KAAK4E,SAASnZ,UAAY,EAC1B,MAAMotB,EAAYjT,GAAeC,QA7GT,cA6GsC7F,KAAKmY,SAC/DU,IACFA,EAAUptB,UAAY,GAExBoQ,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIqhB,IAU5B9X,KAAKmF,gBATsB,KACrBnF,KAAK6E,QAAQyN,OACftS,KAAKsY,WAAW/C,WAElBvV,KAAKgP,kBAAmB,EACxBzO,GAAaqB,QAAQ5B,KAAK4E,SAAU2S,GAAe,CACjDzX,iBACA,GAEoCE,KAAKmY,QAASnY,KAAK6N,cAC7D,CACA,kBAAAnC,GACEnL,GAAac,GAAGrB,KAAK4E,SAAU+S,IAAyBvY,IAhJvC,WAiJXA,EAAMtiB,MAGNkjB,KAAK6E,QAAQgG,SACf7K,KAAKyP,OAGPzP,KAAK8Y,6BAA4B,IAEnCvY,GAAac,GAAGzhB,OAAQ43B,IAAgB,KAClCxX,KAAKwP,WAAaxP,KAAKgP,kBACzBhP,KAAKyY,eACP,IAEFlY,GAAac,GAAGrB,KAAK4E,SAAU8S,IAAyBtY,IAEtDmB,GAAae,IAAItB,KAAK4E,SAAU6S,IAAqBsB,IAC/C/Y,KAAK4E,WAAaxF,EAAM7S,QAAUyT,KAAK4E,WA
AamU,EAAOxsB,SAGjC,WAA1ByT,KAAK6E,QAAQ4P,SAIbzU,KAAK6E,QAAQ4P,UACfzU,KAAKyP,OAJLzP,KAAK8Y,6BAKP,GACA,GAEN,CACA,UAAAH,GACE3Y,KAAK4E,SAAS7jB,MAAM6wB,QAAU,OAC9B5R,KAAK4E,SAASxjB,aAAa,eAAe,GAC1C4e,KAAK4E,SAASzjB,gBAAgB,cAC9B6e,KAAK4E,SAASzjB,gBAAgB,QAC9B6e,KAAKgP,kBAAmB,EACxBhP,KAAKoY,UAAU3I,MAAK,KAClBpqB,SAAS6G,KAAKmP,UAAU1B,OAAOke,IAC/B7X,KAAKgZ,oBACLhZ,KAAKwY,WAAWnmB,QAChBkO,GAAaqB,QAAQ5B,KAAK4E,SAAUyS,GAAe,GAEvD,CACA,WAAAxJ,GACE,OAAO7N,KAAK4E,SAASvJ,UAAU7W,SAjLT,OAkLxB,CACA,0BAAAs0B,GAEE,GADkBvY,GAAaqB,QAAQ5B,KAAK4E,SAAUwS,IACxCpV,iBACZ,OAEF,MAAMiX,EAAqBjZ,KAAK4E,SAASvX,aAAehI,SAASC,gBAAgBsC,aAC3EsxB,EAAmBlZ,KAAK4E,SAAS7jB,MAAMiL,UAEpB,WAArBktB,GAAiClZ,KAAK4E,SAASvJ,UAAU7W,SAASuzB,MAGjEkB,IACHjZ,KAAK4E,SAAS7jB,MAAMiL,UAAY,UAElCgU,KAAK4E,SAASvJ,UAAU5E,IAAIshB,IAC5B/X,KAAKmF,gBAAe,KAClBnF,KAAK4E,SAASvJ,UAAU1B,OAAOoe,IAC/B/X,KAAKmF,gBAAe,KAClBnF,KAAK4E,SAAS7jB,MAAMiL,UAAYktB,CAAgB,GAC/ClZ,KAAKmY,QAAQ,GACfnY,KAAKmY,SACRnY,KAAK4E,SAAS0N,QAChB,CAMA,aAAAmG,GACE,MAAMQ,EAAqBjZ,KAAK4E,SAASvX,aAAehI,SAASC,gBAAgBsC,aAC3E+uB,EAAiB3W,KAAKwY,WAAWvC,WACjCkD,EAAoBxC,EAAiB,EAC3C,GAAIwC,IAAsBF,EAAoB,CAC5C,MAAMn3B,EAAWma,KAAU,cAAgB,eAC3C+D,KAAK4E,SAAS7jB,MAAMe,GAAY,GAAG60B,KACrC,CACA,IAAKwC,GAAqBF,EAAoB,CAC5C,MAAMn3B,EAAWma,KAAU,eAAiB,cAC5C+D,KAAK4E,SAAS7jB,MAAMe,GAAY,GAAG60B,KACrC,CACF,CACA,iBAAAqC,GACEhZ,KAAK4E,SAAS7jB,MAAMq4B,YAAc,GAClCpZ,KAAK4E,SAAS7jB,MAAMs4B,aAAe,EACrC,CAGA,sBAAO5c,CAAgBqH,EAAQhE,GAC7B,OAAOE,KAAKuH,MAAK,WACf,MAAMld,EAAO6tB,GAAM5S,oBAAoBtF,KAAM8D,GAC7C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQhE,EAJb,CAKF,GACF,EAOFS,GAAac,GAAGhc,SAAUuyB,GA9OK,4BA8O2C,SAAUxY,GAClF,MAAM7S,EAASqZ,GAAec,uBAAuB1G,MACjD,CAAC,IAAK,QAAQoB,SAASpB,KAAKgH,UAC9B5H,EAAMkD,iBAER/B,GAAae,IAAI/U,EAAQ+qB,IAAcgC,IACjCA,EAAUtX,kBAIdzB,GAAae,IAAI/U,EAAQ8qB,IAAgB,KACnC1c,GAAUqF,OACZA,KAAKsS,OACP,GACA,IAIJ,MAAMiH,EAAc3T,GAAeC,QAnQb,eAoQlB0T,GACFrB,GAAM7S,YAAYkU,GAAa9J,OAEpByI,GAAM5S,oBAAoB/Y,GAClCmb,OAAO1H,KACd,IACA4G,GAAqBsR,IAMrB/b,GAAmB+b,IAcnB,MAEMsB,GAAc,gBACdC,GAAiB,YACjBC,GAAwB,OAAOF,KAAcC,KAE7CE,GAAoB,OACpBC,GAAuB,UACvBC,GAAoB,SAEpBC,GAAgB,kBAChBC,GAAe,OAAOP,KACtBQ,GAAgB,QAAQR,KACxBS,GAAe,OAAOT,KACtBU,GAAuB,gBAAgBV,KACvCW,GAAiB,SAASX,KAC1BY,GAAe,SAASZ,KACxBa,GAAyB,QAAQb,KAAcC,KAC/Ca,GAAwB,kBAAkBd,KAE1Ce,GAAY,CAChB9F,UAAU,EACV5J,UAAU,EACVpgB,QAAQ,GAEJ+vB,GAAgB,CACpB/F,SAAU,mBACV5J,SAAU,UACVpgB,OAAQ,WAOV,MAAMgwB,WAAkB/V,GACtB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKwP,UAAW,EAChBxP,KAAKoY,UAAYpY,KAAKqY,sBACtBrY,KAAKsY,WAAatY,KAAKuY,uBACvBvY,KAAK0L,oBACP,CAGA,kBAAWhI,GACT,OAAO6W,EACT,CACA,sBAAW5W,GACT,OAAO6W,EACT,CACA,eAAWje,GACT,MApDW,WAqDb,CAGA,MAAAmL,CAAO5H,GACL,OAAOE,KAAKwP,SAAWxP,KAAKyP,OAASzP,KAAK0P,KAAK5P,EACjD,CACA,IAAA4P,CAAK5P,GACCE,KAAKwP,UAGSjP,GAAaqB,QAAQ5B,KAAK4E,SAAUmV,GAAc,CAClEja,kBAEYkC,mBAGdhC,KAAKwP,UAAW,EAChBxP,KAAKoY,UAAU1I,OACV1P,KAAK6E,QAAQpa,SAChB,IAAIurB,IAAkBvG,OAExBzP,KAAK4E,SAASxjB,aAAa,cAAc,GACzC4e,KAAK4E,SAASxjB,aAAa,OAAQ,UACnC4e,KAAK4E,SAASvJ,UAAU5E,IAAImjB,IAW5B5Z,KAAKmF,gBAVoB,KAClBnF,KAAK6E,QAAQpa,SAAUuV,KAAK6E,QAAQ4P,UACvCzU,KAAKsY,WAAW/C,WAElBvV,KAAK4E,SAASvJ,UAAU5E,IAAIkjB,IAC5B3Z,KAAK4E,SAASvJ,UAAU1B,OAAOigB,IAC/BrZ,GAAaqB,QAAQ5B,KAAK4E,SAAUoV,GAAe,CACjDla,iBACA,GAEkCE,KAAK4E,UAAU,GACvD,CACA,IAAA6K,GACOzP,KAAKwP,WAGQjP,GAAaqB,QAAQ5B,KAAK4E,SAAUqV,IACxCjY,mBAGdhC,KAAKsY,WAAW5C,aAChB1V,KAAK4E,SAAS8V,OACd1a,KAAKwP,UAAW,EAChBxP,KAAK4E,SAASvJ,UAAU5E,IAAIojB,IAC5B7Z,KAAKoY,UAAU3I,OAUfzP,KAAKmF,gBAToB,KACvBnF,KAAK4E,SAASvJ,UAAU1B,OAAOggB,GAAmBE,IAClD7Z,KAAK4E,SAASzjB,gBAAgB,cAC9B6e,KAAK4E,SAASzjB,gBAAgB,QACzB6e,KAAK6E,QAAQpa,
SAChB,IAAIurB,IAAkB3jB,QAExBkO,GAAaqB,QAAQ5B,KAAK4E,SAAUuV,GAAe,GAEfna,KAAK4E,UAAU,IACvD,CACA,OAAAG,GACE/E,KAAKoY,UAAUrT,UACf/E,KAAKsY,WAAW5C,aAChB/Q,MAAMI,SACR,CAGA,mBAAAsT,GACE,MASM1d,EAAYmG,QAAQd,KAAK6E,QAAQ4P,UACvC,OAAO,IAAIL,GAAS,CAClBJ,UA3HsB,qBA4HtBrZ,YACAyK,YAAY,EACZ8O,YAAalU,KAAK4E,SAAS7f,WAC3BkvB,cAAetZ,EAfK,KACU,WAA1BqF,KAAK6E,QAAQ4P,SAIjBzU,KAAKyP,OAHHlP,GAAaqB,QAAQ5B,KAAK4E,SAAUsV,GAG3B,EAUgC,MAE/C,CACA,oBAAA3B,GACE,OAAO,IAAInD,GAAU,CACnBF,YAAalV,KAAK4E,UAEtB,CACA,kBAAA8G,GACEnL,GAAac,GAAGrB,KAAK4E,SAAU0V,IAAuBlb,IA5IvC,WA6ITA,EAAMtiB,MAGNkjB,KAAK6E,QAAQgG,SACf7K,KAAKyP,OAGPlP,GAAaqB,QAAQ5B,KAAK4E,SAAUsV,IAAqB,GAE7D,CAGA,sBAAOzd,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAOowB,GAAUnV,oBAAoBtF,KAAM8D,GACjD,GAAsB,iBAAXA,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KAJb,CAKF,GACF,EAOFO,GAAac,GAAGhc,SAAUg1B,GA7JK,gCA6J2C,SAAUjb,GAClF,MAAM7S,EAASqZ,GAAec,uBAAuB1G,MAIrD,GAHI,CAAC,IAAK,QAAQoB,SAASpB,KAAKgH,UAC9B5H,EAAMkD,iBAEJpH,GAAW8E,MACb,OAEFO,GAAae,IAAI/U,EAAQ4tB,IAAgB,KAEnCxf,GAAUqF,OACZA,KAAKsS,OACP,IAIF,MAAMiH,EAAc3T,GAAeC,QAAQiU,IACvCP,GAAeA,IAAgBhtB,GACjCkuB,GAAUpV,YAAYkU,GAAa9J,OAExBgL,GAAUnV,oBAAoB/Y,GACtCmb,OAAO1H,KACd,IACAO,GAAac,GAAGzhB,OAAQ85B,IAAuB,KAC7C,IAAK,MAAM3f,KAAY6L,GAAezT,KAAK2nB,IACzCW,GAAUnV,oBAAoBvL,GAAU2V,MAC1C,IAEFnP,GAAac,GAAGzhB,OAAQw6B,IAAc,KACpC,IAAK,MAAM76B,KAAWqmB,GAAezT,KAAK,gDACG,UAAvClN,iBAAiB1F,GAASiC,UAC5Bi5B,GAAUnV,oBAAoB/lB,GAASkwB,MAE3C,IAEF7I,GAAqB6T,IAMrBte,GAAmBse,IAUnB,MACME,GAAmB,CAEvB,IAAK,CAAC,QAAS,MAAO,KAAM,OAAQ,OAHP,kBAI7B9pB,EAAG,CAAC,SAAU,OAAQ,QAAS,OAC/B+pB,KAAM,GACN9pB,EAAG,GACH+pB,GAAI,GACJC,IAAK,GACLC,KAAM,GACNC,IAAK,GACLC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJnqB,EAAG,GACHub,IAAK,CAAC,MAAO,SAAU,MAAO,QAAS,QAAS,UAChD6O,GAAI,GACJC,GAAI,GACJC,EAAG,GACHC,IAAK,GACLC,EAAG,GACHC,MAAO,GACPC,KAAM,GACNC,IAAK,GACLC,IAAK,GACLC,OAAQ,GACRC,EAAG,GACHC,GAAI,IAIAC,GAAgB,IAAI/lB,IAAI,CAAC,aAAc,OAAQ,OAAQ,WAAY,WAAY,SAAU,MAAO,eAShGgmB,GAAmB,0DACnBC,GAAmB,CAACx6B,EAAWy6B,KACnC,MAAMC,EAAgB16B,EAAUvC,SAASC,cACzC,OAAI+8B,EAAqBpb,SAASqb,IAC5BJ,GAAc1lB,IAAI8lB,IACb3b,QAAQwb,GAAiBj5B,KAAKtB,EAAU26B,YAM5CF,EAAqBr2B,QAAOw2B,GAAkBA,aAA0BpY,SAAQ9R,MAAKmqB,GAASA,EAAMv5B,KAAKo5B,IAAe,EA0C3HI,GAAY,CAChBC,UAAWnC,GACXoC,QAAS,CAAC,EAEVC,WAAY,GACZnwB,MAAM,EACNowB,UAAU,EACVC,WAAY,KACZC,SAAU,eAENC,GAAgB,CACpBN,UAAW,SACXC,QAAS,SACTC,WAAY,oBACZnwB,KAAM,UACNowB,SAAU,UACVC,WAAY,kBACZC,SAAU,UAENE,GAAqB,CACzBC,MAAO,iCACPvjB,SAAU,oBAOZ,MAAMwjB,WAAwB9Z,GAC5B,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,EACjC,CAGA,kBAAWJ,GACT,OAAOmZ,EACT,CACA,sBAAWlZ,GACT,OAAOyZ,EACT,CACA,eAAW7gB,GACT,MA3CW,iBA4Cb,CAGA,UAAAihB,GACE,OAAOxgC,OAAOmiB,OAAOa,KAAK6E,QAAQkY,SAASj6B,KAAIghB,GAAU9D,KAAKyd,yBAAyB3Z,KAAS3d,OAAO2a,QACzG,CACA,UAAA4c,GACE,OAAO1d,KAAKwd,aAAa9sB,OAAS,CACpC,CACA,aAAAitB,CAAcZ,GAMZ,OALA/c,KAAK4d,cAAcb,GACnB/c,KAAK6E,QAAQkY,QAAU,IAClB/c,KAAK6E,QAAQkY,WACbA,GAEE/c,IACT,CACA,MAAA6d,GACE,MAAMC,EAAkBz4B,SAASqvB,cAAc,OAC/CoJ,EAAgBC,UAAY/d,KAAKge,eAAehe,KAAK6E,QAAQsY,UAC7D,IAAK,MAAOpjB,EAAUkkB,KAASjhC,OAAOmkB,QAAQnB,KAAK6E,QAAQkY,SACzD/c,KAAKke,YAAYJ,EAAiBG,EAAMlkB,GAE1C,MAAMojB,EAAWW,EAAgBhY,SAAS,GACpCkX,EAAahd,KAAKyd,yBAAyBzd,KAAK6E,QAAQmY,YAI9D,OAHIA,GACFG,EAAS9hB,UAAU5E,OAAOumB,EAAW96B,MAAM,MAEtCi7B,CACT,CAGA,gBAAAlZ,CAAiBH,GACfa,MAAMV,iBAAiBH,GACvB9D,KAAK4d,cAAc9Z,EAAOiZ,QAC5B,CACA,aAAAa,CAAcO,GACZ,IAAK,MAAOpkB,EAAUgjB,KAAY//B,OAAOmkB,QAAQgd,GAC/CxZ,MAAMV,iBAAiB,CACrBlK,WACAujB,MAAOP,GACNM,GAEP,CACA,WAAAa,CAAYf,EAAUJ,EAAShjB,GAC7B,MAAMqkB,EAAkBxY,GAAe
C,QAAQ9L,EAAUojB,GACpDiB,KAGLrB,EAAU/c,KAAKyd,yBAAyBV,IAKpC,GAAUA,GACZ/c,KAAKqe,sBAAsB3jB,GAAWqiB,GAAUqB,GAG9Cpe,KAAK6E,QAAQhY,KACfuxB,EAAgBL,UAAY/d,KAAKge,eAAejB,GAGlDqB,EAAgBE,YAAcvB,EAX5BqB,EAAgBzkB,SAYpB,CACA,cAAAqkB,CAAeG,GACb,OAAOne,KAAK6E,QAAQoY,SApJxB,SAAsBsB,EAAYzB,EAAW0B,GAC3C,IAAKD,EAAW7tB,OACd,OAAO6tB,EAET,GAAIC,GAAgD,mBAArBA,EAC7B,OAAOA,EAAiBD,GAE1B,MACME,GADY,IAAI7+B,OAAO8+B,WACKC,gBAAgBJ,EAAY,aACxD19B,EAAW,GAAGlC,UAAU8/B,EAAgBvyB,KAAKkU,iBAAiB,MACpE,IAAK,MAAM7gB,KAAWsB,EAAU,CAC9B,MAAM+9B,EAAcr/B,EAAQC,SAASC,cACrC,IAAKzC,OAAO4D,KAAKk8B,GAAW1b,SAASwd,GAAc,CACjDr/B,EAAQoa,SACR,QACF,CACA,MAAMklB,EAAgB,GAAGlgC,UAAUY,EAAQ0B,YACrC69B,EAAoB,GAAGngC,OAAOm+B,EAAU,MAAQ,GAAIA,EAAU8B,IAAgB,IACpF,IAAK,MAAM78B,KAAa88B,EACjBtC,GAAiBx6B,EAAW+8B,IAC/Bv/B,EAAQ4B,gBAAgBY,EAAUvC,SAGxC,CACA,OAAOi/B,EAAgBvyB,KAAK6xB,SAC9B,CA2HmCgB,CAAaZ,EAAKne,KAAK6E,QAAQiY,UAAW9c,KAAK6E,QAAQqY,YAAciB,CACtG,CACA,wBAAAV,CAAyBU,GACvB,OAAOthB,GAAQshB,EAAK,CAACne,MACvB,CACA,qBAAAqe,CAAsB9+B,EAAS6+B,GAC7B,GAAIpe,KAAK6E,QAAQhY,KAGf,OAFAuxB,EAAgBL,UAAY,QAC5BK,EAAgBzJ,OAAOp1B,GAGzB6+B,EAAgBE,YAAc/+B,EAAQ++B,WACxC,EAeF,MACMU,GAAwB,IAAI1oB,IAAI,CAAC,WAAY,YAAa,eAC1D2oB,GAAoB,OAEpBC,GAAoB,OAEpBC,GAAiB,SACjBC,GAAmB,gBACnBC,GAAgB,QAChBC,GAAgB,QAahBC,GAAgB,CACpBC,KAAM,OACNC,IAAK,MACLC,MAAOzjB,KAAU,OAAS,QAC1B0jB,OAAQ,SACRC,KAAM3jB,KAAU,QAAU,QAEtB4jB,GAAY,CAChB/C,UAAWnC,GACXmF,WAAW,EACX7xB,SAAU,kBACV8xB,WAAW,EACXC,YAAa,GACbC,MAAO,EACPjwB,mBAAoB,CAAC,MAAO,QAAS,SAAU,QAC/CnD,MAAM,EACN7E,OAAQ,CAAC,EAAG,GACZtJ,UAAW,MACXmzB,aAAc,KACdoL,UAAU,EACVC,WAAY,KACZnjB,UAAU,EACVojB,SAAU,+GACV+C,MAAO,GACPte,QAAS,eAELue,GAAgB,CACpBrD,UAAW,SACXgD,UAAW,UACX7xB,SAAU,mBACV8xB,UAAW,2BACXC,YAAa,oBACbC,MAAO,kBACPjwB,mBAAoB,QACpBnD,KAAM,UACN7E,OAAQ,0BACRtJ,UAAW,oBACXmzB,aAAc,yBACdoL,SAAU,UACVC,WAAY,kBACZnjB,SAAU,mBACVojB,SAAU,SACV+C,MAAO,4BACPte,QAAS,UAOX,MAAMwe,WAAgB1b,GACpB,WAAAP,CAAY5kB,EAASukB,GACnB,QAAsB,IAAX,EACT,MAAM,IAAIU,UAAU,+DAEtBG,MAAMplB,EAASukB,GAGf9D,KAAKqgB,YAAa,EAClBrgB,KAAKsgB,SAAW,EAChBtgB,KAAKugB,WAAa,KAClBvgB,KAAKwgB,eAAiB,CAAC,EACvBxgB,KAAKgS,QAAU,KACfhS,KAAKygB,iBAAmB,KACxBzgB,KAAK0gB,YAAc,KAGnB1gB,KAAK2gB,IAAM,KACX3gB,KAAK4gB,gBACA5gB,KAAK6E,QAAQ9K,UAChBiG,KAAK6gB,WAET,CAGA,kBAAWnd,GACT,OAAOmc,EACT,CACA,sBAAWlc,GACT,OAAOwc,EACT,CACA,eAAW5jB,GACT,MAxGW,SAyGb,CAGA,MAAAukB,GACE9gB,KAAKqgB,YAAa,CACpB,CACA,OAAAU,GACE/gB,KAAKqgB,YAAa,CACpB,CACA,aAAAW,GACEhhB,KAAKqgB,YAAcrgB,KAAKqgB,UAC1B,CACA,MAAA3Y,GACO1H,KAAKqgB,aAGVrgB,KAAKwgB,eAAeS,OAASjhB,KAAKwgB,eAAeS,MAC7CjhB,KAAKwP,WACPxP,KAAKkhB,SAGPlhB,KAAKmhB,SACP,CACA,OAAApc,GACEgI,aAAa/M,KAAKsgB,UAClB/f,GAAaC,IAAIR,KAAK4E,SAAS5J,QAAQmkB,IAAiBC,GAAkBpf,KAAKohB,mBAC3EphB,KAAK4E,SAASpJ,aAAa,2BAC7BwE,KAAK4E,SAASxjB,aAAa,QAAS4e,KAAK4E,SAASpJ,aAAa,2BAEjEwE,KAAKqhB,iBACL1c,MAAMI,SACR,CACA,IAAA2K,GACE,GAAoC,SAAhC1P,KAAK4E,SAAS7jB,MAAM6wB,QACtB,MAAM,IAAIhO,MAAM,uCAElB,IAAM5D,KAAKshB,mBAAoBthB,KAAKqgB,WAClC,OAEF,MAAM/G,EAAY/Y,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAlItD,SAoIX+b,GADa9lB,GAAeuE,KAAK4E,WACL5E,KAAK4E,SAAS9kB,cAAcwF,iBAAiBd,SAASwb,KAAK4E,UAC7F,GAAI0U,EAAUtX,mBAAqBuf,EACjC,OAIFvhB,KAAKqhB,iBACL,MAAMV,EAAM3gB,KAAKwhB,iBACjBxhB,KAAK4E,SAASxjB,aAAa,mBAAoBu/B,EAAInlB,aAAa,OAChE,MAAM,UACJukB,GACE/f,KAAK6E,QAYT,GAXK7E,KAAK4E,SAAS9kB,cAAcwF,gBAAgBd,SAASwb,KAAK2gB,OAC7DZ,EAAUpL,OAAOgM,GACjBpgB,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAhJpC,cAkJnBxF,KAAKgS,QAAUhS,KAAKqS,cAAcsO,GAClCA,EAAItlB,UAAU5E,IAAIyoB,IAMd,iBAAkB75B,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK4Z,UAC/CvF,GAAac,GAAG9hB,EAAS,YAAaqc,IAU1CoE,KAAKmF,gBAPY,KACf5E,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAhKrC,WAiKQ,IAA
pBxF,KAAKugB,YACPvgB,KAAKkhB,SAEPlhB,KAAKugB,YAAa,CAAK,GAEKvgB,KAAK2gB,IAAK3gB,KAAK6N,cAC/C,CACA,IAAA4B,GACE,GAAKzP,KAAKwP,aAGQjP,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UA/KtD,SAgLHxD,iBAAd,CAQA,GALYhC,KAAKwhB,iBACbnmB,UAAU1B,OAAOulB,IAIjB,iBAAkB75B,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK4Z,UAC/CvF,GAAaC,IAAIjhB,EAAS,YAAaqc,IAG3CoE,KAAKwgB,eAA4B,OAAI,EACrCxgB,KAAKwgB,eAAelB,KAAiB,EACrCtf,KAAKwgB,eAAenB,KAAiB,EACrCrf,KAAKugB,WAAa,KAYlBvgB,KAAKmF,gBAVY,KACXnF,KAAKyhB,yBAGJzhB,KAAKugB,YACRvgB,KAAKqhB,iBAEPrhB,KAAK4E,SAASzjB,gBAAgB,oBAC9Bof,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAzMpC,WAyM8D,GAEnDxF,KAAK2gB,IAAK3gB,KAAK6N,cA1B7C,CA2BF,CACA,MAAA9iB,GACMiV,KAAKgS,SACPhS,KAAKgS,QAAQjnB,QAEjB,CAGA,cAAAu2B,GACE,OAAOxgB,QAAQd,KAAK0hB,YACtB,CACA,cAAAF,GAIE,OAHKxhB,KAAK2gB,MACR3gB,KAAK2gB,IAAM3gB,KAAK2hB,kBAAkB3hB,KAAK0gB,aAAe1gB,KAAK4hB,2BAEtD5hB,KAAK2gB,GACd,CACA,iBAAAgB,CAAkB5E,GAChB,MAAM4D,EAAM3gB,KAAK6hB,oBAAoB9E,GAASc,SAG9C,IAAK8C,EACH,OAAO,KAETA,EAAItlB,UAAU1B,OAAOslB,GAAmBC,IAExCyB,EAAItlB,UAAU5E,IAAI,MAAMuJ,KAAKmE,YAAY5H,aACzC,MAAMulB,EAvuGKC,KACb,GACEA,GAAU5/B,KAAK6/B,MA/BH,IA+BS7/B,KAAK8/B,gBACnB58B,SAAS68B,eAAeH,IACjC,OAAOA,CAAM,EAmuGGI,CAAOniB,KAAKmE,YAAY5H,MAAM1c,WAK5C,OAJA8gC,EAAIv/B,aAAa,KAAM0gC,GACnB9hB,KAAK6N,eACP8S,EAAItlB,UAAU5E,IAAIwoB,IAEb0B,CACT,CACA,UAAAyB,CAAWrF,GACT/c,KAAK0gB,YAAc3D,EACf/c,KAAKwP,aACPxP,KAAKqhB,iBACLrhB,KAAK0P,OAET,CACA,mBAAAmS,CAAoB9E,GAYlB,OAXI/c,KAAKygB,iBACPzgB,KAAKygB,iBAAiB9C,cAAcZ,GAEpC/c,KAAKygB,iBAAmB,IAAIlD,GAAgB,IACvCvd,KAAK6E,QAGRkY,UACAC,WAAYhd,KAAKyd,yBAAyBzd,KAAK6E,QAAQmb,eAGpDhgB,KAAKygB,gBACd,CACA,sBAAAmB,GACE,MAAO,CACL,iBAA0B5hB,KAAK0hB,YAEnC,CACA,SAAAA,GACE,OAAO1hB,KAAKyd,yBAAyBzd,KAAK6E,QAAQqb,QAAUlgB,KAAK4E,SAASpJ,aAAa,yBACzF,CAGA,4BAAA6mB,CAA6BjjB,GAC3B,OAAOY,KAAKmE,YAAYmB,oBAAoBlG,EAAMW,eAAgBC,KAAKsiB,qBACzE,CACA,WAAAzU,GACE,OAAO7N,KAAK6E,QAAQib,WAAa9f,KAAK2gB,KAAO3gB,KAAK2gB,IAAItlB,UAAU7W,SAASy6B,GAC3E,CACA,QAAAzP,GACE,OAAOxP,KAAK2gB,KAAO3gB,KAAK2gB,IAAItlB,UAAU7W,SAAS06B,GACjD,CACA,aAAA7M,CAAcsO,GACZ,MAAMjiC,EAAYme,GAAQmD,KAAK6E,QAAQnmB,UAAW,CAACshB,KAAM2gB,EAAK3gB,KAAK4E,WAC7D2d,EAAahD,GAAc7gC,EAAU+lB,eAC3C,OAAO,GAAoBzE,KAAK4E,SAAU+b,EAAK3gB,KAAKyS,iBAAiB8P,GACvE,CACA,UAAA1P,GACE,MAAM,OACJ7qB,GACEgY,KAAK6E,QACT,MAAsB,iBAAX7c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAAS4f,OAAO6P,SAASzvB,EAAO,MAEzC,mBAAXqK,EACF8qB,GAAc9qB,EAAO8qB,EAAY9S,KAAK4E,UAExC5c,CACT,CACA,wBAAAy1B,CAAyBU,GACvB,OAAOthB,GAAQshB,EAAK,CAACne,KAAK4E,UAC5B,CACA,gBAAA6N,CAAiB8P,GACf,MAAMxP,EAAwB,CAC5Br0B,UAAW6jC,EACXnsB,UAAW,CAAC,CACV9V,KAAM,OACNmB,QAAS,CACPuO,mBAAoBgQ,KAAK6E,QAAQ7U,qBAElC,CACD1P,KAAM,SACNmB,QAAS,CACPuG,OAAQgY,KAAK6S,eAEd,CACDvyB,KAAM,kBACNmB,QAAS,CACPwM,SAAU+R,KAAK6E,QAAQ5W,WAExB,CACD3N,KAAM,QACNmB,QAAS,CACPlC,QAAS,IAAIygB,KAAKmE,YAAY5H,eAE/B,CACDjc,KAAM,kBACNC,SAAS,EACTC,MAAO,aACPC,GAAI4J,IAGF2V,KAAKwhB,iBAAiBpgC,aAAa,wBAAyBiJ,EAAK1J,MAAMjC,UAAU,KAIvF,MAAO,IACFq0B,KACAlW,GAAQmD,KAAK6E,QAAQgN,aAAc,CAACkB,IAE3C,CACA,aAAA6N,GACE,MAAM4B,EAAWxiB,KAAK6E,QAAQjD,QAAQ1f,MAAM,KAC5C,IAAK,MAAM0f,KAAW4gB,EACpB,GAAgB,UAAZ5gB,EACFrB,GAAac,GAAGrB,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAjVlC,SAiV4DxF,KAAK6E,QAAQ9K,UAAUqF,IAC/EY,KAAKqiB,6BAA6BjjB,GAC1CsI,QAAQ,SAEb,GA3VU,WA2VN9F,EAA4B,CACrC,MAAM6gB,EAAU7gB,IAAYyd,GAAgBrf,KAAKmE,YAAYqB,UAnV5C,cAmV0ExF,KAAKmE,YAAYqB,UArV5F,WAsVVkd,EAAW9gB,IAAYyd,GAAgBrf,KAAKmE,YAAYqB,UAnV7C,cAmV2ExF,KAAKmE,YAAYqB,UArV5F,YAsVjBjF,GAAac,GAAGrB,KAAK4E,SAAU6d,EAASziB,KAAK6E,QAAQ9K,UAAUqF,IAC7D,MAAM+T,EAAUnT,KAAKqiB,6BAA6BjjB,GAClD+T,EAAQqN,eAA8B,YAAfphB,EAAMqB,KAAqB6e,GAAgBD,KAAiB,EACnFlM,EAAQgO,QAAQ,IAElB5gB,GAAac,GAAGrB,KAAK4E,SAAU8d,EAA
U1iB,KAAK6E,QAAQ9K,UAAUqF,IAC9D,MAAM+T,EAAUnT,KAAKqiB,6BAA6BjjB,GAClD+T,EAAQqN,eAA8B,aAAfphB,EAAMqB,KAAsB6e,GAAgBD,IAAiBlM,EAAQvO,SAASpgB,SAAS4a,EAAMU,eACpHqT,EAAQ+N,QAAQ,GAEpB,CAEFlhB,KAAKohB,kBAAoB,KACnBphB,KAAK4E,UACP5E,KAAKyP,MACP,EAEFlP,GAAac,GAAGrB,KAAK4E,SAAS5J,QAAQmkB,IAAiBC,GAAkBpf,KAAKohB,kBAChF,CACA,SAAAP,GACE,MAAMX,EAAQlgB,KAAK4E,SAASpJ,aAAa,SACpC0kB,IAGAlgB,KAAK4E,SAASpJ,aAAa,eAAkBwE,KAAK4E,SAAS0Z,YAAY3Y,QAC1E3F,KAAK4E,SAASxjB,aAAa,aAAc8+B,GAE3ClgB,KAAK4E,SAASxjB,aAAa,yBAA0B8+B,GACrDlgB,KAAK4E,SAASzjB,gBAAgB,SAChC,CACA,MAAAggC,GACMnhB,KAAKwP,YAAcxP,KAAKugB,WAC1BvgB,KAAKugB,YAAa,GAGpBvgB,KAAKugB,YAAa,EAClBvgB,KAAK2iB,aAAY,KACX3iB,KAAKugB,YACPvgB,KAAK0P,MACP,GACC1P,KAAK6E,QAAQob,MAAMvQ,MACxB,CACA,MAAAwR,GACMlhB,KAAKyhB,yBAGTzhB,KAAKugB,YAAa,EAClBvgB,KAAK2iB,aAAY,KACV3iB,KAAKugB,YACRvgB,KAAKyP,MACP,GACCzP,KAAK6E,QAAQob,MAAMxQ,MACxB,CACA,WAAAkT,CAAY/kB,EAASglB,GACnB7V,aAAa/M,KAAKsgB,UAClBtgB,KAAKsgB,SAAWziB,WAAWD,EAASglB,EACtC,CACA,oBAAAnB,GACE,OAAOzkC,OAAOmiB,OAAOa,KAAKwgB,gBAAgBpf,UAAS,EACrD,CACA,UAAAyC,CAAWC,GACT,MAAM+e,EAAiB7f,GAAYG,kBAAkBnD,KAAK4E,UAC1D,IAAK,MAAMke,KAAiB9lC,OAAO4D,KAAKiiC,GAClC7D,GAAsBroB,IAAImsB,WACrBD,EAAeC,GAU1B,OAPAhf,EAAS,IACJ+e,KACmB,iBAAX/e,GAAuBA,EAASA,EAAS,CAAC,GAEvDA,EAAS9D,KAAK+D,gBAAgBD,GAC9BA,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CACA,iBAAAE,CAAkBF,GAchB,OAbAA,EAAOic,WAAiC,IAArBjc,EAAOic,UAAsB16B,SAAS6G,KAAOwO,GAAWoJ,EAAOic,WACtD,iBAAjBjc,EAAOmc,QAChBnc,EAAOmc,MAAQ,CACbvQ,KAAM5L,EAAOmc,MACbxQ,KAAM3L,EAAOmc,QAGW,iBAAjBnc,EAAOoc,QAChBpc,EAAOoc,MAAQpc,EAAOoc,MAAMrgC,YAEA,iBAAnBikB,EAAOiZ,UAChBjZ,EAAOiZ,QAAUjZ,EAAOiZ,QAAQl9B,YAE3BikB,CACT,CACA,kBAAAwe,GACE,MAAMxe,EAAS,CAAC,EAChB,IAAK,MAAOhnB,EAAKa,KAAUX,OAAOmkB,QAAQnB,KAAK6E,SACzC7E,KAAKmE,YAAYT,QAAQ5mB,KAASa,IACpCmmB,EAAOhnB,GAAOa,GASlB,OANAmmB,EAAO/J,UAAW,EAClB+J,EAAOlC,QAAU,SAKVkC,CACT,CACA,cAAAud,GACMrhB,KAAKgS,UACPhS,KAAKgS,QAAQhZ,UACbgH,KAAKgS,QAAU,MAEbhS,KAAK2gB,MACP3gB,KAAK2gB,IAAIhnB,SACTqG,KAAK2gB,IAAM,KAEf,CAGA,sBAAOlkB,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO+1B,GAAQ9a,oBAAoBtF,KAAM8D,GAC/C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOF3H,GAAmBikB,IAcnB,MAGM2C,GAAY,IACb3C,GAAQ1c,QACXqZ,QAAS,GACT/0B,OAAQ,CAAC,EAAG,GACZtJ,UAAW,QACXy+B,SAAU,8IACVvb,QAAS,SAELohB,GAAgB,IACjB5C,GAAQzc,YACXoZ,QAAS,kCAOX,MAAMkG,WAAgB7C,GAEpB,kBAAW1c,GACT,OAAOqf,EACT,CACA,sBAAWpf,GACT,OAAOqf,EACT,CACA,eAAWzmB,GACT,MA7BW,SA8Bb,CAGA,cAAA+kB,GACE,OAAOthB,KAAK0hB,aAAe1hB,KAAKkjB,aAClC,CAGA,sBAAAtB,GACE,MAAO,CACL,kBAAkB5hB,KAAK0hB,YACvB,gBAAoB1hB,KAAKkjB,cAE7B,CACA,WAAAA,GACE,OAAOljB,KAAKyd,yBAAyBzd,KAAK6E,QAAQkY,QACpD,CAGA,sBAAOtgB,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO44B,GAAQ3d,oBAAoBtF,KAAM8D,GAC/C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOF3H,GAAmB8mB,IAcnB,MAEME,GAAc,gBAEdC,GAAiB,WAAWD,KAC5BE,GAAc,QAAQF,KACtBG,GAAwB,OAAOH,cAE/BI,GAAsB,SAEtBC,GAAwB,SAExBC,GAAqB,YAGrBC,GAAsB,GAAGD,mBAA+CA,uBAGxEE,GAAY,CAChB37B,OAAQ,KAER47B,WAAY,eACZC,cAAc,EACdt3B,OAAQ,KACRu3B,UAAW,CAAC,GAAK,GAAK,IAElBC,GAAgB,CACpB/7B,OAAQ,gBAER47B,WAAY,SACZC,aAAc,UACdt3B,OAAQ,UACRu3B,UAAW,SAOb,MAAME,WAAkBtf,GACtB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GAGf9D,KAAKikB,aAAe,IAAI/yB,IACxB8O,KAAKkkB,oBAAsB,IAAIhzB,IAC/B8O,KAAKmkB,aAA6D,YAA9Cl/B,iBAAiB+a,KAAK4E,UAAU5Y,UAA0B,KAAOgU,KAAK4E,SAC1F5E,KAAKokB,cAAgB,KACrBpkB,KAAKqkB,UAAY,KACjBrkB,KAAKskB,oBAAsB,CACzBC,gBAAiB,EACjBC,gBAAiB,GAEnBxkB,KAAKykB,SACP,CAGA,kBAAW/gB,GACT,OAAOigB,EACT,CACA,sBAAWhgB,GACT,OAAOogB,EACT,CACA,eAAWxnB,GACT,MAhEW
,WAiEb,CAGA,OAAAkoB,GACEzkB,KAAK0kB,mCACL1kB,KAAK2kB,2BACD3kB,KAAKqkB,UACPrkB,KAAKqkB,UAAUO,aAEf5kB,KAAKqkB,UAAYrkB,KAAK6kB,kBAExB,IAAK,MAAMC,KAAW9kB,KAAKkkB,oBAAoB/kB,SAC7Ca,KAAKqkB,UAAUU,QAAQD,EAE3B,CACA,OAAA/f,GACE/E,KAAKqkB,UAAUO,aACfjgB,MAAMI,SACR,CAGA,iBAAAf,CAAkBF,GAShB,OAPAA,EAAOvX,OAASmO,GAAWoJ,EAAOvX,SAAWlH,SAAS6G,KAGtD4X,EAAO8f,WAAa9f,EAAO9b,OAAS,GAAG8b,EAAO9b,oBAAsB8b,EAAO8f,WAC3C,iBAArB9f,EAAOggB,YAChBhgB,EAAOggB,UAAYhgB,EAAOggB,UAAU5hC,MAAM,KAAKY,KAAInF,GAAS4f,OAAOC,WAAW7f,MAEzEmmB,CACT,CACA,wBAAA6gB,GACO3kB,KAAK6E,QAAQgf,eAKlBtjB,GAAaC,IAAIR,KAAK6E,QAAQtY,OAAQ82B,IACtC9iB,GAAac,GAAGrB,KAAK6E,QAAQtY,OAAQ82B,GAAaG,IAAuBpkB,IACvE,MAAM4lB,EAAoBhlB,KAAKkkB,oBAAoB/mC,IAAIiiB,EAAM7S,OAAOtB,MACpE,GAAI+5B,EAAmB,CACrB5lB,EAAMkD,iBACN,MAAM3G,EAAOqE,KAAKmkB,cAAgBvkC,OAC5BmE,EAASihC,EAAkB3gC,UAAY2b,KAAK4E,SAASvgB,UAC3D,GAAIsX,EAAKspB,SAKP,YAJAtpB,EAAKspB,SAAS,CACZtjC,IAAKoC,EACLmhC,SAAU,WAMdvpB,EAAKlQ,UAAY1H,CACnB,KAEJ,CACA,eAAA8gC,GACE,MAAMpjC,EAAU,CACdka,KAAMqE,KAAKmkB,aACXL,UAAW9jB,KAAK6E,QAAQif,UACxBF,WAAY5jB,KAAK6E,QAAQ+e,YAE3B,OAAO,IAAIuB,sBAAqBhkB,GAAWnB,KAAKolB,kBAAkBjkB,IAAU1f,EAC9E,CAGA,iBAAA2jC,CAAkBjkB,GAChB,MAAMkkB,EAAgB/H,GAAStd,KAAKikB,aAAa9mC,IAAI,IAAImgC,EAAM/wB,OAAO4N,MAChEob,EAAW+H,IACftd,KAAKskB,oBAAoBC,gBAAkBjH,EAAM/wB,OAAOlI,UACxD2b,KAAKslB,SAASD,EAAc/H,GAAO,EAE/BkH,GAAmBxkB,KAAKmkB,cAAgB9+B,SAASC,iBAAiBmG,UAClE85B,EAAkBf,GAAmBxkB,KAAKskB,oBAAoBE,gBACpExkB,KAAKskB,oBAAoBE,gBAAkBA,EAC3C,IAAK,MAAMlH,KAASnc,EAAS,CAC3B,IAAKmc,EAAMkI,eAAgB,CACzBxlB,KAAKokB,cAAgB,KACrBpkB,KAAKylB,kBAAkBJ,EAAc/H,IACrC,QACF,CACA,MAAMoI,EAA2BpI,EAAM/wB,OAAOlI,WAAa2b,KAAKskB,oBAAoBC,gBAEpF,GAAIgB,GAAmBG,GAGrB,GAFAnQ,EAAS+H,IAEJkH,EACH,YAMCe,GAAoBG,GACvBnQ,EAAS+H,EAEb,CACF,CACA,gCAAAoH,GACE1kB,KAAKikB,aAAe,IAAI/yB,IACxB8O,KAAKkkB,oBAAsB,IAAIhzB,IAC/B,MAAMy0B,EAAc/f,GAAezT,KAAKqxB,GAAuBxjB,KAAK6E,QAAQtY,QAC5E,IAAK,MAAMq5B,KAAUD,EAAa,CAEhC,IAAKC,EAAO36B,MAAQiQ,GAAW0qB,GAC7B,SAEF,MAAMZ,EAAoBpf,GAAeC,QAAQggB,UAAUD,EAAO36B,MAAO+U,KAAK4E,UAG1EjK,GAAUqqB,KACZhlB,KAAKikB,aAAalyB,IAAI8zB,UAAUD,EAAO36B,MAAO26B,GAC9C5lB,KAAKkkB,oBAAoBnyB,IAAI6zB,EAAO36B,KAAM+5B,GAE9C,CACF,CACA,QAAAM,CAAS/4B,GACHyT,KAAKokB,gBAAkB73B,IAG3ByT,KAAKylB,kBAAkBzlB,KAAK6E,QAAQtY,QACpCyT,KAAKokB,cAAgB73B,EACrBA,EAAO8O,UAAU5E,IAAI8sB,IACrBvjB,KAAK8lB,iBAAiBv5B,GACtBgU,GAAaqB,QAAQ5B,KAAK4E,SAAUwe,GAAgB,CAClDtjB,cAAevT,IAEnB,CACA,gBAAAu5B,CAAiBv5B,GAEf,GAAIA,EAAO8O,UAAU7W,SA9LQ,iBA+L3BohB,GAAeC,QArLc,mBAqLsBtZ,EAAOyO,QAtLtC,cAsLkEK,UAAU5E,IAAI8sB,SAGtG,IAAK,MAAMwC,KAAangB,GAAeI,QAAQzZ,EA9LnB,qBAiM1B,IAAK,MAAMxJ,KAAQ6iB,GAAeM,KAAK6f,EAAWrC,IAChD3gC,EAAKsY,UAAU5E,IAAI8sB,GAGzB,CACA,iBAAAkC,CAAkBhhC,GAChBA,EAAO4W,UAAU1B,OAAO4pB,IACxB,MAAMyC,EAAcpgB,GAAezT,KAAK,GAAGqxB,MAAyBD,KAAuB9+B,GAC3F,IAAK,MAAM9E,KAAQqmC,EACjBrmC,EAAK0b,UAAU1B,OAAO4pB,GAE1B,CAGA,sBAAO9mB,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO25B,GAAU1e,oBAAoBtF,KAAM8D,GACjD,GAAsB,iBAAXA,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOFvD,GAAac,GAAGzhB,OAAQ0jC,IAAuB,KAC7C,IAAK,MAAM2C,KAAOrgB,GAAezT,KApOT,0BAqOtB6xB,GAAU1e,oBAAoB2gB,EAChC,IAOF9pB,GAAmB6nB,IAcnB,MAEMkC,GAAc,UACdC,GAAe,OAAOD,KACtBE,GAAiB,SAASF,KAC1BG,GAAe,OAAOH,KACtBI,GAAgB,QAAQJ,KACxBK,GAAuB,QAAQL,KAC/BM,GAAgB,UAAUN,KAC1BO,GAAsB,OAAOP,KAC7BQ,GAAiB,YACjBC,GAAkB,aAClBC,GAAe,UACfC,GAAiB,YACjBC,GAAW,OACXC,GAAU,MACVC,GAAoB,SACpBC,GAAoB,OACpBC,GAAoB,OAEpBC,GAA2B,mBAE3BC,GAA+B,QAAQD,MAIvCE,GAAuB,2EACvBC,GAAsB,YAFOF,uBAAiDA,mBAA6CA,OAE/EC,KAC5CE,GAA8B,IAAIP,8BAA6CA,+BAA8CA,4BAMnI,MAAMQ,WAAY9iB,GAChB,WAAAP,CAAY5kB,GACVolB,MAAMplB,GACNyg
B,KAAKiS,QAAUjS,KAAK4E,SAAS5J,QAdN,uCAelBgF,KAAKiS,UAOVjS,KAAKynB,sBAAsBznB,KAAKiS,QAASjS,KAAK0nB,gBAC9CnnB,GAAac,GAAGrB,KAAK4E,SAAU4hB,IAAepnB,GAASY,KAAK0M,SAAStN,KACvE,CAGA,eAAW7C,GACT,MAnDW,KAoDb,CAGA,IAAAmT,GAEE,MAAMiY,EAAY3nB,KAAK4E,SACvB,GAAI5E,KAAK4nB,cAAcD,GACrB,OAIF,MAAME,EAAS7nB,KAAK8nB,iBACdC,EAAYF,EAAStnB,GAAaqB,QAAQimB,EAAQ1B,GAAc,CACpErmB,cAAe6nB,IACZ,KACapnB,GAAaqB,QAAQ+lB,EAAWtB,GAAc,CAC9DvmB,cAAe+nB,IAEH7lB,kBAAoB+lB,GAAaA,EAAU/lB,mBAGzDhC,KAAKgoB,YAAYH,EAAQF,GACzB3nB,KAAKioB,UAAUN,EAAWE,GAC5B,CAGA,SAAAI,CAAU1oC,EAAS2oC,GACZ3oC,IAGLA,EAAQ8b,UAAU5E,IAAIuwB,IACtBhnB,KAAKioB,UAAUriB,GAAec,uBAAuBnnB,IAcrDygB,KAAKmF,gBAZY,KACsB,QAAjC5lB,EAAQic,aAAa,SAIzBjc,EAAQ4B,gBAAgB,YACxB5B,EAAQ6B,aAAa,iBAAiB,GACtC4e,KAAKmoB,gBAAgB5oC,GAAS,GAC9BghB,GAAaqB,QAAQriB,EAAS+mC,GAAe,CAC3CxmB,cAAeooB,KAPf3oC,EAAQ8b,UAAU5E,IAAIywB,GAQtB,GAE0B3nC,EAASA,EAAQ8b,UAAU7W,SAASyiC,KACpE,CACA,WAAAe,CAAYzoC,EAAS2oC,GACd3oC,IAGLA,EAAQ8b,UAAU1B,OAAOqtB,IACzBznC,EAAQm7B,OACR1a,KAAKgoB,YAAYpiB,GAAec,uBAAuBnnB,IAcvDygB,KAAKmF,gBAZY,KACsB,QAAjC5lB,EAAQic,aAAa,SAIzBjc,EAAQ6B,aAAa,iBAAiB,GACtC7B,EAAQ6B,aAAa,WAAY,MACjC4e,KAAKmoB,gBAAgB5oC,GAAS,GAC9BghB,GAAaqB,QAAQriB,EAAS6mC,GAAgB,CAC5CtmB,cAAeooB,KAPf3oC,EAAQ8b,UAAU1B,OAAOutB,GAQzB,GAE0B3nC,EAASA,EAAQ8b,UAAU7W,SAASyiC,KACpE,CACA,QAAAva,CAAStN,GACP,IAAK,CAACsnB,GAAgBC,GAAiBC,GAAcC,GAAgBC,GAAUC,IAAS3lB,SAAShC,EAAMtiB,KACrG,OAEFsiB,EAAMuU,kBACNvU,EAAMkD,iBACN,MAAMwD,EAAW9F,KAAK0nB,eAAevhC,QAAO5G,IAAY2b,GAAW3b,KACnE,IAAI6oC,EACJ,GAAI,CAACtB,GAAUC,IAAS3lB,SAAShC,EAAMtiB,KACrCsrC,EAAoBtiB,EAAS1G,EAAMtiB,MAAQgqC,GAAW,EAAIhhB,EAASpV,OAAS,OACvE,CACL,MAAM2c,EAAS,CAACsZ,GAAiBE,IAAgBzlB,SAAShC,EAAMtiB,KAChEsrC,EAAoBtqB,GAAqBgI,EAAU1G,EAAM7S,OAAQ8gB,GAAQ,EAC3E,CACI+a,IACFA,EAAkB9V,MAAM,CACtB+V,eAAe,IAEjBb,GAAIliB,oBAAoB8iB,GAAmB1Y,OAE/C,CACA,YAAAgY,GAEE,OAAO9hB,GAAezT,KAAKm1B,GAAqBtnB,KAAKiS,QACvD,CACA,cAAA6V,GACE,OAAO9nB,KAAK0nB,eAAev1B,MAAKzN,GAASsb,KAAK4nB,cAAcljC,MAAW,IACzE,CACA,qBAAA+iC,CAAsBhjC,EAAQqhB,GAC5B9F,KAAKsoB,yBAAyB7jC,EAAQ,OAAQ,WAC9C,IAAK,MAAMC,KAASohB,EAClB9F,KAAKuoB,6BAA6B7jC,EAEtC,CACA,4BAAA6jC,CAA6B7jC,GAC3BA,EAAQsb,KAAKwoB,iBAAiB9jC,GAC9B,MAAM+jC,EAAWzoB,KAAK4nB,cAAcljC,GAC9BgkC,EAAY1oB,KAAK2oB,iBAAiBjkC,GACxCA,EAAMtD,aAAa,gBAAiBqnC,GAChCC,IAAchkC,GAChBsb,KAAKsoB,yBAAyBI,EAAW,OAAQ,gBAE9CD,GACH/jC,EAAMtD,aAAa,WAAY,MAEjC4e,KAAKsoB,yBAAyB5jC,EAAO,OAAQ,OAG7Csb,KAAK4oB,mCAAmClkC,EAC1C,CACA,kCAAAkkC,CAAmClkC,GACjC,MAAM6H,EAASqZ,GAAec,uBAAuBhiB,GAChD6H,IAGLyT,KAAKsoB,yBAAyB/7B,EAAQ,OAAQ,YAC1C7H,EAAMyV,IACR6F,KAAKsoB,yBAAyB/7B,EAAQ,kBAAmB,GAAG7H,EAAMyV,MAEtE,CACA,eAAAguB,CAAgB5oC,EAASspC,GACvB,MAAMH,EAAY1oB,KAAK2oB,iBAAiBppC,GACxC,IAAKmpC,EAAUrtB,UAAU7W,SApKN,YAqKjB,OAEF,MAAMkjB,EAAS,CAAC3N,EAAUia,KACxB,MAAMz0B,EAAUqmB,GAAeC,QAAQ9L,EAAU2uB,GAC7CnpC,GACFA,EAAQ8b,UAAUqM,OAAOsM,EAAW6U,EACtC,EAEFnhB,EAAOyf,GAA0BH,IACjCtf,EA5K2B,iBA4KIwf,IAC/BwB,EAAUtnC,aAAa,gBAAiBynC,EAC1C,CACA,wBAAAP,CAAyB/oC,EAASwC,EAAWpE,GACtC4B,EAAQgc,aAAaxZ,IACxBxC,EAAQ6B,aAAaW,EAAWpE,EAEpC,CACA,aAAAiqC,CAAczY,GACZ,OAAOA,EAAK9T,UAAU7W,SAASwiC,GACjC,CAGA,gBAAAwB,CAAiBrZ,GACf,OAAOA,EAAKpJ,QAAQuhB,IAAuBnY,EAAOvJ,GAAeC,QAAQyhB,GAAqBnY,EAChG,CAGA,gBAAAwZ,CAAiBxZ,GACf,OAAOA,EAAKnU,QA5LO,gCA4LoBmU,CACzC,CAGA,sBAAO1S,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAOm9B,GAAIliB,oBAAoBtF,MACrC,GAAsB,iBAAX8D,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOFvD,GAAac,GAAGhc,SAAUkhC,GAAsBc,IAAsB,SAAUjoB,GAC1E,CAAC,IAAK,QAAQgC,SAASpB,KAAKgH,UAC9B5H,EAAMkD,iBAEJpH,GAAW8E,OAGfwnB,GAAIliB,oBAAoBtF,MAAM0P,MAChC,IAKAnP,GAAac,GAAGzhB,OAAQ6mC
,IAAqB,KAC3C,IAAK,MAAMlnC,KAAWqmB,GAAezT,KAAKo1B,IACxCC,GAAIliB,oBAAoB/lB,EAC1B,IAMF4c,GAAmBqrB,IAcnB,MAEMxiB,GAAY,YACZ8jB,GAAkB,YAAY9jB,KAC9B+jB,GAAiB,WAAW/jB,KAC5BgkB,GAAgB,UAAUhkB,KAC1BikB,GAAiB,WAAWjkB,KAC5BkkB,GAAa,OAAOlkB,KACpBmkB,GAAe,SAASnkB,KACxBokB,GAAa,OAAOpkB,KACpBqkB,GAAc,QAAQrkB,KAEtBskB,GAAkB,OAClBC,GAAkB,OAClBC,GAAqB,UACrB7lB,GAAc,CAClBmc,UAAW,UACX2J,SAAU,UACVxJ,MAAO,UAEHvc,GAAU,CACdoc,WAAW,EACX2J,UAAU,EACVxJ,MAAO,KAOT,MAAMyJ,WAAchlB,GAClB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKsgB,SAAW,KAChBtgB,KAAK2pB,sBAAuB,EAC5B3pB,KAAK4pB,yBAA0B,EAC/B5pB,KAAK4gB,eACP,CAGA,kBAAWld,GACT,OAAOA,EACT,CACA,sBAAWC,GACT,OAAOA,EACT,CACA,eAAWpH,GACT,MA/CS,OAgDX,CAGA,IAAAmT,GACoBnP,GAAaqB,QAAQ5B,KAAK4E,SAAUwkB,IACxCpnB,mBAGdhC,KAAK6pB,gBACD7pB,KAAK6E,QAAQib,WACf9f,KAAK4E,SAASvJ,UAAU5E,IA/CN,QAsDpBuJ,KAAK4E,SAASvJ,UAAU1B,OAAO2vB,IAC/BztB,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAI8yB,GAAiBC,IAC7CxpB,KAAKmF,gBARY,KACfnF,KAAK4E,SAASvJ,UAAU1B,OAAO6vB,IAC/BjpB,GAAaqB,QAAQ5B,KAAK4E,SAAUykB,IACpCrpB,KAAK8pB,oBAAoB,GAKG9pB,KAAK4E,SAAU5E,KAAK6E,QAAQib,WAC5D,CACA,IAAArQ,GACOzP,KAAK+pB,YAGQxpB,GAAaqB,QAAQ5B,KAAK4E,SAAUskB,IACxClnB,mBAQdhC,KAAK4E,SAASvJ,UAAU5E,IAAI+yB,IAC5BxpB,KAAKmF,gBANY,KACfnF,KAAK4E,SAASvJ,UAAU5E,IAAI6yB,IAC5BtpB,KAAK4E,SAASvJ,UAAU1B,OAAO6vB,GAAoBD,IACnDhpB,GAAaqB,QAAQ5B,KAAK4E,SAAUukB,GAAa,GAGrBnpB,KAAK4E,SAAU5E,KAAK6E,QAAQib,YAC5D,CACA,OAAA/a,GACE/E,KAAK6pB,gBACD7pB,KAAK+pB,WACP/pB,KAAK4E,SAASvJ,UAAU1B,OAAO4vB,IAEjC5kB,MAAMI,SACR,CACA,OAAAglB,GACE,OAAO/pB,KAAK4E,SAASvJ,UAAU7W,SAAS+kC,GAC1C,CAIA,kBAAAO,GACO9pB,KAAK6E,QAAQ4kB,WAGdzpB,KAAK2pB,sBAAwB3pB,KAAK4pB,0BAGtC5pB,KAAKsgB,SAAWziB,YAAW,KACzBmC,KAAKyP,MAAM,GACVzP,KAAK6E,QAAQob,QAClB,CACA,cAAA+J,CAAe5qB,EAAO6qB,GACpB,OAAQ7qB,EAAMqB,MACZ,IAAK,YACL,IAAK,WAEDT,KAAK2pB,qBAAuBM,EAC5B,MAEJ,IAAK,UACL,IAAK,WAEDjqB,KAAK4pB,wBAA0BK,EAIrC,GAAIA,EAEF,YADAjqB,KAAK6pB,gBAGP,MAAMvc,EAAclO,EAAMU,cACtBE,KAAK4E,WAAa0I,GAAetN,KAAK4E,SAASpgB,SAAS8oB,IAG5DtN,KAAK8pB,oBACP,CACA,aAAAlJ,GACErgB,GAAac,GAAGrB,KAAK4E,SAAUkkB,IAAiB1pB,GAASY,KAAKgqB,eAAe5qB,GAAO,KACpFmB,GAAac,GAAGrB,KAAK4E,SAAUmkB,IAAgB3pB,GAASY,KAAKgqB,eAAe5qB,GAAO,KACnFmB,GAAac,GAAGrB,KAAK4E,SAAUokB,IAAe5pB,GAASY,KAAKgqB,eAAe5qB,GAAO,KAClFmB,GAAac,GAAGrB,KAAK4E,SAAUqkB,IAAgB7pB,GAASY,KAAKgqB,eAAe5qB,GAAO,IACrF,CACA,aAAAyqB,GACE9c,aAAa/M,KAAKsgB,UAClBtgB,KAAKsgB,SAAW,IAClB,CAGA,sBAAO7jB,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAOq/B,GAAMpkB,oBAAoBtF,KAAM8D,GAC7C,GAAsB,iBAAXA,EAAqB,CAC9B,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KACf,CACF,GACF,ECr0IK,SAASkqB,GAAc7tB,GACD,WAAvBhX,SAASuX,WAAyBP,IACjChX,SAASyF,iBAAiB,mBAAoBuR,EACrD,CDy0IAuK,GAAqB8iB,IAMrBvtB,GAAmButB,IEpyInBQ,IAzCA,WAC2B,GAAG93B,MAAM5U,KAChC6H,SAAS+a,iBAAiB,+BAETtd,KAAI,SAAUqnC,GAC/B,OAAO,IAAI,GAAkBA,EAAkB,CAC7ClK,MAAO,CAAEvQ,KAAM,IAAKD,KAAM,MAE9B,GACF,IAiCAya,IA5BA,WACY7kC,SAAS68B,eAAe,mBAC9Bp3B,iBAAiB,SAAS,WAC5BzF,SAAS6G,KAAKT,UAAY,EAC1BpG,SAASC,gBAAgBmG,UAAY,CACvC,GACF,IAuBAy+B,IArBA,WACE,IAAIE,EAAM/kC,SAAS68B,eAAe,mBAC9BmI,EAAShlC,SACVilC,uBAAuB,aAAa,GACpChnC,wBACH1D,OAAOkL,iBAAiB,UAAU,WAC5BkV,KAAKuqB,UAAYvqB,KAAKwqB,SAAWxqB,KAAKwqB,QAAUH,EAAOzsC,OACzDwsC,EAAIrpC,MAAM6wB,QAAU,QAEpBwY,EAAIrpC,MAAM6wB,QAAU,OAEtB5R,KAAKuqB,UAAYvqB,KAAKwqB,OACxB,GACF,IAUA5qC,OAAO6qC,UAAY","sources":["webpack://pydata_sphinx_theme/webpack/bootstrap","webpack://pydata_sphinx_theme/webpack/runtime/define property getters","webpack://pydata_sphinx_theme/webpack/runtime/hasOwnProperty shorthand","webpack://pydata_sphinx_theme/webpack/runtime/make namespace 
object","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/enums.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/instanceOf.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/applyStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getBasePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/math.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/userAgent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isLayoutViewport.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getBoundingClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getLayoutRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/contains.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getComputedStyle.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isTableElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getParentNode.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getOffsetParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getMainAxisFromPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/within.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergePaddingObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getFreshSideObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/expandToHashMap.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/arrow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getVariation.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/computeStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/eventListeners.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositeVariationPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScrollBarX.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/listScrollParents.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/rectToClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getClippingRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getViewportRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeOffsets.js","webpack://pydata_sphinx_theme/
./node_modules/@popperjs/core/lib/utils/detectOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/flip.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeAutoPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/hide.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/offset.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/popperOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/preventOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getAltAxis.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getCompositeRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getHTMLElementScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/orderModifiers.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/createPopper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/debounce.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergeByName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper-lite.js","webpack://pydata_sphinx_theme/./node_modules/bootstrap/dist/js/bootstrap.esm.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/mixin.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/bootstrap.js"],"sourcesContent":["// The require scope\nvar __webpack_require__ = {};\n\n","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","export var top = 'top';\nexport var bottom = 'bottom';\nexport var right = 'right';\nexport var left = 'left';\nexport var auto = 'auto';\nexport var basePlacements = [top, bottom, right, left];\nexport var start = 'start';\nexport var end = 'end';\nexport var clippingParents = 'clippingParents';\nexport var viewport = 'viewport';\nexport var popper = 'popper';\nexport var reference = 'reference';\nexport var variationPlacements = /*#__PURE__*/basePlacements.reduce(function (acc, placement) {\n return acc.concat([placement + \"-\" + start, placement + \"-\" + end]);\n}, []);\nexport var placements = /*#__PURE__*/[].concat(basePlacements, [auto]).reduce(function (acc, placement) {\n return acc.concat([placement, placement + \"-\" + start, placement + \"-\" + end]);\n}, []); // modifiers that need to read the DOM\n\nexport var beforeRead = 'beforeRead';\nexport var read = 'read';\nexport var afterRead = 'afterRead'; // pure-logic modifiers\n\nexport var beforeMain = 'beforeMain';\nexport var main = 'main';\nexport var afterMain = 
'afterMain'; // modifier with the purpose to write to the DOM (or write into a framework state)\n\nexport var beforeWrite = 'beforeWrite';\nexport var write = 'write';\nexport var afterWrite = 'afterWrite';\nexport var modifierPhases = [beforeRead, read, afterRead, beforeMain, main, afterMain, beforeWrite, write, afterWrite];","export default function getNodeName(element) {\n return element ? (element.nodeName || '').toLowerCase() : null;\n}","export default function getWindow(node) {\n if (node == null) {\n return window;\n }\n\n if (node.toString() !== '[object Window]') {\n var ownerDocument = node.ownerDocument;\n return ownerDocument ? ownerDocument.defaultView || window : window;\n }\n\n return node;\n}","import getWindow from \"./getWindow.js\";\n\nfunction isElement(node) {\n var OwnElement = getWindow(node).Element;\n return node instanceof OwnElement || node instanceof Element;\n}\n\nfunction isHTMLElement(node) {\n var OwnElement = getWindow(node).HTMLElement;\n return node instanceof OwnElement || node instanceof HTMLElement;\n}\n\nfunction isShadowRoot(node) {\n // IE 11 has no ShadowRoot\n if (typeof ShadowRoot === 'undefined') {\n return false;\n }\n\n var OwnElement = getWindow(node).ShadowRoot;\n return node instanceof OwnElement || node instanceof ShadowRoot;\n}\n\nexport { isElement, isHTMLElement, isShadowRoot };","import getNodeName from \"../dom-utils/getNodeName.js\";\nimport { isHTMLElement } from \"../dom-utils/instanceOf.js\"; // This modifier takes the styles prepared by the `computeStyles` modifier\n// and applies them to the HTMLElements such as popper and arrow\n\nfunction applyStyles(_ref) {\n var state = _ref.state;\n Object.keys(state.elements).forEach(function (name) {\n var style = state.styles[name] || {};\n var attributes = state.attributes[name] || {};\n var element = state.elements[name]; // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n } // Flow doesn't support to extend this property, but it's the most\n // effective way to apply styles to an HTMLElement\n // $FlowFixMe[cannot-write]\n\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (name) {\n var value = attributes[name];\n\n if (value === false) {\n element.removeAttribute(name);\n } else {\n element.setAttribute(name, value === true ? '' : value);\n }\n });\n });\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state;\n var initialStyles = {\n popper: {\n position: state.options.strategy,\n left: '0',\n top: '0',\n margin: '0'\n },\n arrow: {\n position: 'absolute'\n },\n reference: {}\n };\n Object.assign(state.elements.popper.style, initialStyles.popper);\n state.styles = initialStyles;\n\n if (state.elements.arrow) {\n Object.assign(state.elements.arrow.style, initialStyles.arrow);\n }\n\n return function () {\n Object.keys(state.elements).forEach(function (name) {\n var element = state.elements[name];\n var attributes = state.attributes[name] || {};\n var styleProperties = Object.keys(state.styles.hasOwnProperty(name) ? 
state.styles[name] : initialStyles[name]); // Set all values to an empty string to unset them\n\n var style = styleProperties.reduce(function (style, property) {\n style[property] = '';\n return style;\n }, {}); // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n }\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (attribute) {\n element.removeAttribute(attribute);\n });\n });\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'applyStyles',\n enabled: true,\n phase: 'write',\n fn: applyStyles,\n effect: effect,\n requires: ['computeStyles']\n};","import { auto } from \"../enums.js\";\nexport default function getBasePlacement(placement) {\n return placement.split('-')[0];\n}","export var max = Math.max;\nexport var min = Math.min;\nexport var round = Math.round;","export default function getUAString() {\n var uaData = navigator.userAgentData;\n\n if (uaData != null && uaData.brands && Array.isArray(uaData.brands)) {\n return uaData.brands.map(function (item) {\n return item.brand + \"/\" + item.version;\n }).join(' ');\n }\n\n return navigator.userAgent;\n}","import getUAString from \"../utils/userAgent.js\";\nexport default function isLayoutViewport() {\n return !/^((?!chrome|android).)*safari/i.test(getUAString());\n}","import { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport { round } from \"../utils/math.js\";\nimport getWindow from \"./getWindow.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getBoundingClientRect(element, includeScale, isFixedStrategy) {\n if (includeScale === void 0) {\n includeScale = false;\n }\n\n if (isFixedStrategy === void 0) {\n isFixedStrategy = false;\n }\n\n var clientRect = element.getBoundingClientRect();\n var scaleX = 1;\n var scaleY = 1;\n\n if (includeScale && isHTMLElement(element)) {\n scaleX = element.offsetWidth > 0 ? round(clientRect.width) / element.offsetWidth || 1 : 1;\n scaleY = element.offsetHeight > 0 ? round(clientRect.height) / element.offsetHeight || 1 : 1;\n }\n\n var _ref = isElement(element) ? getWindow(element) : window,\n visualViewport = _ref.visualViewport;\n\n var addVisualOffsets = !isLayoutViewport() && isFixedStrategy;\n var x = (clientRect.left + (addVisualOffsets && visualViewport ? visualViewport.offsetLeft : 0)) / scaleX;\n var y = (clientRect.top + (addVisualOffsets && visualViewport ? visualViewport.offsetTop : 0)) / scaleY;\n var width = clientRect.width / scaleX;\n var height = clientRect.height / scaleY;\n return {\n width: width,\n height: height,\n top: y,\n right: x + width,\n bottom: y + height,\n left: x,\n x: x,\n y: y\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\"; // Returns the layout rect of an element relative to its offsetParent. 
Layout\n// means it doesn't take into account transforms.\n\nexport default function getLayoutRect(element) {\n var clientRect = getBoundingClientRect(element); // Use the clientRect sizes if it's not been transformed.\n // Fixes https://github.com/popperjs/popper-core/issues/1223\n\n var width = element.offsetWidth;\n var height = element.offsetHeight;\n\n if (Math.abs(clientRect.width - width) <= 1) {\n width = clientRect.width;\n }\n\n if (Math.abs(clientRect.height - height) <= 1) {\n height = clientRect.height;\n }\n\n return {\n x: element.offsetLeft,\n y: element.offsetTop,\n width: width,\n height: height\n };\n}","import { isShadowRoot } from \"./instanceOf.js\";\nexport default function contains(parent, child) {\n var rootNode = child.getRootNode && child.getRootNode(); // First, attempt with faster native method\n\n if (parent.contains(child)) {\n return true;\n } // then fallback to custom implementation with Shadow DOM support\n else if (rootNode && isShadowRoot(rootNode)) {\n var next = child;\n\n do {\n if (next && parent.isSameNode(next)) {\n return true;\n } // $FlowFixMe[prop-missing]: need a better way to handle this...\n\n\n next = next.parentNode || next.host;\n } while (next);\n } // Give up, the result is false\n\n\n return false;\n}","import getWindow from \"./getWindow.js\";\nexport default function getComputedStyle(element) {\n return getWindow(element).getComputedStyle(element);\n}","import getNodeName from \"./getNodeName.js\";\nexport default function isTableElement(element) {\n return ['table', 'td', 'th'].indexOf(getNodeName(element)) >= 0;\n}","import { isElement } from \"./instanceOf.js\";\nexport default function getDocumentElement(element) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return ((isElement(element) ? element.ownerDocument : // $FlowFixMe[prop-missing]\n element.document) || window.document).documentElement;\n}","import getNodeName from \"./getNodeName.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport { isShadowRoot } from \"./instanceOf.js\";\nexport default function getParentNode(element) {\n if (getNodeName(element) === 'html') {\n return element;\n }\n\n return (// this is a quicker (but less type safe) way to save quite some bytes from the bundle\n // $FlowFixMe[incompatible-return]\n // $FlowFixMe[prop-missing]\n element.assignedSlot || // step into the shadow DOM of the parent of a slotted node\n element.parentNode || ( // DOM Element detected\n isShadowRoot(element) ? 
element.host : null) || // ShadowRoot detected\n // $FlowFixMe[incompatible-call]: HTMLElement is a Node\n getDocumentElement(element) // fallback\n\n );\n}","import getWindow from \"./getWindow.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isHTMLElement, isShadowRoot } from \"./instanceOf.js\";\nimport isTableElement from \"./isTableElement.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getUAString from \"../utils/userAgent.js\";\n\nfunction getTrueOffsetParent(element) {\n if (!isHTMLElement(element) || // https://github.com/popperjs/popper-core/issues/837\n getComputedStyle(element).position === 'fixed') {\n return null;\n }\n\n return element.offsetParent;\n} // `.offsetParent` reports `null` for fixed elements, while absolute elements\n// return the containing block\n\n\nfunction getContainingBlock(element) {\n var isFirefox = /firefox/i.test(getUAString());\n var isIE = /Trident/i.test(getUAString());\n\n if (isIE && isHTMLElement(element)) {\n // In IE 9, 10 and 11 fixed elements containing block is always established by the viewport\n var elementCss = getComputedStyle(element);\n\n if (elementCss.position === 'fixed') {\n return null;\n }\n }\n\n var currentNode = getParentNode(element);\n\n if (isShadowRoot(currentNode)) {\n currentNode = currentNode.host;\n }\n\n while (isHTMLElement(currentNode) && ['html', 'body'].indexOf(getNodeName(currentNode)) < 0) {\n var css = getComputedStyle(currentNode); // This is non-exhaustive but covers the most common CSS properties that\n // create a containing block.\n // https://developer.mozilla.org/en-US/docs/Web/CSS/Containing_block#identifying_the_containing_block\n\n if (css.transform !== 'none' || css.perspective !== 'none' || css.contain === 'paint' || ['transform', 'perspective'].indexOf(css.willChange) !== -1 || isFirefox && css.willChange === 'filter' || isFirefox && css.filter && css.filter !== 'none') {\n return currentNode;\n } else {\n currentNode = currentNode.parentNode;\n }\n }\n\n return null;\n} // Gets the closest ancestor positioned element. Handles some edge cases,\n// such as table ancestors and cross browser bugs.\n\n\nexport default function getOffsetParent(element) {\n var window = getWindow(element);\n var offsetParent = getTrueOffsetParent(element);\n\n while (offsetParent && isTableElement(offsetParent) && getComputedStyle(offsetParent).position === 'static') {\n offsetParent = getTrueOffsetParent(offsetParent);\n }\n\n if (offsetParent && (getNodeName(offsetParent) === 'html' || getNodeName(offsetParent) === 'body' && getComputedStyle(offsetParent).position === 'static')) {\n return window;\n }\n\n return offsetParent || getContainingBlock(element) || window;\n}","export default function getMainAxisFromPlacement(placement) {\n return ['top', 'bottom'].indexOf(placement) >= 0 ? 'x' : 'y';\n}","import { max as mathMax, min as mathMin } from \"./math.js\";\nexport function within(min, value, max) {\n return mathMax(min, mathMin(value, max));\n}\nexport function withinMaxClamp(min, value, max) {\n var v = within(min, value, max);\n return v > max ? 
max : v;\n}","import getFreshSideObject from \"./getFreshSideObject.js\";\nexport default function mergePaddingObject(paddingObject) {\n return Object.assign({}, getFreshSideObject(), paddingObject);\n}","export default function getFreshSideObject() {\n return {\n top: 0,\n right: 0,\n bottom: 0,\n left: 0\n };\n}","export default function expandToHashMap(value, keys) {\n return keys.reduce(function (hashMap, key) {\n hashMap[key] = value;\n return hashMap;\n }, {});\n}","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport contains from \"../dom-utils/contains.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport { within } from \"../utils/within.js\";\nimport mergePaddingObject from \"../utils/mergePaddingObject.js\";\nimport expandToHashMap from \"../utils/expandToHashMap.js\";\nimport { left, right, basePlacements, top, bottom } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar toPaddingObject = function toPaddingObject(padding, state) {\n padding = typeof padding === 'function' ? padding(Object.assign({}, state.rects, {\n placement: state.placement\n })) : padding;\n return mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n};\n\nfunction arrow(_ref) {\n var _state$modifiersData$;\n\n var state = _ref.state,\n name = _ref.name,\n options = _ref.options;\n var arrowElement = state.elements.arrow;\n var popperOffsets = state.modifiersData.popperOffsets;\n var basePlacement = getBasePlacement(state.placement);\n var axis = getMainAxisFromPlacement(basePlacement);\n var isVertical = [left, right].indexOf(basePlacement) >= 0;\n var len = isVertical ? 'height' : 'width';\n\n if (!arrowElement || !popperOffsets) {\n return;\n }\n\n var paddingObject = toPaddingObject(options.padding, state);\n var arrowRect = getLayoutRect(arrowElement);\n var minProp = axis === 'y' ? top : left;\n var maxProp = axis === 'y' ? bottom : right;\n var endDiff = state.rects.reference[len] + state.rects.reference[axis] - popperOffsets[axis] - state.rects.popper[len];\n var startDiff = popperOffsets[axis] - state.rects.reference[axis];\n var arrowOffsetParent = getOffsetParent(arrowElement);\n var clientSize = arrowOffsetParent ? axis === 'y' ? arrowOffsetParent.clientHeight || 0 : arrowOffsetParent.clientWidth || 0 : 0;\n var centerToReference = endDiff / 2 - startDiff / 2; // Make sure the arrow doesn't overflow the popper if the center point is\n // outside of the popper bounds\n\n var min = paddingObject[minProp];\n var max = clientSize - arrowRect[len] - paddingObject[maxProp];\n var center = clientSize / 2 - arrowRect[len] / 2 + centerToReference;\n var offset = within(min, center, max); // Prevents breaking syntax highlighting...\n\n var axisProp = axis;\n state.modifiersData[name] = (_state$modifiersData$ = {}, _state$modifiersData$[axisProp] = offset, _state$modifiersData$.centerOffset = offset - center, _state$modifiersData$);\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state,\n options = _ref2.options;\n var _options$element = options.element,\n arrowElement = _options$element === void 0 ? 
'[data-popper-arrow]' : _options$element;\n\n if (arrowElement == null) {\n return;\n } // CSS selector\n\n\n if (typeof arrowElement === 'string') {\n arrowElement = state.elements.popper.querySelector(arrowElement);\n\n if (!arrowElement) {\n return;\n }\n }\n\n if (!contains(state.elements.popper, arrowElement)) {\n return;\n }\n\n state.elements.arrow = arrowElement;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'arrow',\n enabled: true,\n phase: 'main',\n fn: arrow,\n effect: effect,\n requires: ['popperOffsets'],\n requiresIfExists: ['preventOverflow']\n};","export default function getVariation(placement) {\n return placement.split('-')[1];\n}","import { top, left, right, bottom, end } from \"../enums.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getWindow from \"../dom-utils/getWindow.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getComputedStyle from \"../dom-utils/getComputedStyle.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport { round } from \"../utils/math.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar unsetSides = {\n top: 'auto',\n right: 'auto',\n bottom: 'auto',\n left: 'auto'\n}; // Round the offsets to the nearest suitable subpixel based on the DPR.\n// Zooming can change the DPR, but it seems to report a value that will\n// cleanly divide the values into the appropriate subpixels.\n\nfunction roundOffsetsByDPR(_ref, win) {\n var x = _ref.x,\n y = _ref.y;\n var dpr = win.devicePixelRatio || 1;\n return {\n x: round(x * dpr) / dpr || 0,\n y: round(y * dpr) / dpr || 0\n };\n}\n\nexport function mapToStyles(_ref2) {\n var _Object$assign2;\n\n var popper = _ref2.popper,\n popperRect = _ref2.popperRect,\n placement = _ref2.placement,\n variation = _ref2.variation,\n offsets = _ref2.offsets,\n position = _ref2.position,\n gpuAcceleration = _ref2.gpuAcceleration,\n adaptive = _ref2.adaptive,\n roundOffsets = _ref2.roundOffsets,\n isFixed = _ref2.isFixed;\n var _offsets$x = offsets.x,\n x = _offsets$x === void 0 ? 0 : _offsets$x,\n _offsets$y = offsets.y,\n y = _offsets$y === void 0 ? 0 : _offsets$y;\n\n var _ref3 = typeof roundOffsets === 'function' ? roundOffsets({\n x: x,\n y: y\n }) : {\n x: x,\n y: y\n };\n\n x = _ref3.x;\n y = _ref3.y;\n var hasX = offsets.hasOwnProperty('x');\n var hasY = offsets.hasOwnProperty('y');\n var sideX = left;\n var sideY = top;\n var win = window;\n\n if (adaptive) {\n var offsetParent = getOffsetParent(popper);\n var heightProp = 'clientHeight';\n var widthProp = 'clientWidth';\n\n if (offsetParent === getWindow(popper)) {\n offsetParent = getDocumentElement(popper);\n\n if (getComputedStyle(offsetParent).position !== 'static' && position === 'absolute') {\n heightProp = 'scrollHeight';\n widthProp = 'scrollWidth';\n }\n } // $FlowFixMe[incompatible-cast]: force type refinement, we compare offsetParent with window above, but Flow doesn't detect it\n\n\n offsetParent = offsetParent;\n\n if (placement === top || (placement === left || placement === right) && variation === end) {\n sideY = bottom;\n var offsetY = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.height : // $FlowFixMe[prop-missing]\n offsetParent[heightProp];\n y -= offsetY - popperRect.height;\n y *= gpuAcceleration ? 
1 : -1;\n }\n\n if (placement === left || (placement === top || placement === bottom) && variation === end) {\n sideX = right;\n var offsetX = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.width : // $FlowFixMe[prop-missing]\n offsetParent[widthProp];\n x -= offsetX - popperRect.width;\n x *= gpuAcceleration ? 1 : -1;\n }\n }\n\n var commonStyles = Object.assign({\n position: position\n }, adaptive && unsetSides);\n\n var _ref4 = roundOffsets === true ? roundOffsetsByDPR({\n x: x,\n y: y\n }, getWindow(popper)) : {\n x: x,\n y: y\n };\n\n x = _ref4.x;\n y = _ref4.y;\n\n if (gpuAcceleration) {\n var _Object$assign;\n\n return Object.assign({}, commonStyles, (_Object$assign = {}, _Object$assign[sideY] = hasY ? '0' : '', _Object$assign[sideX] = hasX ? '0' : '', _Object$assign.transform = (win.devicePixelRatio || 1) <= 1 ? \"translate(\" + x + \"px, \" + y + \"px)\" : \"translate3d(\" + x + \"px, \" + y + \"px, 0)\", _Object$assign));\n }\n\n return Object.assign({}, commonStyles, (_Object$assign2 = {}, _Object$assign2[sideY] = hasY ? y + \"px\" : '', _Object$assign2[sideX] = hasX ? x + \"px\" : '', _Object$assign2.transform = '', _Object$assign2));\n}\n\nfunction computeStyles(_ref5) {\n var state = _ref5.state,\n options = _ref5.options;\n var _options$gpuAccelerat = options.gpuAcceleration,\n gpuAcceleration = _options$gpuAccelerat === void 0 ? true : _options$gpuAccelerat,\n _options$adaptive = options.adaptive,\n adaptive = _options$adaptive === void 0 ? true : _options$adaptive,\n _options$roundOffsets = options.roundOffsets,\n roundOffsets = _options$roundOffsets === void 0 ? true : _options$roundOffsets;\n var commonStyles = {\n placement: getBasePlacement(state.placement),\n variation: getVariation(state.placement),\n popper: state.elements.popper,\n popperRect: state.rects.popper,\n gpuAcceleration: gpuAcceleration,\n isFixed: state.options.strategy === 'fixed'\n };\n\n if (state.modifiersData.popperOffsets != null) {\n state.styles.popper = Object.assign({}, state.styles.popper, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.popperOffsets,\n position: state.options.strategy,\n adaptive: adaptive,\n roundOffsets: roundOffsets\n })));\n }\n\n if (state.modifiersData.arrow != null) {\n state.styles.arrow = Object.assign({}, state.styles.arrow, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.arrow,\n position: 'absolute',\n adaptive: false,\n roundOffsets: roundOffsets\n })));\n }\n\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-placement': state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'computeStyles',\n enabled: true,\n phase: 'beforeWrite',\n fn: computeStyles,\n data: {}\n};","import getWindow from \"../dom-utils/getWindow.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar passive = {\n passive: true\n};\n\nfunction effect(_ref) {\n var state = _ref.state,\n instance = _ref.instance,\n options = _ref.options;\n var _options$scroll = options.scroll,\n scroll = _options$scroll === void 0 ? true : _options$scroll,\n _options$resize = options.resize,\n resize = _options$resize === void 0 ? 
true : _options$resize;\n var window = getWindow(state.elements.popper);\n var scrollParents = [].concat(state.scrollParents.reference, state.scrollParents.popper);\n\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.addEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.addEventListener('resize', instance.update, passive);\n }\n\n return function () {\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.removeEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.removeEventListener('resize', instance.update, passive);\n }\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'eventListeners',\n enabled: true,\n phase: 'write',\n fn: function fn() {},\n effect: effect,\n data: {}\n};","var hash = {\n left: 'right',\n right: 'left',\n bottom: 'top',\n top: 'bottom'\n};\nexport default function getOppositePlacement(placement) {\n return placement.replace(/left|right|bottom|top/g, function (matched) {\n return hash[matched];\n });\n}","var hash = {\n start: 'end',\n end: 'start'\n};\nexport default function getOppositeVariationPlacement(placement) {\n return placement.replace(/start|end/g, function (matched) {\n return hash[matched];\n });\n}","import getWindow from \"./getWindow.js\";\nexport default function getWindowScroll(node) {\n var win = getWindow(node);\n var scrollLeft = win.pageXOffset;\n var scrollTop = win.pageYOffset;\n return {\n scrollLeft: scrollLeft,\n scrollTop: scrollTop\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nexport default function getWindowScrollBarX(element) {\n // If has a CSS width greater than the viewport, then this will be\n // incorrect for RTL.\n // Popper 1 is broken in this case and never had a bug report so let's assume\n // it's not an issue. I don't think anyone ever specifies width on \n // anyway.\n // Browsers where the left scrollbar doesn't cause an issue report `0` for\n // this (e.g. 
Edge 2019, IE11, Safari)\n return getBoundingClientRect(getDocumentElement(element)).left + getWindowScroll(element).scrollLeft;\n}","import getComputedStyle from \"./getComputedStyle.js\";\nexport default function isScrollParent(element) {\n // Firefox wants us to check `-x` and `-y` variations as well\n var _getComputedStyle = getComputedStyle(element),\n overflow = _getComputedStyle.overflow,\n overflowX = _getComputedStyle.overflowX,\n overflowY = _getComputedStyle.overflowY;\n\n return /auto|scroll|overlay|hidden/.test(overflow + overflowY + overflowX);\n}","import getParentNode from \"./getParentNode.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nexport default function getScrollParent(node) {\n if (['html', 'body', '#document'].indexOf(getNodeName(node)) >= 0) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return node.ownerDocument.body;\n }\n\n if (isHTMLElement(node) && isScrollParent(node)) {\n return node;\n }\n\n return getScrollParent(getParentNode(node));\n}","import getScrollParent from \"./getScrollParent.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getWindow from \"./getWindow.js\";\nimport isScrollParent from \"./isScrollParent.js\";\n/*\ngiven a DOM element, return the list of all scroll parents, up the list of ancesors\nuntil we get to the top window object. This list is what we attach scroll listeners\nto, because if any of these parent elements scroll, we'll need to re-calculate the\nreference element's position.\n*/\n\nexport default function listScrollParents(element, list) {\n var _element$ownerDocumen;\n\n if (list === void 0) {\n list = [];\n }\n\n var scrollParent = getScrollParent(element);\n var isBody = scrollParent === ((_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body);\n var win = getWindow(scrollParent);\n var target = isBody ? [win].concat(win.visualViewport || [], isScrollParent(scrollParent) ? scrollParent : []) : scrollParent;\n var updatedList = list.concat(target);\n return isBody ? 
updatedList : // $FlowFixMe[incompatible-call]: isBody tells us target will be an HTMLElement here\n updatedList.concat(listScrollParents(getParentNode(target)));\n}","export default function rectToClientRect(rect) {\n return Object.assign({}, rect, {\n left: rect.x,\n top: rect.y,\n right: rect.x + rect.width,\n bottom: rect.y + rect.height\n });\n}","import { viewport } from \"../enums.js\";\nimport getViewportRect from \"./getViewportRect.js\";\nimport getDocumentRect from \"./getDocumentRect.js\";\nimport listScrollParents from \"./listScrollParents.js\";\nimport getOffsetParent from \"./getOffsetParent.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport contains from \"./contains.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport rectToClientRect from \"../utils/rectToClientRect.js\";\nimport { max, min } from \"../utils/math.js\";\n\nfunction getInnerBoundingClientRect(element, strategy) {\n var rect = getBoundingClientRect(element, false, strategy === 'fixed');\n rect.top = rect.top + element.clientTop;\n rect.left = rect.left + element.clientLeft;\n rect.bottom = rect.top + element.clientHeight;\n rect.right = rect.left + element.clientWidth;\n rect.width = element.clientWidth;\n rect.height = element.clientHeight;\n rect.x = rect.left;\n rect.y = rect.top;\n return rect;\n}\n\nfunction getClientRectFromMixedType(element, clippingParent, strategy) {\n return clippingParent === viewport ? rectToClientRect(getViewportRect(element, strategy)) : isElement(clippingParent) ? getInnerBoundingClientRect(clippingParent, strategy) : rectToClientRect(getDocumentRect(getDocumentElement(element)));\n} // A \"clipping parent\" is an overflowable container with the characteristic of\n// clipping (or hiding) overflowing elements with a position different from\n// `initial`\n\n\nfunction getClippingParents(element) {\n var clippingParents = listScrollParents(getParentNode(element));\n var canEscapeClipping = ['absolute', 'fixed'].indexOf(getComputedStyle(element).position) >= 0;\n var clipperElement = canEscapeClipping && isHTMLElement(element) ? getOffsetParent(element) : element;\n\n if (!isElement(clipperElement)) {\n return [];\n } // $FlowFixMe[incompatible-return]: https://github.com/facebook/flow/issues/1414\n\n\n return clippingParents.filter(function (clippingParent) {\n return isElement(clippingParent) && contains(clippingParent, clipperElement) && getNodeName(clippingParent) !== 'body';\n });\n} // Gets the maximum area that the element is visible in due to any number of\n// clipping parents\n\n\nexport default function getClippingRect(element, boundary, rootBoundary, strategy) {\n var mainClippingParents = boundary === 'clippingParents' ? 
getClippingParents(element) : [].concat(boundary);\n var clippingParents = [].concat(mainClippingParents, [rootBoundary]);\n var firstClippingParent = clippingParents[0];\n var clippingRect = clippingParents.reduce(function (accRect, clippingParent) {\n var rect = getClientRectFromMixedType(element, clippingParent, strategy);\n accRect.top = max(rect.top, accRect.top);\n accRect.right = min(rect.right, accRect.right);\n accRect.bottom = min(rect.bottom, accRect.bottom);\n accRect.left = max(rect.left, accRect.left);\n return accRect;\n }, getClientRectFromMixedType(element, firstClippingParent, strategy));\n clippingRect.width = clippingRect.right - clippingRect.left;\n clippingRect.height = clippingRect.bottom - clippingRect.top;\n clippingRect.x = clippingRect.left;\n clippingRect.y = clippingRect.top;\n return clippingRect;\n}","import getWindow from \"./getWindow.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getViewportRect(element, strategy) {\n var win = getWindow(element);\n var html = getDocumentElement(element);\n var visualViewport = win.visualViewport;\n var width = html.clientWidth;\n var height = html.clientHeight;\n var x = 0;\n var y = 0;\n\n if (visualViewport) {\n width = visualViewport.width;\n height = visualViewport.height;\n var layoutViewport = isLayoutViewport();\n\n if (layoutViewport || !layoutViewport && strategy === 'fixed') {\n x = visualViewport.offsetLeft;\n y = visualViewport.offsetTop;\n }\n }\n\n return {\n width: width,\n height: height,\n x: x + getWindowScrollBarX(element),\n y: y\n };\n}","import getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nimport { max } from \"../utils/math.js\"; // Gets the entire size of the scrollable document area, even extending outside\n// of the `` and `` rect bounds if horizontally scrollable\n\nexport default function getDocumentRect(element) {\n var _element$ownerDocumen;\n\n var html = getDocumentElement(element);\n var winScroll = getWindowScroll(element);\n var body = (_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body;\n var width = max(html.scrollWidth, html.clientWidth, body ? body.scrollWidth : 0, body ? body.clientWidth : 0);\n var height = max(html.scrollHeight, html.clientHeight, body ? body.scrollHeight : 0, body ? body.clientHeight : 0);\n var x = -winScroll.scrollLeft + getWindowScrollBarX(element);\n var y = -winScroll.scrollTop;\n\n if (getComputedStyle(body || html).direction === 'rtl') {\n x += max(html.clientWidth, body ? body.clientWidth : 0) - width;\n }\n\n return {\n width: width,\n height: height,\n x: x,\n y: y\n };\n}","import getBasePlacement from \"./getBasePlacement.js\";\nimport getVariation from \"./getVariation.js\";\nimport getMainAxisFromPlacement from \"./getMainAxisFromPlacement.js\";\nimport { top, right, bottom, left, start, end } from \"../enums.js\";\nexport default function computeOffsets(_ref) {\n var reference = _ref.reference,\n element = _ref.element,\n placement = _ref.placement;\n var basePlacement = placement ? getBasePlacement(placement) : null;\n var variation = placement ? 
getVariation(placement) : null;\n var commonX = reference.x + reference.width / 2 - element.width / 2;\n var commonY = reference.y + reference.height / 2 - element.height / 2;\n var offsets;\n\n switch (basePlacement) {\n case top:\n offsets = {\n x: commonX,\n y: reference.y - element.height\n };\n break;\n\n case bottom:\n offsets = {\n x: commonX,\n y: reference.y + reference.height\n };\n break;\n\n case right:\n offsets = {\n x: reference.x + reference.width,\n y: commonY\n };\n break;\n\n case left:\n offsets = {\n x: reference.x - element.width,\n y: commonY\n };\n break;\n\n default:\n offsets = {\n x: reference.x,\n y: reference.y\n };\n }\n\n var mainAxis = basePlacement ? getMainAxisFromPlacement(basePlacement) : null;\n\n if (mainAxis != null) {\n var len = mainAxis === 'y' ? 'height' : 'width';\n\n switch (variation) {\n case start:\n offsets[mainAxis] = offsets[mainAxis] - (reference[len] / 2 - element[len] / 2);\n break;\n\n case end:\n offsets[mainAxis] = offsets[mainAxis] + (reference[len] / 2 - element[len] / 2);\n break;\n\n default:\n }\n }\n\n return offsets;\n}","import getClippingRect from \"../dom-utils/getClippingRect.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getBoundingClientRect from \"../dom-utils/getBoundingClientRect.js\";\nimport computeOffsets from \"./computeOffsets.js\";\nimport rectToClientRect from \"./rectToClientRect.js\";\nimport { clippingParents, reference, popper, bottom, top, right, basePlacements, viewport } from \"../enums.js\";\nimport { isElement } from \"../dom-utils/instanceOf.js\";\nimport mergePaddingObject from \"./mergePaddingObject.js\";\nimport expandToHashMap from \"./expandToHashMap.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport default function detectOverflow(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n _options$placement = _options.placement,\n placement = _options$placement === void 0 ? state.placement : _options$placement,\n _options$strategy = _options.strategy,\n strategy = _options$strategy === void 0 ? state.strategy : _options$strategy,\n _options$boundary = _options.boundary,\n boundary = _options$boundary === void 0 ? clippingParents : _options$boundary,\n _options$rootBoundary = _options.rootBoundary,\n rootBoundary = _options$rootBoundary === void 0 ? viewport : _options$rootBoundary,\n _options$elementConte = _options.elementContext,\n elementContext = _options$elementConte === void 0 ? popper : _options$elementConte,\n _options$altBoundary = _options.altBoundary,\n altBoundary = _options$altBoundary === void 0 ? false : _options$altBoundary,\n _options$padding = _options.padding,\n padding = _options$padding === void 0 ? 0 : _options$padding;\n var paddingObject = mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n var altContext = elementContext === popper ? reference : popper;\n var popperRect = state.rects.popper;\n var element = state.elements[altBoundary ? altContext : elementContext];\n var clippingClientRect = getClippingRect(isElement(element) ? 
element : element.contextElement || getDocumentElement(state.elements.popper), boundary, rootBoundary, strategy);\n var referenceClientRect = getBoundingClientRect(state.elements.reference);\n var popperOffsets = computeOffsets({\n reference: referenceClientRect,\n element: popperRect,\n strategy: 'absolute',\n placement: placement\n });\n var popperClientRect = rectToClientRect(Object.assign({}, popperRect, popperOffsets));\n var elementClientRect = elementContext === popper ? popperClientRect : referenceClientRect; // positive = overflowing the clipping rect\n // 0 or negative = within the clipping rect\n\n var overflowOffsets = {\n top: clippingClientRect.top - elementClientRect.top + paddingObject.top,\n bottom: elementClientRect.bottom - clippingClientRect.bottom + paddingObject.bottom,\n left: clippingClientRect.left - elementClientRect.left + paddingObject.left,\n right: elementClientRect.right - clippingClientRect.right + paddingObject.right\n };\n var offsetData = state.modifiersData.offset; // Offsets can be applied only to the popper element\n\n if (elementContext === popper && offsetData) {\n var offset = offsetData[placement];\n Object.keys(overflowOffsets).forEach(function (key) {\n var multiply = [right, bottom].indexOf(key) >= 0 ? 1 : -1;\n var axis = [top, bottom].indexOf(key) >= 0 ? 'y' : 'x';\n overflowOffsets[key] += offset[axis] * multiply;\n });\n }\n\n return overflowOffsets;\n}","import getOppositePlacement from \"../utils/getOppositePlacement.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getOppositeVariationPlacement from \"../utils/getOppositeVariationPlacement.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport computeAutoPlacement from \"../utils/computeAutoPlacement.js\";\nimport { bottom, top, start, right, left, auto } from \"../enums.js\";\nimport getVariation from \"../utils/getVariation.js\"; // eslint-disable-next-line import/no-unused-modules\n\nfunction getExpandedFallbackPlacements(placement) {\n if (getBasePlacement(placement) === auto) {\n return [];\n }\n\n var oppositePlacement = getOppositePlacement(placement);\n return [getOppositeVariationPlacement(placement), oppositePlacement, getOppositeVariationPlacement(oppositePlacement)];\n}\n\nfunction flip(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n\n if (state.modifiersData[name]._skip) {\n return;\n }\n\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? true : _options$altAxis,\n specifiedFallbackPlacements = options.fallbackPlacements,\n padding = options.padding,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n _options$flipVariatio = options.flipVariations,\n flipVariations = _options$flipVariatio === void 0 ? true : _options$flipVariatio,\n allowedAutoPlacements = options.allowedAutoPlacements;\n var preferredPlacement = state.options.placement;\n var basePlacement = getBasePlacement(preferredPlacement);\n var isBasePlacement = basePlacement === preferredPlacement;\n var fallbackPlacements = specifiedFallbackPlacements || (isBasePlacement || !flipVariations ? 
[getOppositePlacement(preferredPlacement)] : getExpandedFallbackPlacements(preferredPlacement));\n var placements = [preferredPlacement].concat(fallbackPlacements).reduce(function (acc, placement) {\n return acc.concat(getBasePlacement(placement) === auto ? computeAutoPlacement(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n flipVariations: flipVariations,\n allowedAutoPlacements: allowedAutoPlacements\n }) : placement);\n }, []);\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var checksMap = new Map();\n var makeFallbackChecks = true;\n var firstFittingPlacement = placements[0];\n\n for (var i = 0; i < placements.length; i++) {\n var placement = placements[i];\n\n var _basePlacement = getBasePlacement(placement);\n\n var isStartVariation = getVariation(placement) === start;\n var isVertical = [top, bottom].indexOf(_basePlacement) >= 0;\n var len = isVertical ? 'width' : 'height';\n var overflow = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n altBoundary: altBoundary,\n padding: padding\n });\n var mainVariationSide = isVertical ? isStartVariation ? right : left : isStartVariation ? bottom : top;\n\n if (referenceRect[len] > popperRect[len]) {\n mainVariationSide = getOppositePlacement(mainVariationSide);\n }\n\n var altVariationSide = getOppositePlacement(mainVariationSide);\n var checks = [];\n\n if (checkMainAxis) {\n checks.push(overflow[_basePlacement] <= 0);\n }\n\n if (checkAltAxis) {\n checks.push(overflow[mainVariationSide] <= 0, overflow[altVariationSide] <= 0);\n }\n\n if (checks.every(function (check) {\n return check;\n })) {\n firstFittingPlacement = placement;\n makeFallbackChecks = false;\n break;\n }\n\n checksMap.set(placement, checks);\n }\n\n if (makeFallbackChecks) {\n // `2` may be desired in some cases – research later\n var numberOfChecks = flipVariations ? 3 : 1;\n\n var _loop = function _loop(_i) {\n var fittingPlacement = placements.find(function (placement) {\n var checks = checksMap.get(placement);\n\n if (checks) {\n return checks.slice(0, _i).every(function (check) {\n return check;\n });\n }\n });\n\n if (fittingPlacement) {\n firstFittingPlacement = fittingPlacement;\n return \"break\";\n }\n };\n\n for (var _i = numberOfChecks; _i > 0; _i--) {\n var _ret = _loop(_i);\n\n if (_ret === \"break\") break;\n }\n }\n\n if (state.placement !== firstFittingPlacement) {\n state.modifiersData[name]._skip = true;\n state.placement = firstFittingPlacement;\n state.reset = true;\n }\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'flip',\n enabled: true,\n phase: 'main',\n fn: flip,\n requiresIfExists: ['offset'],\n data: {\n _skip: false\n }\n};","import getVariation from \"./getVariation.js\";\nimport { variationPlacements, basePlacements, placements as allPlacements } from \"../enums.js\";\nimport detectOverflow from \"./detectOverflow.js\";\nimport getBasePlacement from \"./getBasePlacement.js\";\nexport default function computeAutoPlacement(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n placement = _options.placement,\n boundary = _options.boundary,\n rootBoundary = _options.rootBoundary,\n padding = _options.padding,\n flipVariations = _options.flipVariations,\n _options$allowedAutoP = _options.allowedAutoPlacements,\n allowedAutoPlacements = _options$allowedAutoP === void 0 ? 
allPlacements : _options$allowedAutoP;\n var variation = getVariation(placement);\n var placements = variation ? flipVariations ? variationPlacements : variationPlacements.filter(function (placement) {\n return getVariation(placement) === variation;\n }) : basePlacements;\n var allowedPlacements = placements.filter(function (placement) {\n return allowedAutoPlacements.indexOf(placement) >= 0;\n });\n\n if (allowedPlacements.length === 0) {\n allowedPlacements = placements;\n } // $FlowFixMe[incompatible-type]: Flow seems to have problems with two array unions...\n\n\n var overflows = allowedPlacements.reduce(function (acc, placement) {\n acc[placement] = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding\n })[getBasePlacement(placement)];\n return acc;\n }, {});\n return Object.keys(overflows).sort(function (a, b) {\n return overflows[a] - overflows[b];\n });\n}","import { top, bottom, left, right } from \"../enums.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\n\nfunction getSideOffsets(overflow, rect, preventedOffsets) {\n if (preventedOffsets === void 0) {\n preventedOffsets = {\n x: 0,\n y: 0\n };\n }\n\n return {\n top: overflow.top - rect.height - preventedOffsets.y,\n right: overflow.right - rect.width + preventedOffsets.x,\n bottom: overflow.bottom - rect.height + preventedOffsets.y,\n left: overflow.left - rect.width - preventedOffsets.x\n };\n}\n\nfunction isAnySideFullyClipped(overflow) {\n return [top, right, bottom, left].some(function (side) {\n return overflow[side] >= 0;\n });\n}\n\nfunction hide(_ref) {\n var state = _ref.state,\n name = _ref.name;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var preventedOffsets = state.modifiersData.preventOverflow;\n var referenceOverflow = detectOverflow(state, {\n elementContext: 'reference'\n });\n var popperAltOverflow = detectOverflow(state, {\n altBoundary: true\n });\n var referenceClippingOffsets = getSideOffsets(referenceOverflow, referenceRect);\n var popperEscapeOffsets = getSideOffsets(popperAltOverflow, popperRect, preventedOffsets);\n var isReferenceHidden = isAnySideFullyClipped(referenceClippingOffsets);\n var hasPopperEscaped = isAnySideFullyClipped(popperEscapeOffsets);\n state.modifiersData[name] = {\n referenceClippingOffsets: referenceClippingOffsets,\n popperEscapeOffsets: popperEscapeOffsets,\n isReferenceHidden: isReferenceHidden,\n hasPopperEscaped: hasPopperEscaped\n };\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-reference-hidden': isReferenceHidden,\n 'data-popper-escaped': hasPopperEscaped\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'hide',\n enabled: true,\n phase: 'main',\n requiresIfExists: ['preventOverflow'],\n fn: hide\n};","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport { top, left, right, placements } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport function distanceAndSkiddingToXY(placement, rects, offset) {\n var basePlacement = getBasePlacement(placement);\n var invertDistance = [left, top].indexOf(basePlacement) >= 0 ? -1 : 1;\n\n var _ref = typeof offset === 'function' ? offset(Object.assign({}, rects, {\n placement: placement\n })) : offset,\n skidding = _ref[0],\n distance = _ref[1];\n\n skidding = skidding || 0;\n distance = (distance || 0) * invertDistance;\n return [left, right].indexOf(basePlacement) >= 0 ? 
{\n x: distance,\n y: skidding\n } : {\n x: skidding,\n y: distance\n };\n}\n\nfunction offset(_ref2) {\n var state = _ref2.state,\n options = _ref2.options,\n name = _ref2.name;\n var _options$offset = options.offset,\n offset = _options$offset === void 0 ? [0, 0] : _options$offset;\n var data = placements.reduce(function (acc, placement) {\n acc[placement] = distanceAndSkiddingToXY(placement, state.rects, offset);\n return acc;\n }, {});\n var _data$state$placement = data[state.placement],\n x = _data$state$placement.x,\n y = _data$state$placement.y;\n\n if (state.modifiersData.popperOffsets != null) {\n state.modifiersData.popperOffsets.x += x;\n state.modifiersData.popperOffsets.y += y;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'offset',\n enabled: true,\n phase: 'main',\n requires: ['popperOffsets'],\n fn: offset\n};","import computeOffsets from \"../utils/computeOffsets.js\";\n\nfunction popperOffsets(_ref) {\n var state = _ref.state,\n name = _ref.name;\n // Offsets are the actual position the popper needs to have to be\n // properly positioned near its reference element\n // This is the most basic placement, and will be adjusted by\n // the modifiers in the next step\n state.modifiersData[name] = computeOffsets({\n reference: state.rects.reference,\n element: state.rects.popper,\n strategy: 'absolute',\n placement: state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'popperOffsets',\n enabled: true,\n phase: 'read',\n fn: popperOffsets,\n data: {}\n};","import { top, left, right, bottom, start } from \"../enums.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport getAltAxis from \"../utils/getAltAxis.js\";\nimport { within, withinMaxClamp } from \"../utils/within.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport getFreshSideObject from \"../utils/getFreshSideObject.js\";\nimport { min as mathMin, max as mathMax } from \"../utils/math.js\";\n\nfunction preventOverflow(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? false : _options$altAxis,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n padding = options.padding,\n _options$tether = options.tether,\n tether = _options$tether === void 0 ? true : _options$tether,\n _options$tetherOffset = options.tetherOffset,\n tetherOffset = _options$tetherOffset === void 0 ? 
0 : _options$tetherOffset;\n var overflow = detectOverflow(state, {\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n altBoundary: altBoundary\n });\n var basePlacement = getBasePlacement(state.placement);\n var variation = getVariation(state.placement);\n var isBasePlacement = !variation;\n var mainAxis = getMainAxisFromPlacement(basePlacement);\n var altAxis = getAltAxis(mainAxis);\n var popperOffsets = state.modifiersData.popperOffsets;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var tetherOffsetValue = typeof tetherOffset === 'function' ? tetherOffset(Object.assign({}, state.rects, {\n placement: state.placement\n })) : tetherOffset;\n var normalizedTetherOffsetValue = typeof tetherOffsetValue === 'number' ? {\n mainAxis: tetherOffsetValue,\n altAxis: tetherOffsetValue\n } : Object.assign({\n mainAxis: 0,\n altAxis: 0\n }, tetherOffsetValue);\n var offsetModifierState = state.modifiersData.offset ? state.modifiersData.offset[state.placement] : null;\n var data = {\n x: 0,\n y: 0\n };\n\n if (!popperOffsets) {\n return;\n }\n\n if (checkMainAxis) {\n var _offsetModifierState$;\n\n var mainSide = mainAxis === 'y' ? top : left;\n var altSide = mainAxis === 'y' ? bottom : right;\n var len = mainAxis === 'y' ? 'height' : 'width';\n var offset = popperOffsets[mainAxis];\n var min = offset + overflow[mainSide];\n var max = offset - overflow[altSide];\n var additive = tether ? -popperRect[len] / 2 : 0;\n var minLen = variation === start ? referenceRect[len] : popperRect[len];\n var maxLen = variation === start ? -popperRect[len] : -referenceRect[len]; // We need to include the arrow in the calculation so the arrow doesn't go\n // outside the reference bounds\n\n var arrowElement = state.elements.arrow;\n var arrowRect = tether && arrowElement ? getLayoutRect(arrowElement) : {\n width: 0,\n height: 0\n };\n var arrowPaddingObject = state.modifiersData['arrow#persistent'] ? state.modifiersData['arrow#persistent'].padding : getFreshSideObject();\n var arrowPaddingMin = arrowPaddingObject[mainSide];\n var arrowPaddingMax = arrowPaddingObject[altSide]; // If the reference length is smaller than the arrow length, we don't want\n // to include its full size in the calculation. If the reference is small\n // and near the edge of a boundary, the popper can overflow even if the\n // reference is not overflowing as well (e.g. virtual elements with no\n // width or height)\n\n var arrowLen = within(0, referenceRect[len], arrowRect[len]);\n var minOffset = isBasePlacement ? referenceRect[len] / 2 - additive - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis : minLen - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis;\n var maxOffset = isBasePlacement ? -referenceRect[len] / 2 + additive + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis : maxLen + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis;\n var arrowOffsetParent = state.elements.arrow && getOffsetParent(state.elements.arrow);\n var clientOffset = arrowOffsetParent ? mainAxis === 'y' ? arrowOffsetParent.clientTop || 0 : arrowOffsetParent.clientLeft || 0 : 0;\n var offsetModifierValue = (_offsetModifierState$ = offsetModifierState == null ? void 0 : offsetModifierState[mainAxis]) != null ? _offsetModifierState$ : 0;\n var tetherMin = offset + minOffset - offsetModifierValue - clientOffset;\n var tetherMax = offset + maxOffset - offsetModifierValue;\n var preventedOffset = within(tether ? 
mathMin(min, tetherMin) : min, offset, tether ? mathMax(max, tetherMax) : max);\n popperOffsets[mainAxis] = preventedOffset;\n data[mainAxis] = preventedOffset - offset;\n }\n\n if (checkAltAxis) {\n var _offsetModifierState$2;\n\n var _mainSide = mainAxis === 'x' ? top : left;\n\n var _altSide = mainAxis === 'x' ? bottom : right;\n\n var _offset = popperOffsets[altAxis];\n\n var _len = altAxis === 'y' ? 'height' : 'width';\n\n var _min = _offset + overflow[_mainSide];\n\n var _max = _offset - overflow[_altSide];\n\n var isOriginSide = [top, left].indexOf(basePlacement) !== -1;\n\n var _offsetModifierValue = (_offsetModifierState$2 = offsetModifierState == null ? void 0 : offsetModifierState[altAxis]) != null ? _offsetModifierState$2 : 0;\n\n var _tetherMin = isOriginSide ? _min : _offset - referenceRect[_len] - popperRect[_len] - _offsetModifierValue + normalizedTetherOffsetValue.altAxis;\n\n var _tetherMax = isOriginSide ? _offset + referenceRect[_len] + popperRect[_len] - _offsetModifierValue - normalizedTetherOffsetValue.altAxis : _max;\n\n var _preventedOffset = tether && isOriginSide ? withinMaxClamp(_tetherMin, _offset, _tetherMax) : within(tether ? _tetherMin : _min, _offset, tether ? _tetherMax : _max);\n\n popperOffsets[altAxis] = _preventedOffset;\n data[altAxis] = _preventedOffset - _offset;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'preventOverflow',\n enabled: true,\n phase: 'main',\n fn: preventOverflow,\n requiresIfExists: ['offset']\n};","export default function getAltAxis(axis) {\n return axis === 'x' ? 'y' : 'x';\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getNodeScroll from \"./getNodeScroll.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport { round } from \"../utils/math.js\";\n\nfunction isElementScaled(element) {\n var rect = element.getBoundingClientRect();\n var scaleX = round(rect.width) / element.offsetWidth || 1;\n var scaleY = round(rect.height) / element.offsetHeight || 1;\n return scaleX !== 1 || scaleY !== 1;\n} // Returns the composite rect of an element relative to its offsetParent.\n// Composite means it takes into account transforms as well as layout.\n\n\nexport default function getCompositeRect(elementOrVirtualElement, offsetParent, isFixed) {\n if (isFixed === void 0) {\n isFixed = false;\n }\n\n var isOffsetParentAnElement = isHTMLElement(offsetParent);\n var offsetParentIsScaled = isHTMLElement(offsetParent) && isElementScaled(offsetParent);\n var documentElement = getDocumentElement(offsetParent);\n var rect = getBoundingClientRect(elementOrVirtualElement, offsetParentIsScaled, isFixed);\n var scroll = {\n scrollLeft: 0,\n scrollTop: 0\n };\n var offsets = {\n x: 0,\n y: 0\n };\n\n if (isOffsetParentAnElement || !isOffsetParentAnElement && !isFixed) {\n if (getNodeName(offsetParent) !== 'body' || // https://github.com/popperjs/popper-core/issues/1078\n isScrollParent(documentElement)) {\n scroll = getNodeScroll(offsetParent);\n }\n\n if (isHTMLElement(offsetParent)) {\n offsets = getBoundingClientRect(offsetParent, true);\n offsets.x += offsetParent.clientLeft;\n offsets.y += offsetParent.clientTop;\n } else if (documentElement) {\n offsets.x = getWindowScrollBarX(documentElement);\n }\n }\n\n 
return {\n x: rect.left + scroll.scrollLeft - offsets.x,\n y: rect.top + scroll.scrollTop - offsets.y,\n width: rect.width,\n height: rect.height\n };\n}","import getWindowScroll from \"./getWindowScroll.js\";\nimport getWindow from \"./getWindow.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getHTMLElementScroll from \"./getHTMLElementScroll.js\";\nexport default function getNodeScroll(node) {\n if (node === getWindow(node) || !isHTMLElement(node)) {\n return getWindowScroll(node);\n } else {\n return getHTMLElementScroll(node);\n }\n}","export default function getHTMLElementScroll(element) {\n return {\n scrollLeft: element.scrollLeft,\n scrollTop: element.scrollTop\n };\n}","import { modifierPhases } from \"../enums.js\"; // source: https://stackoverflow.com/questions/49875255\n\nfunction order(modifiers) {\n var map = new Map();\n var visited = new Set();\n var result = [];\n modifiers.forEach(function (modifier) {\n map.set(modifier.name, modifier);\n }); // On visiting object, check for its dependencies and visit them recursively\n\n function sort(modifier) {\n visited.add(modifier.name);\n var requires = [].concat(modifier.requires || [], modifier.requiresIfExists || []);\n requires.forEach(function (dep) {\n if (!visited.has(dep)) {\n var depModifier = map.get(dep);\n\n if (depModifier) {\n sort(depModifier);\n }\n }\n });\n result.push(modifier);\n }\n\n modifiers.forEach(function (modifier) {\n if (!visited.has(modifier.name)) {\n // check for visited object\n sort(modifier);\n }\n });\n return result;\n}\n\nexport default function orderModifiers(modifiers) {\n // order based on dependencies\n var orderedModifiers = order(modifiers); // order based on phase\n\n return modifierPhases.reduce(function (acc, phase) {\n return acc.concat(orderedModifiers.filter(function (modifier) {\n return modifier.phase === phase;\n }));\n }, []);\n}","import getCompositeRect from \"./dom-utils/getCompositeRect.js\";\nimport getLayoutRect from \"./dom-utils/getLayoutRect.js\";\nimport listScrollParents from \"./dom-utils/listScrollParents.js\";\nimport getOffsetParent from \"./dom-utils/getOffsetParent.js\";\nimport orderModifiers from \"./utils/orderModifiers.js\";\nimport debounce from \"./utils/debounce.js\";\nimport mergeByName from \"./utils/mergeByName.js\";\nimport detectOverflow from \"./utils/detectOverflow.js\";\nimport { isElement } from \"./dom-utils/instanceOf.js\";\nvar DEFAULT_OPTIONS = {\n placement: 'bottom',\n modifiers: [],\n strategy: 'absolute'\n};\n\nfunction areValidElements() {\n for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n return !args.some(function (element) {\n return !(element && typeof element.getBoundingClientRect === 'function');\n });\n}\n\nexport function popperGenerator(generatorOptions) {\n if (generatorOptions === void 0) {\n generatorOptions = {};\n }\n\n var _generatorOptions = generatorOptions,\n _generatorOptions$def = _generatorOptions.defaultModifiers,\n defaultModifiers = _generatorOptions$def === void 0 ? [] : _generatorOptions$def,\n _generatorOptions$def2 = _generatorOptions.defaultOptions,\n defaultOptions = _generatorOptions$def2 === void 0 ? 
DEFAULT_OPTIONS : _generatorOptions$def2;\n return function createPopper(reference, popper, options) {\n if (options === void 0) {\n options = defaultOptions;\n }\n\n var state = {\n placement: 'bottom',\n orderedModifiers: [],\n options: Object.assign({}, DEFAULT_OPTIONS, defaultOptions),\n modifiersData: {},\n elements: {\n reference: reference,\n popper: popper\n },\n attributes: {},\n styles: {}\n };\n var effectCleanupFns = [];\n var isDestroyed = false;\n var instance = {\n state: state,\n setOptions: function setOptions(setOptionsAction) {\n var options = typeof setOptionsAction === 'function' ? setOptionsAction(state.options) : setOptionsAction;\n cleanupModifierEffects();\n state.options = Object.assign({}, defaultOptions, state.options, options);\n state.scrollParents = {\n reference: isElement(reference) ? listScrollParents(reference) : reference.contextElement ? listScrollParents(reference.contextElement) : [],\n popper: listScrollParents(popper)\n }; // Orders the modifiers based on their dependencies and `phase`\n // properties\n\n var orderedModifiers = orderModifiers(mergeByName([].concat(defaultModifiers, state.options.modifiers))); // Strip out disabled modifiers\n\n state.orderedModifiers = orderedModifiers.filter(function (m) {\n return m.enabled;\n });\n runModifierEffects();\n return instance.update();\n },\n // Sync update – it will always be executed, even if not necessary. This\n // is useful for low frequency updates where sync behavior simplifies the\n // logic.\n // For high frequency updates (e.g. `resize` and `scroll` events), always\n // prefer the async Popper#update method\n forceUpdate: function forceUpdate() {\n if (isDestroyed) {\n return;\n }\n\n var _state$elements = state.elements,\n reference = _state$elements.reference,\n popper = _state$elements.popper; // Don't proceed if `reference` or `popper` are not valid elements\n // anymore\n\n if (!areValidElements(reference, popper)) {\n return;\n } // Store the reference and popper rects to be read by modifiers\n\n\n state.rects = {\n reference: getCompositeRect(reference, getOffsetParent(popper), state.options.strategy === 'fixed'),\n popper: getLayoutRect(popper)\n }; // Modifiers have the ability to reset the current update cycle. The\n // most common use case for this is the `flip` modifier changing the\n // placement, which then needs to re-run all the modifiers, because the\n // logic was previously ran for the previous placement and is therefore\n // stale/incorrect\n\n state.reset = false;\n state.placement = state.options.placement; // On each update cycle, the `modifiersData` property for each modifier\n // is filled with the initial data specified by the modifier. This means\n // it doesn't persist and is fresh on each update.\n // To ensure persistent data, use `${name}#persistent`\n\n state.orderedModifiers.forEach(function (modifier) {\n return state.modifiersData[modifier.name] = Object.assign({}, modifier.data);\n });\n\n for (var index = 0; index < state.orderedModifiers.length; index++) {\n if (state.reset === true) {\n state.reset = false;\n index = -1;\n continue;\n }\n\n var _state$orderedModifie = state.orderedModifiers[index],\n fn = _state$orderedModifie.fn,\n _state$orderedModifie2 = _state$orderedModifie.options,\n _options = _state$orderedModifie2 === void 0 ? 
{} : _state$orderedModifie2,\n name = _state$orderedModifie.name;\n\n if (typeof fn === 'function') {\n state = fn({\n state: state,\n options: _options,\n name: name,\n instance: instance\n }) || state;\n }\n }\n },\n // Async and optimistically optimized update – it will not be executed if\n // not necessary (debounced to run at most once-per-tick)\n update: debounce(function () {\n return new Promise(function (resolve) {\n instance.forceUpdate();\n resolve(state);\n });\n }),\n destroy: function destroy() {\n cleanupModifierEffects();\n isDestroyed = true;\n }\n };\n\n if (!areValidElements(reference, popper)) {\n return instance;\n }\n\n instance.setOptions(options).then(function (state) {\n if (!isDestroyed && options.onFirstUpdate) {\n options.onFirstUpdate(state);\n }\n }); // Modifiers have the ability to execute arbitrary code before the first\n // update cycle runs. They will be executed in the same order as the update\n // cycle. This is useful when a modifier adds some persistent data that\n // other modifiers need to use, but the modifier is run after the dependent\n // one.\n\n function runModifierEffects() {\n state.orderedModifiers.forEach(function (_ref) {\n var name = _ref.name,\n _ref$options = _ref.options,\n options = _ref$options === void 0 ? {} : _ref$options,\n effect = _ref.effect;\n\n if (typeof effect === 'function') {\n var cleanupFn = effect({\n state: state,\n name: name,\n instance: instance,\n options: options\n });\n\n var noopFn = function noopFn() {};\n\n effectCleanupFns.push(cleanupFn || noopFn);\n }\n });\n }\n\n function cleanupModifierEffects() {\n effectCleanupFns.forEach(function (fn) {\n return fn();\n });\n effectCleanupFns = [];\n }\n\n return instance;\n };\n}\nexport var createPopper = /*#__PURE__*/popperGenerator(); // eslint-disable-next-line import/no-unused-modules\n\nexport { detectOverflow };","export default function debounce(fn) {\n var pending;\n return function () {\n if (!pending) {\n pending = new Promise(function (resolve) {\n Promise.resolve().then(function () {\n pending = undefined;\n resolve(fn());\n });\n });\n }\n\n return pending;\n };\n}","export default function mergeByName(modifiers) {\n var merged = modifiers.reduce(function (merged, current) {\n var existing = merged[current.name];\n merged[current.name] = existing ? 
Object.assign({}, existing, current, {\n options: Object.assign({}, existing.options, current.options),\n data: Object.assign({}, existing.data, current.data)\n }) : current;\n return merged;\n }, {}); // IE11 does not support Object.values\n\n return Object.keys(merged).map(function (key) {\n return merged[key];\n });\n}","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nimport offset from \"./modifiers/offset.js\";\nimport flip from \"./modifiers/flip.js\";\nimport preventOverflow from \"./modifiers/preventOverflow.js\";\nimport arrow from \"./modifiers/arrow.js\";\nimport hide from \"./modifiers/hide.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles, offset, flip, preventOverflow, arrow, hide];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow }; // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper as createPopperLite } from \"./popper-lite.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport * from \"./modifiers/index.js\";","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow };","/*!\n * Bootstrap v5.3.2 (https://getbootstrap.com/)\n * Copyright 2011-2023 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n */\nimport * as Popper from '@popperjs/core';\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/data.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n/**\n * Constants\n */\n\nconst elementMap = new Map();\nconst Data = {\n set(element, key, instance) {\n if (!elementMap.has(element)) {\n elementMap.set(element, new Map());\n }\n const instanceMap = elementMap.get(element);\n\n // make it clear we only want one instance per element\n // can be removed later when multiple key/instances are fine to be used\n if (!instanceMap.has(key) && instanceMap.size !== 0) {\n // eslint-disable-next-line no-console\n console.error(`Bootstrap doesn't allow more than one instance per element. 
Bound instance: ${Array.from(instanceMap.keys())[0]}.`);\n return;\n }\n instanceMap.set(key, instance);\n },\n get(element, key) {\n if (elementMap.has(element)) {\n return elementMap.get(element).get(key) || null;\n }\n return null;\n },\n remove(element, key) {\n if (!elementMap.has(element)) {\n return;\n }\n const instanceMap = elementMap.get(element);\n instanceMap.delete(key);\n\n // free up element references if there are no instances left for an element\n if (instanceMap.size === 0) {\n elementMap.delete(element);\n }\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/index.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst MAX_UID = 1000000;\nconst MILLISECONDS_MULTIPLIER = 1000;\nconst TRANSITION_END = 'transitionend';\n\n/**\n * Properly escape IDs selectors to handle weird IDs\n * @param {string} selector\n * @returns {string}\n */\nconst parseSelector = selector => {\n if (selector && window.CSS && window.CSS.escape) {\n // document.querySelector needs escaping to handle IDs (html5+) containing for instance /\n selector = selector.replace(/#([^\\s\"#']+)/g, (match, id) => `#${CSS.escape(id)}`);\n }\n return selector;\n};\n\n// Shout-out Angus Croll (https://goo.gl/pxwQGp)\nconst toType = object => {\n if (object === null || object === undefined) {\n return `${object}`;\n }\n return Object.prototype.toString.call(object).match(/\\s([a-z]+)/i)[1].toLowerCase();\n};\n\n/**\n * Public Util API\n */\n\nconst getUID = prefix => {\n do {\n prefix += Math.floor(Math.random() * MAX_UID);\n } while (document.getElementById(prefix));\n return prefix;\n};\nconst getTransitionDurationFromElement = element => {\n if (!element) {\n return 0;\n }\n\n // Get transition-duration of the element\n let {\n transitionDuration,\n transitionDelay\n } = window.getComputedStyle(element);\n const floatTransitionDuration = Number.parseFloat(transitionDuration);\n const floatTransitionDelay = Number.parseFloat(transitionDelay);\n\n // Return 0 if element or transition duration is not found\n if (!floatTransitionDuration && !floatTransitionDelay) {\n return 0;\n }\n\n // If multiple durations are defined, take the first\n transitionDuration = transitionDuration.split(',')[0];\n transitionDelay = transitionDelay.split(',')[0];\n return (Number.parseFloat(transitionDuration) + Number.parseFloat(transitionDelay)) * MILLISECONDS_MULTIPLIER;\n};\nconst triggerTransitionEnd = element => {\n element.dispatchEvent(new Event(TRANSITION_END));\n};\nconst isElement = object => {\n if (!object || typeof object !== 'object') {\n return false;\n }\n if (typeof object.jquery !== 'undefined') {\n object = object[0];\n }\n return typeof object.nodeType !== 'undefined';\n};\nconst getElement = object => {\n // it's a jQuery object or a node element\n if (isElement(object)) {\n return object.jquery ? 
object[0] : object;\n }\n if (typeof object === 'string' && object.length > 0) {\n return document.querySelector(parseSelector(object));\n }\n return null;\n};\nconst isVisible = element => {\n if (!isElement(element) || element.getClientRects().length === 0) {\n return false;\n }\n const elementIsVisible = getComputedStyle(element).getPropertyValue('visibility') === 'visible';\n // Handle `details` element as its content may falsie appear visible when it is closed\n const closedDetails = element.closest('details:not([open])');\n if (!closedDetails) {\n return elementIsVisible;\n }\n if (closedDetails !== element) {\n const summary = element.closest('summary');\n if (summary && summary.parentNode !== closedDetails) {\n return false;\n }\n if (summary === null) {\n return false;\n }\n }\n return elementIsVisible;\n};\nconst isDisabled = element => {\n if (!element || element.nodeType !== Node.ELEMENT_NODE) {\n return true;\n }\n if (element.classList.contains('disabled')) {\n return true;\n }\n if (typeof element.disabled !== 'undefined') {\n return element.disabled;\n }\n return element.hasAttribute('disabled') && element.getAttribute('disabled') !== 'false';\n};\nconst findShadowRoot = element => {\n if (!document.documentElement.attachShadow) {\n return null;\n }\n\n // Can find the shadow root otherwise it'll return the document\n if (typeof element.getRootNode === 'function') {\n const root = element.getRootNode();\n return root instanceof ShadowRoot ? root : null;\n }\n if (element instanceof ShadowRoot) {\n return element;\n }\n\n // when we don't find a shadow root\n if (!element.parentNode) {\n return null;\n }\n return findShadowRoot(element.parentNode);\n};\nconst noop = () => {};\n\n/**\n * Trick to restart an element's animation\n *\n * @param {HTMLElement} element\n * @return void\n *\n * @see https://www.charistheo.io/blog/2021/02/restart-a-css-animation-with-javascript/#restarting-a-css-animation\n */\nconst reflow = element => {\n element.offsetHeight; // eslint-disable-line no-unused-expressions\n};\n\nconst getjQuery = () => {\n if (window.jQuery && !document.body.hasAttribute('data-bs-no-jquery')) {\n return window.jQuery;\n }\n return null;\n};\nconst DOMContentLoadedCallbacks = [];\nconst onDOMContentLoaded = callback => {\n if (document.readyState === 'loading') {\n // add listener on the first call when the document is in loading state\n if (!DOMContentLoadedCallbacks.length) {\n document.addEventListener('DOMContentLoaded', () => {\n for (const callback of DOMContentLoadedCallbacks) {\n callback();\n }\n });\n }\n DOMContentLoadedCallbacks.push(callback);\n } else {\n callback();\n }\n};\nconst isRTL = () => document.documentElement.dir === 'rtl';\nconst defineJQueryPlugin = plugin => {\n onDOMContentLoaded(() => {\n const $ = getjQuery();\n /* istanbul ignore if */\n if ($) {\n const name = plugin.NAME;\n const JQUERY_NO_CONFLICT = $.fn[name];\n $.fn[name] = plugin.jQueryInterface;\n $.fn[name].Constructor = plugin;\n $.fn[name].noConflict = () => {\n $.fn[name] = JQUERY_NO_CONFLICT;\n return plugin.jQueryInterface;\n };\n }\n });\n};\nconst execute = (possibleCallback, args = [], defaultValue = possibleCallback) => {\n return typeof possibleCallback === 'function' ? 
possibleCallback(...args) : defaultValue;\n};\nconst executeAfterTransition = (callback, transitionElement, waitForTransition = true) => {\n if (!waitForTransition) {\n execute(callback);\n return;\n }\n const durationPadding = 5;\n const emulatedDuration = getTransitionDurationFromElement(transitionElement) + durationPadding;\n let called = false;\n const handler = ({\n target\n }) => {\n if (target !== transitionElement) {\n return;\n }\n called = true;\n transitionElement.removeEventListener(TRANSITION_END, handler);\n execute(callback);\n };\n transitionElement.addEventListener(TRANSITION_END, handler);\n setTimeout(() => {\n if (!called) {\n triggerTransitionEnd(transitionElement);\n }\n }, emulatedDuration);\n};\n\n/**\n * Return the previous/next element of a list.\n *\n * @param {array} list The list of elements\n * @param activeElement The active element\n * @param shouldGetNext Choose to get next or previous element\n * @param isCycleAllowed\n * @return {Element|elem} The proper element\n */\nconst getNextActiveElement = (list, activeElement, shouldGetNext, isCycleAllowed) => {\n const listLength = list.length;\n let index = list.indexOf(activeElement);\n\n // if the element does not exist in the list return an element\n // depending on the direction and if cycle is allowed\n if (index === -1) {\n return !shouldGetNext && isCycleAllowed ? list[listLength - 1] : list[0];\n }\n index += shouldGetNext ? 1 : -1;\n if (isCycleAllowed) {\n index = (index + listLength) % listLength;\n }\n return list[Math.max(0, Math.min(index, listLength - 1))];\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/event-handler.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst namespaceRegex = /[^.]*(?=\\..*)\\.|.*/;\nconst stripNameRegex = /\\..*/;\nconst stripUidRegex = /::\\d+$/;\nconst eventRegistry = {}; // Events storage\nlet uidEvent = 1;\nconst customEvents = {\n mouseenter: 'mouseover',\n mouseleave: 'mouseout'\n};\nconst nativeEvents = new Set(['click', 'dblclick', 'mouseup', 'mousedown', 'contextmenu', 'mousewheel', 'DOMMouseScroll', 'mouseover', 'mouseout', 'mousemove', 'selectstart', 'selectend', 'keydown', 'keypress', 'keyup', 'orientationchange', 'touchstart', 'touchmove', 'touchend', 'touchcancel', 'pointerdown', 'pointermove', 'pointerup', 'pointerleave', 'pointercancel', 'gesturestart', 'gesturechange', 'gestureend', 'focus', 'blur', 'change', 'reset', 'select', 'submit', 'focusin', 'focusout', 'load', 'unload', 'beforeunload', 'resize', 'move', 'DOMContentLoaded', 'readystatechange', 'error', 'abort', 'scroll']);\n\n/**\n * Private methods\n */\n\nfunction makeEventUid(element, uid) {\n return uid && `${uid}::${uidEvent++}` || element.uidEvent || uidEvent++;\n}\nfunction getElementEvents(element) {\n const uid = makeEventUid(element);\n element.uidEvent = uid;\n eventRegistry[uid] = eventRegistry[uid] || {};\n return eventRegistry[uid];\n}\nfunction bootstrapHandler(element, fn) {\n return function handler(event) {\n hydrateObj(event, {\n delegateTarget: element\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, fn);\n }\n return fn.apply(element, [event]);\n };\n}\nfunction bootstrapDelegationHandler(element, selector, fn) {\n return function handler(event) {\n const domElements = element.querySelectorAll(selector);\n for (let {\n target\n } = event; target && 
target !== this; target = target.parentNode) {\n for (const domElement of domElements) {\n if (domElement !== target) {\n continue;\n }\n hydrateObj(event, {\n delegateTarget: target\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, selector, fn);\n }\n return fn.apply(target, [event]);\n }\n }\n };\n}\nfunction findHandler(events, callable, delegationSelector = null) {\n return Object.values(events).find(event => event.callable === callable && event.delegationSelector === delegationSelector);\n}\nfunction normalizeParameters(originalTypeEvent, handler, delegationFunction) {\n const isDelegated = typeof handler === 'string';\n // TODO: tooltip passes `false` instead of selector, so we need to check\n const callable = isDelegated ? delegationFunction : handler || delegationFunction;\n let typeEvent = getTypeEvent(originalTypeEvent);\n if (!nativeEvents.has(typeEvent)) {\n typeEvent = originalTypeEvent;\n }\n return [isDelegated, callable, typeEvent];\n}\nfunction addHandler(element, originalTypeEvent, handler, delegationFunction, oneOff) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n let [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n\n // in case of mouseenter or mouseleave wrap the handler within a function that checks for its DOM position\n // this prevents the handler from being dispatched the same way as mouseover or mouseout does\n if (originalTypeEvent in customEvents) {\n const wrapFunction = fn => {\n return function (event) {\n if (!event.relatedTarget || event.relatedTarget !== event.delegateTarget && !event.delegateTarget.contains(event.relatedTarget)) {\n return fn.call(this, event);\n }\n };\n };\n callable = wrapFunction(callable);\n }\n const events = getElementEvents(element);\n const handlers = events[typeEvent] || (events[typeEvent] = {});\n const previousFunction = findHandler(handlers, callable, isDelegated ? handler : null);\n if (previousFunction) {\n previousFunction.oneOff = previousFunction.oneOff && oneOff;\n return;\n }\n const uid = makeEventUid(callable, originalTypeEvent.replace(namespaceRegex, ''));\n const fn = isDelegated ? bootstrapDelegationHandler(element, handler, callable) : bootstrapHandler(element, callable);\n fn.delegationSelector = isDelegated ? 
handler : null;\n fn.callable = callable;\n fn.oneOff = oneOff;\n fn.uidEvent = uid;\n handlers[uid] = fn;\n element.addEventListener(typeEvent, fn, isDelegated);\n}\nfunction removeHandler(element, events, typeEvent, handler, delegationSelector) {\n const fn = findHandler(events[typeEvent], handler, delegationSelector);\n if (!fn) {\n return;\n }\n element.removeEventListener(typeEvent, fn, Boolean(delegationSelector));\n delete events[typeEvent][fn.uidEvent];\n}\nfunction removeNamespacedHandlers(element, events, typeEvent, namespace) {\n const storeElementEvent = events[typeEvent] || {};\n for (const [handlerKey, event] of Object.entries(storeElementEvent)) {\n if (handlerKey.includes(namespace)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n}\nfunction getTypeEvent(event) {\n // allow to get the native events from namespaced events ('click.bs.button' --> 'click')\n event = event.replace(stripNameRegex, '');\n return customEvents[event] || event;\n}\nconst EventHandler = {\n on(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, false);\n },\n one(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, true);\n },\n off(element, originalTypeEvent, handler, delegationFunction) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n const [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n const inNamespace = typeEvent !== originalTypeEvent;\n const events = getElementEvents(element);\n const storeElementEvent = events[typeEvent] || {};\n const isNamespace = originalTypeEvent.startsWith('.');\n if (typeof callable !== 'undefined') {\n // Simplest case: handler is passed, remove that listener ONLY.\n if (!Object.keys(storeElementEvent).length) {\n return;\n }\n removeHandler(element, events, typeEvent, callable, isDelegated ? 
handler : null);\n return;\n }\n if (isNamespace) {\n for (const elementEvent of Object.keys(events)) {\n removeNamespacedHandlers(element, events, elementEvent, originalTypeEvent.slice(1));\n }\n }\n for (const [keyHandlers, event] of Object.entries(storeElementEvent)) {\n const handlerKey = keyHandlers.replace(stripUidRegex, '');\n if (!inNamespace || originalTypeEvent.includes(handlerKey)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n },\n trigger(element, event, args) {\n if (typeof event !== 'string' || !element) {\n return null;\n }\n const $ = getjQuery();\n const typeEvent = getTypeEvent(event);\n const inNamespace = event !== typeEvent;\n let jQueryEvent = null;\n let bubbles = true;\n let nativeDispatch = true;\n let defaultPrevented = false;\n if (inNamespace && $) {\n jQueryEvent = $.Event(event, args);\n $(element).trigger(jQueryEvent);\n bubbles = !jQueryEvent.isPropagationStopped();\n nativeDispatch = !jQueryEvent.isImmediatePropagationStopped();\n defaultPrevented = jQueryEvent.isDefaultPrevented();\n }\n const evt = hydrateObj(new Event(event, {\n bubbles,\n cancelable: true\n }), args);\n if (defaultPrevented) {\n evt.preventDefault();\n }\n if (nativeDispatch) {\n element.dispatchEvent(evt);\n }\n if (evt.defaultPrevented && jQueryEvent) {\n jQueryEvent.preventDefault();\n }\n return evt;\n }\n};\nfunction hydrateObj(obj, meta = {}) {\n for (const [key, value] of Object.entries(meta)) {\n try {\n obj[key] = value;\n } catch (_unused) {\n Object.defineProperty(obj, key, {\n configurable: true,\n get() {\n return value;\n }\n });\n }\n }\n return obj;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/manipulator.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nfunction normalizeData(value) {\n if (value === 'true') {\n return true;\n }\n if (value === 'false') {\n return false;\n }\n if (value === Number(value).toString()) {\n return Number(value);\n }\n if (value === '' || value === 'null') {\n return null;\n }\n if (typeof value !== 'string') {\n return value;\n }\n try {\n return JSON.parse(decodeURIComponent(value));\n } catch (_unused) {\n return value;\n }\n}\nfunction normalizeDataKey(key) {\n return key.replace(/[A-Z]/g, chr => `-${chr.toLowerCase()}`);\n}\nconst Manipulator = {\n setDataAttribute(element, key, value) {\n element.setAttribute(`data-bs-${normalizeDataKey(key)}`, value);\n },\n removeDataAttribute(element, key) {\n element.removeAttribute(`data-bs-${normalizeDataKey(key)}`);\n },\n getDataAttributes(element) {\n if (!element) {\n return {};\n }\n const attributes = {};\n const bsKeys = Object.keys(element.dataset).filter(key => key.startsWith('bs') && !key.startsWith('bsConfig'));\n for (const key of bsKeys) {\n let pureKey = key.replace(/^bs/, '');\n pureKey = pureKey.charAt(0).toLowerCase() + pureKey.slice(1, pureKey.length);\n attributes[pureKey] = normalizeData(element.dataset[key]);\n }\n return attributes;\n },\n getDataAttribute(element, key) {\n return normalizeData(element.getAttribute(`data-bs-${normalizeDataKey(key)}`));\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/config.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * 
Class definition\n */\n\nclass Config {\n // Getters\n static get Default() {\n return {};\n }\n static get DefaultType() {\n return {};\n }\n static get NAME() {\n throw new Error('You have to implement the static method \"NAME\", for each component!');\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n return config;\n }\n _mergeConfigObj(config, element) {\n const jsonConfig = isElement(element) ? Manipulator.getDataAttribute(element, 'config') : {}; // try to parse\n\n return {\n ...this.constructor.Default,\n ...(typeof jsonConfig === 'object' ? jsonConfig : {}),\n ...(isElement(element) ? Manipulator.getDataAttributes(element) : {}),\n ...(typeof config === 'object' ? config : {})\n };\n }\n _typeCheckConfig(config, configTypes = this.constructor.DefaultType) {\n for (const [property, expectedTypes] of Object.entries(configTypes)) {\n const value = config[property];\n const valueType = isElement(value) ? 'element' : toType(value);\n if (!new RegExp(expectedTypes).test(valueType)) {\n throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option \"${property}\" provided type \"${valueType}\" but expected type \"${expectedTypes}\".`);\n }\n }\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap base-component.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst VERSION = '5.3.2';\n\n/**\n * Class definition\n */\n\nclass BaseComponent extends Config {\n constructor(element, config) {\n super();\n element = getElement(element);\n if (!element) {\n return;\n }\n this._element = element;\n this._config = this._getConfig(config);\n Data.set(this._element, this.constructor.DATA_KEY, this);\n }\n\n // Public\n dispose() {\n Data.remove(this._element, this.constructor.DATA_KEY);\n EventHandler.off(this._element, this.constructor.EVENT_KEY);\n for (const propertyName of Object.getOwnPropertyNames(this)) {\n this[propertyName] = null;\n }\n }\n _queueCallback(callback, element, isAnimated = true) {\n executeAfterTransition(callback, element, isAnimated);\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config, this._element);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n\n // Static\n static getInstance(element) {\n return Data.get(getElement(element), this.DATA_KEY);\n }\n static getOrCreateInstance(element, config = {}) {\n return this.getInstance(element) || new this(element, typeof config === 'object' ? 
config : null);\n }\n static get VERSION() {\n return VERSION;\n }\n static get DATA_KEY() {\n return `bs.${this.NAME}`;\n }\n static get EVENT_KEY() {\n return `.${this.DATA_KEY}`;\n }\n static eventName(name) {\n return `${name}${this.EVENT_KEY}`;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/selector-engine.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst getSelector = element => {\n let selector = element.getAttribute('data-bs-target');\n if (!selector || selector === '#') {\n let hrefAttribute = element.getAttribute('href');\n\n // The only valid content that could double as a selector are IDs or classes,\n // so everything starting with `#` or `.`. If a \"real\" URL is used as the selector,\n // `document.querySelector` will rightfully complain it is invalid.\n // See https://github.com/twbs/bootstrap/issues/32273\n if (!hrefAttribute || !hrefAttribute.includes('#') && !hrefAttribute.startsWith('.')) {\n return null;\n }\n\n // Just in case some CMS puts out a full URL with the anchor appended\n if (hrefAttribute.includes('#') && !hrefAttribute.startsWith('#')) {\n hrefAttribute = `#${hrefAttribute.split('#')[1]}`;\n }\n selector = hrefAttribute && hrefAttribute !== '#' ? parseSelector(hrefAttribute.trim()) : null;\n }\n return selector;\n};\nconst SelectorEngine = {\n find(selector, element = document.documentElement) {\n return [].concat(...Element.prototype.querySelectorAll.call(element, selector));\n },\n findOne(selector, element = document.documentElement) {\n return Element.prototype.querySelector.call(element, selector);\n },\n children(element, selector) {\n return [].concat(...element.children).filter(child => child.matches(selector));\n },\n parents(element, selector) {\n const parents = [];\n let ancestor = element.parentNode.closest(selector);\n while (ancestor) {\n parents.push(ancestor);\n ancestor = ancestor.parentNode.closest(selector);\n }\n return parents;\n },\n prev(element, selector) {\n let previous = element.previousElementSibling;\n while (previous) {\n if (previous.matches(selector)) {\n return [previous];\n }\n previous = previous.previousElementSibling;\n }\n return [];\n },\n // TODO: this is now unused; remove later along with prev()\n next(element, selector) {\n let next = element.nextElementSibling;\n while (next) {\n if (next.matches(selector)) {\n return [next];\n }\n next = next.nextElementSibling;\n }\n return [];\n },\n focusableChildren(element) {\n const focusables = ['a', 'button', 'input', 'textarea', 'select', 'details', '[tabindex]', '[contenteditable=\"true\"]'].map(selector => `${selector}:not([tabindex^=\"-\"])`).join(',');\n return this.find(focusables, element).filter(el => !isDisabled(el) && isVisible(el));\n },\n getSelectorFromElement(element) {\n const selector = getSelector(element);\n if (selector) {\n return SelectorEngine.findOne(selector) ? selector : null;\n }\n return null;\n },\n getElementFromSelector(element) {\n const selector = getSelector(element);\n return selector ? SelectorEngine.findOne(selector) : null;\n },\n getMultipleElementsFromSelector(element) {\n const selector = getSelector(element);\n return selector ? 
SelectorEngine.find(selector) : [];\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/component-functions.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst enableDismissTrigger = (component, method = 'hide') => {\n const clickEvent = `click.dismiss${component.EVENT_KEY}`;\n const name = component.NAME;\n EventHandler.on(document, clickEvent, `[data-bs-dismiss=\"${name}\"]`, function (event) {\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n const target = SelectorEngine.getElementFromSelector(this) || this.closest(`.${name}`);\n const instance = component.getOrCreateInstance(target);\n\n // Method argument is left, for Alert and only, as it doesn't implement the 'hide' method\n instance[method]();\n });\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap alert.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$f = 'alert';\nconst DATA_KEY$a = 'bs.alert';\nconst EVENT_KEY$b = `.${DATA_KEY$a}`;\nconst EVENT_CLOSE = `close${EVENT_KEY$b}`;\nconst EVENT_CLOSED = `closed${EVENT_KEY$b}`;\nconst CLASS_NAME_FADE$5 = 'fade';\nconst CLASS_NAME_SHOW$8 = 'show';\n\n/**\n * Class definition\n */\n\nclass Alert extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$f;\n }\n\n // Public\n close() {\n const closeEvent = EventHandler.trigger(this._element, EVENT_CLOSE);\n if (closeEvent.defaultPrevented) {\n return;\n }\n this._element.classList.remove(CLASS_NAME_SHOW$8);\n const isAnimated = this._element.classList.contains(CLASS_NAME_FADE$5);\n this._queueCallback(() => this._destroyElement(), this._element, isAnimated);\n }\n\n // Private\n _destroyElement() {\n this._element.remove();\n EventHandler.trigger(this._element, EVENT_CLOSED);\n this.dispose();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Alert.getOrCreateInstance(this);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nenableDismissTrigger(Alert, 'close');\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Alert);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap button.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$e = 'button';\nconst DATA_KEY$9 = 'bs.button';\nconst EVENT_KEY$a = `.${DATA_KEY$9}`;\nconst DATA_API_KEY$6 = '.data-api';\nconst CLASS_NAME_ACTIVE$3 = 'active';\nconst SELECTOR_DATA_TOGGLE$5 = '[data-bs-toggle=\"button\"]';\nconst EVENT_CLICK_DATA_API$6 = `click${EVENT_KEY$a}${DATA_API_KEY$6}`;\n\n/**\n * Class definition\n */\n\nclass Button extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$e;\n }\n\n // Public\n toggle() {\n // Toggle class and sync the `aria-pressed` attribute with the return value of the `.toggle()` method\n 
this._element.setAttribute('aria-pressed', this._element.classList.toggle(CLASS_NAME_ACTIVE$3));\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Button.getOrCreateInstance(this);\n if (config === 'toggle') {\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$6, SELECTOR_DATA_TOGGLE$5, event => {\n event.preventDefault();\n const button = event.target.closest(SELECTOR_DATA_TOGGLE$5);\n const data = Button.getOrCreateInstance(button);\n data.toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Button);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/swipe.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$d = 'swipe';\nconst EVENT_KEY$9 = '.bs.swipe';\nconst EVENT_TOUCHSTART = `touchstart${EVENT_KEY$9}`;\nconst EVENT_TOUCHMOVE = `touchmove${EVENT_KEY$9}`;\nconst EVENT_TOUCHEND = `touchend${EVENT_KEY$9}`;\nconst EVENT_POINTERDOWN = `pointerdown${EVENT_KEY$9}`;\nconst EVENT_POINTERUP = `pointerup${EVENT_KEY$9}`;\nconst POINTER_TYPE_TOUCH = 'touch';\nconst POINTER_TYPE_PEN = 'pen';\nconst CLASS_NAME_POINTER_EVENT = 'pointer-event';\nconst SWIPE_THRESHOLD = 40;\nconst Default$c = {\n endCallback: null,\n leftCallback: null,\n rightCallback: null\n};\nconst DefaultType$c = {\n endCallback: '(function|null)',\n leftCallback: '(function|null)',\n rightCallback: '(function|null)'\n};\n\n/**\n * Class definition\n */\n\nclass Swipe extends Config {\n constructor(element, config) {\n super();\n this._element = element;\n if (!element || !Swipe.isSupported()) {\n return;\n }\n this._config = this._getConfig(config);\n this._deltaX = 0;\n this._supportPointerEvents = Boolean(window.PointerEvent);\n this._initEvents();\n }\n\n // Getters\n static get Default() {\n return Default$c;\n }\n static get DefaultType() {\n return DefaultType$c;\n }\n static get NAME() {\n return NAME$d;\n }\n\n // Public\n dispose() {\n EventHandler.off(this._element, EVENT_KEY$9);\n }\n\n // Private\n _start(event) {\n if (!this._supportPointerEvents) {\n this._deltaX = event.touches[0].clientX;\n return;\n }\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX;\n }\n }\n _end(event) {\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX - this._deltaX;\n }\n this._handleSwipe();\n execute(this._config.endCallback);\n }\n _move(event) {\n this._deltaX = event.touches && event.touches.length > 1 ? 0 : event.touches[0].clientX - this._deltaX;\n }\n _handleSwipe() {\n const absDeltaX = Math.abs(this._deltaX);\n if (absDeltaX <= SWIPE_THRESHOLD) {\n return;\n }\n const direction = absDeltaX / this._deltaX;\n this._deltaX = 0;\n if (!direction) {\n return;\n }\n execute(direction > 0 ? 
this._config.rightCallback : this._config.leftCallback);\n }\n _initEvents() {\n if (this._supportPointerEvents) {\n EventHandler.on(this._element, EVENT_POINTERDOWN, event => this._start(event));\n EventHandler.on(this._element, EVENT_POINTERUP, event => this._end(event));\n this._element.classList.add(CLASS_NAME_POINTER_EVENT);\n } else {\n EventHandler.on(this._element, EVENT_TOUCHSTART, event => this._start(event));\n EventHandler.on(this._element, EVENT_TOUCHMOVE, event => this._move(event));\n EventHandler.on(this._element, EVENT_TOUCHEND, event => this._end(event));\n }\n }\n _eventIsPointerPenTouch(event) {\n return this._supportPointerEvents && (event.pointerType === POINTER_TYPE_PEN || event.pointerType === POINTER_TYPE_TOUCH);\n }\n\n // Static\n static isSupported() {\n return 'ontouchstart' in document.documentElement || navigator.maxTouchPoints > 0;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap carousel.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$c = 'carousel';\nconst DATA_KEY$8 = 'bs.carousel';\nconst EVENT_KEY$8 = `.${DATA_KEY$8}`;\nconst DATA_API_KEY$5 = '.data-api';\nconst ARROW_LEFT_KEY$1 = 'ArrowLeft';\nconst ARROW_RIGHT_KEY$1 = 'ArrowRight';\nconst TOUCHEVENT_COMPAT_WAIT = 500; // Time for mouse compat events to fire after touch\n\nconst ORDER_NEXT = 'next';\nconst ORDER_PREV = 'prev';\nconst DIRECTION_LEFT = 'left';\nconst DIRECTION_RIGHT = 'right';\nconst EVENT_SLIDE = `slide${EVENT_KEY$8}`;\nconst EVENT_SLID = `slid${EVENT_KEY$8}`;\nconst EVENT_KEYDOWN$1 = `keydown${EVENT_KEY$8}`;\nconst EVENT_MOUSEENTER$1 = `mouseenter${EVENT_KEY$8}`;\nconst EVENT_MOUSELEAVE$1 = `mouseleave${EVENT_KEY$8}`;\nconst EVENT_DRAG_START = `dragstart${EVENT_KEY$8}`;\nconst EVENT_LOAD_DATA_API$3 = `load${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst EVENT_CLICK_DATA_API$5 = `click${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst CLASS_NAME_CAROUSEL = 'carousel';\nconst CLASS_NAME_ACTIVE$2 = 'active';\nconst CLASS_NAME_SLIDE = 'slide';\nconst CLASS_NAME_END = 'carousel-item-end';\nconst CLASS_NAME_START = 'carousel-item-start';\nconst CLASS_NAME_NEXT = 'carousel-item-next';\nconst CLASS_NAME_PREV = 'carousel-item-prev';\nconst SELECTOR_ACTIVE = '.active';\nconst SELECTOR_ITEM = '.carousel-item';\nconst SELECTOR_ACTIVE_ITEM = SELECTOR_ACTIVE + SELECTOR_ITEM;\nconst SELECTOR_ITEM_IMG = '.carousel-item img';\nconst SELECTOR_INDICATORS = '.carousel-indicators';\nconst SELECTOR_DATA_SLIDE = '[data-bs-slide], [data-bs-slide-to]';\nconst SELECTOR_DATA_RIDE = '[data-bs-ride=\"carousel\"]';\nconst KEY_TO_DIRECTION = {\n [ARROW_LEFT_KEY$1]: DIRECTION_RIGHT,\n [ARROW_RIGHT_KEY$1]: DIRECTION_LEFT\n};\nconst Default$b = {\n interval: 5000,\n keyboard: true,\n pause: 'hover',\n ride: false,\n touch: true,\n wrap: true\n};\nconst DefaultType$b = {\n interval: '(number|boolean)',\n // TODO:v6 remove boolean support\n keyboard: 'boolean',\n pause: '(string|boolean)',\n ride: '(boolean|string)',\n touch: 'boolean',\n wrap: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Carousel extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._interval = null;\n this._activeElement = null;\n this._isSliding = false;\n this.touchTimeout = null;\n this._swipeHelper = null;\n this._indicatorsElement = SelectorEngine.findOne(SELECTOR_INDICATORS, this._element);\n 
this._addEventListeners();\n if (this._config.ride === CLASS_NAME_CAROUSEL) {\n this.cycle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$b;\n }\n static get DefaultType() {\n return DefaultType$b;\n }\n static get NAME() {\n return NAME$c;\n }\n\n // Public\n next() {\n this._slide(ORDER_NEXT);\n }\n nextWhenVisible() {\n // FIXME TODO use `document.visibilityState`\n // Don't call next when the page isn't visible\n // or the carousel or its parent isn't visible\n if (!document.hidden && isVisible(this._element)) {\n this.next();\n }\n }\n prev() {\n this._slide(ORDER_PREV);\n }\n pause() {\n if (this._isSliding) {\n triggerTransitionEnd(this._element);\n }\n this._clearInterval();\n }\n cycle() {\n this._clearInterval();\n this._updateInterval();\n this._interval = setInterval(() => this.nextWhenVisible(), this._config.interval);\n }\n _maybeEnableCycle() {\n if (!this._config.ride) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.cycle());\n return;\n }\n this.cycle();\n }\n to(index) {\n const items = this._getItems();\n if (index > items.length - 1 || index < 0) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.to(index));\n return;\n }\n const activeIndex = this._getItemIndex(this._getActive());\n if (activeIndex === index) {\n return;\n }\n const order = index > activeIndex ? ORDER_NEXT : ORDER_PREV;\n this._slide(order, items[index]);\n }\n dispose() {\n if (this._swipeHelper) {\n this._swipeHelper.dispose();\n }\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n config.defaultInterval = config.interval;\n return config;\n }\n _addEventListeners() {\n if (this._config.keyboard) {\n EventHandler.on(this._element, EVENT_KEYDOWN$1, event => this._keydown(event));\n }\n if (this._config.pause === 'hover') {\n EventHandler.on(this._element, EVENT_MOUSEENTER$1, () => this.pause());\n EventHandler.on(this._element, EVENT_MOUSELEAVE$1, () => this._maybeEnableCycle());\n }\n if (this._config.touch && Swipe.isSupported()) {\n this._addTouchEventListeners();\n }\n }\n _addTouchEventListeners() {\n for (const img of SelectorEngine.find(SELECTOR_ITEM_IMG, this._element)) {\n EventHandler.on(img, EVENT_DRAG_START, event => event.preventDefault());\n }\n const endCallBack = () => {\n if (this._config.pause !== 'hover') {\n return;\n }\n\n // If it's a touch-enabled device, mouseenter/leave are fired as\n // part of the mouse compatibility events on first tap - the carousel\n // would stop cycling until user tapped out of it;\n // here, we listen for touchend, explicitly pause the carousel\n // (as if it's the second time we tap on it, mouseenter compat event\n // is NOT fired) and after a timeout (to allow for mouse compatibility\n // events to fire) we explicitly restart cycling\n\n this.pause();\n if (this.touchTimeout) {\n clearTimeout(this.touchTimeout);\n }\n this.touchTimeout = setTimeout(() => this._maybeEnableCycle(), TOUCHEVENT_COMPAT_WAIT + this._config.interval);\n };\n const swipeConfig = {\n leftCallback: () => this._slide(this._directionToOrder(DIRECTION_LEFT)),\n rightCallback: () => this._slide(this._directionToOrder(DIRECTION_RIGHT)),\n endCallback: endCallBack\n };\n this._swipeHelper = new Swipe(this._element, swipeConfig);\n }\n _keydown(event) {\n if (/input|textarea/i.test(event.target.tagName)) {\n return;\n }\n const direction = KEY_TO_DIRECTION[event.key];\n if (direction) {\n event.preventDefault();\n 
this._slide(this._directionToOrder(direction));\n }\n }\n _getItemIndex(element) {\n return this._getItems().indexOf(element);\n }\n _setActiveIndicatorElement(index) {\n if (!this._indicatorsElement) {\n return;\n }\n const activeIndicator = SelectorEngine.findOne(SELECTOR_ACTIVE, this._indicatorsElement);\n activeIndicator.classList.remove(CLASS_NAME_ACTIVE$2);\n activeIndicator.removeAttribute('aria-current');\n const newActiveIndicator = SelectorEngine.findOne(`[data-bs-slide-to=\"${index}\"]`, this._indicatorsElement);\n if (newActiveIndicator) {\n newActiveIndicator.classList.add(CLASS_NAME_ACTIVE$2);\n newActiveIndicator.setAttribute('aria-current', 'true');\n }\n }\n _updateInterval() {\n const element = this._activeElement || this._getActive();\n if (!element) {\n return;\n }\n const elementInterval = Number.parseInt(element.getAttribute('data-bs-interval'), 10);\n this._config.interval = elementInterval || this._config.defaultInterval;\n }\n _slide(order, element = null) {\n if (this._isSliding) {\n return;\n }\n const activeElement = this._getActive();\n const isNext = order === ORDER_NEXT;\n const nextElement = element || getNextActiveElement(this._getItems(), activeElement, isNext, this._config.wrap);\n if (nextElement === activeElement) {\n return;\n }\n const nextElementIndex = this._getItemIndex(nextElement);\n const triggerEvent = eventName => {\n return EventHandler.trigger(this._element, eventName, {\n relatedTarget: nextElement,\n direction: this._orderToDirection(order),\n from: this._getItemIndex(activeElement),\n to: nextElementIndex\n });\n };\n const slideEvent = triggerEvent(EVENT_SLIDE);\n if (slideEvent.defaultPrevented) {\n return;\n }\n if (!activeElement || !nextElement) {\n // Some weirdness is happening, so we bail\n // TODO: change tests that use empty divs to avoid this check\n return;\n }\n const isCycling = Boolean(this._interval);\n this.pause();\n this._isSliding = true;\n this._setActiveIndicatorElement(nextElementIndex);\n this._activeElement = nextElement;\n const directionalClassName = isNext ? CLASS_NAME_START : CLASS_NAME_END;\n const orderClassName = isNext ? CLASS_NAME_NEXT : CLASS_NAME_PREV;\n nextElement.classList.add(orderClassName);\n reflow(nextElement);\n activeElement.classList.add(directionalClassName);\n nextElement.classList.add(directionalClassName);\n const completeCallBack = () => {\n nextElement.classList.remove(directionalClassName, orderClassName);\n nextElement.classList.add(CLASS_NAME_ACTIVE$2);\n activeElement.classList.remove(CLASS_NAME_ACTIVE$2, orderClassName, directionalClassName);\n this._isSliding = false;\n triggerEvent(EVENT_SLID);\n };\n this._queueCallback(completeCallBack, activeElement, this._isAnimated());\n if (isCycling) {\n this.cycle();\n }\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_SLIDE);\n }\n _getActive() {\n return SelectorEngine.findOne(SELECTOR_ACTIVE_ITEM, this._element);\n }\n _getItems() {\n return SelectorEngine.find(SELECTOR_ITEM, this._element);\n }\n _clearInterval() {\n if (this._interval) {\n clearInterval(this._interval);\n this._interval = null;\n }\n }\n _directionToOrder(direction) {\n if (isRTL()) {\n return direction === DIRECTION_LEFT ? ORDER_PREV : ORDER_NEXT;\n }\n return direction === DIRECTION_LEFT ? ORDER_NEXT : ORDER_PREV;\n }\n _orderToDirection(order) {\n if (isRTL()) {\n return order === ORDER_PREV ? DIRECTION_LEFT : DIRECTION_RIGHT;\n }\n return order === ORDER_PREV ? 
DIRECTION_RIGHT : DIRECTION_LEFT;\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Carousel.getOrCreateInstance(this, config);\n if (typeof config === 'number') {\n data.to(config);\n return;\n }\n if (typeof config === 'string') {\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$5, SELECTOR_DATA_SLIDE, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (!target || !target.classList.contains(CLASS_NAME_CAROUSEL)) {\n return;\n }\n event.preventDefault();\n const carousel = Carousel.getOrCreateInstance(target);\n const slideIndex = this.getAttribute('data-bs-slide-to');\n if (slideIndex) {\n carousel.to(slideIndex);\n carousel._maybeEnableCycle();\n return;\n }\n if (Manipulator.getDataAttribute(this, 'slide') === 'next') {\n carousel.next();\n carousel._maybeEnableCycle();\n return;\n }\n carousel.prev();\n carousel._maybeEnableCycle();\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$3, () => {\n const carousels = SelectorEngine.find(SELECTOR_DATA_RIDE);\n for (const carousel of carousels) {\n Carousel.getOrCreateInstance(carousel);\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Carousel);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap collapse.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$b = 'collapse';\nconst DATA_KEY$7 = 'bs.collapse';\nconst EVENT_KEY$7 = `.${DATA_KEY$7}`;\nconst DATA_API_KEY$4 = '.data-api';\nconst EVENT_SHOW$6 = `show${EVENT_KEY$7}`;\nconst EVENT_SHOWN$6 = `shown${EVENT_KEY$7}`;\nconst EVENT_HIDE$6 = `hide${EVENT_KEY$7}`;\nconst EVENT_HIDDEN$6 = `hidden${EVENT_KEY$7}`;\nconst EVENT_CLICK_DATA_API$4 = `click${EVENT_KEY$7}${DATA_API_KEY$4}`;\nconst CLASS_NAME_SHOW$7 = 'show';\nconst CLASS_NAME_COLLAPSE = 'collapse';\nconst CLASS_NAME_COLLAPSING = 'collapsing';\nconst CLASS_NAME_COLLAPSED = 'collapsed';\nconst CLASS_NAME_DEEPER_CHILDREN = `:scope .${CLASS_NAME_COLLAPSE} .${CLASS_NAME_COLLAPSE}`;\nconst CLASS_NAME_HORIZONTAL = 'collapse-horizontal';\nconst WIDTH = 'width';\nconst HEIGHT = 'height';\nconst SELECTOR_ACTIVES = '.collapse.show, .collapse.collapsing';\nconst SELECTOR_DATA_TOGGLE$4 = '[data-bs-toggle=\"collapse\"]';\nconst Default$a = {\n parent: null,\n toggle: true\n};\nconst DefaultType$a = {\n parent: '(null|element)',\n toggle: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Collapse extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isTransitioning = false;\n this._triggerArray = [];\n const toggleList = SelectorEngine.find(SELECTOR_DATA_TOGGLE$4);\n for (const elem of toggleList) {\n const selector = SelectorEngine.getSelectorFromElement(elem);\n const filterElement = SelectorEngine.find(selector).filter(foundElement => foundElement === this._element);\n if (selector !== null && filterElement.length) {\n this._triggerArray.push(elem);\n }\n }\n this._initializeChildren();\n if (!this._config.parent) {\n this._addAriaAndCollapsedClass(this._triggerArray, this._isShown());\n }\n if (this._config.toggle) {\n this.toggle();\n }\n }\n\n // Getters\n static get Default() {\n return 
Default$a;\n }\n static get DefaultType() {\n return DefaultType$a;\n }\n static get NAME() {\n return NAME$b;\n }\n\n // Public\n toggle() {\n if (this._isShown()) {\n this.hide();\n } else {\n this.show();\n }\n }\n show() {\n if (this._isTransitioning || this._isShown()) {\n return;\n }\n let activeChildren = [];\n\n // find active children\n if (this._config.parent) {\n activeChildren = this._getFirstLevelChildren(SELECTOR_ACTIVES).filter(element => element !== this._element).map(element => Collapse.getOrCreateInstance(element, {\n toggle: false\n }));\n }\n if (activeChildren.length && activeChildren[0]._isTransitioning) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_SHOW$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n for (const activeInstance of activeChildren) {\n activeInstance.hide();\n }\n const dimension = this._getDimension();\n this._element.classList.remove(CLASS_NAME_COLLAPSE);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.style[dimension] = 0;\n this._addAriaAndCollapsedClass(this._triggerArray, true);\n this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n this._element.style[dimension] = '';\n EventHandler.trigger(this._element, EVENT_SHOWN$6);\n };\n const capitalizedDimension = dimension[0].toUpperCase() + dimension.slice(1);\n const scrollSize = `scroll${capitalizedDimension}`;\n this._queueCallback(complete, this._element, true);\n this._element.style[dimension] = `${this._element[scrollSize]}px`;\n }\n hide() {\n if (this._isTransitioning || !this._isShown()) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_HIDE$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n const dimension = this._getDimension();\n this._element.style[dimension] = `${this._element.getBoundingClientRect()[dimension]}px`;\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.classList.remove(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n for (const trigger of this._triggerArray) {\n const element = SelectorEngine.getElementFromSelector(trigger);\n if (element && !this._isShown(element)) {\n this._addAriaAndCollapsedClass([trigger], false);\n }\n }\n this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE);\n EventHandler.trigger(this._element, EVENT_HIDDEN$6);\n };\n this._element.style[dimension] = '';\n this._queueCallback(complete, this._element, true);\n }\n _isShown(element = this._element) {\n return element.classList.contains(CLASS_NAME_SHOW$7);\n }\n\n // Private\n _configAfterMerge(config) {\n config.toggle = Boolean(config.toggle); // Coerce string values\n config.parent = getElement(config.parent);\n return config;\n }\n _getDimension() {\n return this._element.classList.contains(CLASS_NAME_HORIZONTAL) ? 
WIDTH : HEIGHT;\n }\n _initializeChildren() {\n if (!this._config.parent) {\n return;\n }\n const children = this._getFirstLevelChildren(SELECTOR_DATA_TOGGLE$4);\n for (const element of children) {\n const selected = SelectorEngine.getElementFromSelector(element);\n if (selected) {\n this._addAriaAndCollapsedClass([element], this._isShown(selected));\n }\n }\n }\n _getFirstLevelChildren(selector) {\n const children = SelectorEngine.find(CLASS_NAME_DEEPER_CHILDREN, this._config.parent);\n // remove children if greater depth\n return SelectorEngine.find(selector, this._config.parent).filter(element => !children.includes(element));\n }\n _addAriaAndCollapsedClass(triggerArray, isOpen) {\n if (!triggerArray.length) {\n return;\n }\n for (const element of triggerArray) {\n element.classList.toggle(CLASS_NAME_COLLAPSED, !isOpen);\n element.setAttribute('aria-expanded', isOpen);\n }\n }\n\n // Static\n static jQueryInterface(config) {\n const _config = {};\n if (typeof config === 'string' && /show|hide/.test(config)) {\n _config.toggle = false;\n }\n return this.each(function () {\n const data = Collapse.getOrCreateInstance(this, _config);\n if (typeof config === 'string') {\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$4, SELECTOR_DATA_TOGGLE$4, function (event) {\n // preventDefault only for elements (which change the URL) not inside the collapsible element\n if (event.target.tagName === 'A' || event.delegateTarget && event.delegateTarget.tagName === 'A') {\n event.preventDefault();\n }\n for (const element of SelectorEngine.getMultipleElementsFromSelector(this)) {\n Collapse.getOrCreateInstance(element, {\n toggle: false\n }).toggle();\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Collapse);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dropdown.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$a = 'dropdown';\nconst DATA_KEY$6 = 'bs.dropdown';\nconst EVENT_KEY$6 = `.${DATA_KEY$6}`;\nconst DATA_API_KEY$3 = '.data-api';\nconst ESCAPE_KEY$2 = 'Escape';\nconst TAB_KEY$1 = 'Tab';\nconst ARROW_UP_KEY$1 = 'ArrowUp';\nconst ARROW_DOWN_KEY$1 = 'ArrowDown';\nconst RIGHT_MOUSE_BUTTON = 2; // MouseEvent.button value for the secondary button, usually the right button\n\nconst EVENT_HIDE$5 = `hide${EVENT_KEY$6}`;\nconst EVENT_HIDDEN$5 = `hidden${EVENT_KEY$6}`;\nconst EVENT_SHOW$5 = `show${EVENT_KEY$6}`;\nconst EVENT_SHOWN$5 = `shown${EVENT_KEY$6}`;\nconst EVENT_CLICK_DATA_API$3 = `click${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYDOWN_DATA_API = `keydown${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYUP_DATA_API = `keyup${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst CLASS_NAME_SHOW$6 = 'show';\nconst CLASS_NAME_DROPUP = 'dropup';\nconst CLASS_NAME_DROPEND = 'dropend';\nconst CLASS_NAME_DROPSTART = 'dropstart';\nconst CLASS_NAME_DROPUP_CENTER = 'dropup-center';\nconst CLASS_NAME_DROPDOWN_CENTER = 'dropdown-center';\nconst SELECTOR_DATA_TOGGLE$3 = '[data-bs-toggle=\"dropdown\"]:not(.disabled):not(:disabled)';\nconst SELECTOR_DATA_TOGGLE_SHOWN = `${SELECTOR_DATA_TOGGLE$3}.${CLASS_NAME_SHOW$6}`;\nconst SELECTOR_MENU = '.dropdown-menu';\nconst SELECTOR_NAVBAR = '.navbar';\nconst SELECTOR_NAVBAR_NAV = 
'.navbar-nav';\nconst SELECTOR_VISIBLE_ITEMS = '.dropdown-menu .dropdown-item:not(.disabled):not(:disabled)';\nconst PLACEMENT_TOP = isRTL() ? 'top-end' : 'top-start';\nconst PLACEMENT_TOPEND = isRTL() ? 'top-start' : 'top-end';\nconst PLACEMENT_BOTTOM = isRTL() ? 'bottom-end' : 'bottom-start';\nconst PLACEMENT_BOTTOMEND = isRTL() ? 'bottom-start' : 'bottom-end';\nconst PLACEMENT_RIGHT = isRTL() ? 'left-start' : 'right-start';\nconst PLACEMENT_LEFT = isRTL() ? 'right-start' : 'left-start';\nconst PLACEMENT_TOPCENTER = 'top';\nconst PLACEMENT_BOTTOMCENTER = 'bottom';\nconst Default$9 = {\n autoClose: true,\n boundary: 'clippingParents',\n display: 'dynamic',\n offset: [0, 2],\n popperConfig: null,\n reference: 'toggle'\n};\nconst DefaultType$9 = {\n autoClose: '(boolean|string)',\n boundary: '(string|element)',\n display: 'string',\n offset: '(array|string|function)',\n popperConfig: '(null|object|function)',\n reference: '(string|element|object)'\n};\n\n/**\n * Class definition\n */\n\nclass Dropdown extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._popper = null;\n this._parent = this._element.parentNode; // dropdown wrapper\n // TODO: v6 revert #37011 & change markup https://getbootstrap.com/docs/5.3/forms/input-group/\n this._menu = SelectorEngine.next(this._element, SELECTOR_MENU)[0] || SelectorEngine.prev(this._element, SELECTOR_MENU)[0] || SelectorEngine.findOne(SELECTOR_MENU, this._parent);\n this._inNavbar = this._detectNavbar();\n }\n\n // Getters\n static get Default() {\n return Default$9;\n }\n static get DefaultType() {\n return DefaultType$9;\n }\n static get NAME() {\n return NAME$a;\n }\n\n // Public\n toggle() {\n return this._isShown() ? this.hide() : this.show();\n }\n show() {\n if (isDisabled(this._element) || this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$5, relatedTarget);\n if (showEvent.defaultPrevented) {\n return;\n }\n this._createPopper();\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement && !this._parent.closest(SELECTOR_NAVBAR_NAV)) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n this._element.focus();\n this._element.setAttribute('aria-expanded', true);\n this._menu.classList.add(CLASS_NAME_SHOW$6);\n this._element.classList.add(CLASS_NAME_SHOW$6);\n EventHandler.trigger(this._element, EVENT_SHOWN$5, relatedTarget);\n }\n hide() {\n if (isDisabled(this._element) || !this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n this._completeHide(relatedTarget);\n }\n dispose() {\n if (this._popper) {\n this._popper.destroy();\n }\n super.dispose();\n }\n update() {\n this._inNavbar = this._detectNavbar();\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Private\n _completeHide(relatedTarget) {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$5, relatedTarget);\n if (hideEvent.defaultPrevented) {\n return;\n }\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of 
[].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n if (this._popper) {\n this._popper.destroy();\n }\n this._menu.classList.remove(CLASS_NAME_SHOW$6);\n this._element.classList.remove(CLASS_NAME_SHOW$6);\n this._element.setAttribute('aria-expanded', 'false');\n Manipulator.removeDataAttribute(this._menu, 'popper');\n EventHandler.trigger(this._element, EVENT_HIDDEN$5, relatedTarget);\n }\n _getConfig(config) {\n config = super._getConfig(config);\n if (typeof config.reference === 'object' && !isElement(config.reference) && typeof config.reference.getBoundingClientRect !== 'function') {\n // Popper virtual elements require a getBoundingClientRect method\n throw new TypeError(`${NAME$a.toUpperCase()}: Option \"reference\" provided type \"object\" without a required \"getBoundingClientRect\" method.`);\n }\n return config;\n }\n _createPopper() {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s dropdowns require Popper (https://popper.js.org)');\n }\n let referenceElement = this._element;\n if (this._config.reference === 'parent') {\n referenceElement = this._parent;\n } else if (isElement(this._config.reference)) {\n referenceElement = getElement(this._config.reference);\n } else if (typeof this._config.reference === 'object') {\n referenceElement = this._config.reference;\n }\n const popperConfig = this._getPopperConfig();\n this._popper = Popper.createPopper(referenceElement, this._menu, popperConfig);\n }\n _isShown() {\n return this._menu.classList.contains(CLASS_NAME_SHOW$6);\n }\n _getPlacement() {\n const parentDropdown = this._parent;\n if (parentDropdown.classList.contains(CLASS_NAME_DROPEND)) {\n return PLACEMENT_RIGHT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPSTART)) {\n return PLACEMENT_LEFT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP_CENTER)) {\n return PLACEMENT_TOPCENTER;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPDOWN_CENTER)) {\n return PLACEMENT_BOTTOMCENTER;\n }\n\n // We need to trim the value because custom properties can also include spaces\n const isEnd = getComputedStyle(this._menu).getPropertyValue('--bs-position').trim() === 'end';\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP)) {\n return isEnd ? PLACEMENT_TOPEND : PLACEMENT_TOP;\n }\n return isEnd ? 
PLACEMENT_BOTTOMEND : PLACEMENT_BOTTOM;\n }\n _detectNavbar() {\n return this._element.closest(SELECTOR_NAVBAR) !== null;\n }\n _getOffset() {\n const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _getPopperConfig() {\n const defaultBsPopperConfig = {\n placement: this._getPlacement(),\n modifiers: [{\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }]\n };\n\n // Disable Popper if we have a static display or Dropdown is in Navbar\n if (this._inNavbar || this._config.display === 'static') {\n Manipulator.setDataAttribute(this._menu, 'popper', 'static'); // TODO: v6 remove\n defaultBsPopperConfig.modifiers = [{\n name: 'applyStyles',\n enabled: false\n }];\n }\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _selectMenuItem({\n key,\n target\n }) {\n const items = SelectorEngine.find(SELECTOR_VISIBLE_ITEMS, this._menu).filter(element => isVisible(element));\n if (!items.length) {\n return;\n }\n\n // if target isn't included in items (e.g. when expanding the dropdown)\n // allow cycling to get the last item in case key equals ARROW_UP_KEY\n getNextActiveElement(items, target, key === ARROW_DOWN_KEY$1, !items.includes(target)).focus();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Dropdown.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n static clearMenus(event) {\n if (event.button === RIGHT_MOUSE_BUTTON || event.type === 'keyup' && event.key !== TAB_KEY$1) {\n return;\n }\n const openToggles = SelectorEngine.find(SELECTOR_DATA_TOGGLE_SHOWN);\n for (const toggle of openToggles) {\n const context = Dropdown.getInstance(toggle);\n if (!context || context._config.autoClose === false) {\n continue;\n }\n const composedPath = event.composedPath();\n const isMenuTarget = composedPath.includes(context._menu);\n if (composedPath.includes(context._element) || context._config.autoClose === 'inside' && !isMenuTarget || context._config.autoClose === 'outside' && isMenuTarget) {\n continue;\n }\n\n // Tab navigation through the dropdown menu or events from contained inputs shouldn't close the menu\n if (context._menu.contains(event.target) && (event.type === 'keyup' && event.key === TAB_KEY$1 || /input|select|option|textarea|form/i.test(event.target.tagName))) {\n continue;\n }\n const relatedTarget = {\n relatedTarget: context._element\n };\n if (event.type === 'click') {\n relatedTarget.clickEvent = event;\n }\n context._completeHide(relatedTarget);\n }\n }\n static dataApiKeydownHandler(event) {\n // If not an UP | DOWN | ESCAPE key => not a dropdown command\n // If input/textarea && if key is other than ESCAPE => not a dropdown command\n\n const isInput = /input|textarea/i.test(event.target.tagName);\n const isEscapeEvent = event.key === ESCAPE_KEY$2;\n const isUpOrDownEvent = [ARROW_UP_KEY$1, ARROW_DOWN_KEY$1].includes(event.key);\n if (!isUpOrDownEvent && !isEscapeEvent) {\n return;\n }\n if (isInput && !isEscapeEvent) {\n return;\n }\n event.preventDefault();\n\n // TODO: v6 revert #37011 & change 
markup https://getbootstrap.com/docs/5.3/forms/input-group/\n const getToggleButton = this.matches(SELECTOR_DATA_TOGGLE$3) ? this : SelectorEngine.prev(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.next(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.findOne(SELECTOR_DATA_TOGGLE$3, event.delegateTarget.parentNode);\n const instance = Dropdown.getOrCreateInstance(getToggleButton);\n if (isUpOrDownEvent) {\n event.stopPropagation();\n instance.show();\n instance._selectMenuItem(event);\n return;\n }\n if (instance._isShown()) {\n // else is escape and we check if it is shown\n event.stopPropagation();\n instance.hide();\n getToggleButton.focus();\n }\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_DATA_TOGGLE$3, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_MENU, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_KEYUP_DATA_API, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, SELECTOR_DATA_TOGGLE$3, function (event) {\n event.preventDefault();\n Dropdown.getOrCreateInstance(this).toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Dropdown);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/backdrop.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$9 = 'backdrop';\nconst CLASS_NAME_FADE$4 = 'fade';\nconst CLASS_NAME_SHOW$5 = 'show';\nconst EVENT_MOUSEDOWN = `mousedown.bs.${NAME$9}`;\nconst Default$8 = {\n className: 'modal-backdrop',\n clickCallback: null,\n isAnimated: false,\n isVisible: true,\n // if false, we use the backdrop helper without adding any element to the dom\n rootElement: 'body' // give the choice to place backdrop under different elements\n};\n\nconst DefaultType$8 = {\n className: 'string',\n clickCallback: '(function|null)',\n isAnimated: 'boolean',\n isVisible: 'boolean',\n rootElement: '(element|string)'\n};\n\n/**\n * Class definition\n */\n\nclass Backdrop extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isAppended = false;\n this._element = null;\n }\n\n // Getters\n static get Default() {\n return Default$8;\n }\n static get DefaultType() {\n return DefaultType$8;\n }\n static get NAME() {\n return NAME$9;\n }\n\n // Public\n show(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._append();\n const element = this._getElement();\n if (this._config.isAnimated) {\n reflow(element);\n }\n element.classList.add(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n execute(callback);\n });\n }\n hide(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._getElement().classList.remove(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n this.dispose();\n execute(callback);\n });\n }\n dispose() {\n if (!this._isAppended) {\n return;\n }\n EventHandler.off(this._element, EVENT_MOUSEDOWN);\n this._element.remove();\n this._isAppended = false;\n }\n\n // Private\n _getElement() {\n if (!this._element) {\n const backdrop = document.createElement('div');\n backdrop.className = this._config.className;\n if (this._config.isAnimated) {\n backdrop.classList.add(CLASS_NAME_FADE$4);\n }\n this._element = backdrop;\n }\n 
return this._element;\n }\n _configAfterMerge(config) {\n // use getElement() with the default \"body\" to get a fresh Element on each instantiation\n config.rootElement = getElement(config.rootElement);\n return config;\n }\n _append() {\n if (this._isAppended) {\n return;\n }\n const element = this._getElement();\n this._config.rootElement.append(element);\n EventHandler.on(element, EVENT_MOUSEDOWN, () => {\n execute(this._config.clickCallback);\n });\n this._isAppended = true;\n }\n _emulateAnimation(callback) {\n executeAfterTransition(callback, this._getElement(), this._config.isAnimated);\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/focustrap.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$8 = 'focustrap';\nconst DATA_KEY$5 = 'bs.focustrap';\nconst EVENT_KEY$5 = `.${DATA_KEY$5}`;\nconst EVENT_FOCUSIN$2 = `focusin${EVENT_KEY$5}`;\nconst EVENT_KEYDOWN_TAB = `keydown.tab${EVENT_KEY$5}`;\nconst TAB_KEY = 'Tab';\nconst TAB_NAV_FORWARD = 'forward';\nconst TAB_NAV_BACKWARD = 'backward';\nconst Default$7 = {\n autofocus: true,\n trapElement: null // The element to trap focus inside of\n};\n\nconst DefaultType$7 = {\n autofocus: 'boolean',\n trapElement: 'element'\n};\n\n/**\n * Class definition\n */\n\nclass FocusTrap extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isActive = false;\n this._lastTabNavDirection = null;\n }\n\n // Getters\n static get Default() {\n return Default$7;\n }\n static get DefaultType() {\n return DefaultType$7;\n }\n static get NAME() {\n return NAME$8;\n }\n\n // Public\n activate() {\n if (this._isActive) {\n return;\n }\n if (this._config.autofocus) {\n this._config.trapElement.focus();\n }\n EventHandler.off(document, EVENT_KEY$5); // guard against infinite focus loop\n EventHandler.on(document, EVENT_FOCUSIN$2, event => this._handleFocusin(event));\n EventHandler.on(document, EVENT_KEYDOWN_TAB, event => this._handleKeydown(event));\n this._isActive = true;\n }\n deactivate() {\n if (!this._isActive) {\n return;\n }\n this._isActive = false;\n EventHandler.off(document, EVENT_KEY$5);\n }\n\n // Private\n _handleFocusin(event) {\n const {\n trapElement\n } = this._config;\n if (event.target === document || event.target === trapElement || trapElement.contains(event.target)) {\n return;\n }\n const elements = SelectorEngine.focusableChildren(trapElement);\n if (elements.length === 0) {\n trapElement.focus();\n } else if (this._lastTabNavDirection === TAB_NAV_BACKWARD) {\n elements[elements.length - 1].focus();\n } else {\n elements[0].focus();\n }\n }\n _handleKeydown(event) {\n if (event.key !== TAB_KEY) {\n return;\n }\n this._lastTabNavDirection = event.shiftKey ? 
TAB_NAV_BACKWARD : TAB_NAV_FORWARD;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/scrollBar.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst SELECTOR_FIXED_CONTENT = '.fixed-top, .fixed-bottom, .is-fixed, .sticky-top';\nconst SELECTOR_STICKY_CONTENT = '.sticky-top';\nconst PROPERTY_PADDING = 'padding-right';\nconst PROPERTY_MARGIN = 'margin-right';\n\n/**\n * Class definition\n */\n\nclass ScrollBarHelper {\n constructor() {\n this._element = document.body;\n }\n\n // Public\n getWidth() {\n // https://developer.mozilla.org/en-US/docs/Web/API/Window/innerWidth#usage_notes\n const documentWidth = document.documentElement.clientWidth;\n return Math.abs(window.innerWidth - documentWidth);\n }\n hide() {\n const width = this.getWidth();\n this._disableOverFlow();\n // give padding to element to balance the hidden scrollbar width\n this._setElementAttributes(this._element, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n // trick: We adjust positive paddingRight and negative marginRight to sticky-top elements to keep showing fullwidth\n this._setElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n this._setElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN, calculatedValue => calculatedValue - width);\n }\n reset() {\n this._resetElementAttributes(this._element, 'overflow');\n this._resetElementAttributes(this._element, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN);\n }\n isOverflowing() {\n return this.getWidth() > 0;\n }\n\n // Private\n _disableOverFlow() {\n this._saveInitialAttribute(this._element, 'overflow');\n this._element.style.overflow = 'hidden';\n }\n _setElementAttributes(selector, styleProperty, callback) {\n const scrollbarWidth = this.getWidth();\n const manipulationCallBack = element => {\n if (element !== this._element && window.innerWidth > element.clientWidth + scrollbarWidth) {\n return;\n }\n this._saveInitialAttribute(element, styleProperty);\n const calculatedValue = window.getComputedStyle(element).getPropertyValue(styleProperty);\n element.style.setProperty(styleProperty, `${callback(Number.parseFloat(calculatedValue))}px`);\n };\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _saveInitialAttribute(element, styleProperty) {\n const actualValue = element.style.getPropertyValue(styleProperty);\n if (actualValue) {\n Manipulator.setDataAttribute(element, styleProperty, actualValue);\n }\n }\n _resetElementAttributes(selector, styleProperty) {\n const manipulationCallBack = element => {\n const value = Manipulator.getDataAttribute(element, styleProperty);\n // We only want to remove the property if the value is `null`; the value can also be zero\n if (value === null) {\n element.style.removeProperty(styleProperty);\n return;\n }\n Manipulator.removeDataAttribute(element, styleProperty);\n element.style.setProperty(styleProperty, value);\n };\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _applyManipulationCallback(selector, callBack) {\n if (isElement(selector)) {\n callBack(selector);\n return;\n }\n for (const sel of SelectorEngine.find(selector, this._element)) {\n callBack(sel);\n }\n }\n}\n\n/**\n * 
--------------------------------------------------------------------------\n * Bootstrap modal.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$7 = 'modal';\nconst DATA_KEY$4 = 'bs.modal';\nconst EVENT_KEY$4 = `.${DATA_KEY$4}`;\nconst DATA_API_KEY$2 = '.data-api';\nconst ESCAPE_KEY$1 = 'Escape';\nconst EVENT_HIDE$4 = `hide${EVENT_KEY$4}`;\nconst EVENT_HIDE_PREVENTED$1 = `hidePrevented${EVENT_KEY$4}`;\nconst EVENT_HIDDEN$4 = `hidden${EVENT_KEY$4}`;\nconst EVENT_SHOW$4 = `show${EVENT_KEY$4}`;\nconst EVENT_SHOWN$4 = `shown${EVENT_KEY$4}`;\nconst EVENT_RESIZE$1 = `resize${EVENT_KEY$4}`;\nconst EVENT_CLICK_DISMISS = `click.dismiss${EVENT_KEY$4}`;\nconst EVENT_MOUSEDOWN_DISMISS = `mousedown.dismiss${EVENT_KEY$4}`;\nconst EVENT_KEYDOWN_DISMISS$1 = `keydown.dismiss${EVENT_KEY$4}`;\nconst EVENT_CLICK_DATA_API$2 = `click${EVENT_KEY$4}${DATA_API_KEY$2}`;\nconst CLASS_NAME_OPEN = 'modal-open';\nconst CLASS_NAME_FADE$3 = 'fade';\nconst CLASS_NAME_SHOW$4 = 'show';\nconst CLASS_NAME_STATIC = 'modal-static';\nconst OPEN_SELECTOR$1 = '.modal.show';\nconst SELECTOR_DIALOG = '.modal-dialog';\nconst SELECTOR_MODAL_BODY = '.modal-body';\nconst SELECTOR_DATA_TOGGLE$2 = '[data-bs-toggle=\"modal\"]';\nconst Default$6 = {\n backdrop: true,\n focus: true,\n keyboard: true\n};\nconst DefaultType$6 = {\n backdrop: '(boolean|string)',\n focus: 'boolean',\n keyboard: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Modal extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._dialog = SelectorEngine.findOne(SELECTOR_DIALOG, this._element);\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._isShown = false;\n this._isTransitioning = false;\n this._scrollBar = new ScrollBarHelper();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$6;\n }\n static get DefaultType() {\n return DefaultType$6;\n }\n static get NAME() {\n return NAME$7;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? 
this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown || this._isTransitioning) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$4, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._isTransitioning = true;\n this._scrollBar.hide();\n document.body.classList.add(CLASS_NAME_OPEN);\n this._adjustDialog();\n this._backdrop.show(() => this._showElement(relatedTarget));\n }\n hide() {\n if (!this._isShown || this._isTransitioning) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$4);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._isShown = false;\n this._isTransitioning = true;\n this._focustrap.deactivate();\n this._element.classList.remove(CLASS_NAME_SHOW$4);\n this._queueCallback(() => this._hideModal(), this._element, this._isAnimated());\n }\n dispose() {\n EventHandler.off(window, EVENT_KEY$4);\n EventHandler.off(this._dialog, EVENT_KEY$4);\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n handleUpdate() {\n this._adjustDialog();\n }\n\n // Private\n _initializeBackDrop() {\n return new Backdrop({\n isVisible: Boolean(this._config.backdrop),\n // 'static' option will be translated to true, and booleans will keep their value,\n isAnimated: this._isAnimated()\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n _showElement(relatedTarget) {\n // try to append dynamic modal\n if (!document.body.contains(this._element)) {\n document.body.append(this._element);\n }\n this._element.style.display = 'block';\n this._element.removeAttribute('aria-hidden');\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.scrollTop = 0;\n const modalBody = SelectorEngine.findOne(SELECTOR_MODAL_BODY, this._dialog);\n if (modalBody) {\n modalBody.scrollTop = 0;\n }\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_SHOW$4);\n const transitionComplete = () => {\n if (this._config.focus) {\n this._focustrap.activate();\n }\n this._isTransitioning = false;\n EventHandler.trigger(this._element, EVENT_SHOWN$4, {\n relatedTarget\n });\n };\n this._queueCallback(transitionComplete, this._dialog, this._isAnimated());\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS$1, event => {\n if (event.key !== ESCAPE_KEY$1) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n this._triggerBackdropTransition();\n });\n EventHandler.on(window, EVENT_RESIZE$1, () => {\n if (this._isShown && !this._isTransitioning) {\n this._adjustDialog();\n }\n });\n EventHandler.on(this._element, EVENT_MOUSEDOWN_DISMISS, event => {\n // a bad trick to segregate clicks that may start inside dialog but end outside, and avoid listen to scrollbar clicks\n EventHandler.one(this._element, EVENT_CLICK_DISMISS, event2 => {\n if (this._element !== event.target || this._element !== event2.target) {\n return;\n }\n if (this._config.backdrop === 'static') {\n this._triggerBackdropTransition();\n return;\n }\n if (this._config.backdrop) {\n this.hide();\n }\n });\n });\n }\n _hideModal() {\n this._element.style.display = 'none';\n this._element.setAttribute('aria-hidden', true);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n this._isTransitioning = false;\n this._backdrop.hide(() => {\n document.body.classList.remove(CLASS_NAME_OPEN);\n 
this._resetAdjustments();\n this._scrollBar.reset();\n EventHandler.trigger(this._element, EVENT_HIDDEN$4);\n });\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_FADE$3);\n }\n _triggerBackdropTransition() {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED$1);\n if (hideEvent.defaultPrevented) {\n return;\n }\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const initialOverflowY = this._element.style.overflowY;\n // return if the following background transition hasn't yet completed\n if (initialOverflowY === 'hidden' || this._element.classList.contains(CLASS_NAME_STATIC)) {\n return;\n }\n if (!isModalOverflowing) {\n this._element.style.overflowY = 'hidden';\n }\n this._element.classList.add(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.classList.remove(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.style.overflowY = initialOverflowY;\n }, this._dialog);\n }, this._dialog);\n this._element.focus();\n }\n\n /**\n * The following methods are used to handle overflowing modals\n */\n\n _adjustDialog() {\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const scrollbarWidth = this._scrollBar.getWidth();\n const isBodyOverflowing = scrollbarWidth > 0;\n if (isBodyOverflowing && !isModalOverflowing) {\n const property = isRTL() ? 'paddingLeft' : 'paddingRight';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n if (!isBodyOverflowing && isModalOverflowing) {\n const property = isRTL() ? 'paddingRight' : 'paddingLeft';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n }\n _resetAdjustments() {\n this._element.style.paddingLeft = '';\n this._element.style.paddingRight = '';\n }\n\n // Static\n static jQueryInterface(config, relatedTarget) {\n return this.each(function () {\n const data = Modal.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](relatedTarget);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$2, SELECTOR_DATA_TOGGLE$2, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n EventHandler.one(target, EVENT_SHOW$4, showEvent => {\n if (showEvent.defaultPrevented) {\n // only register focus restorer if modal will actually get shown\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$4, () => {\n if (isVisible(this)) {\n this.focus();\n }\n });\n });\n\n // avoid conflict when clicking modal toggler while another one is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR$1);\n if (alreadyOpen) {\n Modal.getInstance(alreadyOpen).hide();\n }\n const data = Modal.getOrCreateInstance(target);\n data.toggle(this);\n});\nenableDismissTrigger(Modal);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Modal);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap offcanvas.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$6 = 'offcanvas';\nconst DATA_KEY$3 = 'bs.offcanvas';\nconst EVENT_KEY$3 = `.${DATA_KEY$3}`;\nconst DATA_API_KEY$1 = '.data-api';\nconst 
EVENT_LOAD_DATA_API$2 = `load${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst ESCAPE_KEY = 'Escape';\nconst CLASS_NAME_SHOW$3 = 'show';\nconst CLASS_NAME_SHOWING$1 = 'showing';\nconst CLASS_NAME_HIDING = 'hiding';\nconst CLASS_NAME_BACKDROP = 'offcanvas-backdrop';\nconst OPEN_SELECTOR = '.offcanvas.show';\nconst EVENT_SHOW$3 = `show${EVENT_KEY$3}`;\nconst EVENT_SHOWN$3 = `shown${EVENT_KEY$3}`;\nconst EVENT_HIDE$3 = `hide${EVENT_KEY$3}`;\nconst EVENT_HIDE_PREVENTED = `hidePrevented${EVENT_KEY$3}`;\nconst EVENT_HIDDEN$3 = `hidden${EVENT_KEY$3}`;\nconst EVENT_RESIZE = `resize${EVENT_KEY$3}`;\nconst EVENT_CLICK_DATA_API$1 = `click${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst EVENT_KEYDOWN_DISMISS = `keydown.dismiss${EVENT_KEY$3}`;\nconst SELECTOR_DATA_TOGGLE$1 = '[data-bs-toggle=\"offcanvas\"]';\nconst Default$5 = {\n backdrop: true,\n keyboard: true,\n scroll: false\n};\nconst DefaultType$5 = {\n backdrop: '(boolean|string)',\n keyboard: 'boolean',\n scroll: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Offcanvas extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isShown = false;\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$5;\n }\n static get DefaultType() {\n return DefaultType$5;\n }\n static get NAME() {\n return NAME$6;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$3, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._backdrop.show();\n if (!this._config.scroll) {\n new ScrollBarHelper().hide();\n }\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.classList.add(CLASS_NAME_SHOWING$1);\n const completeCallBack = () => {\n if (!this._config.scroll || this._config.backdrop) {\n this._focustrap.activate();\n }\n this._element.classList.add(CLASS_NAME_SHOW$3);\n this._element.classList.remove(CLASS_NAME_SHOWING$1);\n EventHandler.trigger(this._element, EVENT_SHOWN$3, {\n relatedTarget\n });\n };\n this._queueCallback(completeCallBack, this._element, true);\n }\n hide() {\n if (!this._isShown) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$3);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._focustrap.deactivate();\n this._element.blur();\n this._isShown = false;\n this._element.classList.add(CLASS_NAME_HIDING);\n this._backdrop.hide();\n const completeCallback = () => {\n this._element.classList.remove(CLASS_NAME_SHOW$3, CLASS_NAME_HIDING);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n if (!this._config.scroll) {\n new ScrollBarHelper().reset();\n }\n EventHandler.trigger(this._element, EVENT_HIDDEN$3);\n };\n this._queueCallback(completeCallback, this._element, true);\n }\n dispose() {\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n\n // Private\n _initializeBackDrop() {\n const clickCallback = () => {\n if (this._config.backdrop === 'static') {\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n return;\n }\n this.hide();\n };\n\n // 'static' option will be translated to true, and booleans will keep their value\n const isVisible = Boolean(this._config.backdrop);\n return 
new Backdrop({\n className: CLASS_NAME_BACKDROP,\n isVisible,\n isAnimated: true,\n rootElement: this._element.parentNode,\n clickCallback: isVisible ? clickCallback : null\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS, event => {\n if (event.key !== ESCAPE_KEY) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n });\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Offcanvas.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$1, SELECTOR_DATA_TOGGLE$1, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$3, () => {\n // focus on trigger when it is closed\n if (isVisible(this)) {\n this.focus();\n }\n });\n\n // avoid conflict when clicking a toggler of an offcanvas, while another is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR);\n if (alreadyOpen && alreadyOpen !== target) {\n Offcanvas.getInstance(alreadyOpen).hide();\n }\n const data = Offcanvas.getOrCreateInstance(target);\n data.toggle(this);\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$2, () => {\n for (const selector of SelectorEngine.find(OPEN_SELECTOR)) {\n Offcanvas.getOrCreateInstance(selector).show();\n }\n});\nEventHandler.on(window, EVENT_RESIZE, () => {\n for (const element of SelectorEngine.find('[aria-modal][class*=show][class*=offcanvas-]')) {\n if (getComputedStyle(element).position !== 'fixed') {\n Offcanvas.getOrCreateInstance(element).hide();\n }\n }\n});\nenableDismissTrigger(Offcanvas);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Offcanvas);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/sanitizer.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n// js-docs-start allow-list\nconst ARIA_ATTRIBUTE_PATTERN = /^aria-[\\w-]*$/i;\nconst DefaultAllowlist = {\n // Global attributes allowed on any supplied element below.\n '*': ['class', 'dir', 'id', 'lang', 'role', ARIA_ATTRIBUTE_PATTERN],\n a: ['target', 'href', 'title', 'rel'],\n area: [],\n b: [],\n br: [],\n col: [],\n code: [],\n div: [],\n em: [],\n hr: [],\n h1: [],\n h2: [],\n h3: [],\n h4: [],\n h5: [],\n h6: [],\n i: [],\n img: ['src', 'srcset', 'alt', 'title', 'width', 'height'],\n li: [],\n ol: [],\n p: [],\n pre: [],\n s: [],\n small: [],\n span: [],\n sub: [],\n sup: [],\n strong: [],\n u: [],\n ul: []\n};\n// js-docs-end allow-list\n\nconst uriAttributes = new Set(['background', 'cite', 'href', 'itemtype', 'longdesc', 'poster', 'src', 'xlink:href']);\n\n/**\n * A pattern that recognizes URLs that are safe wrt. 
XSS in URL navigation\n * contexts.\n *\n * Shout-out to Angular https://github.com/angular/angular/blob/15.2.8/packages/core/src/sanitization/url_sanitizer.ts#L38\n */\n// eslint-disable-next-line unicorn/better-regex\nconst SAFE_URL_PATTERN = /^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i;\nconst allowedAttribute = (attribute, allowedAttributeList) => {\n const attributeName = attribute.nodeName.toLowerCase();\n if (allowedAttributeList.includes(attributeName)) {\n if (uriAttributes.has(attributeName)) {\n return Boolean(SAFE_URL_PATTERN.test(attribute.nodeValue));\n }\n return true;\n }\n\n // Check if a regular expression validates the attribute.\n return allowedAttributeList.filter(attributeRegex => attributeRegex instanceof RegExp).some(regex => regex.test(attributeName));\n};\nfunction sanitizeHtml(unsafeHtml, allowList, sanitizeFunction) {\n if (!unsafeHtml.length) {\n return unsafeHtml;\n }\n if (sanitizeFunction && typeof sanitizeFunction === 'function') {\n return sanitizeFunction(unsafeHtml);\n }\n const domParser = new window.DOMParser();\n const createdDocument = domParser.parseFromString(unsafeHtml, 'text/html');\n const elements = [].concat(...createdDocument.body.querySelectorAll('*'));\n for (const element of elements) {\n const elementName = element.nodeName.toLowerCase();\n if (!Object.keys(allowList).includes(elementName)) {\n element.remove();\n continue;\n }\n const attributeList = [].concat(...element.attributes);\n const allowedAttributes = [].concat(allowList['*'] || [], allowList[elementName] || []);\n for (const attribute of attributeList) {\n if (!allowedAttribute(attribute, allowedAttributes)) {\n element.removeAttribute(attribute.nodeName);\n }\n }\n }\n return createdDocument.body.innerHTML;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/template-factory.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$5 = 'TemplateFactory';\nconst Default$4 = {\n allowList: DefaultAllowlist,\n content: {},\n // { selector : text , selector2 : text2 , }\n extraClass: '',\n html: false,\n sanitize: true,\n sanitizeFn: null,\n template: '
<div></div>
'\n};\nconst DefaultType$4 = {\n allowList: 'object',\n content: 'object',\n extraClass: '(string|function)',\n html: 'boolean',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n template: 'string'\n};\nconst DefaultContentType = {\n entry: '(string|element|function|null)',\n selector: '(string|element)'\n};\n\n/**\n * Class definition\n */\n\nclass TemplateFactory extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n }\n\n // Getters\n static get Default() {\n return Default$4;\n }\n static get DefaultType() {\n return DefaultType$4;\n }\n static get NAME() {\n return NAME$5;\n }\n\n // Public\n getContent() {\n return Object.values(this._config.content).map(config => this._resolvePossibleFunction(config)).filter(Boolean);\n }\n hasContent() {\n return this.getContent().length > 0;\n }\n changeContent(content) {\n this._checkContent(content);\n this._config.content = {\n ...this._config.content,\n ...content\n };\n return this;\n }\n toHtml() {\n const templateWrapper = document.createElement('div');\n templateWrapper.innerHTML = this._maybeSanitize(this._config.template);\n for (const [selector, text] of Object.entries(this._config.content)) {\n this._setContent(templateWrapper, text, selector);\n }\n const template = templateWrapper.children[0];\n const extraClass = this._resolvePossibleFunction(this._config.extraClass);\n if (extraClass) {\n template.classList.add(...extraClass.split(' '));\n }\n return template;\n }\n\n // Private\n _typeCheckConfig(config) {\n super._typeCheckConfig(config);\n this._checkContent(config.content);\n }\n _checkContent(arg) {\n for (const [selector, content] of Object.entries(arg)) {\n super._typeCheckConfig({\n selector,\n entry: content\n }, DefaultContentType);\n }\n }\n _setContent(template, content, selector) {\n const templateElement = SelectorEngine.findOne(selector, template);\n if (!templateElement) {\n return;\n }\n content = this._resolvePossibleFunction(content);\n if (!content) {\n templateElement.remove();\n return;\n }\n if (isElement(content)) {\n this._putElementInTemplate(getElement(content), templateElement);\n return;\n }\n if (this._config.html) {\n templateElement.innerHTML = this._maybeSanitize(content);\n return;\n }\n templateElement.textContent = content;\n }\n _maybeSanitize(arg) {\n return this._config.sanitize ? 
sanitizeHtml(arg, this._config.allowList, this._config.sanitizeFn) : arg;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this]);\n }\n _putElementInTemplate(element, templateElement) {\n if (this._config.html) {\n templateElement.innerHTML = '';\n templateElement.append(element);\n return;\n }\n templateElement.textContent = element.textContent;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap tooltip.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$4 = 'tooltip';\nconst DISALLOWED_ATTRIBUTES = new Set(['sanitize', 'allowList', 'sanitizeFn']);\nconst CLASS_NAME_FADE$2 = 'fade';\nconst CLASS_NAME_MODAL = 'modal';\nconst CLASS_NAME_SHOW$2 = 'show';\nconst SELECTOR_TOOLTIP_INNER = '.tooltip-inner';\nconst SELECTOR_MODAL = `.${CLASS_NAME_MODAL}`;\nconst EVENT_MODAL_HIDE = 'hide.bs.modal';\nconst TRIGGER_HOVER = 'hover';\nconst TRIGGER_FOCUS = 'focus';\nconst TRIGGER_CLICK = 'click';\nconst TRIGGER_MANUAL = 'manual';\nconst EVENT_HIDE$2 = 'hide';\nconst EVENT_HIDDEN$2 = 'hidden';\nconst EVENT_SHOW$2 = 'show';\nconst EVENT_SHOWN$2 = 'shown';\nconst EVENT_INSERTED = 'inserted';\nconst EVENT_CLICK$1 = 'click';\nconst EVENT_FOCUSIN$1 = 'focusin';\nconst EVENT_FOCUSOUT$1 = 'focusout';\nconst EVENT_MOUSEENTER = 'mouseenter';\nconst EVENT_MOUSELEAVE = 'mouseleave';\nconst AttachmentMap = {\n AUTO: 'auto',\n TOP: 'top',\n RIGHT: isRTL() ? 'left' : 'right',\n BOTTOM: 'bottom',\n LEFT: isRTL() ? 'right' : 'left'\n};\nconst Default$3 = {\n allowList: DefaultAllowlist,\n animation: true,\n boundary: 'clippingParents',\n container: false,\n customClass: '',\n delay: 0,\n fallbackPlacements: ['top', 'right', 'bottom', 'left'],\n html: false,\n offset: [0, 6],\n placement: 'top',\n popperConfig: null,\n sanitize: true,\n sanitizeFn: null,\n selector: false,\n template: '
<div class="tooltip" role="tooltip">' + '<div class="tooltip-arrow"></div>
' + '<div class="tooltip-inner"></div>
' + '</div>
',\n title: '',\n trigger: 'hover focus'\n};\nconst DefaultType$3 = {\n allowList: 'object',\n animation: 'boolean',\n boundary: '(string|element)',\n container: '(string|element|boolean)',\n customClass: '(string|function)',\n delay: '(number|object)',\n fallbackPlacements: 'array',\n html: 'boolean',\n offset: '(array|string|function)',\n placement: '(string|function)',\n popperConfig: '(null|object|function)',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n selector: '(string|boolean)',\n template: 'string',\n title: '(string|element|function)',\n trigger: 'string'\n};\n\n/**\n * Class definition\n */\n\nclass Tooltip extends BaseComponent {\n constructor(element, config) {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s tooltips require Popper (https://popper.js.org)');\n }\n super(element, config);\n\n // Private\n this._isEnabled = true;\n this._timeout = 0;\n this._isHovered = null;\n this._activeTrigger = {};\n this._popper = null;\n this._templateFactory = null;\n this._newContent = null;\n\n // Protected\n this.tip = null;\n this._setListeners();\n if (!this._config.selector) {\n this._fixTitle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$3;\n }\n static get DefaultType() {\n return DefaultType$3;\n }\n static get NAME() {\n return NAME$4;\n }\n\n // Public\n enable() {\n this._isEnabled = true;\n }\n disable() {\n this._isEnabled = false;\n }\n toggleEnabled() {\n this._isEnabled = !this._isEnabled;\n }\n toggle() {\n if (!this._isEnabled) {\n return;\n }\n this._activeTrigger.click = !this._activeTrigger.click;\n if (this._isShown()) {\n this._leave();\n return;\n }\n this._enter();\n }\n dispose() {\n clearTimeout(this._timeout);\n EventHandler.off(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n if (this._element.getAttribute('data-bs-original-title')) {\n this._element.setAttribute('title', this._element.getAttribute('data-bs-original-title'));\n }\n this._disposePopper();\n super.dispose();\n }\n show() {\n if (this._element.style.display === 'none') {\n throw new Error('Please use show on visible elements');\n }\n if (!(this._isWithContent() && this._isEnabled)) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_SHOW$2));\n const shadowRoot = findShadowRoot(this._element);\n const isInTheDom = (shadowRoot || this._element.ownerDocument.documentElement).contains(this._element);\n if (showEvent.defaultPrevented || !isInTheDom) {\n return;\n }\n\n // TODO: v6 remove this or make it optional\n this._disposePopper();\n const tip = this._getTipElement();\n this._element.setAttribute('aria-describedby', tip.getAttribute('id'));\n const {\n container\n } = this._config;\n if (!this._element.ownerDocument.documentElement.contains(this.tip)) {\n container.append(tip);\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_INSERTED));\n }\n this._popper = this._createPopper(tip);\n tip.classList.add(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n const complete = () => {\n EventHandler.trigger(this._element, 
this.constructor.eventName(EVENT_SHOWN$2));\n if (this._isHovered === false) {\n this._leave();\n }\n this._isHovered = false;\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n hide() {\n if (!this._isShown()) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDE$2));\n if (hideEvent.defaultPrevented) {\n return;\n }\n const tip = this._getTipElement();\n tip.classList.remove(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n this._activeTrigger[TRIGGER_CLICK] = false;\n this._activeTrigger[TRIGGER_FOCUS] = false;\n this._activeTrigger[TRIGGER_HOVER] = false;\n this._isHovered = null; // it is a trick to support manual triggering\n\n const complete = () => {\n if (this._isWithActiveTrigger()) {\n return;\n }\n if (!this._isHovered) {\n this._disposePopper();\n }\n this._element.removeAttribute('aria-describedby');\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDDEN$2));\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n update() {\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Protected\n _isWithContent() {\n return Boolean(this._getTitle());\n }\n _getTipElement() {\n if (!this.tip) {\n this.tip = this._createTipElement(this._newContent || this._getContentForTemplate());\n }\n return this.tip;\n }\n _createTipElement(content) {\n const tip = this._getTemplateFactory(content).toHtml();\n\n // TODO: remove this check in v6\n if (!tip) {\n return null;\n }\n tip.classList.remove(CLASS_NAME_FADE$2, CLASS_NAME_SHOW$2);\n // TODO: v6 the following can be achieved with CSS only\n tip.classList.add(`bs-${this.constructor.NAME}-auto`);\n const tipId = getUID(this.constructor.NAME).toString();\n tip.setAttribute('id', tipId);\n if (this._isAnimated()) {\n tip.classList.add(CLASS_NAME_FADE$2);\n }\n return tip;\n }\n setContent(content) {\n this._newContent = content;\n if (this._isShown()) {\n this._disposePopper();\n this.show();\n }\n }\n _getTemplateFactory(content) {\n if (this._templateFactory) {\n this._templateFactory.changeContent(content);\n } else {\n this._templateFactory = new TemplateFactory({\n ...this._config,\n // the `content` var has to be after `this._config`\n // to override config.content in case of popover\n content,\n extraClass: this._resolvePossibleFunction(this._config.customClass)\n });\n }\n return this._templateFactory;\n }\n _getContentForTemplate() {\n return {\n [SELECTOR_TOOLTIP_INNER]: this._getTitle()\n };\n }\n _getTitle() {\n return this._resolvePossibleFunction(this._config.title) || this._element.getAttribute('data-bs-original-title');\n }\n\n // Private\n _initializeOnDelegatedTarget(event) {\n return this.constructor.getOrCreateInstance(event.delegateTarget, this._getDelegateConfig());\n }\n _isAnimated() {\n return this._config.animation || this.tip && this.tip.classList.contains(CLASS_NAME_FADE$2);\n }\n _isShown() {\n return this.tip && this.tip.classList.contains(CLASS_NAME_SHOW$2);\n }\n _createPopper(tip) {\n const placement = execute(this._config.placement, [this, tip, this._element]);\n const attachment = AttachmentMap[placement.toUpperCase()];\n return Popper.createPopper(this._element, tip, this._getPopperConfig(attachment));\n }\n _getOffset() {\n 
const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this._element]);\n }\n _getPopperConfig(attachment) {\n const defaultBsPopperConfig = {\n placement: attachment,\n modifiers: [{\n name: 'flip',\n options: {\n fallbackPlacements: this._config.fallbackPlacements\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }, {\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'arrow',\n options: {\n element: `.${this.constructor.NAME}-arrow`\n }\n }, {\n name: 'preSetPlacement',\n enabled: true,\n phase: 'beforeMain',\n fn: data => {\n // Pre-set Popper's placement attribute in order to read the arrow sizes properly.\n // Otherwise, Popper mixes up the width and height dimensions since the initial arrow style is for top placement\n this._getTipElement().setAttribute('data-popper-placement', data.state.placement);\n }\n }]\n };\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _setListeners() {\n const triggers = this._config.trigger.split(' ');\n for (const trigger of triggers) {\n if (trigger === 'click') {\n EventHandler.on(this._element, this.constructor.eventName(EVENT_CLICK$1), this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context.toggle();\n });\n } else if (trigger !== TRIGGER_MANUAL) {\n const eventIn = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSEENTER) : this.constructor.eventName(EVENT_FOCUSIN$1);\n const eventOut = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSELEAVE) : this.constructor.eventName(EVENT_FOCUSOUT$1);\n EventHandler.on(this._element, eventIn, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusin' ? TRIGGER_FOCUS : TRIGGER_HOVER] = true;\n context._enter();\n });\n EventHandler.on(this._element, eventOut, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusout' ? TRIGGER_FOCUS : TRIGGER_HOVER] = context._element.contains(event.relatedTarget);\n context._leave();\n });\n }\n }\n this._hideModalHandler = () => {\n if (this._element) {\n this.hide();\n }\n };\n EventHandler.on(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n }\n _fixTitle() {\n const title = this._element.getAttribute('title');\n if (!title) {\n return;\n }\n if (!this._element.getAttribute('aria-label') && !this._element.textContent.trim()) {\n this._element.setAttribute('aria-label', title);\n }\n this._element.setAttribute('data-bs-original-title', title); // DO NOT USE IT. 
Is only for backwards compatibility\n this._element.removeAttribute('title');\n }\n _enter() {\n if (this._isShown() || this._isHovered) {\n this._isHovered = true;\n return;\n }\n this._isHovered = true;\n this._setTimeout(() => {\n if (this._isHovered) {\n this.show();\n }\n }, this._config.delay.show);\n }\n _leave() {\n if (this._isWithActiveTrigger()) {\n return;\n }\n this._isHovered = false;\n this._setTimeout(() => {\n if (!this._isHovered) {\n this.hide();\n }\n }, this._config.delay.hide);\n }\n _setTimeout(handler, timeout) {\n clearTimeout(this._timeout);\n this._timeout = setTimeout(handler, timeout);\n }\n _isWithActiveTrigger() {\n return Object.values(this._activeTrigger).includes(true);\n }\n _getConfig(config) {\n const dataAttributes = Manipulator.getDataAttributes(this._element);\n for (const dataAttribute of Object.keys(dataAttributes)) {\n if (DISALLOWED_ATTRIBUTES.has(dataAttribute)) {\n delete dataAttributes[dataAttribute];\n }\n }\n config = {\n ...dataAttributes,\n ...(typeof config === 'object' && config ? config : {})\n };\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n config.container = config.container === false ? document.body : getElement(config.container);\n if (typeof config.delay === 'number') {\n config.delay = {\n show: config.delay,\n hide: config.delay\n };\n }\n if (typeof config.title === 'number') {\n config.title = config.title.toString();\n }\n if (typeof config.content === 'number') {\n config.content = config.content.toString();\n }\n return config;\n }\n _getDelegateConfig() {\n const config = {};\n for (const [key, value] of Object.entries(this._config)) {\n if (this.constructor.Default[key] !== value) {\n config[key] = value;\n }\n }\n config.selector = false;\n config.trigger = 'manual';\n\n // In the future can be replaced with:\n // const keysWithDifferentValues = Object.entries(this._config).filter(entry => this.constructor.Default[entry[0]] !== this._config[entry[0]])\n // `Object.fromEntries(keysWithDifferentValues)`\n return config;\n }\n _disposePopper() {\n if (this._popper) {\n this._popper.destroy();\n this._popper = null;\n }\n if (this.tip) {\n this.tip.remove();\n this.tip = null;\n }\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Tooltip.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Tooltip);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap popover.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$3 = 'popover';\nconst SELECTOR_TITLE = '.popover-header';\nconst SELECTOR_CONTENT = '.popover-body';\nconst Default$2 = {\n ...Tooltip.Default,\n content: '',\n offset: [0, 8],\n placement: 'right',\n template: '
<div class="popover" role="tooltip">' + '<div class="popover-arrow"></div>' + '<h3 class="popover-header"></h3>' + '<div class="popover-body"></div>' + '</div>
',\n trigger: 'click'\n};\nconst DefaultType$2 = {\n ...Tooltip.DefaultType,\n content: '(null|string|element|function)'\n};\n\n/**\n * Class definition\n */\n\nclass Popover extends Tooltip {\n // Getters\n static get Default() {\n return Default$2;\n }\n static get DefaultType() {\n return DefaultType$2;\n }\n static get NAME() {\n return NAME$3;\n }\n\n // Overrides\n _isWithContent() {\n return this._getTitle() || this._getContent();\n }\n\n // Private\n _getContentForTemplate() {\n return {\n [SELECTOR_TITLE]: this._getTitle(),\n [SELECTOR_CONTENT]: this._getContent()\n };\n }\n _getContent() {\n return this._resolvePossibleFunction(this._config.content);\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Popover.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Popover);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap scrollspy.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$2 = 'scrollspy';\nconst DATA_KEY$2 = 'bs.scrollspy';\nconst EVENT_KEY$2 = `.${DATA_KEY$2}`;\nconst DATA_API_KEY = '.data-api';\nconst EVENT_ACTIVATE = `activate${EVENT_KEY$2}`;\nconst EVENT_CLICK = `click${EVENT_KEY$2}`;\nconst EVENT_LOAD_DATA_API$1 = `load${EVENT_KEY$2}${DATA_API_KEY}`;\nconst CLASS_NAME_DROPDOWN_ITEM = 'dropdown-item';\nconst CLASS_NAME_ACTIVE$1 = 'active';\nconst SELECTOR_DATA_SPY = '[data-bs-spy=\"scroll\"]';\nconst SELECTOR_TARGET_LINKS = '[href]';\nconst SELECTOR_NAV_LIST_GROUP = '.nav, .list-group';\nconst SELECTOR_NAV_LINKS = '.nav-link';\nconst SELECTOR_NAV_ITEMS = '.nav-item';\nconst SELECTOR_LIST_ITEMS = '.list-group-item';\nconst SELECTOR_LINK_ITEMS = `${SELECTOR_NAV_LINKS}, ${SELECTOR_NAV_ITEMS} > ${SELECTOR_NAV_LINKS}, ${SELECTOR_LIST_ITEMS}`;\nconst SELECTOR_DROPDOWN = '.dropdown';\nconst SELECTOR_DROPDOWN_TOGGLE$1 = '.dropdown-toggle';\nconst Default$1 = {\n offset: null,\n // TODO: v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: '0px 0px -25%',\n smoothScroll: false,\n target: null,\n threshold: [0.1, 0.5, 1]\n};\nconst DefaultType$1 = {\n offset: '(number|null)',\n // TODO v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: 'string',\n smoothScroll: 'boolean',\n target: 'element',\n threshold: 'array'\n};\n\n/**\n * Class definition\n */\n\nclass ScrollSpy extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n\n // this._element is the observablesContainer and config.target the menu links wrapper\n this._targetLinks = new Map();\n this._observableSections = new Map();\n this._rootElement = getComputedStyle(this._element).overflowY === 'visible' ? 
null : this._element;\n this._activeTarget = null;\n this._observer = null;\n this._previousScrollData = {\n visibleEntryTop: 0,\n parentScrollTop: 0\n };\n this.refresh(); // initialize\n }\n\n // Getters\n static get Default() {\n return Default$1;\n }\n static get DefaultType() {\n return DefaultType$1;\n }\n static get NAME() {\n return NAME$2;\n }\n\n // Public\n refresh() {\n this._initializeTargetsAndObservables();\n this._maybeEnableSmoothScroll();\n if (this._observer) {\n this._observer.disconnect();\n } else {\n this._observer = this._getNewObserver();\n }\n for (const section of this._observableSections.values()) {\n this._observer.observe(section);\n }\n }\n dispose() {\n this._observer.disconnect();\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n // TODO: on v6 target should be given explicitly & remove the {target: 'ss-target'} case\n config.target = getElement(config.target) || document.body;\n\n // TODO: v6 Only for backwards compatibility reasons. Use rootMargin only\n config.rootMargin = config.offset ? `${config.offset}px 0px -30%` : config.rootMargin;\n if (typeof config.threshold === 'string') {\n config.threshold = config.threshold.split(',').map(value => Number.parseFloat(value));\n }\n return config;\n }\n _maybeEnableSmoothScroll() {\n if (!this._config.smoothScroll) {\n return;\n }\n\n // unregister any previous listeners\n EventHandler.off(this._config.target, EVENT_CLICK);\n EventHandler.on(this._config.target, EVENT_CLICK, SELECTOR_TARGET_LINKS, event => {\n const observableSection = this._observableSections.get(event.target.hash);\n if (observableSection) {\n event.preventDefault();\n const root = this._rootElement || window;\n const height = observableSection.offsetTop - this._element.offsetTop;\n if (root.scrollTo) {\n root.scrollTo({\n top: height,\n behavior: 'smooth'\n });\n return;\n }\n\n // Chrome 60 doesn't support `scrollTo`\n root.scrollTop = height;\n }\n });\n }\n _getNewObserver() {\n const options = {\n root: this._rootElement,\n threshold: this._config.threshold,\n rootMargin: this._config.rootMargin\n };\n return new IntersectionObserver(entries => this._observerCallback(entries), options);\n }\n\n // The logic of selection\n _observerCallback(entries) {\n const targetElement = entry => this._targetLinks.get(`#${entry.target.id}`);\n const activate = entry => {\n this._previousScrollData.visibleEntryTop = entry.target.offsetTop;\n this._process(targetElement(entry));\n };\n const parentScrollTop = (this._rootElement || document.documentElement).scrollTop;\n const userScrollsDown = parentScrollTop >= this._previousScrollData.parentScrollTop;\n this._previousScrollData.parentScrollTop = parentScrollTop;\n for (const entry of entries) {\n if (!entry.isIntersecting) {\n this._activeTarget = null;\n this._clearActiveClass(targetElement(entry));\n continue;\n }\n const entryIsLowerThanPrevious = entry.target.offsetTop >= this._previousScrollData.visibleEntryTop;\n // if we are scrolling down, pick the bigger offsetTop\n if (userScrollsDown && entryIsLowerThanPrevious) {\n activate(entry);\n // if parent isn't scrolled, let's keep the first visible item, breaking the iteration\n if (!parentScrollTop) {\n return;\n }\n continue;\n }\n\n // if we are scrolling up, pick the smallest offsetTop\n if (!userScrollsDown && !entryIsLowerThanPrevious) {\n activate(entry);\n }\n }\n }\n _initializeTargetsAndObservables() {\n this._targetLinks = new Map();\n this._observableSections = new Map();\n const targetLinks = 
SelectorEngine.find(SELECTOR_TARGET_LINKS, this._config.target);\n for (const anchor of targetLinks) {\n // ensure that the anchor has an id and is not disabled\n if (!anchor.hash || isDisabled(anchor)) {\n continue;\n }\n const observableSection = SelectorEngine.findOne(decodeURI(anchor.hash), this._element);\n\n // ensure that the observableSection exists & is visible\n if (isVisible(observableSection)) {\n this._targetLinks.set(decodeURI(anchor.hash), anchor);\n this._observableSections.set(anchor.hash, observableSection);\n }\n }\n }\n _process(target) {\n if (this._activeTarget === target) {\n return;\n }\n this._clearActiveClass(this._config.target);\n this._activeTarget = target;\n target.classList.add(CLASS_NAME_ACTIVE$1);\n this._activateParents(target);\n EventHandler.trigger(this._element, EVENT_ACTIVATE, {\n relatedTarget: target\n });\n }\n _activateParents(target) {\n // Activate dropdown parents\n if (target.classList.contains(CLASS_NAME_DROPDOWN_ITEM)) {\n SelectorEngine.findOne(SELECTOR_DROPDOWN_TOGGLE$1, target.closest(SELECTOR_DROPDOWN)).classList.add(CLASS_NAME_ACTIVE$1);\n return;\n }\n for (const listGroup of SelectorEngine.parents(target, SELECTOR_NAV_LIST_GROUP)) {\n // Set triggered links parents as active\n // With both