Commit cd4f3c3: Sync
mcencini committed Jan 15, 2024 (1 parent: f426a91)
Showing 8 changed files with 324 additions and 231 deletions.
112 changes: 70 additions & 42 deletions src/deepmr/io/generic/mrd.py
@@ -11,7 +11,7 @@
from ..utils.header import Header
from ..utils.pathlib import get_filepath

def read_mrd(filepath: str, return_ordering: bool = False):
def read_mrd(filepath, return_ordering=False):
"""
Read kspace data from mrd file.
@@ -28,17 +28,8 @@ def read_mrd(filepath: str, return_ordering: bool = False):
-------
data : np.ndarray
Complex k-space data of shape (ncoils, ncontrasts, nslices, nview, npts).
    traj : np.ndarray
        Sampling trajectory of shape (ncontrasts, nslices, nview, npts, ndims).
    dcf : np.ndarray
        Sampling density compensation factor of shape (ncontrasts, nslices, nview, npts).
header : deepmr.Header
Metadata for image reconstruction.
ordering: np.ndarray, optional
Re-ordering indexes of shape (nacquisitions, 3), storing
the contrast, slice and view index (in the column axis), respectively,
for each acquisition (row) in the dataset. Only returned if 'return_ordering'
is True.
"""
# get full path
@@ -63,46 +54,59 @@ def read_mrd(filepath: str, return_ordering: bool = False):
_, firstVolumeIdx, _ = mrd._get_slice_locations(acquisitions)

# build header
header = Header.from_mrd(mrdhead, acquisitions, firstVolumeIdx)
head = Header.from_mrd(mrdhead, acquisitions, firstVolumeIdx)

# update header
header.FA = FA
header.TI = TI
header.TE = TE
header.TR = TR

if return_ordering:
return data, traj, dcf, header, ordering
head.FA = FA
head.TI = TI
head.TE = TE
head.TR = TR

# get npts
if data.size != 0:
npts = data.shape[-1]
elif traj.size != 0:
npts = traj.shape[-2]
else:
return data, traj, dcf, header
raise RuntimeError("HDF5 did not contain data nor trajectory")

head.adc = (acquisitions[0].discard_pre, npts - acquisitions[0].discard_post)
dt = acquisitions[0].sample_time_us * 1e-3 # ms
head.t = dt * np.arange(npts, dtype=np.float32)
head.traj = traj
head.dcf = dcf

if return_ordering:
head.user["ordering"] = ordering

return data, head
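
For context, a minimal usage sketch of the reworked return convention (editorial illustration, not part of the commit): it assumes read_mrd is re-exported as deepmr.io.read_mrd and that "scan.mrd" is a placeholder path.

import deepmr.io

# Single (data, head) pair: trajectory, density compensation and, when
# requested, the acquisition ordering now travel inside the Header.
data, head = deepmr.io.read_mrd("scan.mrd", return_ordering=True)

kspace = data                     # (ncoils, ncontrasts, nslices, nview, npts)
traj = head.traj                  # sampling trajectory
dcf = head.dcf                    # density compensation factors
t = head.t                        # readout time axis in ms
adc_start, adc_stop = head.adc    # valid ADC window (discard_pre, npts - discard_post)
ordering = head.user["ordering"]  # (nacquisitions, 3): contrast, slice, view index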


# %% subroutines
def _read_header(dset):
xml_header = dset.read_xml_header()
xml_header = xml_header.decode("utf-8")
return ismrmrd.xsd.CreateFromDocument(xml_header)


def _read_mrd(filename):
# open file
dset = ismrmrd.Dataset(filename)

# read header
mrdhead = _read_header(dset)

# read acquisitions
nacq = dset.number_of_acquisitions()
acquisitions = [dset.read_acquisition(n) for n in range(nacq)]

# close
dset.close()
with ismrmrd.File(filename) as dset:

# read header
mrdhead = _read_header(dset["dataset"])

# read acquisitions
acquisitions = dset["acquisitions"]

return acquisitions, mrdhead


def _read_header(dset):
xml_header = dset["xml"]
xml_header = xml_header.decode("utf-8")
return ismrmrd.xsd.CreateFromDocument(xml_header)


def _get_data(acquisitions):
data = np.stack([acq.data for acq in acquisitions], axis=0)
if acquisitions[0].data.size != 0:
data = np.stack([acq.data for acq in acquisitions], axis=0)
else:
data = None

if acquisitions[0].traj.size != 0:
trajdcf = np.stack([acq.traj for acq in acquisitions], axis=0)
@@ -144,17 +148,17 @@ def _sort_data(data, traj, dcf, acquisitions, mrdhead):
dcftmp = np.zeros(shape, dtype=np.float32)

# actual sorting
_loop_sorting(datatmp, data, icontrast, iz, iview)
_loop_sorting(trajtmp, traj, icontrast, iz, iview)
_loop_sorting(dcftmp, dcf, icontrast, iz, iview)
_data_sorting(datatmp, data, icontrast, iz, iview)
_traj_sorting(trajtmp, traj, icontrast, iz, iview)
_dcf_sorting(dcftmp, dcf, icontrast, iz, iview)

# assign
data = np.ascontiguousarray(datatmp.squeeze())
traj = np.ascontiguousarray(trajtmp.squeeze())
dcf = np.ascontiguousarray(dcftmp.squeeze())
else:
# actual sorting
_loop_sorting(datatmp, data, icontrast, iz, iview)
_data_sorting(datatmp, data, icontrast, iz, iview)

# assign
data = np.ascontiguousarray(datatmp.squeeze())
@@ -166,7 +170,7 @@ def _sort_data(data, traj, dcf, acquisitions, mrdhead):


@nb.njit(cache=True)
def _loop_sorting(output, input, echo_num, slice_num, view_num):
def _data_sorting(output, input, echo_num, slice_num, view_num):
# get size
nframes = input.shape[0]

@@ -177,4 +181,28 @@ def _loop_sorting(output, input, echo_num, slice_num, view_num):
iview = view_num[n]
output[:, iecho, islice, iview, :] = input[n]

@nb.njit(cache=True)
def _traj_sorting(output, input, echo_num, slice_num, view_num):
# get size
nframes = input.shape[0]

# actual reordering
for n in range(nframes):
iecho = echo_num[n]
islice = slice_num[n]
iview = view_num[n]
output[iecho, islice, iview, :, :] = input[n]

@nb.njit(cache=True)
def _dcf_sorting(output, input, echo_num, slice_num, view_num):
# get size
nframes = input.shape[0]

# actual reordering
for n in range(nframes):
iecho = echo_num[n]
islice = slice_num[n]
iview = view_num[n]
output[iecho, islice, iview, :] = input[n]
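
The three njit helpers above differ only in the axis layout of the output array. A small stand-alone NumPy stand-in for _data_sorting (editorial illustration with made-up shapes and index vectors, not the commit's code):

import numpy as np

# Toy problem: 4 acquisitions, 3 coils, 2 contrasts, 1 slice, 2 views, 8 readout points.
nacq, ncoils, npts = 4, 3, 8
frames = np.arange(nacq * ncoils * npts).reshape(nacq, ncoils, npts).astype(np.complex64)
icontrast = np.array([0, 0, 1, 1])
iz = np.array([0, 0, 0, 0])
iview = np.array([0, 1, 0, 1])

# Scatter each acquisition into (ncoils, ncontrasts, nslices, nviews, npts),
# mirroring the output[:, iecho, islice, iview, :] assignment above.
sorted_data = np.zeros((ncoils, 2, 1, 2, npts), dtype=np.complex64)
for n in range(nacq):
    sorted_data[:, icontrast[n], iz[n], iview[n], :] = frames[n]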


8 changes: 2 additions & 6 deletions src/deepmr/io/kspace/gehc.py
@@ -17,7 +17,7 @@
from ..utils.pathlib import get_filepath


def read_gehc(filepath, acqheader=None, ordering=None):
def read_gehc(filepath, acqheader=None):
"""
Read kspace data from GEHC file.
@@ -29,10 +29,6 @@ def read_gehc(filepath, acqheader=None, ordering=None):
Acquisition header loaded from trajectory.
If not provided, assume Cartesian acquisition and infer from data.
The default is None.
ordering: np.ndarray, optional
Data ordering loaded from external file (e.g, trajectory).
If not provided, infer from data.
The default is None.
Returns
-------
@@ -48,7 +44,7 @@ def read_gehc(filepath, acqheader=None, ordering=None):
if __GEHC_AVAILABLE__:
with warnings.catch_warnings():
warnings.simplefilter("ignore") # change the hook
data, header = gehc.read_rawdata(filepath, acqheader, ordering)
data, header = gehc.read_rawdata(filepath, acqheader)

# build header
header = Header.from_gehc(header)
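
Likewise, a minimal sketch of the simplified read_gehc call (editorial illustration, not part of the commit): it assumes the function is exposed as deepmr.io.read_gehc, that the optional GEHC reading backend is installed, that it returns the same (data, head) pair as read_mrd, and that "raw_data.h5" is a placeholder path.

import deepmr.io

# The external "ordering" argument is gone; ordering is now inferred from the
# data itself (or from the trajectory header passed via acqheader).
data, head = deepmr.io.read_gehc("raw_data.h5", acqheader=None)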
52 changes: 28 additions & 24 deletions src/deepmr/io/trajectories/matlab.py
@@ -19,8 +19,8 @@ def read_matfile_traj(filename, dcfname=None, schedulename=None):
Returns
-------
dict
Deserialized matfile.
    head : deepmr.Header
Deserialized trajectory.
"""
# load dcf
@@ -32,11 +32,12 @@

# get indexes
if "ind" in matfile:
ind = matfile["ind"][0]
ind = matfile["ind"][0].astype(bool)
elif "inds" in matfile:
ind = matfile["inds"].squeeze()
ind = matfile["inds"].squeeze().astype(bool)
else:
raise RuntimeError("ADC indexes not found!")
ind = np.argwhere(ind)[[0, -1]].squeeze()

# get dcf
if "dcf" in matfile:
@@ -52,11 +53,14 @@

# get sampling time
if "t" in matfile:
dt = np.diff(matfile["t"][0])[0] * 1e3 # ms
t = matfile["t"][0] * 1e3 # ms
elif "ts" in matfile:
dt = np.diff(matfile["ts"].squeeze())[0] * 1e3 # ms
t = matfile["ts"].squeeze() * 1e3 # ms
else:
raise RuntimeError("Sampling time not found!")

# remove offset from sampling time
t -= t[0]

# get matrix
if "mtx" in matfile:
@@ -72,13 +76,9 @@ def read_matfile_traj(filename, dcfname=None, schedulename=None):
else:
raise RuntimeError("Field of View not found!")
resolution = (np.asarray(fov) / np.asarray(shape)).tolist()

# if ndim != 3:
# shape = [1] + shape
# resolution = [1.0] + resolution


# initialize header
head = Header(shape, resolution, dt=dt)
head = Header(shape, resolution, t=t)
head.adc = ind

# get schedule file
@@ -93,8 +93,12 @@ def read_matfile_traj(filename, dcfname=None, schedulename=None):
if dcf is not None:
dcf = dcf.reshape(nviews, ncontrasts, -1).swapaxes(0, 1)
dcf = np.ascontiguousarray(dcf).astype(np.float32)

# append
head.traj = k
head.dcf = dcf

return k, dcf, head
return head


# %% subroutines
Expand All @@ -103,12 +107,12 @@ def _get_trajectory(matfile):
if "k" in matfile:
k = matfile["k"]
# get shape
if "ind" in matfile:
npts = matfile["ind"].shape[-1]
elif "inds" in matfile:
npts = matfile["inds"].shape[-1]
if "t" in matfile:
npts = matfile["t"].shape[-1]
elif "ts" in matfile:
npts = matfile["ts"].shape[-1]
else:
raise RuntimeError("ADC indexes not found!")
raise RuntimeError("Time not found!")
nviews = int(k.shape[1] / npts)

# reshape
@@ -164,24 +168,24 @@ def _get_schedule(head, matfile, schedulename):
schedule = None
if schedule is not None:
if "VariableFlip" in schedule.dtype.fields:
FA = schedule["VariableFlip"][0][0].squeeze()
FA = schedule["VariableFlip"][0][0].squeeze().astype(np.float32)
else:
FA = 0.0
if "VariablePhase" in schedule.dtype.fields:
phase = schedule["VariableFlip"][0][0].squeeze()
phase = schedule["VariableFlip"][0][0].squeeze().astype(np.float32)
FA = FA * np.exp(1j * np.deg2rad(phase))
else:
FA = 0.0
if "VariableTE" in schedule.dtype.fields:
TE = schedule["VariableTE"][0][0].squeeze()
TE = schedule["VariableTE"][0][0].squeeze().astype(np.float32)
elif "TE" in schedule.dtype.fields:
TE = float(schedule["TE"][0][0].squeeze())
TE = schedule["TE"][0][0].squeeze().astype(np.float32)
if "VariableTR" in schedule.dtype.fields:
TR = schedule["VariableTR"][0][0].squeeze()
TR = schedule["VariableTR"][0][0].squeeze().astype(np.float32)
else:
TR = 0.0
if "InversionTime" in schedule.dtype.fields:
TI = schedule["InversionTime"][0][0].squeeze()
TI = schedule["InversionTime"][0][0].squeeze().astype(np.float32)
else:
TI = 0.0
else:
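
Finally, a minimal sketch of the new read_matfile_traj convention (editorial illustration, not part of the commit): it assumes the function is exposed as deepmr.io.read_matfile_traj and that the .mat file names are placeholders.

import deepmr.io

# A single Header is now returned; trajectory and density compensation are
# attached to it (head.traj, head.dcf) rather than returned alongside it.
head = deepmr.io.read_matfile_traj(
    "traj.mat", dcfname="dcf.mat", schedulename="schedule.mat"
)

traj = head.traj  # k-space trajectory loaded from the matfile
dcf = head.dcf    # matching density compensation, if a dcf file was provided
t = head.t        # sampling times in ms, shifted so that t[0] == 0
adc = head.adc    # first/last ADC sample, from np.argwhere(ind)[[0, -1]]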
