From befac12ae69e1f929758b458c7a5631b845b69b1 Mon Sep 17 00:00:00 2001
From: Dan Levitas
Date: Thu, 18 Jan 2024 16:02:51 +0000
Subject: [PATCH] [ENH] add MEG support

---
 handler/Dockerfile                            |   2 +-
 handler/bids.sh                               |   4 +
 handler/convert_meg.py                        |  93 +++
 handler/ezBIDS_core/createThumbnailsMovies.py |  55 +-
 handler/ezBIDS_core/ezBIDS_core.py            | 576 +++++++++++++++---
 handler/ezBIDS_core/update_ezBIDS_core.py     |  20 +-
 handler/preprocess.sh                         |  16 +-
 ui/src/Events.vue                             |   6 +-
 ui/src/store/index.ts                         |  12 +
 9 files changed, 674 insertions(+), 110 deletions(-)
 create mode 100755 handler/convert_meg.py

diff --git a/handler/Dockerfile b/handler/Dockerfile
index 4c1ac2a9..a594f497 100755
--- a/handler/Dockerfile
+++ b/handler/Dockerfile
@@ -10,7 +10,7 @@ RUN apt update && \
 RUN apt install -y parallel python3 python3-pip tree curl unzip git jq python libgl-dev python-numpy
 
 RUN pip3 install numpy==1.23.0 nibabel==4.0.0 pandas==1.0.1 matplotlib pyyaml==5.4.1 pydicom==2.3.1 natsort pydeface && \
-    pip3 install quickshear pypet2bids
+    pip3 install quickshear pypet2bids mne mne-bids
 
 RUN apt-get install -y build-essential pkg-config cmake git pigz rename zstd libopenjp2-7 libgdcm-tools wget libopenblas-dev && \
     apt-get clean -y && apt-get autoclean -y && apt-get autoremove -y
diff --git a/handler/bids.sh b/handler/bids.sh
index 0d45fb60..247f50ab 100755
--- a/handler/bids.sh
+++ b/handler/bids.sh
@@ -16,6 +16,10 @@ rootDir="$root/bids/$datasetName"
 #clean up from previous run
 rm -rf $root/bids
 
+# run MEG BIDS conversion if relevant data found
+echo "converting MEG data (if present)"
+./convert_meg.py $root/finalized.json $rootDir
+
 echo "converting output to bids"
 ./convert.js $root
 
diff --git a/handler/convert_meg.py b/handler/convert_meg.py
new file mode 100755
index 00000000..4ba3f8ba
--- /dev/null
+++ b/handler/convert_meg.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python3
+
+"""
+Takes the ezBIDS data (finalized.json) and uses MNE-BIDS to perform the BIDS
+conversion for MEG data.
+"""
+
+import sys
+import json
+from mne.io import read_raw
+from mne_bids import BIDSPath, write_raw_bids
+
+# Begin:
+finalized_json_data = json.load(open(sys.argv[1]), strict=False)
+bids_root_dir = sys.argv[2]
+
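+# Assumption: finalized.json provides a top-level "subjects" array (the
+# finalized subject/session labels, indexed by each object's subject_idx and
+# session_idx) plus an "objects" array whose "_entities" dict carries the
+# per-acquisition entity labels (subject, task, run, etc.).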
+subjects = finalized_json_data["subjects"]
+# Entity-derived subject label for each object; used for the empty-room lookup below
+object_subjects = [x["_entities"]["subject"] for x in finalized_json_data["objects"]]
+
+# Convert each MEG object
+for obj in finalized_json_data["objects"]:
+    obj_type = obj["_type"]
+    if "meg" in obj_type:
+        datatype = "meg"
+        img_data = obj["_SeriesDescription"]
+
+        raw = read_raw(img_data, verbose=0)
+
+        # Get entity information
+        entities = obj["_entities"]
+        sub_idx = obj["subject_idx"]
+        ses_idx = obj["session_idx"]
+
+        # sub
+        sub = subjects[sub_idx]["subject"]
+
+        # ses (None if blank or excluded; otherwise reduce the dict to its label)
+        ses = subjects[sub_idx]["sessions"][ses_idx]
+        if ses["session"] == "" or ses["exclude"] is True:
+            ses = None
+        else:
+            ses = ses["session"]
+
+        # task
+        task = entities["task"]
+
+        # acquisition, run, processing, split: map empty strings to None
+        acq = entities["acquisition"] or None
+        run = entities["run"] or None
+        proc = entities["processing"] or None
+        split = entities["split"] or None
+
+        # Create an MNE BIDSPath
+        bids_path = BIDSPath(
+            subject=sub,
+            session=ses,
+            task=task,
+            acquisition=acq,
+            run=run,
+            processing=proc,
+            split=split,
+            datatype=datatype,
+            root=bids_root_dir
+        )
+
+        print("Converting MEG data to BIDS")
+        empty_room = None
+        if "emptyroom" in object_subjects and sub != "emptyroom":
+            # Pair the recording with the uploaded empty-room (noise) measurement
+            er_idx = object_subjects.index("emptyroom")
+            raw_er_data = finalized_json_data["objects"][er_idx]["_SeriesDescription"]
+            empty_room = read_raw(raw_er_data, verbose=0)
+
+        write_raw_bids(raw, bids_path=bids_path, verbose=0, empty_room=empty_room, overwrite=True)
+
+print("Finished MEG BIDS conversion (with MNE-BIDS)")
+print("")
diff --git a/handler/ezBIDS_core/createThumbnailsMovies.py b/handler/ezBIDS_core/createThumbnailsMovies.py
index 0300b4d0..7e77bbf0 100755
--- a/handler/ezBIDS_core/createThumbnailsMovies.py
+++ b/handler/ezBIDS_core/createThumbnailsMovies.py
@@ -17,14 +17,53 @@ import nibabel as nib
 from PIL import Image
 from math import floor
+from natsort import natsorted
 
 matplotlib.use('Agg')
 plt.style.use('dark_background')
 os.environ['MPLCONFIGDIR'] = os.getcwd() + "/configs/"
 
+DATA_DIR = sys.argv[1]
+
 
 # Functions
+def create_MEG_thumbnail():
+    """
+    Generate a simple visualization of the MEG data.
+ """ + import mne.viz + + uploaded_json_list = natsorted(pd.read_csv("list", header=None, lineterminator='\n').to_numpy().flatten().tolist()) + + MEG_extensions = [".ds", ".fif", ".sqd", ".con", ".raw", ".ave", ".mrk", ".kdf", ".mhd", ".trg", ".chn", ".dat"] + + """ + Get the MEG data organized + """ + data_files = [x.split("./")[-1] for x in uploaded_json_list] + MEG_data_files = [] + for data in data_files: + if any(x in data for x in MEG_extensions): + MEG_data_files.append(data) + + if len(MEG_data_files): + for meg in MEG_data_files: + fname = f"{DATA_DIR}/{meg}" + raw = mne.io.read_raw(fname, verbose=0) + mne.viz.set_browser_backend('matplotlib', verbose=None) + + png_types = ["channels", "psd"] + for png_type in png_types: + output_file = fname.split(".")[0] + f"_{png_type}.png" + + if png_type == "channels": + fig = raw.plot() + elif png_type == "psd": + fig = raw.compute_psd().plot() + + fig.savefig(output_file, bbox_inches='tight') + def create_thumbnail(nifti_file, image): """ @@ -118,7 +157,7 @@ def create_DWIshell_thumbnails(nifti_file, image, bval_file): w, h, d = buf.shape png = Image.frombytes("RGBA", (w, h), buf.tobytes()) - png.save("{}_shell-{}.png".format(output_file, bval)) + png.save(f"{output_file}_shell-{bval}.png") # Begin: @@ -133,14 +172,16 @@ def create_DWIshell_thumbnails(nifti_file, image, bval_file): print(nifti_file) print("") -if not os.path.isfile("{}/{}".format(data_dir, nifti_file)): # no corresponding nifti, so don't process - print("{} does not have a corresponding NIfTI file, cannot process".format(json_file)) +create_MEG_thumbnail() + +if not os.path.isfile(f"{data_dir}/{nifti_file}"): # no corresponding nifti, so don't process + print(f"{json_file} does not have a corresponding NIfTI file, cannot process") else: output_dir = nifti_file.split(".nii.gz")[0] image = nib.load(nifti_file) if len([x for x in image.shape if x < 0]): # image has negative dimension(s), cannot process - print("{} has negative dimension(s), cannot process".format(nifti_file)) + print(f"{nifti_file} has negative dimension(s), cannot process") else: # if image.get_data_dtype() == [('R', 'u1'), ('G', 'u1'), ('B', 'u1')]: if image.get_data_dtype() not in [" 0) + if chpi: + hpi_freqs, _, _ = get_chpi_info(info=raw.info, on_missing="ignore") + hpi_freqs = list(hpi_freqs) + + # Define datatype-specific JSON dictionaries + ch_info_json_common = [ + ("TaskName", task), + ("Manufacturer", manufacturer), + ("PowerLineFrequency", powerlinefrequency), + ("SamplingFrequency", sfreq), + ("SoftwareFilters", "n/a"), + ("RecordingDuration", raw.times[-1]), + ("RecordingType", rec_type), + ] + + ch_info_json_meg = [ + ("DewarPosition", "n/a"), + ("DigitizedLandmarks", digitized_landmark), + ("DigitizedHeadPoints", digitized_head_points), + ("MEGChannelCount", n_megchan), + ("MEGREFChannelCount", n_megrefchan), + ("SoftwareFilters", software_filters), + ] + + if chpi is not None: + ch_info_json_meg.append(("ContinuousHeadLocalization", chpi)) + ch_info_json_meg.append(("HeadCoilFrequency", hpi_freqs)) + + if emptyroom_fname is not None: + ch_info_json_meg.append(("AssociatedEmptyRoom", str(emptyroom_fname))) + + ch_info_json_eeg = [ + ("EEGReference", "n/a"), + ("EEGGround", "n/a"), + ("EEGPlacementScheme", _infer_eeg_placement_scheme(raw)), + ("Manufacturer", manufacturer), + ] + + ch_info_json_ieeg = [ + ("iEEGReference", "n/a"), + ("ECOGChannelCount", n_ecogchan), + ("SEEGChannelCount", n_seegchan + n_dbschan), + ] + + ch_info_json_nirs = [("Manufacturer", manufacturer)] + + ch_info_ch_counts 
= [ + ("EEGChannelCount", n_eegchan), + ("EOGChannelCount", n_eogchan), + ("ECGChannelCount", n_ecgchan), + ("EMGChannelCount", n_emgchan), + ("MiscChannelCount", n_miscchan), + ("TriggerChannelCount", n_stimchan), + ] + + ch_info_ch_counts_nirs = [ + ("NIRSChannelCount", n_nirscwchan), + ("NIRSSourceOptodeCount", n_nirscwsrc), + ("NIRSDetectorOptodeCount", n_nirscwdet), + ] + + # Stitch together the complete JSON dictionary + ch_info_json = ch_info_json_common + if datatype == "meg": + append_datatype_json = ch_info_json_meg + elif datatype == "eeg": + append_datatype_json = ch_info_json_eeg + elif datatype == "ieeg": + append_datatype_json = ch_info_json_ieeg + elif datatype == "nirs": + append_datatype_json = ch_info_json_nirs + ch_info_ch_counts.extend(ch_info_ch_counts_nirs) + + ch_info_json += append_datatype_json + ch_info_json += ch_info_ch_counts + ch_info_json = OrderedDict(ch_info_json) + + _write_json(fname, ch_info_json, overwrite) + + +def generate_MEG_json_sidecars(uploaded_json_list): + """ + Get the MEG data organized + """ + data_files = [x.split("./")[-1] for x in uploaded_json_list] + MEG_data_files = [] + for data in data_files: + include = True + if any(x in data for x in MEG_extensions): + if ".ds" in str(Path(data).parent): + # don't want subfolders or files to get caught, the ".ds" folder is essentially the data + include = False + + if include is True: + MEG_data_files.append(data) + + if len(MEG_data_files): + """ + Generate the JSON metadata + """ + from mne_bids.path import _parse_ext + from mne_bids.sidecar_updates import _update_sidecar + from mne_bids.config import MANUFACTURERS + + for meg in MEG_data_files: + fname = f"{DATA_DIR}/{meg}" + json_output_name = fname.split(".")[0] + ".json" + raw = mne.io.read_raw(fname, verbose=0) + acquisition_date_time = raw.info["meas_date"].strftime("%Y-%m-%dT%H:%M:%S.%f") + acquisition_date = acquisition_date_time.split("T")[0] + acquisition_time = acquisition_date_time.split("T")[-1] + datatype = "meg" # assume meg datatype for now + task = "unknown" # set placeholder label for task entity + + # get the manufacturer from the file in the Raw object + _, ext = _parse_ext(raw.filenames[0]) + manufacturer = MANUFACTURERS.get(ext, "") + + # check a few parameters + data_id = raw.info["subject_info"]["his_id"] + + if ( + ("noise" in fname.lower() or "emptyroom" in fname.lower()) + or ("noise" in data_id.lower() or "emptyroom" in data_id.lower()) + ): + sub = "sub-emptyroom" + ses = "ses-" + str(raw.info["meas_date"].strftime("%Y%m%d")) + task = "noise" + else: + sub = data_id + ses = None + + # Create the JSON sidecar + _sidecar_json(raw, task, manufacturer, json_output_name, datatype, emptyroom_fname=None, overwrite=True) + + # Add some fields to the sidecar + if ses is not None: + _update_sidecar(json_output_name, "PatientID", f"{sub}_{ses}") + else: + _update_sidecar(json_output_name, "PatientID", sub) + _update_sidecar(json_output_name, "AcquisitionDateTime", acquisition_date_time) + _update_sidecar(json_output_name, "AcquisitionDate", acquisition_date) + _update_sidecar(json_output_name, "AcquisitionTime", acquisition_time) + _update_sidecar(json_output_name, "Modality", "MEG") + _update_sidecar(json_output_name, "ConversionSoftware", "MNE-BIDS") + _update_sidecar(json_output_name, "SeriesDescription", fname) + + # Replace the MEG data files in the list file with the newly generated JSON sidecar names + substring = json_output_name.split(".json")[0].split("/")[-1] + corresponding_data = [x for x in uploaded_json_list if 
substring in x][0]
+            idx = uploaded_json_list.index(corresponding_data)
+            uploaded_json_list.pop(idx)
+            replacement = "./" + corresponding_data.split("./")[-1].split(".")[0] + ".json"
+            uploaded_json_list.append(replacement)
+
+        print("")
+
+    return natsorted(uploaded_json_list)
+
+
 def modify_uploaded_dataset_list(uploaded_json_list):
     """
     Filters the list of json files generated by preprocess.sh to ensure that
@@ -116,17 +416,19 @@
             json_data = open(json_file)
             json_data = json.load(json_data, strict=False)
 
-            # Only want json files with corresponding nifti (and bval/bvec) and if the files come from dcm2niix
-            if ("ConversionSoftware" in json_data and ("dcm2niix" in json_data["ConversionSoftware"]
-                    or "pypet2bids" in json_data["ConversionSoftware"])):
+            # Only want json files with corresponding imaging data and if the files come from accepted software
+            if ("ConversionSoftware" in json_data
+                    and json_data["ConversionSoftware"] in ["dcm2niix", "pypet2bids", "MNE-BIDS"]):
+
                 json_dir = os.path.dirname(json_file)
                 grouped_files = [
                     json_dir + "/" + x for x in os.listdir(json_dir)
                     if os.path.basename(json_file)[:-4] in x
                 ]
 
+                # Check that both .nii.gz and .nii aren't in the same group. Redundant, so remove .nii file if found
                 if len([x for x in grouped_files if ".nii" in x]) == 2:
-                    grouped_files = [x for x in grouped_files if x[-4:] != ".nii"]
+                    grouped_files = [x for x in grouped_files if not x.endswith(".nii")]
 
                 # If json comes with imaging data (NIfTI, bval/bvec) add it to list for processing
                 if len(grouped_files) > 1:
                     uploaded_files_list.append(grouped_files)
             else:
                 exclude_data = True
                 print(
-                    f"{json_file} was not generated from dcm2niix or pypet2bids. "
+                    f"{json_file} was not generated from dcm2niix, pypet2bids, or MNE-BIDS. "
                     "ezBIDS requires NIfTI/JSON file provenance to be from one "
-                    "of these two, thus this will not be converted by ezBIDS."
+                    "of these three, thus this will not be converted by ezBIDS."
                 )
         except:
             exclude_data = True
             print(
                 f"{json_file} has improper JSON syntax, possibly because "
-                "uploaded data was converted by older dcm2niix version. "
+                "uploaded data was converted by an older dcm2niix version or other software. "
                 "Will not be converted by ezBIDS."
             )
 
-    # Flatten uploaded_dataset_list
-    uploaded_files_list = [file for sublist in uploaded_files_list for file in sublist]
+    # Flatten uploaded_files_list
+    uploaded_files_list = natsorted([file for sublist in uploaded_files_list for file in sublist])
 
     return uploaded_files_list, exclude_data, config, config_file
 
 
@@ -534,13 +836,16 @@ def generate_dataset_list(uploaded_files_list, exclude_data):
     dataset_list = []
 
     # Get separate nifti and json (i.e. 
sidecar) lists - json_list = [x for x in uploaded_files_list if ".json" in x] - nifti_list = [ - x for x in uploaded_files_list if ".nii.gz" in x - or ".bval" in x - or ".bvec" in x - ] + json_list = natsorted([x for x in uploaded_files_list if ".json" in x]) + + nifti_list = natsorted([ + x for x in uploaded_files_list if x.endswith(".nii.gz") + or x.endswith(".bval") + or x.endswith(".bvec") + or x.endswith(tuple(MEG_extensions)) + ]) + print("") print("Determining unique acquisitions in dataset") print("------------------------------------------") sub_info_list_id = "01" @@ -553,7 +858,13 @@ def generate_dataset_list(uploaded_files_list, exclude_data): # Make sure each JSON has a corresponding NIfTI file corresponding_nifti = None try: - corresponding_nifti = [x for x in nifti_list if json_file[:-4] in x if ".nii" in x][0] + if json_data["Modality"] != "MEG": + corresponding_nifti = [x for x in nifti_list if json_file[:-4] in x and x.endswith(".nii.gz")][0] + else: + corresponding_nifti = [ + x for x in nifti_list if json_file[:-4] in x + and x.endswith(tuple(MEG_extensions)) + ][0] except: pass @@ -570,7 +881,7 @@ def generate_dataset_list(uploaded_files_list, exclude_data): except: ornt = None - if pe_direction is not None and ornt is not None: + if pe_direction is not None and ornt is not None and json_data["Modality"] != "MEG": proper_pe_direction = correct_pe(pe_direction, ornt) ped = determine_direction(proper_pe_direction, ornt) else: @@ -695,19 +1006,26 @@ def generate_dataset_list(uploaded_files_list, exclude_data): echo_time = 0 # Get the nibabel nifti image info - image = nib.load(json_file[:-4] + "nii.gz") - - # if image.get_data_dtype() == [('R', 'u1'), ('G', 'u1'), ('B', 'u1')]: - if image.get_data_dtype() in [" 1', 'unique_dic["RepetitionTime"] > 0', 'not any(x in unique_dic["ImageType"] ' @@ -1603,7 +1932,7 @@ def create_lookup_info(): [ '"DIFFUSION" not in unique_dic["ImageType"]', '"sbref" in sd and unique_dic["NumVolumes"] == 1', - 'unique_dic["nibabel_image"].ndim == 3', + 'unique_dic["ndim"] == 3', 'not any(x in unique_dic["ImageType"] ' 'for x in ["DERIVED", "PERFUSION", "DIFFUSION", "ASL", "UNI"])' ] @@ -1632,7 +1961,7 @@ def create_lookup_info(): 'any(".bvec" in x for x in unique_dic["paths"])', # '"DIFFUSION" in unique_dic["ImageType"]', 'not any(x in sd for x in ["trace", "_fa_", "adc"])', - 'unique_dic["nibabel_image"].ndim == 3', + 'unique_dic["ndim"] == 3', '("b0" in sd or "bzero" in sd or "sbref" in sd) ' 'and unique_dic["NumVolumes"] == 1' ] @@ -1754,6 +2083,30 @@ def create_lookup_info(): 'or unique_dic["Modality"] == "PT"' ] ) + elif datatype == "meg": + if suffix == "meg": + lookup_dic[datatype][suffix]["search_terms"].extend( + [ + "_ds_", + "_fif_", + "_sqd_", + "_con_", + "_raw_", + "_ave_", + "_mrk_", + "_kdf_", + "_mhd_", + "_trg_", + "_chn_", + "_dat_" + ] + ) + lookup_dic[datatype][suffix]["conditions"].extend( + [ + '"MNE-BIDS" in unique_dic["sidecar"]["ConversionSoftware"] ' + 'and unique_dic["Modality"] == "MEG"' + ] + ) # Add DWI derivatives (TRACEW, FA, ADC) to lookup dictionary lookup_dic["dwi_derivatives"] = { @@ -1795,6 +2148,7 @@ def datatype_suffix_identification(dataset_list_unique_series, lookup_dic, confi A modified version of dataset_list, where this list contains only the dictionaries of acquisitions with a unique series group ID. 
""" + print("") print("Datatype & suffix identification") print("------------------------------------") """ @@ -1998,7 +2352,7 @@ def datatype_suffix_identification(dataset_list_unique_series, lookup_dic, confi ] if (datatype in ["func", "dwi"] - and (unique_dic["nibabel_image"].ndim == 3 and unique_dic["NumVolumes"] > 1)): + and (unique_dic["ndim"] == 3 and unique_dic["NumVolumes"] > 1)): """ func and dwi can also have sbref suffix pairings, so 3D dimension data with only a single volume likely indicates that the sequence was closer to being @@ -2145,7 +2499,7 @@ def entity_labels_identification(dataset_list_unique_series, lookup_dic): A modified version of dataset_list, where this list contains only the dictionaries of acquisitions with a unique series group ID. """ - + print("") print("Entity label identification") print("----------------------------") entity_ordering = yaml.load(open(os.path.join(analyzer_dir, entity_ordering_file)), Loader=yaml.FullLoader) @@ -2167,7 +2521,8 @@ def entity_labels_identification(dataset_list_unique_series, lookup_dic): entity = entities_yaml[key]['entity'] if f"_{entity}_" in sd: # series_entities[key] = re.split(regex, sd.split(f"{entity}_")[-1])[0].replace("_", "") - series_entities[key] = re.split('_', sd.split(f"{entity}_")[-1])[0] + # series_entities[key] = re.split('_', sd.split(f"{entity}_")[-1])[0] Used as of 12/13/23 + series_entities[key] = re.split('[^a-zA-Z0-9]', re.split('_', sd.split(f"{entity}_")[-1])[0])[0] elif f"_{entity}-" in json_path: series_entities[key] = re.split('[^a-zA-Z0-9]', json_path.split(f"{entity}-")[-1])[0] else: @@ -2175,38 +2530,41 @@ def entity_labels_identification(dataset_list_unique_series, lookup_dic): else: series_entities[key] = "" + # MEG data was given a placeholder task entity label, so remove it + if key == "task" and series_entities["task"] == "unknown": + series_entities["task"] = "" + # If BIDS naming convention isn't detected, do a more thorough check for certain entities labels - # Specific code for Nigerian dataset only (revert these changes once dataset is converted) + # # Specific code for Nigerian dataset only (revert these changes once dataset is converted) # if "ABDN" in unique_dic["json_path"]: - if "ABDN" in unique_dic["json_path"]: - sd = re.sub("[^A-Za-z0-9]+", "", sd).lower() - if sd[-1] == "c": - series_entities["ceagent"] = "gadolinium" - if any(x in sd for x in ["axial", "ax", "axi"]): - series_entities["acquisition"] = "axial" - if any(x in sd for x in ["cor", "coronal"]): - series_entities["acquisition"] = "coronal" - if any(x in sd for x in ["sag", "sagittal"]): - series_entities["acquisition"] = "sagittal" - - if ("t1" in sd or "t1w" in sd) and "gre" not in sd: - unique_dic["datatype"] = "anat" - unique_dic["suffix"] = "T1w" - unique_dic["type"] = "anat/T1w" - if ("t2" in sd or "t2w" in sd) and "gre" not in sd: - if "flair" in sd: - unique_dic["datatype"] = "anat" - unique_dic["suffix"] = "FLAIR" - unique_dic["type"] = "anat/FLAIR" - else: - unique_dic["datatype"] = "anat" - unique_dic["suffix"] = "T2w" - unique_dic["type"] = "anat/T2w" - if "oblcor" in sd: - unique_dic["type"] = "exclude" - if "array" in sd: - unique_dic["type"] = "exclude" + # sd = re.sub("[^A-Za-z0-9]+", "", sd).lower() + # if sd[-1] == "c": + # series_entities["ceagent"] = "gadolinium" + # if any(x in sd for x in ["axial", "ax", "axi"]): + # series_entities["acquisition"] = "axial" + # if any(x in sd for x in ["cor", "coronal"]): + # series_entities["acquisition"] = "coronal" + # if any(x in sd for x in ["sag", 
"sagittal"]): + # series_entities["acquisition"] = "sagittal" + + # if ("t1" in sd or "t1w" in sd) and "gre" not in sd: + # unique_dic["datatype"] = "anat" + # unique_dic["suffix"] = "T1w" + # unique_dic["type"] = "anat/T1w" + # if ("t2" in sd or "t2w" in sd) and "gre" not in sd: + # if "flair" in sd: + # unique_dic["datatype"] = "anat" + # unique_dic["suffix"] = "FLAIR" + # unique_dic["type"] = "anat/FLAIR" + # else: + # unique_dic["datatype"] = "anat" + # unique_dic["suffix"] = "T2w" + # unique_dic["type"] = "anat/T2w" + # if "oblcor" in sd: + # unique_dic["type"] = "exclude" + # if "array" in sd: + # unique_dic["type"] = "exclude" # task func_rest_keys = ["rest", "rsfmri", "fcmri"] @@ -2219,6 +2577,10 @@ def entity_labels_identification(dataset_list_unique_series, lookup_dic): if len(match_index): series_entities["task"] = cog_atlas_tasks[match_index[0]] + if (any(x in re.sub("[^A-Za-z0-9]+", "", sd).lower() for x in ["noise", "emptyroom"]) + or series_entities["subject"] == "emptyroom"): # for MEG data + series_entities["task"] = "noise" + # dir (required for fmap/epi and highly recommended for dwi/dwi) if any(x in unique_dic["type"] for x in ["fmap/epi", "dwi/dwi"]): series_entities["direction"] = unique_dic["direction"] @@ -2266,6 +2628,12 @@ def entity_labels_identification(dataset_list_unique_series, lookup_dic): elif "Body" in unique_dic["sidecar"]["ReceiveCoilName"]: series_entities["acquisition"] = "body" + if unique_dic["sidecar"]["Manufacturer"] in ["Elekta", "Neuromag", "MEGIN"]: # For specific MEG instances + if unique_dic["SeriesDescription"].endswith(".dat"): # calibration file + series_entities["acquisition"] = "calibration" + elif unique_dic["SeriesDescription"].endswith(".fif"): # crosstalk file + series_entities["acquisition"] = "crosstalk" + # inversion if (any(x in unique_dic["type"] for x in ["anat/MP2RAGE", "anat/IRT1"]) and "InversionTime" in unique_dic["sidecar"]): @@ -2435,29 +2803,32 @@ def modify_objects_info(dataset_list): additional information. """ for protocol in scan_protocol: - image = protocol["nibabel_image"] - protocol["headers"] = str(image.header).splitlines()[1:] - - object_img_array = image.dataobj - # PET images are scaled, type will be float list) + # find non MRI and PET (i.e. MEG) products + MEG_extensions=("*.ds" "*.fif" "*.sqd" "*.con" "*.raw" "*.ave" "*.mrk" "*.kdf" "*.mhd" "*.trg" "*.chn" "*.dat") + for ext in ${MEG_extensions[*]} + do + if [[ $ext == "*.ds" ]] + then + find_type=d + else + find_type=f + fi + + (cd $root && find . -maxdepth 9 -type $find_type \( -name $ext \) >> list) + done + # remove irrelevant json files (e.g., ezBIDS_core.json, etc) if found - grep -F -v *ezBIDS_core*.json $root/list > $root/list_tmp && mv $root/list_tmp $root/list + grep -F -v ezBIDS_core.json $root/list > $root/list_tmp && mv $root/list_tmp $root/list # grep -F -v dataset_description.json $root/list > $root/list_tmp && mv $root/list_tmp $root/list # grep -F -v participants.json $root/list > $root/list_tmp && mv $root/list_tmp $root/list diff --git a/ui/src/Events.vue b/ui/src/Events.vue index 9fa34a61..89c8e8fe 100755 --- a/ui/src/Events.vue +++ b/ui/src/Events.vue @@ -3,13 +3,15 @@

If you'd like to include task events/timing data with your BIDS datasets, you can upload them here.

- Please skip this page if you do not have events data, or if your events data is not set up where each
- row pertains to an individual trial. An exception is E-Prime txt files, which are allowed.
+ Please skip this page if you are working with MEG data, if you do not have events data, or if your
+ events data are not set up so that each row pertains to an individual trial. An exception is E-Prime
+ txt files, which are allowed for functional BOLD data.

Only the following file extensions will be accepted by ezBIDS: .csv, .tsv, .txt, .out, and .xlsx. Uploaded files with other extensions will be ignored.

+
diff --git a/ui/src/store/index.ts b/ui/src/store/index.ts index 25bb4f81..4373e176 100755 --- a/ui/src/store/index.ts +++ b/ui/src/store/index.ts @@ -517,6 +517,9 @@ loadDatatype('fmap', fmapDatatype, 'Field Map'); import petDatatype from '../assets/schema/rules/datatypes/pet.json'; loadDatatype('pet', petDatatype, 'PET'); +import megDatatype from '../assets/schema/rules/datatypes/meg.json'; +loadDatatype('meg', megDatatype, 'MEG'); + import perfDatatype from '../assets/schema/rules/datatypes/perf.json'; import { ElNotification } from 'element-plus'; loadDatatype('perf', perfDatatype, 'Perfusion'); @@ -635,6 +638,15 @@ const store = createStore({ delete sidecar.PatientWeight; delete sidecar.AcquisitionDateTime; + // Don't need this (I think) if we're relying on MNE-BIDS + // if (sidecar.Modality === 'MEG') { + // delete sidecar.AcquisitionDate; + // delete sidecar.AcquisitionTime; + // delete sidecar.Modality; + // delete sidecar.ConversionSoftware; + // delete sidecar.SeriesDescription; + // } + item['sidecar_json'] = JSON.stringify(sidecar, null, 4); } });
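
The following is a minimal, self-contained sketch (not part of the patch) of the same
MNE-BIDS round trip that convert_meg.py relies on, assuming the mne and mne-bids
packages installed by the Dockerfile change above. The input file name, entity labels,
and BIDS root are hypothetical placeholders.

    #!/usr/bin/env python3
    # Hypothetical example: write one MEG recording to BIDS, then read it back.
    from mne.io import read_raw
    from mne_bids import BIDSPath, write_raw_bids, read_raw_bids

    raw = read_raw("sample_meg_raw.fif", verbose=0)  # placeholder input file
    bids_path = BIDSPath(subject="01", session="01", task="rest",
                         datatype="meg", root="/tmp/bids_out")  # placeholder root
    write_raw_bids(raw, bids_path=bids_path, overwrite=True, verbose=0)

    # Reading the data back confirms the resulting layout is BIDS-valid.
    raw_bids = read_raw_bids(bids_path=bids_path, verbose=0)
    print(raw_bids.info["sfreq"])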