diff --git a/narps_open/pipelines/matlabbatch_R5K7.m b/narps_open/pipelines/matlabbatch_R5K7.m
new file mode 100644
index 00000000..7e90190a
--- /dev/null
+++ b/narps_open/pipelines/matlabbatch_R5K7.m
@@ -0,0 +1,344 @@
+% first-level job ----
+
+% $ narps_description -t R5K7 -d preprocessing --json
+
+clear matlabbatch;
+
+% 1) motion correction
+
+% "motion_correction": "SPM12, Realign & Unwarp using the phase map
+% generated from the fieldmap data with SPM12 Fieldmap Toolbox v2.1
+% (default options). \nOther than defaults: \n- estimation: quality 0.95,
+% separation 3, two-pass procedure (i. registering to 1st scan, ii.
+% registering to mean image), interpolation 7th degree B-spline; \n- unwarp
+% & reslice: interpolation 7th degree B-spline ",
+
+% *** Create VDM map from field maps
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.data.presubphasemag.phase = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/fmap/sub-001_phasediff.nii,1'};
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.data.presubphasemag.magnitude = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/fmap/sub-001_magnitude1.nii,1'};
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.defaults.defaultsval.et = [4.92 7.38];
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.defaults.defaultsval.maskbrain = 0;
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.defaults.defaultsval.blipdir = -1;
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.defaults.defaultsval.tert = 29.15;
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.defaults.defaultsval.epifm = 0;
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.session(1).epi = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-01_bold.nii,1'};
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.session(2).epi = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-02_bold.nii,1'};
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.session(3).epi = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-03_bold.nii,1'};
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.session(4).epi = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-04_bold.nii,1'};
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.matchvdm = 1;
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.writeunwarped = 0;
+matlabbatch{1}.spm.tools.fieldmap.calculatevdm.subj.matchanat = 0;
+
+% --> This generates 4 files whose names start with vdm5_
+% In the following we will denote them by vdm5_runXX.extension
+
+
+matlabbatch{end+1}.spm.spatial.realignunwarp.data(1).scans = {
+    % Note: list as many entries as there are 3D volumes in the 4D fMRI image
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-01_bold.nii,1'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-01_bold.nii,2'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-01_bold.nii,3'};
+matlabbatch{end}.spm.spatial.realignunwarp.data(1).pmscan = {'ABS_PATH/vdm5_run01.nii'};
+matlabbatch{end}.spm.spatial.realignunwarp.data(2).scans = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-02_bold.nii,1'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-02_bold.nii,2'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-02_bold.nii,3'};
+matlabbatch{end}.spm.spatial.realignunwarp.data(2).pmscan = {'ABS_PATH/vdm5_run02.nii'};
+matlabbatch{end}.spm.spatial.realignunwarp.data(3).scans = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-03_bold.nii,1'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-03_bold.nii,2'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-03_bold.nii,3'};
+matlabbatch{end}.spm.spatial.realignunwarp.data(3).pmscan = {'ABS_PATH/vdm5_run03.nii'};
+matlabbatch{end}.spm.spatial.realignunwarp.data(4).scans = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-04_bold.nii,1'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-04_bold.nii,2'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-04_bold.nii,3'};
+matlabbatch{end}.spm.spatial.realignunwarp.data(4).pmscan = {'ABS_PATH/vdm5_run04.nii'};
+matlabbatch{end}.spm.spatial.realignunwarp.eoptions.quality = 0.95;
+matlabbatch{end}.spm.spatial.realignunwarp.eoptions.sep = 3;
+matlabbatch{end}.spm.spatial.realignunwarp.eoptions.rtm = 1;
+matlabbatch{end}.spm.spatial.realignunwarp.eoptions.einterp = 7;
+matlabbatch{end}.spm.spatial.realignunwarp.uwroptions.rinterp = 7;
+
+% --> Here we get 4 files usub-001_task-MGT_run-XX_bold.nii as well as 1
+% unwarped_mean_image.nii
+
+
+% 2) intersubject registration (normalization)
+
+% Note: we need to do segmentation first
+matlabbatch{end+1}.spm.tools.oldseg.data = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/anat/sub-001_T1w.nii,1'};
+
+% --> Here we get a c1sub-001_T1w.nii file that is the grey matter
+% probability map, plus the estimated transformation between anat and
+% standardized space (Old Segment actually writes this as
+% sub-001_T1w_seg_sn.mat; it is denoted transfo.nii below as a placeholder)
+
+% Coreg each sbref onto mean unwarp
+
+% --> For each run, the distortion-corrected single-band reference EPI image
+% was co-registered to the mean EPI image obtained from Realignment & Unwarping
+% using normalised mutual information.
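+
+% A minimal nipype sketch of these per-run coregistrations (one Coregister
+% per sbref, onto the Realign & Unwarp mean EPI); node and variable names
+% below are illustrative, not the actual implementation:
+
+from nipype import MapNode
+from nipype.interfaces.spm import Coregister
+
+coregister_sbref = MapNode(Coregister(), name = 'coregister_sbref', iterfield = 'source')
+coregister_sbref.inputs.cost_function = 'nmi'  # normalised mutual information
+coregister_sbref.inputs.jobtype = 'estimate'   # update headers only, no reslicing
+coregister_sbref.inputs.target = 'ABS_PATH/unwarped_mean_image.nii'
+coregister_sbref.inputs.source = sbref_files   # assumed list of the 4 run sbref paths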
+
+% Note for the Python implementation: the description implies 4 separate
+% coregistrations (one per run), not a single one; 4 coregs were implemented
+% in the Python code (cf. sketch above), matching the 4 estimate blocks below
+
+matlabbatch{end+1}.spm.spatial.coreg.estimate.ref(1) = {
+    'ABS_PATH/unwarped_mean_image.nii'
+    };
+matlabbatch{end}.spm.spatial.coreg.estimate.source(1) = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-01_sbref.nii'};
+matlabbatch{end}.spm.spatial.coreg.estimate.eoptions.cost_fun = 'nmi';
+
+matlabbatch{end+1}.spm.spatial.coreg.estimate.ref(1) = {
+    'ABS_PATH/unwarped_mean_image.nii'
+    };
+matlabbatch{end}.spm.spatial.coreg.estimate.source(1) = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-02_sbref.nii'};
+matlabbatch{end}.spm.spatial.coreg.estimate.eoptions.cost_fun = 'nmi';
+
+matlabbatch{end+1}.spm.spatial.coreg.estimate.ref(1) = {
+    'ABS_PATH/unwarped_mean_image.nii'
+    };
+matlabbatch{end}.spm.spatial.coreg.estimate.source(1) = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-03_sbref.nii'};
+matlabbatch{end}.spm.spatial.coreg.estimate.eoptions.cost_fun = 'nmi';
+
+matlabbatch{end+1}.spm.spatial.coreg.estimate.ref(1) = {
+    'ABS_PATH/unwarped_mean_image.nii'
+    };
+matlabbatch{end}.spm.spatial.coreg.estimate.source(1) = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-04_sbref.nii'};
+matlabbatch{end}.spm.spatial.coreg.estimate.eoptions.cost_fun = 'nmi';
+
+% --> Here we get 4 files sub-001_task-MGT_run-XX_sbref.nii (each keeps the
+% same name as before *but* the header has been modified to apply the
+% coregistration)
+
+
+% Note for the Python implementation: the coregs are done separately for each
+% run, therefore 'other' only includes 'usub-001_task-MGT_run-01_bold.nii'
+matlabbatch{end+1}.spm.spatial.coreg.estimate.ref(1) = {
+    'ABS_PATH/c1sub-001_T1w.nii'
+};
+matlabbatch{end}.spm.spatial.coreg.estimate.source(1) = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-01_sbref.nii'
+    };
+matlabbatch{end}.spm.spatial.coreg.estimate.other = {'usub-001_task-MGT_run-01_bold.nii'
+    'usub-001_task-MGT_run-02_bold.nii'
+    'usub-001_task-MGT_run-03_bold.nii'
+    'usub-001_task-MGT_run-04_bold.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-02_sbref.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-03_sbref.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-04_sbref.nii'
+};
+matlabbatch{end}.spm.spatial.coreg.estimate.eoptions.cost_fun = 'nmi';
+
+
+% We apply the transformation to standardized space
+
+matlabbatch{end+1}.spm.spatial.normalise.write.subj.def = {'transfo.nii'};
+matlabbatch{end}.spm.spatial.normalise.write.subj.resample = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/usub-001_task-MGT_run-01_bold.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/usub-001_task-MGT_run-02_bold.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/usub-001_task-MGT_run-03_bold.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/usub-001_task-MGT_run-04_bold.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-02_sbref.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-03_sbref.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/sub-001_task-MGT_run-04_sbref.nii'
+};
+
+% 3) spatial smoothing
+matlabbatch{end+1}.spm.spatial.smooth.data = {
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/usub-001_task-MGT_run-01_bold.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/usub-001_task-MGT_run-02_bold.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/usub-001_task-MGT_run-03_bold.nii'
+    'ABS_PATH/narps_open_pipelines/data/original/ds001734/sub-001/func/usub-001_task-MGT_run-04_bold.nii'
+};
+matlabbatch{end}.spm.spatial.smooth.fwhm = [8 8 8];
+
+% ##### 4) First-level statistical analysis
+
+% We'll reuse Python code from DC61 to generate the conditions with parametric
+% modulation
+
+def get_subject_information(event_files: list):
+    from nipype.interfaces.base import Bunch
+
+    subject_info = []
+
+    for run_id, event_file in enumerate(event_files):
+
+        onsets = []
+        durations = []
+        gain_value = []
+        loss_value = []
+        reaction_time = []
+
+        # Parse the events file
+        with open(event_file, 'rt') as file:
+            next(file)  # skip the header
+
+            for line in file:
+                info = line.strip().split()
+
+                # --> independent_vars_first_level : - event-related design with
+                # each trial modelled with a duration of 4 sec and 3 linear
+                # parametric modulators (PMs orthogonalized via de-meaning against
+                # task and preceding PMs, respectively) for gain, loss and reaction
+                # time (in that order) as given in the .tsv log files
+
+                onsets.append(float(info[0]))
+                durations.append(float(info[1]))
+                gain_value.append(float(info[2]))
+                loss_value.append(float(info[3]))
+                reaction_time.append(float(info[4]))
+
+        # Create a Bunch for the run
+        subject_info.append(
+            Bunch(
+                conditions = [f'gamble_run{run_id + 1}'],
+                onsets = [onsets],
+                durations = [durations],  # duration is 4s, as given in the events file
+                amplitudes = None,
+                tmod = None,
+                pmod = [
+                    Bunch(
+                        name = ['gain_param', 'loss_param', 'rt_param'],
+                        poly = [1, 1, 1],
+                        param = [gain_value, loss_value, reaction_time]
+                    )
+                ],
+                regressor_names = None,
+                regressors = None
+            ))
+
+    return subject_info
+
+
+matlabbatch{1}.spm.stats.fmri_spec.dir = '';
+matlabbatch{1}.spm.stats.fmri_spec.timing.units = 'secs';
+matlabbatch{1}.spm.stats.fmri_spec.timing.RT = '';  % Same as usual = TR
+matlabbatch{1}.spm.stats.fmri_spec.sess(1).scans = '';  % scans from session 1
+% Note that parametric modulation was done in the Python code above and is not repeated here
+matlabbatch{1}.spm.stats.fmri_spec.sess(1).cond = struct('name', {}, 'onset', {}, 'duration', {}, 'tmod', {}, 'pmod', {}, 'orth', {});
+matlabbatch{1}.spm.stats.fmri_spec.sess(1).multi = {''};
+matlabbatch{1}.spm.stats.fmri_spec.sess(1).regress = struct('name', {}, 'val', {});
+% --> 6 motion regressors (1st-order only) reflecting the 6 realignment parameters
+% for translation and rotation movements obtained during preprocessing
+matlabbatch{1}.spm.stats.fmri_spec.sess(1).multi_reg = {''};  % link to parameter motion files created by realign
+matlabbatch{1}.spm.stats.fmri_spec.sess(1).hpf = 128;
+matlabbatch{1}.spm.stats.fmri_spec.sess(2).scans = '';  % scans from session 2
+matlabbatch{1}.spm.stats.fmri_spec.sess(2).cond = struct('name', {}, 'onset', {}, 'duration', {}, 'tmod', {}, 'pmod', {}, 'orth', {});
+matlabbatch{1}.spm.stats.fmri_spec.sess(2).multi = {''};
+matlabbatch{1}.spm.stats.fmri_spec.sess(2).regress = struct('name', {}, 'val', {});
+matlabbatch{1}.spm.stats.fmri_spec.sess(2).multi_reg = {''};  % link to parameter motion files created by realign
+matlabbatch{1}.spm.stats.fmri_spec.sess(2).hpf = 128;
+matlabbatch{1}.spm.stats.fmri_spec.sess(3).scans = '';  % scans from session 3
+matlabbatch{1}.spm.stats.fmri_spec.sess(3).cond = struct('name', {}, 'onset', {}, 'duration', {}, 'tmod', {}, 'pmod', {}, 'orth', {});
+matlabbatch{1}.spm.stats.fmri_spec.sess(3).multi = {''};
+matlabbatch{1}.spm.stats.fmri_spec.sess(3).regress = struct('name', {}, 'val', {});
+matlabbatch{1}.spm.stats.fmri_spec.sess(3).multi_reg = {''};  % link to parameter motion files created by realign
+matlabbatch{1}.spm.stats.fmri_spec.sess(3).hpf = 128;
+matlabbatch{1}.spm.stats.fmri_spec.sess(4).scans = '';  % scans from session 4
+matlabbatch{1}.spm.stats.fmri_spec.sess(4).cond = struct('name', {}, 'onset', {}, 'duration', {}, 'tmod', {}, 'pmod', {}, 'orth', {});
+matlabbatch{1}.spm.stats.fmri_spec.sess(4).multi = {''};
+matlabbatch{1}.spm.stats.fmri_spec.sess(4).regress = struct('name', {}, 'val', {});
+matlabbatch{1}.spm.stats.fmri_spec.sess(4).multi_reg = {''};  % link to parameter motion files created by realign
+matlabbatch{1}.spm.stats.fmri_spec.sess(4).hpf = 128;
+matlabbatch{1}.spm.stats.fmri_spec.fact = struct('name', {}, 'levels', {});
+% --> canonical HRF plus temporal derivative
+matlabbatch{1}.spm.stats.fmri_spec.bases.hrf.derivs = [1 0];
+
+% These are just the defaults:
+% matlabbatch{1}.spm.stats.fmri_spec.volt = 1;
+% matlabbatch{1}.spm.stats.fmri_spec.global = 'None';
+% matlabbatch{1}.spm.stats.fmri_spec.mthresh = 0.8;
+% matlabbatch{1}.spm.stats.fmri_spec.mask = {''};
+
+% --> model_settings : 1st-level model: "AR(1) + w" autocorrelation model in
+% SPM, high-pass filter: 128 s (Note: these are the defaults in SPM)
+matlabbatch{1}.spm.stats.fmri_spec.cvi = 'AR(1)';
+
+% ##### 5) Contrast definition at the first-level
+% --> After model estimation, sum contrast images for each regressor of
+% interest [task, gain (PM1), loss (PM2) and RT (PM3)] were computed across
+% the 4 sessions in each participant.
+
+self.contrast_list = ['0001', '0002', '0003', '0004']
+self.subject_level_contrasts = [
+    ('task', 'T',
+        [f'gamble_run{r}' for r in range(1, len(self.run_list) + 1)],
+        [1]*len(self.run_list)),
+    ('effect_of_gain', 'T',
+        [f'gamble_run{r}xgain_param^1' for r in range(1, len(self.run_list) + 1)],
+        [1]*len(self.run_list)),
+    ('effect_of_loss', 'T',
+        [f'gamble_run{r}xloss_param^1' for r in range(1, len(self.run_list) + 1)],
+        [1]*len(self.run_list)),
+    ('effect_of_RT', 'T',
+        [f'gamble_run{r}xrt_param^1' for r in range(1, len(self.run_list) + 1)],
+        [1]*len(self.run_list))
+]
+
+% ##### 6) Group-level statistical analysis
+% --> A flexible factorial design was used to examine the effects of 4 factors
+% of interest [task, gain (PM1), loss (PM2) and RT (PM3); cf. description
+% above] for each of the 2 groups (Equal Indifference vs. Equal Range).
+
+% --> 2nd-level model: random-effects GLM implemented with weighted least
+% squares (via SPM's restricted maximum likelihood estimation); both
+% between-condition and between-group variances assumed to be unequal
+
+% I think this means we have a single stat model with the 4 factors and the
+% 2 groups, and that the group-level contrasts are then defined within this
+% single model.
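+
+% A hypothetical Python sketch of how the per-subject entries of the flexible
+% factorial below could be generated (each row of fsubject.conds gives the
+% [condition, group] levels of the corresponding scan); subject_list,
+% con_files and group_of are illustrative names, not actual code:
+
+# assumes con_files[subject] lists the 4 first-level con images in the order
+# task, gain, loss, RT; group_of[subject] is 1 or 2
+for index, subject in enumerate(subject_list, start = 1):
+    scans = con_files[subject]
+    # one [condition, group] row per scan, cf. the conds matrix below
+    conds = [[condition, group_of[subject]] for condition in (1, 2, 3, 4)]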
+
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fac(1).name = 'Factor';
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fac(1).dept = 0;
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fac(1).variance = 1;
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fac(1).gmsca = 0;
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fac(1).ancova = 0;
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fac(2).name = 'Group';
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fac(2).dept = 0;
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fac(2).variance = 1;
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fac(2).gmsca = 0;
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fac(2).ancova = 0;
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fsuball.fsubject.scans = '';
+matlabbatch{1}.spm.stats.factorial_design.des.fblock.fsuball.fsubject.conds = [1 1
+    2 1
+    3 1
+    4 1];
+
+
+% ##### 7) Group-level contrast
+% --> inference_contrast_effect : Linear T contrasts for the two parameters of
+% interest (PM1 indicating linear hemodynamic changes with Gain value over
+% trials within each subject, PM2 indicating such changes with Loss value) were
+% used to test for the effects specified in the 9 hypotheses given.
+
+% Columns of the group design matrix:
+% task_range gain_range loss_range RT_range task_indiff gain_indiff loss_indiff RT_indiff
+
+% H1 - Positive parametric effect of gains in the vmPFC (equal indifference group)
+% H3 - Positive parametric effect of gains in the ventral striatum (equal indifference group)
+['gain_indiff_pos', 'T', ['gain_indiff'], [1]],
+% H2 - Positive parametric effect of gains in the vmPFC (equal range group)
+% H4 - Positive parametric effect of gains in the ventral striatum (equal range group)
+['gain_range_pos', 'T', ['gain_range'], [1]],
+% H5 - Negative parametric effect of losses in the vmPFC (equal indifference group)
+['loss_indiff_neg', 'T', ['loss_indiff'], [-1]],
+% H6 - Negative parametric effect of losses in the vmPFC (equal range group)
+['loss_range_neg', 'T', ['loss_range'], [-1]],
+% H7 - Positive parametric effect of losses in the amygdala (equal indifference group)
+['loss_indiff_pos', 'T', ['loss_indiff'], [1]],
+% H8 - Positive parametric effect of losses in the amygdala (equal range group)
+['loss_range_pos', 'T', ['loss_range'], [1]],
+% H9 - Greater positive response to losses in amygdala (equal range group vs.
+% equal indifference group)
+['loss_range_pos_range_vs_indiff', 'T', ['loss_range', 'loss_indiff'], [1, -1]]
+
+% ##### 8) Inference
+% --> pval_computation : standard parametric inference
+% --> multiple_testing_correction : family-wise error correction, based on
+% Random Field Theory
diff --git a/narps_open/pipelines/team_0ED6.py b/narps_open/pipelines/team_0ED6.py
new file mode 100644
index 00000000..1553b2da
--- /dev/null
+++ b/narps_open/pipelines/team_0ED6.py
@@ -0,0 +1,857 @@
+#!/usr/bin/python
+# coding: utf-8
+
+""" Write the work of NARPS' team 0ED6 using Nipype """
+
+from os.path import join
+from itertools import product
+
+from nipype import Workflow, Node, MapNode
+from nipype.interfaces.utility import IdentityInterface, Function
+from nipype.interfaces.utility.base import Merge, Split, Select
+from nipype.interfaces.io import SelectFiles, DataSink
+from nipype.interfaces.spm import (
+    Coregister, Segment, RealignUnwarp, FieldMap, Normalize,
+    Smooth, Level1Design, OneSampleTTestDesign, TwoSampleTTestDesign,
+    EstimateModel, EstimateContrast, Threshold, ApplyVDM
+    )
+from nipype.interfaces.spm.base import Info as SPMInfo
+from nipype.algorithms.modelgen import SpecifySPMModel
+from nipype.algorithms.misc import Gunzip
+
+from narps_open.pipelines import Pipeline
+from narps_open.core.interfaces.confounds import ComputeDVARS
+from narps_open.data.task import TaskInformation
+from narps_open.data.participants import get_group
+from narps_open.utils.configuration import Configuration
+from narps_open.core.common import (
+    list_intersection, elements_in_string, clean_list
+    )
+from narps_open.core.interfaces import InterfaceFactory
+
+class PipelineTeam0ED6(Pipeline):
+    """ A class that defines the pipeline of team 0ED6. """
+
+    def __init__(self):
+        super().__init__()
+        self.fwhm = 5.0
+        self.team_id = '0ED6'
+        self.contrast_list = ['0001', '0002', '0003', '0004']
+        condition_names = ['task', 'taskxgain^1', 'taskxloss^1', 'taskxreaction_time^1']
+        self.subject_level_contrasts = [
+            ['task', 'T', condition_names, [1, 0, 0, 0]],
+            ['gain', 'T', condition_names, [0, 1, 0, 0]],
+            ['loss', 'T', condition_names, [0, 0, 1, 0]],
+            ['reaction_time', 'T', condition_names, [0, 0, 0, 1]]
+        ]
+
+    def get_preprocessing(self):
+        """
+        Create the preprocessing workflow.
+
+        Returns:
+            - preprocessing : nipype.WorkFlow
+        """
+        # Initialize preprocessing workflow to connect nodes along the way
+        preprocessing = Workflow(
+            base_dir = self.directories.working_dir,
+            name = 'preprocessing')
+
+        # IDENTITY INTERFACE - To iterate on subjects and runs
+        information_source = Node(IdentityInterface(
+            fields = ['subject_id', 'run_id']),
+            name = 'information_source')
+        information_source.iterables = [
+            ('subject_id', self.subject_list),
+            ('run_id', self.run_list)
+        ]
+
+        # SELECT FILES - Select input files
+        templates = {
+            'anat' : join('sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz'),
+            'magnitude' : join('sub-{subject_id}', 'fmap', 'sub-{subject_id}_magnitude1.nii.gz'),
+            'phase' : join('sub-{subject_id}', 'fmap', 'sub-{subject_id}_phasediff.nii.gz'),
+            'func' : join('sub-{subject_id}', 'func',
+                'sub-{subject_id}_task-MGT_run-{run_id}_bold.nii.gz'),
+            'sbref' : join('sub-{subject_id}', 'func',
+                'sub-{subject_id}_task-MGT_run-{run_id}_sbref.nii.gz')
+        }
+        select_files = Node(SelectFiles(templates), name = 'select_files')
+        select_files.inputs.base_directory = self.directories.dataset_dir
+        preprocessing.connect(information_source, 'subject_id', select_files, 'subject_id')
+        preprocessing.connect(information_source, 'run_id', select_files, 'run_id')
+
+        # GUNZIP - gunzip files because SPM does not use .nii.gz files
+        gunzip_anat = Node(Gunzip(), name = 'gunzip_anat')
+        gunzip_func = Node(Gunzip(), name = 'gunzip_func')
+        gunzip_sbref = Node(Gunzip(), name = 'gunzip_sbref')
+        gunzip_magnitude = Node(Gunzip(), name = 'gunzip_magnitude')
+        gunzip_phase = Node(Gunzip(), name = 'gunzip_phase')
+        preprocessing.connect(select_files, 'anat', gunzip_anat, 'in_file')
+        preprocessing.connect(select_files, 'func', gunzip_func, 'in_file')
+        preprocessing.connect(select_files, 'sbref', gunzip_sbref, 'in_file')
+        preprocessing.connect(select_files, 'magnitude', gunzip_magnitude, 'in_file')
+        preprocessing.connect(select_files, 'phase', gunzip_phase, 'in_file')
+
+        # IMCALC - Compute difference between the two magnitude maps
+        # (left as a placeholder, not implemented here)
+
+        # FIELD MAP - Compute a voxel displacement map from magnitude and phase data
+        fieldmap = Node(FieldMap(), name = 'fieldmap')
+        fieldmap.inputs.blip_direction = -1
+        fieldmap.inputs.echo_times = (4.92, 7.38)
+        fieldmap.inputs.total_readout_time = 29.15
+        fieldmap.inputs.template = join(SPMInfo.getinfo()['path'], 'toolbox', 'FieldMap', 'T1.nii')
+        preprocessing.connect(gunzip_magnitude, 'out_file', fieldmap, 'magnitude_file')
+        preprocessing.connect(gunzip_phase, 'out_file', fieldmap, 'phase_file')
+        preprocessing.connect(gunzip_sbref, 'out_file', fieldmap, 'epi_file')
+        preprocessing.connect(gunzip_anat, 'out_file', fieldmap, 'anat_file')
+
+        # APPLYVDM - Apply fieldmap to single band reference EPI (sbref)
+        apply_fieldmap = Node(ApplyVDM(), name = 'apply_fieldmap')
+        apply_fieldmap.inputs.distortion_direction = 2
+        apply_fieldmap.inputs.write_which = [2, 0]
+        apply_fieldmap.inputs.interpolation = 7
+        preprocessing.connect(gunzip_sbref, 'out_file', apply_fieldmap, 'in_files')
+        preprocessing.connect(fieldmap, 'vdm', apply_fieldmap, 'vdmfile')
+
+        # MERGE - Merge files for the realign & unwarp node into one input.
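+        # Note: the sbref is listed first, so realign_unwarp receives
+        # [sbref, func] in that order; the Split nodes below rely on this ordering.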
+ merge_sbref_func = Node(Merge(2), name = 'merge_sbref_func') + merge_sbref_func.inputs.ravel_inputs = True + preprocessing.connect(gunzip_sbref, 'out_file', merge_sbref_func, 'in1') + preprocessing.connect(gunzip_func, 'out_file', merge_sbref_func, 'in2') + + # REALIGN UNWARP + realign_unwarp = MapNode(RealignUnwarp(), name = 'realign_unwarp', iterfield = 'in_files') + realign_unwarp.inputs.quality = 0.95 + realign_unwarp.inputs.separation = 3 + realign_unwarp.inputs.fwhm = 5 + realign_unwarp.inputs.register_to_mean = True + realign_unwarp.inputs.interp = 7 + realign_unwarp.inputs.wrap = [0, 0, 0] + realign_unwarp.inputs.weight = '' + realign_unwarp.inputs.est_basis_func = [12, 12] + realign_unwarp.inputs.est_reg_order = 1 + realign_unwarp.inputs.est_reg_factor = 100000 + realign_unwarp.inputs.est_jacobian_deformations = False + realign_unwarp.inputs.est_first_order_effects = [4, 5] + realign_unwarp.inputs.est_second_order_effects = [] + realign_unwarp.inputs.est_unwarp_fwhm = 4 + realign_unwarp.inputs.est_re_est_mov_par = True + realign_unwarp.inputs.est_num_of_iterations = [5] + realign_unwarp.inputs.est_taylor_expansion_point = 'Average' + realign_unwarp.inputs.reslice_which = [1, 1] + realign_unwarp.inputs.reslice_interp = 7 + realign_unwarp.inputs.reslice_wrap = [0, 0, 0] + realign_unwarp.inputs.reslice_mask = True + realign_unwarp.inputs.out_prefix = 'u' + preprocessing.connect(fieldmap, 'vdm', realign_unwarp, 'phase_map') + preprocessing.connect(merge_sbref_func, 'out', realign_unwarp, 'in_files') + + # SPLIT - Split the mean outputs of realign_unwarp + # * realigned+unwarped sbref mean + # * realigned+unwarped func mean + split_realign_unwarp_means = Node(Split(), name = 'split_realign_unwarp_means') + split_realign_unwarp_means.inputs.splits = [1, 1] # out1 is sbref; out2 is func + split_realign_unwarp_means.inputs.squeeze = True # Unfold one-element splits + preprocessing.connect(realign_unwarp, 'mean_image', split_realign_unwarp_means, 'inlist') + + # SPLIT - Split the output of realign_unwarp + # * realigned+unwarped sbref + # * realigned+unwarped func + split_realign_unwarp_outputs = Node(Split(), name = 'split_realign_unwarp_outputs') + split_realign_unwarp_outputs.inputs.splits = [1, 1] # out1 is sbref; out2 is func + split_realign_unwarp_outputs.inputs.squeeze = True # Unfold one-element splits + preprocessing.connect( + realign_unwarp, 'realigned_unwarped_files', + split_realign_unwarp_outputs, 'inlist') + + # COREGISTER - Coregister sbref to realigned and unwarped mean image of func + coregister_sbref_to_func = Node(Coregister(), name = 'coregister_sbref_to_func') + coregister_sbref_to_func.inputs.cost_function = 'nmi' + coregister_sbref_to_func.inputs.separation = [4, 2] + coregister_sbref_to_func.inputs.ctolerance = [ + 0.02, 0.02, 0.02, 0.001, 0.001, 0.001, 0.01, 0.01, 0.01, 0.001, 0.001, 0.001] + coregister_sbref_to_func.inputs.fwhm = [7, 7] + coregister_sbref_to_func.inputs.jobtype = 'estimate' + preprocessing.connect( + split_realign_unwarp_means, 'out2', # mean func + coregister_sbref_to_func, 'target') + preprocessing.connect( + split_realign_unwarp_outputs, 'out1', # sbref + coregister_sbref_to_func, 'source') + + # MERGE - Merge func + func mean image files for the coregister_sbref_to_anat + # node into one input. 
+        merge_func_before_coregister = Node(Merge(2), name = 'merge_func_before_coregister')
+        merge_func_before_coregister.inputs.ravel_inputs = True
+        preprocessing.connect( # out2 is func
+            split_realign_unwarp_outputs, 'out2', merge_func_before_coregister, 'in1')
+        preprocessing.connect( # out2 is func mean
+            split_realign_unwarp_means, 'out2', merge_func_before_coregister, 'in2')
+
+        # COREGISTER - Coregister sbref to anat
+        coregister_sbref_to_anat = Node(Coregister(), name = 'coregister_sbref_to_anat')
+        coregister_sbref_to_anat.inputs.cost_function = 'nmi'
+        coregister_sbref_to_anat.inputs.separation = [4, 2]
+        coregister_sbref_to_anat.inputs.ctolerance = [
+            0.02, 0.02, 0.02, 0.001, 0.001, 0.001, 0.01, 0.01, 0.01, 0.001, 0.001, 0.001]
+        coregister_sbref_to_anat.inputs.fwhm = [7, 7]
+        coregister_sbref_to_anat.inputs.jobtype = 'estimate'
+        coregister_sbref_to_anat.inputs.target = join(
+            SPMInfo.getinfo()['path'], 'toolbox', 'OldSeg', 'grey.nii')
+        preprocessing.connect(
+            coregister_sbref_to_func, 'coregistered_source', coregister_sbref_to_anat, 'source')
+        preprocessing.connect(# out[0] is func, out[1] is func mean
+            merge_func_before_coregister, 'out', coregister_sbref_to_anat, 'apply_to_files')
+
+        # SEGMENT - First step of sbref normalization.
+        segmentation_sbref = Node(Segment(), name = 'segmentation_sbref')
+        segmentation_sbref.inputs.csf_output_type = [False, False, False] # No output for CSF
+        segmentation_sbref.inputs.gm_output_type = [False, False, False] # No output for GM
+        segmentation_sbref.inputs.wm_output_type = [False, False, False] # No output for WM
+        segmentation_sbref.inputs.save_bias_corrected = False
+        segmentation_sbref.inputs.warp_frequency_cutoff = 45
+        segmentation_sbref.inputs.sampling_distance = 2.0
+        preprocessing.connect(
+            coregister_sbref_to_anat, 'coregistered_source', segmentation_sbref, 'data')
+
+        # NORMALIZE - Deformation computed by the segmentation_sbref step is applied to
+        # the func and the mean func.
+        normalize = Node(Normalize(), name = 'normalize')
+        normalize.inputs.write_voxel_sizes = [2, 2, 2]
+        normalize.inputs.jobtype = 'write' # Estimation was done in the previous step
+        preprocessing.connect(
+            segmentation_sbref, 'transformation_mat', normalize, 'parameter_file')
+        preprocessing.connect(# coregistered_files[0] is func, coregistered_files[1] is func mean
+            coregister_sbref_to_anat, 'coregistered_files', normalize, 'apply_to_files')
+
+        # SMOOTH - Spatial smoothing of fMRI data
+        smoothing = Node(Smooth(), name = 'smoothing')
+        smoothing.inputs.fwhm = [self.fwhm] * 3
+        smoothing.inputs.data_type = 0
+        smoothing.inputs.implicit_masking = False
+        smoothing.inputs.prefix = 's'
+        preprocessing.connect(normalize, 'normalized_files', smoothing, 'in_files')
+
+        # SELECT - Select the smoothed func.
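+        # Note: smoothed_files keeps the order set by merge_func_before_coregister:
+        # index 0 is the 4D func, index 1 is the mean func.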
+        select_func = Node(Select(), name = 'select_func')
+        select_func.inputs.index = [0] # func file
+        preprocessing.connect(smoothing, 'smoothed_files', select_func, 'inlist')
+
+        # COMPUTE DVARS - Identify corrupted time-points from func
+        compute_dvars = Node(ComputeDVARS(), name = 'compute_dvars')
+        compute_dvars.inputs.out_file_name = 'dvars_out'
+        preprocessing.connect(select_func, 'out', compute_dvars, 'in_file')
+
+        # DATA SINK - store the wanted results in the wanted repository
+        data_sink = Node(DataSink(), name = 'data_sink')
+        data_sink.inputs.base_directory = self.directories.output_dir
+        preprocessing.connect(smoothing, 'smoothed_files', data_sink, 'preprocessing.@smoothed')
+        preprocessing.connect(
+            realign_unwarp, 'realignment_parameters',
+            data_sink, 'preprocessing.@realignement_parameters')
+        preprocessing.connect(
+            compute_dvars, 'dvars_out_file', data_sink, 'preprocessing.@dvars_out_file')
+        preprocessing.connect(
+            compute_dvars, 'inference_out_file', data_sink, 'preprocessing.@inference_out_file')
+
+        # Remove large files, if requested
+        if Configuration()['pipelines']['remove_unused_data']:
+
+            # MERGE - Merge all temporary outputs once they are no longer needed
+            merge_temp_files = Node(Merge(8), name = 'merge_temp_files')
+            preprocessing.connect(gunzip_anat, 'out_file', merge_temp_files, 'in1')
+            preprocessing.connect(gunzip_func, 'out_file', merge_temp_files, 'in2')
+            preprocessing.connect(gunzip_sbref, 'out_file', merge_temp_files, 'in3')
+            preprocessing.connect(gunzip_magnitude, 'out_file', merge_temp_files, 'in4')
+            preprocessing.connect(gunzip_phase, 'out_file', merge_temp_files, 'in5')
+            preprocessing.connect(
+                realign_unwarp, 'realigned_unwarped_files',
+                merge_temp_files, 'in6')
+            preprocessing.connect(normalize, 'normalized_files', merge_temp_files, 'in7')
+            preprocessing.connect(
+                coregister_sbref_to_anat, 'coregistered_files', merge_temp_files, 'in8')
+
+            # FUNCTION - Remove gunzipped files once they are no longer needed
+            remove_gunziped = MapNode(
+                InterfaceFactory.create('remove_parent_directory'),
+                name = 'remove_gunziped',
+                iterfield = 'file_name'
+            )
+            preprocessing.connect(merge_temp_files, 'out', remove_gunziped, 'file_name')
+            preprocessing.connect(data_sink, 'out_file', remove_gunziped, '_')
+
+        return preprocessing
+
+    def get_preprocessing_outputs(self):
+        """ Return the names of the files the preprocessing is supposed to generate. """
+
+        output_dir = join(self.directories.output_dir, 'preprocessing',
+            '_run_id_{run_id}_subject_id_{subject_id}')
+
+        # Smoothing outputs
+        templates = [
+            join(output_dir, 'swusub-{subject_id}_task-MGT_run-{run_id}_bold.nii'),
+            join(output_dir, 'swmeanusub-{subject_id}_task-MGT_run-{run_id}_bold.nii')
+        ]
+
+        # DVARS outputs
+        templates += [
+            join(output_dir, 'dvars_out_DVARS.tsv'),
+            join(output_dir, 'dvars_out_Inference.tsv')
+        ]
+
+        # Realignment parameters
+        templates += [
+            join(output_dir, '_realign_unwarp0',
+                'rp_sub-{subject_id}_task-MGT_run-{run_id}_sbref.txt'),
+            join(output_dir, '_realign_unwarp1',
+                'rp_sub-{subject_id}_task-MGT_run-{run_id}_bold.txt')
+        ]
+
+        # Format with subject_ids and run_ids
+        return [t.format(subject_id = s, run_id = r)
+            for t in templates for s in self.subject_list for r in self.run_list]
+
+    def get_run_level_analysis(self):
+        """ No run level analysis has been done by team 0ED6 """
+        return None
+
+    def get_subject_information(event_file):
+        """
+        Create Bunchs for specifySPMModel, from data extracted from an event_file.
+
+        Parameters :
+        - event_file: str, path to the event file for one run of the subject
+
+        Returns :
+        - subject_information: Bunch, relevant event information for subject level analysis.
+        """
+        from nipype.interfaces.base import Bunch
+
+        # Initialize empty lists to store event information
+        onsets = []
+        durations = []
+        gain_value = []
+        loss_value = []
+        reaction_time = []
+
+        with open(event_file, 'rt') as file:
+            next(file)  # skip the header
+
+            for line in file:
+                info = line.strip().split()
+
+                onsets.append(float(info[0]))
+                durations.append(float(info[1]))
+                gain_value.append(float(info[2]))
+                loss_value.append(float(info[3]))
+                reaction_time.append(float(info[4]))
+
+        # TODO : SPM automatically mean-centers regressors ???
+        # TODO : SPM automatically orthogonalizes regressors ???
+
+        # Fill Bunch
+        return Bunch(
+            conditions = ['task'],
+            onsets = [onsets],
+            durations = [durations],
+            amplitudes = None,
+            tmod = None,
+            pmod = [
+                Bunch(
+                    name = ['gain', 'loss', 'reaction_time'],
+                    poly = [1, 1, 1],
+                    param = [gain_value, loss_value, reaction_time]
+                )
+            ],
+            regressor_names = None,
+            regressors = None
+        )
+
+    def get_confounds_file(
+        dvars_file: str,
+        dvars_inference_file: str,
+        realignement_parameters: str,
+        subject_id: str,
+        run_id: str) -> str:
+        """
+        Create a tsv file with only desired confounds per subject per run.
+
+        Parameters :
+        - dvars_file: str, path to the output values of DVARS computation
+        - dvars_inference_file: str, path to the output values of DVARS computation (inference)
+        - realignement_parameters : path to the realignment parameters file
+        - subject_id : related subject id
+        - run_id : related run id
+
+        Return :
+        - confounds_file : path to new file containing only desired confounds
+        """
+        from os.path import abspath
+
+        from pandas import DataFrame, read_csv
+        from numpy import array, insert, c_
+
+        # Get the dataframe containing the 6 head motion parameter regressors
+        realign_array = array(read_csv(realignement_parameters, sep = r'\s+', header = None))
+        nb_time_points = realign_array.shape[0]
+
+        # Get the dataframes containing dvars values
+        dvars_data_frame = read_csv(dvars_file, sep = '\t', header = 0)
+        dvars_inference_data_frame = read_csv(dvars_inference_file, sep = '\t', header = 0)
+
+        # Create a "corrupted points" regressor as indicated in the DVARS repo
+        # find(Stat.pvals<0.05./(T-1) & Stat.DeltapDvar>5) %print corrupted DVARS data-points
+        dvars_regressor = insert(array(
+            (dvars_inference_data_frame['Pval'] < (0.05/(nb_time_points-1))) \
+            & (dvars_data_frame['DeltapDvar'] > 5.0)),
+            0, 0, axis = 0) # Add a value of 0 at the beginning (first frame)
+
+        # Concatenate all parameters
+        retained_parameters = DataFrame(c_[realign_array, dvars_regressor])
+
+        # Write confounds to a file
+        confounds_file = abspath(f'confounds_file_sub-{subject_id}_run-{run_id}.tsv')
+        with open(confounds_file, 'w', encoding = 'utf-8') as writer:
+            writer.write(retained_parameters.to_csv(
+                sep = '\t', index = False, header = False, na_rep = '0.0'))
+
+        return confounds_file
+
+    def get_subject_level_analysis(self):
+        """
+        Create the subject level analysis workflow.
+
+        Returns:
+            - subject_level : nipype.WorkFlow
+        WARNING: the name attribute of the workflow is 'subject_level_analysis'
+        """
+        # Create subject level analysis workflow
+        subject_level = Workflow(
+            base_dir = self.directories.working_dir,
+            name = 'subject_level_analysis')
+
+        # IDENTITY INTERFACE - To iterate on subjects
+        information_source = Node(IdentityInterface(
+            fields = ['subject_id']),
+            name = 'information_source')
+        information_source.iterables = [('subject_id', self.subject_list)]
+
+        # SELECT FILES - to select necessary files
+        templates = {
+            'dvars_file' : join(self.directories.output_dir, 'preprocessing',
+                '_run_id_*_subject_id_{subject_id}',
+                'dvars_out_DVARS.tsv'),
+            'dvars_inference_file' : join(self.directories.output_dir, 'preprocessing',
+                '_run_id_*_subject_id_{subject_id}',
+                'dvars_out_Inference.tsv'),
+            'realignement_parameters' : join(self.directories.output_dir, 'preprocessing',
+                '_run_id_*_subject_id_{subject_id}', '_realign_unwarp1',
+                'rp_sub-{subject_id}_task-MGT_run-*_bold.txt'),
+            'func' : join(self.directories.output_dir, 'preprocessing',
+                '_run_id_*_subject_id_{subject_id}', 'swusub-{subject_id}_task-MGT_run-*_bold.nii'),
+            'event' : join('sub-{subject_id}', 'func',
+                'sub-{subject_id}_task-MGT_run-*_events.tsv')
+        }
+        select_files = Node(SelectFiles(templates), name = 'select_files')
+        select_files.inputs.base_directory = self.directories.dataset_dir
+        subject_level.connect(information_source, 'subject_id', select_files, 'subject_id')
+
+        # FUNCTION get_subject_information - build Bunches of event data
+        subject_information = MapNode(Function(
+            function = self.get_subject_information,
+            input_names = ['event_file'],
+            output_names = ['subject_info']),
+            iterfield = 'event_file',
+            name = 'subject_information')
+        subject_level.connect(select_files, 'event', subject_information, 'event_file')
+
+        # FUNCTION node get_confounds_file - generate files with confounds data
+        confounds = MapNode(
+            Function(
+                function = self.get_confounds_file,
+                input_names = ['dvars_file', 'dvars_inference_file', 'realignement_parameters',
+                    'subject_id', 'run_id'],
+                output_names = ['confounds_file']
+            ),
+            name = 'confounds',
+            iterfield = ['dvars_file', 'dvars_inference_file', 'realignement_parameters',
+                'run_id'])
+        confounds.inputs.run_id = self.run_list
+        subject_level.connect(information_source, 'subject_id', confounds, 'subject_id')
+        subject_level.connect(select_files, 'dvars_file', confounds, 'dvars_file')
+        subject_level.connect(
+            select_files, 'dvars_inference_file', confounds, 'dvars_inference_file')
+        subject_level.connect(
+            select_files, 'realignement_parameters', confounds, 'realignement_parameters')
+
+        # SPECIFY MODEL - generates SPM-specific Model
+        specify_model = Node(SpecifySPMModel(), name = 'specify_model')
+        specify_model.inputs.input_units = 'secs'
+        specify_model.inputs.output_units = 'secs'
+        specify_model.inputs.time_repetition = TaskInformation()['RepetitionTime']
+        specify_model.inputs.high_pass_filter_cutoff = 128
+        subject_level.connect(select_files, 'func', specify_model, 'functional_runs')
+        subject_level.connect(confounds, 'confounds_file', specify_model, 'realignment_parameters')
+        subject_level.connect(subject_information, 'subject_info', specify_model, 'subject_info')
+
+        # LEVEL 1 DESIGN - Generates an SPM design matrix
+        model_design = Node(Level1Design(), name = 'model_design')
+        model_design.inputs.bases = {'hrf': {'derivs': [1,0]}} # Temporal derivative
+        model_design.inputs.timing_units = 'secs'
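+        # model_serial_correlations is left unset, i.e. SPM's default AR(1)
+        # model applies; nipype's Level1Design exposes this input if it ever
+        # needs to be set explicitly.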
+        model_design.inputs.interscan_interval = TaskInformation()['RepetitionTime']
+        subject_level.connect(specify_model, 'session_info', model_design, 'session_info')
+
+        # ESTIMATE MODEL - estimate the parameters of the model
+        model_estimate = Node(EstimateModel(), name = 'model_estimate')
+        model_estimate.inputs.estimation_method = {'Classical': 1}
+        subject_level.connect(model_design, 'spm_mat_file', model_estimate, 'spm_mat_file')
+
+        # ESTIMATE CONTRAST - estimates contrasts
+        contrast_estimate = Node(EstimateContrast(), name = 'contrast_estimate')
+        contrast_estimate.inputs.contrasts = self.subject_level_contrasts
+        subject_level.connect([
+            (model_estimate, contrast_estimate, [
+                ('spm_mat_file', 'spm_mat_file'),
+                ('beta_images', 'beta_images'),
+                ('residual_image', 'residual_image')
+            ])
+        ])
+
+        # DATA SINK - store the wanted results in the wanted repository
+        data_sink = Node(DataSink(), name = 'data_sink')
+        data_sink.inputs.base_directory = self.directories.output_dir
+        subject_level.connect([
+            (contrast_estimate, data_sink, [
+                ('con_images', 'subject_level_analysis.@con_images'),
+                ('spmT_images', 'subject_level_analysis.@spmT_images'),
+                ('spm_mat_file', 'subject_level_analysis.@spm_mat_file')
+            ])
+        ])
+
+        return subject_level
+
+    def get_subject_level_outputs(self):
+        """ Return the names of the files the subject level analysis is supposed to generate. """
+
+        # Contrast maps
+        templates = [join(
+            self.directories.output_dir,
+            'subject_level_analysis', '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\
+            for contrast_id in self.contrast_list]
+
+        # SPM.mat file
+        templates += [join(
+            self.directories.output_dir,
+            'subject_level_analysis', '_subject_id_{subject_id}', 'SPM.mat')]
+
+        # spmT maps
+        templates += [join(
+            self.directories.output_dir,
+            'subject_level_analysis', '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\
+            for contrast_id in self.contrast_list]
+
+        # Format with subject_ids
+        return_list = []
+        for template in templates:
+            return_list += [template.format(subject_id = s) for s in self.subject_list]
+
+        return return_list
+
+    def get_group_level_analysis(self):
+        """
+        Return all workflows for the group level analysis.
+
+        Returns:
+            - a list of nipype.WorkFlow
+        """
+
+        methods = ['equalRange', 'equalIndifference', 'groupComp']
+        return [self.get_group_level_analysis_sub_workflow(method) for method in methods]
+
+    def get_group_level_analysis_sub_workflow(self, method):
+        """
+        Return a workflow for the group level analysis.
+
+        Parameters:
+            - method: one of 'equalRange', 'equalIndifference' or 'groupComp'
+
+        Returns:
+            - group_level_analysis: nipype.WorkFlow
+        """
+        # Compute the number of participants used to do the analysis
+        nb_subjects = len(self.subject_list)
+
+        # Infosource - a function free node to iterate over the list of contrast ids
+        information_source = Node(
+            IdentityInterface(
+                fields=['contrast_id']),
+                name='information_source')
+        information_source.iterables = [('contrast_id', self.contrast_list)]
+
+        # SelectFiles
+        templates = {
+            # Contrasts for all participants
+            'contrasts' : join(self.directories.output_dir,
+                'subject_level_analysis', '_subject_id_*', 'con_{contrast_id}.nii')
+        }
+
+        select_files = Node(SelectFiles(templates), name = 'select_files')
+        select_files.inputs.base_directory = self.directories.results_dir
+        select_files.inputs.force_lists = True
+
+        # Datasink - save important files
+        data_sink = Node(DataSink(), name = 'data_sink')
+        data_sink.inputs.base_directory = self.directories.output_dir
+
+        # Function Node get_equal_range_subjects
+        #   Get subjects in the equalRange group and in the subject_list
+        get_equal_range_subjects = Node(Function(
+            function = list_intersection,
+            input_names = ['list_1', 'list_2'],
+            output_names = ['out_list']
+            ),
+            name = 'get_equal_range_subjects'
+        )
+        get_equal_range_subjects.inputs.list_1 = get_group('equalRange')
+        get_equal_range_subjects.inputs.list_2 = self.subject_list
+
+        # Function Node get_equal_indifference_subjects
+        #   Get subjects in the equalIndifference group and in the subject_list
+        get_equal_indifference_subjects = Node(Function(
+            function = list_intersection,
+            input_names = ['list_1', 'list_2'],
+            output_names = ['out_list']
+            ),
+            name = 'get_equal_indifference_subjects'
+        )
+        get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference')
+        get_equal_indifference_subjects.inputs.list_2 = self.subject_list
+
+        # Create a function to complete the subject ids coming from the get_equal_*_subjects nodes
+        # If not complete, subject id '001' in search patterns
+        # would match all contrast files with 'con_0001.nii'.
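+        # e.g. complete_subject_ids(['001', '002'])
+        #   -> ['_subject_id_001', '_subject_id_002']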
+ complete_subject_ids = lambda l : [f'_subject_id_{a}' for a in l] + + # Function Node elements_in_string + # Get contrast files for required subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_contrasts = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_contrasts', iterfield = 'input_str' + ) + + # Estimate model + estimate_model = Node(EstimateModel(), name = 'estimate_model') + estimate_model.inputs.estimation_method = {'Classical':1} + + # Estimate contrasts + estimate_contrast = Node(EstimateContrast(), name = 'estimate_contrast') + estimate_contrast.inputs.group_contrast = True + + ## Create thresholded maps + threshold = MapNode(Threshold(), + name = 'threshold', + iterfield = ['stat_image', 'contrast_index']) + threshold.inputs.use_fwe_correction = False + threshold.inputs.height_threshold = 0.001 + threshold.inputs.extent_fdr_p_threshold = 0.05 + threshold.inputs.use_topo_fdr = False + threshold.inputs.force_activation = True + threshold.synchronize = True + + group_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = f'group_level_analysis_{method}_nsub_{nb_subjects}') + group_level_analysis.connect([ + (information_source, select_files, [('contrast_id', 'contrast_id')]), + (select_files, get_contrasts, [('contrasts', 'input_str')]), + (estimate_model, estimate_contrast, [ + ('spm_mat_file', 'spm_mat_file'), + ('residual_image', 'residual_image'), + ('beta_images', 'beta_images')]), + (estimate_contrast, threshold, [ + ('spm_mat_file', 'spm_mat_file'), + ('spmT_images', 'stat_image')]), + (estimate_model, data_sink, [ + ('mask_image', f'group_level_analysis_{method}_nsub_{nb_subjects}.@mask')]), + (estimate_contrast, data_sink, [ + ('spm_mat_file', f'group_level_analysis_{method}_nsub_{nb_subjects}.@spm_mat'), + ('spmT_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@T'), + ('con_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@con')]), + (threshold, data_sink, [ + ('thresholded_map', f'group_level_analysis_{method}_nsub_{nb_subjects}.@thresh')])]) + + if method in ('equalRange', 'equalIndifference'): + estimate_contrast.inputs.contrasts = [ + ('Group', 'T', ['mean'], [1]), + ('Group', 'T', ['mean'], [-1]) + ] + + threshold.inputs.contrast_index = [1, 2] + + # Specify design matrix + one_sample_t_test_design = Node(OneSampleTTestDesign(), + name = 'one_sample_t_test_design') + group_level_analysis.connect([ + (get_contrasts, one_sample_t_test_design, [ + (('out_list', clean_list), 'in_files') + ]), + (one_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')]) + ]) + + if method == 'equalRange': + group_level_analysis.connect([ + (get_equal_range_subjects, get_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]) + ]) + + elif method == 'equalIndifference': + group_level_analysis.connect([ + (get_equal_indifference_subjects, get_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]) + ]) + + elif method == 'groupComp': + estimate_contrast.inputs.contrasts = [ + ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1]) + ] + + threshold.inputs.contrast_index = [1] + + # Function Node elements_in_string + # Get contrast files for required subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_contrasts_2 = 
+            MapNode(Function(
+                function = elements_in_string,
+                input_names = ['input_str', 'elements'],
+                output_names = ['out_list']
+                ),
+                name = 'get_contrasts_2', iterfield = 'input_str'
+            )
+
+            # Specify design matrix
+            two_sample_t_test_design = Node(TwoSampleTTestDesign(),
+                name = 'two_sample_t_test_design')
+            two_sample_t_test_design.inputs.unequal_variance = True
+
+            group_level_analysis.connect([
+                (select_files, get_contrasts_2, [('contrasts', 'input_str')]),
+                (get_equal_range_subjects, get_contrasts, [
+                    (('out_list', complete_subject_ids), 'elements')
+                ]),
+                (get_equal_indifference_subjects, get_contrasts_2, [
+                    (('out_list', complete_subject_ids), 'elements')
+                ]),
+                (get_contrasts, two_sample_t_test_design, [
+                    (('out_list', clean_list), 'group1_files')
+                ]),
+                (get_contrasts_2, two_sample_t_test_design, [
+                    (('out_list', clean_list), 'group2_files')
+                ]),
+                (two_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')])
+            ])
+
+        return group_level_analysis
+
+    def get_group_level_outputs(self):
+        """ Return all names for the files the group level analysis is supposed to generate. """
+
+        # Handle equalRange and equalIndifference
+        parameters = {
+            'contrast_id': self.contrast_list,
+            'method': ['equalRange', 'equalIndifference'],
+            'file': [
+                'con_0001.nii', 'con_0002.nii', 'mask.nii', 'SPM.mat',
+                'spmT_0001.nii', 'spmT_0002.nii',
+                join('_threshold0', 'spmT_0001_thr.nii'), join('_threshold1', 'spmT_0002_thr.nii')
+            ],
+            'nb_subjects' : [str(len(self.subject_list))]
+        }
+        parameter_sets = product(*parameters.values())
+        template = join(
+            self.directories.output_dir,
+            'group_level_analysis_{method}_nsub_{nb_subjects}',
+            '_contrast_id_{contrast_id}',
+            '{file}'
+            )
+
+        return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\
+            for parameter_values in parameter_sets]
+
+        # Handle groupComp
+        parameters = {
+            'contrast_id': self.contrast_list,
+            'method': ['groupComp'],
+            'file': [
+                'con_0001.nii', 'mask.nii', 'SPM.mat', 'spmT_0001.nii',
+                join('_threshold0', 'spmT_0001_thr.nii')
+            ],
+            'nb_subjects' : [str(len(self.subject_list))]
+        }
+        parameter_sets = product(*parameters.values())
+        template = join(
+            self.directories.output_dir,
+            'group_level_analysis_{method}_nsub_{nb_subjects}',
+            '_contrast_id_{contrast_id}',
+            '{file}'
+            )
+
+        return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\
+            for parameter_values in parameter_sets]
+
+        return return_list
+
+    def get_hypotheses_outputs(self):
+        """ Return all hypotheses output file names.
+        Note that hypotheses 5 to 8 correspond to the maps given by the team in their results;
+        but they are not fully consistent with the hypotheses definitions as expected by NARPS.
+ """ + nb_sub = len(self.subject_list) + files = [ + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold1', 'spmT_0002_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0002.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold1', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0002_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0002.nii'), + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii') + ] + return [join(self.directories.output_dir, f) for f in files] diff --git a/narps_open/pipelines/team_R5K7.py b/narps_open/pipelines/team_R5K7.py new file mode 100644 index 00000000..8148fa22 --- /dev/null +++ b/narps_open/pipelines/team_R5K7.py @@ -0,0 +1,692 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Write the work of NARPS' team R5K7 using Nipype """ + +from os.path import join +from itertools import product + +from nipype import Workflow, Node, MapNode +from nipype.interfaces.utility import IdentityInterface, Function +from nipype.interfaces.utility.base import Merge, Split, Select +from nipype.interfaces.io import SelectFiles, DataSink +from nipype.interfaces.spm import ( + Coregister, Segment, RealignUnwarp, FieldMap, Normalize, + Smooth, Level1Design, FactorialDesign, + EstimateModel, EstimateContrast, Threshold, ApplyVDM + ) +from nipype.interfaces.spm.base import Info as SPMInfo +from nipype.algorithms.modelgen import SpecifySPMModel +from nipype.algorithms.misc import Gunzip + +from narps_open.pipelines import Pipeline +from narps_open.core.interfaces.confounds import ComputeDVARS +from narps_open.data.task import TaskInformation +from narps_open.data.participants import get_group +from narps_open.utils.configuration import Configuration +from narps_open.core.common import ( + list_intersection, elements_in_string, clean_list + ) +from narps_open.core.interfaces import InterfaceFactory + +class PipelineTeamR5K7(Pipeline): + """ A class that defines the pipeline of team 
+    R5K7. """
+
+    def __init__(self):
+        super().__init__()
+        self.fwhm = 8.0
+        self.team_id = 'R5K7'
+        self.contrast_list = ['0001', '0002', '0003', '0004']
+        condition_names = ['task', 'taskxgain^1', 'taskxloss^1', 'taskxreaction_time^1']
+        self.subject_level_contrasts = [
+            ['task', 'T', condition_names, [1, 0, 0, 0]],
+            ['effect_of_gain', 'T', condition_names, [0, 1, 0, 0]],
+            ['effect_of_loss', 'T', condition_names, [0, 0, 1, 0]],
+            ['effect_of_RT', 'T', condition_names, [0, 0, 0, 1]]
+        ]
+
+    def get_preprocessing(self):
+        """
+        Create the preprocessing workflow.
+
+        Returns:
+            - preprocessing : nipype.WorkFlow
+        """
+        # Initialize preprocessing workflow to connect nodes along the way
+        preprocessing = Workflow(
+            base_dir = self.directories.working_dir,
+            name = 'preprocessing')
+
+        # IDENTITY INTERFACE - To iterate on subjects
+        information_source_subjects = Node(IdentityInterface(
+            fields = ['subject_id']),
+            name = 'information_source_subjects')
+        information_source_subjects.iterables = [('subject_id', self.subject_list)]
+
+        # IDENTITY INTERFACE - To iterate on subjects and runs
+        information_source_runs = Node(IdentityInterface(
+            fields = ['subject_id', 'run_id']),
+            name = 'information_source_runs')
+        information_source_runs.iterables = [('run_id', self.run_list)]
+        preprocessing.connect(
+            information_source_subjects, 'subject_id', information_source_runs, 'subject_id')
+
+        # SELECT FILES - Select input files per subject
+        templates = {
+            'anat' : join('sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz'),
+            'magnitude' : join('sub-{subject_id}', 'fmap', 'sub-{subject_id}_magnitude1.nii.gz'),
+            'phase' : join('sub-{subject_id}', 'fmap', 'sub-{subject_id}_phasediff.nii.gz')
+        }
+        select_subject_files = Node(SelectFiles(templates), name = 'select_subject_files')
+        select_subject_files.inputs.base_directory = self.directories.dataset_dir
+        preprocessing.connect(
+            information_source_subjects, 'subject_id', select_subject_files, 'subject_id')
+
+        # SELECT FILES - Select input files per run
+        templates = {
+            'func' : join('sub-{subject_id}', 'func',
+                'sub-{subject_id}_task-MGT_run-{run_id}_bold.nii.gz'),
+            'sbref' : join('sub-{subject_id}', 'func',
+                'sub-{subject_id}_task-MGT_run-{run_id}_sbref.nii.gz')
+        }
+        select_run_files = Node(SelectFiles(templates), name = 'select_run_files')
+        select_run_files.inputs.base_directory = self.directories.dataset_dir
+        preprocessing.connect(
+            information_source_runs, 'subject_id', select_run_files, 'subject_id')
+        preprocessing.connect(
+            information_source_runs, 'run_id', select_run_files, 'run_id')
+
+        # GUNZIP - gunzip files because SPM does not use .nii.gz files
+        gunzip_anat = Node(Gunzip(), name = 'gunzip_anat')
+        gunzip_func = Node(Gunzip(), name = 'gunzip_func')
+        gunzip_sbref = Node(Gunzip(), name = 'gunzip_sbref')
+        gunzip_magnitude = Node(Gunzip(), name = 'gunzip_magnitude')
+        gunzip_phase = Node(Gunzip(), name = 'gunzip_phase')
+        preprocessing.connect(select_subject_files, 'anat', gunzip_anat, 'in_file')
+        preprocessing.connect(select_run_files, 'func', gunzip_func, 'in_file')
+        preprocessing.connect(select_run_files, 'sbref', gunzip_sbref, 'in_file')
+        preprocessing.connect(select_subject_files, 'magnitude', gunzip_magnitude, 'in_file')
+        preprocessing.connect(select_subject_files, 'phase', gunzip_phase, 'in_file')
+
+        # FIELD MAP - Compute a voxel displacement map from magnitude and phase data
+        fieldmap = Node(FieldMap(), name = 'fieldmap')
+        fieldmap.inputs.blip_direction = -1
+        fieldmap.inputs.echo_times = (4.92, 7.38)
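+        # Total EPI readout time in ms; together with the blip direction and
+        # echo times above, these follow the field map settings from the team
+        # description (cf. defaultsval.et / tert in the matlabbatch draft).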
+        fieldmap.inputs.total_readout_time = 29.15
+        fieldmap.inputs.template = join(SPMInfo.getinfo()['path'], 'toolbox', 'FieldMap', 'T1.nii')
+        preprocessing.connect(gunzip_magnitude, 'out_file', fieldmap, 'magnitude_file')
+        preprocessing.connect(gunzip_phase, 'out_file', fieldmap, 'phase_file')
+        preprocessing.connect(gunzip_sbref, 'out_file', fieldmap, 'epi_file')
+        preprocessing.connect(gunzip_anat, 'out_file', fieldmap, 'anat_file')
+
+        # REALIGN UNWARP
+        realign_unwarp = MapNode(RealignUnwarp(), name = 'realign_unwarp', iterfield = 'in_files')
+        realign_unwarp.inputs.quality = 0.95
+        realign_unwarp.inputs.separation = 3
+        realign_unwarp.inputs.register_to_mean = True
+        realign_unwarp.inputs.interp = 7
+        realign_unwarp.inputs.reslice_interp = 7
+        preprocessing.connect(fieldmap, 'vdm', realign_unwarp, 'phase_map')
+        preprocessing.connect(gunzip_func, 'out_file', realign_unwarp, 'in_files')
+
+        # SEGMENT - brain segmentation of the anat file
+        segmentation_anat = Node(Segment(), name = 'segmentation_anat')
+        segmentation_anat.inputs.csf_output_type = [False, False, False] # No output for CSF
+        segmentation_anat.inputs.gm_output_type = [False, False, True] # Native space GM (c1*) only
+        segmentation_anat.inputs.wm_output_type = [False, False, False] # No output for WM
+        segmentation_anat.inputs.save_bias_corrected = False
+        preprocessing.connect(gunzip_anat, 'out_file', segmentation_anat, 'data')
+
+        # COREGISTER - Coregister sbref to the realigned and unwarped mean image of func
+        coregister_sbref_to_mean_func = Node(Coregister(), name = 'coregister_sbref_to_mean_func')
+        coregister_sbref_to_mean_func.inputs.cost_function = 'nmi'
+        coregister_sbref_to_mean_func.inputs.jobtype = 'estimate'
+        preprocessing.connect(
+            realign_unwarp, 'mean_image', # realigned and unwarped mean func
+            coregister_sbref_to_mean_func, 'target')
+        preprocessing.connect(
+            gunzip_sbref, 'out_file', coregister_sbref_to_mean_func, 'source')
+
+        # COREGISTER - Coregister sbref (now in the func space) and func to anat space
+        coregister_sbref_to_anat = Node(Coregister(), name = 'coregister_sbref_to_anat')
+        coregister_sbref_to_anat.inputs.cost_function = 'nmi'
+        coregister_sbref_to_anat.inputs.jobtype = 'estimate'
+        preprocessing.connect(
+            coregister_sbref_to_mean_func, 'coregistered_source',
+            coregister_sbref_to_anat, 'source')
+        preprocessing.connect(
+            segmentation_anat, 'native_gm_image',
+            coregister_sbref_to_anat, 'target')
+        preprocessing.connect(
+            realign_unwarp, 'realigned_unwarped_files', # realigned and unwarped func files
+            coregister_sbref_to_anat, 'apply_to_files')
+
+        # NORMALIZE - Deformation computed by the segmentation_anat step is applied to
+        # the func (in anat space).
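+        # (The Normalize node below runs in 'write' mode only: it reuses the
+        # anat-to-MNI transformation estimated by segmentation_anat, exposed as
+        # its 'transformation_mat' output. If a specific resampling grid were
+        # wanted, one could add, e.g.:
+        #     normalize.inputs.write_voxel_sizes = [2, 2, 2]  # hypothetical choice
+        # but no voxel size is imposed here.)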
+        # TODO : we might need the sbref (in anat space) to be normalized as well
+        normalize = Node(Normalize(), name = 'normalize')
+        normalize.inputs.jobtype = 'write' # Estimation was done in the previous (segmentation) step
+        preprocessing.connect(
+            segmentation_anat, 'transformation_mat', normalize, 'parameter_file')
+        preprocessing.connect(
+            coregister_sbref_to_anat, 'coregistered_files', normalize, 'apply_to_files')
+
+        # SMOOTH - Spatial smoothing of fMRI data
+        smoothing = Node(Smooth(), name = 'smoothing')
+        smoothing.inputs.fwhm = [self.fwhm] * 3
+        smoothing.inputs.data_type = 0
+        smoothing.inputs.implicit_masking = False
+        smoothing.inputs.out_prefix = 's'
+        preprocessing.connect(normalize, 'normalized_files', smoothing, 'in_files')
+
+        # DATA SINK - store the wanted results in the wanted directory
+        data_sink = Node(DataSink(), name = 'data_sink')
+        data_sink.inputs.base_directory = self.directories.output_dir
+        preprocessing.connect(smoothing, 'smoothed_files', data_sink, 'preprocessing.@smoothed')
+        preprocessing.connect(
+            realign_unwarp, 'realignment_parameters',
+            data_sink, 'preprocessing.@realignement_parameters')
+
+        # Remove large files, if requested
+        if Configuration()['pipelines']['remove_unused_data']:
+
+            # MERGE - Merge all temporary output files, so they can be removed afterwards
+            merge_temp_files = Node(Merge(8), name = 'merge_temp_files')
+            preprocessing.connect(gunzip_anat, 'out_file', merge_temp_files, 'in1')
+            preprocessing.connect(gunzip_func, 'out_file', merge_temp_files, 'in2')
+            preprocessing.connect(gunzip_sbref, 'out_file', merge_temp_files, 'in3')
+            preprocessing.connect(gunzip_magnitude, 'out_file', merge_temp_files, 'in4')
+            preprocessing.connect(gunzip_phase, 'out_file', merge_temp_files, 'in5')
+            preprocessing.connect(
+                realign_unwarp, 'realigned_unwarped_files',
+                merge_temp_files, 'in6')
+            preprocessing.connect(normalize, 'normalized_files', merge_temp_files, 'in7')
+            preprocessing.connect(
+                coregister_sbref_to_anat, 'coregistered_files', merge_temp_files, 'in8')
+
+            # FUNCTION - Remove temporary files once they are no longer needed
+            remove_gunziped = MapNode(
+                InterfaceFactory.create('remove_parent_directory'),
+                name = 'remove_gunziped',
+                iterfield = 'file_name'
+            )
+            preprocessing.connect(merge_temp_files, 'out', remove_gunziped, 'file_name')
+            preprocessing.connect(data_sink, 'out_file', remove_gunziped, '_')
+
+        return preprocessing
+
+    def get_preprocessing_outputs(self):
+        """ Return the names of the files the preprocessing is supposed to generate. """
+
+        output_dir = join(self.directories.output_dir, 'preprocessing',
+            '_run_id_{run_id}_subject_id_{subject_id}')
+
+        # Smoothing outputs
+        templates = [
+            join(output_dir, 'swusub-{subject_id}_task-MGT_run-{run_id}_bold.nii')
+        ]
+
+        # Realignment parameters
+        templates += [
+            join(output_dir, '_realign_unwarp1',
+                'rp_sub-{subject_id}_task-MGT_run-{run_id}_bold.txt')
+        ]
+
+        # Format with subject_ids and run_ids
+        return [t.format(subject_id = s, run_id = r)
+            for t in templates for s in self.subject_list for r in self.run_list]
+
+    def get_run_level_analysis(self):
+        """ No run level analysis has been done by team R5K7 """
+        return None
+
+    def get_subject_information(event_file):
+        """
+        Create a Bunch for SpecifySPMModel, from data extracted from an event file.
+
+        Parameters :
+        - event_file: str, event file for one run of one subject
+
+        Returns :
+        - subject_information: Bunch, relevant event information for subject level analysis.
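+
+        Note: the column indices used below assume the NARPS events.tsv layout,
+        i.e. tab-separated columns in the order:
+        onset, duration, gain, loss, RT (any trailing columns are ignored here).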
+ """ + from nipype.interfaces.base import Bunch + + # Init empty lists inside directiries + onsets = [] + durations = [] + gain_value = [] + loss_value = [] + reaction_time = [] + + with open(event_file, 'rt') as file: + next(file) # skip the header + + for line in file: + info = line.strip().split() + + onsets.append(float(info[0])) + durations.append(float(info[1])) + gain_value.append(float(info[2])) + loss_value.append(float(info[3])) + reaction_time.append(float(info[4])) + + # Fill Bunch + return Bunch( + conditions = ['task'], + onsets = [onsets], + durations = [durations], + amplitudes = None, + tmod = None, + pmod = [ + Bunch( + name = ['gain', 'loss', 'reaction_time'], + poly = [1, 1, 1], + param = [gain_value, loss_value, reaction_time] + ) + ], + regressor_names = None, + regressors = None + ) + + def get_subject_level_analysis(self): + """ + Create the subject level analysis workflow. + + Returns: + - subject_level : nipype.WorkFlow + WARNING: the name attribute of the workflow is 'subject_level_analysis' + """ + # Create subject level analysis workflow + subject_level = Workflow( + base_dir = self.directories.working_dir, + name = 'subject_level_analysis') + + # IDENTITY INTERFACE - To iterate on subjects + information_source = Node(IdentityInterface( + fields = ['subject_id']), + name = 'information_source') + information_source.iterables = [('subject_id', self.subject_list)] + + # SELECT FILES - to select necessary files + templates = { + 'realignement_parameters' : join(self.directories.output_dir, 'preprocessing', + '_run_id_*_subject_id_{subject_id}', '_realign_unwarp1', + 'rp_sub-{subject_id}_task-MGT_run-*_bold.txt'), + 'func' : join(self.directories.output_dir, 'preprocessing', + '_run_id_*_subject_id_{subject_id}', 'swusub-{subject_id}_task-MGT_run-*_bold.nii'), + 'event' : join('sub-{subject_id}', 'func', + 'sub-{subject_id}_task-MGT_run-*_events.tsv') + } + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.dataset_dir + subject_level.connect(information_source, 'subject_id', select_files, 'subject_id') + + # FUNCTION get_subject_information - generate files with event data + subject_information = MapNode(Function( + function = self.get_subject_information, + input_names = ['event_file'], + output_names = ['subject_info']), + iterfield = 'event_file', + name = 'subject_information') + subject_level.connect(select_files, 'event', subject_information, 'event_file') + + # SPECIFY MODEL - generates SPM-specific Model + specify_model = Node(SpecifySPMModel(), name = 'specify_model') + specify_model.inputs.input_units = 'secs' + specify_model.inputs.output_units = 'secs' + specify_model.inputs.time_repetition = TaskInformation()['RepetitionTime'] + specify_model.inputs.high_pass_filter_cutoff = 128 + subject_level.connect(select_files, 'func', specify_model, 'functional_runs') + subject_level.connect( + select_files, 'realignement_parameters', specify_model, 'realignment_parameters') + subject_level.connect(subject_information, 'subject_info', specify_model, 'subject_info') + + # LEVEL 1 DESIGN - Generates an SPM design matrix + model_design = Node(Level1Design(), name = 'model_design') + model_design.inputs.bases = {'hrf': {'derivs': [1,0]}} # Temporal derivative + model_design.inputs.timing_units = 'secs' + model_design.inputs.interscan_interval = TaskInformation()['RepetitionTime'] + subject_level.connect(specify_model, 'session_info', model_design, 'session_info') + + # ESTIMATE MODEL - estimate 
+        model_estimate = Node(EstimateModel(), name = 'model_estimate')
+        model_estimate.inputs.estimation_method = {'Classical': 1}
+        subject_level.connect(model_design, 'spm_mat_file', model_estimate, 'spm_mat_file')
+
+        # ESTIMATE CONTRAST - estimates contrasts
+        contrast_estimate = Node(EstimateContrast(), name = 'contrast_estimate')
+        contrast_estimate.inputs.contrasts = self.subject_level_contrasts
+        subject_level.connect([
+            (model_estimate, contrast_estimate, [
+                ('spm_mat_file', 'spm_mat_file'),
+                ('beta_images', 'beta_images'),
+                ('residual_image', 'residual_image')
+            ])
+        ])
+
+        # DATA SINK - store the wanted results in the wanted directory
+        data_sink = Node(DataSink(), name = 'data_sink')
+        data_sink.inputs.base_directory = self.directories.output_dir
+        subject_level.connect([
+            (contrast_estimate, data_sink, [
+                ('con_images', 'subject_level_analysis.@con_images'),
+                ('spmT_images', 'subject_level_analysis.@spmT_images'),
+                ('spm_mat_file', 'subject_level_analysis.@spm_mat_file')
+            ])
+        ])
+
+        return subject_level
+
+    def get_subject_level_outputs(self):
+        """ Return the names of the files the subject level analysis is supposed to generate. """
+
+        # Contrast maps
+        templates = [join(
+            self.directories.output_dir,
+            'subject_level_analysis', '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\
+            for contrast_id in self.contrast_list]
+
+        # SPM.mat file
+        templates += [join(
+            self.directories.output_dir,
+            'subject_level_analysis', '_subject_id_{subject_id}', 'SPM.mat')]
+
+        # spmT maps
+        templates += [join(
+            self.directories.output_dir,
+            'subject_level_analysis', '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\
+            for contrast_id in self.contrast_list]
+
+        # Format with subject_ids
+        return_list = []
+        for template in templates:
+            return_list += [template.format(subject_id = s) for s in self.subject_list]
+
+        return return_list
+
+    def get_group_level_analysis(self):
+        """
+        Return all workflows for the group level analysis.
+
+        Returns:
+            - a list of nipype.WorkFlow, one per group level method
+        """
+        methods = ['equalRange', 'equalIndifference', 'groupComp']
+        return [self.get_group_level_analysis_sub_workflow(method) for method in methods]
+
+    def get_group_level_analysis_sub_workflow(self, method):
+        """
+        Return a workflow for the group level analysis.
+
+        Parameters:
+            - method: str, one of 'equalRange', 'equalIndifference' or 'groupComp'
+
+        Returns:
+            - group_level_analysis: nipype.WorkFlow
+        """
+        # Compute the number of participants used to do the analysis
+        nb_subjects = len(self.subject_list)
+
+        # Infosource - a function free node to iterate over the list of contrast ids
+        information_source = Node(
+            IdentityInterface(
+                fields=['contrast_id']),
+            name='information_source')
+        information_source.iterables = [('contrast_id', self.contrast_list)]
+
+        # SelectFiles
+        templates = {
+            # Contrasts for all participants
+            'contrasts' : join(self.directories.output_dir,
+                'subject_level_analysis', '_subject_id_*', 'con_{contrast_id}.nii')
+        }
+
+        select_files = Node(SelectFiles(templates), name = 'select_files')
+        select_files.inputs.base_directory = self.directories.results_dir
+        select_files.inputs.force_lists = True
+
+        # Datasink - save important files
+        data_sink = Node(DataSink(), name = 'data_sink')
+        data_sink.inputs.base_directory = self.directories.output_dir
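+
+        # The Function nodes below select, for the current method, the subjects that
+        # belong to the relevant NARPS group. As a sketch of the helper from
+        # narps_open.core.common used here, list_intersection keeps the elements
+        # common to both of its input lists:
+        #     list_intersection(['001', '002', '003'], ['002', '003', '004'])
+        #     # -> ['002', '003']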
+
+        # Function Node get_equal_range_subjects
+        # Get subjects in the equalRange group and in the subject_list
+        get_equal_range_subjects = Node(Function(
+            function = list_intersection,
+            input_names = ['list_1', 'list_2'],
+            output_names = ['out_list']
+            ),
+            name = 'get_equal_range_subjects'
+        )
+        get_equal_range_subjects.inputs.list_1 = get_group('equalRange')
+        get_equal_range_subjects.inputs.list_2 = self.subject_list
+
+        # Function Node get_equal_indifference_subjects
+        # Get subjects in the equalIndifference group and in the subject_list
+        get_equal_indifference_subjects = Node(Function(
+            function = list_intersection,
+            input_names = ['list_1', 'list_2'],
+            output_names = ['out_list']
+            ),
+            name = 'get_equal_indifference_subjects'
+        )
+        get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference')
+        get_equal_indifference_subjects.inputs.list_2 = self.subject_list
+
+        # A function to prefix the subject ids output by the get_equal_*_subjects nodes.
+        # Without the '_subject_id_' prefix, a bare subject id such as '001' would also
+        # match the name of every contrast file (e.g. 'con_0001.nii').
+        complete_subject_ids = lambda l : [f'_subject_id_{a}' for a in l]
+
+        # Function Node elements_in_string
+        # Get contrast files for required subjects
+        # Note : using a MapNode with elements_in_string requires using clean_list to remove
+        # None values from the out_list
+        get_contrasts = MapNode(Function(
+            function = elements_in_string,
+            input_names = ['input_str', 'elements'],
+            output_names = ['out_list']
+            ),
+            name = 'get_contrasts', iterfield = 'input_str'
+        )
+
+        # Estimate model
+        estimate_model = Node(EstimateModel(), name = 'estimate_model')
+        estimate_model.inputs.estimation_method = {'Classical':1}
+
+        # Estimate contrasts
+        estimate_contrast = Node(EstimateContrast(), name = 'estimate_contrast')
+        estimate_contrast.inputs.group_contrast = True
+
+        # Create thresholded maps
+        threshold = MapNode(Threshold(),
+            name = 'threshold',
+            iterfield = ['stat_image', 'contrast_index'])
+        threshold.inputs.use_fwe_correction = False
+        threshold.inputs.height_threshold = 0.001
+        threshold.inputs.extent_fdr_p_threshold = 0.05
+        threshold.inputs.use_topo_fdr = False
+        threshold.inputs.force_activation = True
+        threshold.synchronize = True
+
+        group_level_analysis = Workflow(
+            base_dir = self.directories.working_dir,
+            name = f'group_level_analysis_{method}_nsub_{nb_subjects}')
+        group_level_analysis.connect([
+            (information_source, select_files, [('contrast_id', 'contrast_id')]),
+            (select_files, get_contrasts, [('contrasts', 'input_str')]),
+            (estimate_model, estimate_contrast, [
+                ('spm_mat_file', 'spm_mat_file'),
+                ('residual_image', 'residual_image'),
+                ('beta_images', 'beta_images')]),
+            (estimate_contrast, threshold, [
+                ('spm_mat_file', 'spm_mat_file'),
+                ('spmT_images', 'stat_image')]),
+            (estimate_model, data_sink, [
+                ('mask_image', f'group_level_analysis_{method}_nsub_{nb_subjects}.@mask')]),
+            (estimate_contrast, data_sink, [
+                ('spm_mat_file', f'group_level_analysis_{method}_nsub_{nb_subjects}.@spm_mat'),
+                ('spmT_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@T'),
+                ('con_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@con')]),
+            (threshold, data_sink, [
+                ('thresholded_map', f'group_level_analysis_{method}_nsub_{nb_subjects}.@thresh')])])
+
+        if method in ('equalRange', 'equalIndifference'):
+            estimate_contrast.inputs.contrasts = [
+                ('Group', 'T', ['mean'], [1]),
+                ('Group', 'T', ['mean'], [-1])
+            ]
+
+            threshold.inputs.contrast_index = [1, 2]
+
+            # Specify design matrix
+            one_sample_t_test_design = Node(OneSampleTTestDesign(),
+                name = 'one_sample_t_test_design')
+            group_level_analysis.connect([
+                (get_contrasts, one_sample_t_test_design, [
+                    (('out_list', clean_list), 'in_files')
+                ]),
+                (one_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')])
+            ])
+
+            if method == 'equalRange':
+                group_level_analysis.connect([
+                    (get_equal_range_subjects, get_contrasts, [
+                        (('out_list', complete_subject_ids), 'elements')
+                    ])
+                ])
+
+            elif method == 'equalIndifference':
+                group_level_analysis.connect([
+                    (get_equal_indifference_subjects, get_contrasts, [
+                        (('out_list', complete_subject_ids), 'elements')
+                    ])
+                ])
+
+        elif method == 'groupComp':
+            estimate_contrast.inputs.contrasts = [
+                ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1])
+            ]
+
+            threshold.inputs.contrast_index = [1]
+
+            # Function Node elements_in_string
+            # Get contrast files for required subjects
+            # Note : using a MapNode with elements_in_string requires using clean_list to remove
+            # None values from the out_list
+            get_contrasts_2 = MapNode(Function(
+                function = elements_in_string,
+                input_names = ['input_str', 'elements'],
+                output_names = ['out_list']
+                ),
+                name = 'get_contrasts_2', iterfield = 'input_str'
+            )
+
+            # Specify design matrix
+            two_sample_t_test_design = Node(TwoSampleTTestDesign(),
+                name = 'two_sample_t_test_design')
+            two_sample_t_test_design.inputs.unequal_variance = True
+
+            group_level_analysis.connect([
+                (select_files, get_contrasts_2, [('contrasts', 'input_str')]),
+                (get_equal_range_subjects, get_contrasts, [
+                    (('out_list', complete_subject_ids), 'elements')
+                ]),
+                (get_equal_indifference_subjects, get_contrasts_2, [
+                    (('out_list', complete_subject_ids), 'elements')
+                ]),
+                (get_contrasts, two_sample_t_test_design, [
+                    (('out_list', clean_list), 'group1_files')
+                ]),
+                (get_contrasts_2, two_sample_t_test_design, [
+                    (('out_list', clean_list), 'group2_files')
+                ]),
+                (two_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')])
+            ])
+
+        return group_level_analysis
+
+    def get_group_level_outputs(self):
+        """ Return all names for the files the group level analysis is supposed to generate.
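+
+        For instance (illustrative, assuming 4 subjects in self.subject_list), one of
+        the returned paths would be:
+        <output_dir>/group_level_analysis_equalRange_nsub_4/_contrast_id_0002/spmT_0001.nii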
""" + + # Handle equalRange and equalIndifference + parameters = { + 'contrast_id': self.contrast_list, + 'method': ['equalRange', 'equalIndifference'], + 'file': [ + 'con_0001.nii', 'con_0002.nii', 'mask.nii', 'SPM.mat', + 'spmT_0001.nii', 'spmT_0002.nii', + join('_threshold0', 'spmT_0001_thr.nii'), join('_threshold1', 'spmT_0002_thr.nii') + ], + 'nb_subjects' : [str(len(self.subject_list))] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'group_level_analysis_{method}_nsub_{nb_subjects}', + '_contrast_id_{contrast_id}', + '{file}' + ) + + return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + # Handle groupComp + parameters = { + 'contrast_id': self.contrast_list, + 'method': ['groupComp'], + 'file': [ + 'con_0001.nii', 'mask.nii', 'SPM.mat', 'spmT_0001.nii', + join('_threshold0', 'spmT_0001_thr.nii') + ], + 'nb_subjects' : [str(len(self.subject_list))] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'group_level_analysis_{method}_nsub_{nb_subjects}', + '_contrast_id_{contrast_id}', + '{file}' + ) + + return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + return return_list + + def get_hypotheses_outputs(self): + """ Return all hypotheses output file names. + Note that hypotheses 5 to 8 correspond to the maps given by the team in their results ; + but they are not fully consistent with the hypotheses definitions as expected by NARPS. + """ + nb_sub = len(self.subject_list) + files = [ + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold1', 'spmT_0002_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0002.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold1', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0002_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0002.nii'), + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 
'spmT_0001_thr.nii'), + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii') + ] + return [join(self.directories.output_dir, f) for f in files]
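+
+# Usage sketch (not part of team R5K7's analysis itself): assuming the standard
+# narps_open runner API, the pipeline defined above could be launched with:
+#
+#     from narps_open.runner import PipelineRunner
+#
+#     runner = PipelineRunner(team_id = 'R5K7')
+#     runner.pipeline.directories.dataset_dir = '/data/ds001734'  # hypothetical path
+#     runner.pipeline.directories.results_dir = '/data/results'   # hypothetical path
+#     runner.pipeline.directories.set_output_dir_with_team_id(runner.team_id)
+#     runner.pipeline.directories.set_working_dir_with_team_id(runner.team_id)
+#     runner.subjects = ['001', '002', '003', '004']
+#     runner.start()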