Skip to content

Commit

Permalink
[REPRO] Add test file and include registration to subject-level analysis.
Browse files Browse the repository at this point in the history
  • Loading branch information
elodiegermani1 committed Dec 14, 2023
1 parent 254b622 commit 6009632
Show file tree
Hide file tree
Showing 2 changed files with 288 additions and 114 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -173,37 +173,37 @@ def get_preprocessing_outputs(self):

templates = [join(
self.directories.output_dir,
'l1_analysis', f'run_id_{run_id}'+'_subject_id_{subject_id}', '_addmean0',
'preprocess', f'run_id_{run_id}'+'_subject_id_{subject_id}', '_addmean0',
'sub-{subject_id}_'+f'task-MGT_run-{run_id}_bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_maths.nii.gz')\
for run_id in self.run_list]

templates += [join(
self.directories.output_dir,
'l1_analysis', f'run_id_{run_id}'+'_subject_id_{subject_id}', '_dilatemask0',
'preprocess', f'run_id_{run_id}'+'_subject_id_{subject_id}', '_dilatemask0',
'sub-{subject_id}_'+f'task-MGT_run-{run_id}_bold_dtype_mcf_bet_thresh_dil.nii.gz')\
for run_id in self.run_list]

templates += [join(
self.directories.output_dir,
'l1_analysis', f'run_id_{run_id}'+'_subject_id_{subject_id}', '_maskfunc30',
'preprocess', f'run_id_{run_id}'+'_subject_id_{subject_id}', '_maskfunc30',
'sub-{subject_id}_'+f'task-MGT_run-{run_id}_bold_dtype_mcf_mask_smooth_mask.nii.gz')\
for run_id in self.run_list]

templates += [join(
self.directories.output_dir,
'l1_analysis', f'run_id_{run_id}'+'_subject_id_{subject_id}', '_realign0',
'preprocess', f'run_id_{run_id}'+'_subject_id_{subject_id}', '_realign0',
'sub-{subject_id}_'+f'task-MGT_run-{run_id}_bold_dtype_mcf.nii.gz.par')\
for run_id in self.run_list]

templates += [join(
self.directories.output_dir,
'l1_analysis', f'run_id_{run_id}'+'_subject_id_{subject_id}',
'preprocess', f'run_id_{run_id}'+'_subject_id_{subject_id}',
'sub-{subject_id}_'+f'T1w_fieldwarp.nii.gz')\
for run_id in self.run_list]

templates += [join(
self.directories.output_dir,
'l1_analysis', f'run_id_{run_id}'+'_subject_id_{subject_id}',
'preprocess', f'run_id_{run_id}'+'_subject_id_{subject_id}',
'sub-{subject_id}_'+f'task-MGT_run-{run_id}_bold_dtype_mean_flirt.mat')\
for run_id in self.run_list]

Expand Down Expand Up @@ -493,37 +493,79 @@ def get_run_level_analysis(self):

return l1_analysis

def get_registration(self):
""" Return a Nipype workflow describing the registration part of the pipeline """

def get_run_level_outputs(self):
    """ Return a list of the files generated by the run level analysis.

    Returns:
        - a list of paths (str), one per file expected under the
        'run_level_analysis' output directory, covering design matrix
        files and the per-contrast statistical maps.
    """

    # Common location of run level outputs: one sub-directory per (run, subject)
    template = join(
        self.directories.output_dir,
        'run_level_analysis', '_run_id_{run_id}_subject_id_{subject_id}', '{file}'
        )

    # Design matrix files, written once per (run, subject) pair
    return_list = [
        template.format(run_id = run_id, subject_id = subject_id, file = file_name)
        for run_id in self.run_list
        for subject_id in self.subject_list
        for file_name in ['run0.mat', 'run0.png']
        ]

    # Statistical maps, one set per contrast, stored in a results/ sub-directory.
    # NOTE: the contrast id must be substituted into the file name explicitly:
    # str.format does not recursively expand placeholders contained in
    # substituted values, so a single-pass format of the template would leave
    # a literal '{contrast_id}' in the returned paths.
    stat_file_templates = [
        join('results', 'cope{contrast_id}.nii.gz'),
        join('results', 'tstat{contrast_id}.nii.gz'),
        join('results', 'varcope{contrast_id}.nii.gz'),
        join('results', 'zstat{contrast_id}.nii.gz')
        ]
    return_list += [
        template.format(
            run_id = run_id,
            subject_id = subject_id,
            file = file_template.format(contrast_id = contrast_id)
            )
        for run_id in self.run_list
        for subject_id in self.subject_list
        for contrast_id in self.contrast_list
        for file_template in stat_file_templates
        ]

    return return_list

def get_subject_level_analysis(self):
""" Return a Nipype workflow describing the subject level analysis part of the pipeline """

# [INFO] The following part stays the same for all pipelines

# Infosource Node - To iterate on subjects
info_source = Node(
IdentityInterface(
fields = ['subject_id', 'contrast_id', 'run_id'],
fields = ['subject_id', 'contrast_id'],
),
name='info_source',
)
info_source.iterables = [('subject_id', self.subject_list),
('contrast_id', self.contrast_list),
('run_id', self.run_list)]
info_source.iterables = [('subject_id', self.subject_list), ('contrast_id', self.contrast_list)]

# Templates to select files node
# [TODO] Change the name of the files depending on the filenames of results of preprocessing
templates = {
'cope': join(
self.directories.output_dir,
'run_level_analysis',
'_run_id_{run_id}_subject_id_{subject_id}',
'results',
'registration',
'_contrast_id_{contrast_id}_run_id_*_subject_id_{subject_id}',
'_warpall_cope0',
'cope{contrast_id}.nii.gz',
),
'varcope': join(
self.directories.output_dir,
'run_level_analysis',
'_run_id_{run_id}_subject_id_{subject_id}',
'results',
'registration',
'_contrast_id_{contrast_id}_run_id_*_subject_id_{subject_id}',
'_warpall_varcope0',
'varcope{contrast_id}.nii.gz',
),
)
'func2anat_transform':join(
self.directories.output_dir,
'preprocess',
Expand All @@ -549,7 +591,10 @@ def get_registration(self):
DataSink(base_directory = self.directories.output_dir),
name = 'data_sink'
)


# Generate design matrix
specify_model = Node(L2Model(num_copes = len(self.run_list)), name='l2model')

warpall_cope = MapNode(
ApplyWarp(interp='spline'),
name='warpall_cope',
Expand All @@ -568,97 +613,6 @@ def get_registration(self):
warpall_varcope.inputs.ref_file = Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
warpall_varcope.inputs.mask_file = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')

# Create registration workflow and connect its nodes
registration = Workflow(
base_dir = self.directories.working_dir,
name = "registration"
)

registration.connect([
(
info_source,
select_files,
[('subject_id', 'subject_id'),
('run_id', 'run_id'),
('contrast_id', 'contrast_id')]
),
(
select_files,
warpall_cope,
[('func2anat_transform', 'premat'),
('anat2target_transform', 'field_file'),
('cope', 'in_file')]
),
(
select_files,
warpall_varcope,
[('func2anat_transform', 'premat'),
('anat2target_transform', 'field_file'),
('varcope', 'in_file')]
),
(
warpall_cope,
data_sink,
[('out_file', 'registration.@reg_cope')]
),
(
warpall_varcope,
data_sink,
[('out_file', 'registration.@reg_varcope')]
)
])

return registration


def get_subject_level_analysis(self):
""" Return a Nipype workflow describing the subject level analysis part of the pipeline """

# [INFO] The following part stays the same for all pipelines

# Infosource Node - To iterate on subjects
info_source = Node(
IdentityInterface(
fields = ['subject_id', 'contrast_id'],
),
name='info_source',
)
info_source.iterables = [('subject_id', self.subject_list), ('contrast_id', self.contrast_list)]

# Templates to select files node
# [TODO] Change the name of the files depending on the filenames of results of preprocessing
templates = {
'cope': join(
self.directories.output_dir,
'registration',
'_contrast_id_{contrast_id}_run_id_*_subject_id_{subject_id}',
'_warpall_cope0',
'cope{contrast_id}_warp.nii.gz',
),
'varcope': join(
self.directories.output_dir,
'registration',
'_contrast_id_{contrast_id}_run_id_*_subject_id_{subject_id}',
'_warpall_varcope0',
'varcope{contrast_id}_warp.nii.gz',
)
}

# SelectFiles node - to select necessary files
select_files = Node(
SelectFiles(templates, base_directory = self.directories.dataset_dir),
name = 'select_files'
)

# DataSink Node - store the wanted results in the wanted repository
data_sink = Node(
DataSink(base_directory = self.directories.output_dir),
name = 'data_sink'
)

# Generate design matrix
specify_model = Node(L2Model(num_copes = len(self.run_list)), name='l2model')

# Merge copes and varcopes files for each subject
merge_copes = Node(Merge(dimension='t'), name='merge_copes')

Expand All @@ -685,13 +639,27 @@ def get_subject_level_analysis(self):
),
(
select_files,
warpall_cope,
[('func2anat_transform', 'premat'),
('anat2target_transform', 'field_file'),
('cope', 'in_file')]
),
(
select_files,
warpall_varcope,
[('func2anat_transform', 'premat'),
('anat2target_transform', 'field_file'),
('varcope', 'in_file')]
),
(
warpall_cope,
merge_copes,
[('cope', 'in_files')]
[('out_file', 'in_files')]
),
(
select_files,
warpall_varcope,
merge_varcopes,
[('varcope', 'in_files')]
[('out_file', 'in_files')]
),
(
merge_copes,
Expand Down Expand Up @@ -723,6 +691,25 @@ def get_subject_level_analysis(self):
# [INFO] Here we simply return the created workflow
return subject_level_analysis


def get_subject_level_outputs(self):
    """ Return a list of the files generated by the subject level analysis.

    Returns:
        - a list of paths (str), one per statistical map expected under the
        'subject_level_analysis' output directory.
    """

    # Fixed set of statistical maps written per (contrast, subject) pair
    output_files = ['cope1.nii.gz', 'tstat1.nii.gz', 'varcope1.nii.gz', 'zstat1.nii.gz']

    return [
        join(
            self.directories.output_dir,
            'subject_level_analysis',
            f'_contrast_id_{contrast_id}_subject_id_{subject_id}',
            file_name
            )
        for contrast_id in self.contrast_list
        for subject_id in self.subject_list
        for file_name in output_files
        ]


# [INFO] This function returns the list of ids and files of each group of participants
# to do analyses for both groups, and one between the two groups.
def get_subgroups_contrasts(
Expand Down Expand Up @@ -1141,3 +1128,47 @@ def get_group_level_analysis_sub_workflow(self, method):

# [INFO] Here we simply return the created workflow
return group_level_analysis

def get_hypotheses_outputs(self):
    """ Return the names of the files used by the team to answer the hypotheses of NARPS.

    Returns:
        - a list of paths (str): for each of the 9 hypotheses, in order,
        a TFCE-corrected t map followed by a z map.
    """

    nb_sub = len(self.subject_list)

    # One (group, contrast id, stat index) triple per hypothesis, in
    # hypothesis order; some hypotheses intentionally reuse the same maps.
    hypotheses = [
        ('equalIndifference', '1', 1),
        ('equalRange', '1', 1),
        ('equalIndifference', '1', 1),
        ('equalRange', '1', 1),
        ('equalIndifference', '2', 2),
        ('equalRange', '2', 2),
        ('equalIndifference', '2', 1),
        ('equalRange', '2', 1),
        ('groupComp', '2', 1)
        ]

    files = []
    for group, contrast_id, stat_index in hypotheses:
        directory = join(
            self.directories.output_dir,
            f'group_level_analysis_{group}_nsub_{nb_sub}',
            f'_contrast_id_{contrast_id}'
            )
        files.append(join(directory, f'randomise_tfce_corrp_tstat{stat_index}.nii.gz'))
        files.append(join(directory, f'zstat{stat_index}.nii.gz'))

    return files
Loading

0 comments on commit 6009632

Please sign in to comment.