T54A refactoring (#77)
* Dummy change to test 2T6S pipeline on CI with rectified results data

* [T54A] refactoring

* [TEST] test updates for T54A pipeline

* Remove changes on 2T6S

* [TEST] change pipeline test_utils to avoid having to rewrite it at every repro

* [CI] from now on, unit_test workflow must run on self hosted runner

* [BUG] Cleaning connections

* [TEST] test_conftest was not supposed to belong to this branch

* Wildcard to create parameters dir

* Set default file type for FSL

* [BUG] inside unit_tests workflow

* Use computed masks

* Bug with output folder names

* PEP8 related refac

* Issue with contrasts ids

* Run level outputs

* [REFAC] refactoring T54A + adding groups to MultipleRegressDesign [skip ci]

* [TEST] adding unit tests for T54A [skip ci]

* Removed get_contrasts method to write run level contrasts as class attributes instead [skip ci]

* [REFAC] end of refactoring T54A [TEST] end of unit_tests for T54A

* Dealing with subjects selection in SelectFiles Nodes

* [REFAC] group level : better identify groups

* [BUG] with participants ids [skip ci]

* Dummy commit to run CI

* [DATALAD] change results url

* [BUG] session information

* [BUG] session information

* Correct value for fractional intensity threshold

* Run level contrast file

* Merge run level masks for FLAMEo

* T54A conditional removing files [skip ci]

* [SRC][TEST] level outputs & mask intersection for group level analysis

* helper for testing pipeline outputs [skip ci]
bclenet authored Jan 31, 2024
1 parent d4ff702 commit 801d996
Showing 8 changed files with 1,109 additions and 627 deletions.
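One item in the commit messages above mentions adding groups to MultipleRegressDesign. As a rough illustration only (the actual node wiring lives in the team_T54A.py diff, which is not rendered below), Nipype's FSL MultipleRegressDesign interface accepts a groups input alongside regressors and contrasts. The values below mirror the new two-sample t-test unit test; the contrast name is a made-up placeholder.

from nipype.interfaces.fsl import MultipleRegressDesign

# Illustrative sketch, not the pipeline's actual code
design = MultipleRegressDesign()
design.inputs.regressors = dict(
    equalRange = [1, 0, 1, 0],         # subjects 001 and 003
    equalIndifference = [0, 1, 0, 1]   # subjects 002 and 004
)
design.inputs.contrasts = [
    # placeholder contrast comparing the two groups
    ('group_comparison', 'T', ['equalRange', 'equalIndifference'], [1, -1])
]
design.inputs.groups = [1, 2, 1, 2]    # per-subject variance group, as returned by
                                       # get_two_sample_t_test_regressors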
2 changes: 1 addition & 1 deletion narps_open/pipelines/__init__.py
@@ -68,7 +68,7 @@
'R7D1': None,
'R9K3': None,
'SM54': None,
'T54A': None,
'T54A': 'PipelineTeamT54A',
'U26C': None,
'UI76': None,
'UK24': None,
1,444 changes: 819 additions & 625 deletions narps_open/pipelines/team_T54A.py

Large diffs are not rendered by default.
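Since this large diff is not rendered, here is a rough outline of the PipelineTeamT54A interface as implied by the unit tests added further down. It is a sketch for orientation only: where the attributes are initialised and the exact method signatures are assumptions, not the file's contents.

from nipype import Workflow
from narps_open.pipelines import Pipeline

class PipelineTeamT54A(Pipeline):
    """ Sketch inferred from tests/pipelines/test_team_T54A.py, not the actual file """

    def __init__(self):
        super().__init__()
        self.fwhm = 4.0
        self.team_id = 'T54A'
        self.contrast_list = ['1', '2']
        self.run_level_contrasts = [
            ('gain', 'T', ['trial', 'gain', 'loss'], [0, 1, 0]),
            ('loss', 'T', ['trial', 'gain', 'loss'], [0, 0, 1])
        ]

    def get_preprocessing(self):
        return None  # no preprocessing workflow for this team

    def get_run_level_analysis(self) -> Workflow:
        ...  # run level GLM (FSL)

    def get_subject_level_analysis(self) -> Workflow:
        ...  # combines runs per subject

    def get_group_level_analysis(self) -> list:
        ...  # three sub-workflows, as checked by test_create

    def get_hypotheses_outputs(self) -> list:
        ...  # 18 files, as checked by test_outputs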

1 change: 1 addition & 0 deletions setup.py
@@ -23,6 +23,7 @@
]
extras_require = {
'tests': [
'pathvalidate',
'pylint',
'pytest',
'pytest-cov',
26 changes: 26 additions & 0 deletions tests/conftest.py
@@ -11,7 +11,9 @@
from shutil import rmtree

from pytest import helpers
from pathvalidate import is_valid_filepath

from narps_open.pipelines import Pipeline
from narps_open.runner import PipelineRunner
from narps_open.utils import get_subject_id
from narps_open.utils.correlation import get_correlation_coefficient
@@ -21,6 +23,30 @@
# Init configuration, to ensure it is in testing mode
Configuration(config_type='testing')

@helpers.register
def test_pipeline_outputs(pipeline: Pipeline, number_of_outputs: list):
""" Test the outputs of a Pipeline.
Arguments:
- pipeline, Pipeline: the pipeline to test
- number_of_outputs, list: a list containing the expected number of outputs for each
stage of the pipeline (preprocessing, run_level, subject_level, group_level, hypotheses)
Return: nothing; an AssertionError is raised if the outputs are not in the expected
number or if any output's name is not a valid file path.
"""
assert len(number_of_outputs) == 5
for outputs, number in zip([
pipeline.get_preprocessing_outputs(),
pipeline.get_run_level_outputs(),
pipeline.get_subject_level_outputs(),
pipeline.get_group_level_outputs(),
pipeline.get_hypotheses_outputs()], number_of_outputs):

assert len(outputs) == number
for output in outputs:
assert is_valid_filepath(output, platform = 'auto')
assert not any(c in output for c in ['{', '}'])

@helpers.register
def test_pipeline_execution(
team_id: str,
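For reference, a minimal usage sketch of this new helper from within a pytest session (the T54A tests in the next file make essentially the same call); the expected counts are taken from that test.

from pytest import helpers  # the helpers namespace registered in conftest.py
from narps_open.pipelines.team_T54A import PipelineTeamT54A

pipeline = PipelineTeamT54A()
pipeline.subject_list = ['001']
# Expected counts per stage:
# [preprocessing, run_level, subject_level, group_level, hypotheses]
helpers.test_pipeline_outputs(pipeline, [0, 36, 10, 36, 18])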
208 changes: 208 additions & 0 deletions tests/pipelines/test_team_T54A.py
@@ -0,0 +1,208 @@
#!/usr/bin/python
# coding: utf-8

""" Tests of the 'narps_open.pipelines.team_T54A' module.
Launch this test with PyTest
Usage:
======
pytest -q test_team_T54A.py
pytest -q test_team_T54A.py -k <selected_test>
"""
from os import mkdir
from os.path import exists, join
from shutil import rmtree

from pytest import helpers, mark, fixture
from numpy import isclose
from nipype import Workflow
from nipype.interfaces.base import Bunch

from narps_open.pipelines.team_T54A import PipelineTeamT54A
from narps_open.utils.configuration import Configuration

TEMPORARY_DIR = join(Configuration()['directories']['test_runs'], 'test_T54A')

@fixture
def remove_test_dir():
""" A fixture to remove temporary directory created by tests """

rmtree(TEMPORARY_DIR, ignore_errors = True)
mkdir(TEMPORARY_DIR)
yield # test runs here
rmtree(TEMPORARY_DIR, ignore_errors = True)

def compare_float_2d_arrays(array_1, array_2):
""" Assert array_1 and array_2 are close enough """

assert len(array_1) == len(array_2)
for reference_array, test_array in zip(array_1, array_2):
assert len(reference_array) == len(test_array)
assert isclose(reference_array, test_array).all()

class TestPipelinesTeamT54A:
""" A class that contains all the unit tests for the PipelineTeamT54A class."""

@staticmethod
@mark.unit_test
def test_create():
""" Test the creation of a PipelineTeamT54A object """

pipeline = PipelineTeamT54A()

# 1 - check the parameters
assert pipeline.fwhm == 4.0
assert pipeline.team_id == 'T54A'
assert pipeline.contrast_list == ['1', '2']
assert pipeline.run_level_contrasts == [
('gain', 'T', ['trial', 'gain', 'loss'], [0, 1, 0]),
('loss', 'T', ['trial', 'gain', 'loss'], [0, 0, 1])
]

# 2 - check workflows
assert pipeline.get_preprocessing() is None
assert isinstance(pipeline.get_run_level_analysis(), Workflow)
assert isinstance(pipeline.get_subject_level_analysis(), Workflow)
group_level = pipeline.get_group_level_analysis()

assert len(group_level) == 3
for sub_workflow in group_level:
assert isinstance(sub_workflow, Workflow)

@staticmethod
@mark.unit_test
def test_outputs():
""" Test the expected outputs of a PipelineTeamT54A object """
pipeline = PipelineTeamT54A()
# 1 - 1 subject outputs
pipeline.subject_list = ['001']
helpers.test_pipeline_outputs(pipeline, [0, 9*4*1, 5*2*1, 8*2*2 + 4, 18])

# 2 - 4 subjects outputs
pipeline.subject_list = ['001', '002', '003', '004']
helpers.test_pipeline_outputs(pipeline, [0, 9*4*4, 5*2*4, 8*2*2 + 4, 18])

@staticmethod
@mark.unit_test
def test_subject_information():
""" Test the get_subject_information method """

# Get test files
test_file = join(Configuration()['directories']['test_data'], 'pipelines', 'events.tsv')
test_file_2 = join(Configuration()['directories']['test_data'],
'pipelines', 'events_resp.tsv')

# Prepare several scenarios
info_missed = PipelineTeamT54A.get_subject_information(test_file)
info_ok = PipelineTeamT54A.get_subject_information(test_file_2)

# Compare bunches to expected
bunch = info_missed[0]
assert isinstance(bunch, Bunch)
assert bunch.conditions == ['trial', 'gain', 'loss', 'difficulty', 'response', 'missed']
compare_float_2d_arrays(bunch.onsets, [
[4.071, 11.834, 27.535, 36.435],
[4.071, 11.834, 27.535, 36.435],
[4.071, 11.834, 27.535, 36.435],
[4.071, 11.834, 27.535, 36.435],
[6.459, 14.123, 29.615, 38.723],
[19.535]
])
compare_float_2d_arrays(bunch.durations, [
[2.388, 2.289, 2.08, 2.288],
[2.388, 2.289, 2.08, 2.288],
[2.388, 2.289, 2.08, 2.288],
[2.388, 2.289, 2.08, 2.288],
[0.0, 0.0, 0.0, 0.0],
[0.0]
])
compare_float_2d_arrays(bunch.amplitudes, [
[1.0, 1.0, 1.0, 1.0],
[14.0, 34.0, 10.0, 16.0],
[6.0, 14.0, 15.0, 17.0],
[1.0, 3.0, 10.0, 9.0],
[1.0, 1.0, 1.0, 1.0],
[1.0]
])
assert bunch.regressor_names == None
assert bunch.regressors == None

bunch = info_ok[0]
assert isinstance(bunch, Bunch)
assert bunch.conditions == ['trial', 'gain', 'loss', 'difficulty', 'response']
compare_float_2d_arrays(bunch.onsets, [
[4.071, 11.834, 27.535, 36.435],
[4.071, 11.834, 27.535, 36.435],
[4.071, 11.834, 27.535, 36.435],
[4.071, 11.834, 27.535, 36.435],
[6.459, 14.123, 29.615, 38.723]
])
compare_float_2d_arrays(bunch.durations, [
[2.388, 2.289, 2.08, 2.288],
[2.388, 2.289, 2.08, 2.288],
[2.388, 2.289, 2.08, 2.288],
[2.388, 2.289, 2.08, 2.288],
[0.0, 0.0, 0.0, 0.0]
])
compare_float_2d_arrays(bunch.amplitudes, [
[1.0, 1.0, 1.0, 1.0],
[14.0, 34.0, 10.0, 16.0],
[6.0, 14.0, 15.0, 17.0],
[1.0, 3.0, 10.0, 9.0],
[1.0, 1.0, 1.0, 1.0]
])
assert bunch.regressor_names == None
assert bunch.regressors == None

@staticmethod
@mark.unit_test
def test_parameters_file(remove_test_dir):
""" Test the get_parameters_file method """

confounds_file_path = join(
Configuration()['directories']['test_data'], 'pipelines', 'confounds.tsv')

PipelineTeamT54A.get_parameters_file(
confounds_file_path,
'fake_subject_id',
'fake_run_id',
TEMPORARY_DIR
)

# Check parameter file was created
assert exists(join(
TEMPORARY_DIR,
'parameters_file',
'parameters_file_sub-fake_subject_id_run-fake_run_id.tsv')
)

@staticmethod
@mark.unit_test
def test_one_sample_t_test_regressors():
""" Test the get_one_sample_t_test_regressors method """

regressors = PipelineTeamT54A.get_one_sample_t_test_regressors(['001', '002'])
assert regressors == {'group_mean': [1, 1]}

@staticmethod
@mark.unit_test
def test_two_sample_t_test_regressors():
""" Test the get_two_sample_t_test_regressors method """

regressors, groups = PipelineTeamT54A.get_two_sample_t_test_regressors(
['001', '003'], # equalRange group
['002', '004'], # equalIndifference group
['001', '002', '003', '004'] # all subjects
)
assert regressors == dict(
equalRange = [1, 0, 1, 0],
equalIndifference = [0, 1, 0, 1]
)
assert groups == [1, 2, 1, 2]

@staticmethod
@mark.pipeline_test
def test_execution():
""" Test the execution of a PipelineTeamT54A and compare results """
helpers.test_pipeline_evaluation('T54A')
45 changes: 44 additions & 1 deletion tests/test_conftest.py
@@ -17,7 +17,7 @@

from datetime import datetime

from pytest import mark, helpers, fixture
from pytest import mark, helpers, fixture, raises

from nipype import Node, Workflow
from nipype.interfaces.utility import Function
@@ -239,6 +239,49 @@ def download(self):
class TestConftest:
""" A class that contains all the unit tests for the conftest module."""

@staticmethod
@mark.unit_test
def test_test_outputs(set_test_directory):
""" Test the test_pipeline_outputs helper """

# Test pipeline
pipeline = MockupPipeline()
pipeline.subject_list = ['001', '002']

# Wrong length for nb_of_outputs
with raises(AssertionError):
helpers.test_pipeline_outputs(pipeline, [1,2,3])

# Wrong number of outputs
with raises(AssertionError):
helpers.test_pipeline_outputs(pipeline, [0, 2, 2, 20, 18])
with raises(AssertionError):
helpers.test_pipeline_outputs(pipeline, [2, 0, 2, 20, 18])
with raises(AssertionError):
helpers.test_pipeline_outputs(pipeline, [2, 2, 0, 20, 18])
with raises(AssertionError):
helpers.test_pipeline_outputs(pipeline, [2, 2, 2, 0, 18])
with raises(AssertionError):
helpers.test_pipeline_outputs(pipeline, [2, 2, 2, 20, 0])

# Right number of outputs
helpers.test_pipeline_outputs(pipeline, [2, 2, 2, 20, 18])

# Not a valid path name
pipeline.get_group_level_outputs = lambda : 'not_fo\rmatted'
with raises(AssertionError):
helpers.test_pipeline_outputs(pipeline, [2, 2, 2, 1, 18])

# Not a valid path name
pipeline.get_group_level_outputs = lambda : '{not_formatted'
with raises(AssertionError):
helpers.test_pipeline_outputs(pipeline, [2, 2, 2, 1, 18])

# Not a valid path name
pipeline.get_group_level_outputs = lambda : '{not_formatted'
with raises(AssertionError):
helpers.test_pipeline_outputs(pipeline, [2, 2, 2, 1, 18])

@staticmethod
@mark.unit_test
def test_test_correlation_results(mocker):
5 changes: 5 additions & 0 deletions tests/test_data/pipelines/events_resp.tsv
@@ -0,0 +1,5 @@
onset duration gain loss RT participant_response
4.071 4 14 6 2.388 weakly_accept
11.834 4 34 14 2.289 strongly_accept
27.535 4 10 15 2.08 strongly_reject
36.435 4 16 17 2.288 weakly_reject
5 changes: 5 additions & 0 deletions tests/test_data/pipelines/participants.tsv
@@ -0,0 +1,5 @@
participant_id group gender age
sub-001 equalIndifference M 24
sub-002 equalRange M 25
sub-003 equalIndifference F 27
sub-004 equalRange M 25
