From 9034678a9a34a0f76692f551e56dd7ab07a6c66e Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Mon, 21 Aug 2023 08:36:27 -0400 Subject: [PATCH] Codespell: a workflow to prevent typos * Added a github action to codespell main on push and PRs * Add rudimentary codespell config * Add some file ignores --- .codespellrc | 4 ++++ .github/workflows/codespell.yml | 22 +++++++++++++++++++ CONTRIBUTING.md | 2 +- README.md | 2 +- docs/README.md | 4 ++-- docs/ci-cd.md | 7 +++--- docs/configuration.md | 2 +- docs/description.md | 4 ++-- docs/pipelines.md | 10 ++++----- docs/status.md | 2 +- docs/testing.md | 6 ++--- examples/notebooks/reproduction_2T6S.ipynb | 2 +- examples/notebooks/reproduction_4TQ6.ipynb | 2 +- examples/notebooks/reproduction_98BT.ipynb | 2 +- examples/notebooks/reproduction_C88N.ipynb | 2 +- examples/notebooks/reproduction_J7F9.ipynb | 2 +- examples/notebooks/reproduction_Q6O0.ipynb | 2 +- examples/notebooks/reproduction_R9K3.ipynb | 2 +- examples/notebooks/reproduction_T54A.ipynb | 2 +- examples/notebooks/reproduction_V55J.ipynb | 2 +- examples/notebooks/reproduction_X19V.ipynb | 2 +- narps_open/data/description/__init__.py | 2 +- narps_open/data/results/__init__.py | 4 ++-- narps_open/pipelines/__init__.py | 8 +++---- narps_open/pipelines/team_0I4U_debug.py | 2 +- narps_open/pipelines/team_1KB2_debug.py | 6 ++--- narps_open/pipelines/team_2T6S.py | 4 ++-- narps_open/pipelines/team_4TQ6_wip.py | 6 ++--- narps_open/pipelines/team_98BT.py | 8 +++---- narps_open/pipelines/team_C88N.py | 6 ++--- narps_open/pipelines/team_J7F9.py | 2 +- narps_open/pipelines/team_Q6O0.py | 6 ++--- narps_open/pipelines/team_T54A.py | 6 ++--- narps_open/pipelines/team_V55J.py | 4 ++-- narps_open/pipelines/team_X19V.py | 6 ++--- .../pipelines/templates/template_afni.py | 6 ++--- .../pipelines/templates/template_fsl.py | 20 ++++++++--------- .../pipelines/templates/template_spm.py | 22 +++++++++---------- narps_open/runner.py | 2 +- narps_open/utils/__init__.py | 2 +- 
narps_open/utils/status.py | 4 ++-- tests/data/test_description.py | 2 +- tests/pipelines/test_pipelines.py | 4 ++-- tests/test_runner.py | 14 ++++++------ tests/utils/test_status.py | 2 +- 45 files changed, 130 insertions(+), 103 deletions(-) create mode 100644 .codespellrc create mode 100644 .github/workflows/codespell.yml diff --git a/.codespellrc b/.codespellrc new file mode 100644 index 00000000..f898a0f3 --- /dev/null +++ b/.codespellrc @@ -0,0 +1,4 @@ +[codespell] +skip = .git,*.pdf,*.svg,analysis_pipelines_full_descriptions.tsv +# softwares - key in data structures etc +ignore-words-list = te,fpr,fwe,softwares diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml new file mode 100644 index 00000000..3ebbf550 --- /dev/null +++ b/.github/workflows/codespell.yml @@ -0,0 +1,22 @@ +--- +name: Codespell + +on: + push: + branches: [main] + pull_request: + branches: [main] + +permissions: + contents: read + +jobs: + codespell: + name: Check for spelling errors + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Codespell + uses: codespell-project/actions-codespell@v2 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e859d510..1ddbda84 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,7 @@ General guidelines can be found [here](https://docs.github.com/en/get-started/qu ## Reproduce a pipeline :keyboard: :thinking: Not sure which one to start with ? You can have a look on [this table](https://github.com/Inria-Empenn/narps_open_pipelines/wiki/pipeline_status) giving the work progress status for each pipeline. This will help choosing the one that best suits you! -Need more information ? You can have a look to the pipeline decription [here](https://docs.google.com/spreadsheets/d/1FU_F6kdxOD4PRQDIHXGHS4zTi_jEVaUqY_Zwg0z6S64/edit?usp=sharing). Also feel free to use the `narps_open.utils.description` module of the project, as described [in the documentation](/docs/description.md). 
+Need more information ? You can have a look to the pipeline description [here](https://docs.google.com/spreadsheets/d/1FU_F6kdxOD4PRQDIHXGHS4zTi_jEVaUqY_Zwg0z6S64/edit?usp=sharing). Also feel free to use the `narps_open.utils.description` module of the project, as described [in the documentation](/docs/description.md). When you are ready, [start an issue](https://github.com/Inria-Empenn/narps_open_pipelines/issues/new/choose) and choose **Pipeline reproduction**! diff --git a/README.md b/README.md index 7a4704ea..4116010f 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ To get the pipelines running, please follow the installation steps in [INSTALL.m ### Contributing -:wave: Any help is welcome ! Follow the guidelines in [CONTRIBUTING.md](/CONTRIBUTING.md) if you wish to get involed ! +:wave: Any help is welcome ! Follow the guidelines in [CONTRIBUTING.md](/CONTRIBUTING.md) if you wish to get involved ! ## References diff --git a/docs/README.md b/docs/README.md index ddb51c1d..8c4fd662 100644 --- a/docs/README.md +++ b/docs/README.md @@ -5,8 +5,8 @@ Here are the available topics : * :runner: [running](/docs/running.md) tells you how to run pipelines in NARPS open pipelines -* :brain: [data](/docs/data.md) contains intructions to handle the data needed by the project -* :hammer_and_wrench: [environment](/docs/environment.md) contains intructions to handle the software environment needed by the project +* :brain: [data](/docs/data.md) contains instructions to handle the data needed by the project +* :hammer_and_wrench: [environment](/docs/environment.md) contains instructions to handle the software environment needed by the project * :goggles: [description](/docs/description.md) tells you how to get convenient descriptions of the pipelines, as written by the teams involved in NARPS. * :microscope: [testing](/docs/testing.md) details the testing features of the project, i.e.: how is the code tested ? 
* :package: [ci-cd](/docs/ci-cd.md) contains the information on how continuous integration and delivery (knowned as CI/CD) is set up. diff --git a/docs/ci-cd.md b/docs/ci-cd.md index 5c7bdba9..c292eed1 100644 --- a/docs/ci-cd.md +++ b/docs/ci-cd.md @@ -1,14 +1,14 @@ # :package: Continuous Integration (CI) and Continuous Deployment (CD) for the NARPS open pipelines project -:mega: This file descripes how CI/CD works for the project. +:mega: This file describes how CI/CD works for the project. ## :octopus: CI on GitHub GitHub allows to launch CI workflows using [Actions](https://docs.github.com/en/actions). -See GitHub's documentation on [worflow syntax](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions) to write your own workflows. +See GitHub's documentation on [workflow syntax](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions) to write your own workflows. -These worflows are YAML files, located in the `.github/workflows/` directory. +These workflows are YAML files, located in the `.github/workflows/` directory. ### CI scheme @@ -35,6 +35,7 @@ For now, the following workflows are set up: | Name / File | What does it do ? | When is it launched ? | Where does it run ? | How can I see the results ? | | ----------- | ----------- | ----------- | ----------- | ----------- | | [code_quality](/.github/workflows/code_quality.yml) | A static analysis of the python code (see the [testing](/docs/testing.md) topic of the documentation for more information). | For every push or pull_request if there are changes on `.py` files. | On GitHub servers. | Outputs (logs of pylint) are stored as [downloadable artifacts](https://docs.github.com/en/actions/managing-workflow-runs/downloading-workflow-artifacts) during 15 days after the push. 
| +| [codespell](/.github/workflows/codespell.yml) | A static analysis of the text files for commonly made typos using [codespell](https://github.com/codespell-project/codespell). | For every push or pull_request to the `main` branch. | On GitHub servers. | Outputs (logs of codespell) are stored as [downloadable artifacts](https://docs.github.com/en/actions/managing-workflow-runs/downloading-workflow-artifacts) during 15 days after the push. | | [pipeline_tests](/.github/workflows/pipelines.yml) | Runs all the tests for changed pipelines. | For every push or pull_request, if a pipeline file changed. | On Empenn runners. | Outputs (logs of pytest) are stored as downloadable artifacts during 15 days after the push. | | [test_changes](/.github/workflows/test_changes.yml) | It runs all the changed tests for the project. | For every push or pull_request, if a test file changed. | On Empenn runners. | Outputs (logs of pytest) are stored as downloadable artifacts during 15 days after the push. | | [unit_testing](/.github/workflows/unit_testing.yml) | It runs all the unit tests for the project (see the [testing](/docs/testing.md) topic of the documentation for more information). | For every push or pull_request, if a file changed inside `narps_open/`, or a file related to test execution. | On GitHub servers. | Outputs (logs of pytest) are stored as downloadable artifacts during 15 days after the push. | diff --git a/docs/configuration.md b/docs/configuration.md index 313dab86..5c61b892 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -32,4 +32,4 @@ configuration.config_file = '/path/to/my/own/configuration/file.toml' Configuration files must conform with the [TOML](https://toml.io/en/) format. See this [article](https://realpython.com/python-toml/#use-toml-as-a-configuration-format) on Real Python to know more about configuration files with TOML. 
-For python versions below 3.11, we use [tomli](https://pypi.org/project/tomli/) as a dependancy for parsing TOML. Starting from python 3.11, [tomllib](https://docs.python.org/3/library/tomllib.html) is included in the Standard Library and would replace tomli. +For python versions below 3.11, we use [tomli](https://pypi.org/project/tomli/) as a dependency for parsing TOML. Starting from python 3.11, [tomllib](https://docs.python.org/3/library/tomllib.html) is included in the Standard Library and would replace tomli. diff --git a/docs/description.md b/docs/description.md index f090cc5f..0a769229 100644 --- a/docs/description.md +++ b/docs/description.md @@ -1,10 +1,10 @@ # Access the descriptions of NARPS teams pipelines The file `narps_open/data/description/analysis_pipelines_full_descriptions.tsv` contains the description provided by each team participating to NARPS. -It is a convertion into tsv format (tab-separated values) of the [original .xlsx file published in NARPS](https://github.com/poldrack/narps/blob/1.0.1/ImageAnalyses/metadata_files/analysis_pipelines_for_analysis.xlsx +It is a conversion into tsv format (tab-separated values) of the [original .xlsx file published in NARPS](https://github.com/poldrack/narps/blob/1.0.1/ImageAnalyses/metadata_files/analysis_pipelines_for_analysis.xlsx ), which allows easier parsing with python. -The file `narps_open/data/description/analysis_pipelines_derived_descriptions.tsv` contains for each team a set of programatically usable data based on the textual descriptions of the previous file. This data is available in the `derived` sub dictionary (see examples hereafter). +The file `narps_open/data/description/analysis_pipelines_derived_descriptions.tsv` contains for each team a set of programmatically usable data based on the textual descriptions of the previous file. This data is available in the `derived` sub dictionary (see examples hereafter). 
The class `TeamDescription` of module `narps_open.data.description` acts as a parser for these two files. diff --git a/docs/pipelines.md b/docs/pipelines.md index 086f386f..fb7d2afc 100644 --- a/docs/pipelines.md +++ b/docs/pipelines.md @@ -22,16 +22,16 @@ The `narps_open.pipelines.Pipeline` class declares abstract methods that must be ```python def get_preprocessing(self): - """ Return a Nipype worflow describing the prerpocessing part of the pipeline """ + """ Return a Nipype workflow describing the preprocessing part of the pipeline """ def get_run_level_analysis(self): - """ Return a Nipype worflow describing the run level analysis part of the pipeline """ + """ Return a Nipype workflow describing the run level analysis part of the pipeline """ def get_subject_level_analysis(self): - """ Return a Nipype worflow describing the subject level analysis part of the pipeline """ + """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ def get_group_level_analysis(self): - """ Return a Nipype worflow describing the group level analysis part of the pipeline """ + """ Return a Nipype workflow describing the group level analysis part of the pipeline """ ``` If one of these four steps was not performed by the team, simply make the corresponding method return `None`. Here is an example: @@ -89,7 +89,7 @@ def get_group_level_outputs(self): """ Return the names of the files the group level analysis is supposed to generate. """ ``` -:warning: Do not declare the method if no files are generated by the corresponding step. For exemple, if no preprocessing was done by the team, the `get_preprocessing_outputs` method must not be implemented. +:warning: Do not declare the method if no files are generated by the corresponding step. For example, if no preprocessing was done by the team, the `get_preprocessing_outputs` method must not be implemented. You should use other pipeline attributes to generate the lists of outputs dynamically. 
E.g.: diff --git a/docs/status.md b/docs/status.md index 5d068954..f323cc8f 100644 --- a/docs/status.md +++ b/docs/status.md @@ -1,6 +1,6 @@ # Access the work progress status pipelines -The class `PipelineStatusReport` of module `narps_open.utils.status` allows to create a report containing the following informations for each pipeline: +The class `PipelineStatusReport` of module `narps_open.utils.status` allows to create a report containing the following information for each pipeline: * the software it uses (collected from the `categorized_for_analysis.analysis_SW` of the [team description](/docs/description.md)) ; * whether it uses data from fMRIprep or not ; * a list of issues related to it (the opened issues of the project that have the team ID inside their title or description) ; diff --git a/docs/testing.md b/docs/testing.md index c9f19473..2bd96584 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -1,6 +1,6 @@ # :microscope: How to test NARPS open pipelines ? -:mega: This file descripes the test suite and features for the project. +:mega: This file describes the test suite and features for the project. ## Static analysis @@ -46,7 +46,7 @@ Tests can be launched manually or while using CI (Continuous Integration). The main idea is to create one test file per source module (eg.: *tests/pipelines/test_pipelines.py* contains all the unit tests for the module `narps_open.pipelines`). -Each test file defines a class (in the example: `TestPipelines`), in which each test is written in a static method begining with `test_`. +Each test file defines a class (in the example: `TestPipelines`), in which each test is written in a static method beginning with `test_`. Finally we use one or several `assert` ; each one of them making the whole test fail if the assertion is False. One can also use the `raises` method of pytest, writing `with raises(Exception):` to test if a piece of code raised the expected Exception. 
See the reference [here](https://docs.pytest.org/en/6.2.x/reference.html?highlight=raises#pytest.raises). @@ -59,7 +59,7 @@ Use pytest [markers](https://docs.pytest.org/en/7.1.x/example/markers.html) to i ## Save time by downsampling data -Running pipelines over all the subjects is time and resource consuming. Ideally, this could be done only once we are confident that the pipeline is correcly reproduced, just to make sure the final values of correlations between original team results and the reproduced ones are above the expected thresholds. +Running pipelines over all the subjects is time and resource consuming. Ideally, this could be done only once we are confident that the pipeline is correctly reproduced, just to make sure the final values of correlations between original team results and the reproduced ones are above the expected thresholds. But most of the time we need to run pipelines earlier in the development process, and for this step we need a (quick) answer whether it is going the right way or not. diff --git a/examples/notebooks/reproduction_2T6S.ipynb b/examples/notebooks/reproduction_2T6S.ipynb index d504af63..b7b74a18 100755 --- a/examples/notebooks/reproduction_2T6S.ipynb +++ b/examples/notebooks/reproduction_2T6S.ipynb @@ -16,7 +16,7 @@ "This notebook reproduce the analysis made by team **2T6S** for the NARPS experiment (Botvinik-Nezer & al, 2020). 
\n", "\n", "**To use this notebook :** \n", - "- Download fMRIprep datas available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", + "- Download fMRIprep data available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", "- Modify the paths:\n", " - **exp_dir**: directory where the ds001734-download repository is stored\n", " - **result_dir**: directory where the intermediate and final repositories will be stored\n", diff --git a/examples/notebooks/reproduction_4TQ6.ipynb b/examples/notebooks/reproduction_4TQ6.ipynb index 303b8722..0fffcf00 100644 --- a/examples/notebooks/reproduction_4TQ6.ipynb +++ b/examples/notebooks/reproduction_4TQ6.ipynb @@ -10,7 +10,7 @@ "This notebook reproduce the analysis made by team **4TQ6** for the NARPS experiment (Botvinik-Nezer & al, 2020). \n", "\n", "**To use this notebook :** \n", - "- Download fMRIprep datas available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", + "- Download fMRIprep data available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", "- Modify the paths:\n", " - **exp_dir**: directory where the ds001734-download repository is stored\n", " - **result_dir**: directory where the intermediate and final repositories will be stored\n", diff --git a/examples/notebooks/reproduction_98BT.ipynb b/examples/notebooks/reproduction_98BT.ipynb index c98a569d..6bb9d8c1 100755 --- a/examples/notebooks/reproduction_98BT.ipynb +++ b/examples/notebooks/reproduction_98BT.ipynb @@ -16,7 +16,7 @@ "This notebook reproduce the analysis made by team **98BT** for the NARPS experiment (Botvinik-Nezer & al, 2020). 
\n", "\n", "**To use this notebook :** \n", - "- Download fMRIprep datas available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", + "- Download fMRIprep data available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", "- Modify the paths:\n", " - **exp_dir**: directory where the ds001734-download repository is stored\n", " - **result_dir**: directory where the intermediate and final repositories will be stored\n", diff --git a/examples/notebooks/reproduction_C88N.ipynb b/examples/notebooks/reproduction_C88N.ipynb index 7509c6da..606f0fd9 100755 --- a/examples/notebooks/reproduction_C88N.ipynb +++ b/examples/notebooks/reproduction_C88N.ipynb @@ -16,7 +16,7 @@ "This notebook reproduce the analysis made by team **C88N** for the NARPS experiment (Botvinik-Nezer & al, 2020). \n", "\n", "**To use this notebook :** \n", - "- Download fMRIprep datas available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", + "- Download fMRIprep data available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", "- Modify the paths:\n", " - **exp_dir**: directory where the ds001734-download repository is stored\n", " - **result_dir**: directory where the intermediate and final repositories will be stored\n", diff --git a/examples/notebooks/reproduction_J7F9.ipynb b/examples/notebooks/reproduction_J7F9.ipynb index e2a6279e..3cf36664 100644 --- a/examples/notebooks/reproduction_J7F9.ipynb +++ b/examples/notebooks/reproduction_J7F9.ipynb @@ -10,7 +10,7 @@ "This notebook reproduce the analysis made by team **J7F9** for the NARPS experiment (Botvinik-Nezer & al, 2020). 
\n", "\n", "**To use this notebook :** \n", - "- Download fMRIprep datas available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", + "- Download fMRIprep data available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", "- Modify the paths:\n", " - **exp_dir**: directory where the ds001734-download repository is stored\n", " - **result_dir**: directory where the intermediate and final repositories will be stored\n", diff --git a/examples/notebooks/reproduction_Q6O0.ipynb b/examples/notebooks/reproduction_Q6O0.ipynb index e9f6e15b..b2e4800c 100755 --- a/examples/notebooks/reproduction_Q6O0.ipynb +++ b/examples/notebooks/reproduction_Q6O0.ipynb @@ -10,7 +10,7 @@ "This notebook reproduce the analysis made by team **Q6O0** for the NARPS experiment (Botvinik-Nezer & al, 2020). \n", "\n", "**To use this notebook :** \n", - "- Download fMRIprep datas available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", + "- Download fMRIprep data available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", "- Modify the paths:\n", " - **exp_dir**: directory where the ds001734-download repository is stored\n", " - **result_dir**: directory where the intermediate and final repositories will be stored\n", diff --git a/examples/notebooks/reproduction_R9K3.ipynb b/examples/notebooks/reproduction_R9K3.ipynb index e730a2e6..bdc66870 100644 --- a/examples/notebooks/reproduction_R9K3.ipynb +++ b/examples/notebooks/reproduction_R9K3.ipynb @@ -17,7 +17,7 @@ "\n", "\n", "**To use this notebook :** \n", - "- Download fMRIprep datas available 
[**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) \n", + "- Download fMRIprep data available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) \n", " or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", "\n", "Botvinik-Nezer, R., Holzmeister, F., Camerer, C.F., Dreber, A., Huber, J., Johannesson, M., Kirchler, M., Iwanir, R., Mumford, J.A., ..., Nichols, T.E., Poldrack, R.A., Schonberg, T. (2020). Variability in the analysis of a single neuroimaging dataset by many teams. Nature. https://doi.org/10.1038/s41586-020-2314-9." diff --git a/examples/notebooks/reproduction_T54A.ipynb b/examples/notebooks/reproduction_T54A.ipynb index eb6aea54..18651341 100644 --- a/examples/notebooks/reproduction_T54A.ipynb +++ b/examples/notebooks/reproduction_T54A.ipynb @@ -10,7 +10,7 @@ "This notebook reproduce the analysis made by team **T54A** for the NARPS experiment (Botvinik-Nezer & al, 2020). \n", "\n", "**To use this notebook :** \n", - "- Download fMRIprep datas available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", + "- Download fMRIprep data available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", "- Modify the paths:\n", " - **exp_dir**: directory where the ds001734-download repository is stored\n", " - **result_dir**: directory where the intermediate and final repositories will be stored\n", diff --git a/examples/notebooks/reproduction_V55J.ipynb b/examples/notebooks/reproduction_V55J.ipynb index dba13202..c8663723 100644 --- a/examples/notebooks/reproduction_V55J.ipynb +++ b/examples/notebooks/reproduction_V55J.ipynb @@ -17,7 +17,7 @@ "\n", "\n", "**To use this notebook :** \n", - "- Download fMRIprep datas available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in 
[**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", + "- Download fMRIprep data available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", "- Modify the paths:\n", " - **exp_dir**: directory where the ds001734-download repository is stored\n", " - **result_dir**: directory where the intermediate and final repositories will be stored\n", diff --git a/examples/notebooks/reproduction_X19V.ipynb b/examples/notebooks/reproduction_X19V.ipynb index 01f08fb0..847a7875 100755 --- a/examples/notebooks/reproduction_X19V.ipynb +++ b/examples/notebooks/reproduction_X19V.ipynb @@ -10,7 +10,7 @@ "This notebook reproduce the analysis made by team **X19V** for the NARPS experiment (Botvinik-Nezer & al, 2020). \n", "\n", "**To use this notebook :** \n", - "- Download fMRIprep datas available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", + "- Download fMRIprep data available [**here**](https://openneuro.org/datasets/ds001734/versions/1.0.4) or in [**datalad**](http://datasets.datalad.org/?dir=/workshops/nih-2017/ds001734).\n", "- Modify the paths:\n", " - **exp_dir**: directory where the ds001734-download repository is stored\n", " - **result_dir**: directory where the intermediate and final repositories will be stored\n", diff --git a/narps_open/data/description/__init__.py b/narps_open/data/description/__init__.py index 000e1b65..0637f6c7 100644 --- a/narps_open/data/description/__init__.py +++ b/narps_open/data/description/__init__.py @@ -82,7 +82,7 @@ def _load(self): ... Furthermore, we parse the csv derived_description_file. The first line of this file being already a second level identifier, - the firest level identifier will always be 'derived'. + the first level identifier will always be 'derived'. 
This gives -for example- the following key for the dictionary: 'derived.n_participants' """ diff --git a/narps_open/data/results/__init__.py b/narps_open/data/results/__init__.py index a750ad8a..914dcbf3 100644 --- a/narps_open/data/results/__init__.py +++ b/narps_open/data/results/__init__.py @@ -2,7 +2,7 @@ # coding: utf-8 """ This module allows to get Neurovault collections corresponding - to results from teams involed in NARPS + to results from teams involved in NARPS """ from os import remove, makedirs @@ -20,7 +20,7 @@ class ResultsCollection(): """ Represents a Neurovault collections corresponding - to results from teams involed in NARPS. + to results from teams involved in NARPS. """ def __init__(self, team_id: str): diff --git a/narps_open/pipelines/__init__.py b/narps_open/pipelines/__init__.py index 966df03f..c3834fb6 100644 --- a/narps_open/pipelines/__init__.py +++ b/narps_open/pipelines/__init__.py @@ -266,19 +266,19 @@ def fwhm(self, value): @abstractmethod def get_preprocessing(self): - """ Return a Nipype worflow describing the prerpocessing part of the pipeline """ + """ Return a Nipype workflow describing the preprocessing part of the pipeline """ @abstractmethod def get_run_level_analysis(self): - """ Return a Nipype worflow describing the run level analysis part of the pipeline """ + """ Return a Nipype workflow describing the run level analysis part of the pipeline """ @abstractmethod def get_subject_level_analysis(self): - """ Return a Nipype worflow describing the subject level analysis part of the pipeline """ + """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ @abstractmethod def get_group_level_analysis(self): - """ Return a Nipype worflow describing the group level analysis part of the pipeline """ + """ Return a Nipype workflow describing the group level analysis part of the pipeline """ def get_preprocessing_outputs(self): """ Return the names of the files the preprocessing is supposed to 
generate. """ diff --git a/narps_open/pipelines/team_0I4U_debug.py b/narps_open/pipelines/team_0I4U_debug.py index 52607571..bdd1b3e0 100755 --- a/narps_open/pipelines/team_0I4U_debug.py +++ b/narps_open/pipelines/team_0I4U_debug.py @@ -394,7 +394,7 @@ def get_subset_contrasts(file_list, subject_list, participants_file, method): Parameters : - file_list : original file list selected by selectfiles node - subject_list : list of subject IDs that are in the wanted group for the analysis - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics - method: str, one of "equalRange", "equalIndifference" or "groupComp" This function return the file list containing only the files belonging to subject in the wanted group. diff --git a/narps_open/pipelines/team_1KB2_debug.py b/narps_open/pipelines/team_1KB2_debug.py index 08afb164..d56b939b 100755 --- a/narps_open/pipelines/team_1KB2_debug.py +++ b/narps_open/pipelines/team_1KB2_debug.py @@ -425,7 +425,7 @@ def get_subgroups_contrasts(copes, varcopes, subject_ids, participants_file): - copes: original file list selected by selectfiles node - varcopes: original file list selected by selectfiles node - subject_ids: list of subject IDs that are analyzed - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics This function return the file list containing only the files belonging to subject in the wanted group. ''' @@ -471,7 +471,7 @@ def get_subgroups_contrasts(copes, varcopes, subject_ids, participants_file): def get_regs(equalRange_id, equalIndifference_id, method, subject_list): """ - Create dictionnary of regressors for group analysis. + Create dictionary of regressors for group analysis. 
Parameters: - equalRange_id: list of str, ids of subjects in equal range group @@ -479,7 +479,7 @@ def get_regs(equalRange_id, equalIndifference_id, method, subject_list): - method: one of "equalRange", "equalIndifference" or "groupComp" Returns: - - regressors: dict, dictionnary of regressors used to distinguish groups in FSL group analysis + - regressors: dict, dictionary of regressors used to distinguish groups in FSL group analysis """ if method == "equalRange": regressors = dict(group_mean = [1 for i in range(len(equalRange_id))]) diff --git a/narps_open/pipelines/team_2T6S.py b/narps_open/pipelines/team_2T6S.py index a533095e..42aa344f 100755 --- a/narps_open/pipelines/team_2T6S.py +++ b/narps_open/pipelines/team_2T6S.py @@ -269,7 +269,7 @@ def get_subject_level_analysis(self): smooth = Node(Smooth(fwhm = self.fwhm), name = 'smooth') - # Funcion node get_subject_infos - get subject specific condition information + # Function node get_subject_infos - get subject specific condition information subject_infos = Node(Function( function = self.get_subject_infos, input_names = ['event_files', 'runs'], @@ -396,7 +396,7 @@ def get_subset_contrasts(file_list, subject_list, participants_file): Parameters : - file_list : original file list selected by selectfiles node - subject_list : list of subject IDs that are in the wanted group for the analysis - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics Returns : - equal_indifference_id : a list of subject ids in the equalIndifference group diff --git a/narps_open/pipelines/team_4TQ6_wip.py b/narps_open/pipelines/team_4TQ6_wip.py index f5453e35..623df025 100755 --- a/narps_open/pipelines/team_4TQ6_wip.py +++ b/narps_open/pipelines/team_4TQ6_wip.py @@ -289,7 +289,7 @@ def get_subgroups_contrasts(copes, varcopes, subject_ids, participants_file): - copes: original file list selected by selectfiles node - varcopes: original file list 
selected by selectfiles node - subject_ids: list of subject IDs that are analyzed - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics This function return the file list containing only the files belonging to subject in the wanted group. ''' @@ -343,7 +343,7 @@ def get_subgroups_contrasts(copes, varcopes, subject_ids, participants_file): def get_regs(equalRange_id, equalIndifference_id, method, subject_list): """ - Create dictionnary of regressors for group analysis. + Create dictionary of regressors for group analysis. Parameters: - equalRange_id: list of str, ids of subjects in equal range group @@ -352,7 +352,7 @@ def get_regs(equalRange_id, equalIndifference_id, method, subject_list): - subject_list: list of str, ids of subject for which to do the analysis Returns: - - regressors: dict, dictionnary of regressors used to distinguish groups in FSL group analysis + - regressors: dict, dictionary of regressors used to distinguish groups in FSL group analysis """ if method == "equalRange": regressors = dict(group_mean = [1 for i in range(len(equalRange_id))]) diff --git a/narps_open/pipelines/team_98BT.py b/narps_open/pipelines/team_98BT.py index c43cd11f..1c68e0ce 100755 --- a/narps_open/pipelines/team_98BT.py +++ b/narps_open/pipelines/team_98BT.py @@ -73,14 +73,14 @@ def get_dartel_input(structural_files): def get_fieldmap_infos(info_fmap, magnitude): """ - Function to get informations necessary to compute the fieldmap. + Function to get information necessary to compute the fieldmap. 
Parameters: - - info_fmap: str, file with fieldmap informations + - info_fmap: str, file with fieldmap information - magnitude: list of str, list of magnitude files Returns: - - TE: float, echo time obtained from fieldmap informations file + - TE: float, echo time obtained from fieldmap information file - magnitude_file: str, necessary file to compute fieldmap """ import json @@ -601,7 +601,7 @@ def get_subset_contrasts(file_list, method, subject_list, participants_file): Parameters : - file_list : original file list selected by selectfiles node - subject_list : list of subject IDs that are in the wanted group for the analysis - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics - method: str, one of "equalRange", "equalIndifference" or "groupComp" This function return the file list containing only the files belonging to subject in the wanted group. diff --git a/narps_open/pipelines/team_C88N.py b/narps_open/pipelines/team_C88N.py index 6e965db6..723b4fa7 100755 --- a/narps_open/pipelines/team_C88N.py +++ b/narps_open/pipelines/team_C88N.py @@ -20,7 +20,7 @@ def get_subject_infos_gain(event_files): Gain and loss amounts were used as parametric regressors. Parameters : - - event_files : list of files containing events informations for each run + - event_files : list of files containing events information for each run Returns : - subject_info : list of Bunch for 1st level analysis. @@ -91,7 +91,7 @@ def get_subject_infos_loss(event_files): Gain and loss amounts were used as parametric regressors. Parameters : - - event_files : list of files containing events informations for each run + - event_files : list of files containing events information for each run Returns : - subject_info : list of Bunch for 1st level analysis. 
@@ -384,7 +384,7 @@ def get_subset_contrasts(file_list, method, subject_list, participants_file): Parameters : - file_list : original file list selected by selectfiles node - subject_list : list of subject IDs that are in the wanted group for the analysis - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics - method: str, one of "equalRange", "equalIndifference" or "groupComp" This function return the file list containing only the files belonging to subject in the wanted group. diff --git a/narps_open/pipelines/team_J7F9.py b/narps_open/pipelines/team_J7F9.py index b96f2db6..98d74488 100644 --- a/narps_open/pipelines/team_J7F9.py +++ b/narps_open/pipelines/team_J7F9.py @@ -323,7 +323,7 @@ def get_subset_contrasts(file_list, method, subject_list, participants_file): Parameters : - file_list : original file list selected by selectfiles node - subject_list : list of subject IDs that are in the wanted group for the analysis - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics - method: str, one of "equalRange", "equalIndifference" or "groupComp" This function return the file list containing only the files belonging to subject in the wanted group. diff --git a/narps_open/pipelines/team_Q6O0.py b/narps_open/pipelines/team_Q6O0.py index cb9f925f..5eb0462a 100755 --- a/narps_open/pipelines/team_Q6O0.py +++ b/narps_open/pipelines/team_Q6O0.py @@ -20,7 +20,7 @@ def get_subject_infos_gain(event_files): Gain and loss amounts were used as parametric regressors. Parameters : - - event_files : list of files containing events informations for each run + - event_files : list of files containing events information for each run Returns : - subject_info : list of Bunch for 1st level analysis. 
@@ -96,7 +96,7 @@ def get_subject_infos_loss(event_files): Gain and loss amounts were used as parametric regressors. Parameters : - - event_files : list of files containing events informations for each run + - event_files : list of files containing events information for each run Returns : - subject_info : list of Bunch for 1st level analysis. @@ -456,7 +456,7 @@ def get_subset_contrasts(file_list, method, subject_list, participants_file): Parameters : - file_list : original file list selected by selectfiles node - subject_list : list of subject IDs that are in the wanted group for the analysis - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics - method: str, one of "equalRange", "equalIndifference" or "groupComp" This function return the file list containing only the files belonging to subject in the wanted group. diff --git a/narps_open/pipelines/team_T54A.py b/narps_open/pipelines/team_T54A.py index 1e18eae8..1bb10dcd 100644 --- a/narps_open/pipelines/team_T54A.py +++ b/narps_open/pipelines/team_T54A.py @@ -361,7 +361,7 @@ def get_subgroups_contrasts(copes, varcopes, subject_ids, participants_file): - copes: original file list selected by selectfiles node - varcopes: original file list selected by selectfiles node - subject_ids: list of subject IDs that are analyzed - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics This function return the file list containing only the files belonging to subject in the wanted group. ''' @@ -413,7 +413,7 @@ def get_subgroups_contrasts(copes, varcopes, subject_ids, participants_file): def get_regs(equalRange_id, equalIndifference_id, method, subject_list): """ - Create dictionnary of regressors for group analysis. + Create dictionary of regressors for group analysis. 
Parameters: - equalRange_id: list of str, ids of subjects in equal range group @@ -422,7 +422,7 @@ def get_regs(equalRange_id, equalIndifference_id, method, subject_list): - subject_list: list of str, ids of subject for which to do the analysis Returns: - - regressors: dict, dictionnary of regressors used to distinguish groups in FSL group analysis + - regressors: dict, dictionary of regressors used to distinguish groups in FSL group analysis """ if method == "equalRange": regressors = dict(group_mean = [1 for i in range(len(equalRange_id))]) diff --git a/narps_open/pipelines/team_V55J.py b/narps_open/pipelines/team_V55J.py index 69e90f05..9f930608 100755 --- a/narps_open/pipelines/team_V55J.py +++ b/narps_open/pipelines/team_V55J.py @@ -151,7 +151,7 @@ def get_preprocessing(exp_dir, result_dir, working_dir, output_dir, subject_list coreg = Node(Coregister(jobtype = 'estimate', write_mask = False), name = 'coreg') ## Segmentation - # We perfromed segmentation on the structural image for each subject by using the "Segment" + # We performed segmentation on the structural image for each subject by using the "Segment" # routine in SPM12, with default values for each parameter and using the template tissue # probability maps (grey matter, white matter, CSF, bone, soft tissue, and air/background) # in the tpm folder of SPM12. @@ -504,7 +504,7 @@ def get_subset_contrasts(file_list, method, subject_list, participants_file): Parameters : - file_list : original file list selected by selectfiles node - subject_list : list of subject IDs that are in the wanted group for the analysis - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics - method: str, one of "equalRange", "equalIndifference" or "groupComp" This function return the file list containing only the files belonging to subject in the wanted group. 
diff --git a/narps_open/pipelines/team_X19V.py b/narps_open/pipelines/team_X19V.py index 743615da..3e0108ef 100755 --- a/narps_open/pipelines/team_X19V.py +++ b/narps_open/pipelines/team_X19V.py @@ -344,7 +344,7 @@ def get_subgroups_contrasts(copes, varcopes, subject_ids, participants_file): - copes: original file list selected by selectfiles node - varcopes: original file list selected by selectfiles node - subject_ids: list of subject IDs that are analyzed - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics This function return the file list containing only the files belonging to subject in the wanted group. ''' @@ -398,7 +398,7 @@ def get_subgroups_contrasts(copes, varcopes, subject_ids, participants_file): def get_regs(equalRange_id, equalIndifference_id, method, subject_list): """ - Create dictionnary of regressors for group analysis. + Create dictionary of regressors for group analysis. Parameters: - equalRange_id: list of str, ids of subjects in equal range group @@ -407,7 +407,7 @@ def get_regs(equalRange_id, equalIndifference_id, method, subject_list): - subject_list: list of str, ids of subjects for which we do the analysis Returns: - - regressors: dict, dictionnary of regressors used to distinguish groups in FSL group analysis + - regressors: dict, dictionary of regressors used to distinguish groups in FSL group analysis """ if method == "equalRange": regressors = dict(group_mean = [1 for i in range(len(equalRange_id))]) diff --git a/narps_open/pipelines/templates/template_afni.py b/narps_open/pipelines/templates/template_afni.py index 7d38de8a..dc751f3a 100644 --- a/narps_open/pipelines/templates/template_afni.py +++ b/narps_open/pipelines/templates/template_afni.py @@ -4,8 +4,8 @@ """ This template can be use to reproduce a pipeline using AFNI as main software. -- Replace all occurences of 48CD by the actual id of the team. 
-- All lines beging [INFO], are meant to help you during the reproduction, these can be removed +- Replace all occurrences of 48CD by the actual id of the team. +- All lines starting with [INFO], are meant to help you during the reproduction, these can be removed eventually. -- Also remove lines beging with [TODO], once you did what they suggested. +- Also remove lines starting with [TODO], once you did what they suggested. """ diff --git a/narps_open/pipelines/templates/template_fsl.py b/narps_open/pipelines/templates/template_fsl.py index b0cd55a9..92ac3deb 100644 --- a/narps_open/pipelines/templates/template_fsl.py +++ b/narps_open/pipelines/templates/template_fsl.py @@ -4,10 +4,10 @@ """ This template can be use to reproduce a pipeline using FSL as main software. -- Replace all occurences of 48CD by the actual id of the team. -- All lines beging [INFO], are meant to help you during the reproduction, these can be removed +- Replace all occurrences of 48CD by the actual id of the team. +- All lines starting with [INFO], are meant to help you during the reproduction, these can be removed eventually. -- Also remove lines beging with [TODO], once you did what they suggested. +- Also remove lines starting with [TODO], once you did what they suggested. """ # [TODO] Only import modules you use further in te code, remove others from the import section @@ -44,7 +44,7 @@ def __init__(self): pass def get_preprocessing(self): - """ Return a Nipype worflow describing the prerpocessing part of the pipeline """ + """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ # [INFO] The following part stays the same for all preprocessing pipelines @@ -92,7 +92,7 @@ def get_preprocessing(self): # - the name of the variable in which you store the Node object # - the 'name' attribute of the Node # [TODO] The node_function refers to a NiPype interface that you must import - # at the begining of the file. + # at the beginning of the file. 
node_name = Node( node_function, name='node_name' @@ -134,7 +134,7 @@ def get_preprocessing(self): # [INFO] There was no run level analysis for the pipelines using FSL def get_run_level_analysis(self): - """ Return a Nipype worflow describing the run level analysis part of the pipeline """ + """ Return a Nipype workflow describing the run level analysis part of the pipeline """ return None # [INFO] This function is used in the subject level analysis pipelines using FSL @@ -219,7 +219,7 @@ def get_contrasts(): return [trial, effect_gain, effect_loss] def get_subject_level_analysis(self): - """ Return a Nipype worflow describing the subject level analysis part of the pipeline """ + """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ # [INFO] The following part stays the same for all pipelines @@ -294,7 +294,7 @@ def get_subject_level_analysis(self): # - the name of the variable in which you store the Node object # - the 'name' attribute of the Node # [TODO] The node_function refers to a NiPype interface that you must import - # at the begining of the file. + # at the beginning of the file. node_name = Node( node_function, name = 'node_name' @@ -575,7 +575,7 @@ def get_group_level_analysis_sub_workflow(self, method): # - the name of the variable in which you store the Node object # - the 'name' attribute of the Node # [TODO] The node_function refers to a NiPype interface that you must import - # at the begining of the file. + # at the beginning of the file. node_name = Node( node_function, name = 'node_name' @@ -624,7 +624,7 @@ def get_group_level_analysis_sub_workflow(self, method): ] ) # Complete with other links between nodes - # [INFO] Here whe define the contrasts used for the group level analysis, depending on the + # [INFO] Here we define the contrasts used for the group level analysis, depending on the # method used. 
if method in ('equalRange', 'equalIndifference'): contrasts = [('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1])] diff --git a/narps_open/pipelines/templates/template_spm.py b/narps_open/pipelines/templates/template_spm.py index 7e0ec6fc..24e9a51d 100644 --- a/narps_open/pipelines/templates/template_spm.py +++ b/narps_open/pipelines/templates/template_spm.py @@ -4,10 +4,10 @@ """ This template can be use to reproduce a pipeline using SPM as main software. -- Replace all occurences of 48CD by the actual id of the team. -- All lines beging [INFO], are meant to help you during the reproduction, these can be removed +- Replace all occurrences of 48CD by the actual id of the team. +- All lines starting with [INFO], are meant to help you during the reproduction, these can be removed eventually. -- Also remove lines beging with [TODO], once you did what they suggested. +- Also remove lines starting with [TODO], once you did what they suggested. """ # [TODO] Only import modules you use further in te code, remove others from the import section @@ -43,7 +43,7 @@ def __init__(self): pass def get_preprocessing(self): - """ Return a Nipype worflow describing the prerpocessing part of the pipeline """ + """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ # [INFO] The following part stays the same for all preprocessing pipelines @@ -91,7 +91,7 @@ def get_preprocessing(self): # - the name of the variable in which you store the Node object # - the 'name' attribute of the Node # [TODO] The node_function refers to a NiPype interface that you must import - # at the begining of the file. + # at the beginning of the file. 
node_name = Node( node_function, name='node_name' @@ -133,7 +133,7 @@ def get_preprocessing(self): # [INFO] There was no run level analysis for the pipelines using SPM def get_run_level_analysis(self): - """ Return a Nipype worflow describing the run level analysis part of the pipeline """ + """ Return a Nipype workflow describing the run level analysis part of the pipeline """ return None # [INFO] This function is used in the subject level analysis pipelines using SPM @@ -262,7 +262,7 @@ def get_contrasts(): return [trial, effect_gain, effect_loss] def get_subject_level_analysis(self): - """ Return a Nipype worflow describing the subject level analysis part of the pipeline """ + """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ # [INFO] The following part stays the same for all pipelines @@ -337,7 +337,7 @@ def get_subject_level_analysis(self): # - the name of the variable in which you store the Node object # - the 'name' attribute of the Node # [TODO] The node_function refers to a NiPype interface that you must import - # at the begining of the file. + # at the beginning of the file. node_name = Node( node_function, name = 'node_name' @@ -395,7 +395,7 @@ def get_subset_contrasts( Parameters : - file_list : original file list selected by selectfiles node - subject_list : list of subject IDs that are in the wanted group for the analysis - - participants_file: str, file containing participants caracteristics + - participants_file: str, file containing participants characteristics Returns : - equal_indifference_id : a list of subject ids in the equalIndifference group @@ -515,7 +515,7 @@ def get_group_level_analysis_sub_workflow(self, method): # - the name of the variable in which you store the Node object # - the 'name' attribute of the Node # [TODO] The node_function refers to a NiPype interface that you must import - # at the begining of the file. + # at the beginning of the file. 
node_name = Node( node_function, name = 'node_name' @@ -547,7 +547,7 @@ def get_group_level_analysis_sub_workflow(self, method): ] ) - # [INFO] Here whe define the contrasts used for the group level analysis, depending on the + # [INFO] Here we define the contrasts used for the group level analysis, depending on the # method used. if method in ('equalRange', 'equalIndifference'): contrasts = [('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1])] diff --git a/narps_open/runner.py b/narps_open/runner.py index 4a6d4289..7a9594bf 100644 --- a/narps_open/runner.py +++ b/narps_open/runner.py @@ -80,7 +80,7 @@ def team_id(self, value: str) -> None: if implemented_pipelines[self._team_id] is None: raise NotImplementedError(f'Pipeline not implemented for team : {self.team_id}') - # Instanciate the pipeline + # Instantiate the pipeline class_type = getattr( import_module('narps_open.pipelines.team_'+self._team_id), implemented_pipelines[self._team_id]) diff --git a/narps_open/utils/__init__.py b/narps_open/utils/__init__.py index 54198316..8cbdfe1d 100644 --- a/narps_open/utils/__init__.py +++ b/narps_open/utils/__init__.py @@ -28,7 +28,7 @@ def show_download_progress(count, block_size, total_size): def get_subject_id(file_name: str) -> str: """ Return the id of the subject corresponding to the passed file name. Return None if the file name is not associated with any subject. - TODO : a feature to be handled globaly to parse data in a file name. + TODO : a feature to be handled globally to parse data in a file name. 
""" key = 'subject_id' if key not in file_name: diff --git a/narps_open/utils/status.py b/narps_open/utils/status.py index 4e294d76..cc4eb8a7 100644 --- a/narps_open/utils/status.py +++ b/narps_open/utils/status.py @@ -56,7 +56,7 @@ def generate(self): self.contents[team_id] = {} - # Get softwares used in the pipeline, from the team description + # Get software used in the pipeline, from the team description description = TeamDescription(team_id) self.contents[team_id]['softwares'] = \ description.categorized_for_analysis['analysis_SW'] @@ -82,7 +82,7 @@ def generate(self): has_file = team_id in teams_having_pipeline if is_implemeted and not has_file: - raise AttributeError(f'Pipeline {team_id} refered as implemented with no file') + raise AttributeError(f'Pipeline {team_id} referred as implemented with no file') if not is_implemeted and not has_issues and not has_file: self.contents[team_id]['status'] = '2-idle' diff --git a/tests/data/test_description.py b/tests/data/test_description.py index 8bacabb9..3bdc7c2c 100644 --- a/tests/data/test_description.py +++ b/tests/data/test_description.py @@ -31,7 +31,7 @@ def test_creation(): with raises(AttributeError): TeamDescription('wrong_id') - # Instatiation is ok + # Instantiation is ok assert TeamDescription('2T6S') is not None @staticmethod diff --git a/tests/pipelines/test_pipelines.py b/tests/pipelines/test_pipelines.py index a9001832..9016aeb7 100644 --- a/tests/pipelines/test_pipelines.py +++ b/tests/pipelines/test_pipelines.py @@ -105,11 +105,11 @@ def test_create(): with raises(TypeError): pipeline = Pipeline() - # 2 - check the instanciation of an incomplete class inheriting from Pipeline + # 2 - check the instantiation of an incomplete class inheriting from Pipeline with raises(TypeError): pipeline = InheritingErrorPipeline() - # 3 - check the instanciation of a class inheriting from Pipeline + # 3 - check the instantiation of a class inheriting from Pipeline pipeline = InheritingPipeline() # 4 - check 
accessing the attributes of Pipeline through an inheriting class diff --git a/tests/test_runner.py b/tests/test_runner.py index 0079905d..12a2059c 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -48,7 +48,7 @@ def write_to_file(_, text_to_write: str, file_path: str): file.write(text_to_write) def create_workflow(self, workflow_name: str): - """ Return a nipype worflow with two nodes writing in a file """ + """ Return a nipype workflow with two nodes writing in a file """ node_1 = Node(Function( input_names = ['_', 'text_to_write', 'file_path'], output_names = ['_'], @@ -185,19 +185,19 @@ class TestPipelineRunner: def test_create(): """ Test the creation of a PipelineRunner object """ - # 1 - Instanciate a runner without team id + # 1 - Instantiate a runner without team id with raises(KeyError): PipelineRunner() - # 2 - Instanciate a runner with wrong team id + # 2 - Instantiate a runner with wrong team id with raises(KeyError): PipelineRunner('wrong_id') - # 3 - Instanciate a runner with a not implemented team id + # 3 - Instantiate a runner with a not implemented team id with raises(NotImplementedError): PipelineRunner('08MQ') - # 4 - Instanciate a runner with an implemented team id + # 4 - Instantiate a runner with an implemented team id runner = PipelineRunner('2T6S') assert isinstance(runner.pipeline, PipelineTeam2T6S) assert runner.team_id == '2T6S' @@ -254,14 +254,14 @@ def test_start_nok(): with raises(Exception): runner.start() - # 2 - test starting a pipeline with wrong worflow type + # 2 - test starting a pipeline with wrong workflow type runner = PipelineRunner('2T6S') runner._pipeline = MockupWrongPipeline() # hack the runner by setting a test Pipeline with raises(AttributeError): runner.start() - # 2b - test starting a pipeline with wrong worflow type + # 2b - test starting a pipeline with wrong workflow type runner = PipelineRunner('2T6S') runner._pipeline = MockupWrongPipeline2() # hack the runner by setting a test Pipeline diff --git 
a/tests/utils/test_status.py b/tests/utils/test_status.py index 5d4c5da5..0e98ef83 100644 --- a/tests/utils/test_status.py +++ b/tests/utils/test_status.py @@ -27,7 +27,7 @@ @fixture def mock_api_issue(mocker): - """ Create a mock GitHub API response for successful querry on open issues + """ Create a mock GitHub API response for successful query on open issues (Querying the actual project would lead to non reporducible results) This method uses the mocker from pytest-mock to replace `requests.get`,